Compare commits

...

25 Commits

Author SHA1 Message Date
psychedelicious
825f163492 chore: bump version to v5.4.0a1 2024-10-30 11:06:01 +11:00
psychedelicious
bc42205593 fix(ui): remember to disable isFiltering when finishing filtering 2024-10-30 09:19:30 +11:00
psychedelicious
2e3cba6416 fix(ui): flash of original layer when applying filter/segment
Let the parent module adopt the filtered/segmented image instead of destroying it and making the parent re-create it, which results in a brief flash of the parent layer's original objects before the new image is rendered.
2024-10-30 09:19:30 +11:00
psychedelicious
7852aacd11 fix(ui): track whether graph succeeded in runGraphAndReturnImageOutput
This prevents extraneous graph cancel requests when cleaning up the abort signal after a successful run of a graph.
2024-10-30 09:19:30 +11:00
psychedelicious
6cccd67ecd feat(ui): update SAM module to w/ minor improvements from filter module 2024-10-30 09:19:30 +11:00
psychedelicious
a7a89c9de1 feat(ui): use more resilient logic in canvas filter module, same as in SAM module 2024-10-30 09:19:30 +11:00
psychedelicious
5ca8eed89e tidy(ui): remove all buffer renderer interactions in SAM module
We don't use the buffer renderer in this module; there's no reason to clear it.
2024-10-30 09:19:30 +11:00
psychedelicious
c885c3c9a6 fix(ui): filter layer data pushed to parent rendered when saving as 2024-10-30 09:19:30 +11:00
Mary Hipp
d81c38c350 update announcements 2024-10-29 09:53:13 -04:00
Riku
92d5b73215 fix(ui): seamless zod parameter cleanup 2024-10-29 20:43:44 +11:00
Riku
097e92db6a fix(ui): always write seamless metadata
Ensure images without seamless enabled correctly reset the setting
when all parameters are recalled
2024-10-29 20:43:44 +11:00
Riku
84c6209a45 feat(ui): display seamless values in metadata viewer 2024-10-29 20:43:44 +11:00
Riku
107e48808a fix(ui): recall seamless settings 2024-10-29 20:43:44 +11:00
dunkeroni
47168b5505 chore: make ruff 2024-10-29 14:07:20 +11:00
dunkeroni
58152ec981 fix preview progress bar pre-denoise 2024-10-29 14:07:20 +11:00
dunkeroni
c74afbf332 convert to bgr on sdxl t2i 2024-10-29 14:07:20 +11:00
psychedelicious
7cdda00a54 feat(ui): rearrange canvas paste back nodes to save an image step
We were scaling the unscaled image and mask down before doing the paste-back, but this adds an extraneous step & image output.

We can do the paste-back first, then scale to output size after. So instead of 2 resizes before the paste-back, we have 1 resize after.

The end result is the same.
2024-10-29 11:13:31 +11:00
psychedelicious
a74282bce6 feat(ui): graph builders use objects for arg instead of many args 2024-10-29 11:13:31 +11:00
psychedelicious
107f048c7a feat(ui): extract canvas output node prefix to constant 2024-10-29 11:13:31 +11:00
Ryan Dick
a2486a5f06 Remove unused prediction_type and upcast_attention from from_single_file(...) calls. 2024-10-28 13:05:17 -04:00
Ryan Dick
07ab116efb Remove load_safety_checker=False from calls to from_single_file(...).
This param has been deprecated, and by including it (even when set to
False) the safety checker automatically gets downloaded.
2024-10-28 13:05:17 -04:00
Ryan Dick
1a13af3c7a Fix huggingface_hub.errors imports after version bump. 2024-10-28 13:05:17 -04:00
Ryan Dick
f2966a2594 Fix changed import for FromOriginalControlNetMixin after diffusers bump. 2024-10-28 13:05:17 -04:00
Ryan Dick
58bb97e3c6 Bump diffusers, accelerate, and huggingface-hub. 2024-10-28 13:05:17 -04:00
psychedelicious
a84aa5c049 fix(ui): canvas alerts blocking metadata panel 2024-10-27 09:46:01 +11:00
35 changed files with 625 additions and 327 deletions

View File

@@ -13,6 +13,7 @@ from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
from diffusers.schedulers.scheduling_dpmsolver_sde import DPMSolverSDEScheduler
from diffusers.schedulers.scheduling_tcd import TCDScheduler
from diffusers.schedulers.scheduling_utils import SchedulerMixin as Scheduler
from PIL import Image
from pydantic import field_validator
from torchvision.transforms.functional import resize as tv_resize
from transformers import CLIPVisionModelWithProjection
@@ -510,6 +511,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
context: InvocationContext,
t2i_adapters: Optional[Union[T2IAdapterField, list[T2IAdapterField]]],
ext_manager: ExtensionsManager,
bgr_mode: bool = False,
) -> None:
if t2i_adapters is None:
return
@@ -519,6 +521,10 @@ class DenoiseLatentsInvocation(BaseInvocation):
t2i_adapters = [t2i_adapters]
for t2i_adapter_field in t2i_adapters:
image = context.images.get_pil(t2i_adapter_field.image.image_name)
if bgr_mode: # SDXL t2i trained on cv2's BGR outputs, but PIL won't convert straight to BGR
r, g, b = image.split()
image = Image.merge("RGB", (b, g, r))
ext_manager.add_extension(
T2IAdapterExt(
node_context=context,
@@ -623,6 +629,10 @@ class DenoiseLatentsInvocation(BaseInvocation):
max_unet_downscale = 8
elif t2i_adapter_model_config.base == BaseModelType.StableDiffusionXL:
max_unet_downscale = 4
# SDXL adapters are trained on cv2's BGR outputs
r, g, b = image.split()
image = Image.merge("RGB", (b, g, r))
else:
raise ValueError(f"Unexpected T2I-Adapter base model type: '{t2i_adapter_model_config.base}'.")
@@ -900,7 +910,8 @@ class DenoiseLatentsInvocation(BaseInvocation):
# ext = extension_field.to_extension(exit_stack, context, ext_manager)
# ext_manager.add_extension(ext)
self.parse_controlnet_field(exit_stack, context, self.control, ext_manager)
self.parse_t2i_adapter_field(exit_stack, context, self.t2i_adapter, ext_manager)
bgr_mode = self.unet.unet.base == BaseModelType.StableDiffusionXL
self.parse_t2i_adapter_field(exit_stack, context, self.t2i_adapter, ext_manager, bgr_mode)
# ext: t2i/ip adapter
ext_manager.run_callback(ExtensionCallbackType.SETUP, denoise_ctx)

View File

@@ -117,8 +117,6 @@ class StableDiffusionDiffusersModel(GenericDiffusersLoader):
load_class = load_classes[config.base][config.variant]
except KeyError as e:
raise Exception(f"No diffusers pipeline known for base={config.base}, variant={config.variant}") from e
prediction_type = config.prediction_type.value
upcast_attention = config.upcast_attention
# Without SilenceWarnings we get log messages like this:
# site-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
@@ -129,13 +127,7 @@ class StableDiffusionDiffusersModel(GenericDiffusersLoader):
# ['text_model.embeddings.position_ids']
with SilenceWarnings():
pipeline = load_class.from_single_file(
config.path,
torch_dtype=self._torch_dtype,
prediction_type=prediction_type,
upcast_attention=upcast_attention,
load_safety_checker=False,
)
pipeline = load_class.from_single_file(config.path, torch_dtype=self._torch_dtype)
if not submodel_type:
return pipeline

View File

@@ -20,7 +20,7 @@ from typing import Optional
import requests
from huggingface_hub import HfApi, configure_http_backend, hf_hub_url
from huggingface_hub.utils._errors import RepositoryNotFoundError, RevisionNotFoundError
from huggingface_hub.errors import RepositoryNotFoundError, RevisionNotFoundError
from pydantic.networks import AnyHttpUrl
from requests.sessions import Session

View File

@@ -33,7 +33,7 @@ class PreviewExt(ExtensionBase):
def initial_preview(self, ctx: DenoiseContext):
self.callback(
PipelineIntermediateState(
step=-1,
step=0,
order=ctx.scheduler.order,
total_steps=len(ctx.inputs.timesteps),
timestep=int(ctx.scheduler.config.num_train_timesteps), # TODO: is there any code which uses it?

View File

@@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, Tuple, Union
import diffusers
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.loaders import FromOriginalControlNetMixin
from diffusers.loaders.single_file_model import FromOriginalModelMixin
from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor
from diffusers.models.controlnet import ControlNetConditioningEmbedding, ControlNetOutput, zero_module
from diffusers.models.embeddings import (
@@ -32,7 +32,9 @@ from invokeai.backend.util.logging import InvokeAILogger
logger = InvokeAILogger.get_logger(__name__)
class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlNetMixin):
# NOTE(ryand): I'm not the original author of this code, but for future reference, it appears that this class was copied
# from diffusers in order to add support for the encoder_attention_mask argument.
class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalModelMixin):
"""
A ControlNet model.

View File

@@ -682,7 +682,8 @@
"recallParameters": "Recall Parameters",
"recallParameter": "Recall {{label}}",
"scheduler": "Scheduler",
"seamless": "Seamless",
"seamlessXAxis": "Seamless X Axis",
"seamlessYAxis": "Seamless Y Axis",
"seed": "Seed",
"steps": "Steps",
"strength": "Image to image strength",
@@ -2081,13 +2082,11 @@
},
"whatsNew": {
"whatsNewInInvoke": "What's New in Invoke",
"canvasV2Announcement": {
"newCanvas": "A powerful new control canvas",
"newLayerTypes": "New layer types for even more control",
"fluxSupport": "Support for the Flux family of models",
"readReleaseNotes": "Read Release Notes",
"watchReleaseVideo": "Watch Release Video",
"watchUiUpdatesOverview": "Watch UI Updates Overview"
}
"line1": "<ItalicComponent>Select Object</ItalicComponent> tool for precise object selection and editing",
"line2": "Expanded Flux support, now with Global Reference Images",
"line3": "Improved tooltips and context menus",
"readReleaseNotes": "Read Release Notes",
"watchRecentReleaseVideos": "Watch Recent Release Videos",
"watchUiUpdatesOverview": "Watch UI Updates Overview"
}
}

View File

@@ -36,7 +36,7 @@ const FilterContent = memo(
const config = useStore(adapter.filterer.$filterConfig);
const isCanvasFocused = useIsRegionFocused('canvas');
const isProcessing = useStore(adapter.filterer.$isProcessing);
const hasProcessed = useStore(adapter.filterer.$hasProcessed);
const hasImageState = useStore(adapter.filterer.$hasImageState);
const autoProcess = useAppSelector(selectAutoProcess);
const onChangeFilterConfig = useCallback(
@@ -118,7 +118,7 @@ const FilterContent = memo(
variant="ghost"
onClick={adapter.filterer.processImmediate}
loadingText={t('controlLayers.filter.process')}
isDisabled={isProcessing || !isValid || autoProcess}
isDisabled={isProcessing || !isValid || (autoProcess && hasImageState)}
>
{t('controlLayers.filter.process')}
{isProcessing && <Spinner ms={3} boxSize={5} color="base.600" />}
@@ -136,7 +136,7 @@ const FilterContent = memo(
onClick={adapter.filterer.apply}
loadingText={t('controlLayers.filter.apply')}
variant="ghost"
isDisabled={isProcessing || !isValid || !hasProcessed}
isDisabled={isProcessing || !isValid || !hasImageState}
>
{t('controlLayers.filter.apply')}
</Button>
@@ -145,22 +145,22 @@ const FilterContent = memo(
as={Button}
loadingText={t('controlLayers.selectObject.saveAs')}
variant="ghost"
isDisabled={isProcessing || !isValid || !hasProcessed}
isDisabled={isProcessing || !isValid || !hasImageState}
rightIcon={<PiCaretDownBold />}
>
{t('controlLayers.selectObject.saveAs')}
</MenuButton>
<MenuList>
<MenuItem isDisabled={!isValid || !hasProcessed} onClick={saveAsInpaintMask}>
<MenuItem isDisabled={isProcessing || !isValid || !hasImageState} onClick={saveAsInpaintMask}>
{t('controlLayers.newInpaintMask')}
</MenuItem>
<MenuItem isDisabled={!isValid || !hasProcessed} onClick={saveAsRegionalGuidance}>
<MenuItem isDisabled={isProcessing || !isValid || !hasImageState} onClick={saveAsRegionalGuidance}>
{t('controlLayers.newRegionalGuidance')}
</MenuItem>
<MenuItem isDisabled={!isValid || !hasProcessed} onClick={saveAsControlLayer}>
<MenuItem isDisabled={isProcessing || !isValid || !hasImageState} onClick={saveAsControlLayer}>
{t('controlLayers.newControlLayer')}
</MenuItem>
<MenuItem isDisabled={!isValid || !hasProcessed} onClick={saveAsRasterLayer}>
<MenuItem isDisabled={isProcessing || !isValid || !hasImageState} onClick={saveAsRasterLayer}>
{t('controlLayers.newRasterLayer')}
</MenuItem>
</MenuList>

View File

@@ -115,7 +115,7 @@ const SelectObjectContent = memo(
onClick={adapter.segmentAnything.processImmediate}
loadingText={t('controlLayers.selectObject.process')}
variant="ghost"
isDisabled={isProcessing || !hasPoints || autoProcess}
isDisabled={isProcessing || !hasPoints || (autoProcess && hasImageState)}
>
{t('controlLayers.selectObject.process')}
{isProcessing && <Spinner ms={3} boxSize={5} color="base.600" />}

View File

@@ -1,29 +1,36 @@
import { deepClone } from 'common/util/deepClone';
import { withResult, withResultAsync } from 'common/util/result';
import type { CanvasEntityAdapterControlLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterControlLayer';
import type { CanvasEntityAdapterRasterLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { CanvasObjectImage } from 'features/controlLayers/konva/CanvasObject/CanvasObjectImage';
import { addCoords, getKonvaNodeDebugAttrs, getPrefixedId } from 'features/controlLayers/konva/util';
import { selectAutoProcess } from 'features/controlLayers/store/canvasSettingsSlice';
import type { FilterConfig } from 'features/controlLayers/store/filters';
import { getFilterForModel, IMAGE_FILTERS } from 'features/controlLayers/store/filters';
import type { CanvasEntityType, CanvasImageState } from 'features/controlLayers/store/types';
import { imageDTOToImageObject } from 'features/controlLayers/store/util';
import Konva from 'konva';
import { debounce } from 'lodash-es';
import { atom } from 'nanostores';
import { atom, computed } from 'nanostores';
import type { Logger } from 'roarr';
import { serializeError } from 'serialize-error';
import { buildSelectModelConfig } from 'services/api/hooks/modelsByType';
import { isControlNetOrT2IAdapterModelConfig } from 'services/api/types';
import stableHash from 'stable-hash';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
type CanvasEntityFiltererConfig = {
processDebounceMs: number;
/**
* The debounce time in milliseconds for processing the filter.
*/
PROCESS_DEBOUNCE_MS: number;
};
const DEFAULT_CONFIG: CanvasEntityFiltererConfig = {
processDebounceMs: 1000,
PROCESS_DEBOUNCE_MS: 1000,
};
export class CanvasEntityFilterer extends CanvasModuleBase {
@@ -34,20 +41,65 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
readonly manager: CanvasManager;
readonly log: Logger;
imageState: CanvasImageState | null = null;
subscriptions = new Set<() => void>();
config: CanvasEntityFiltererConfig = DEFAULT_CONFIG;
subscriptions = new Set<() => void>();
/**
* The AbortController used to cancel the filter processing.
*/
abortController: AbortController | null = null;
/**
* Whether the module is currently filtering an image.
*/
$isFiltering = atom<boolean>(false);
$hasProcessed = atom<boolean>(false);
/**
* The hash of the last processed config. This is used to prevent re-processing the same config.
*/
$lastProcessedHash = atom<string>('');
/**
* Whether the module is currently processing the filter.
*/
$isProcessing = atom<boolean>(false);
/**
* The config for the filter.
*/
$filterConfig = atom<FilterConfig>(IMAGE_FILTERS.canny_edge_detection.buildDefaults());
/**
* The initial filter config, used to reset the filter config.
*/
$initialFilterConfig = atom<FilterConfig | null>(null);
/**
* The ephemeral image state of the filtered image.
*/
$imageState = atom<CanvasImageState | null>(null);
/**
* Whether the module has an image state. This is a computed value based on $imageState.
*/
$hasImageState = computed(this.$imageState, (imageState) => imageState !== null);
/**
* The filtered image object module, if it exists.
*/
imageModule: CanvasObjectImage | null = null;
/**
* The Konva nodes for the module.
*/
konva: {
/**
* The main Konva group node for the module. This is added to the parent layer on start, and removed on teardown.
*/
group: Konva.Group;
};
KONVA_GROUP_NAME = `${this.type}:group`;
constructor(parent: CanvasEntityAdapterRasterLayer | CanvasEntityAdapterControlLayer) {
super();
this.id = getPrefixedId(this.type);
@@ -57,9 +109,17 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
this.log = this.manager.buildLogger(this);
this.log.debug('Creating filter module');
this.konva = {
group: new Konva.Group({ name: this.KONVA_GROUP_NAME }),
};
}
/**
* Adds event listeners needed while filtering the entity.
*/
subscribe = () => {
// As the filter config changes, process the filter
this.subscriptions.add(
this.$filterConfig.listen(() => {
if (this.manager.stateApi.getSettings().autoProcess && this.$isFiltering.get()) {
@@ -67,6 +127,7 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
}
})
);
// When auto-process is enabled, process the filter
this.subscriptions.add(
this.manager.stateApi.createStoreSubscription(selectAutoProcess, (autoProcess) => {
if (autoProcess && this.$isFiltering.get()) {
@@ -76,11 +137,18 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
);
};
/**
* Removes event listeners used while filtering the entity.
*/
unsubscribe = () => {
this.subscriptions.forEach((unsubscribe) => unsubscribe());
this.subscriptions.clear();
};
/**
* Starts the filter module.
* @param config The filter config to start with. If omitted, the default filter config is used.
*/
start = (config?: FilterConfig) => {
const filteringAdapter = this.manager.stateApi.$filteringAdapter.get();
if (filteringAdapter) {
@@ -90,30 +158,57 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
this.log.trace('Initializing filter');
this.subscribe();
// Reset any previous state
this.resetEphemeralState();
this.$isFiltering.set(true);
// Update the konva group's position to match the parent entity
const pixelRect = this.parent.transformer.$pixelRect.get();
const position = addCoords(this.parent.state.position, pixelRect);
this.konva.group.setAttrs(position);
// Add the group to the parent layer
this.parent.konva.layer.add(this.konva.group);
if (config) {
// If a config is provided, use it
this.$filterConfig.set(config);
} else if (this.parent.type === 'control_layer_adapter' && this.parent.state.controlAdapter.model) {
this.$initialFilterConfig.set(config);
} else {
this.$filterConfig.set(this.createInitialFilterConfig());
}
this.$initialFilterConfig.set(this.$filterConfig.get());
this.subscribe();
this.manager.stateApi.$filteringAdapter.set(this.parent);
if (this.manager.stateApi.getSettings().autoProcess) {
this.processImmediate();
}
};
createInitialFilterConfig = (): FilterConfig => {
if (this.parent.type === 'control_layer_adapter' && this.parent.state.controlAdapter.model) {
// If the parent is a control layer adapter, we should check if the model has a default filter and set it if so
const selectModelConfig = buildSelectModelConfig(
this.parent.state.controlAdapter.model.key,
isControlNetOrT2IAdapterModelConfig
);
const modelConfig = this.manager.stateApi.runSelector(selectModelConfig);
// This always returns a filter
const filter = getFilterForModel(modelConfig);
this.$filterConfig.set(filter.buildDefaults());
return filter.buildDefaults();
} else {
// Otherwise, set the default filter
this.$filterConfig.set(IMAGE_FILTERS.canny_edge_detection.buildDefaults());
}
this.$isFiltering.set(true);
this.manager.stateApi.$filteringAdapter.set(this.parent);
if (this.manager.stateApi.getSettings().autoProcess) {
this.processImmediate();
// Otherwise, use the default filter
return IMAGE_FILTERS.canny_edge_detection.buildDefaults();
}
};
/**
* Processes the filter, updating the module's state and rendering the filtered image.
*/
processImmediate = async () => {
const config = this.$filterConfig.get();
const filterData = IMAGE_FILTERS[config.type];
@@ -125,6 +220,12 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
return;
}
const hash = stableHash({ config });
if (hash === this.$lastProcessedHash.get()) {
this.log.trace('Already processed config');
return;
}
this.log.trace({ config }, 'Processing filter');
const rect = this.parent.transformer.getRelativeRect();
@@ -158,78 +259,105 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
this.manager.stateApi.runGraphAndReturnImageOutput({
graph,
outputNodeId,
// The filter graph should always be prepended to the queue so it's processed ASAP.
prepend: true,
/**
* The filter node may need to download a large model. Currently, the models required by the filter nodes are
* downloaded just-in-time, as required by the filter. If we use a timeout here, we might get into a catch-22
* where the filter node is waiting for the model to download, but the download gets canceled if the filter
* node times out.
*
* (I suspect the model download will actually _not_ be canceled if the graph is canceled, but let's not chance it!)
*
* TODO(psyche): Figure out a better way to handle this. Probably need to download the models ahead of time.
*/
// timeout: 5000,
/**
* The filter node should be able to cancel the request if it's taking too long. This will cancel the graph's
* queue item and clear any event listeners on the request.
*/
signal: controller.signal,
})
);
// If there is an error, log it and bail out of this processing run
if (filterResult.isErr()) {
this.log.error({ error: serializeError(filterResult.error) }, 'Error processing filter');
this.log.error({ error: serializeError(filterResult.error) }, 'Error filtering');
this.$isProcessing.set(false);
// Clean up the abort controller as needed
if (!this.abortController.signal.aborted) {
this.abortController.abort();
}
this.abortController = null;
return;
}
this.log.trace({ imageDTO: filterResult.value }, 'Filter processed');
this.imageState = imageDTOToImageObject(filterResult.value);
this.log.trace({ imageDTO: filterResult.value }, 'Filtered');
await this.parent.bufferRenderer.setBuffer(this.imageState, true);
// Prepare the ephemeral image state
const imageState = imageDTOToImageObject(filterResult.value);
this.$imageState.set(imageState);
// Destroy any existing masked image and create a new one
if (this.imageModule) {
this.imageModule.destroy();
}
this.imageModule = new CanvasObjectImage(imageState, this);
// Force update the masked image - after awaiting, the image will be rendered (in memory)
await this.imageModule.update(imageState, true);
this.konva.group.add(this.imageModule.konva.group);
// The processing is complete, so we can set the last processed hash and set isProcessing to false
this.$lastProcessedHash.set(hash);
this.$isProcessing.set(false);
this.$hasProcessed.set(true);
// Clean up the abort controller as needed
if (!this.abortController.signal.aborted) {
this.abortController.abort();
}
this.abortController = null;
};
process = debounce(this.processImmediate, this.config.processDebounceMs);
/**
* Debounced version of processImmediate.
*/
process = debounce(this.processImmediate, this.config.PROCESS_DEBOUNCE_MS);
/**
* Applies the filter image to the entity, replacing the entity's objects with the filtered image.
*/
apply = () => {
const imageState = this.imageState;
if (!imageState) {
const filteredImageObjectState = this.$imageState.get();
if (!filteredImageObjectState) {
this.log.warn('No image state to apply filter to');
return;
}
this.log.trace('Applying filter');
this.parent.bufferRenderer.commitBuffer();
this.log.trace('Applying');
// Have the parent adopt the image module - this prevents a flash of the original layer content before the filtered
// image is rendered
if (this.imageModule) {
this.parent.renderer.adoptObjectRenderer(this.imageModule);
}
// Rasterize the entity, replacing the objects with the masked image
const rect = this.parent.transformer.getRelativeRect();
this.manager.stateApi.rasterizeEntity({
entityIdentifier: this.parent.entityIdentifier,
imageObject: imageState,
imageObject: filteredImageObjectState,
position: {
x: Math.round(rect.x),
y: Math.round(rect.y),
},
replaceObjects: true,
});
this.imageState = null;
this.unsubscribe();
this.$isFiltering.set(false);
this.$hasProcessed.set(false);
this.manager.stateApi.$filteringAdapter.set(null);
// Final cleanup and teardown, returning user to main canvas UI
this.resetEphemeralState();
this.teardown();
};
/**
* Saves the filtered image as a new entity of the given type.
* @param type The type of entity to save the filtered image as.
*/
saveAs = (type: Exclude<CanvasEntityType, 'reference_image'>) => {
const imageState = this.imageState;
const imageState = this.$imageState.get();
if (!imageState) {
this.log.warn('No image state to apply filter to');
return;
}
this.log.trace('Applying filter');
this.parent.bufferRenderer.commitBuffer();
this.log.trace(`Saving as ${type}`);
const rect = this.parent.transformer.getRelativeRect();
const arg = {
overrides: {
@@ -259,34 +387,53 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
assert<Equals<typeof type, never>>(false);
}
this.imageState = null;
// Final cleanup and teardown, returning user to main canvas UI
this.resetEphemeralState();
this.teardown();
};
resetEphemeralState = () => {
// First we need to bail out of any processing
if (this.abortController && !this.abortController.signal.aborted) {
this.abortController.abort();
}
this.abortController = null;
// If the image module exists, and is a child of the group, destroy it. It might not be a child of the group if
// the user has applied the filter and the image has been adopted by the parent entity.
if (this.imageModule && this.imageModule.konva.group.parent === this.konva.group) {
this.imageModule.destroy();
this.imageModule = null;
}
const initialFilterConfig = this.$initialFilterConfig.get() ?? this.createInitialFilterConfig();
this.$filterConfig.set(initialFilterConfig);
this.$imageState.set(null);
this.$lastProcessedHash.set('');
this.$isProcessing.set(false);
};
teardown = () => {
this.$initialFilterConfig.set(null);
this.konva.group.remove();
this.unsubscribe();
this.$isFiltering.set(false);
this.$hasProcessed.set(false);
this.manager.stateApi.$filteringAdapter.set(null);
};
/**
* Resets the module (e.g. remove all points and the mask image).
*
* Does not cancel or otherwise complete the segmenting process.
*/
reset = () => {
this.log.trace('Resetting filter');
this.abortController?.abort();
this.abortController = null;
this.parent.bufferRenderer.clearBuffer();
this.parent.transformer.updatePosition();
this.parent.renderer.syncKonvaCache(true);
this.imageState = null;
this.$hasProcessed.set(false);
this.log.trace('Resetting');
this.resetEphemeralState();
};
cancel = () => {
this.log.trace('Cancelling filter');
this.reset();
this.unsubscribe();
this.$isProcessing.set(false);
this.$isFiltering.set(false);
this.$hasProcessed.set(false);
this.manager.stateApi.$filteringAdapter.set(null);
this.log.trace('Canceling');
this.resetEphemeralState();
this.teardown();
};
repr = () => {
@@ -294,11 +441,14 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
id: this.id,
type: this.type,
path: this.path,
parent: this.parent.id,
config: this.config,
imageState: deepClone(this.$imageState.get()),
$isFiltering: this.$isFiltering.get(),
$hasProcessed: this.$hasProcessed.get(),
$lastProcessedHash: this.$lastProcessedHash.get(),
$isProcessing: this.$isProcessing.get(),
$filterConfig: this.$filterConfig.get(),
konva: { group: getKonvaNodeDebugAttrs(this.konva.group) },
};
};
@@ -309,5 +459,6 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
}
this.abortController = null;
this.unsubscribe();
this.konva.group.destroy();
};
}

View File

@@ -1,6 +1,7 @@
import { Mutex } from 'async-mutex';
import { deepClone } from 'common/util/deepClone';
import type { CanvasEntityBufferObjectRenderer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityBufferObjectRenderer';
import type { CanvasEntityFilterer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityFilterer';
import type { CanvasEntityObjectRenderer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityObjectRenderer';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
@@ -21,7 +22,8 @@ export class CanvasObjectImage extends CanvasModuleBase {
| CanvasEntityObjectRenderer
| CanvasEntityBufferObjectRenderer
| CanvasStagingAreaModule
| CanvasSegmentAnythingModule;
| CanvasSegmentAnythingModule
| CanvasEntityFilterer;
readonly manager: CanvasManager;
readonly log: Logger;
@@ -43,6 +45,7 @@ export class CanvasObjectImage extends CanvasModuleBase {
| CanvasEntityBufferObjectRenderer
| CanvasStagingAreaModule
| CanvasSegmentAnythingModule
| CanvasEntityFilterer
) {
super();
this.id = state.id;

View File

@@ -114,7 +114,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
subscriptions = new Set<() => void>();
/**
* The AbortController used to cancel the filter processing.
* The AbortController used to cancel the segment processing.
*/
abortController: AbortController | null = null;
@@ -178,16 +178,16 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
$invert = atom<boolean>(false);
/**
* The masked image object, if it exists.
* The masked image object module, if it exists.
*/
maskedImage: CanvasObjectImage | null = null;
imageModule: CanvasObjectImage | null = null;
/**
* The Konva nodes for the module.
*/
konva: {
/**
* The main Konva group node for the module.
* The main Konva group node for the module. This is added to the parent layer on start, and removed on teardown.
*/
group: Konva.Group;
/**
@@ -488,7 +488,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
};
/**
* Adds event listeners needed while segmenting the entity.
* Removes event listeners used while segmenting the entity.
*/
unsubscribe = () => {
this.subscriptions.forEach((unsubscribe) => unsubscribe());
@@ -607,18 +607,18 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
this.$imageState.set(imageState);
// Destroy any existing masked image and create a new one
if (this.maskedImage) {
this.maskedImage.destroy();
if (this.imageModule) {
this.imageModule.destroy();
}
if (this.konva.maskTween) {
this.konva.maskTween.destroy();
this.konva.maskTween = null;
}
this.maskedImage = new CanvasObjectImage(imageState, this);
this.imageModule = new CanvasObjectImage(imageState, this);
// Force update the masked image - after awaiting, the image will be rendered (in memory)
await this.maskedImage.update(imageState, true);
await this.imageModule.update(imageState, true);
// Update the compositing rect to match the image size
this.konva.compositingRect.setAttrs({
@@ -629,7 +629,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
// Now we can add the masked image to the mask group. It will be rendered above the compositing rect, but should be
// under it, so we will move the compositing rect to the top
this.konva.maskGroup.add(this.maskedImage.konva.group);
this.konva.maskGroup.add(this.imageModule.konva.group);
this.konva.compositingRect.moveToTop();
// Cache the group to ensure the mask is rendered correctly w/ opacity
@@ -666,7 +666,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
process = debounce(this.processImmediate, this.config.PROCESS_DEBOUNCE_MS);
/**
* Applies the segmented image to the entity.
* Applies the segmented image to the entity, replacing the entity's objects with the masked image.
*/
apply = () => {
const imageState = this.$imageState.get();
@@ -676,10 +676,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
}
this.log.trace('Applying');
// Commit the buffer, which will move the buffer to from the layers' buffer renderer to its main renderer
this.parent.bufferRenderer.commitBuffer();
// Rasterize the entity, this time replacing the objects with the masked image
// Rasterize the entity, replacing the objects with the masked image
const rect = this.parent.transformer.getRelativeRect();
this.manager.stateApi.rasterizeEntity({
entityIdentifier: this.parent.entityIdentifier,
@@ -697,7 +694,8 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
};
/**
* Applies the segmented image to the entity.
* Saves the segmented image as a new entity of the given type.
* @param type The type of entity to save the segmented image as.
*/
saveAs = (type: Exclude<CanvasEntityType, 'reference_image'>) => {
const imageState = this.$imageState.get();
@@ -707,8 +705,11 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
}
this.log.trace(`Saving as ${type}`);
// Clear the buffer - we are creating a new entity, so we don't want to keep the old one
this.parent.bufferRenderer.clearBuffer();
// Have the parent adopt the image module - this prevents a flash of the original layer content before the
// segmented image is rendered
if (this.imageModule) {
this.parent.renderer.adoptObjectRenderer(this.imageModule);
}
// Create the new entity with the masked image as its only object
const rect = this.parent.transformer.getRelativeRect();
@@ -801,8 +802,12 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
for (const point of this.$points.get()) {
point.konva.circle.destroy();
}
if (this.maskedImage) {
this.maskedImage.destroy();
// If the image module exists, and is a child of the group, destroy it. It might not be a child of the group if
// the user has applied the segmented image and the image has been adopted by the parent entity.
if (this.imageModule && this.imageModule.konva.group.parent === this.konva.group) {
this.imageModule.destroy();
this.imageModule = null;
}
if (this.konva.maskTween) {
this.konva.maskTween.destroy();
@@ -820,10 +825,6 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
// Reset non-ephemeral konva nodes
this.konva.compositingRect.visible(false);
this.konva.maskGroup.clearCache();
// The parent module's buffer should be reset & forcibly sync the cache
this.parent.bufferRenderer.clearBuffer();
this.parent.renderer.syncKonvaCache(true);
};
/**
@@ -888,7 +889,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
circle: getKonvaNodeDebugAttrs(konva.circle),
})),
imageState: deepClone(this.$imageState.get()),
maskedImage: this.maskedImage?.repr(),
imageModule: this.imageModule?.repr(),
config: deepClone(this.config),
$isSegmenting: this.$isSegmenting.get(),
$lastProcessedHash: this.$lastProcessedHash.get(),

View File

@@ -293,6 +293,8 @@ export class CanvasStateApiModule extends CanvasModuleBase {
},
};
let didSuceed = false;
/**
* If a timeout is provided, we will cancel the graph if it takes too long - but we need a way to clear the timeout
* if the graph completes or errors before the timeout.
@@ -344,6 +346,8 @@ export class CanvasStateApiModule extends CanvasModuleBase {
return;
}
didSuceed = true;
// Ok!
resolve(getImageDTOResult.value);
};
@@ -434,6 +438,10 @@ export class CanvasStateApiModule extends CanvasModuleBase {
if (timeout) {
timeoutId = window.setTimeout(() => {
if (didSuceed) {
// If we already succeeded, we don't need to do anything
return;
}
this.log.trace('Graph canceled by timeout');
clearListeners();
cancelGraph();
@@ -443,6 +451,10 @@ export class CanvasStateApiModule extends CanvasModuleBase {
if (signal) {
signal.addEventListener('abort', () => {
if (didSuceed) {
// If we already succeeded, we don't need to do anything
return;
}
this.log.trace('Graph canceled by signal');
_clearTimeout();
clearListeners();

View File

@@ -35,6 +35,8 @@ const ImageMetadataActions = (props: Props) => {
<MetadataItem metadata={metadata} handlers={handlers.cfgRescaleMultiplier} />
<MetadataItem metadata={metadata} handlers={handlers.guidance} />
{activeTabName !== 'canvas' && <MetadataItem metadata={metadata} handlers={handlers.strength} />}
<MetadataItem metadata={metadata} handlers={handlers.seamlessX} />
<MetadataItem metadata={metadata} handlers={handlers.seamlessY} />
<MetadataItem metadata={metadata} handlers={handlers.hrfEnabled} />
<MetadataItem metadata={metadata} handlers={handlers.hrfMethod} />
<MetadataItem metadata={metadata} handlers={handlers.hrfStrength} />

View File

@@ -3,6 +3,7 @@ import { useStore } from '@nanostores/react';
import { skipToken } from '@reduxjs/toolkit/query';
import { useAppSelector } from 'app/store/storeHooks';
import IAIDndImage from 'common/components/IAIDndImage';
import { CanvasAlertsSendingToCanvas } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsSendingTo';
import type { TypesafeDraggableData } from 'features/dnd/types';
import ImageMetadataViewer from 'features/gallery/components/ImageMetadataViewer/ImageMetadataViewer';
import NextPrevImageButtons from 'features/gallery/components/NextPrevImageButtons';
@@ -73,6 +74,9 @@ const CurrentImagePreview = () => {
dataTestId="image-preview"
/>
)}
<Box position="absolute" top={0} insetInlineStart={0}>
<CanvasAlertsSendingToCanvas />
</Box>
{shouldShowImageDetails && imageDTO && (
<Box position="absolute" opacity={0.8} top={0} width="full" height="full" borderRadius="base">
<ImageMetadataViewer image={imageDTO} />

View File

@@ -2,7 +2,6 @@ import { Box, Flex, IconButton } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useFocusRegion } from 'common/hooks/focus';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { CanvasAlertsSendingToCanvas } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsSendingTo';
import { CompareToolbar } from 'features/gallery/components/ImageViewer/CompareToolbar';
import CurrentImagePreview from 'features/gallery/components/ImageViewer/CurrentImagePreview';
import { ImageComparison } from 'features/gallery/components/ImageViewer/ImageComparison';
@@ -46,7 +45,7 @@ export const ImageViewer = memo(({ closeButton }: Props) => {
right={0}
bottom={0}
left={0}
rowGap={4}
rowGap={2}
alignItems="center"
justifyContent="center"
>
@@ -57,9 +56,6 @@ export const ImageViewer = memo(({ closeButton }: Props) => {
{hasImageToCompare && <ImageComparison containerDims={containerDims} />}
</Box>
<ImageComparisonDroppable />
<Box position="absolute" top={14} insetInlineStart={2}>
<CanvasAlertsSendingToCanvas />
</Box>
</Flex>
);
});

View File

@@ -199,6 +199,16 @@ export const handlers = {
parser: parsers.sdxlPositiveStylePrompt,
recaller: recallers.sdxlPositiveStylePrompt,
}),
seamlessX: buildHandlers({
getLabel: () => t('metadata.seamlessXAxis'),
parser: parsers.seamlessX,
recaller: recallers.seamlessX,
}),
seamlessY: buildHandlers({
getLabel: () => t('metadata.seamlessYAxis'),
parser: parsers.seamlessY,
recaller: recallers.seamlessY,
}),
seed: buildHandlers({ getLabel: () => t('metadata.seed'), parser: parsers.seed, recaller: recallers.seed }),
steps: buildHandlers({ getLabel: () => t('metadata.steps'), parser: parsers.steps, recaller: recallers.steps }),
strength: buildHandlers({

View File

@@ -41,6 +41,8 @@ import type {
ParameterSDXLRefinerNegativeAestheticScore,
ParameterSDXLRefinerPositiveAestheticScore,
ParameterSDXLRefinerStart,
ParameterSeamlessX,
ParameterSeamlessY,
ParameterSeed,
ParameterSteps,
ParameterStrength,
@@ -63,6 +65,8 @@ import {
isParameterSDXLRefinerNegativeAestheticScore,
isParameterSDXLRefinerPositiveAestheticScore,
isParameterSDXLRefinerStart,
isParameterSeamlessX,
isParameterSeamlessY,
isParameterSeed,
isParameterSteps,
isParameterStrength,
@@ -160,6 +164,12 @@ const parseSteps: MetadataParseFunc<ParameterSteps> = (metadata) => getProperty(
const parseStrength: MetadataParseFunc<ParameterStrength> = (metadata) =>
getProperty(metadata, 'strength', isParameterStrength);
const parseSeamlessX: MetadataParseFunc<ParameterSeamlessX> = (metadata) =>
getProperty(metadata, 'seamless_x', isParameterSeamlessX);
const parseSeamlessY: MetadataParseFunc<ParameterSeamlessY> = (metadata) =>
getProperty(metadata, 'seamless_y', isParameterSeamlessY);
const parseHRFEnabled: MetadataParseFunc<ParameterHRFEnabled> = async (metadata) => {
try {
return await getProperty(metadata, 'hrf_enabled', isParameterHRFEnabled);
@@ -647,6 +657,8 @@ export const parsers = {
height: parseHeight,
steps: parseSteps,
strength: parseStrength,
seamlessX: parseSeamlessX,
seamlessY: parseSeamlessY,
hrfEnabled: parseHRFEnabled,
hrfStrength: parseHRFStrength,
hrfMethod: parseHRFMethod,

View File

@@ -18,6 +18,8 @@ import {
setRefinerStart,
setRefinerSteps,
setScheduler,
setSeamlessXAxis,
setSeamlessYAxis,
setSeed,
setSteps,
t5EncoderModelSelected,
@@ -44,6 +46,8 @@ import type {
ParameterSDXLRefinerNegativeAestheticScore,
ParameterSDXLRefinerPositiveAestheticScore,
ParameterSDXLRefinerStart,
ParameterSeamlessX,
ParameterSeamlessY,
ParameterSeed,
ParameterSteps,
ParameterStrength,
@@ -106,6 +110,14 @@ const recallStrength: MetadataRecallFunc<ParameterStrength> = (strength) => {
getStore().dispatch(setImg2imgStrength(strength));
};
const recallSeamlessX: MetadataRecallFunc<ParameterSeamlessX> = (enabled) => {
getStore().dispatch(setSeamlessXAxis(enabled));
};
const recallSeamlessY: MetadataRecallFunc<ParameterSeamlessY> = (enabled) => {
getStore().dispatch(setSeamlessYAxis(enabled));
};
const recallHRFEnabled: MetadataRecallFunc<ParameterHRFEnabled> = (hrfEnabled) => {
getStore().dispatch(setHrfEnabled(hrfEnabled));
};
@@ -211,6 +223,8 @@ export const recallers = {
height: recallHeight,
steps: recallSteps,
strength: recallStrength,
seamlessX: recallSeamlessX,
seamlessY: recallSeamlessY,
hrfEnabled: recallHRFEnabled,
hrfStrength: recallHRFStrength,
hrfMethod: recallHRFMethod,

View File

@@ -6,18 +6,31 @@ import { addImageToLatents } from 'features/nodes/util/graph/graphBuilderUtils';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
export const addImageToImage = async (
g: Graph,
manager: CanvasManager,
l2i: Invocation<'l2i' | 'flux_vae_decode'>,
denoise: Invocation<'denoise_latents' | 'flux_denoise'>,
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>,
originalSize: Dimensions,
scaledSize: Dimensions,
bbox: CanvasState['bbox'],
denoising_start: number,
fp32: boolean
): Promise<Invocation<'img_resize' | 'l2i' | 'flux_vae_decode'>> => {
type AddImageToImageArg = {
g: Graph;
manager: CanvasManager;
l2i: Invocation<'l2i' | 'flux_vae_decode'>;
denoise: Invocation<'denoise_latents' | 'flux_denoise'>;
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>;
originalSize: Dimensions;
scaledSize: Dimensions;
bbox: CanvasState['bbox'];
denoising_start: number;
fp32: boolean;
};
export const addImageToImage = async ({
g,
manager,
l2i,
denoise,
vaeSource,
originalSize,
scaledSize,
bbox,
denoising_start,
fp32,
}: AddImageToImageArg): Promise<Invocation<'img_resize' | 'l2i' | 'flux_vae_decode'>> => {
denoise.denoising_start = denoising_start;
const { image_name } = await manager.compositor.getCompositeRasterLayerImageDTO(bbox.rect);

View File

@@ -10,19 +10,33 @@ import { addImageToLatents } from 'features/nodes/util/graph/graphBuilderUtils';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
export const addInpaint = async (
state: RootState,
g: Graph,
manager: CanvasManager,
l2i: Invocation<'l2i' | 'flux_vae_decode'>,
denoise: Invocation<'denoise_latents' | 'flux_denoise'>,
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>,
modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader'>,
originalSize: Dimensions,
scaledSize: Dimensions,
denoising_start: number,
fp32: boolean
): Promise<Invocation<'canvas_v2_mask_and_crop'>> => {
type AddInpaintArg = {
state: RootState;
g: Graph;
manager: CanvasManager;
l2i: Invocation<'l2i' | 'flux_vae_decode'>;
denoise: Invocation<'denoise_latents' | 'flux_denoise'>;
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>;
modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader'>;
originalSize: Dimensions;
scaledSize: Dimensions;
denoising_start: number;
fp32: boolean;
};
export const addInpaint = async ({
state,
g,
manager,
l2i,
denoise,
vaeSource,
modelLoader,
originalSize,
scaledSize,
denoising_start,
fp32,
}: AddInpaintArg): Promise<Invocation<'canvas_v2_mask_and_crop' | 'img_resize'>> => {
denoise.denoising_start = denoising_start;
const params = selectParamsSlice(state);
@@ -55,16 +69,6 @@ export const addInpaint = async (
type: 'img_resize',
...scaledSize,
});
const resizeImageToOriginalSize = g.addNode({
id: getPrefixedId('resize_image_to_original_size'),
type: 'img_resize',
...originalSize,
});
const resizeMaskToOriginalSize = g.addNode({
id: getPrefixedId('resize_mask_to_original_size'),
type: 'img_resize',
...originalSize,
});
const createGradientMask = g.addNode({
id: getPrefixedId('create_gradient_mask'),
type: 'create_gradient_mask',
@@ -78,6 +82,11 @@ export const addInpaint = async (
type: 'canvas_v2_mask_and_crop',
mask_blur: params.maskBlur,
});
const resizeOutput = g.addNode({
id: getPrefixedId('resize_output'),
type: 'img_resize',
...originalSize,
});
// Resize initial image and mask to scaled size, feed into to gradient mask
g.addEdge(alphaToMask, 'image', resizeMaskToScaledSize, 'image');
@@ -94,21 +103,20 @@ export const addInpaint = async (
g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');
// After denoising, resize the image and mask back to original size
g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image');
g.addEdge(createGradientMask, 'expanded_mask_area', resizeMaskToOriginalSize, 'image');
// Paste the generated masked image back onto the original image
g.addEdge(l2i, 'image', canvasPasteBack, 'generated_image');
g.addEdge(createGradientMask, 'expanded_mask_area', canvasPasteBack, 'mask');
// Finally, paste the generated masked image back onto the original image
g.addEdge(resizeImageToOriginalSize, 'image', canvasPasteBack, 'generated_image');
g.addEdge(resizeMaskToOriginalSize, 'image', canvasPasteBack, 'mask');
// Finally, resize the output back to the original size
g.addEdge(canvasPasteBack, 'image', resizeOutput, 'image');
// Do the paste back if we are sending to gallery (in which case we want to see the full image), or if we are sending
// to canvas but not outputting only masked regions
if (!canvasSettings.sendToCanvas || !canvasSettings.outputOnlyMaskedRegions) {
canvasPasteBack.source_image = { image_name: initialImage.image_name };
g.addEdge(resizeImageToScaledSize, 'image', canvasPasteBack, 'source_image');
}
return canvasPasteBack;
return resizeOutput;
} else {
// No scale before processing, much simpler
const i2l = addImageToLatents(g, modelLoader.type === 'flux_model_loader', fp32, initialImage.image_name);

View File

@@ -10,19 +10,33 @@ import { addImageToLatents, getInfill } from 'features/nodes/util/graph/graphBui
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
export const addOutpaint = async (
state: RootState,
g: Graph,
manager: CanvasManager,
l2i: Invocation<'l2i' | 'flux_vae_decode'>,
denoise: Invocation<'denoise_latents' | 'flux_denoise'>,
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>,
modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader'>,
originalSize: Dimensions,
scaledSize: Dimensions,
denoising_start: number,
fp32: boolean
): Promise<Invocation<'canvas_v2_mask_and_crop'>> => {
type AddOutpaintArg = {
state: RootState;
g: Graph;
manager: CanvasManager;
l2i: Invocation<'l2i' | 'flux_vae_decode'>;
denoise: Invocation<'denoise_latents' | 'flux_denoise'>;
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>;
modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader'>;
originalSize: Dimensions;
scaledSize: Dimensions;
denoising_start: number;
fp32: boolean;
};
export const addOutpaint = async ({
state,
g,
manager,
l2i,
denoise,
vaeSource,
modelLoader,
originalSize,
scaledSize,
denoising_start,
fp32,
}: AddOutpaintArg): Promise<Invocation<'canvas_v2_mask_and_crop' | 'img_resize'>> => {
denoise.denoising_start = denoising_start;
const params = selectParamsSlice(state);
@@ -98,40 +112,33 @@ export const addOutpaint = async (
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(i2l, 'latents', denoise, 'latents');
// Resize the output image back to the original size
const resizeOutputImageToOriginalSize = g.addNode({
id: getPrefixedId('resize_image_to_original_size'),
type: 'img_resize',
...originalSize,
});
const resizeOutputMaskToOriginalSize = g.addNode({
id: getPrefixedId('resize_mask_to_original_size'),
type: 'img_resize',
...originalSize,
});
const canvasPasteBack = g.addNode({
id: getPrefixedId('canvas_v2_mask_and_crop'),
type: 'canvas_v2_mask_and_crop',
mask_blur: params.maskBlur,
});
const resizeOutput = g.addNode({
id: getPrefixedId('resize_output'),
type: 'img_resize',
...originalSize,
});
// Resize initial image and mask to scaled size, feed into to gradient mask
// After denoising, resize the image and mask back to original size
g.addEdge(l2i, 'image', resizeOutputImageToOriginalSize, 'image');
g.addEdge(createGradientMask, 'expanded_mask_area', resizeOutputMaskToOriginalSize, 'image');
// Paste the generated masked image back onto the original image
g.addEdge(l2i, 'image', canvasPasteBack, 'generated_image');
g.addEdge(createGradientMask, 'expanded_mask_area', canvasPasteBack, 'mask');
// Finally, paste the generated masked image back onto the original image
g.addEdge(resizeOutputImageToOriginalSize, 'image', canvasPasteBack, 'generated_image');
g.addEdge(resizeOutputMaskToOriginalSize, 'image', canvasPasteBack, 'mask');
// Finally, resize the output back to the original size
g.addEdge(canvasPasteBack, 'image', resizeOutput, 'image');
// Do the paste back if we are sending to gallery (in which case we want to see the full image), or if we are sending
// to canvas but not outputting only masked regions
if (!canvasSettings.sendToCanvas || !canvasSettings.outputOnlyMaskedRegions) {
canvasPasteBack.source_image = { image_name: initialImage.image_name };
g.addEdge(resizeInputImageToScaledSize, 'image', canvasPasteBack, 'source_image');
}
return canvasPasteBack;
return resizeOutput;
} else {
infill.image = { image_name: initialImage.image_name };
// No scale before processing, much simpler

View File

@@ -23,6 +23,12 @@ export const addSeamless = (
): Invocation<'seamless'> | null => {
const { seamlessXAxis: seamless_x, seamlessYAxis: seamless_y } = state.params;
// Always write seamless metadata to ensure recalling all parameters will reset the seamless settings
g.upsertMetadata({
seamless_x,
seamless_y,
});
if (!seamless_x && !seamless_y) {
return null;
}
@@ -34,11 +40,6 @@ export const addSeamless = (
seamless_y,
});
g.upsertMetadata({
seamless_x: seamless_x || undefined,
seamless_y: seamless_y || undefined,
});
// Seamless slots into the graph between the model loader and the denoise node
g.deleteEdgesFrom(modelLoader, ['unet']);
g.deleteEdgesFrom(modelLoader, ['vae']);

View File

@@ -4,12 +4,19 @@ import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
export const addTextToImage = (
g: Graph,
l2i: Invocation<'l2i' | 'flux_vae_decode'>,
originalSize: Dimensions,
scaledSize: Dimensions
): Invocation<'img_resize' | 'l2i' | 'flux_vae_decode'> => {
type AddTextToImageArg = {
g: Graph;
l2i: Invocation<'l2i' | 'flux_vae_decode'>;
originalSize: Dimensions;
scaledSize: Dimensions;
};
export const addTextToImage = ({
g,
l2i,
originalSize,
scaledSize,
}: AddTextToImageArg): Invocation<'img_resize' | 'l2i' | 'flux_vae_decode'> => {
if (!isEqual(scaledSize, originalSize)) {
// We need to resize the output image back to the original size
const resizeImageToOriginalSize = g.addNode({

View File

@@ -14,9 +14,15 @@ import { addOutpaint } from 'features/nodes/util/graph/generation/addOutpaint';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField, getPresetModifiedPrompts, getSizes } from 'features/nodes/util/graph/graphBuilderUtils';
import {
CANVAS_OUTPUT_PREFIX,
getBoardField,
getPresetModifiedPrompts,
getSizes,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
import { addControlNets } from './addControlAdapters';
@@ -74,7 +80,7 @@ export const buildFLUXGraph = async (
prompt: positivePrompt,
});
const noise = g.addNode({
const denoise = g.addNode({
type: 'flux_denoise',
id: getPrefixedId('flux_denoise'),
guidance,
@@ -91,23 +97,19 @@ export const buildFLUXGraph = async (
id: getPrefixedId('flux_vae_decode'),
});
let canvasOutput: Invocation<
'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode'
> = l2i;
g.addEdge(modelLoader, 'transformer', noise, 'transformer');
g.addEdge(modelLoader, 'vae', noise, 'controlnet_vae');
g.addEdge(modelLoader, 'transformer', denoise, 'transformer');
g.addEdge(modelLoader, 'vae', denoise, 'controlnet_vae');
g.addEdge(modelLoader, 'vae', l2i, 'vae');
g.addEdge(modelLoader, 'clip', posCond, 'clip');
g.addEdge(modelLoader, 't5_encoder', posCond, 't5_encoder');
g.addEdge(modelLoader, 'max_seq_len', posCond, 't5_max_seq_len');
addFLUXLoRAs(state, g, noise, modelLoader, posCond);
addFLUXLoRAs(state, g, denoise, modelLoader, posCond);
g.addEdge(posCond, 'conditioning', noise, 'positive_text_conditioning');
g.addEdge(posCond, 'conditioning', denoise, 'positive_text_conditioning');
g.addEdge(noise, 'latents', l2i, 'latents');
g.addEdge(denoise, 'latents', l2i, 'latents');
const modelConfig = await fetchModelConfigWithTypeGuard(model.key, isNonRefinerMainModelConfig);
assert(modelConfig.base === 'flux');
@@ -126,59 +128,65 @@ export const buildFLUXGraph = async (
clip_embed_model: clipEmbedModel,
});
let denoisingStart: number;
let denoising_start: number;
if (optimizedDenoisingEnabled) {
// We rescale the img2imgStrength (with exponent 0.2) to effectively use the entire range [0, 1] and make the scale
// more user-friendly for FLUX. Without this, most of the 'change' is concentrated in the high denoise strength
// range (>0.9).
denoisingStart = 1 - img2imgStrength ** 0.2;
denoising_start = 1 - img2imgStrength ** 0.2;
} else {
denoisingStart = 1 - img2imgStrength;
denoising_start = 1 - img2imgStrength;
}
let canvasOutput: Invocation<
'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode'
> = l2i;
if (generationMode === 'txt2img') {
canvasOutput = addTextToImage(g, l2i, originalSize, scaledSize);
canvasOutput = addTextToImage({ g, l2i, originalSize, scaledSize });
} else if (generationMode === 'img2img') {
canvasOutput = await addImageToImage(
canvasOutput = await addImageToImage({
g,
manager,
l2i,
noise,
modelLoader,
denoise,
vaeSource: modelLoader,
originalSize,
scaledSize,
bbox,
denoisingStart,
false
);
denoising_start,
fp32: false,
});
} else if (generationMode === 'inpaint') {
canvasOutput = await addInpaint(
canvasOutput = await addInpaint({
state,
g,
manager,
l2i,
noise,
modelLoader,
denoise,
vaeSource: modelLoader,
modelLoader,
originalSize,
scaledSize,
denoisingStart,
false
);
denoising_start,
fp32: false,
});
} else if (generationMode === 'outpaint') {
canvasOutput = await addOutpaint(
canvasOutput = await addOutpaint({
state,
g,
manager,
l2i,
noise,
modelLoader,
denoise,
vaeSource: modelLoader,
modelLoader,
originalSize,
scaledSize,
denoisingStart,
false
);
denoising_start,
fp32: false,
});
} else {
assert<Equals<typeof generationMode, never>>(false);
}
const controlNetCollector = g.addNode({
@@ -194,7 +202,7 @@ export const buildFLUXGraph = async (
modelConfig.base
);
if (controlNetResult.addedControlNets > 0) {
g.addEdge(controlNetCollector, 'collection', noise, 'control');
g.addEdge(controlNetCollector, 'collection', denoise, 'control');
} else {
g.deleteNode(controlNetCollector.id);
}
@@ -221,14 +229,14 @@ export const buildFLUXGraph = async (
g.addEdge(modelLoader, 'clip', negCond, 'clip');
g.addEdge(modelLoader, 't5_encoder', negCond, 't5_encoder');
g.addEdge(modelLoader, 'max_seq_len', negCond, 't5_max_seq_len');
g.addEdge(negCond, 'conditioning', noise, 'negative_text_conditioning');
g.addEdge(negCond, 'conditioning', denoise, 'negative_text_conditioning');
g.updateNode(noise, {
g.updateNode(denoise, {
cfg_scale: 3,
cfg_scale_start_step,
cfg_scale_end_step,
});
g.addEdge(ipAdapterCollector, 'collection', noise, 'ip_adapter');
g.addEdge(ipAdapterCollector, 'collection', denoise, 'ip_adapter');
} else {
g.deleteNode(ipAdapterCollector.id);
}
@@ -250,12 +258,12 @@ export const buildFLUXGraph = async (
}
g.updateNode(canvasOutput, {
id: getPrefixedId('canvas_output'),
id: getPrefixedId(CANVAS_OUTPUT_PREFIX),
is_intermediate,
use_cache: false,
board,
});
g.setMetadataReceivingNode(canvasOutput);
return { g, noise, posCond };
return { g, noise: denoise, posCond };
};

View File

@@ -18,9 +18,15 @@ import { addSeamless } from 'features/nodes/util/graph/generation/addSeamless';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField, getPresetModifiedPrompts, getSizes } from 'features/nodes/util/graph/graphBuilderUtils';
import {
CANVAS_OUTPUT_PREFIX,
getBoardField,
getPresetModifiedPrompts,
getSizes,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
import { addRegions } from './addRegions';
@@ -120,10 +126,6 @@ export const buildSD1Graph = async (
})
: null;
let canvasOutput: Invocation<
'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode'
> = l2i;
g.addEdge(modelLoader, 'unet', denoise, 'unet');
g.addEdge(modelLoader, 'clip', clipSkip, 'clip');
g.addEdge(clipSkip, 'clip', posCond, 'clip');
@@ -165,10 +167,16 @@ export const buildSD1Graph = async (
> = seamless ?? vaeLoader ?? modelLoader;
g.addEdge(vaeSource, 'vae', l2i, 'vae');
const denoising_start = 1 - params.img2imgStrength;
let canvasOutput: Invocation<
'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode'
> = l2i;
if (generationMode === 'txt2img') {
canvasOutput = addTextToImage(g, l2i, originalSize, scaledSize);
canvasOutput = addTextToImage({ g, l2i, originalSize, scaledSize });
} else if (generationMode === 'img2img') {
canvasOutput = await addImageToImage(
canvasOutput = await addImageToImage({
g,
manager,
l2i,
@@ -177,11 +185,11 @@ export const buildSD1Graph = async (
originalSize,
scaledSize,
bbox,
1 - params.img2imgStrength,
vaePrecision === 'fp32'
);
denoising_start,
fp32: vaePrecision === 'fp32',
});
} else if (generationMode === 'inpaint') {
canvasOutput = await addInpaint(
canvasOutput = await addInpaint({
state,
g,
manager,
@@ -191,11 +199,11 @@ export const buildSD1Graph = async (
modelLoader,
originalSize,
scaledSize,
1 - params.img2imgStrength,
vaePrecision === 'fp32'
);
denoising_start,
fp32: vaePrecision === 'fp32',
});
} else if (generationMode === 'outpaint') {
canvasOutput = await addOutpaint(
canvasOutput = await addOutpaint({
state,
g,
manager,
@@ -205,9 +213,11 @@ export const buildSD1Graph = async (
modelLoader,
originalSize,
scaledSize,
1 - params.img2imgStrength,
fp32
);
denoising_start,
fp32,
});
} else {
assert<Equals<typeof generationMode, never>>(false);
}
const controlNetCollector = g.addNode({
@@ -291,7 +301,7 @@ export const buildSD1Graph = async (
}
g.updateNode(canvasOutput, {
id: getPrefixedId('canvas_output'),
id: getPrefixedId(CANVAS_OUTPUT_PREFIX),
is_intermediate,
use_cache: false,
board,

View File

@@ -18,9 +18,15 @@ import { addSeamless } from 'features/nodes/util/graph/generation/addSeamless';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField, getPresetModifiedPrompts, getSizes } from 'features/nodes/util/graph/graphBuilderUtils';
import {
CANVAS_OUTPUT_PREFIX,
getBoardField,
getPresetModifiedPrompts,
getSizes,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
import { addRegions } from './addRegions';
@@ -118,10 +124,6 @@ export const buildSDXLGraph = async (
})
: null;
let canvasOutput: Invocation<
'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode'
> = l2i;
g.addEdge(modelLoader, 'unet', denoise, 'unet');
g.addEdge(modelLoader, 'clip', posCond, 'clip');
g.addEdge(modelLoader, 'clip', negCond, 'clip');
@@ -168,10 +170,18 @@ export const buildSDXLGraph = async (
await addSDXLRefiner(state, g, denoise, seamless, posCond, negCond, l2i);
}
const denoising_start = refinerModel
? Math.min(refinerStart, 1 - params.img2imgStrength)
: 1 - params.img2imgStrength;
let canvasOutput: Invocation<
'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode'
> = l2i;
if (generationMode === 'txt2img') {
canvasOutput = addTextToImage(g, l2i, originalSize, scaledSize);
canvasOutput = addTextToImage({ g, l2i, originalSize, scaledSize });
} else if (generationMode === 'img2img') {
canvasOutput = await addImageToImage(
canvasOutput = await addImageToImage({
g,
manager,
l2i,
@@ -180,11 +190,11 @@ export const buildSDXLGraph = async (
originalSize,
scaledSize,
bbox,
refinerModel ? Math.min(refinerStart, 1 - params.img2imgStrength) : 1 - params.img2imgStrength,
fp32
);
denoising_start,
fp32,
});
} else if (generationMode === 'inpaint') {
canvasOutput = await addInpaint(
canvasOutput = await addInpaint({
state,
g,
manager,
@@ -194,11 +204,11 @@ export const buildSDXLGraph = async (
modelLoader,
originalSize,
scaledSize,
refinerModel ? Math.min(refinerStart, 1 - params.img2imgStrength) : 1 - params.img2imgStrength,
fp32
);
denoising_start,
fp32,
});
} else if (generationMode === 'outpaint') {
canvasOutput = await addOutpaint(
canvasOutput = await addOutpaint({
state,
g,
manager,
@@ -208,9 +218,11 @@ export const buildSDXLGraph = async (
modelLoader,
originalSize,
scaledSize,
refinerModel ? Math.min(refinerStart, 1 - params.img2imgStrength) : 1 - params.img2imgStrength,
fp32
);
denoising_start,
fp32,
});
} else {
assert<Equals<typeof generationMode, never>>(false);
}
const controlNetCollector = g.addNode({
@@ -294,7 +306,7 @@ export const buildSDXLGraph = async (
}
g.updateNode(canvasOutput, {
id: getPrefixedId('canvas_output'),
id: getPrefixedId(CANVAS_OUTPUT_PREFIX),
is_intermediate,
use_cache: false,
board,

View File

@@ -129,3 +129,5 @@ export const addImageToLatents = (g: Graph, isFlux: boolean, fp32: boolean, imag
return g.addNode({ id: 'i2l', type: 'i2l', fp32, image: image_name ? { image_name } : undefined });
}
};
export const CANVAS_OUTPUT_PREFIX = 'canvas_output';

View File

@@ -141,6 +141,20 @@ export const isParameterStrength = (val: unknown): val is ParameterStrength =>
zParameterStrength.safeParse(val).success;
// #endregion
// #region SeamlessX
const zParameterSeamlessX = z.boolean();
export type ParameterSeamlessX = z.infer<typeof zParameterSeamlessX>;
export const isParameterSeamlessX = (val: unknown): val is ParameterSeamlessX =>
zParameterSeamlessX.safeParse(val).success;
// #endregion
// #region SeamlessY
const zParameterSeamlessY = z.boolean();
export type ParameterSeamlessY = z.infer<typeof zParameterSeamlessY>;
export const isParameterSeamlessY = (val: unknown): val is ParameterSeamlessY =>
zParameterSeamlessY.safeParse(val).success;
// #endregion
// #region Precision
const zParameterPrecision = z.enum(['fp16', 'fp32']);
export type ParameterPrecision = z.infer<typeof zParameterPrecision>;

View File

@@ -21,14 +21,14 @@ import { useTranslation } from 'react-i18next';
import { PiLightbulbFilamentBold } from 'react-icons/pi';
import { useGetAppVersionQuery } from 'services/api/endpoints/appInfo';
import { CanvasV2Announcement } from './CanvasV2Announcement';
import { WhatsNew } from './WhatsNew';
const selectIsLocal = createSelector(selectConfigSlice, (config) => config.isLocal);
export const Notifications = () => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const shouldShowNotification = useAppSelector((s) => s.ui.shouldShowNotification);
const shouldShowNotification = useAppSelector((s) => s.ui.shouldShowNotificationV2);
const resetIndicator = useCallback(() => {
dispatch(shouldShowNotificationChanged(false));
}, [dispatch]);
@@ -58,11 +58,16 @@ export const Notifications = () => {
<Flex alignItems="center" gap={3}>
<Image src={InvokeSymbol} boxSize={6} />
{t('whatsNew.whatsNewInInvoke')}
{isLocal && <Text variant="subtext">{`v${data.version}`}</Text>}
{!!data.version.length &&
(isLocal ? (
<Text variant="subtext">{`v${data.version}`}</Text>
) : (
<Text variant="subtext">{data.version}</Text>
))}
</Flex>
</PopoverHeader>
<PopoverBody p={2}>
<CanvasV2Announcement />
<PopoverBody p={2} maxW={300}>
<WhatsNew />
</PopoverBody>
</PopoverContent>
</Popover>

View File

@@ -1,27 +1,34 @@
import { ExternalLink, Flex, ListItem, UnorderedList } from '@invoke-ai/ui-library';
import { ExternalLink, Flex, ListItem, Text, UnorderedList } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { selectConfigSlice } from 'features/system/store/configSlice';
import { useTranslation } from 'react-i18next';
import { Trans, useTranslation } from 'react-i18next';
const selectIsLocal = createSelector(selectConfigSlice, (config) => config.isLocal);
export const CanvasV2Announcement = () => {
export const WhatsNew = () => {
const { t } = useTranslation();
const isLocal = useAppSelector(selectIsLocal);
return (
<Flex gap={4} flexDir="column">
<UnorderedList fontSize="sm">
<ListItem>{t('whatsNew.canvasV2Announcement.newCanvas')}</ListItem>
<ListItem>{t('whatsNew.canvasV2Announcement.newLayerTypes')}</ListItem>
<ListItem>{t('whatsNew.canvasV2Announcement.fluxSupport')}</ListItem>
<ListItem>
<Trans
i18nKey="whatsNew.line1"
components={{
ItalicComponent: <Text as="span" color="white" fontSize="sm" fontStyle="italic" />,
}}
/>
</ListItem>
<ListItem>{t('whatsNew.line2')}</ListItem>
<ListItem>{t('whatsNew.line3')}</ListItem>
</UnorderedList>
<Flex flexDir="column" gap={1}>
<ExternalLink
fontSize="sm"
fontWeight="semibold"
label={t('whatsNew.canvasV2Announcement.readReleaseNotes')}
label={t('whatsNew.readReleaseNotes')}
href={
isLocal
? 'https://github.com/invoke-ai/InvokeAI/releases/tag/v5.0.0'
@@ -31,14 +38,8 @@ export const CanvasV2Announcement = () => {
<ExternalLink
fontSize="sm"
fontWeight="semibold"
label={t('whatsNew.canvasV2Announcement.watchReleaseVideo')}
href="https://www.youtube.com/watch?v=y80W3PjR0Gc"
/>
<ExternalLink
fontSize="sm"
fontWeight="semibold"
label={t('whatsNew.canvasV2Announcement.watchUiUpdatesOverview')}
href="https://www.youtube.com/watch?v=Tl-69JvwJ2s"
label={t('whatsNew.watchRecentReleaseVideos')}
href="https://www.youtube.com/@invokeai/videos"
/>
</Flex>
</Flex>

View File

@@ -15,7 +15,7 @@ const initialUIState: UIState = {
shouldShowProgressInViewer: true,
accordions: {},
expanders: {},
shouldShowNotification: true,
shouldShowNotificationV2: true,
};
export const uiSlice = createSlice({
@@ -43,7 +43,7 @@ export const uiSlice = createSlice({
state.expanders[id] = isOpen;
},
shouldShowNotificationChanged: (state, action: PayloadAction<boolean>) => {
state.shouldShowNotification = action.payload;
state.shouldShowNotificationV2 = action.payload;
},
},
extraReducers(builder) {

View File

@@ -31,7 +31,7 @@ export interface UIState {
*/
expanders: Record<string, boolean>;
/**
* Whether or not to show the user the open notification.
* Whether or not to show the user the open notification. Bump version to reset users who may have closed previous version.
*/
shouldShowNotification: boolean;
shouldShowNotificationV2: boolean;
}

View File

@@ -6,6 +6,7 @@ import { stagingAreaImageStaged } from 'features/controlLayers/store/canvasStagi
import { boardIdSelected, galleryViewChanged, imageSelected, offsetChanged } from 'features/gallery/store/gallerySlice';
import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useExecutionState';
import { zNodeStatus } from 'features/nodes/types/invocation';
import { CANVAS_OUTPUT_PREFIX } from 'features/nodes/util/graph/graphBuilderUtils';
import { boardsApi } from 'services/api/endpoints/boards';
import { getImageDTOSafe, imagesApi } from 'services/api/endpoints/images';
import type { ImageDTO, S } from 'services/api/types';
@@ -15,7 +16,7 @@ import { $lastProgressEvent } from 'services/events/stores';
const log = logger('events');
const isCanvasOutputNode = (data: S['InvocationCompleteEvent']) => {
return data.invocation_source_id.split(':')[0] === 'canvas_output';
return data.invocation_source_id.split(':')[0] === CANVAS_OUTPUT_PREFIX;
};
const nodeTypeDenylist = ['load_image', 'image'];

View File

@@ -1 +1 @@
__version__ = "5.3.0"
__version__ = "5.4.0a1"

View File

@@ -33,12 +33,12 @@ classifiers = [
]
dependencies = [
# Core generation dependencies, pinned for reproducible builds.
"accelerate==0.30.1",
"accelerate==1.0.1",
"bitsandbytes==0.43.3; sys_platform!='darwin'",
"clip_anytorch==2.6.0", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
"compel==2.0.2",
"controlnet-aux==0.0.7",
"diffusers[torch]==0.27.2",
"diffusers[torch]==0.31.0",
"gguf==0.10.0",
"invisible-watermark==0.2.0", # needed to install SDXL base and refiner using their repo_ids
"mediapipe>=0.10.7", # needed for "mediapipeface" controlnet model
@@ -61,7 +61,7 @@ dependencies = [
# Core application dependencies, pinned for reproducible builds.
"fastapi-events==0.11.1",
"fastapi==0.111.0",
"huggingface-hub==0.23.1",
"huggingface-hub==0.26.1",
"pydantic-settings==2.2.1",
"pydantic==2.7.2",
"python-socketio==5.11.1",