Compare commits

..

19 Commits

Author SHA1 Message Date
Kevin Turner
e06024d8ed fix(upscale_sdx4): upgrade for v3.1 nodes API 2023-09-16 14:21:29 -07:00
Kevin Turner
caf52cfcce fix(upscale_sdx4): remove workaround for diffusers#4349 (fixed upstream) 2023-09-16 14:15:00 -07:00
Kevin Turner
16891401c8 Merge branch 'main' into feat/upscale4x 2023-09-16 13:55:17 -07:00
Kevin Turner
5dd9eb3e9a chore(upscale_sdx4): update for nodes #4114 2023-08-16 16:11:19 -07:00
Kevin Turner
825ff212fa Merge branch 'main' into feat/upscale4x 2023-08-16 15:06:57 -07:00
Kevin Turner
14bd61df9d Merge branch 'main' into feat/upscale4x 2023-08-12 12:44:18 -07:00
Kevin Turner
9b658fecd0 lint(upscale_sdx4): formatting 2023-08-07 13:15:06 -07:00
Kevin Turner
3b6a1737f4 Merge branch 'main' into feat/upscale4x 2023-08-07 13:12:17 -07:00
Kevin Turner
0e38f762ef Merge branch 'main' into feat/upscale4x 2023-08-01 20:23:39 -07:00
Kevin Turner
3d54beab67 refactor(upscale_sdx4): bug filed upstream 2023-07-28 11:56:32 -07:00
Kevin Turner
269de738fe stopgap(upscale_sdx4): temp kludge to allow loading upscaling model 2023-07-28 11:15:17 -07:00
Kevin Turner
34ecb06f57 refactor(upscale_sdx4): remove workaround for check_inputs bug
Fixed upstream by https://github.com/huggingface/diffusers/pull/4278 before I even got to reporting it — thank you!
2023-07-28 11:14:31 -07:00
Kevin Turner
b3e3f79158 Merge remote-tracking branch 'origin/main' into feat/upscale4x 2023-07-28 11:01:20 -07:00
Kevin Turner
68cea7e5ad Merge remote-tracking branch 'origin/main' into feat/upscale4x
# Conflicts:
#	invokeai/backend/model_management/model_probe.py
2023-07-27 13:36:55 -07:00
Kevin Turner
a63b08721d Merge branch 'main' into feat/upscale4x 2023-07-26 15:42:24 -07:00
Kevin Turner
9b7cb074e8 Merge remote-tracking branch 'origin/main' into feat/upscale4x 2023-07-25 21:52:04 -07:00
Kevin Turner
829721ba10 refactor(upscale_sdx4): streamlining a bit 2023-07-25 21:51:28 -07:00
Kevin Turner
5acb6af447 Merge branch 'main' into feat/upscale4x 2023-07-25 19:32:37 -07:00
Kevin Turner
19114dff3e feat: upscale with the Stable Diffusion x4 upscaler model. [proof of concept] 2023-07-25 18:54:24 -07:00
53 changed files with 527 additions and 1765 deletions

View File

@@ -9,7 +9,6 @@ from invokeai.app.services.boards import BoardService, BoardServiceDependencies
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.image_record_storage import SqliteImageRecordStorage
from invokeai.app.services.images import ImageService, ImageServiceDependencies
from invokeai.app.services.invocation_cache.invocation_cache_memory import MemoryInvocationCache
from invokeai.app.services.resource_name import SimpleNameService
from invokeai.app.services.urls import LocalUrlService
from invokeai.backend.util.logging import InvokeAILogger
@@ -127,7 +126,6 @@ class ApiDependencies:
configuration=config,
performance_statistics=InvocationStatsService(graph_execution_manager),
logger=logger,
invocation_cache=MemoryInvocationCache(max_cache_size=config.node_cache_size),
)
create_system_graphs(services.graph_library)

View File

@@ -1,7 +1,5 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from invokeai.app.services.invocation_cache.invocation_cache_memory import MemoryInvocationCache
from .services.config import InvokeAIAppConfig
# parse_args() must be called before any other imports. if it is not called first, consumers of the config
@@ -311,7 +309,6 @@ def invoke_cli():
performance_statistics=InvocationStatsService(graph_execution_manager),
logger=logger,
configuration=config,
invocation_cache=MemoryInvocationCache(max_cache_size=config.node_cache_size),
)
system_graphs = create_system_graphs(services.graph_library)

View File

@@ -568,24 +568,7 @@ class BaseInvocation(ABC, BaseModel):
raise RequiredConnectionException(self.__fields__["type"].default, field_name)
elif _input == Input.Any:
raise MissingInputException(self.__fields__["type"].default, field_name)
output: BaseInvocationOutput
if self.use_cache:
key = context.services.invocation_cache.create_key(self)
cached_value = context.services.invocation_cache.get(key)
if cached_value is None:
context.services.logger.debug(f'Invocation cache miss for type "{self.get_type()}": {self.id}')
output = self.invoke(context)
context.services.invocation_cache.save(key, output)
return output
else:
context.services.logger.debug(f'Invocation cache hit for type "{self.get_type()}": {self.id}')
return cached_value
else:
context.services.logger.debug(f'Skipping invocation cache for "{self.get_type()}": {self.id}')
return self.invoke(context)
def get_type(self) -> str:
return self.__fields__["type"].default
return self.invoke(context)
id: str = Field(
description="The id of this instance of an invocation. Must be unique among all instances of invocations."
@@ -598,7 +581,6 @@ class BaseInvocation(ABC, BaseModel):
description="The workflow to save with the image",
ui_type=UIType.WorkflowField,
)
use_cache: bool = InputField(default=True, description="Whether or not to use the cache")
@validator("workflow", pre=True)
def validate_workflow_is_json(cls, v):
@@ -622,7 +604,6 @@ def invocation(
tags: Optional[list[str]] = None,
category: Optional[str] = None,
version: Optional[str] = None,
use_cache: Optional[bool] = True,
) -> Callable[[Type[GenericBaseInvocation]], Type[GenericBaseInvocation]]:
"""
Adds metadata to an invocation.
@@ -655,8 +636,6 @@ def invocation(
except ValueError as e:
raise InvalidVersionError(f'Invalid version string for node "{invocation_type}": "{version}"') from e
cls.UIConfig.version = version
if use_cache is not None:
cls.__fields__["use_cache"].default = use_cache
# Add the invocation type to the pydantic model of the invocation
invocation_type_annotation = Literal[invocation_type] # type: ignore
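The hunks above cover the invocation-cache gate that BaseInvocation runs before invoke(): a per-node use_cache field plus create_key/get/save calls against the invocation_cache service. A minimal Python sketch of how those pieces fit together (invocation and context are hypothetical stand-ins for a node instance and its invocation context):

def invoke_with_cache(invocation, context):
    # Sketch of the cache gate shown in the hunk above; not the verbatim method.
    cache = context.services.invocation_cache
    if not invocation.use_cache:
        # Nodes like rand_int and save_image opt out via use_cache=False.
        return invocation.invoke(context)
    key = cache.create_key(invocation)  # hash of the node's JSON, minus its "id"
    cached = cache.get(key)
    if cached is not None:
        return cached  # cache hit: reuse the prior output
    output = invocation.invoke(context)
    cache.save(key, output)  # cache miss: compute, then store
    return output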

View File

@@ -56,7 +56,6 @@ class RangeOfSizeInvocation(BaseInvocation):
tags=["range", "integer", "random", "collection"],
category="collections",
version="1.0.0",
use_cache=False,
)
class RandomRangeInvocation(BaseInvocation):
"""Creates a collection of random numbers"""

View File

@@ -965,42 +965,3 @@ class ImageChannelMultiplyInvocation(BaseInvocation):
width=image_dto.width,
height=image_dto.height,
)
@invocation(
"save_image",
title="Save Image",
tags=["primitives", "image"],
category="primitives",
version="1.0.0",
use_cache=False,
)
class SaveImageInvocation(BaseInvocation):
"""Saves an image. Unlike an image primitive, this invocation stores a copy of the image."""
image: ImageField = InputField(description="The image to load")
metadata: CoreMetadata = InputField(
default=None,
description=FieldDescriptions.core_metadata,
ui_hidden=True,
)
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)
image_dto = context.services.images.create(
image=image,
image_origin=ResourceOrigin.INTERNAL,
image_category=ImageCategory.GENERAL,
node_id=self.id,
session_id=context.graph_execution_state_id,
is_intermediate=self.is_intermediate,
metadata=self.metadata.dict() if self.metadata else None,
workflow=self.workflow,
)
return ImageOutput(
image=ImageField(image_name=image_dto.image_name),
width=image_dto.width,
height=image_dto.height,
)

View File

@@ -54,14 +54,7 @@ class DivideInvocation(BaseInvocation):
return IntegerOutput(value=int(self.a / self.b))
@invocation(
"rand_int",
title="Random Integer",
tags=["math", "random"],
category="math",
version="1.0.0",
use_cache=False,
)
@invocation("rand_int", title="Random Integer", tags=["math", "random"], category="math", version="1.0.0")
class RandomIntInvocation(BaseInvocation):
"""Outputs a single random integer."""

View File

@@ -10,14 +10,7 @@ from invokeai.app.invocations.primitives import StringCollectionOutput
from .baseinvocation import BaseInvocation, InputField, InvocationContext, UIComponent, invocation
@invocation(
"dynamic_prompt",
title="Dynamic Prompt",
tags=["prompt", "collection"],
category="prompt",
version="1.0.0",
use_cache=False,
)
@invocation("dynamic_prompt", title="Dynamic Prompt", tags=["prompt", "collection"], category="prompt", version="1.0.0")
class DynamicPromptInvocation(BaseInvocation):
"""Parses a prompt using adieyal/dynamicprompts' random or combinatorial generator"""

View File

@@ -0,0 +1,173 @@
from typing import List, Union
import torch
from diffusers import StableDiffusionUpscalePipeline
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
FieldDescriptions,
Input,
InputField,
InvocationContext,
UIType,
invocation,
)
from invokeai.app.invocations.image import ImageOutput
from invokeai.app.invocations.latent import SAMPLER_NAME_VALUES, get_scheduler
from invokeai.app.invocations.metadata import CoreMetadata
from invokeai.app.invocations.model import UNetField, VaeField
from invokeai.app.invocations.primitives import ConditioningField, ImageField
from invokeai.app.models.image import ImageCategory, ResourceOrigin
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend import BaseModelType
from invokeai.backend.stable_diffusion import ConditioningData, PipelineIntermediateState, PostprocessingSettings
@invocation("upscale_sdx4", title="Upscale (Stable Diffusion x4)", tags=["upscale"], version="0.1.0")
class UpscaleLatentsInvocation(BaseInvocation):
"""Upscales an image using an upscaling diffusion model.
https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler
The upscaling model is its own thing, independent of other Stable Diffusion text-to-image
models. We don't have ControlNet or LoRA support for it. It has its own VAE.
"""
# Inputs
image: ImageField = InputField(description="The image to upscale")
positive_conditioning: ConditioningField = InputField(
description=FieldDescriptions.positive_cond, input=Input.Connection
)
negative_conditioning: ConditioningField = InputField(
description=FieldDescriptions.negative_cond, input=Input.Connection
)
steps: int = InputField(default=10, gt=0, description=FieldDescriptions.steps)
cfg_scale: Union[float, List[float]] = InputField(
default=7.5, ge=1, description=FieldDescriptions.cfg_scale, ui_type=UIType.Float
)
scheduler: SAMPLER_NAME_VALUES = InputField(default="euler", description=FieldDescriptions.scheduler)
seed: int = InputField(default=0, description=FieldDescriptions.seed)
unet: UNetField = InputField(description=FieldDescriptions.unet, input=Input.Connection)
vae: VaeField = InputField(description=FieldDescriptions.vae, input=Input.Connection)
metadata: CoreMetadata = InputField(default=None, description=FieldDescriptions.core_metadata)
tiled: bool = InputField(default=False, description=FieldDescriptions.tiled)
@torch.inference_mode()
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)
model_manager = context.services.model_manager
unet_info = model_manager.get_model(**self.unet.unet.dict(), context=context)
vae_info = model_manager.get_model(**self.vae.vae.dict(), context=context)
with unet_info as unet, vae_info as vae:
# don't re-use the same scheduler instance for both fields
low_res_scheduler = get_scheduler(context, self.unet.scheduler, self.scheduler, self.seed ^ 0xFFFFFFFF)
scheduler = get_scheduler(context, self.unet.scheduler, self.scheduler, self.seed ^ 0xF7F7F7F7)
conditioning_data = self.get_conditioning_data(context, scheduler, unet, self.seed)
pipeline = StableDiffusionUpscalePipeline(
vae=vae,
text_encoder=None,
tokenizer=None,
unet=unet,
low_res_scheduler=low_res_scheduler,
scheduler=scheduler,
)
if self.tiled or context.services.configuration.tiled_decode:
vae.enable_tiling()
else:
vae.disable_tiling()
generator = torch.Generator().manual_seed(self.seed)
output = pipeline(
image=image,
# latents=noise,
num_inference_steps=self.steps,
guidance_scale=self.cfg_scale,
# noise_level =
generator=generator,
prompt_embeds=conditioning_data.text_embeddings.embeds.data,
negative_prompt_embeds=conditioning_data.unconditioned_embeddings.embeds.data,
output_type="pil",
callback=lambda *args: self.dispatch_upscale_progress(context, *args),
)
result_image = output.images[0]
image_dto = context.services.images.create(
image=result_image,
image_origin=ResourceOrigin.INTERNAL,
image_category=ImageCategory.GENERAL,
node_id=self.id,
session_id=context.graph_execution_state_id,
is_intermediate=self.is_intermediate,
metadata=self.metadata.dict() if self.metadata else None,
workflow=self.workflow,
)
return ImageOutput(
image=ImageField(image_name=image_dto.image_name),
width=image_dto.width,
height=image_dto.height,
)
def get_conditioning_data(
self,
context: InvocationContext,
scheduler,
unet,
seed,
) -> ConditioningData:
# FIXME: duplicated from DenoiseLatentsInvocation.get_conditioning_data
positive_cond_data = context.services.latents.get(self.positive_conditioning.conditioning_name)
c = positive_cond_data.conditionings[0].to(device=unet.device, dtype=unet.dtype)
extra_conditioning_info = c.extra_conditioning
negative_cond_data = context.services.latents.get(self.negative_conditioning.conditioning_name)
uc = negative_cond_data.conditionings[0].to(device=unet.device, dtype=unet.dtype)
conditioning_data = ConditioningData(
unconditioned_embeddings=uc,
text_embeddings=c,
guidance_scale=self.cfg_scale,
extra=extra_conditioning_info,
postprocessing_settings=PostprocessingSettings(
threshold=0.0, # threshold,
warmup=0.2, # warmup,
h_symmetry_time_pct=None, # h_symmetry_time_pct,
v_symmetry_time_pct=None, # v_symmetry_time_pct,
),
)
conditioning_data = conditioning_data.add_scheduler_args_if_applicable(
scheduler,
# for ddim scheduler
eta=0.0, # ddim_eta
# for ancestral and sde schedulers
# FIXME: why do we need both a generator here and a seed argument to get_scheduler?
generator=torch.Generator(device=unet.device).manual_seed(seed ^ 0xFFFFFFFF),
)
return conditioning_data
def dispatch_upscale_progress(self, context, step, timestep, latents):
graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
source_node_id = graph_execution_state.prepared_source_mapping[self.id]
intermediate_state = PipelineIntermediateState(
step=step,
order=1, # FIXME: fudging this, but why does it need both order and total-steps anyway?
total_steps=self.steps,
timestep=timestep,
latents=latents,
)
stable_diffusion_step_callback(
context=context,
intermediate_state=intermediate_state,
node=self.dict(),
source_node_id=source_node_id,
base_model=BaseModelType.StableDiffusionXLRefiner, # FIXME: this upscaler needs its own model type
)
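The node above wires diffusers' StableDiffusionUpscalePipeline into InvokeAI's model manager and conditioning plumbing. For reference, a minimal standalone sketch of the same pipeline through the stock diffusers API (the model id is the real Hub repo; file names and prompt are illustrative):

import torch
from diffusers import StableDiffusionUpscalePipeline
from PIL import Image

# Load the x4 upscaler directly from the Hub, outside InvokeAI.
pipe = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
).to("cuda")

low_res = Image.open("input.png").convert("RGB")  # e.g. 128x128
result = pipe(
    prompt="a white cat",  # the upscaler is text-conditioned, like txt2img models
    image=low_res,
    num_inference_steps=10,
    guidance_scale=7.5,
    generator=torch.Generator().manual_seed(0),
).images[0]
result.save("upscaled.png")  # 4x per side: 512x512 from a 128x128 input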

View File

@@ -253,7 +253,6 @@ class InvokeAIAppConfig(InvokeAISettings):
attention_type : Literal[tuple(["auto", "normal", "xformers", "sliced", "torch-sdp"])] = Field(default="auto", description="Attention type", category="Generation", )
attention_slice_size: Literal[tuple(["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8])] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', category="Generation", )
force_tiled_decode: bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category="Generation",)
node_cache_size : int = Field(default=512, description="How many cached nodes to keep in memory", category="Generation", )
# NODES
allow_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to allow. Omit to allow all.", category="Nodes")

View File

@@ -1,29 +0,0 @@
from abc import ABC, abstractmethod
from typing import Optional, Union
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput
class InvocationCacheBase(ABC):
"""Base class for invocation caches."""
@abstractmethod
def get(self, key: Union[int, str]) -> Optional[BaseInvocationOutput]:
"""Retrieves and invocation output from the cache"""
pass
@abstractmethod
def save(self, key: Union[int, str], value: BaseInvocationOutput) -> None:
"""Stores an invocation output in the cache"""
pass
@abstractmethod
def delete(self, key: Union[int, str]) -> None:
"""Deleted an invocation output from the cache"""
pass
@classmethod
@abstractmethod
def create_key(cls, value: BaseInvocation) -> Union[int, str]:
"""Creates the cache key for an invocation"""
pass

View File

@@ -1,34 +0,0 @@
from queue import Queue
from typing import Optional, Union
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput
from invokeai.app.services.invocation_cache.invocation_cache_base import InvocationCacheBase
class MemoryInvocationCache(InvocationCacheBase):
__cache: dict[Union[int, str], BaseInvocationOutput]
__max_cache_size: int
__cache_ids: Queue
def __init__(self, max_cache_size: int = 512) -> None:
self.__cache = dict()
self.__max_cache_size = max_cache_size
self.__cache_ids = Queue()
def get(self, key: Union[int, str]) -> Optional[BaseInvocationOutput]:
return self.__cache.get(key, None)
def save(self, key: Union[int, str], value: BaseInvocationOutput) -> None:
if key not in self.__cache:
self.__cache[key] = value
self.__cache_ids.put(key)
if self.__cache_ids.qsize() > self.__max_cache_size:
self.__cache.pop(self.__cache_ids.get())
def delete(self, key: Union[int, str]) -> None:
if key in self.__cache:
del self.__cache[key]
@classmethod
def create_key(cls, value: BaseInvocation) -> Union[int, str]:
return hash(value.json(exclude={"id"}))
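MemoryInvocationCache above is a plain dict with FIFO eviction: save() records insertion order in a Queue and pops the oldest key once qsize exceeds max_cache_size. A small behavioral sketch (output_a/b/c are hypothetical BaseInvocationOutput values):

cache = MemoryInvocationCache(max_cache_size=2)
cache.save("a", output_a)
cache.save("b", output_b)
cache.save("c", output_c)  # queue now exceeds 2, so "a" (the oldest) is evicted
assert cache.get("a") is None
assert cache.get("b") is output_b and cache.get("c") is output_c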

View File

@@ -12,7 +12,6 @@ if TYPE_CHECKING:
from invokeai.app.services.events import EventServiceBase
from invokeai.app.services.graph import GraphExecutionState, LibraryGraph
from invokeai.app.services.images import ImageServiceABC
from invokeai.app.services.invocation_cache.invocation_cache_base import InvocationCacheBase
from invokeai.app.services.invocation_queue import InvocationQueueABC
from invokeai.app.services.invocation_stats import InvocationStatsServiceBase
from invokeai.app.services.invoker import InvocationProcessorABC
@@ -38,7 +37,6 @@ class InvocationServices:
processor: "InvocationProcessorABC"
performance_statistics: "InvocationStatsServiceBase"
queue: "InvocationQueueABC"
invocation_cache: "InvocationCacheBase"
def __init__(
self,
@@ -55,7 +53,6 @@ class InvocationServices:
processor: "InvocationProcessorABC",
performance_statistics: "InvocationStatsServiceBase",
queue: "InvocationQueueABC",
invocation_cache: "InvocationCacheBase",
):
self.board_images = board_images
self.boards = boards
@@ -71,4 +68,3 @@ class InvocationServices:
self.processor = processor
self.performance_statistics = performance_statistics
self.queue = queue
self.invocation_cache = invocation_cache

View File

@@ -47,6 +47,7 @@ class ModelProbe(object):
CLASS2TYPE = {
"StableDiffusionPipeline": ModelType.Main,
"StableDiffusionInpaintPipeline": ModelType.Main,
"StableDiffusionUpscalePipeline": ModelType.Main,
"StableDiffusionXLPipeline": ModelType.Main,
"StableDiffusionXLImg2ImgPipeline": ModelType.Main,
"StableDiffusionXLInpaintPipeline": ModelType.Main,

View File

@@ -194,6 +194,8 @@ class StableDiffusion2Model(DiffusersModel):
variant = ModelVariantType.Depth
elif in_channels == 4:
variant = ModelVariantType.Normal
elif in_channels == 7:
variant = ModelVariantType.Normal # FIXME: temp kludge for 4x upscaler
else:
raise Exception("Unkown stable diffusion 2.* model format")
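For context on the 7-channel case: the probe infers an SD-2 checkpoint's variant from the UNet's input-channel count, and the x4 upscaler's UNet takes 7 channels, a 4-channel latent concatenated with the 3-channel low-res RGB conditioning image. A quick way to confirm with diffusers:

from diffusers import UNet2DConditionModel

# The conv_in width that trips the SD-2 probe's in_channels check.
unet = UNet2DConditionModel.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", subfolder="unet"
)
print(unet.config.in_channels)  # 7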

View File

@@ -1,5 +0,0 @@
import { Store } from '@reduxjs/toolkit';
import { atom } from 'nanostores';
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export const $store = atom<Store<any> | undefined>();

View File

@@ -31,7 +31,6 @@ import { actionSanitizer } from './middleware/devtools/actionSanitizer';
import { actionsDenylist } from './middleware/devtools/actionsDenylist';
import { stateSanitizer } from './middleware/devtools/stateSanitizer';
import { listenerMiddleware } from './middleware/listenerMiddleware';
import { $store } from './nanostores/store';
const allReducers = {
canvas: canvasReducer,
@@ -122,4 +121,3 @@ export type RootState = ReturnType<typeof store.getState>;
export type AppThunkDispatch = ThunkDispatch<RootState, any, AnyAction>;
export type AppDispatch = typeof store.dispatch;
export const stateSelector = (state: RootState) => state;
$store.set(store);

View File

@@ -1,9 +1,8 @@
import { CoreMetadata, LoRAMetadataItem } from 'features/nodes/types/types';
import { CoreMetadata } from 'features/nodes/types/types';
import { useRecallParameters } from 'features/parameters/hooks/useRecallParameters';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { isValidLoRAModel } from '../../../parameters/types/parameterSchemas';
import ImageMetadataItem from './ImageMetadataItem';
import { useTranslation } from 'react-i18next';
type Props = {
metadata?: CoreMetadata;
@@ -25,7 +24,6 @@ const ImageMetadataActions = (props: Props) => {
recallWidth,
recallHeight,
recallStrength,
recallLoRA,
} = useRecallParameters();
const handleRecallPositivePrompt = useCallback(() => {
@@ -68,13 +66,6 @@ const ImageMetadataActions = (props: Props) => {
recallStrength(metadata?.strength);
}, [metadata?.strength, recallStrength]);
const handleRecallLoRA = useCallback(
(lora: LoRAMetadataItem) => {
recallLoRA(lora);
},
[recallLoRA]
);
if (!metadata || Object.keys(metadata).length === 0) {
return null;
}
@@ -139,6 +130,20 @@ const ImageMetadataActions = (props: Props) => {
onClick={handleRecallHeight}
/>
)}
{/* {metadata.threshold !== undefined && (
<MetadataItem
label={t('metadata.threshold')}
value={metadata.threshold}
onClick={() => dispatch(setThreshold(Number(metadata.threshold)))}
/>
)}
{metadata.perlin !== undefined && (
<MetadataItem
label={t('metadata.perlin')}
value={metadata.perlin}
onClick={() => dispatch(setPerlin(Number(metadata.perlin)))}
/>
)} */}
{metadata.scheduler && (
<ImageMetadataItem
label={t('metadata.scheduler')}
@@ -160,6 +165,40 @@ const ImageMetadataActions = (props: Props) => {
onClick={handleRecallCfgScale}
/>
)}
{/* {metadata.variations && metadata.variations.length > 0 && (
<MetadataItem
label="{t('metadata.variations')}
value={seedWeightsToString(metadata.variations)}
onClick={() =>
dispatch(
setSeedWeights(seedWeightsToString(metadata.variations))
)
}
/>
)}
{metadata.seamless && (
<MetadataItem
label={t('metadata.seamless')}
value={metadata.seamless}
onClick={() => dispatch(setSeamless(metadata.seamless))}
/>
)}
{metadata.hires_fix && (
<MetadataItem
label={t('metadata.hiresFix')}
value={metadata.hires_fix}
onClick={() => dispatch(setHiresFix(metadata.hires_fix))}
/>
)} */}
{/* {init_image_path && (
<MetadataItem
label={t('metadata.initImage')}
value={init_image_path}
isLink
onClick={() => dispatch(setInitialImage(init_image_path))}
/>
)} */}
{metadata.strength && (
<ImageMetadataItem
label={t('metadata.strength')}
@@ -167,19 +206,13 @@ const ImageMetadataActions = (props: Props) => {
onClick={handleRecallStrength}
/>
)}
{metadata.loras &&
metadata.loras.map((lora, index) => {
if (isValidLoRAModel(lora.lora)) {
return (
<ImageMetadataItem
key={index}
label="LoRA"
value={`${lora.lora.model_name} - ${lora.weight}`}
onClick={() => handleRecallLoRA(lora)}
/>
);
}
})}
{/* {metadata.fit && (
<MetadataItem
label={t('metadata.fit')}
value={metadata.fit}
onClick={() => dispatch(setShouldFitToWidthHeight(metadata.fit))}
/>
)} */}
</>
);
};

View File

@@ -27,13 +27,6 @@ export const loraSlice = createSlice({
const { model_name, id, base_model } = action.payload;
state.loras[id] = { id, model_name, base_model, ...defaultLoRAConfig };
},
loraRecalled: (
state,
action: PayloadAction<LoRAModelConfigEntity & { weight: number }>
) => {
const { model_name, id, base_model, weight } = action.payload;
state.loras[id] = { id, model_name, base_model, weight };
},
loraRemoved: (state, action: PayloadAction<string>) => {
const id = action.payload;
delete state.loras[id];
@@ -69,7 +62,6 @@ export const {
loraWeightChanged,
loraWeightReset,
lorasCleared,
loraRecalled,
} = loraSlice.actions;
export default loraSlice.reducer;

View File

@@ -27,7 +27,7 @@ const EmbedWorkflowCheckbox = ({ nodeId }: { nodeId: string }) => {
return (
<FormControl as={Flex} sx={{ alignItems: 'center', gap: 2, w: 'auto' }}>
<FormLabel sx={{ fontSize: 'xs', mb: '1px' }}>Workflow</FormLabel>
<FormLabel sx={{ fontSize: 'xs', mb: '1px' }}>Embed Workflow</FormLabel>
<Checkbox
className="nopan"
size="sm"

View File

@@ -1,13 +1,14 @@
import { Flex, Grid, GridItem } from '@chakra-ui/react';
import { useAnyOrDirectInputFieldNames } from 'features/nodes/hooks/useAnyOrDirectInputFieldNames';
import { useConnectionInputFieldNames } from 'features/nodes/hooks/useConnectionInputFieldNames';
import { useOutputFieldNames } from 'features/nodes/hooks/useOutputFieldNames';
import { memo } from 'react';
import NodeWrapper from '../common/NodeWrapper';
import InvocationNodeFooter from './InvocationNodeFooter';
import InvocationNodeHeader from './InvocationNodeHeader';
import InputField from './fields/InputField';
import NodeWrapper from '../common/NodeWrapper';
import OutputField from './fields/OutputField';
import InputField from './fields/InputField';
import { useOutputFieldNames } from 'features/nodes/hooks/useOutputFieldNames';
import { useWithFooter } from 'features/nodes/hooks/useWithFooter';
import { useConnectionInputFieldNames } from 'features/nodes/hooks/useConnectionInputFieldNames';
import { useAnyOrDirectInputFieldNames } from 'features/nodes/hooks/useAnyOrDirectInputFieldNames';
type Props = {
nodeId: string;
@@ -21,6 +22,7 @@ const InvocationNode = ({ nodeId, isOpen, label, type, selected }: Props) => {
const inputConnectionFieldNames = useConnectionInputFieldNames(nodeId);
const inputAnyOrDirectFieldNames = useAnyOrDirectInputFieldNames(nodeId);
const outputFieldNames = useOutputFieldNames(nodeId);
const withFooter = useWithFooter(nodeId);
return (
<NodeWrapper nodeId={nodeId} selected={selected}>
@@ -41,7 +43,7 @@ const InvocationNode = ({ nodeId, isOpen, label, type, selected }: Props) => {
h: 'full',
py: 2,
gap: 1,
borderBottomRadius: 0,
borderBottomRadius: withFooter ? 0 : 'base',
}}
>
<Flex sx={{ flexDir: 'column', px: 2, w: 'full', h: 'full' }}>
@@ -74,7 +76,7 @@ const InvocationNode = ({ nodeId, isOpen, label, type, selected }: Props) => {
))}
</Flex>
</Flex>
<InvocationNodeFooter nodeId={nodeId} />
{withFooter && <InvocationNodeFooter nodeId={nodeId} />}
</>
)}
</NodeWrapper>

View File

@@ -3,15 +3,12 @@ import { DRAG_HANDLE_CLASSNAME } from 'features/nodes/types/constants';
import { memo } from 'react';
import EmbedWorkflowCheckbox from './EmbedWorkflowCheckbox';
import SaveToGalleryCheckbox from './SaveToGalleryCheckbox';
import UseCacheCheckbox from './UseCacheCheckbox';
import { useHasImageOutput } from 'features/nodes/hooks/useHasImageOutput';
type Props = {
nodeId: string;
};
const InvocationNodeFooter = ({ nodeId }: Props) => {
const hasImageOutput = useHasImageOutput(nodeId);
return (
<Flex
className={DRAG_HANDLE_CLASSNAME}
@@ -25,9 +22,8 @@ const InvocationNodeFooter = ({ nodeId }: Props) => {
justifyContent: 'space-between',
}}
>
{hasImageOutput && <EmbedWorkflowCheckbox nodeId={nodeId} />}
<UseCacheCheckbox nodeId={nodeId} />
{hasImageOutput && <SaveToGalleryCheckbox nodeId={nodeId} />}
<EmbedWorkflowCheckbox nodeId={nodeId} />
<SaveToGalleryCheckbox nodeId={nodeId} />
</Flex>
);
};

View File

@@ -1,35 +0,0 @@
import { Checkbox, Flex, FormControl, FormLabel } from '@chakra-ui/react';
import { useAppDispatch } from 'app/store/storeHooks';
import { useUseCache } from 'features/nodes/hooks/useUseCache';
import { nodeUseCacheChanged } from 'features/nodes/store/nodesSlice';
import { ChangeEvent, memo, useCallback } from 'react';
const UseCacheCheckbox = ({ nodeId }: { nodeId: string }) => {
const dispatch = useAppDispatch();
const useCache = useUseCache(nodeId);
const handleChange = useCallback(
(e: ChangeEvent<HTMLInputElement>) => {
dispatch(
nodeUseCacheChanged({
nodeId,
useCache: e.target.checked,
})
);
},
[dispatch, nodeId]
);
return (
<FormControl as={Flex} sx={{ alignItems: 'center', gap: 2, w: 'auto' }}>
<FormLabel sx={{ fontSize: 'xs', mb: '1px' }}>Use Cache</FormLabel>
<Checkbox
className="nopan"
size="sm"
onChange={handleChange}
isChecked={useCache}
/>
</FormControl>
);
};
export default memo(UseCacheCheckbox);

View File

@@ -146,7 +146,6 @@ export const useBuildNodeData = () => {
isIntermediate: true,
inputs,
outputs,
useCache: template.useCache,
},
};

View File

@@ -1,29 +0,0 @@
import { createSelector } from '@reduxjs/toolkit';
import { stateSelector } from 'app/store/store';
import { useAppSelector } from 'app/store/storeHooks';
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
import { useMemo } from 'react';
import { isInvocationNode } from '../types/types';
export const useUseCache = (nodeId: string) => {
const selector = useMemo(
() =>
createSelector(
stateSelector,
({ nodes }) => {
const node = nodes.nodes.find((node) => node.id === nodeId);
if (!isInvocationNode(node)) {
return false;
}
// cast to boolean to support older workflows that didn't have useCache
// TODO: handle this better somehow
return node.data.useCache;
},
defaultSelectorOptions
),
[nodeId]
);
const useCache = useAppSelector(selector);
return useCache;
};

View File

@@ -7,7 +7,7 @@ import { useMemo } from 'react';
import { FOOTER_FIELDS } from '../types/constants';
import { isInvocationNode } from '../types/types';
export const useHasImageOutputs = (nodeId: string) => {
export const useWithFooter = (nodeId: string) => {
const selector = useMemo(
() =>
createSelector(

View File

@@ -260,20 +260,6 @@ const nodesSlice = createSlice({
}
node.data.embedWorkflow = embedWorkflow;
},
nodeUseCacheChanged: (
state,
action: PayloadAction<{ nodeId: string; useCache: boolean }>
) => {
const { nodeId, useCache } = action.payload;
const nodeIndex = state.nodes.findIndex((n) => n.id === nodeId);
const node = state.nodes?.[nodeIndex];
if (!isInvocationNode(node)) {
return;
}
node.data.useCache = useCache;
},
nodeIsIntermediateChanged: (
state,
action: PayloadAction<{ nodeId: string; isIntermediate: boolean }>
@@ -918,7 +904,6 @@ export const {
nodeIsIntermediateChanged,
mouseOverNodeChanged,
nodeExclusivelySelected,
nodeUseCacheChanged,
} = nodesSlice.actions;
export default nodesSlice.reducer;

View File

@@ -1,4 +1,3 @@
import { $store } from 'app/store/nanostores/store';
import {
SchedulerParam,
zBaseModel,
@@ -8,8 +7,7 @@ import {
zSDXLRefinerModel,
zScheduler,
} from 'features/parameters/types/parameterSchemas';
import i18n from 'i18next';
import { has, keyBy } from 'lodash-es';
import { keyBy } from 'lodash-es';
import { OpenAPIV3 } from 'openapi-types';
import { RgbaColor } from 'react-colorful';
import { Node } from 'reactflow';
@@ -22,6 +20,7 @@ import {
import { O } from 'ts-toolbelt';
import { JsonObject } from 'type-fest';
import { z } from 'zod';
import i18n from 'i18next';
export type NonNullableGraph = O.Required<Graph, 'nodes' | 'edges'>;
@@ -58,10 +57,6 @@ export type InvocationTemplate = {
* The invocation's version.
*/
version?: string;
/**
* Whether or not this node should use the cache
*/
useCache: boolean;
};
export type FieldUIConfig = {
@@ -981,9 +976,6 @@ export type InvocationSchemaExtra = {
type: Omit<OpenAPIV3.SchemaObject, 'default'> & {
default: AnyInvocationType;
};
use_cache: Omit<OpenAPIV3.SchemaObject, 'default'> & {
default: boolean;
};
};
};
@@ -1065,13 +1057,6 @@ export const isInvocationFieldSchema = (
export type InvocationEdgeExtra = { type: 'default' | 'collapsed' };
const zLoRAMetadataItem = z.object({
lora: zLoRAModelField.deepPartial(),
weight: z.number(),
});
export type LoRAMetadataItem = z.infer<typeof zLoRAMetadataItem>;
export const zCoreMetadata = z
.object({
app_version: z.string().nullish(),
@@ -1091,7 +1076,14 @@ export const zCoreMetadata = z
.union([zMainModel.deepPartial(), zOnnxModel.deepPartial()])
.nullish(),
controlnets: z.array(zControlField.deepPartial()).nullish(),
loras: z.array(zLoRAMetadataItem).nullish(),
loras: z
.array(
z.object({
lora: zLoRAModelField.deepPartial(),
weight: z.number(),
})
)
.nullish(),
vae: zVaeModelField.nullish(),
strength: z.number().nullish(),
init_image: z.string().nullish(),
@@ -1147,37 +1139,9 @@ export const zInvocationNodeData = z.object({
version: zSemVer.optional(),
});
export const zInvocationNodeDataV2 = z.preprocess(
(arg) => {
try {
const data = zInvocationNodeData.parse(arg);
if (!has(data, 'useCache')) {
const nodeTemplates = $store.get()?.getState().nodes.nodeTemplates as
| Record<string, InvocationTemplate>
| undefined;
const template = nodeTemplates?.[data.type];
let useCache = true;
if (template) {
useCache = template.useCache;
}
Object.assign(data, { useCache });
}
return data;
} catch {
return arg;
}
},
zInvocationNodeData.extend({
useCache: z.boolean(),
})
);
// Massage this to get better type safety while developing
export type InvocationNodeData = Omit<
z.infer<typeof zInvocationNodeDataV2>,
z.infer<typeof zInvocationNodeData>,
'type'
> & {
type: AnyInvocationType;
@@ -1205,7 +1169,7 @@ const zDimension = z.number().gt(0).nullish();
export const zWorkflowInvocationNode = z.object({
id: z.string().trim().min(1),
type: z.literal('invocation'),
data: zInvocationNodeDataV2,
data: zInvocationNodeData,
width: zDimension,
height: zDimension,
position: zPosition,
@@ -1267,8 +1231,6 @@ export type WorkflowWarning = {
data: JsonObject;
};
const CURRENT_WORKFLOW_VERSION = '1.0.0';
export const zWorkflow = z.object({
name: z.string().default(''),
author: z.string().default(''),
@@ -1284,7 +1246,7 @@ export const zWorkflow = z.object({
.object({
version: zSemVer,
})
.default({ version: CURRENT_WORKFLOW_VERSION }),
.default({ version: '1.0.0' }),
});
export const zValidatedWorkflow = zWorkflow.transform((workflow) => {

View File

@@ -1,32 +1,46 @@
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import {
ImageNSFWBlurInvocation,
LatentsToImageInvocation,
MetadataAccumulatorInvocation,
} from 'services/api/types';
import { LATENTS_TO_IMAGE, NSFW_CHECKER } from './constants';
import {
LATENTS_TO_IMAGE,
METADATA_ACCUMULATOR,
NSFW_CHECKER,
} from './constants';
export const addNSFWCheckerToGraph = (
state: RootState,
graph: NonNullableGraph,
nodeIdToAddTo = LATENTS_TO_IMAGE
): void => {
const activeTabName = activeTabNameSelector(state);
const is_intermediate =
activeTabName === 'unifiedCanvas' ? !state.canvas.shouldAutoSave : false;
const nodeToAddTo = graph.nodes[nodeIdToAddTo] as
| LatentsToImageInvocation
| undefined;
const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as
| MetadataAccumulatorInvocation
| undefined;
if (!nodeToAddTo) {
// something has gone terribly awry
return;
}
nodeToAddTo.is_intermediate = true;
nodeToAddTo.use_cache = true;
const nsfwCheckerNode: ImageNSFWBlurInvocation = {
id: NSFW_CHECKER,
type: 'img_nsfw',
is_intermediate: true,
is_intermediate,
};
graph.nodes[NSFW_CHECKER] = nsfwCheckerNode as ImageNSFWBlurInvocation;
@@ -40,4 +54,17 @@ export const addNSFWCheckerToGraph = (
field: 'image',
},
});
if (metadataAccumulator) {
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: NSFW_CHECKER,
field: 'metadata',
},
});
}
};

View File

@@ -1,92 +0,0 @@
import { NonNullableGraph } from 'features/nodes/types/types';
import {
CANVAS_OUTPUT,
LATENTS_TO_IMAGE,
METADATA_ACCUMULATOR,
NSFW_CHECKER,
SAVE_IMAGE,
WATERMARKER,
} from './constants';
import {
MetadataAccumulatorInvocation,
SaveImageInvocation,
} from 'services/api/types';
import { RootState } from 'app/store/store';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
/**
* Set the `use_cache` field on the linear/canvas graph's final image output node to False.
*/
export const addSaveImageNode = (
state: RootState,
graph: NonNullableGraph
): void => {
const activeTabName = activeTabNameSelector(state);
const is_intermediate =
activeTabName === 'unifiedCanvas' ? !state.canvas.shouldAutoSave : false;
const saveImageNode: SaveImageInvocation = {
id: SAVE_IMAGE,
type: 'save_image',
is_intermediate,
use_cache: false,
};
graph.nodes[SAVE_IMAGE] = saveImageNode;
const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as
| MetadataAccumulatorInvocation
| undefined;
if (metadataAccumulator) {
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: SAVE_IMAGE,
field: 'metadata',
},
});
}
const destination = {
node_id: SAVE_IMAGE,
field: 'image',
};
if (WATERMARKER in graph.nodes) {
graph.edges.push({
source: {
node_id: WATERMARKER,
field: 'image',
},
destination,
});
} else if (NSFW_CHECKER in graph.nodes) {
graph.edges.push({
source: {
node_id: NSFW_CHECKER,
field: 'image',
},
destination,
});
} else if (CANVAS_OUTPUT in graph.nodes) {
graph.edges.push({
source: {
node_id: CANVAS_OUTPUT,
field: 'image',
},
destination,
});
} else if (LATENTS_TO_IMAGE in graph.nodes) {
graph.edges.push({
source: {
node_id: LATENTS_TO_IMAGE,
field: 'image',
},
destination,
});
}
};

View File

@@ -51,7 +51,6 @@ export const addWatermarkerToGraph = (
// no matter the situation, we want the l2i node to be intermediate
nodeToAddTo.is_intermediate = true;
nodeToAddTo.use_cache = true;
if (nsfwCheckerNode) {
// if we are using NSFW checker, we need to "disable" its output by marking it intermediate,

View File

@@ -25,7 +25,6 @@ import {
POSITIVE_CONDITIONING,
SEAMLESS,
} from './constants';
import { addSaveImageNode } from './addSaveImageNode';
/**
* Builds the Canvas tab's Image to Image graph.
@@ -54,10 +53,14 @@ export const buildCanvasImageToImageGraph = (
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const {
scaledBoundingBoxDimensions,
boundingBoxScaleMethod,
shouldAutoSave,
} = state.canvas;
const fp32 = vaePrecision === 'fp32';
const is_intermediate = true;
const isUsingScaledDimensions = ['auto', 'manual'].includes(
boundingBoxScaleMethod
);
@@ -89,31 +92,31 @@ export const buildCanvasImageToImageGraph = (
[modelLoaderNodeId]: {
type: 'main_model_loader',
id: modelLoaderNodeId,
is_intermediate,
is_intermediate: true,
model,
},
[CLIP_SKIP]: {
type: 'clip_skip',
id: CLIP_SKIP,
is_intermediate,
is_intermediate: true,
skipped_layers: clipSkip,
},
[POSITIVE_CONDITIONING]: {
type: 'compel',
id: POSITIVE_CONDITIONING,
is_intermediate,
is_intermediate: true,
prompt: positivePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'compel',
id: NEGATIVE_CONDITIONING,
is_intermediate,
is_intermediate: true,
prompt: negativePrompt,
},
[NOISE]: {
type: 'noise',
id: NOISE,
is_intermediate,
is_intermediate: true,
use_cpu,
width: !isUsingScaledDimensions
? width
@@ -125,12 +128,12 @@ export const buildCanvasImageToImageGraph = (
[IMAGE_TO_LATENTS]: {
type: 'i2l',
id: IMAGE_TO_LATENTS,
is_intermediate,
is_intermediate: true,
},
[DENOISE_LATENTS]: {
type: 'denoise_latents',
id: DENOISE_LATENTS,
is_intermediate,
is_intermediate: true,
cfg_scale,
scheduler,
steps,
@@ -140,7 +143,7 @@ export const buildCanvasImageToImageGraph = (
[CANVAS_OUTPUT]: {
type: 'l2i',
id: CANVAS_OUTPUT,
is_intermediate,
is_intermediate: !shouldAutoSave,
},
},
edges: [
@@ -235,7 +238,7 @@ export const buildCanvasImageToImageGraph = (
graph.nodes[IMG2IMG_RESIZE] = {
id: IMG2IMG_RESIZE,
type: 'img_resize',
is_intermediate,
is_intermediate: true,
image: initialImage,
width: scaledBoundingBoxDimensions.width,
height: scaledBoundingBoxDimensions.height,
@@ -243,13 +246,13 @@ export const buildCanvasImageToImageGraph = (
graph.nodes[LATENTS_TO_IMAGE] = {
id: LATENTS_TO_IMAGE,
type: 'l2i',
is_intermediate,
is_intermediate: true,
fp32,
};
graph.nodes[CANVAS_OUTPUT] = {
id: CANVAS_OUTPUT,
type: 'img_resize',
is_intermediate,
is_intermediate: !shouldAutoSave,
width: width,
height: height,
};
@@ -290,7 +293,7 @@ export const buildCanvasImageToImageGraph = (
graph.nodes[CANVAS_OUTPUT] = {
type: 'l2i',
id: CANVAS_OUTPUT,
is_intermediate,
is_intermediate: !shouldAutoSave,
fp32,
};
@@ -334,6 +337,17 @@ export const buildCanvasImageToImageGraph = (
init_image: initialImage.image_name,
};
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'metadata',
},
});
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
@@ -363,7 +377,5 @@ export const buildCanvasImageToImageGraph = (
addWatermarkerToGraph(state, graph, CANVAS_OUTPUT);
}
addSaveImageNode(state, graph);
return graph;
};

View File

@@ -44,7 +44,6 @@ import {
RANGE_OF_SIZE,
SEAMLESS,
} from './constants';
import { addSaveImageNode } from './addSaveImageNode';
/**
* Builds the Canvas tab's Inpaint graph.
@@ -88,8 +87,12 @@ export const buildCanvasInpaintGraph = (
const { width, height } = state.canvas.boundingBoxDimensions;
// We may need to set the inpaint width and height to scale the image
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const is_intermediate = true;
const {
scaledBoundingBoxDimensions,
boundingBoxScaleMethod,
shouldAutoSave,
} = state.canvas;
const fp32 = vaePrecision === 'fp32';
const isUsingScaledDimensions = ['auto', 'manual'].includes(
@@ -108,56 +111,56 @@ export const buildCanvasInpaintGraph = (
[modelLoaderNodeId]: {
type: 'main_model_loader',
id: modelLoaderNodeId,
is_intermediate,
is_intermediate: true,
model,
},
[CLIP_SKIP]: {
type: 'clip_skip',
id: CLIP_SKIP,
is_intermediate,
is_intermediate: true,
skipped_layers: clipSkip,
},
[POSITIVE_CONDITIONING]: {
type: 'compel',
id: POSITIVE_CONDITIONING,
is_intermediate,
is_intermediate: true,
prompt: positivePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'compel',
id: NEGATIVE_CONDITIONING,
is_intermediate,
is_intermediate: true,
prompt: negativePrompt,
},
[MASK_BLUR]: {
type: 'img_blur',
id: MASK_BLUR,
is_intermediate,
is_intermediate: true,
radius: maskBlur,
blur_type: maskBlurMethod,
},
[INPAINT_IMAGE]: {
type: 'i2l',
id: INPAINT_IMAGE,
is_intermediate,
is_intermediate: true,
fp32,
},
[NOISE]: {
type: 'noise',
id: NOISE,
use_cpu,
is_intermediate,
is_intermediate: true,
},
[INPAINT_CREATE_MASK]: {
type: 'create_denoise_mask',
id: INPAINT_CREATE_MASK,
is_intermediate,
is_intermediate: true,
fp32,
},
[DENOISE_LATENTS]: {
type: 'denoise_latents',
id: DENOISE_LATENTS,
is_intermediate,
is_intermediate: true,
steps: steps,
cfg_scale: cfg_scale,
scheduler: scheduler,
@@ -168,18 +171,18 @@ export const buildCanvasInpaintGraph = (
type: 'noise',
id: NOISE,
use_cpu,
is_intermediate,
is_intermediate: true,
},
[CANVAS_COHERENCE_NOISE_INCREMENT]: {
type: 'add',
id: CANVAS_COHERENCE_NOISE_INCREMENT,
b: 1,
is_intermediate,
is_intermediate: true,
},
[CANVAS_COHERENCE_DENOISE_LATENTS]: {
type: 'denoise_latents',
id: CANVAS_COHERENCE_DENOISE_LATENTS,
is_intermediate,
is_intermediate: true,
steps: canvasCoherenceSteps,
cfg_scale: cfg_scale,
scheduler: scheduler,
@@ -189,19 +192,19 @@ export const buildCanvasInpaintGraph = (
[LATENTS_TO_IMAGE]: {
type: 'l2i',
id: LATENTS_TO_IMAGE,
is_intermediate,
is_intermediate: true,
fp32,
},
[CANVAS_OUTPUT]: {
type: 'color_correct',
id: CANVAS_OUTPUT,
is_intermediate,
is_intermediate: !shouldAutoSave,
reference: canvasInitImage,
},
[RANGE_OF_SIZE]: {
type: 'range_of_size',
id: RANGE_OF_SIZE,
is_intermediate,
is_intermediate: true,
// seed - must be connected manually
// start: 0,
size: iterations,
@@ -210,7 +213,7 @@ export const buildCanvasInpaintGraph = (
[ITERATE]: {
type: 'iterate',
id: ITERATE,
is_intermediate,
is_intermediate: true,
},
},
edges: [
@@ -433,7 +436,7 @@ export const buildCanvasInpaintGraph = (
graph.nodes[INPAINT_IMAGE_RESIZE_UP] = {
type: 'img_resize',
id: INPAINT_IMAGE_RESIZE_UP,
is_intermediate,
is_intermediate: true,
width: scaledWidth,
height: scaledHeight,
image: canvasInitImage,
@@ -441,7 +444,7 @@ export const buildCanvasInpaintGraph = (
graph.nodes[MASK_RESIZE_UP] = {
type: 'img_resize',
id: MASK_RESIZE_UP,
is_intermediate,
is_intermediate: true,
width: scaledWidth,
height: scaledHeight,
image: canvasMaskImage,
@@ -449,14 +452,14 @@ export const buildCanvasInpaintGraph = (
graph.nodes[INPAINT_IMAGE_RESIZE_DOWN] = {
type: 'img_resize',
id: INPAINT_IMAGE_RESIZE_DOWN,
is_intermediate,
is_intermediate: true,
width: width,
height: height,
};
graph.nodes[MASK_RESIZE_DOWN] = {
type: 'img_resize',
id: MASK_RESIZE_DOWN,
is_intermediate,
is_intermediate: true,
width: width,
height: height,
};
@@ -594,7 +597,7 @@ export const buildCanvasInpaintGraph = (
graph.nodes[CANVAS_COHERENCE_INPAINT_CREATE_MASK] = {
type: 'create_denoise_mask',
id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
is_intermediate,
is_intermediate: true,
fp32,
};
@@ -647,7 +650,7 @@ export const buildCanvasInpaintGraph = (
graph.nodes[CANVAS_COHERENCE_MASK_EDGE] = {
type: 'mask_edge',
id: CANVAS_COHERENCE_MASK_EDGE,
is_intermediate,
is_intermediate: true,
edge_blur: maskBlur,
edge_size: maskBlur * 2,
low_threshold: 100,
@@ -744,7 +747,5 @@ export const buildCanvasInpaintGraph = (
addWatermarkerToGraph(state, graph, CANVAS_OUTPUT);
}
addSaveImageNode(state, graph);
return graph;
};

View File

@@ -46,7 +46,6 @@ import {
RANGE_OF_SIZE,
SEAMLESS,
} from './constants';
import { addSaveImageNode } from './addSaveImageNode';
/**
* Builds the Canvas tab's Outpaint graph.
@@ -92,10 +91,14 @@ export const buildCanvasOutpaintGraph = (
const { width, height } = state.canvas.boundingBoxDimensions;
// We may need to set the inpaint width and height to scale the image
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const {
scaledBoundingBoxDimensions,
boundingBoxScaleMethod,
shouldAutoSave,
} = state.canvas;
const fp32 = vaePrecision === 'fp32';
const is_intermediate = true;
const isUsingScaledDimensions = ['auto', 'manual'].includes(
boundingBoxScaleMethod
);
@@ -112,61 +115,61 @@ export const buildCanvasOutpaintGraph = (
[modelLoaderNodeId]: {
type: 'main_model_loader',
id: modelLoaderNodeId,
is_intermediate,
is_intermediate: true,
model,
},
[CLIP_SKIP]: {
type: 'clip_skip',
id: CLIP_SKIP,
is_intermediate,
is_intermediate: true,
skipped_layers: clipSkip,
},
[POSITIVE_CONDITIONING]: {
type: 'compel',
id: POSITIVE_CONDITIONING,
is_intermediate,
is_intermediate: true,
prompt: positivePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'compel',
id: NEGATIVE_CONDITIONING,
is_intermediate,
is_intermediate: true,
prompt: negativePrompt,
},
[MASK_FROM_ALPHA]: {
type: 'tomask',
id: MASK_FROM_ALPHA,
is_intermediate,
is_intermediate: true,
image: canvasInitImage,
},
[MASK_COMBINE]: {
type: 'mask_combine',
id: MASK_COMBINE,
is_intermediate,
is_intermediate: true,
mask2: canvasMaskImage,
},
[INPAINT_IMAGE]: {
type: 'i2l',
id: INPAINT_IMAGE,
is_intermediate,
is_intermediate: true,
fp32,
},
[NOISE]: {
type: 'noise',
id: NOISE,
use_cpu,
is_intermediate,
is_intermediate: true,
},
[INPAINT_CREATE_MASK]: {
type: 'create_denoise_mask',
id: INPAINT_CREATE_MASK,
is_intermediate,
is_intermediate: true,
fp32,
},
[DENOISE_LATENTS]: {
type: 'denoise_latents',
id: DENOISE_LATENTS,
is_intermediate,
is_intermediate: true,
steps: steps,
cfg_scale: cfg_scale,
scheduler: scheduler,
@@ -177,18 +180,18 @@ export const buildCanvasOutpaintGraph = (
type: 'noise',
id: NOISE,
use_cpu,
is_intermediate,
is_intermediate: true,
},
[CANVAS_COHERENCE_NOISE_INCREMENT]: {
type: 'add',
id: CANVAS_COHERENCE_NOISE_INCREMENT,
b: 1,
is_intermediate,
is_intermediate: true,
},
[CANVAS_COHERENCE_DENOISE_LATENTS]: {
type: 'denoise_latents',
id: CANVAS_COHERENCE_DENOISE_LATENTS,
is_intermediate,
is_intermediate: true,
steps: canvasCoherenceSteps,
cfg_scale: cfg_scale,
scheduler: scheduler,
@@ -198,18 +201,18 @@ export const buildCanvasOutpaintGraph = (
[LATENTS_TO_IMAGE]: {
type: 'l2i',
id: LATENTS_TO_IMAGE,
is_intermediate,
is_intermediate: true,
fp32,
},
[CANVAS_OUTPUT]: {
type: 'color_correct',
id: CANVAS_OUTPUT,
is_intermediate,
is_intermediate: !shouldAutoSave,
},
[RANGE_OF_SIZE]: {
type: 'range_of_size',
id: RANGE_OF_SIZE,
is_intermediate,
is_intermediate: true,
// seed - must be connected manually
// start: 0,
size: iterations,
@@ -218,7 +221,7 @@ export const buildCanvasOutpaintGraph = (
[ITERATE]: {
type: 'iterate',
id: ITERATE,
is_intermediate,
is_intermediate: true,
},
},
edges: [
@@ -469,7 +472,7 @@ export const buildCanvasOutpaintGraph = (
graph.nodes[INPAINT_INFILL] = {
type: 'infill_patchmatch',
id: INPAINT_INFILL,
is_intermediate,
is_intermediate: true,
downscale: infillPatchmatchDownscaleSize,
};
}
@@ -478,7 +481,7 @@ export const buildCanvasOutpaintGraph = (
graph.nodes[INPAINT_INFILL] = {
type: 'infill_lama',
id: INPAINT_INFILL,
is_intermediate,
is_intermediate: true,
};
}
@@ -486,7 +489,7 @@ export const buildCanvasOutpaintGraph = (
graph.nodes[INPAINT_INFILL] = {
type: 'infill_cv2',
id: INPAINT_INFILL,
is_intermediate,
is_intermediate: true,
};
}
@@ -494,7 +497,7 @@ export const buildCanvasOutpaintGraph = (
graph.nodes[INPAINT_INFILL] = {
type: 'infill_tile',
id: INPAINT_INFILL,
is_intermediate,
is_intermediate: true,
tile_size: infillTileSize,
};
}
@@ -508,7 +511,7 @@ export const buildCanvasOutpaintGraph = (
graph.nodes[INPAINT_IMAGE_RESIZE_UP] = {
type: 'img_resize',
id: INPAINT_IMAGE_RESIZE_UP,
is_intermediate,
is_intermediate: true,
width: scaledWidth,
height: scaledHeight,
image: canvasInitImage,
@@ -516,28 +519,28 @@ export const buildCanvasOutpaintGraph = (
graph.nodes[MASK_RESIZE_UP] = {
type: 'img_resize',
id: MASK_RESIZE_UP,
is_intermediate,
is_intermediate: true,
width: scaledWidth,
height: scaledHeight,
};
graph.nodes[INPAINT_IMAGE_RESIZE_DOWN] = {
type: 'img_resize',
id: INPAINT_IMAGE_RESIZE_DOWN,
is_intermediate,
is_intermediate: true,
width: width,
height: height,
};
graph.nodes[INPAINT_INFILL_RESIZE_DOWN] = {
type: 'img_resize',
id: INPAINT_INFILL_RESIZE_DOWN,
is_intermediate,
is_intermediate: true,
width: width,
height: height,
};
graph.nodes[MASK_RESIZE_DOWN] = {
type: 'img_resize',
id: MASK_RESIZE_DOWN,
is_intermediate,
is_intermediate: true,
width: width,
height: height,
};
@@ -696,7 +699,7 @@ export const buildCanvasOutpaintGraph = (
graph.nodes[CANVAS_COHERENCE_INPAINT_CREATE_MASK] = {
type: 'create_denoise_mask',
id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
is_intermediate,
is_intermediate: true,
fp32,
};
@@ -743,7 +746,7 @@ export const buildCanvasOutpaintGraph = (
graph.nodes[CANVAS_COHERENCE_MASK_EDGE] = {
type: 'mask_edge',
id: CANVAS_COHERENCE_MASK_EDGE,
is_intermediate,
is_intermediate: true,
edge_blur: maskBlur,
edge_size: maskBlur * 2,
low_threshold: 100,
@@ -846,7 +849,5 @@ export const buildCanvasOutpaintGraph = (
addWatermarkerToGraph(state, graph, CANVAS_OUTPUT);
}
addSaveImageNode(state, graph);
return graph;
};

View File

@@ -27,7 +27,6 @@ import {
SEAMLESS,
} from './constants';
import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt';
import { addSaveImageNode } from './addSaveImageNode';
/**
* Builds the Canvas tab's Image to Image graph.
@@ -62,10 +61,14 @@ export const buildCanvasSDXLImageToImageGraph = (
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const {
scaledBoundingBoxDimensions,
boundingBoxScaleMethod,
shouldAutoSave,
} = state.canvas;
const fp32 = vaePrecision === 'fp32';
const is_intermediate = true;
const isUsingScaledDimensions = ['auto', 'manual'].includes(
boundingBoxScaleMethod
);
@@ -119,7 +122,7 @@ export const buildCanvasSDXLImageToImageGraph = (
[NOISE]: {
type: 'noise',
id: NOISE,
is_intermediate,
is_intermediate: true,
use_cpu,
width: !isUsingScaledDimensions
? width
@@ -131,13 +134,13 @@ export const buildCanvasSDXLImageToImageGraph = (
[IMAGE_TO_LATENTS]: {
type: 'i2l',
id: IMAGE_TO_LATENTS,
is_intermediate,
is_intermediate: true,
fp32,
},
[SDXL_DENOISE_LATENTS]: {
type: 'denoise_latents',
id: SDXL_DENOISE_LATENTS,
is_intermediate,
is_intermediate: true,
cfg_scale,
scheduler,
steps,
@@ -248,7 +251,7 @@ export const buildCanvasSDXLImageToImageGraph = (
graph.nodes[IMG2IMG_RESIZE] = {
id: IMG2IMG_RESIZE,
type: 'img_resize',
is_intermediate,
is_intermediate: true,
image: initialImage,
width: scaledBoundingBoxDimensions.width,
height: scaledBoundingBoxDimensions.height,
@@ -256,13 +259,13 @@ export const buildCanvasSDXLImageToImageGraph = (
graph.nodes[LATENTS_TO_IMAGE] = {
id: LATENTS_TO_IMAGE,
type: 'l2i',
is_intermediate,
is_intermediate: true,
fp32,
};
graph.nodes[CANVAS_OUTPUT] = {
id: CANVAS_OUTPUT,
type: 'img_resize',
is_intermediate,
is_intermediate: !shouldAutoSave,
width: width,
height: height,
};
@@ -303,7 +306,7 @@ export const buildCanvasSDXLImageToImageGraph = (
graph.nodes[CANVAS_OUTPUT] = {
type: 'l2i',
id: CANVAS_OUTPUT,
is_intermediate,
is_intermediate: !shouldAutoSave,
fp32,
};
@@ -400,7 +403,5 @@ export const buildCanvasSDXLImageToImageGraph = (
addWatermarkerToGraph(state, graph, CANVAS_OUTPUT);
}
addSaveImageNode(state, graph);
return graph;
};

View File

@@ -46,7 +46,6 @@ import {
SEAMLESS,
} from './constants';
import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt';
import { addSaveImageNode } from './addSaveImageNode';
/**
* Builds the Canvas tab's Inpaint graph.
@@ -95,10 +94,14 @@ export const buildCanvasSDXLInpaintGraph = (
const { width, height } = state.canvas.boundingBoxDimensions;
// We may need to set the inpaint width and height to scale the image
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const {
scaledBoundingBoxDimensions,
boundingBoxScaleMethod,
shouldAutoSave,
} = state.canvas;
const fp32 = vaePrecision === 'fp32';
const is_intermediate = true;
const isUsingScaledDimensions = ['auto', 'manual'].includes(
boundingBoxScaleMethod
);
@@ -136,32 +139,32 @@ export const buildCanvasSDXLInpaintGraph = (
[MASK_BLUR]: {
type: 'img_blur',
id: MASK_BLUR,
is_intermediate,
is_intermediate: true,
radius: maskBlur,
blur_type: maskBlurMethod,
},
[INPAINT_IMAGE]: {
type: 'i2l',
id: INPAINT_IMAGE,
is_intermediate,
is_intermediate: true,
fp32,
},
[NOISE]: {
type: 'noise',
id: NOISE,
use_cpu,
is_intermediate,
is_intermediate: true,
},
[INPAINT_CREATE_MASK]: {
type: 'create_denoise_mask',
id: INPAINT_CREATE_MASK,
is_intermediate,
is_intermediate: true,
fp32,
},
[SDXL_DENOISE_LATENTS]: {
type: 'denoise_latents',
id: SDXL_DENOISE_LATENTS,
is_intermediate,
is_intermediate: true,
steps: steps,
cfg_scale: cfg_scale,
scheduler: scheduler,
@@ -174,18 +177,18 @@ export const buildCanvasSDXLInpaintGraph = (
type: 'noise',
id: NOISE,
use_cpu,
is_intermediate,
is_intermediate: true,
},
[CANVAS_COHERENCE_NOISE_INCREMENT]: {
type: 'add',
id: CANVAS_COHERENCE_NOISE_INCREMENT,
b: 1,
is_intermediate,
is_intermediate: true,
},
[CANVAS_COHERENCE_DENOISE_LATENTS]: {
type: 'denoise_latents',
id: CANVAS_COHERENCE_DENOISE_LATENTS,
is_intermediate,
is_intermediate: true,
steps: canvasCoherenceSteps,
cfg_scale: cfg_scale,
scheduler: scheduler,
@@ -195,19 +198,19 @@ export const buildCanvasSDXLInpaintGraph = (
[LATENTS_TO_IMAGE]: {
type: 'l2i',
id: LATENTS_TO_IMAGE,
is_intermediate,
is_intermediate: true,
fp32,
},
[CANVAS_OUTPUT]: {
type: 'color_correct',
id: CANVAS_OUTPUT,
is_intermediate,
is_intermediate: !shouldAutoSave,
reference: canvasInitImage,
},
[RANGE_OF_SIZE]: {
type: 'range_of_size',
id: RANGE_OF_SIZE,
is_intermediate,
is_intermediate: true,
// seed - must be connected manually
// start: 0,
size: iterations,
@@ -216,7 +219,7 @@ export const buildCanvasSDXLInpaintGraph = (
[ITERATE]: {
type: 'iterate',
id: ITERATE,
is_intermediate,
is_intermediate: true,
},
},
edges: [
@@ -448,7 +451,7 @@ export const buildCanvasSDXLInpaintGraph = (
graph.nodes[INPAINT_IMAGE_RESIZE_UP] = {
type: 'img_resize',
id: INPAINT_IMAGE_RESIZE_UP,
is_intermediate,
is_intermediate: true,
width: scaledWidth,
height: scaledHeight,
image: canvasInitImage,
@@ -456,7 +459,7 @@ export const buildCanvasSDXLInpaintGraph = (
graph.nodes[MASK_RESIZE_UP] = {
type: 'img_resize',
id: MASK_RESIZE_UP,
is_intermediate,
is_intermediate: true,
width: scaledWidth,
height: scaledHeight,
image: canvasMaskImage,
@@ -464,14 +467,14 @@ export const buildCanvasSDXLInpaintGraph = (
graph.nodes[INPAINT_IMAGE_RESIZE_DOWN] = {
type: 'img_resize',
id: INPAINT_IMAGE_RESIZE_DOWN,
is_intermediate,
is_intermediate: true,
width: width,
height: height,
};
graph.nodes[MASK_RESIZE_DOWN] = {
type: 'img_resize',
id: MASK_RESIZE_DOWN,
is_intermediate,
is_intermediate: true,
width: width,
height: height,
};
@@ -609,7 +612,7 @@ export const buildCanvasSDXLInpaintGraph = (
graph.nodes[CANVAS_COHERENCE_INPAINT_CREATE_MASK] = {
type: 'create_denoise_mask',
id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
is_intermediate,
is_intermediate: true,
fp32,
};
@@ -662,7 +665,7 @@ export const buildCanvasSDXLInpaintGraph = (
graph.nodes[CANVAS_COHERENCE_MASK_EDGE] = {
type: 'mask_edge',
id: CANVAS_COHERENCE_MASK_EDGE,
is_intermediate,
is_intermediate: true,
edge_blur: maskBlur,
edge_size: maskBlur * 2,
low_threshold: 100,
@@ -773,7 +776,5 @@ export const buildCanvasSDXLInpaintGraph = (
addWatermarkerToGraph(state, graph, CANVAS_OUTPUT);
}
addSaveImageNode(state, graph);
return graph;
};
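
A note on the recurring edit in this hunk: the shared `is_intermediate` shorthand is replaced with explicit values, and only `CANVAS_OUTPUT` is keyed to the restored `shouldAutoSave` canvas setting. A hedged sketch of the resulting behaviour follows; the `GraphNode` shape and the constant's value are simplifying assumptions, not the app's real types.

// Sketch: every node in the builder stays intermediate (never saved to the
// gallery); only the canvas output escapes when auto-save is enabled.
type GraphNode = { type: string; id: string; is_intermediate: boolean };

const CANVAS_OUTPUT = 'canvas_output'; // assumed constant value

function applyIntermediateFlags(
  nodes: Record<string, GraphNode>,
  shouldAutoSave: boolean
): void {
  for (const node of Object.values(nodes)) {
    node.is_intermediate = node.id === CANVAS_OUTPUT ? !shouldAutoSave : true;
  }
}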


@@ -48,7 +48,6 @@ import {
SEAMLESS,
} from './constants';
import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt';
import { addSaveImageNode } from './addSaveImageNode';
/**
* Builds the Canvas tab's Outpaint graph.
@@ -99,10 +98,14 @@ export const buildCanvasSDXLOutpaintGraph = (
const { width, height } = state.canvas.boundingBoxDimensions;
// We may need to set the inpaint width and height to scale the image
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const {
scaledBoundingBoxDimensions,
boundingBoxScaleMethod,
shouldAutoSave,
} = state.canvas;
const fp32 = vaePrecision === 'fp32';
const is_intermediate = true;
const isUsingScaledDimensions = ['auto', 'manual'].includes(
boundingBoxScaleMethod
);
@@ -140,37 +143,37 @@ export const buildCanvasSDXLOutpaintGraph = (
[MASK_FROM_ALPHA]: {
type: 'tomask',
id: MASK_FROM_ALPHA,
is_intermediate,
is_intermediate: true,
image: canvasInitImage,
},
[MASK_COMBINE]: {
type: 'mask_combine',
id: MASK_COMBINE,
is_intermediate,
is_intermediate: true,
mask2: canvasMaskImage,
},
[INPAINT_IMAGE]: {
type: 'i2l',
id: INPAINT_IMAGE,
is_intermediate,
is_intermediate: true,
fp32,
},
[NOISE]: {
type: 'noise',
id: NOISE,
use_cpu,
is_intermediate,
is_intermediate: true,
},
[INPAINT_CREATE_MASK]: {
type: 'create_denoise_mask',
id: INPAINT_CREATE_MASK,
is_intermediate,
is_intermediate: true,
fp32,
},
[SDXL_DENOISE_LATENTS]: {
type: 'denoise_latents',
id: SDXL_DENOISE_LATENTS,
is_intermediate,
is_intermediate: true,
steps: steps,
cfg_scale: cfg_scale,
scheduler: scheduler,
@@ -183,18 +186,18 @@ export const buildCanvasSDXLOutpaintGraph = (
type: 'noise',
id: NOISE,
use_cpu,
is_intermediate,
is_intermediate: true,
},
[CANVAS_COHERENCE_NOISE_INCREMENT]: {
type: 'add',
id: CANVAS_COHERENCE_NOISE_INCREMENT,
b: 1,
is_intermediate,
is_intermediate: true,
},
[CANVAS_COHERENCE_DENOISE_LATENTS]: {
type: 'denoise_latents',
id: CANVAS_COHERENCE_DENOISE_LATENTS,
is_intermediate,
is_intermediate: true,
steps: canvasCoherenceSteps,
cfg_scale: cfg_scale,
scheduler: scheduler,
@@ -204,18 +207,18 @@ export const buildCanvasSDXLOutpaintGraph = (
[LATENTS_TO_IMAGE]: {
type: 'l2i',
id: LATENTS_TO_IMAGE,
is_intermediate,
is_intermediate: true,
fp32,
},
[CANVAS_OUTPUT]: {
type: 'color_correct',
id: CANVAS_OUTPUT,
is_intermediate,
is_intermediate: !shouldAutoSave,
},
[RANGE_OF_SIZE]: {
type: 'range_of_size',
id: RANGE_OF_SIZE,
is_intermediate,
is_intermediate: true,
// seed - must be connected manually
// start: 0,
size: iterations,
@@ -224,7 +227,7 @@ export const buildCanvasSDXLOutpaintGraph = (
[ITERATE]: {
type: 'iterate',
id: ITERATE,
is_intermediate,
is_intermediate: true,
},
},
edges: [
@@ -484,7 +487,7 @@ export const buildCanvasSDXLOutpaintGraph = (
graph.nodes[INPAINT_INFILL] = {
type: 'infill_patchmatch',
id: INPAINT_INFILL,
is_intermediate,
is_intermediate: true,
downscale: infillPatchmatchDownscaleSize,
};
}
@@ -493,7 +496,7 @@ export const buildCanvasSDXLOutpaintGraph = (
graph.nodes[INPAINT_INFILL] = {
type: 'infill_lama',
id: INPAINT_INFILL,
is_intermediate,
is_intermediate: true,
};
}
@@ -501,7 +504,7 @@ export const buildCanvasSDXLOutpaintGraph = (
graph.nodes[INPAINT_INFILL] = {
type: 'infill_cv2',
id: INPAINT_INFILL,
is_intermediate,
is_intermediate: true,
};
}
@@ -509,7 +512,7 @@ export const buildCanvasSDXLOutpaintGraph = (
graph.nodes[INPAINT_INFILL] = {
type: 'infill_tile',
id: INPAINT_INFILL,
is_intermediate,
is_intermediate: true,
tile_size: infillTileSize,
};
}
@@ -523,7 +526,7 @@ export const buildCanvasSDXLOutpaintGraph = (
graph.nodes[INPAINT_IMAGE_RESIZE_UP] = {
type: 'img_resize',
id: INPAINT_IMAGE_RESIZE_UP,
is_intermediate,
is_intermediate: true,
width: scaledWidth,
height: scaledHeight,
image: canvasInitImage,
@@ -531,28 +534,28 @@ export const buildCanvasSDXLOutpaintGraph = (
graph.nodes[MASK_RESIZE_UP] = {
type: 'img_resize',
id: MASK_RESIZE_UP,
is_intermediate,
is_intermediate: true,
width: scaledWidth,
height: scaledHeight,
};
graph.nodes[INPAINT_IMAGE_RESIZE_DOWN] = {
type: 'img_resize',
id: INPAINT_IMAGE_RESIZE_DOWN,
is_intermediate,
is_intermediate: true,
width: width,
height: height,
};
graph.nodes[INPAINT_INFILL_RESIZE_DOWN] = {
type: 'img_resize',
id: INPAINT_INFILL_RESIZE_DOWN,
is_intermediate,
is_intermediate: true,
width: width,
height: height,
};
graph.nodes[MASK_RESIZE_DOWN] = {
type: 'img_resize',
id: MASK_RESIZE_DOWN,
is_intermediate,
is_intermediate: true,
width: width,
height: height,
};
@@ -712,7 +715,7 @@ export const buildCanvasSDXLOutpaintGraph = (
graph.nodes[CANVAS_COHERENCE_INPAINT_CREATE_MASK] = {
type: 'create_denoise_mask',
id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
is_intermediate,
is_intermediate: true,
fp32,
};
@@ -759,7 +762,7 @@ export const buildCanvasSDXLOutpaintGraph = (
graph.nodes[CANVAS_COHERENCE_MASK_EDGE] = {
type: 'mask_edge',
id: CANVAS_COHERENCE_MASK_EDGE,
is_intermediate,
is_intermediate: true,
edge_blur: maskBlur,
edge_size: maskBlur * 2,
low_threshold: 100,
@@ -876,7 +879,5 @@ export const buildCanvasSDXLOutpaintGraph = (
addWatermarkerToGraph(state, graph, CANVAS_OUTPUT);
}
addSaveImageNode(state, graph);
return graph;
};
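
One detail worth flagging in these canvas graphs: the coherence pass gets its own noise node, offset from the main pass via the `add` node with `b: 1`. The hunks show the node but not its edges, so the seed wiring below is an inference, collapsed into plain function calls rather than graph edges.

// Sketch of the inferred coherence-pass seed offset. The 'add' node computes
// a + b; with b = 1 the coherence noise never reuses the main pass's seed.
function addNode(a: number, b: number): number {
  return a + b;
}

function seedBothPasses(seed: number): { mainSeed: number; coherenceSeed: number } {
  const mainSeed = seed;
  const coherenceSeed = addNode(seed, 1); // CANVAS_COHERENCE_NOISE_INCREMENT, b: 1
  return { mainSeed, coherenceSeed };
}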


@@ -29,7 +29,6 @@ import {
SEAMLESS,
} from './constants';
import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt';
import { addSaveImageNode } from './addSaveImageNode';
/**
* Builds the Canvas tab's Text to Image graph.
@@ -56,10 +55,14 @@ export const buildCanvasSDXLTextToImageGraph = (
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const {
scaledBoundingBoxDimensions,
boundingBoxScaleMethod,
shouldAutoSave,
} = state.canvas;
const fp32 = vaePrecision === 'fp32';
const is_intermediate = true;
const isUsingScaledDimensions = ['auto', 'manual'].includes(
boundingBoxScaleMethod
);
@@ -91,7 +94,7 @@ export const buildCanvasSDXLTextToImageGraph = (
? {
type: 't2l_onnx',
id: SDXL_DENOISE_LATENTS,
is_intermediate,
is_intermediate: true,
cfg_scale,
scheduler,
steps,
@@ -99,7 +102,7 @@ export const buildCanvasSDXLTextToImageGraph = (
: {
type: 'denoise_latents',
id: SDXL_DENOISE_LATENTS,
is_intermediate,
is_intermediate: true,
cfg_scale,
scheduler,
steps,
@@ -128,27 +131,27 @@ export const buildCanvasSDXLTextToImageGraph = (
[modelLoaderNodeId]: {
type: modelLoaderNodeType,
id: modelLoaderNodeId,
is_intermediate,
is_intermediate: true,
model,
},
[POSITIVE_CONDITIONING]: {
type: isUsingOnnxModel ? 'prompt_onnx' : 'sdxl_compel_prompt',
id: POSITIVE_CONDITIONING,
is_intermediate,
is_intermediate: true,
prompt: positivePrompt,
style: craftedPositiveStylePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: isUsingOnnxModel ? 'prompt_onnx' : 'sdxl_compel_prompt',
id: NEGATIVE_CONDITIONING,
is_intermediate,
is_intermediate: true,
prompt: negativePrompt,
style: craftedNegativeStylePrompt,
},
[NOISE]: {
type: 'noise',
id: NOISE,
is_intermediate,
is_intermediate: true,
width: !isUsingScaledDimensions
? width
: scaledBoundingBoxDimensions.width,
@@ -250,14 +253,14 @@ export const buildCanvasSDXLTextToImageGraph = (
graph.nodes[LATENTS_TO_IMAGE] = {
id: LATENTS_TO_IMAGE,
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
is_intermediate,
is_intermediate: true,
fp32,
};
graph.nodes[CANVAS_OUTPUT] = {
id: CANVAS_OUTPUT,
type: 'img_resize',
is_intermediate,
is_intermediate: !shouldAutoSave,
width: width,
height: height,
};
@@ -288,7 +291,7 @@ export const buildCanvasSDXLTextToImageGraph = (
graph.nodes[CANVAS_OUTPUT] = {
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
id: CANVAS_OUTPUT,
is_intermediate,
is_intermediate: !shouldAutoSave,
fp32,
};
@@ -380,7 +383,5 @@ export const buildCanvasSDXLTextToImageGraph = (
addWatermarkerToGraph(state, graph, CANVAS_OUTPUT);
}
addSaveImageNode(state, graph);
return graph;
};


@@ -10,7 +10,6 @@ import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSaveImageNode } from './addSaveImageNode';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
@@ -54,10 +53,14 @@ export const buildCanvasTextToImageGraph = (
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const {
scaledBoundingBoxDimensions,
boundingBoxScaleMethod,
shouldAutoSave,
} = state.canvas;
const fp32 = vaePrecision === 'fp32';
const is_intermediate = true;
const isUsingScaledDimensions = ['auto', 'manual'].includes(
boundingBoxScaleMethod
);
@@ -86,7 +89,7 @@ export const buildCanvasTextToImageGraph = (
? {
type: 't2l_onnx',
id: DENOISE_LATENTS,
is_intermediate,
is_intermediate: true,
cfg_scale,
scheduler,
steps,
@@ -94,7 +97,7 @@ export const buildCanvasTextToImageGraph = (
: {
type: 'denoise_latents',
id: DENOISE_LATENTS,
is_intermediate,
is_intermediate: true,
cfg_scale,
scheduler,
steps,
@@ -119,31 +122,31 @@ export const buildCanvasTextToImageGraph = (
[modelLoaderNodeId]: {
type: modelLoaderNodeType,
id: modelLoaderNodeId,
is_intermediate,
is_intermediate: true,
model,
},
[CLIP_SKIP]: {
type: 'clip_skip',
id: CLIP_SKIP,
is_intermediate,
is_intermediate: true,
skipped_layers: clipSkip,
},
[POSITIVE_CONDITIONING]: {
type: isUsingOnnxModel ? 'prompt_onnx' : 'compel',
id: POSITIVE_CONDITIONING,
is_intermediate,
is_intermediate: true,
prompt: positivePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: isUsingOnnxModel ? 'prompt_onnx' : 'compel',
id: NEGATIVE_CONDITIONING,
is_intermediate,
is_intermediate: true,
prompt: negativePrompt,
},
[NOISE]: {
type: 'noise',
id: NOISE,
is_intermediate,
is_intermediate: true,
width: !isUsingScaledDimensions
? width
: scaledBoundingBoxDimensions.width,
@@ -236,14 +239,14 @@ export const buildCanvasTextToImageGraph = (
graph.nodes[LATENTS_TO_IMAGE] = {
id: LATENTS_TO_IMAGE,
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
is_intermediate,
is_intermediate: true,
fp32,
};
graph.nodes[CANVAS_OUTPUT] = {
id: CANVAS_OUTPUT,
type: 'img_resize',
is_intermediate,
is_intermediate: !shouldAutoSave,
width: width,
height: height,
};
@@ -274,7 +277,7 @@ export const buildCanvasTextToImageGraph = (
graph.nodes[CANVAS_OUTPUT] = {
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
id: CANVAS_OUTPUT,
is_intermediate,
is_intermediate: !shouldAutoSave,
fp32,
};
@@ -353,7 +356,5 @@ export const buildCanvasTextToImageGraph = (
addWatermarkerToGraph(state, graph, CANVAS_OUTPUT);
}
addSaveImageNode(state, graph);
return graph;
};
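
In both canvas text-to-image builders the output node changes shape with the scale method: when the bounding box is scaled ('auto' or 'manual'), decoding stays intermediate and an `img_resize` back to the box size becomes `CANVAS_OUTPUT`; otherwise the decode node itself is the output. A simplified sketch, with assumed node shapes:

// Hedged sketch of the CANVAS_OUTPUT selection in the hunks above.
type OutputNode =
  | { type: 'img_resize'; id: string; is_intermediate: boolean; width: number; height: number }
  | { type: 'l2i'; id: string; is_intermediate: boolean; fp32: boolean };

function buildCanvasOutput(
  boundingBoxScaleMethod: 'auto' | 'manual' | 'none',
  shouldAutoSave: boolean,
  width: number,
  height: number,
  fp32: boolean
): OutputNode {
  const isUsingScaledDimensions = ['auto', 'manual'].includes(boundingBoxScaleMethod);
  return isUsingScaledDimensions
    ? // decode happens upstream at the scaled size; resize back down here
      { type: 'img_resize', id: 'canvas_output', is_intermediate: !shouldAutoSave, width, height }
    : // no scaling: the latents-to-image decode is itself the output
      { type: 'l2i', id: 'canvas_output', is_intermediate: !shouldAutoSave, fp32 };
}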


@@ -27,7 +27,6 @@ import {
RESIZE,
SEAMLESS,
} from './constants';
import { addSaveImageNode } from './addSaveImageNode';
/**
* Builds the Image to Image tab graph.
@@ -86,7 +85,6 @@ export const buildLinearImageToImageGraph = (
}
const fp32 = vaePrecision === 'fp32';
const is_intermediate = true;
let modelLoaderNodeId = MAIN_MODEL_LOADER;
@@ -102,37 +100,31 @@ export const buildLinearImageToImageGraph = (
type: 'main_model_loader',
id: modelLoaderNodeId,
model,
is_intermediate,
},
[CLIP_SKIP]: {
type: 'clip_skip',
id: CLIP_SKIP,
skipped_layers: clipSkip,
is_intermediate,
},
[POSITIVE_CONDITIONING]: {
type: 'compel',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
is_intermediate,
},
[NEGATIVE_CONDITIONING]: {
type: 'compel',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
is_intermediate,
},
[NOISE]: {
type: 'noise',
id: NOISE,
use_cpu,
is_intermediate,
},
[LATENTS_TO_IMAGE]: {
type: 'l2i',
id: LATENTS_TO_IMAGE,
fp32,
is_intermediate,
},
[DENOISE_LATENTS]: {
type: 'denoise_latents',
@@ -142,7 +134,6 @@ export const buildLinearImageToImageGraph = (
steps,
denoising_start: 1 - strength,
denoising_end: 1,
is_intermediate,
},
[IMAGE_TO_LATENTS]: {
type: 'i2l',
@@ -152,7 +143,6 @@ export const buildLinearImageToImageGraph = (
// image_name: initialImage.image_name,
// },
fp32,
is_intermediate,
},
},
edges: [
@@ -385,7 +375,5 @@ export const buildLinearImageToImageGraph = (
addWatermarkerToGraph(state, graph);
}
addSaveImageNode(state, graph);
return graph;
};


@@ -29,7 +29,6 @@ import {
SEAMLESS,
} from './constants';
import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt';
import { addSaveImageNode } from './addSaveImageNode';
/**
* Builds the Image to Image tab graph.
@@ -86,7 +85,6 @@ export const buildLinearSDXLImageToImageGraph = (
}
const fp32 = vaePrecision === 'fp32';
const is_intermediate = true;
// Model Loader ID
let modelLoaderNodeId = SDXL_MODEL_LOADER;
@@ -107,33 +105,28 @@ export const buildLinearSDXLImageToImageGraph = (
type: 'sdxl_model_loader',
id: modelLoaderNodeId,
model,
is_intermediate,
},
[POSITIVE_CONDITIONING]: {
type: 'sdxl_compel_prompt',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
style: craftedPositiveStylePrompt,
is_intermediate,
},
[NEGATIVE_CONDITIONING]: {
type: 'sdxl_compel_prompt',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
style: craftedNegativeStylePrompt,
is_intermediate,
},
[NOISE]: {
type: 'noise',
id: NOISE,
use_cpu,
is_intermediate,
},
[LATENTS_TO_IMAGE]: {
type: 'l2i',
id: LATENTS_TO_IMAGE,
fp32,
is_intermediate,
},
[SDXL_DENOISE_LATENTS]: {
type: 'denoise_latents',
@@ -145,7 +138,6 @@ export const buildLinearSDXLImageToImageGraph = (
? Math.min(refinerStart, 1 - strength)
: 1 - strength,
denoising_end: shouldUseSDXLRefiner ? refinerStart : 1,
is_intermediate,
},
[IMAGE_TO_LATENTS]: {
type: 'i2l',
@@ -155,7 +147,6 @@ export const buildLinearSDXLImageToImageGraph = (
// image_name: initialImage.image_name,
// },
fp32,
is_intermediate,
},
},
edges: [
@@ -407,7 +398,5 @@ export const buildLinearSDXLImageToImageGraph = (
addWatermarkerToGraph(state, graph);
}
addSaveImageNode(state, graph);
return graph;
};


@@ -23,7 +23,6 @@ import {
SEAMLESS,
} from './constants';
import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt';
import { addSaveImageNode } from './addSaveImageNode';
export const buildLinearSDXLTextToImageGraph = (
state: RootState
@@ -57,13 +56,13 @@ export const buildLinearSDXLTextToImageGraph = (
const use_cpu = shouldUseNoiseSettings
? shouldUseCpuNoise
: initialGenerationState.shouldUseCpuNoise;
if (!model) {
log.error('No model found in state');
throw new Error('No model found in state');
}
const fp32 = vaePrecision === 'fp32';
const is_intermediate = true;
// Construct Style Prompt
const { craftedPositiveStylePrompt, craftedNegativeStylePrompt } =
@@ -89,21 +88,18 @@ export const buildLinearSDXLTextToImageGraph = (
type: 'sdxl_model_loader',
id: modelLoaderNodeId,
model,
is_intermediate,
},
[POSITIVE_CONDITIONING]: {
type: 'sdxl_compel_prompt',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
style: craftedPositiveStylePrompt,
is_intermediate,
},
[NEGATIVE_CONDITIONING]: {
type: 'sdxl_compel_prompt',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
style: craftedNegativeStylePrompt,
is_intermediate,
},
[NOISE]: {
type: 'noise',
@@ -111,7 +107,6 @@ export const buildLinearSDXLTextToImageGraph = (
width,
height,
use_cpu,
is_intermediate,
},
[SDXL_DENOISE_LATENTS]: {
type: 'denoise_latents',
@@ -121,13 +116,11 @@ export const buildLinearSDXLTextToImageGraph = (
steps,
denoising_start: 0,
denoising_end: shouldUseSDXLRefiner ? refinerStart : 1,
is_intermediate,
},
[LATENTS_TO_IMAGE]: {
type: 'l2i',
id: LATENTS_TO_IMAGE,
fp32,
is_intermediate,
},
},
edges: [
@@ -298,7 +291,5 @@ export const buildLinearSDXLTextToImageGraph = (
addWatermarkerToGraph(state, graph);
}
addSaveImageNode(state, graph);
return graph;
};


@@ -26,7 +26,6 @@ import {
SEAMLESS,
TEXT_TO_IMAGE_GRAPH,
} from './constants';
import { addSaveImageNode } from './addSaveImageNode';
export const buildLinearTextToImageGraph = (
state: RootState
@@ -59,7 +58,7 @@ export const buildLinearTextToImageGraph = (
}
const fp32 = vaePrecision === 'fp32';
const is_intermediate = true;
const isUsingOnnxModel = model.model_type === 'onnx';
let modelLoaderNodeId = isUsingOnnxModel
@@ -75,7 +74,7 @@ export const buildLinearTextToImageGraph = (
? {
type: 't2l_onnx',
id: DENOISE_LATENTS,
is_intermediate,
is_intermediate: true,
cfg_scale,
scheduler,
steps,
@@ -83,7 +82,7 @@ export const buildLinearTextToImageGraph = (
: {
type: 'denoise_latents',
id: DENOISE_LATENTS,
is_intermediate,
is_intermediate: true,
cfg_scale,
scheduler,
steps,
@@ -109,26 +108,26 @@ export const buildLinearTextToImageGraph = (
[modelLoaderNodeId]: {
type: modelLoaderNodeType,
id: modelLoaderNodeId,
is_intermediate,
is_intermediate: true,
model,
},
[CLIP_SKIP]: {
type: 'clip_skip',
id: CLIP_SKIP,
skipped_layers: clipSkip,
is_intermediate,
is_intermediate: true,
},
[POSITIVE_CONDITIONING]: {
type: isUsingOnnxModel ? 'prompt_onnx' : 'compel',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
is_intermediate,
is_intermediate: true,
},
[NEGATIVE_CONDITIONING]: {
type: isUsingOnnxModel ? 'prompt_onnx' : 'compel',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
is_intermediate,
is_intermediate: true,
},
[NOISE]: {
type: 'noise',
@@ -136,14 +135,13 @@ export const buildLinearTextToImageGraph = (
width,
height,
use_cpu,
is_intermediate,
is_intermediate: true,
},
[t2lNode.id]: t2lNode,
[LATENTS_TO_IMAGE]: {
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
id: LATENTS_TO_IMAGE,
fp32,
is_intermediate,
},
},
edges: [
@@ -295,7 +293,5 @@ export const buildLinearTextToImageGraph = (
addWatermarkerToGraph(state, graph);
}
addSaveImageNode(state, graph);
return graph;
};
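
The linear text-to-image builder picks its denoise node by model type: ONNX models use `t2l_onnx`, everything else `denoise_latents`, and the chosen node is spliced into the graph under its own id (`[t2lNode.id]: t2lNode`). A trimmed sketch of that selection; the union type is an assumption.

// Hedged sketch of the t2lNode selection in the hunk above.
type T2LNode =
  | { type: 't2l_onnx'; id: string; is_intermediate: boolean }
  | { type: 'denoise_latents'; id: string; is_intermediate: boolean };

const DENOISE_LATENTS = 'denoise_latents';

function makeT2LNode(isUsingOnnxModel: boolean): T2LNode {
  return isUsingOnnxModel
    ? { type: 't2l_onnx', id: DENOISE_LATENTS, is_intermediate: true }
    : { type: 'denoise_latents', id: DENOISE_LATENTS, is_intermediate: true };
}

const t2lNode = makeT2LNode(false);
const nodes = { [t2lNode.id]: t2lNode }; // mirrors `[t2lNode.id]: t2lNode` above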


@@ -55,9 +55,6 @@ export const buildNodesGraph = (nodesState: NodesState): Graph => {
{} as Record<Exclude<string, 'id' | 'type'>, unknown>
);
// add reserved use_cache
transformedInputs['use_cache'] = node.data.useCache;
// Build this specific node
const graphNode = {
type,
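
For context on this hunk: the workflow serializer copies user-editable inputs into the payload and then writes reserved keys on top, which is where the `use_cache` line being removed here fit in. A hedged sketch of that shape, with deliberately simplified types:

// Sketch of reserved-field handling in buildNodesGraph. Reserved keys are
// written after the editable inputs are copied, so a UI field named
// `use_cache` could never collide with the real flag. (Simplified: the real
// code also excludes reserved names when copying inputs.)
type NodeData = { id: string; type: string; useCache: boolean; inputs: Record<string, unknown> };

function toGraphNode(node: NodeData): Record<string, unknown> {
  const transformedInputs: Record<string, unknown> = { ...node.inputs };
  transformedInputs['use_cache'] = node.useCache; // the reserved write removed above
  return { type: node.type, id: node.id, ...transformedInputs };
}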


@@ -3,7 +3,6 @@ export const POSITIVE_CONDITIONING = 'positive_conditioning';
export const NEGATIVE_CONDITIONING = 'negative_conditioning';
export const DENOISE_LATENTS = 'denoise_latents';
export const LATENTS_TO_IMAGE = 'latents_to_image';
export const SAVE_IMAGE = 'save_image';
export const NSFW_CHECKER = 'nsfw_checker';
export const WATERMARKER = 'invisible_watermark';
export const NOISE = 'noise';


@@ -16,7 +16,7 @@ import {
} from '../types/types';
import { buildInputFieldTemplate, getFieldType } from './fieldTemplateBuilders';
const RESERVED_INPUT_FIELD_NAMES = ['id', 'type', 'metadata', 'use_cache'];
const RESERVED_INPUT_FIELD_NAMES = ['id', 'type', 'metadata'];
const RESERVED_OUTPUT_FIELD_NAMES = ['type'];
const RESERVED_FIELD_TYPES = [
'WorkflowField',
@@ -235,8 +235,6 @@ export const parseSchema = (
{} as Record<string, OutputFieldTemplate>
);
const useCache = schema.properties.use_cache.default;
const invocation: InvocationTemplate = {
title,
type,
@@ -246,7 +244,6 @@ export const parseSchema = (
outputType,
inputs,
outputs,
useCache,
};
Object.assign(invocationsAccumulator, { [type]: invocation });
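
The companion change on the template side: `use_cache` leaves `RESERVED_INPUT_FIELD_NAMES`, so the parser no longer skips it when turning OpenAPI properties into input-field templates. A reduced sketch of how that reserved list is consumed; the property shapes are assumptions.

// Hedged sketch: reserved names never become editable input fields.
const RESERVED_INPUT_FIELD_NAMES = ['id', 'type', 'metadata'];

function editableFieldNames(properties: Record<string, unknown>): string[] {
  return Object.keys(properties).filter(
    (name) => !RESERVED_INPUT_FIELD_NAMES.includes(name)
  );
}

// e.g. editableFieldNames({ id: {}, type: {}, prompt: {} }) -> ['prompt']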


@@ -1,8 +1,6 @@
import { createSelector } from '@reduxjs/toolkit';
import { useAppToaster } from 'app/components/Toaster';
import { stateSelector } from 'app/store/store';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { CoreMetadata, LoRAMetadataItem } from 'features/nodes/types/types';
import { useAppDispatch } from 'app/store/storeHooks';
import { CoreMetadata } from 'features/nodes/types/types';
import {
refinerModelChanged,
setNegativeStylePromptSDXL,
@@ -17,11 +15,6 @@ import {
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { ImageDTO } from 'services/api/types';
import {
loraModelsAdapter,
useGetLoRAModelsQuery,
} from '../../../services/api/endpoints/models';
import { loraRecalled } from '../../lora/store/loraSlice';
import { initialImageSelected, modelSelected } from '../store/actions';
import {
setCfgScale,
@@ -37,7 +30,6 @@ import {
import {
isValidCfgScale,
isValidHeight,
isValidLoRAModel,
isValidMainModel,
isValidNegativePrompt,
isValidPositivePrompt,
@@ -54,16 +46,10 @@ import {
isValidWidth,
} from '../types/parameterSchemas';
const selector = createSelector(stateSelector, ({ generation }) => {
const { model } = generation;
return { model };
});
export const useRecallParameters = () => {
const dispatch = useAppDispatch();
const toaster = useAppToaster();
const { t } = useTranslation();
const { model } = useAppSelector(selector);
const parameterSetToast = useCallback(() => {
toaster({
@@ -74,18 +60,14 @@ export const useRecallParameters = () => {
});
}, [t, toaster]);
const parameterNotSetToast = useCallback(
(description?: string) => {
toaster({
title: t('toast.parameterNotSet'),
description,
status: 'warning',
duration: 2500,
isClosable: true,
});
},
[t, toaster]
);
const parameterNotSetToast = useCallback(() => {
toaster({
title: t('toast.parameterNotSet'),
status: 'warning',
duration: 2500,
isClosable: true,
});
}, [t, toaster]);
const allParameterSetToast = useCallback(() => {
toaster({
@@ -96,18 +78,14 @@ export const useRecallParameters = () => {
});
}, [t, toaster]);
const allParameterNotSetToast = useCallback(
(description?: string) => {
toaster({
title: t('toast.parametersNotSet'),
status: 'warning',
description,
duration: 2500,
isClosable: true,
});
},
[t, toaster]
);
const allParameterNotSetToast = useCallback(() => {
toaster({
title: t('toast.parametersNotSet'),
status: 'warning',
duration: 2500,
isClosable: true,
});
}, [t, toaster]);
/**
* Recall both prompts with toast
@@ -329,67 +307,6 @@ export const useRecallParameters = () => {
[dispatch, parameterSetToast, parameterNotSetToast]
);
/**
* Recall LoRA with toast
*/
const { loras } = useGetLoRAModelsQuery(undefined, {
selectFromResult: (result) => ({
loras: result.data
? loraModelsAdapter.getSelectors().selectAll(result.data)
: [],
}),
});
const prepareLoRAMetadataItem = useCallback(
(loraMetadataItem: LoRAMetadataItem) => {
if (!isValidLoRAModel(loraMetadataItem.lora)) {
return { lora: null, error: 'Invalid LoRA model' };
}
const { base_model, model_name } = loraMetadataItem.lora;
const matchingLoRA = loras.find(
(l) => l.base_model === base_model && l.model_name === model_name
);
if (!matchingLoRA) {
return { lora: null, error: 'LoRA model is not installed' };
}
const isCompatibleBaseModel =
matchingLoRA?.base_model === model?.base_model;
if (!isCompatibleBaseModel) {
return {
lora: null,
error: 'LoRA incompatible with currently-selected model',
};
}
return { lora: matchingLoRA, error: null };
},
[loras, model?.base_model]
);
const recallLoRA = useCallback(
(loraMetadataItem: LoRAMetadataItem) => {
const result = prepareLoRAMetadataItem(loraMetadataItem);
if (!result.lora) {
parameterNotSetToast(result.error);
return;
}
dispatch(
loraRecalled({ ...result.lora, weight: loraMetadataItem.weight })
);
parameterSetToast();
},
[prepareLoRAMetadataItem, dispatch, parameterSetToast, parameterNotSetToast]
);
/*
* Sets image as initial image with toast
*/
@@ -427,7 +344,6 @@ export const useRecallParameters = () => {
refiner_positive_aesthetic_score,
refiner_negative_aesthetic_score,
refiner_start,
loras,
} = metadata;
if (isValidCfgScale(cfg_scale)) {
@@ -509,21 +425,9 @@ export const useRecallParameters = () => {
dispatch(setRefinerStart(refiner_start));
}
loras?.forEach((lora) => {
const result = prepareLoRAMetadataItem(lora);
if (result.lora) {
dispatch(loraRecalled({ ...result.lora, weight: lora.weight }));
}
});
allParameterSetToast();
},
[
allParameterNotSetToast,
allParameterSetToast,
dispatch,
prepareLoRAMetadataItem,
]
[allParameterNotSetToast, allParameterSetToast, dispatch]
);
return {
@@ -540,7 +444,6 @@ export const useRecallParameters = () => {
recallWidth,
recallHeight,
recallStrength,
recallLoRA,
recallAllParameters,
sendToImageToImage,
};
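
All of the recall helpers in this hook share one shape: validate the metadata value, dispatch on success, toast either way; the removed LoRA variant added a lookup against installed models before dispatching. A compact sketch of the common pattern, where `Toast` and the callbacks are assumed stand-ins for the app's toaster and Redux dispatch:

// Hedged sketch of the validate-then-toast recall pattern.
type Toast = (args: { title: string; status: 'info' | 'warning' }) => void;

function makeRecall<T>(
  isValid: (value: unknown) => value is T,
  onRecall: (value: T) => void,
  toast: Toast
) {
  return (value: unknown) => {
    if (!isValid(value)) {
      toast({ title: 'Parameter not set', status: 'warning' });
      return;
    }
    onRecall(value);
    toast({ title: 'Parameter set', status: 'info' });
  };
}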


@@ -128,7 +128,7 @@ export const mainModelsAdapter = createEntityAdapter<MainModelConfigEntity>({
const onnxModelsAdapter = createEntityAdapter<OnnxModelConfigEntity>({
sortComparer: (a, b) => a.model_name.localeCompare(b.model_name),
});
export const loraModelsAdapter = createEntityAdapter<LoRAModelConfigEntity>({
const loraModelsAdapter = createEntityAdapter<LoRAModelConfigEntity>({
sortComparer: (a, b) => a.model_name.localeCompare(b.model_name),
});
export const controlNetModelsAdapter =
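
These adapters come from Redux Toolkit's `createEntityAdapter`; un-exporting `loraModelsAdapter` is the counterpart to the recall hook above dropping its `getSelectors()` call. A minimal sketch of the adapter usage, with an assumed entity shape:

import { createEntityAdapter } from '@reduxjs/toolkit';

// Hedged sketch: a sorted adapter whose selectors read entities out of a
// normalized query result, as the removed LoRA-recall code did.
type LoRAModelConfigEntity = { id: string; model_name: string };

const loraModelsAdapter = createEntityAdapter<LoRAModelConfigEntity>({
  sortComparer: (a, b) => a.model_name.localeCompare(b.model_name),
});

const { selectAll } = loraModelsAdapter.getSelectors();
// selectAll(state) returns all LoRA configs, sorted by model_name.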

File diff suppressed because one or more lines are too long


@@ -132,7 +132,6 @@ export type DivideInvocation = s['DivideInvocation'];
export type ImageNSFWBlurInvocation = s['ImageNSFWBlurInvocation'];
export type ImageWatermarkInvocation = s['ImageWatermarkInvocation'];
export type SeamlessModeInvocation = s['SeamlessModeInvocation'];
export type SaveImageInvocation = s['SaveImageInvocation'];
// ControlNet Nodes
export type ControlNetInvocation = s['ControlNetInvocation'];


@@ -198,13 +198,6 @@ output = "coverage/index.xml"
max-line-length = 120
ignore = ["E203", "E266", "E501", "W503"]
select = ["B", "C", "E", "F", "W", "T4"]
exclude = [
".git",
"__pycache__",
"build",
"dist",
"invokeai/frontend/web/node_modules/"
]
[tool.black]
line-length = 120


@@ -1,9 +1,5 @@
import logging
import pytest
from invokeai.app.services.invocation_cache.invocation_cache_memory import MemoryInvocationCache
# This import must happen before other invoke imports or test in other files(!!) break
from .test_nodes import ( # isort: split
PromptCollectionTestInvocation,
@@ -46,7 +42,7 @@ def mock_services() -> InvocationServices:
return InvocationServices(
model_manager=None, # type: ignore
events=TestEventService(),
logger=logging, # type: ignore
logger=None, # type: ignore
images=None, # type: ignore
latents=None, # type: ignore
boards=None, # type: ignore
@@ -57,7 +53,6 @@ def mock_services() -> InvocationServices:
performance_statistics=InvocationStatsService(graph_execution_manager),
processor=DefaultInvocationProcessor(),
configuration=None, # type: ignore
invocation_cache=MemoryInvocationCache(), # type: ignore
)


@@ -1,9 +1,6 @@
import logging
import pytest
from invokeai.app.services.graph import Graph, GraphExecutionState, LibraryGraph
from invokeai.app.services.invocation_cache.invocation_cache_memory import MemoryInvocationCache
from invokeai.app.services.invocation_queue import MemoryInvocationQueue
from invokeai.app.services.invocation_services import InvocationServices
from invokeai.app.services.invocation_stats import InvocationStatsService
@@ -42,7 +39,7 @@ def mock_services() -> InvocationServices:
return InvocationServices(
model_manager=None, # type: ignore
events=TestEventService(),
logger=logging, # type: ignore
logger=None, # type: ignore
images=None, # type: ignore
latents=None, # type: ignore
boards=None, # type: ignore
@@ -53,7 +50,6 @@ def mock_services() -> InvocationServices:
processor=DefaultInvocationProcessor(),
performance_statistics=InvocationStatsService(graph_execution_manager),
configuration=None, # type: ignore
invocation_cache=MemoryInvocationCache(),
)