Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-21 02:28:12 -05:00)

Compare commits: release/v3...feat/nodes (16 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 8918501869 | |
| | b152fbf72f | |
| | f95111772a | |
| | 14ce7cf09c | |
| | 28a1a6939f | |
| | 6d2b4013f8 | |
| | ca7a7b57bb | |
| | c5d0e65a24 | |
| | 6cc7b55ec5 | |
| | 883e9973ec | |
| | 9e7d829906 | |
| | 456a0a59e0 | |
| | 4f2bf7e7e8 | |
| | 77e93888cf | |
| | fa54974bff | |
| | 7ac99d6bc3 | |
@@ -645,13 +645,19 @@ class ColorCorrectInvocation(BaseInvocation):
     mask_blur_radius: float = InputField(default=8, description="Mask blur radius")
 
     def invoke(self, context: InvocationContext) -> ImageOutput:
+        result = context.services.images.get_pil_image(self.image.image_name).convert("RGBA")
+
+        init_image = context.services.images.get_pil_image(self.reference.image_name)
+        # fit reference image to the input image
+        if init_image.size != result.size:
+            init_image = init_image.resize((result.width, result.height), Image.BILINEAR)
+
         pil_init_mask = None
         if self.mask is not None:
             pil_init_mask = context.services.images.get_pil_image(self.mask.image_name).convert("L")
 
-        init_image = context.services.images.get_pil_image(self.reference.image_name)
-
-        result = context.services.images.get_pil_image(self.image.image_name).convert("RGBA")
         # fit mask to the input image
         if pil_init_mask.size != result.size:
             pil_init_mask = pil_init_mask.resize((result.width, result.height), Image.BILINEAR)
 
         # if init_image is None or init_mask is None:
         #    return result
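The reordering above follows one rule: fetch the working image and the reference up front, then fit the reference to the working image before any mask math happens. A minimal standalone sketch of that fit step, assuming Pillow (the file names are hypothetical):

```python
# Minimal sketch of the "fit reference to input" step; assumes Pillow, file names are hypothetical.
from PIL import Image

result = Image.open("input.png").convert("RGBA")
init_image = Image.open("reference.png")

# Resize the reference to the working image so later per-pixel math lines up.
if init_image.size != result.size:
    init_image = init_image.resize((result.width, result.height), Image.BILINEAR)
```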
@@ -77,6 +77,25 @@ DEFAULT_PRECISION = choose_precision(choose_torch_device())
 SAMPLER_NAME_VALUES = Literal[tuple(list(SCHEDULER_MAP.keys()))]
 
 
+def fit_latents(latents: torch.Tensor, max_latents_size: int, device: torch.device) -> torch.Tensor:
+    if max_latents_size == 0:
+        return latents
+
+    latents_area = latents.shape[2] * latents.shape[3]
+    if latents_area <= max_latents_size:
+        return latents
+
+    scale_factor = np.sqrt(max_latents_size / latents_area)
+    scaled_latents = torch.nn.functional.interpolate(
+        latents.to(device),
+        scale_factor=scale_factor,
+        mode="bilinear",
+        antialias=True,
+    )
+
+    return scaled_latents
+
+
 @invocation_output("scheduler_output")
 class SchedulerOutput(BaseInvocationOutput):
     scheduler: SAMPLER_NAME_VALUES = OutputField(description=FieldDescriptions.scheduler, ui_type=UIType.Scheduler)
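To see what the new helper does, here is a runnable sketch of the same area-capping logic with a worked example; the tensor shape and the cap value are illustrative assumptions, not values taken from the repository:

```python
# Runnable sketch of the area-capping logic in fit_latents; shape and cap are illustrative.
import numpy as np
import torch


def fit_latents(latents: torch.Tensor, max_latents_size: int, device: torch.device) -> torch.Tensor:
    if max_latents_size == 0:  # 0 disables the cap entirely
        return latents

    latents_area = latents.shape[2] * latents.shape[3]
    if latents_area <= max_latents_size:  # already within budget
        return latents

    # The cap is on area (H * W) but interpolate scales each side, hence the sqrt.
    scale_factor = np.sqrt(max_latents_size / latents_area)
    return torch.nn.functional.interpolate(
        latents.to(device), scale_factor=scale_factor, mode="bilinear", antialias=True
    )


# A 1024x512 image corresponds to a 128x64 latent (1/8 scale per side).
# With max_image_size = 512 * 512, the latent-area budget is 512 * 512 // 64 = 4096.
latents = torch.randn(1, 4, 128, 64)
fitted = fit_latents(latents, max_latents_size=512 * 512 // 64, device=torch.device("cpu"))
print(fitted.shape)  # torch.Size([1, 4, 90, 45]): each side scaled by sqrt(4096 / 8192) ~ 0.707
```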
@@ -500,14 +519,20 @@ class DenoiseLatentsInvocation(BaseInvocation):
         with SilenceWarnings():  # this quenches NSFW nag from diffusers
             seed = None
             noise = None
+            max_image_size = context.services.configuration.max_image_size
 
             if self.noise is not None:
                 noise = context.services.latents.get(self.noise.latents_name)
                 seed = self.noise.seed
+                noise = fit_latents(latents=noise, max_latents_size=max_image_size // 64, device=choose_torch_device())
 
             if self.latents is not None:
                 latents = context.services.latents.get(self.latents.latents_name)
                 if seed is None:
                     seed = self.latents.seed
+                latents = fit_latents(
+                    latents=latents, max_latents_size=max_image_size // 64, device=choose_torch_device()
+                )
 
             if noise is not None and noise.shape[1:] != latents.shape[1:]:
                 raise Exception(f"Incompatable 'noise' and 'latents' shapes: {latents.shape=} {noise.shape=}")
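The `max_image_size // 64` argument is a unit conversion rather than a new knob: `max_image_size` is a pixel-area budget, Stable Diffusion latents are 1/8 of the image per side, so the matching latent-area budget is the pixel budget divided by 8 squared. A quick check of that arithmetic:

```python
# Pixel-area budget -> latent-area budget: 1/8 scale per side means 1/64 of the area.
max_image_size = 512 * 512                    # configured pixel budget
max_latents_size = max_image_size // (8 * 8)  # identical to max_image_size // 64
assert max_latents_size == 4096               # a 64x64 latent, i.e. a 512x512 image
```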
@@ -532,8 +557,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
         def _lora_loader():
             for lora in self.unet.loras:
                 lora_info = context.services.model_manager.get_model(
-                    **lora.dict(exclude={"weight"}),
-                    context=context,
+                    **lora.dict(exclude={"weight"}), context=context
                 )
                 yield (lora_info.context.model, lora.weight)
                 del lora_info
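Beyond the argument formatting, note that `_lora_loader` is a generator: each LoRA is loaded only when the consumer reaches it, and its handle is dropped (`del lora_info`) as soon as it has been yielded, so at most one freshly loaded LoRA is held at a time. A stand-in sketch of that pattern (the loader dict is hypothetical, not the model manager API):

```python
# Stand-in sketch of the lazy (model, weight) generator pattern; the loader is hypothetical.
def lora_loader(names: list[str]):
    for name in names:
        lora_info = {"model": f"<weights for {name}>"}  # stand-in for model_manager.get_model(...)
        yield (lora_info["model"], 0.75)                # hand out (model, weight), one LoRA at a time
        del lora_info                                   # drop the handle before loading the next one


for model, weight in lora_loader(["style-a", "style-b"]):
    print(model, weight)
```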
@@ -255,6 +255,7 @@ class InvokeAIAppConfig(InvokeAISettings):
     attention_slice_size: Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', category="Generation", )
-    force_tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category="Generation",)
+    force_tiled_decode: bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category="Generation",)
+    max_image_size : int = Field(default=512 * 512, description="The maximum size of images, in pixels. The maximum size for latents is inferred from this evaluating `max_image_size // 8`. If the size is exceeded during denoising, the latents will be resized.", category="Generation", )
 
     # QUEUE
     max_queue_size : int = Field(default=10000, gt=0, description="Maximum number of items in the session queue", category="Queue", )
@@ -277,6 +278,7 @@ class InvokeAIAppConfig(InvokeAISettings):
 
     class Config:
         validate_assignment = True
+        env_prefix = "INVOKEAI"
 
     def parse_args(self, argv: Optional[list[str]] = None, conf: Optional[DictConfig] = None, clobber=False):
         """
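The added `env_prefix = "INVOKEAI"` works through pydantic's settings machinery: only environment variables carrying the configured prefix are considered when populating fields. A generic pydantic v1 sketch of the mechanism (the field name is illustrative, the trailing underscore here is written into the prefix, and InvokeAI's own settings class layers extra env handling on top of this):

```python
# Generic pydantic-v1 sketch of env_prefix behavior; field name is illustrative.
import os

from pydantic import BaseSettings  # pydantic v1 API, as used by InvokeAI 3.x


class AppSettings(BaseSettings):
    always_use_cpu: bool = False

    class Config:
        env_prefix = "INVOKEAI_"


os.environ["always_use_cpu"] = "True"           # unprefixed: ignored
print(AppSettings().always_use_cpu)             # False

os.environ["INVOKEAI_always_use_cpu"] = "True"  # prefixed: picked up
print(AppSettings().always_use_cpu)             # True
```

This is the behavior the test change at the bottom of this compare asserts: an unprefixed `always_use_cpu` must not override the setting.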
@@ -92,30 +92,34 @@ class DefaultSessionProcessor(SessionProcessorBase):
         self.__invoker.services.logger
         while not stop_event.is_set():
             poll_now_event.clear()
             try:
                 # do not dequeue if there is already a session running
                 if self.__queue_item is None and resume_event.is_set():
                     queue_item = self.__invoker.services.session_queue.dequeue()
-                    if queue_item is not None:
-                        self.__invoker.services.logger.debug(f"Executing queue item {queue_item.item_id}")
-                        self.__queue_item = queue_item
-                        self.__invoker.services.graph_execution_manager.set(queue_item.session)
-                        self.__invoker.invoke(
-                            session_queue_batch_id=queue_item.batch_id,
-                            session_queue_id=queue_item.queue_id,
-                            session_queue_item_id=queue_item.item_id,
-                            graph_execution_state=queue_item.session,
-                            invoke_all=True,
-                        )
-                        queue_item = None
 
+                if queue_item is not None:
+                    self.__invoker.services.logger.debug(f"Executing queue item {queue_item.item_id}")
+                    self.__queue_item = queue_item
+                    self.__invoker.services.graph_execution_manager.set(queue_item.session)
+                    self.__invoker.invoke(
+                        session_queue_batch_id=queue_item.batch_id,
+                        session_queue_id=queue_item.queue_id,
+                        session_queue_item_id=queue_item.item_id,
+                        graph_execution_state=queue_item.session,
+                        invoke_all=True,
+                    )
+                    queue_item = None
+
                 if queue_item is None:
                     self.__invoker.services.logger.debug("Waiting for next polling interval or event")
                     poll_now_event.wait(POLLING_INTERVAL)
                     continue
             except Exception as e:
-                self.__invoker.services.logger.error(f"Error in session processor: {e}")
+                self.__invoker.services.logger.error(f"Fatal Error in session processor: {e}")
                 pass
             finally:
                 stop_event.clear()
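Both sides of this hunk implement the same polling pattern: the worker loops until `stop_event` is set, clears `poll_now_event` at the top of each pass, and blocks in `poll_now_event.wait(POLLING_INTERVAL)` so it wakes either when the interval elapses or when an enqueue sets the event. A stripped-down, runnable sketch of that event dance (the queue and its items are stand-ins, not InvokeAI's session queue):

```python
# Stripped-down sketch of the stop/poll-now event pattern; the work queue is a stand-in.
import queue
import threading
import time

POLLING_INTERVAL = 1  # seconds

work = queue.Queue()
stop_event = threading.Event()
poll_now_event = threading.Event()


def process() -> None:
    while not stop_event.is_set():
        poll_now_event.clear()
        try:
            item = work.get_nowait()
            print(f"executing {item}")
        except queue.Empty:
            # Block until the interval elapses or an enqueue sets poll_now_event.
            poll_now_event.wait(POLLING_INTERVAL)


worker = threading.Thread(target=process)
worker.start()

work.put("queue-item-1")
poll_now_event.set()  # wake the worker immediately instead of waiting out the interval

time.sleep(0.2)       # let the worker drain the queue
stop_event.set()
poll_now_event.set()  # unblock a pending wait so the loop can observe stop_event
worker.join()
```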
invokeai/frontend/web/dist/assets/App-61ae6f9e.js (vendored, 169 lines changed): diff suppressed because one or more lines are too long
invokeai/frontend/web/dist/assets/App-dbf8f111.js (vendored, normal file, 169 lines changed): diff suppressed because one or more lines are too long
invokeai/frontend/web/dist/assets/ThemeLocaleProvider-a3380d0c.js (vendored, normal file, 310 lines changed): diff suppressed because one or more lines are too long
invokeai/frontend/web/dist/assets/index-eac60e23.js (vendored, 128 lines changed): diff suppressed because one or more lines are too long
invokeai/frontend/web/dist/assets/index-f6c3f475.js (vendored, normal file, 128 lines changed): diff suppressed because one or more lines are too long
invokeai/frontend/web/dist/assets/menu-c9cc8c3d.js (vendored, normal file, 1 line changed): diff suppressed because one or more lines are too long
invokeai/frontend/web/dist/index.html (vendored, 2 lines changed)
@@ -12,7 +12,7 @@
       margin: 0;
     }
   </style>
-  <script type="module" crossorigin src="./assets/index-eac60e23.js"></script>
+  <script type="module" crossorigin src="./assets/index-f6c3f475.js"></script>
 </head>
 
 <body dir="ltr">
invokeai/frontend/web/dist/locales/en.json (vendored, 254 lines changed)
@@ -13,15 +13,14 @@
     "reset": "Reset",
     "rotateClockwise": "Rotate Clockwise",
     "rotateCounterClockwise": "Rotate Counter-Clockwise",
-    "showGalleryPanel": "Show Gallery Panel",
+    "showGallery": "Show Gallery",
     "showOptionsPanel": "Show Side Panel",
     "toggleAutoscroll": "Toggle autoscroll",
     "toggleLogViewer": "Toggle Log Viewer",
    "uploadImage": "Upload Image",
     "useThisParameter": "Use this parameter",
     "zoomIn": "Zoom In",
-    "zoomOut": "Zoom Out",
-    "loadMore": "Load More"
+    "zoomOut": "Zoom Out"
   },
   "boards": {
     "addBoard": "Add Board",
@@ -111,7 +110,6 @@
     "statusModelChanged": "Model Changed",
     "statusModelConverted": "Model Converted",
     "statusPreparing": "Preparing",
     "statusProcessing": "Processing",
     "statusProcessingCanceled": "Processing Canceled",
     "statusProcessingComplete": "Processing Complete",
     "statusRestoringFaces": "Restoring Faces",
@@ -205,81 +203,6 @@
     "incompatibleModel": "Incompatible base model:",
     "noMatchingEmbedding": "No matching Embeddings"
   },
-  "queue": {
-    "queue": "Queue",
-    "queueFront": "Add to Front of Queue",
-    "queueBack": "Add to Queue",
-    "queueCountPrediction": "Add {{predicted}} to Queue",
-    "queueMaxExceeded": "Max of {{max_queue_size}} exceeded, would skip {{skip}}",
-    "queuedCount": "{{pending}} Pending",
-    "queueTotal": "{{total}} Total",
-    "queueEmpty": "Queue Empty",
-    "enqueueing": "Queueing Batch",
-    "resume": "Resume",
-    "resumeTooltip": "Resume Processor",
-    "resumeSucceeded": "Processor Resumed",
-    "resumeFailed": "Problem Resuming Processor",
-    "pause": "Pause",
-    "pauseTooltip": "Pause Processor",
-    "pauseSucceeded": "Processor Paused",
-    "pauseFailed": "Problem Pausing Processor",
-    "cancel": "Cancel",
-    "cancelTooltip": "Cancel Current Item",
-    "cancelSucceeded": "Item Canceled",
-    "cancelFailed": "Problem Canceling Item",
-    "prune": "Prune",
-    "pruneTooltip": "Prune {{item_count}} Completed Items",
-    "pruneSucceeded": "Pruned {{item_count}} Completed Items from Queue",
-    "pruneFailed": "Problem Pruning Queue",
-    "clear": "Clear",
-    "clearTooltip": "Cancel and Clear All Items",
-    "clearSucceeded": "Queue Cleared",
-    "clearFailed": "Problem Clearing Queue",
-    "cancelBatch": "Cancel Batch",
-    "cancelItem": "Cancel Item",
-    "cancelBatchSucceeded": "Batch Canceled",
-    "cancelBatchFailed": "Problem Canceling Batch",
-    "clearQueueAlertDialog": "Clearing the queue immediately cancels any processing items and clears the queue entirely.",
-    "clearQueueAlertDialog2": "Are you sure you want to clear the queue?",
-    "current": "Current",
-    "next": "Next",
-    "status": "Status",
-    "total": "Total",
-    "pending": "Pending",
-    "in_progress": "In Progress",
-    "completed": "Completed",
-    "failed": "Failed",
-    "canceled": "Canceled",
-    "completedIn": "Completed in",
-    "batch": "Batch",
-    "item": "Item",
-    "session": "Session",
-    "batchValues": "Batch Values",
-    "notReady": "Unable to Queue",
-    "batchQueued": "Batch Queued",
-    "batchQueuedDesc": "Added {{item_count}} sessions to {{direction}} of queue",
-    "front": "front",
-    "back": "back",
-    "batchFailedToQueue": "Failed to Queue Batch",
-    "graphQueued": "Graph queued",
-    "graphFailedToQueue": "Failed to queue graph"
-  },
-  "invocationCache": {
-    "invocationCache": "Invocation Cache",
-    "cacheSize": "Cache Size",
-    "maxCacheSize": "Max Cache Size",
-    "hits": "Cache Hits",
-    "misses": "Cache Misses",
-    "clear": "Clear",
-    "clearSucceeded": "Invocation Cache Cleared",
-    "clearFailed": "Problem Clearing Invocation Cache",
-    "enable": "Enable",
-    "enableSucceeded": "Invocation Cache Enabled",
-    "enableFailed": "Problem Enabling Invocation Cache",
-    "disable": "Disable",
-    "disableSucceeded": "Invocation Cache Disabled",
-    "disableFailed": "Problem Disabling Invocation Cache"
-  },
   "gallery": {
     "allImagesLoaded": "All Images Loaded",
     "assets": "Assets",
@@ -718,8 +641,7 @@
     "collectionItemDescription": "TODO",
     "colorCodeEdges": "Color-Code Edges",
     "colorCodeEdgesHelp": "Color-code edges according to their connected fields",
-    "colorCollection": "A collection of colors.",
-    "colorCollectionDescription": "TODO",
+    "colorCollectionDescription": "A collection of colors.",
     "colorField": "Color",
     "colorFieldDescription": "A RGBA color.",
     "colorPolymorphic": "Color Polymorphic",
@@ -766,8 +688,7 @@
     "imageFieldDescription": "Images may be passed between nodes.",
     "imagePolymorphic": "Image Polymorphic",
     "imagePolymorphicDescription": "A collection of images.",
     "inputField": "Input Field",
-    "inputFields": "Input Fields",
+    "inputFields": "Input Feilds",
     "inputMayOnlyHaveOneConnection": "Input may only have one connection",
     "inputNode": "Input Node",
     "integer": "Integer",
@@ -785,7 +706,6 @@
     "latentsPolymorphicDescription": "Latents may be passed between nodes.",
     "loadingNodes": "Loading Nodes...",
     "loadWorkflow": "Load Workflow",
-    "noWorkflow": "No Workflow",
     "loRAModelField": "LoRA",
     "loRAModelFieldDescription": "TODO",
     "mainModelField": "Model",
@@ -807,15 +727,14 @@
     "noImageFoundState": "No initial image found in state",
     "noMatchingNodes": "No matching nodes",
     "noNodeSelected": "No node selected",
-    "nodeOpacity": "Node Opacity",
+    "noOpacity": "Node Opacity",
     "noOutputRecorded": "No outputs recorded",
     "noOutputSchemaName": "No output schema name found in ref object",
     "notes": "Notes",
     "notesDescription": "Add notes about your workflow",
     "oNNXModelField": "ONNX Model",
     "oNNXModelFieldDescription": "ONNX model field.",
     "outputField": "Output Field",
-    "outputFields": "Output Fields",
+    "outputFields": "Output Feilds",
     "outputNode": "Output node",
     "outputSchemaNotFound": "Output schema not found",
     "pickOne": "Pick One",
@@ -864,7 +783,6 @@
     "unknownNode": "Unknown Node",
     "unknownTemplate": "Unknown Template",
     "unkownInvocation": "Unknown Invocation type",
     "updateNode": "Update Node",
-    "updateApp": "Update App",
     "vaeField": "Vae",
     "vaeFieldDescription": "Vae submodel.",
@@ -901,7 +819,6 @@
   },
     "cfgScale": "CFG Scale",
     "clipSkip": "CLIP Skip",
-    "clipSkipWithLayerCount": "CLIP Skip {{layerCount}}",
     "closeViewer": "Close Viewer",
     "codeformerFidelity": "Fidelity",
     "coherenceMode": "Mode",
@@ -940,7 +857,6 @@
     "noInitialImageSelected": "No initial image selected",
     "noModelForControlNet": "ControlNet {{index}} has no model selected.",
     "noModelSelected": "No model selected",
-    "noPrompts": "No prompts generated",
     "noNodesInGraph": "No nodes in graph",
     "readyToInvoke": "Ready to Invoke",
     "systemBusy": "System busy",
@@ -959,12 +875,7 @@
     "perlinNoise": "Perlin Noise",
     "positivePromptPlaceholder": "Positive Prompt",
     "randomizeSeed": "Randomize Seed",
-    "manualSeed": "Manual Seed",
-    "randomSeed": "Random Seed",
     "restoreFaces": "Restore Faces",
-    "iterations": "Iterations",
-    "iterationsWithCount_one": "{{count}} Iteration",
-    "iterationsWithCount_other": "{{count}} Iterations",
     "scale": "Scale",
     "scaleBeforeProcessing": "Scale Before Processing",
     "scaledHeight": "Scaled H",
@@ -975,17 +886,13 @@
     "seamlessTiling": "Seamless Tiling",
     "seamlessXAxis": "X Axis",
     "seamlessYAxis": "Y Axis",
-    "seamlessX": "Seamless X",
-    "seamlessY": "Seamless Y",
-    "seamlessX&Y": "Seamless X & Y",
     "seamLowThreshold": "Low",
     "seed": "Seed",
     "seedWeights": "Seed Weights",
-    "imageActions": "Image Actions",
     "sendTo": "Send to",
     "sendToImg2Img": "Send to Image to Image",
     "sendToUnifiedCanvas": "Send To Unified Canvas",
-    "showOptionsPanel": "Show Side Panel (O or T)",
+    "showOptionsPanel": "Show Options Panel",
     "showPreview": "Show Preview",
     "shuffle": "Shuffle Seed",
     "steps": "Steps",
@@ -994,13 +901,11 @@
     "tileSize": "Tile Size",
     "toggleLoopback": "Toggle Loopback",
     "type": "Type",
-    "upscale": "Upscale (Shift + U)",
+    "upscale": "Upscale",
     "upscaleImage": "Upscale Image",
     "upscaling": "Upscaling",
     "useAll": "Use All",
     "useCpuNoise": "Use CPU Noise",
-    "cpuNoise": "CPU Noise",
-    "gpuNoise": "GPU Noise",
     "useInitImg": "Use Initial Image",
     "usePrompt": "Use Prompt",
     "useSeed": "Use Seed",
@@ -1009,20 +914,11 @@
     "vSymmetryStep": "V Symmetry Step",
     "width": "Width"
   },
-  "dynamicPrompts": {
+  "prompt": {
     "combinatorial": "Combinatorial Generation",
     "dynamicPrompts": "Dynamic Prompts",
     "enableDynamicPrompts": "Enable Dynamic Prompts",
-    "maxPrompts": "Max Prompts",
-    "promptsWithCount_one": "{{count}} Prompt",
-    "promptsWithCount_other": "{{count}} Prompts",
-    "seedBehaviour": {
-      "label": "Seed Behaviour",
-      "perIterationLabel": "Seed per Iteration",
-      "perIterationDesc": "Use a different seed for each iteration",
-      "perPromptLabel": "Seed per Prompt",
-      "perPromptDesc": "Use a different seed for each prompt"
-    }
+    "maxPrompts": "Max Prompts"
   },
   "sdxl": {
     "cfgScale": "CFG Scale",
@@ -1170,136 +1066,6 @@
       "variations": "Try a variation with a value between 0.1 and 1.0 to change the result for a given seed. Interesting variations of the seed are between 0.1 and 0.3."
     }
   },
-  "popovers": {
-    "clipSkip": {
-      "heading": "CLIP Skip",
-      "paragraph": "Choose how many layers of the CLIP model to skip. Certain models are better suited to be used with CLIP Skip."
-    },
-    "compositingBlur": {
-      "heading": "Blur",
-      "paragraph": "The blur radius of the mask."
-    },
-    "compositingBlurMethod": {
-      "heading": "Blur Method",
-      "paragraph": "The method of blur applied to the masked area."
-    },
-    "compositingCoherencePass": {
-      "heading": "Coherence Pass",
-      "paragraph": "Composite the Inpainted/Outpainted images."
-    },
-    "compositingCoherenceMode": {
-      "heading": "Mode",
-      "paragraph": "The mode of the Coherence Pass."
-    },
-    "compositingCoherenceSteps": {
-      "heading": "Steps",
-      "paragraph": "Number of steps in the Coherence Pass. Similar to Denoising Steps."
-    },
-    "compositingStrength": {
-      "heading": "Strength",
-      "paragraph": "Amount of noise added for the Coherence Pass. Similar to Denoising Strength."
-    },
-    "compositingMaskAdjustments": {
-      "heading": "Mask Adjustments",
-      "paragraph": "Adjust the mask."
-    },
-    "controlNetBeginEnd": {
-      "heading": "Begin / End Step Percentage",
-      "paragraph": "Which parts of the denoising process will have the ControlNet applied. ControlNets applied at the start of the process guide composition, and ControlNets applied at the end guide details."
-    },
-    "controlNetControlMode": {
-      "heading": "Control Mode",
-      "paragraph": "Lends more weight to either the prompt or ControlNet."
-    },
-    "controlNetResizeMode": {
-      "heading": "Resize Mode",
-      "paragraph": "How the ControlNet image will be fit to the image generation Ratio"
-    },
-    "controlNetToggle": {
-      "heading": "Enable ControlNet",
-      "paragraph": "ControlNets provide guidance to the generation process, helping create images with controlled composition, structure, or style, depending on the model selected."
-    },
-    "controlNetWeight": {
-      "heading": "Weight",
-      "paragraph": "How strongly the ControlNet will impact the generated image."
-    },
-    "dynamicPromptsToggle": {
-      "heading": "Enable Dynamic Prompts",
-      "paragraph": "Dynamic prompts allow multiple options within a prompt. Dynamic prompts can be used by: {option1|option2|option3}. Combinations of prompts will be randomly generated until the “Images” number has been reached."
-    },
-    "dynamicPromptsCombinatorial": {
-      "heading": "Combinatorial Generation",
-      "paragraph": "Generate an image for every possible combination of Dynamic Prompts until the Max Prompts is reached."
-    },
-    "infillMethod": {
-      "heading": "Infill Method",
-      "paragraph": "Method to infill the selected area."
-    },
-    "lora": {
-      "heading": "LoRA Weight",
-      "paragraph": "Weight of the LoRA. Higher weight will lead to larger impacts on the final image."
-    },
-    "noiseEnable": {
-      "heading": "Enable Noise Settings",
-      "paragraph": "Advanced control over noise generation."
-    },
-    "noiseUseCPU": {
-      "heading": "Use CPU Noise",
-      "paragraph": "Uses the CPU to generate random noise."
-    },
-    "paramCFGScale": {
-      "heading": "CFG Scale",
-      "paragraph": "Controls how much your prompt influences the generation process."
-    },
-    "paramDenoisingStrength": {
-      "heading": "Denoising Strength",
-      "paragraph": "How much noise is added to the input image. 0 will result in an identical image, while 1 will result in a completely new image."
-    },
-    "paramIterations": {
-      "heading": "Iterations",
-      "paragraph": "The number of images to generate. If Dynamic Prompts is enabled, each of the prompts will be generated this many times."
-    },
-    "paramModel": {
-      "heading": "Model",
-      "paragraph": "Model used for the denoising steps. Different models are trained to specialize in producing different aesthetic results and content."
-    },
-    "paramNegativeConditioning": {
-      "heading": "Negative Prompt",
-      "paragraph": "The generation process avoids the concepts in the negative prompt. Use this to exclude qualities or objects from the output. Supports Compel syntax and embeddings."
-    },
-    "paramPositiveConditioning": {
-      "heading": "Positive Prompt",
-      "paragraph": "Guides the generation process. You may use any words or phrases. Supports Compel and Dynamic Prompts syntaxes and embeddings."
-    },
-    "paramRatio": {
-      "heading": "Ratio",
-      "paragraph": "The ratio of the dimensions of the image generated. An image size (in number of pixels) equivalent to 512x512 is recommended for SD1.5 models and a size equivalent to 1024x1024 is recommended for SDXL models."
-    },
-    "paramScheduler": {
-      "heading": "Scheduler",
-      "paragraph": "Scheduler defines how to iteratively add noise to an image or how to update a sample based on a model's output."
-    },
-    "paramSeed": {
-      "heading": "Seed",
-      "paragraph": "Controls the starting noise used for generation. Disable “Random Seed” to produce identical results with the same generation settings."
-    },
-    "paramSteps": {
-      "heading": "Steps",
-      "paragraph": "Number of steps that will be performed in each generation. Higher step counts will typically create better images but will require more generation time."
-    },
-    "paramVAE": {
-      "heading": "VAE",
-      "paragraph": "Model used for translating AI output into the final image."
-    },
-    "paramVAEPrecision": {
-      "heading": "VAE Precision",
-      "paragraph": "The precision used during VAE encoding and decoding. Fp16/Half precision is more efficient, at the expense of minor image variations."
-    },
-    "scaleBeforeProcessing": {
-      "heading": "Scale Before Processing",
-      "paragraph": "Scales the selected area to the size best suited for the model before the image generation process."
-    }
-  },
   "ui": {
     "hideProgressImages": "Hide Progress Images",
     "lockRatio": "Lock Ratio",
@@ -36,7 +36,8 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage }: Props) => {
 
   const logger = useLogger('system');
   const dispatch = useAppDispatch();
-  const { handlePreselectedImage } = usePreselectedImage();
+  const { handleSendToCanvas, handleSendToImg2Img, handleUseAllMetadata } =
+    usePreselectedImage(selectedImage?.imageName);
   const handleReset = useCallback(() => {
     localStorage.clear();
     location.reload();
@@ -59,8 +60,22 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage }: Props) => {
   }, [dispatch]);
 
   useEffect(() => {
-    handlePreselectedImage(selectedImage);
-  }, [handlePreselectedImage, selectedImage]);
+    if (selectedImage && selectedImage.action === 'sendToCanvas') {
+      handleSendToCanvas();
+    }
+  }, [selectedImage, handleSendToCanvas]);
+
+  useEffect(() => {
+    if (selectedImage && selectedImage.action === 'sendToImg2Img') {
+      handleSendToImg2Img();
+    }
+  }, [selectedImage, handleSendToImg2Img]);
+
+  useEffect(() => {
+    if (selectedImage && selectedImage.action === 'useAllParameters') {
+      handleUseAllMetadata();
+    }
+  }, [selectedImage, handleUseAllMetadata]);
 
   const headerComponent = useStore($headerComponent);
@@ -1,7 +1,7 @@
 import { skipToken } from '@reduxjs/toolkit/dist/query';
 import { CoreMetadata } from 'features/nodes/types/types';
 import { t } from 'i18next';
-import { useCallback, useState } from 'react';
+import { useCallback } from 'react';
 import { useAppToaster } from '../../../app/components/Toaster';
 import { useAppDispatch } from '../../../app/store/storeHooks';
 import {
@@ -13,70 +13,46 @@ import { setActiveTab } from '../../ui/store/uiSlice';
 import { initialImageSelected } from '../store/actions';
 import { useRecallParameters } from './useRecallParameters';
 
 type SelectedImage = {
   imageName: string;
   action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters';
 };
 
-export const usePreselectedImage = () => {
+export const usePreselectedImage = (imageName?: string) => {
   const dispatch = useAppDispatch();
-  const [imageNameForDto, setImageNameForDto] = useState<string | undefined>();
-  const [imageNameForMetadata, setImageNameForMetadata] = useState<
-    string | undefined
-  >();
-
   const { recallAllParameters } = useRecallParameters();
   const toaster = useAppToaster();
 
   const { currentData: selectedImageDto } = useGetImageDTOQuery(
-    imageNameForDto ?? skipToken
+    imageName ?? skipToken
   );
 
   const { currentData: selectedImageMetadata } = useGetImageMetadataQuery(
-    imageNameForMetadata ?? skipToken
+    imageName ?? skipToken
   );
 
-  const handlePreselectedImage = useCallback(
-    (selectedImage?: SelectedImage) => {
-      if (!selectedImage) {
-        return;
-      }
-
-      if (selectedImage.action === 'sendToCanvas') {
-        setImageNameForDto(selectedImage?.imageName);
-        if (selectedImageDto) {
-          dispatch(setInitialCanvasImage(selectedImageDto));
-          dispatch(setActiveTab('unifiedCanvas'));
-          toaster({
-            title: t('toast.sentToUnifiedCanvas'),
-            status: 'info',
-            duration: 2500,
-            isClosable: true,
-          });
-        }
-      }
+  const handleSendToCanvas = useCallback(() => {
+    if (selectedImageDto) {
+      dispatch(setInitialCanvasImage(selectedImageDto));
+      dispatch(setActiveTab('unifiedCanvas'));
+      toaster({
+        title: t('toast.sentToUnifiedCanvas'),
+        status: 'info',
+        duration: 2500,
+        isClosable: true,
+      });
+    }
+  }, [dispatch, toaster, selectedImageDto]);
 
-      if (selectedImage.action === 'sendToImg2Img') {
-        setImageNameForDto(selectedImage?.imageName);
-        if (selectedImageDto) {
-          dispatch(initialImageSelected(selectedImageDto));
-        }
-      }
+  const handleSendToImg2Img = useCallback(() => {
+    if (selectedImageDto) {
+      dispatch(initialImageSelected(selectedImageDto));
+    }
+  }, [dispatch, selectedImageDto]);
 
-      if (selectedImage.action === 'useAllParameters') {
-        setImageNameForMetadata(selectedImage?.imageName);
-        if (selectedImageMetadata) {
-          recallAllParameters(selectedImageMetadata.metadata as CoreMetadata);
-        }
-      }
-    },
-    [
-      dispatch,
-      selectedImageDto,
-      selectedImageMetadata,
-      recallAllParameters,
-      toaster,
-    ]
-  );
+  const handleUseAllMetadata = useCallback(() => {
+    if (selectedImageMetadata) {
+      recallAllParameters(selectedImageMetadata.metadata as CoreMetadata);
+    }
+    // disabled because `recallAllParameters` changes the model, but its dep to prepare LoRAs has model as a dep. this introduces circular logic that causes infinite re-renders
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, [selectedImageMetadata]);
 
-  return { handlePreselectedImage };
+  return { handleSendToCanvas, handleSendToImg2Img, handleUseAllMetadata };
 };
@@ -21,7 +21,7 @@ import {
   loraModelsAdapter,
   useGetLoRAModelsQuery,
 } from '../../../services/api/endpoints/models';
-import { loraRecalled } from '../../lora/store/loraSlice';
+import { loraRecalled, lorasCleared } from '../../lora/store/loraSlice';
 import { initialImageSelected, modelSelected } from '../store/actions';
 import {
   setCfgScale,
@@ -509,6 +509,7 @@ export const useRecallParameters = () => {
       dispatch(setRefinerStart(refiner_start));
     }
 
+    dispatch(lorasCleared());
     loras?.forEach((lora) => {
       const result = prepareLoRAMetadataItem(lora);
       if (result.lora) {
@@ -1 +1 @@
-__version__ = "3.2.0rc2"
+__version__ = "3.1.1"
@@ -121,6 +121,12 @@ def test_env_override(patch_rootdir):
     conf.parse_args(conf=init1, argv=[])
     assert conf.max_cache_size == 20
 
+    # make sure that prefix is respected
+    del os.environ["INVOKEAI_always_use_cpu"]
+    os.environ["always_use_cpu"] = "True"
+    conf.parse_args(conf=init1, argv=[])
+    assert conf.always_use_cpu is False
+
 
 def test_root_resists_cwd(patch_rootdir):
     from invokeai.app.services.config import InvokeAIAppConfig