Mirror of https://github.com/invoke-ai/InvokeAI.git
* feat(ui): group nodes by category in add-node dialog
  Add collapsible category grouping to the node picker command palette. Categories are parsed from the backend schema and displayed as expandable sections with caret icons. All categories auto-expand when searching.
* feat(ui): add toggle for category grouping in add-node dialog and prioritize exact matches
  Add a persistent "Group Nodes by Category" setting to workflow editor settings, allowing users to switch between grouped and flat node list views. Also sort exact title matches to the top when searching.
* fix: update test schema categories to match expected templates
* feat: add expand/collapse all buttons to node picker and fix node categories
  Add "Expand All" and "Collapse All" link-buttons above the grouped category list in the add-node dialog so users can quickly open or close all categories at once. Buttons are hidden during search since categories auto-expand while searching.
  Fix two miscategorized nodes: Z-Image ControlNet was in "Control" instead of "Controlnet", and Upscale (RealESRGAN) was in "Esrgan" instead of "Upscale".
* refactor(nodes): clean up node category taxonomy
  Reorganize all built-in invocation categories into a consistent set of 18 groups (model, prompt, conditioning, controlnet_preprocessors, latents, image, mask, inpaint, tiles, upscale, segmentation, math, strings, primitives, batch, metadata, multimodal, canvas).
  - Move denoise/i2l/l2i nodes consistently into "latents"
  - Move all mask creation/manipulation nodes into "mask"
  - Split ControlNet preprocessors out of "controlnet" into their own group
  - Fold "unet", "vllm", "string", "ip_adapter", "t2i_adapter" into larger groups
  - Move metadata_linked denoise wrappers from "latents" to "metadata"
  - Add missing category to ideal_size
  - Introduce dedicated "canvas" group for canvas/output/panel nodes
  Also adds the now-required `category` field to invocation template fixtures in validateConnection.test.ts.
* Chore Ruff Format

Co-authored-by: dunkeroni <dunkeroni@gmail.com>
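The grouping and exact-match prioritization described above live in the workflow editor's TypeScript UI; the sketch below only illustrates the intended behavior in Python. The NodeTemplate alias and both function names are hypothetical and not part of the InvokeAI codebase.

from collections import defaultdict

# Hypothetical stand-in for an invocation template: (title, category).
NodeTemplate = tuple[str, str]

def sort_matches(templates: list[NodeTemplate], query: str) -> list[NodeTemplate]:
    # Exact title matches sort first; the remaining results stay in alphabetical order.
    q = query.strip().lower()
    return sorted(templates, key=lambda t: (t[0].lower() != q, t[0].lower()))

def group_by_category(templates: list[NodeTemplate]) -> dict[str, list[NodeTemplate]]:
    # Bucket templates by category, one bucket per collapsible section in the add-node dialog.
    groups: defaultdict[str, list[NodeTemplate]] = defaultdict(list)
    for template in templates:
        groups[template[1]].append(template)
    return dict(groups)

With helpers like these, a query that exactly equals a node's title floats that node to the top of the results, and enabling grouping renders each category as its own collapsible section.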
77 lines
3.1 KiB
Python
import math
from typing import Tuple

from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField
from invokeai.app.invocations.model import UNetField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.taxonomy import BaseModelType


@invocation_output("ideal_size_output")
class IdealSizeOutput(BaseInvocationOutput):
    """The ideal width and height of an image for generation (in pixels)"""

    width: int = OutputField(description="The ideal width of the image (in pixels)")
    height: int = OutputField(description="The ideal height of the image (in pixels)")


@invocation(
    "ideal_size",
    title="Ideal Size - SD1.5, SDXL",
    tags=["latents", "math", "ideal_size"],
    category="latents",
    version="1.0.6",
)
class IdealSizeInvocation(BaseInvocation):
    """Calculates the ideal size for generation to avoid duplication"""

    width: int = InputField(default=1024, description="Final image width")
    height: int = InputField(default=576, description="Final image height")
    unet: UNetField = InputField(description=FieldDescriptions.unet)
    multiplier: float = InputField(
        default=1.0,
        description="Amount to multiply the model's dimensions by when calculating the ideal size (may result in "
        "initial generation artifacts if too large)",
    )

    def trim_to_multiple_of(self, *args: int, multiple_of: int = LATENT_SCALE_FACTOR) -> Tuple[int, ...]:
        # Round each value down to the nearest multiple of `multiple_of` (the latent scale factor by default).
        return tuple((x - x % multiple_of) for x in args)

    def invoke(self, context: InvocationContext) -> IdealSizeOutput:
        unet_config = context.models.get_config(self.unet.unet.key)
        aspect = self.width / self.height

        # Pick the native training resolution for the selected base model.
        if unet_config.base == BaseModelType.StableDiffusion1:
            dimension = 512
        elif unet_config.base == BaseModelType.StableDiffusion2:
            dimension = 768
        elif unet_config.base in (
            BaseModelType.StableDiffusionXL,
            BaseModelType.Flux,
            BaseModelType.Flux2,
            BaseModelType.StableDiffusion3,
        ):
            dimension = 1024
        else:
            raise ValueError(f"Unsupported model type: {unet_config.base}")

        dimension = dimension * self.multiplier
        min_dimension = math.floor(dimension * 0.5)
        model_area = dimension * dimension  # hardcoded for now since all models are trained on square images

        # Fit the requested aspect ratio into the model's training area, clamping the
        # shorter side so it never drops below half the native dimension.
        if aspect > 1.0:
            init_height = max(min_dimension, math.sqrt(model_area / aspect))
            init_width = init_height * aspect
        else:
            init_width = max(min_dimension, math.sqrt(model_area * aspect))
            init_height = init_width / aspect

        # Snap both dimensions down to multiples of the latent scale factor.
        scaled_width, scaled_height = self.trim_to_multiple_of(
            math.floor(init_width),
            math.floor(init_height),
        )

        return IdealSizeOutput(width=scaled_width, height=scaled_height)
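As a quick check of the arithmetic in invoke(), the standalone sketch below reproduces the calculation outside of InvokeAI. The ideal_size() helper is illustrative only, and LATENT_SCALE_FACTOR is assumed to be 8 here.

import math

LATENT_SCALE_FACTOR = 8  # assumed value of the constant imported in the file above

def ideal_size(width: int, height: int, dimension: float = 1024, multiplier: float = 1.0) -> tuple[int, int]:
    # Mirror of the invoke() math: fit the aspect ratio into the model's training area,
    # clamp the short side to half the native dimension, and snap to multiples of 8.
    aspect = width / height
    dimension = dimension * multiplier
    min_dimension = math.floor(dimension * 0.5)
    model_area = dimension * dimension
    if aspect > 1.0:
        init_height = max(min_dimension, math.sqrt(model_area / aspect))
        init_width = init_height * aspect
    else:
        init_width = max(min_dimension, math.sqrt(model_area * aspect))
        init_height = init_width / aspect

    def trim(x: int) -> int:
        return x - x % LATENT_SCALE_FACTOR

    return trim(math.floor(init_width)), trim(math.floor(init_height))

print(ideal_size(1024, 576))  # (1360, 768)

For the default 1024x576 request on an SDXL-class model, the 1024x1024 training area is redistributed to roughly 1365x768 at a 16:9 aspect ratio, then both sides are trimmed down to multiples of 8, giving 1360x768.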