Compare commits

...

35 Commits

Author SHA1 Message Date
psychedelicious
ccc55069d1 chore: bump version to v6.3.0 2025-08-05 10:30:26 +10:00
psychedelicious
61ff9ee3a7 feat(ui): add button to ref image to recall size & optimize for model
This is useful for FLUX Kontext, where you typically want the generation
size to at least roughly match the first ref image size.
2025-08-05 10:28:44 +10:00
psychedelicious
111408c046 feat(mm): add flux krea to starter models 2025-08-05 10:25:14 +10:00
psychedelicious
d7619d465e feat(mm): change anime upscaling model to one that doesn't trigger picklescan 2025-08-05 10:25:14 +10:00
Kent Keirsey
8ad4f6e56d updates & fix 2025-08-05 10:10:52 +10:00
Cursor Agent
bf4899526f Add 'shift+s' hotkey for fitting bbox to canvas
Co-authored-by: kent <kent@invoke.ai>
2025-08-05 10:10:52 +10:00
psychedelicious
6435d265c6 fix(ui): overflow w/ long board names 2025-08-05 10:06:55 +10:00
Linos
3163ef454d translationBot(ui): update translation (Vietnamese)
Currently translated at 100.0% (2065 of 2065 strings)

Co-authored-by: Linos <linos.coding@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/vi/
Translation: InvokeAI/Web UI
2025-08-05 10:04:20 +10:00
Riccardo Giovanetti
7ea636df70 translationBot(ui): update translation (Italian)
Currently translated at 98.6% (2037 of 2065 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.6% (2037 of 2065 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.5% (2036 of 2065 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.6% (2014 of 2042 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-08-05 10:04:20 +10:00
Hosted Weblate
1869824803 translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

translationBot(ui): update translation files

Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2025-08-05 10:04:20 +10:00
psychedelicious
66fc8af8a6 fix(ui): reset session button actions
- Do not reset dimensions when resetting generation settings (they are
model-dependent, and we don't change model-dependent settings w/ that
button)
- Do not reset bbox when resetting canvas layers
- Show reset canvas layers button only on canvas tab
- Show reset generation settings button only on canvas or generate tab
2025-08-05 10:01:22 +10:00
psychedelicious
48cb6b12f0 fix(ui): add style ref launchpad using wrong dnd config
I don't think this actually caused problems bc the two DND targets were
very similar, but it was wrong.
2025-08-05 09:57:11 +10:00
psychedelicious
68e30a9864 feat(ui): prevent creating new canvases while staging
Disable these items while staging:
- New Canvas From Image context menu
- Edit image hook & launchpad button
- Generate from Text launchpad button (only while on canvas tab)
- Use a Layout Image launchpad button
2025-08-05 09:57:11 +10:00
psychedelicious
f65dc2c081 chore(ui): typegen 2025-08-05 09:54:00 +10:00
psychedelicious
0cd77443a7 feat(app): add setting to disable picklescan
When unsafe_disable_picklescan is enabled, instead of erroring on
detections or scan failures, a warning is logged.

A warning is also logged on app startup when this setting is enabled.

The setting is disabled by default and there is no change in behaviour
when disabled.
2025-08-05 09:54:00 +10:00
Mary Hipp
185ed86424 fix graph building 2025-08-04 12:32:27 -04:00
Mary Hipp
fed817ab83 add image concatenation to flux kontext graph if more than one reference image 2025-08-04 11:27:02 -04:00
Mary Hipp
e0b45db69a remove check in readiness for multiple ref images 2025-08-04 11:27:02 -04:00
psychedelicious
2beac1fb04 chore: bump version to v6.3.0rc2 2025-08-04 23:55:04 +10:00
psychedelicious
e522de33f8 refactor(nodes): roll back latent-space resizing of kontext images 2025-08-04 23:03:12 +10:00
psychedelicious
d591b50c25 feat(ui): use image-space concatenation in FLUX graphs 2025-08-04 23:03:12 +10:00
psychedelicious
b365aad6d8 chore(ui): typegen 2025-08-04 23:03:12 +10:00
psychedelicious
65ad392361 feat(nodes): add node to prep images for FLUX Kontext 2025-08-04 23:03:12 +10:00
psychedelicious
56d75e1c77 feat(backend): use VAE mean encoding for Kontext reference images
Use distribution mean without sampling noise for more stable and
consistent reference image encoding, matching ComfyUI implementation
2025-08-04 23:03:12 +10:00
psychedelicious
df77a12efe refactor(backend): use torchvision transforms for Kontext image preprocessing
Replace numpy-based normalization with torchvision transforms for
consistency with other image processing in the codebase
2025-08-04 23:03:12 +10:00
psychedelicious
faf662d12e refactor(backend): use BICUBIC resampling for Kontext images
Switch from LANCZOS to BICUBIC for smoother image resizing to reduce
artifacts in reference image processing
2025-08-04 23:03:12 +10:00
psychedelicious
44a7dfd486 fix(backend): use consistent idx_offset=1 for all Kontext images
Changes from per-image index offsets to a consistent value of 1 for
all reference images, matching the ComfyUI implementation
2025-08-04 23:03:12 +10:00
psychedelicious
bb15e5cf06 feat(backend): add spatial tiling for multiple Kontext reference images
Implements intelligent spatial tiling that arranges multiple reference
images in a virtual canvas, choosing between horizontal and vertical
placement to maintain a square-like aspect ratio
2025-08-04 23:03:12 +10:00
psychedelicious
1a1c846be3 feat(backend): include reference images in negative CFG pass for Kontext
Maintains consistency between positive and negative passes to prevent
CFG artifacts when using Kontext reference images
2025-08-04 23:03:12 +10:00
psychedelicious
93c896a370 fix(backend): use img_cond_seq to check for Kontext slicing
Was incorrectly checking img_input_ids instead of img_cond_seq
2025-08-04 23:03:12 +10:00
psychedelicious
053d7c8c8e feat(ui): support disabling roarr output styling via localstorage 2025-07-31 23:02:45 +10:00
psychedelicious
5296263954 feat(ui): add missing translations 2025-07-31 22:51:33 +10:00
psychedelicious
a36b70c01c fix(ui): add image name data attr to gallery placeholder image elements
This fixes an issue where gallery's auto-scroll-into-view for selected
images didn't work, and users instead saw a "Unable to find image..."
debug log message in JS console.
2025-07-31 22:48:42 +10:00
psychedelicious
854a2a5a7a chore: bump version to v6.3.0rc1 2025-07-31 14:17:18 +10:00
psychedelicious
f9c64b0609 chore(ui): update whats new 2025-07-31 14:17:18 +10:00
53 changed files with 758 additions and 210 deletions

View File

@@ -157,6 +157,12 @@ def overridden_redoc() -> HTMLResponse:
web_root_path = Path(list(web_dir.__path__)[0])
if app_config.unsafe_disable_picklescan:
logger.warning(
"The unsafe_disable_picklescan option is enabled. This disables malware scanning while installing and"
"loading models, which may allow malicious code to be executed. Use at your own risk."
)
try:
app.mount("/", NoCacheStaticFiles(directory=Path(web_root_path, "dist"), html=True), name="ui")
except RuntimeError:

View File

@@ -1347,3 +1347,96 @@ class PasteImageIntoBoundingBoxInvocation(BaseInvocation, WithMetadata, WithBoar
image_dto = context.images.save(image=target_image)
return ImageOutput.build(image_dto)
@invocation(
"flux_kontext_image_prep",
title="FLUX Kontext Image Prep",
tags=["image", "concatenate", "flux", "kontext"],
category="image",
version="1.0.0",
)
class FluxKontextConcatenateImagesInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Prepares an image or images for use with FLUX Kontext. The first/single image is resized to the nearest
preferred Kontext resolution. All other images are concatenated horizontally, maintaining their aspect ratio."""
images: list[ImageField] = InputField(
description="The images to concatenate",
min_length=1,
max_length=10,
)
use_preferred_resolution: bool = InputField(
default=True, description="Use FLUX preferred resolutions for the first image"
)
def invoke(self, context: InvocationContext) -> ImageOutput:
from invokeai.backend.flux.util import PREFERED_KONTEXT_RESOLUTIONS
# Step 1: Load all images
pil_images = []
for image_field in self.images:
image = context.images.get_pil(image_field.image_name, mode="RGBA")
pil_images.append(image)
# Step 2: Determine target resolution for the first image
first_image = pil_images[0]
width, height = first_image.size
if self.use_preferred_resolution:
aspect_ratio = width / height
# Find the closest preferred resolution for the first image
_, target_width, target_height = min(
((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS), key=lambda x: x[0]
)
# Apply BFL's scaling formula
scaled_height = 2 * int(target_height / 16)
final_height = 8 * scaled_height # This will be consistent for all images
scaled_width = 2 * int(target_width / 16)
first_width = 8 * scaled_width
else:
# Use original dimensions of first image, ensuring divisibility by 16
final_height = 16 * (height // 16)
first_width = 16 * (width // 16)
# Ensure minimum dimensions
if final_height < 16:
final_height = 16
if first_width < 16:
first_width = 16
# Step 3: Process and resize all images with consistent height
processed_images = []
total_width = 0
for i, image in enumerate(pil_images):
if i == 0:
# First image uses the calculated dimensions
final_width = first_width
else:
# Subsequent images maintain aspect ratio with the same height
img_aspect_ratio = image.width / image.height
# Calculate width that maintains aspect ratio at the target height
calculated_width = int(final_height * img_aspect_ratio)
# Ensure width is divisible by 16 for proper VAE encoding
final_width = 16 * (calculated_width // 16)
# Ensure minimum width
if final_width < 16:
final_width = 16
# Resize image to calculated dimensions
resized_image = image.resize((final_width, final_height), Image.Resampling.LANCZOS)
processed_images.append(resized_image)
total_width += final_width
# Step 4: Concatenate images horizontally
concatenated_image = Image.new("RGB", (total_width, final_height))
x_offset = 0
for img in processed_images:
concatenated_image.paste(img, (x_offset, 0))
x_offset += img.width
# Save the concatenated image
image_dto = context.images.save(image=concatenated_image)
return ImageOutput.build(image_dto)

View File

@@ -107,6 +107,7 @@ class InvokeAIAppConfig(BaseSettings):
hashing_algorithm: Model hashing algorthim for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.<br>Valid values: `blake3_multi`, `blake3_single`, `random`, `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`, `blake2b`, `blake2s`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, `shake_128`, `shake_256`
remote_api_tokens: List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.
scan_models_on_startup: Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.
unsafe_disable_picklescan: UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production.
"""
_root: Optional[Path] = PrivateAttr(default=None)
@@ -196,6 +197,7 @@ class InvokeAIAppConfig(BaseSettings):
hashing_algorithm: HASHING_ALGORITHMS = Field(default="blake3_single", description="Model hashing algorthim for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.")
remote_api_tokens: Optional[list[URLRegexTokenPair]] = Field(default=None, description="List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.")
scan_models_on_startup: bool = Field(default=False, description="Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.")
unsafe_disable_picklescan: bool = Field(default=False, description="UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production.")
# fmt: on

View File

@@ -87,9 +87,21 @@ class ModelLoadService(ModelLoadServiceBase):
def torch_load_file(checkpoint: Path) -> AnyModel:
scan_result = scan_file_path(checkpoint)
if scan_result.infected_files != 0:
raise Exception(f"The model at {checkpoint} is potentially infected by malware. Aborting load.")
if self._app_config.unsafe_disable_picklescan:
self._logger.warning(
f"Model at {checkpoint} is potentially infected by malware, but picklescan is disabled. "
"Proceeding with caution."
)
else:
raise Exception(f"The model at {checkpoint} is potentially infected by malware. Aborting load.")
if scan_result.scan_err:
raise Exception(f"Error scanning model at {checkpoint} for malware. Aborting load.")
if self._app_config.unsafe_disable_picklescan:
self._logger.warning(
f"Error scanning model at {checkpoint} for malware, but picklescan is disabled. "
"Proceeding with caution."
)
else:
raise Exception(f"Error scanning model at {checkpoint} for malware. Aborting load.")
result = torch_load(checkpoint, map_location="cpu")
return result

View File

@@ -112,7 +112,7 @@ def denoise(
)
# Slice prediction to only include the main image tokens
if img_input_ids is not None:
if img_cond_seq is not None:
pred = pred[:, :original_seq_len]
step_cfg_scale = cfg_scale[step_index]
@@ -125,9 +125,26 @@ def denoise(
if neg_regional_prompting_extension is None:
raise ValueError("Negative text conditioning is required when cfg_scale is not 1.0.")
# For negative prediction with Kontext, we need to include the reference images
# to maintain consistency between positive and negative passes. Without this,
# CFG would create artifacts as the attention mechanism would see different
# spatial structures in each pass
neg_img_input = img
neg_img_input_ids = img_ids
# Add channel-wise conditioning for negative pass if present
if img_cond is not None:
neg_img_input = torch.cat((neg_img_input, img_cond), dim=-1)
# Add sequence-wise conditioning (Kontext) for negative pass
# This ensures reference images are processed consistently
if img_cond_seq is not None:
neg_img_input = torch.cat((neg_img_input, img_cond_seq), dim=1)
neg_img_input_ids = torch.cat((neg_img_input_ids, img_cond_seq_ids), dim=1)
neg_pred = model(
img=img,
img_ids=img_ids,
img=neg_img_input,
img_ids=neg_img_input_ids,
txt=neg_regional_prompting_extension.regional_text_conditioning.t5_embeddings,
txt_ids=neg_regional_prompting_extension.regional_text_conditioning.t5_txt_ids,
y=neg_regional_prompting_extension.regional_text_conditioning.clip_embeddings,
@@ -140,6 +157,10 @@ def denoise(
ip_adapter_extensions=neg_ip_adapter_extensions,
regional_prompting_extension=neg_regional_prompting_extension,
)
# Slice negative prediction to match main image tokens
if img_cond_seq is not None:
neg_pred = neg_pred[:, :original_seq_len]
pred = neg_pred + step_cfg_scale * (pred - neg_pred)
preview_img = img - t_curr * pred

View File

@@ -1,15 +1,14 @@
import einops
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms as T
from einops import repeat
from PIL import Image
from invokeai.app.invocations.fields import FluxKontextConditioningField
from invokeai.app.invocations.flux_vae_encode import FluxVaeEncodeInvocation
from invokeai.app.invocations.model import VAEField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.flux.sampling_utils import pack
from invokeai.backend.flux.util import PREFERED_KONTEXT_RESOLUTIONS
from invokeai.backend.util.devices import TorchDevice
def generate_img_ids_with_offset(
@@ -19,8 +18,10 @@ def generate_img_ids_with_offset(
device: torch.device,
dtype: torch.dtype,
idx_offset: int = 0,
h_offset: int = 0,
w_offset: int = 0,
) -> torch.Tensor:
"""Generate tensor of image position ids with an optional offset.
"""Generate tensor of image position ids with optional index and spatial offsets.
Args:
latent_height (int): Height of image in latent space (after packing, this becomes h//2).
@@ -28,7 +29,9 @@ def generate_img_ids_with_offset(
batch_size (int): Number of images in the batch.
device (torch.device): Device to create tensors on.
dtype (torch.dtype): Data type for the tensors.
idx_offset (int): Offset to add to the first dimension of the image ids.
idx_offset (int): Offset to add to the first dimension of the image ids (default: 0).
h_offset (int): Spatial offset for height/y-coordinates in latent space (default: 0).
w_offset (int): Spatial offset for width/x-coordinates in latent space (default: 0).
Returns:
torch.Tensor: Image position ids with shape [batch_size, (latent_height//2 * latent_width//2), 3].
@@ -42,6 +45,10 @@ def generate_img_ids_with_offset(
packed_height = latent_height // 2
packed_width = latent_width // 2
# Convert spatial offsets from latent space to packed space
packed_h_offset = h_offset // 2
packed_w_offset = w_offset // 2
# Create base tensor for position IDs with shape [packed_height, packed_width, 3]
# The 3 channels represent: [batch_offset, y_position, x_position]
img_ids = torch.zeros(packed_height, packed_width, 3, device=device, dtype=dtype)
@@ -49,13 +56,13 @@ def generate_img_ids_with_offset(
# Set the batch offset for all positions
img_ids[..., 0] = idx_offset
# Create y-coordinate indices (vertical positions)
y_indices = torch.arange(packed_height, device=device, dtype=dtype)
# Create y-coordinate indices (vertical positions) with spatial offset
y_indices = torch.arange(packed_height, device=device, dtype=dtype) + packed_h_offset
# Broadcast y_indices to match the spatial dimensions [packed_height, 1]
img_ids[..., 1] = y_indices[:, None]
# Create x-coordinate indices (horizontal positions)
x_indices = torch.arange(packed_width, device=device, dtype=dtype)
# Create x-coordinate indices (horizontal positions) with spatial offset
x_indices = torch.arange(packed_width, device=device, dtype=dtype) + packed_w_offset
# Broadcast x_indices to match the spatial dimensions [1, packed_width]
img_ids[..., 2] = x_indices[None, :]
@@ -93,60 +100,93 @@ class KontextExtension:
self.kontext_latents, self.kontext_ids = self._prepare_kontext()
def _prepare_kontext(self) -> tuple[torch.Tensor, torch.Tensor]:
"""Encodes the reference images and prepares their concatenated latents and IDs."""
"""Encodes the reference images and prepares their concatenated latents and IDs with spatial tiling."""
all_latents = []
all_ids = []
# Track cumulative dimensions for spatial tiling
# These track the running extent of the virtual canvas in latent space
h = 0 # Running height extent
w = 0 # Running width extent
vae_info = self._context.models.load(self._vae_field.vae)
for idx, kontext_field in enumerate(self.kontext_conditioning):
image = self._context.images.get_pil(kontext_field.image.image_name)
# Calculate aspect ratio of input image
width, height = image.size
aspect_ratio = width / height
# Find the closest preferred resolution by aspect ratio
_, target_width, target_height = min(
((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS), key=lambda x: x[0]
)
# Apply BFL's scaling formula
# This ensures compatibility with the model's training
scaled_width = 2 * int(target_width / 16)
scaled_height = 2 * int(target_height / 16)
# Resize to the exact resolution used during training
# Convert to RGB
image = image.convert("RGB")
final_width = 8 * scaled_width
final_height = 8 * scaled_height
image = image.resize((final_width, final_height), Image.Resampling.LANCZOS)
# Convert to tensor with same normalization as BFL
image_np = np.array(image)
image_tensor = torch.from_numpy(image_np).float() / 127.5 - 1.0
image_tensor = einops.rearrange(image_tensor, "h w c -> 1 c h w")
# Convert to tensor using torchvision transforms for consistency
transformation = T.Compose(
[
T.ToTensor(), # Converts PIL image to tensor and scales to [0, 1]
]
)
image_tensor = transformation(image)
# Convert from [0, 1] to [-1, 1] range expected by VAE
image_tensor = image_tensor * 2.0 - 1.0
image_tensor = image_tensor.unsqueeze(0) # Add batch dimension
image_tensor = image_tensor.to(self._device)
# Continue with VAE encoding
kontext_latents_unpacked = FluxVaeEncodeInvocation.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
# Don't sample from the distribution for reference images - use the mean (matching ComfyUI)
with vae_info as vae:
assert isinstance(vae, AutoEncoder)
vae_dtype = next(iter(vae.parameters())).dtype
image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype)
# Use sample=False to get the distribution mean without noise
kontext_latents_unpacked = vae.encode(image_tensor, sample=False)
# Extract tensor dimensions
batch_size, _, latent_height, latent_width = kontext_latents_unpacked.shape
# Pad latents to be compatible with patch_size=2
# This ensures dimensions are even for the pack() function
pad_h = (2 - latent_height % 2) % 2
pad_w = (2 - latent_width % 2) % 2
if pad_h > 0 or pad_w > 0:
kontext_latents_unpacked = F.pad(kontext_latents_unpacked, (0, pad_w, 0, pad_h), mode="circular")
# Update dimensions after padding
_, _, latent_height, latent_width = kontext_latents_unpacked.shape
# Pack the latents
kontext_latents_packed = pack(kontext_latents_unpacked).to(self._device, self._dtype)
# Generate IDs with offset based on image index
# Determine spatial offsets for this reference image
# - Compare the potential new canvas dimensions if we add the image vertically vs horizontally
# - Choose the placement that results in a more square-like canvas
h_offset = 0
w_offset = 0
if idx > 0: # First image starts at (0, 0)
# Check which placement would result in better canvas dimensions
# If adding to height would make the canvas taller than wide, tile horizontally
# Otherwise, tile vertically
if latent_height + h > latent_width + w:
# Tile horizontally (to the right of existing images)
w_offset = w
else:
# Tile vertically (below existing images)
h_offset = h
# Generate IDs with both index offset and spatial offsets
kontext_ids = generate_img_ids_with_offset(
latent_height=latent_height,
latent_width=latent_width,
batch_size=batch_size,
device=self._device,
dtype=self._dtype,
idx_offset=idx + 1, # Each image gets a unique offset
idx_offset=1, # All reference images use index=1 (matching ComfyUI implementation)
h_offset=h_offset,
w_offset=w_offset,
)
# Update cumulative dimensions
# Track the maximum extent of the virtual canvas after placing this image
h = max(h, latent_height + h_offset)
w = max(w, latent_width + w_offset)
all_latents.append(kontext_latents_packed)
all_ids.append(kontext_ids)

View File

@@ -9,6 +9,7 @@ import spandrel
import torch
import invokeai.backend.util.logging as logger
from invokeai.app.services.config.config_default import get_config
from invokeai.app.util.misc import uuid_string
from invokeai.backend.flux.controlnet.state_dict_utils import (
is_state_dict_instantx_controlnet,
@@ -493,9 +494,21 @@ class ModelProbe(object):
# scan model
scan_result = pscan.scan_file_path(checkpoint)
if scan_result.infected_files != 0:
raise Exception(f"The model {model_name} is potentially infected by malware. Aborting import.")
if get_config().unsafe_disable_picklescan:
logger.warning(
f"The model {model_name} is potentially infected by malware, but picklescan is disabled. "
"Proceeding with caution."
)
else:
raise RuntimeError(f"The model {model_name} is potentially infected by malware. Aborting import.")
if scan_result.scan_err:
raise Exception(f"Error scanning model {model_name} for malware. Aborting import.")
if get_config().unsafe_disable_picklescan:
logger.warning(
f"Error scanning the model at {model_name} for malware, but picklescan is disabled. "
"Proceeding with caution."
)
else:
raise RuntimeError(f"Error scanning the model at {model_name} for malware. Aborting import.")
# Probing utilities

View File

@@ -6,13 +6,17 @@ import torch
from picklescan.scanner import scan_file_path
from safetensors import safe_open
from invokeai.app.services.config.config_default import get_config
from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
from invokeai.backend.model_manager.taxonomy import ModelRepoVariant
from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.backend.util.silence_warnings import SilenceWarnings
StateDict: TypeAlias = dict[str | int, Any] # When are the keys int?
logger = InvokeAILogger.get_logger()
class ModelOnDisk:
"""A utility class representing a model stored on disk."""
@@ -79,8 +83,24 @@ class ModelOnDisk:
with SilenceWarnings():
if path.suffix.endswith((".ckpt", ".pt", ".pth", ".bin")):
scan_result = scan_file_path(path)
if scan_result.infected_files != 0 or scan_result.scan_err:
raise RuntimeError(f"The model {path.stem} is potentially infected by malware. Aborting import.")
if scan_result.infected_files != 0:
if get_config().unsafe_disable_picklescan:
logger.warning(
f"The model {path.stem} is potentially infected by malware, but picklescan is disabled. "
"Proceeding with caution."
)
else:
raise RuntimeError(
f"The model {path.stem} is potentially infected by malware. Aborting import."
)
if scan_result.scan_err:
if get_config().unsafe_disable_picklescan:
logger.warning(
f"Error scanning the model at {path.stem} for malware, but picklescan is disabled. "
"Proceeding with caution."
)
else:
raise RuntimeError(f"Error scanning the model at {path.stem} for malware. Aborting import.")
checkpoint = torch.load(path, map_location="cpu")
assert isinstance(checkpoint, dict)
elif path.suffix.endswith(".gguf"):

View File

@@ -149,13 +149,29 @@ flux_kontext = StarterModel(
dependencies=[t5_base_encoder, flux_vae, clip_l_encoder],
)
flux_kontext_quantized = StarterModel(
name="FLUX.1 Kontext dev (Quantized)",
name="FLUX.1 Kontext dev (quantized)",
base=BaseModelType.Flux,
source="https://huggingface.co/unsloth/FLUX.1-Kontext-dev-GGUF/resolve/main/flux1-kontext-dev-Q4_K_M.gguf",
description="FLUX.1 Kontext dev quantized (q4_k_m). Total size with dependencies: ~14GB",
type=ModelType.Main,
dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
)
flux_krea = StarterModel(
name="FLUX.1 Krea dev",
base=BaseModelType.Flux,
source="https://huggingface.co/InvokeAI/FLUX.1-Krea-dev/resolve/main/flux1-krea-dev.safetensors",
description="FLUX.1 Krea dev. Total size with dependencies: ~33GB",
type=ModelType.Main,
dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
)
flux_krea_quantized = StarterModel(
name="FLUX.1 Krea dev (quantized)",
base=BaseModelType.Flux,
source="https://huggingface.co/InvokeAI/FLUX.1-Krea-dev-GGUF/resolve/main/flux1-krea-dev-Q4_K_M.gguf",
description="FLUX.1 Krea dev quantized (q4_k_m). Total size with dependencies: ~14GB",
type=ModelType.Main,
dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
)
sd35_medium = StarterModel(
name="SD3.5 Medium",
base=BaseModelType.StableDiffusion3,
@@ -580,13 +596,14 @@ t2i_sketch_sdxl = StarterModel(
)
# endregion
# region SpandrelImageToImage
realesrgan_anime = StarterModel(
name="RealESRGAN_x4plus_anime_6B",
animesharp_v4_rcan = StarterModel(
name="2x-AnimeSharpV4_RCAN",
base=BaseModelType.Any,
source="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
description="A Real-ESRGAN 4x upscaling model (optimized for anime images).",
source="https://github.com/Kim2091/Kim2091-Models/releases/download/2x-AnimeSharpV4/2x-AnimeSharpV4_RCAN.safetensors",
description="A 2x upscaling model (optimized for anime images).",
type=ModelType.SpandrelImageToImage,
)
realesrgan_x4 = StarterModel(
name="RealESRGAN_x4plus",
base=BaseModelType.Any,
@@ -732,7 +749,7 @@ STARTER_MODELS: list[StarterModel] = [
t2i_lineart_sdxl,
t2i_sketch_sdxl,
realesrgan_x4,
realesrgan_anime,
animesharp_v4_rcan,
realesrgan_x2,
swinir,
t5_base_encoder,
@@ -743,6 +760,8 @@ STARTER_MODELS: list[StarterModel] = [
llava_onevision,
flux_fill,
cogview4,
flux_krea,
flux_krea_quantized,
]
sd1_bundle: list[StarterModel] = [
@@ -794,6 +813,7 @@ flux_bundle: list[StarterModel] = [
flux_redux,
flux_fill,
flux_kontext_quantized,
flux_krea_quantized,
]
STARTER_BUNDLES: dict[str, StarterModelBundle] = {

View File

@@ -8,8 +8,12 @@ import picklescan.scanner as pscan
import safetensors
import torch
from invokeai.app.services.config.config_default import get_config
from invokeai.backend.model_manager.taxonomy import ClipVariantType
from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader
from invokeai.backend.util.logging import InvokeAILogger
logger = InvokeAILogger.get_logger()
def _fast_safetensors_reader(path: str) -> Dict[str, torch.Tensor]:
@@ -59,9 +63,21 @@ def read_checkpoint_meta(path: Union[str, Path], scan: bool = True) -> Dict[str,
if scan:
scan_result = pscan.scan_file_path(path)
if scan_result.infected_files != 0:
raise Exception(f"The model at {path} is potentially infected by malware. Aborting import.")
if get_config().unsafe_disable_picklescan:
logger.warning(
f"The model {path} is potentially infected by malware, but picklescan is disabled. "
"Proceeding with caution."
)
else:
raise RuntimeError(f"The model {path} is potentially infected by malware. Aborting import.")
if scan_result.scan_err:
raise Exception(f"Error scanning model at {path} for malware. Aborting import.")
if get_config().unsafe_disable_picklescan:
logger.warning(
f"Error scanning the model at {path} for malware, but picklescan is disabled. "
"Proceeding with caution."
)
else:
raise RuntimeError(f"Error scanning the model at {path} for malware. Aborting import.")
checkpoint = torch.load(path, map_location=torch.device("meta"))
return checkpoint

View File

@@ -1470,7 +1470,6 @@
"ui": {
"tabs": {
"queue": "Warteschlange",
"generation": "Erzeugung",
"gallery": "Galerie",
"models": "Modelle",
"upscaling": "Hochskalierung",

View File

@@ -610,6 +610,10 @@
"title": "Toggle Non-Raster Layers",
"desc": "Show or hide all non-raster layer categories (Control Layers, Inpaint Masks, Regional Guidance)."
},
"fitBboxToLayers": {
"title": "Fit Bbox To Layers",
"desc": "Automatically adjust the generation bounding box to fit visible layers"
},
"fitBboxToMasks": {
"title": "Fit Bbox To Masks",
"desc": "Automatically adjust the generation bounding box to fit visible inpaint masks"
@@ -2066,6 +2070,8 @@
"asControlLayer": "As $t(controlLayers.controlLayer)",
"asControlLayerResize": "As $t(controlLayers.controlLayer) (Resize)",
"referenceImage": "Reference Image",
"maxRefImages": "Max Ref Images",
"useAsReferenceImage": "Use as Reference Image",
"regionalReferenceImage": "Regional Reference Image",
"globalReferenceImage": "Global Reference Image",
"sendingToCanvas": "Staging Generations on Canvas",
@@ -2533,7 +2539,7 @@
},
"ui": {
"tabs": {
"generation": "Generation",
"generate": "Generate",
"canvas": "Canvas",
"workflows": "Workflows",
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
@@ -2544,6 +2550,12 @@
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
"gallery": "Gallery"
},
"panels": {
"launchpad": "Launchpad",
"workflowEditor": "Workflow Editor",
"imageViewer": "Image Viewer",
"canvas": "Canvas"
},
"launchpad": {
"workflowsTitle": "Go deep with Workflows.",
"upscalingTitle": "Upscale and add detail.",
@@ -2551,6 +2563,28 @@
"generateTitle": "Generate images from text prompts.",
"modelGuideText": "Want to learn what prompts work best for each model?",
"modelGuideLink": "Check out our Model Guide.",
"createNewWorkflowFromScratch": "Create a new Workflow from scratch",
"browseAndLoadWorkflows": "Browse and load existing workflows",
"addStyleRef": {
"title": "Add a Style Reference",
"description": "Add an image to transfer its look."
},
"editImage": {
"title": "Edit Image",
"description": "Add an image to refine."
},
"generateFromText": {
"title": "Generate from Text",
"description": "Enter a prompt and Invoke."
},
"useALayoutImage": {
"title": "Use a Layout Image",
"description": "Add an image to control composition."
},
"generate": {
"canvasCalloutTitle": "Looking to get more control, edit, and iterate on your images?",
"canvasCalloutLink": "Navigate to Canvas for more capabilities."
},
"workflows": {
"description": "Workflows are reusable templates that automate image generation tasks, allowing you to quickly perform complex operations and get consistent results.",
"learnMoreLink": "Learn more about creating workflows",
@@ -2587,6 +2621,13 @@
"upscaleModel": "Upscale Model",
"model": "Model",
"scale": "Scale",
"creativityAndStructure": {
"title": "Creativity & Structure Defaults",
"conservative": "Conservative",
"balanced": "Balanced",
"creative": "Creative",
"artistic": "Artistic"
},
"helpText": {
"promptAdvice": "When upscaling, use a prompt that describes the medium and style. Avoid describing specific content details in the image.",
"styleAdvice": "Upscaling works best with the general style of your image."
@@ -2631,10 +2672,8 @@
"whatsNew": {
"whatsNewInInvoke": "What's New in Invoke",
"items": [
"New setting to send all Canvas generations directly to the Gallery.",
"New Invert Mask (Shift+V) and Fit BBox to Mask (Shift+B) capabilities.",
"Expanded support for Model Thumbnails and configurations.",
"Various other quality of life updates and fixes"
"Studio state is saved to the server, allowing you to continue your work on any device.",
"Support for multiple reference images for FLUX Kontext (local model only)."
],
"readReleaseNotes": "Read Release Notes",
"watchRecentReleaseVideos": "Watch Recent Release Videos",

View File

@@ -399,7 +399,6 @@
"ui": {
"tabs": {
"canvas": "Lienzo",
"generation": "Generación",
"queue": "Cola",
"workflows": "Flujos de trabajo",
"models": "Modelos",

View File

@@ -1820,7 +1820,6 @@
"upscaling": "Agrandissement",
"gallery": "Galerie",
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
"generation": "Génération",
"workflows": "Workflows",
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
"models": "Modèles",

View File

@@ -1173,8 +1173,8 @@
"layeringStrategy": "Strategia livelli",
"longestPath": "Percorso più lungo",
"layoutDirection": "Direzione schema",
"layoutDirectionRight": "Orizzontale",
"layoutDirectionDown": "Verticale",
"layoutDirectionRight": "A destra",
"layoutDirectionDown": "In basso",
"alignment": "Allineamento nodi",
"alignmentUL": "In alto a sinistra",
"alignmentDL": "In basso a sinistra",
@@ -1728,7 +1728,7 @@
"structure": {
"heading": "Struttura",
"paragraphs": [
"La struttura determina quanto l'immagine finale rispecchierà il layout dell'originale. Un valore struttura basso permette cambiamenti significativi, mentre un valore struttura alto conserva la composizione e lo schema originali."
"La struttura determina quanto l'immagine finale rispecchierà lo schema dell'originale. Un valore struttura basso permette cambiamenti significativi, mentre un valore struttura alto conserva la composizione e lo schema originali."
]
},
"fluxDevLicense": {
@@ -2495,11 +2495,12 @@
"off": "Spento"
},
"invertMask": "Inverti maschera",
"fitBboxToMasks": "Adatta il riquadro di delimitazione alle maschere"
"fitBboxToMasks": "Adatta il riquadro di delimitazione alle maschere",
"maxRefImages": "Max Immagini di rif.to",
"useAsReferenceImage": "Usa come immagine di riferimento"
},
"ui": {
"tabs": {
"generation": "Generazione",
"canvas": "Tela",
"workflows": "Flussi di lavoro",
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
@@ -2508,7 +2509,8 @@
"queue": "Coda",
"upscaling": "Amplia",
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
"gallery": "Galleria"
"gallery": "Galleria",
"generate": "Genera"
},
"launchpad": {
"workflowsTitle": "Approfondisci i flussi di lavoro.",
@@ -2556,8 +2558,43 @@
"helpText": {
"promptAdvice": "Durante l'ampliamento, utilizza un prompt che descriva il mezzo e lo stile. Evita di descrivere dettagli specifici del contenuto dell'immagine.",
"styleAdvice": "L'ampliamento funziona meglio con lo stile generale dell'immagine."
},
"creativityAndStructure": {
"title": "Creatività e struttura predefinite",
"conservative": "Conservativo",
"balanced": "Bilanciato",
"creative": "Creativo",
"artistic": "Artistico"
}
},
"createNewWorkflowFromScratch": "Crea un nuovo flusso di lavoro da zero",
"browseAndLoadWorkflows": "Sfoglia e carica i flussi di lavoro esistenti",
"addStyleRef": {
"title": "Aggiungi un riferimento di stile",
"description": "Aggiungi un'immagine per trasferirne l'aspetto."
},
"editImage": {
"title": "Modifica immagine",
"description": "Aggiungi un'immagine da perfezionare."
},
"generateFromText": {
"title": "Genera da testo",
"description": "Inserisci un prompt e genera."
},
"useALayoutImage": {
"description": "Aggiungi un'immagine per controllare la composizione.",
"title": "Usa una immagine guida"
},
"generate": {
"canvasCalloutTitle": "Vuoi avere più controllo, modificare e affinare le tue immagini?",
"canvasCalloutLink": "Per ulteriori funzionalità, vai su Tela."
}
},
"panels": {
"launchpad": "Rampa di lancio",
"workflowEditor": "Editor del flusso di lavoro",
"imageViewer": "Visualizzatore immagini",
"canvas": "Tela"
}
},
"upscaling": {
@@ -2648,10 +2685,8 @@
"watchRecentReleaseVideos": "Guarda i video su questa versione",
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
"items": [
"Nuova impostazione per inviare tutte le generazioni della Tela direttamente alla Galleria.",
"Nuove funzionalità Inverti maschera (Maiusc+V) e Adatta il Riquadro di delimitazione alla maschera (Maiusc+B).",
"Supporto esteso per miniature e configurazioni dei modelli.",
"Vari altri aggiornamenti e correzioni per la qualità della vita"
"Lo stato dello studio viene salvato sul server, consentendoti di continuare a lavorare su qualsiasi dispositivo.",
"Supporto per più immagini di riferimento per FLUX Kontext (solo modello locale)."
]
},
"system": {

View File

@@ -1783,7 +1783,6 @@
"workflows": "ワークフロー",
"models": "モデル",
"gallery": "ギャラリー",
"generation": "生成",
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
"upscaling": "アップスケーリング",

View File

@@ -1931,7 +1931,6 @@
},
"ui": {
"tabs": {
"generation": "Генерация",
"canvas": "Холст",
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
"models": "Модели",

View File

@@ -2238,7 +2238,9 @@
"switchOnFinish": "Khi Kết Thúc"
},
"fitBboxToMasks": "Xếp Vừa Hộp Giới Hạn Vào Lớp Phủ",
"invertMask": "Đảo Ngược Lớp Phủ"
"invertMask": "Đảo Ngược Lớp Phủ",
"maxRefImages": "Ảnh Mẫu Tối Đa",
"useAsReferenceImage": "Dùng Làm Ảnh Mẫu"
},
"stylePresets": {
"negativePrompt": "Lệnh Tiêu Cực",
@@ -2414,14 +2416,14 @@
"tabs": {
"gallery": "Thư Viện Ảnh",
"models": "Models",
"generation": "Generation (Máy Tạo Sinh)",
"upscaling": "Upscale (Nâng Cấp Chất Lượng Hình Ảnh)",
"canvas": "Canvas (Vùng Ảnh)",
"upscalingTab": "$t(common.tab) $t(ui.tabs.upscaling)",
"modelsTab": "$t(common.tab) $t(ui.tabs.models)",
"queue": "Queue (Hàng Đợi)",
"workflows": "Workflow (Luồng Làm Việc)",
"workflowsTab": "$t(common.tab) $t(ui.tabs.workflows)"
"workflowsTab": "$t(common.tab) $t(ui.tabs.workflows)",
"generate": "Tạo Sinh"
},
"launchpad": {
"workflowsTitle": "Đi sâu hơn với Workflow.",
@@ -2469,8 +2471,43 @@
"promptAdvice": "Khi upscale, dùng lệnh để mô tả phương thức và phong cách. Tránh mô tả các chi tiết cụ thể trong ảnh.",
"styleAdvice": "Upscale thích hợp nhất cho phong cách chung của ảnh."
},
"scale": "Kích Thước"
"scale": "Kích Thước",
"creativityAndStructure": {
"title": "Độ Sáng Tạo & Cấu Trúc Mặc Định",
"conservative": "Bảo toàn",
"balanced": "Cân bằng",
"creative": "Sáng tạo",
"artistic": "Thẩm mỹ"
}
},
"createNewWorkflowFromScratch": "Tạo workflow mới từ đầu",
"browseAndLoadWorkflows": "Duyệt và tải workflow có sẵn",
"addStyleRef": {
"title": "Thêm Phong Cách Mẫu",
"description": "Thêm ảnh để chuyển đổi diện mạo của nó."
},
"editImage": {
"title": "Biên Tập Ảnh",
"description": "Thêm ảnh để chỉnh sửa."
},
"generateFromText": {
"title": "Tạo Sinh Từ Chữ",
"description": "Nhập lệnh vào và Kích Hoạt."
},
"useALayoutImage": {
"title": "Dùng Bố Cục Ảnh",
"description": "Thêm ảnh để điều khiển bố cục."
},
"generate": {
"canvasCalloutTitle": "Đang tìm cách để điều khiển, chỉnh sửa, và làm lại ảnh?",
"canvasCalloutLink": "Vào Canvas cho nhiều tính năng hơn."
}
},
"panels": {
"launchpad": "Launchpad",
"workflowEditor": "Trình Biên Tập Workflow",
"imageViewer": "Trình Xem Ảnh",
"canvas": "Canvas"
}
},
"workflows": {
@@ -2642,10 +2679,8 @@
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
"items": [
"Thiết lập mới để gửi các sản phẩm tạo sinh từ Canvas trực tiếp đến Thư Viện Ảnh.",
"Chức năng mới Đảo Ngược Lớp Phủ (Shift+V) và khả năng Xếp Vừa Hộp Giới Hạn Vào Lớp Phủ (Shift+B).",
"Mở rộng hỗ trợ cho Ảnh Minh Hoạ và thiết lập model.",
"Nhiều bản cập nhật và sửa lỗi chất lượng"
"Trạng thái Studio được lưu vào server, giúp bạn tiếp tục công việc ở mọi thiết bị.",
"Hỗ trợ nhiều ảnh mẫu cho FLUX KONTEXT (chỉ cho model trên máy)."
]
},
"upsell": {

View File

@@ -1772,7 +1772,6 @@
},
"ui": {
"tabs": {
"generation": "生成",
"queue": "队列",
"canvas": "画布",
"upscaling": "放大中",

View File

@@ -93,5 +93,7 @@ export const configureLogging = (
localStorage.setItem('ROARR_FILTER', filter);
}
ROARR.write = createLogWriter();
const styleOutput = localStorage.getItem('ROARR_STYLE_OUTPUT') === 'false' ? false : true;
ROARR.write = createLogWriter({ styleOutput });
};

View File

@@ -1,9 +1,9 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { canvasReset } from 'features/controlLayers/store/actions';
import { inpaintMaskAdded } from 'features/controlLayers/store/canvasSlice';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { allEntitiesDeleted, inpaintMaskAdded } from 'features/controlLayers/store/canvasSlice';
import { $canvasManager } from 'features/controlLayers/store/ephemeral';
import { paramsReset } from 'features/controlLayers/store/paramsSlice';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowsCounterClockwiseBold } from 'react-icons/pi';
@@ -11,9 +11,10 @@ import { PiArrowsCounterClockwiseBold } from 'react-icons/pi';
export const SessionMenuItems = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const tab = useAppSelector(selectActiveTab);
const resetCanvasLayers = useCallback(() => {
dispatch(canvasReset());
dispatch(allEntitiesDeleted());
dispatch(inpaintMaskAdded({ isSelected: true, isBookmarked: true }));
$canvasManager.get()?.stage.fitBboxToStage();
}, [dispatch]);
@@ -22,12 +23,16 @@ export const SessionMenuItems = memo(() => {
}, [dispatch]);
return (
<>
<MenuItem icon={<PiArrowsCounterClockwiseBold />} onClick={resetCanvasLayers}>
{t('controlLayers.resetCanvasLayers')}
</MenuItem>
<MenuItem icon={<PiArrowsCounterClockwiseBold />} onClick={resetGenerationSettings}>
{t('controlLayers.resetGenerationSettings')}
</MenuItem>
{tab === 'canvas' && (
<MenuItem icon={<PiArrowsCounterClockwiseBold />} onClick={resetCanvasLayers}>
{t('controlLayers.resetCanvasLayers')}
</MenuItem>
)}
{(tab === 'canvas' || tab === 'generate') && (
<MenuItem icon={<PiArrowsCounterClockwiseBold />} onClick={resetGenerationSettings}>
{t('controlLayers.resetGenerationSettings')}
</MenuItem>
)}
</>
);
});

View File

@@ -1,15 +1,20 @@
import { Flex } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { skipToken } from '@reduxjs/toolkit/query';
import { useAppSelector, useAppStore } from 'app/store/storeHooks';
import { UploadImageIconButton } from 'common/hooks/useImageUploadButton';
import { bboxSizeOptimized, bboxSizeRecalled } from 'features/controlLayers/store/canvasSlice';
import { useCanvasIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { sizeOptimized, sizeRecalled } from 'features/controlLayers/store/paramsSlice';
import type { ImageWithDims } from 'features/controlLayers/store/types';
import type { setGlobalReferenceImageDndTarget, setRegionalGuidanceReferenceImageDndTarget } from 'features/dnd/dnd';
import { DndDropTarget } from 'features/dnd/DndDropTarget';
import { DndImage } from 'features/dnd/DndImage';
import { DndImageIcon } from 'features/dnd/DndImageIcon';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { memo, useCallback, useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowCounterClockwiseBold } from 'react-icons/pi';
import { PiArrowCounterClockwiseBold, PiRulerBold } from 'react-icons/pi';
import { useGetImageDTOQuery } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';
import { $isConnected } from 'services/events/stores';
@@ -29,7 +34,10 @@ export const RefImageImage = memo(
dndTargetData,
}: Props<T>) => {
const { t } = useTranslation();
const store = useAppStore();
const isConnected = useStore($isConnected);
const tab = useAppSelector(selectActiveTab);
const isStaging = useCanvasIsStaging();
const { currentData: imageDTO, isError } = useGetImageDTOQuery(image?.image_name ?? skipToken);
const handleResetControlImage = useCallback(() => {
onChangeImage(null);
@@ -48,6 +56,20 @@ export const RefImageImage = memo(
[onChangeImage]
);
const recallSizeAndOptimize = useCallback(() => {
if (!imageDTO || (tab === 'canvas' && isStaging)) {
return;
}
const { width, height } = imageDTO;
if (tab === 'canvas') {
store.dispatch(bboxSizeRecalled({ width, height }));
store.dispatch(bboxSizeOptimized());
} else if (tab === 'generate') {
store.dispatch(sizeRecalled({ width, height }));
store.dispatch(sizeOptimized());
}
}, [imageDTO, isStaging, store, tab]);
return (
<Flex position="relative" w="full" h="full" alignItems="center" data-error={!imageDTO && !image?.image_name}>
{!imageDTO && (
@@ -69,6 +91,14 @@ export const RefImageImage = memo(
tooltip={t('common.reset')}
/>
</Flex>
<Flex position="absolute" flexDir="column" bottom={2} insetInlineEnd={2} gap={1}>
<DndImageIcon
onClick={recallSizeAndOptimize}
icon={<PiRulerBold size={16} />}
tooltip={t('parameters.useSize')}
isDisabled={!imageDTO || (tab === 'canvas' && isStaging)}
/>
</Flex>
</>
)}
<DndDropTarget dndTarget={dndTarget} dndTargetData={dndTargetData} label={t('gallery.drop')} />

View File

@@ -63,6 +63,7 @@ RefImageList.displayName = 'RefImageList';
const dndTargetData = addGlobalReferenceImageDndTarget.getData();
const MaxRefImages = memo(() => {
const { t } = useTranslation();
return (
<Button
position="relative"
@@ -75,7 +76,7 @@ const MaxRefImages = memo(() => {
borderRadius="base"
isDisabled
>
Max Ref Images
{t('controlLayers.maxRefImages')}
</Button>
);
});
@@ -83,6 +84,7 @@ MaxRefImages.displayName = 'MaxRefImages';
const AddRefImageDropTargetAndButton = memo(() => {
const { dispatch, getState } = useAppStore();
const { t } = useTranslation();
const tab = useAppSelector(selectActiveTab);
const uploadOptions = useMemo(
@@ -114,7 +116,7 @@ const AddRefImageDropTargetAndButton = memo(() => {
leftIcon={<PiUploadBold />}
{...uploadApi.getUploadButtonProps()}
>
Reference Image
{t('controlLayers.referenceImage')}
<input {...uploadApi.getUploadInputProps()} />
<DndDropTarget label="Drop" dndTarget={addGlobalReferenceImageDndTarget} dndTargetData={dndTargetData} />
</Button>

View File

@@ -1,6 +1,8 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useIsRegionFocused } from 'common/hooks/focus';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiResizeBold } from 'react-icons/pi';
@@ -9,9 +11,23 @@ export const CanvasToolbarFitBboxToLayersButton = memo(() => {
const { t } = useTranslation();
const canvasManager = useCanvasManager();
const isBusy = useCanvasIsBusy();
const isCanvasFocused = useIsRegionFocused('canvas');
const onClick = useCallback(() => {
canvasManager.tool.tools.bbox.fitToLayers();
}, [canvasManager.tool.tools.bbox]);
canvasManager.stage.fitLayersToStage();
}, [canvasManager.tool.tools.bbox, canvasManager.stage]);
useRegisteredHotkeys({
id: 'fitBboxToLayers',
category: 'canvas',
callback: () => {
canvasManager.tool.tools.bbox.fitToLayers();
canvasManager.stage.fitLayersToStage();
},
options: { enabled: isCanvasFocused && !isBusy, preventDefault: true },
dependencies: [isCanvasFocused, isBusy],
});
return (
<IconButton

View File

@@ -1091,6 +1091,15 @@ const slice = createSlice({
syncScaledSize(state);
},
bboxSizeRecalled: (state, action: PayloadAction<{ width: number; height: number }>) => {
const { width, height } = action.payload;
const gridSize = getGridSize(state.bbox.modelBase);
state.bbox.rect.width = Math.max(roundDownToMultiple(width, gridSize), 64);
state.bbox.rect.height = Math.max(roundDownToMultiple(height, gridSize), 64);
state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height;
state.bbox.aspectRatio.id = 'Free';
state.bbox.aspectRatio.isLocked = true;
},
bboxAspectRatioLockToggled: (state) => {
state.bbox.aspectRatio.isLocked = !state.bbox.aspectRatio.isLocked;
syncScaledSize(state);
@@ -1619,6 +1628,7 @@ export const {
entityArrangedToBack,
entityOpacityChanged,
entitiesReordered,
allEntitiesDeleted,
allEntitiesOfTypeIsHiddenToggled,
allNonRasterLayersIsHiddenToggled,
// bbox
@@ -1626,6 +1636,7 @@ export const {
bboxScaledWidthChanged,
bboxScaledHeightChanged,
bboxScaleMethodChanged,
bboxSizeRecalled,
bboxWidthChanged,
bboxHeightChanged,
bboxAspectRatioLockToggled,

View File

@@ -241,6 +241,15 @@ const slice = createSlice({
},
//#region Dimensions
sizeRecalled: (state, action: PayloadAction<{ width: number; height: number }>) => {
const { width, height } = action.payload;
const gridSize = getGridSize(state.model?.base);
state.dimensions.rect.width = Math.max(roundDownToMultiple(width, gridSize), 64);
state.dimensions.rect.height = Math.max(roundDownToMultiple(height, gridSize), 64);
state.dimensions.aspectRatio.value = state.dimensions.rect.width / state.dimensions.rect.height;
state.dimensions.aspectRatio.id = 'Free';
state.dimensions.aspectRatio.isLocked = true;
},
widthChanged: (state, action: PayloadAction<{ width: number; updateAspectRatio?: boolean; clamp?: boolean }>) => {
const { width, updateAspectRatio, clamp } = action.payload;
const gridSize = getGridSize(state.model?.base);
@@ -369,14 +378,16 @@ const slice = createSlice({
const resetState = (state: ParamsState): ParamsState => {
// When a new session is requested, we need to keep the current model selections, plus dependent state
// like VAE precision. Everything else gets reset to default.
const oldState = deepClone(state);
const newState = getInitialParamsState();
newState.model = state.model;
newState.vae = state.vae;
newState.fluxVAE = state.fluxVAE;
newState.vaePrecision = state.vaePrecision;
newState.t5EncoderModel = state.t5EncoderModel;
newState.clipEmbedModel = state.clipEmbedModel;
newState.refinerModel = state.refinerModel;
newState.dimensions = oldState.dimensions;
newState.model = oldState.model;
newState.vae = oldState.vae;
newState.fluxVAE = oldState.fluxVAE;
newState.vaePrecision = oldState.vaePrecision;
newState.t5EncoderModel = oldState.t5EncoderModel;
newState.clipEmbedModel = oldState.clipEmbedModel;
newState.refinerModel = oldState.refinerModel;
return newState;
};
@@ -427,6 +438,7 @@ export const {
modelChanged,
// Dimensions
sizeRecalled,
widthChanged,
heightChanged,
aspectRatioLockToggled,

View File

@@ -27,6 +27,7 @@ export const DndImageIcon = memo((props: Props) => {
return (
<IconButton
onClick={onClick}
tooltip={tooltip}
aria-label={tooltip}
icon={icon}
variant="link"

View File

@@ -53,6 +53,7 @@ export const BoardEditableTitle = memo(({ board, isSelected }: Props) => {
color={isSelected ? 'base.100' : 'base.300'}
onDoubleClick={editable.startEditing}
cursor="text"
noOfLines={1}
>
{editable.value}
</Text>

View File

@@ -37,6 +37,7 @@ export const BoardTooltip = ({ board }: Props) => {
/>
)}
<Flex flexDir="column" alignItems="center">
{board && <Text fontWeight="semibold">{board.board_name}</Text>}
<Text noOfLines={1}>
{t('boards.imagesWithCount', { count: imagesTotal })}, {t('boards.assetsWithCount', { count: assetsTotal })}
</Text>

View File

@@ -59,7 +59,7 @@ export const BoardsPanel = memo(() => {
onClick={collapsibleApi.toggle}
leftIcon={isCollapsed ? <PiCaretDownBold /> : <PiCaretUpBold />}
>
Boards
{t('boards.boards')}
</Button>
</Flex>
<Flex>

View File

@@ -75,6 +75,7 @@ export const GalleryPanel = memo(() => {
variant="ghost"
onClick={collapsibleApi.toggle}
leftIcon={isCollapsed ? <PiCaretDownBold /> : <PiCaretUpBold />}
noOfLines={1}
>
{boardName}
</Button>

View File

@@ -2,6 +2,7 @@ import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { useAppStore } from 'app/store/storeHooks';
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
import { useCanvasIsBusySafe } from 'features/controlLayers/hooks/useCanvasIsBusy';
import { useCanvasIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { newCanvasFromImage } from 'features/imageActions/actions';
import { toast } from 'features/toast/toast';
@@ -17,6 +18,7 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
const store = useAppStore();
const imageDTO = useImageDTOContext();
const isBusy = useCanvasIsBusySafe();
const isStaging = useCanvasIsStaging();
const onClickNewCanvasWithRasterLayerFromImage = useCallback(async () => {
const { dispatch, getState } = store;
@@ -97,27 +99,31 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
<SubMenuButtonContent label={t('controlLayers.newCanvasFromImage')} />
</MenuButton>
<MenuList {...subMenu.menuListProps}>
<MenuItem icon={<PiFileBold />} onClickCapture={onClickNewCanvasWithRasterLayerFromImage} isDisabled={isBusy}>
<MenuItem
icon={<PiFileBold />}
onClickCapture={onClickNewCanvasWithRasterLayerFromImage}
isDisabled={isStaging || isBusy}
>
{t('controlLayers.asRasterLayer')}
</MenuItem>
<MenuItem
icon={<PiFileBold />}
onClickCapture={onClickNewCanvasWithRasterLayerFromImageWithResize}
isDisabled={isBusy}
isDisabled={isStaging || isBusy}
>
{t('controlLayers.asRasterLayerResize')}
</MenuItem>
<MenuItem
icon={<PiFileBold />}
onClickCapture={onClickNewCanvasWithControlLayerFromImage}
isDisabled={isBusy}
isDisabled={isStaging || isBusy}
>
{t('controlLayers.asControlLayer')}
</MenuItem>
<MenuItem
icon={<PiFileBold />}
onClickCapture={onClickNewCanvasWithControlLayerFromImageWithResize}
isDisabled={isBusy}
isDisabled={isStaging || isBusy}
>
{t('controlLayers.asControlLayerResize')}
</MenuItem>

View File

@@ -28,7 +28,7 @@ export const ImageMenuItemUseAsRefImage = memo(() => {
return (
<MenuItem icon={<PiImageBold />} onClickCapture={onClickNewGlobalReferenceImageFromImage}>
Use as Reference Image
{t('controlLayers.useAsReferenceImage')}
</MenuItem>
);
});

View File

@@ -59,7 +59,7 @@ const ImageAtPosition = memo(({ imageName }: { index: number; imageName: string
imagesApi.endpoints.getImageDTO.useQuerySubscription(imageName, { skip: isUninitialized });
if (!imageDTO) {
return <GalleryImagePlaceholder />;
return <GalleryImagePlaceholder data-image-name={imageName} />;
}
return <GalleryImage imageDTO={imageDTO} />;

View File

@@ -1,5 +1,6 @@
import { useAppStore } from 'app/store/storeHooks';
import { useCanvasManagerSafe } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { useCanvasIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { newCanvasFromImage } from 'features/imageActions/actions';
import { toast } from 'features/toast/toast';
import { navigationApi } from 'features/ui/layouts/navigation-api';
@@ -13,13 +14,17 @@ export const useEditImage = (imageDTO?: ImageDTO | null) => {
const { getState, dispatch } = useAppStore();
const canvasManager = useCanvasManagerSafe();
const isStaging = useCanvasIsStaging();
const isEnabled = useMemo(() => {
if (!imageDTO) {
return false;
}
if (isStaging) {
return false;
}
return true;
}, [imageDTO]);
}, [imageDTO, isStaging]);
const edit = useCallback(async () => {
if (!imageDTO) {

View File

@@ -35,7 +35,7 @@ const CleanEditorContent = () => {
<Flex flexDir="column" alignItems="flex-start" gap={2}>
<Heading size="sm">{t('nodes.newWorkflow')}</Heading>
<Text color="base.300" fontSize="sm">
Create a new workflow from scratch
{t('ui.launchpad.createNewWorkflowFromScratch')}
</Text>
</Flex>
</LaunchpadButton>
@@ -44,7 +44,7 @@ const CleanEditorContent = () => {
<Flex flexDir="column" alignItems="flex-start" gap={2}>
<Heading size="sm">{t('nodes.loadWorkflow')}</Heading>
<Text color="base.300" fontSize="sm">
Browse and load existing workflows
{t('ui.launchpad.browseAndLoadWorkflows')}
</Text>
</Flex>
</LaunchpadButton>

View File

@@ -5,6 +5,7 @@ import { selectRefImagesSlice } from 'features/controlLayers/store/refImagesSlic
import { selectCanvasMetadata, selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { isFluxKontextReferenceImageConfig } from 'features/controlLayers/store/types';
import { getGlobalReferenceImageWarnings } from 'features/controlLayers/store/validators';
import { zImageField } from 'features/nodes/types/common';
import { addFLUXFill } from 'features/nodes/util/graph/generation/addFLUXFill';
import { addFLUXLoRAs } from 'features/nodes/util/graph/generation/addFLUXLoRAs';
import { addFLUXReduxes } from 'features/nodes/util/graph/generation/addFLUXRedux';
@@ -155,24 +156,17 @@ export const buildFLUXGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
.filter((entity) => getGlobalReferenceImageWarnings(entity, model).length === 0);
if (validFLUXKontextConfigs.length > 0) {
const kontextCollector = g.addNode({
id: getPrefixedId('flux_kontext_collector'),
type: 'collect',
const kontextConcatenator = g.addNode({
id: getPrefixedId('flux_kontext_image_prep'),
type: 'flux_kontext_image_prep',
images: validFLUXKontextConfigs.map(({ config }) => zImageField.parse(config.image)),
});
g.addEdge(kontextCollector, 'collection', denoise, 'kontext_conditioning');
for (const kontextConfig of validFLUXKontextConfigs) {
const { image } = kontextConfig.config;
assert(image, 'getGlobalReferenceImageWarnings checks if the image is there, this should never raise');
const kontextConditioning = g.addNode({
type: 'flux_kontext',
id: getPrefixedId(`flux_kontext_${kontextConfig.id}`),
image,
});
g.addEdge(kontextConditioning, 'kontext_cond', kontextCollector, 'item');
}
const kontextConditioning = g.addNode({
type: 'flux_kontext',
id: getPrefixedId('flux_kontext'),
});
g.addEdge(kontextConcatenator, 'image', kontextConditioning, 'image');
g.addEdge(kontextConditioning, 'kontext_cond', denoise, 'kontext_conditioning');
g.upsertMetadata({ ref_images: [validFLUXKontextConfigs] }, 'merge');
}

View File

@@ -4,8 +4,7 @@ import { selectMainModelConfig } from 'features/controlLayers/store/paramsSlice'
import { selectRefImagesSlice } from 'features/controlLayers/store/refImagesSlice';
import { isFluxKontextAspectRatioID, isFluxKontextReferenceImageConfig } from 'features/controlLayers/store/types';
import { getGlobalReferenceImageWarnings } from 'features/controlLayers/store/validators';
import type { ImageField } from 'features/nodes/types/common';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { zImageField, zModelIdentifierField } from 'features/nodes/types/common';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import {
getOriginalAndScaledSizesForTextToImage,
@@ -39,34 +38,63 @@ export const buildFluxKontextGraph = (arg: GraphBuilderArg): GraphBuilderReturn
const validRefImages = refImages.entities
.filter((entity) => entity.isEnabled)
.filter((entity) => isFluxKontextReferenceImageConfig(entity.config))
.filter((entity) => getGlobalReferenceImageWarnings(entity, model).length === 0)
.toReversed(); // sends them in order they are displayed in the list
let input_image: ImageField | undefined = undefined;
if (validRefImages[0]) {
assert(validRefImages.length === 1, 'Flux Kontext can have at most one reference image');
assert(validRefImages[0].config.image, 'Image is required for reference image');
input_image = {
image_name: validRefImages[0].config.image.image_name,
};
}
.filter((entity) => getGlobalReferenceImageWarnings(entity, model).length === 0);
const g = new Graph(getPrefixedId('flux_kontext_txt2img_graph'));
const positivePrompt = g.addNode({
id: getPrefixedId('positive_prompt'),
type: 'string',
});
const fluxKontextImage = g.addNode({
// @ts-expect-error: These nodes are not available in the OSS application
type: input_image ? 'flux_kontext_edit_image' : 'flux_kontext_generate_image',
model: zModelIdentifierField.parse(model),
aspect_ratio: aspectRatio.id,
input_image,
prompt_upsampling: true,
...selectCanvasOutputFields(state),
});
let fluxKontextImage;
if (validRefImages.length > 0) {
if (validRefImages.length === 1) {
// Single reference image - use it directly
const firstImage = validRefImages[0]?.config.image;
assert(firstImage, 'First image should exist when validRefImages.length > 0');
fluxKontextImage = g.addNode({
// @ts-expect-error: These nodes are not available in the OSS application
type: 'flux_kontext_edit_image',
model: zModelIdentifierField.parse(model),
aspect_ratio: aspectRatio.id,
prompt_upsampling: true,
input_image: {
image_name: firstImage.image_name,
},
...selectCanvasOutputFields(state),
});
} else {
// Multiple reference images - use concatenation
const kontextConcatenator = g.addNode({
id: getPrefixedId('flux_kontext_image_prep'),
type: 'flux_kontext_image_prep',
images: validRefImages.map(({ config }) => zImageField.parse(config.image)),
});
fluxKontextImage = g.addNode({
// @ts-expect-error: These nodes are not available in the OSS application
type: 'flux_kontext_edit_image',
model: zModelIdentifierField.parse(model),
aspect_ratio: aspectRatio.id,
prompt_upsampling: true,
...selectCanvasOutputFields(state),
});
// @ts-expect-error: These nodes are not available in the OSS application
g.addEdge(kontextConcatenator, 'image', fluxKontextImage, 'input_image');
}
} else {
fluxKontextImage = g.addNode({
// @ts-expect-error: These nodes are not available in the OSS application
type: 'flux_kontext_generate_image',
model: zModelIdentifierField.parse(model),
aspect_ratio: aspectRatio.id,
prompt_upsampling: true,
...selectCanvasOutputFields(state),
});
}
g.addEdge(
positivePrompt,
@@ -83,6 +111,10 @@ export const buildFluxKontextGraph = (arg: GraphBuilderArg): GraphBuilderReturn
height: originalSize.height,
});
if (validRefImages.length > 0) {
g.upsertMetadata({ ref_images: [validRefImages] }, 'merge');
}
g.setMetadataReceivingNode(fluxKontextImage);
return {

View File

@@ -278,12 +278,6 @@ const getReasonsWhyCannotEnqueueGenerateTab = (arg: {
}
const enabledRefImages = refImages.entities.filter(({ isEnabled }) => isEnabled);
const referenceImageCount = enabledRefImages.length;
// FLUX Kontext via BFL API only supports 1x Reference Image at a time.
if (model?.base === 'flux-kontext' && referenceImageCount > 1) {
reasons.push({ content: i18n.t('parameters.invoke.fluxKontextMultipleReferenceImages') });
}
enabledRefImages.forEach((entity, i) => {
const layerNumber = i + 1;
@@ -633,12 +627,6 @@ const getReasonsWhyCannotEnqueueCanvasTab = (arg: {
});
const enabledRefImages = refImages.entities.filter(({ isEnabled }) => isEnabled);
const referenceImageCount = enabledRefImages.length;
// FLUX Kontext via BFL API only supports 1x Reference Image at a time.
if (model?.base === 'flux-kontext' && referenceImageCount > 1) {
reasons.push({ content: i18n.t('parameters.invoke.fluxKontextMultipleReferenceImages') });
}
enabledRefImages.forEach((entity, i) => {
const layerNumber = i + 1;

View File

@@ -103,6 +103,7 @@ export const useHotkeyData = (): HotkeysData => {
addHotkey('canvas', 'setFillToWhite', ['d']);
addHotkey('canvas', 'fitLayersToCanvas', ['mod+0']);
addHotkey('canvas', 'fitBboxToCanvas', ['mod+shift+0']);
addHotkey('canvas', 'fitBboxToLayers', ['shift+n']);
addHotkey('canvas', 'setZoomTo100Percent', ['mod+1']);
addHotkey('canvas', 'setZoomTo200Percent', ['mod+2']);
addHotkey('canvas', 'setZoomTo400Percent', ['mod+3']);

View File

@@ -42,7 +42,7 @@ export const VerticalNavBar = memo(() => {
<Flex flexDir="column" alignItems="center" py={6} ps={4} pe={2} gap={4} minW={0} flexShrink={0}>
<InvokeAILogoComponent />
<Flex gap={6} pt={6} h="full" flexDir="column">
{withGenerateTab && <TabButton tab="generate" icon={<PiTextAaBold />} label="Generate" />}
{withGenerateTab && <TabButton tab="generate" icon={<PiTextAaBold />} label={t('ui.tabs.generate')} />}
{withCanvasTab && <TabButton tab="canvas" icon={<PiBoundingBoxBold />} label={t('ui.tabs.canvas')} />}
{withUpscalingTab && <TabButton tab="upscaling" icon={<PiFrameCornersBold />} label={t('ui.tabs.upscaling')} />}
{withWorkflowsTab && <TabButton tab="workflows" icon={<PiFlowArrowBold />} label={t('ui.tabs.workflows')} />}

View File

@@ -1,6 +1,7 @@
import { Alert, Button, Flex, Grid, Text } from '@invoke-ai/ui-library';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { InitialStateMainModelPicker } from './InitialStateMainModelPicker';
import { LaunchpadAddStyleReference } from './LaunchpadAddStyleReference';
@@ -8,17 +9,18 @@ import { LaunchpadContainer } from './LaunchpadContainer';
import { LaunchpadGenerateFromTextButton } from './LaunchpadGenerateFromTextButton';
export const GenerateLaunchpadPanel = memo(() => {
const { t } = useTranslation();
const newCanvasSession = useCallback(() => {
navigationApi.switchToTab('canvas');
}, []);
return (
<LaunchpadContainer heading="Generate images from text prompts.">
<LaunchpadContainer heading={t('ui.launchpad.generateTitle')}>
<Grid gridTemplateColumns="1fr 1fr" gap={8}>
<InitialStateMainModelPicker />
<Flex flexDir="column" gap={2} justifyContent="center">
<Text>
Want to learn what prompts work best for each model?{' '}
{t('ui.launchpad.modelGuideText')}{' '}
<Button
as="a"
variant="link"
@@ -27,7 +29,7 @@ export const GenerateLaunchpadPanel = memo(() => {
rel="noopener noreferrer"
size="sm"
>
Check out our Model Guide.
{t('ui.launchpad.modelGuideLink')}
</Button>
</Text>
</Flex>
@@ -36,10 +38,10 @@ export const GenerateLaunchpadPanel = memo(() => {
<LaunchpadAddStyleReference />
<Alert status="info" borderRadius="base" flexDir="column" gap={2} overflow="unset">
<Text fontSize="md" fontWeight="semibold">
Looking to get more control, edit, and iterate on your images?
{t('ui.launchpad.generate.canvasCalloutTitle')}
</Text>
<Button variant="link" onClick={newCanvasSession}>
Navigate to Canvas for more capabilities.
{t('ui.launchpad.generate.canvasCalloutLink')}
</Button>
</Alert>
</LaunchpadContainer>

View File

@@ -4,16 +4,18 @@ import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
import { getDefaultRefImageConfig } from 'features/controlLayers/hooks/addLayerHooks';
import { refImageAdded } from 'features/controlLayers/store/refImagesSlice';
import { imageDTOToImageWithDims } from 'features/controlLayers/store/util';
import { addGlobalReferenceImageDndTarget, newCanvasFromImageDndTarget } from 'features/dnd/dnd';
import { addGlobalReferenceImageDndTarget } from 'features/dnd/dnd';
import { DndDropTarget } from 'features/dnd/DndDropTarget';
import { LaunchpadButton } from 'features/ui/layouts/LaunchpadButton';
import { memo, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiUploadBold, PiUserCircleGearBold } from 'react-icons/pi';
import type { ImageDTO } from 'services/api/types';
const dndTargetData = addGlobalReferenceImageDndTarget.getData();
export const LaunchpadAddStyleReference = memo((props: { extraAction?: () => void }) => {
const { t } = useTranslation();
const { dispatch, getState } = useAppStore();
const uploadOptions = useMemo(
@@ -36,14 +38,14 @@ export const LaunchpadAddStyleReference = memo((props: { extraAction?: () => voi
<LaunchpadButton {...uploadApi.getUploadButtonProps()} position="relative" gap={8}>
<Icon as={PiUserCircleGearBold} boxSize={8} color="base.500" />
<Flex flexDir="column" alignItems="flex-start" gap={2}>
<Heading size="sm">Add a Style Reference</Heading>
<Text color="base.300">Add an image to transfer its look.</Text>
<Heading size="sm">{t('ui.launchpad.addStyleRef.title')}</Heading>
<Text>{t('ui.launchpad.addStyleRef.description')}</Text>
</Flex>
<Flex position="absolute" right={3} bottom={3}>
<PiUploadBold />
<input {...uploadApi.getUploadInputProps()} />
</Flex>
<DndDropTarget dndTarget={newCanvasFromImageDndTarget} dndTargetData={dndTargetData} label="Drop" />
<DndDropTarget dndTarget={addGlobalReferenceImageDndTarget} dndTargetData={dndTargetData} label="Drop" />
</LaunchpadButton>
);
});

View File

@@ -1,11 +1,13 @@
import { Flex, Heading, Icon, Text } from '@invoke-ai/ui-library';
import { useAppStore } from 'app/store/storeHooks';
import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
import { useCanvasIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { newCanvasFromImageDndTarget } from 'features/dnd/dnd';
import { DndDropTarget } from 'features/dnd/DndDropTarget';
import { newCanvasFromImage } from 'features/imageActions/actions';
import { LaunchpadButton } from 'features/ui/layouts/LaunchpadButton';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPencilBold, PiUploadBold } from 'react-icons/pi';
import type { ImageDTO } from 'services/api/types';
@@ -14,7 +16,9 @@ const NEW_CANVAS_OPTIONS = { type: 'raster_layer', withInpaintMask: true } as co
const dndTargetData = newCanvasFromImageDndTarget.getData(NEW_CANVAS_OPTIONS);
export const LaunchpadEditImageButton = memo((props: { extraAction?: () => void }) => {
const { t } = useTranslation();
const { getState, dispatch } = useAppStore();
const isStaging = useCanvasIsStaging();
const onUpload = useCallback(
(imageDTO: ImageDTO) => {
@@ -26,17 +30,22 @@ export const LaunchpadEditImageButton = memo((props: { extraAction?: () => void
const uploadApi = useImageUploadButton({ allowMultiple: false, onUpload });
return (
<LaunchpadButton {...uploadApi.getUploadButtonProps()} position="relative" gap={8}>
<LaunchpadButton {...uploadApi.getUploadButtonProps()} position="relative" gap={8} isDisabled={isStaging}>
<Icon as={PiPencilBold} boxSize={8} color="base.500" />
<Flex flexDir="column" alignItems="flex-start" gap={2}>
<Heading size="sm">Edit Image</Heading>
<Text color="base.300">Add an image to refine.</Text>
<Heading size="sm">{t('ui.launchpad.editImage.title')}</Heading>
<Text>{t('ui.launchpad.editImage.description')}</Text>
</Flex>
<Flex position="absolute" right={3} bottom={3}>
<PiUploadBold />
<input {...uploadApi.getUploadInputProps()} />
</Flex>
<DndDropTarget dndTarget={newCanvasFromImageDndTarget} dndTargetData={dndTargetData} label="Drop" />
<DndDropTarget
dndTarget={newCanvasFromImageDndTarget}
dndTargetData={dndTargetData}
label="Drop"
isDisabled={isStaging}
/>
</LaunchpadButton>
);
});

View File

@@ -1,6 +1,10 @@
import { Flex, Heading, Icon, Text } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useCanvasIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { LaunchpadButton } from 'features/ui/layouts/LaunchpadButton';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiCursorTextBold, PiTextAaBold } from 'react-icons/pi';
const focusOnPrompt = () => {
@@ -12,16 +16,20 @@ const focusOnPrompt = () => {
};
export const LaunchpadGenerateFromTextButton = memo((props: { extraAction?: () => void }) => {
const { t } = useTranslation();
const tab = useAppSelector(selectActiveTab);
const isStaging = useCanvasIsStaging();
const onClick = useCallback(() => {
focusOnPrompt();
props.extraAction?.();
}, [props]);
return (
<LaunchpadButton onClick={onClick} position="relative" gap={8}>
<LaunchpadButton onClick={onClick} position="relative" gap={8} isDisabled={tab === 'canvas' && isStaging}>
<Icon as={PiTextAaBold} boxSize={8} color="base.500" />
<Flex flexDir="column" alignItems="flex-start" gap={2}>
<Heading size="sm">Generate from Text</Heading>
<Text color="base.300">Enter a prompt and Invoke.</Text>
<Heading size="sm">{t('ui.launchpad.generateFromText.title')}</Heading>
<Text>{t('ui.launchpad.generateFromText.description')}</Text>
</Flex>
<Flex position="absolute" right={3} bottom={3}>
<PiCursorTextBold />

View File

@@ -1,10 +1,12 @@
import { Flex, Heading, Icon, Text } from '@invoke-ai/ui-library';
import { useAppStore } from 'app/store/storeHooks';
import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
import { useCanvasIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { newCanvasFromImageDndTarget } from 'features/dnd/dnd';
import { DndDropTarget } from 'features/dnd/DndDropTarget';
import { newCanvasFromImage } from 'features/imageActions/actions';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiRectangleDashedBold, PiUploadBold } from 'react-icons/pi';
import type { ImageDTO } from 'services/api/types';
@@ -15,7 +17,9 @@ const NEW_CANVAS_OPTIONS = { type: 'control_layer', withResize: true } as const;
const dndTargetData = newCanvasFromImageDndTarget.getData(NEW_CANVAS_OPTIONS);
export const LaunchpadUseALayoutImageButton = memo((props: { extraAction?: () => void }) => {
const { t } = useTranslation();
const { getState, dispatch } = useAppStore();
const isStaging = useCanvasIsStaging();
const onUpload = useCallback(
(imageDTO: ImageDTO) => {
@@ -27,17 +31,22 @@ export const LaunchpadUseALayoutImageButton = memo((props: { extraAction?: () =>
const uploadApi = useImageUploadButton({ allowMultiple: false, onUpload });
return (
<LaunchpadButton {...uploadApi.getUploadButtonProps()} position="relative" gap={8}>
<LaunchpadButton {...uploadApi.getUploadButtonProps()} position="relative" gap={8} isDisabled={isStaging}>
<Icon as={PiRectangleDashedBold} boxSize={8} color="base.500" />
<Flex flexDir="column" alignItems="flex-start" gap={2}>
<Heading size="sm">Use a Layout Image</Heading>
<Text color="base.300">Add an image to control composition.</Text>
<Heading size="sm">{t('ui.launchpad.useALayoutImage.title')}</Heading>
<Text>{t('ui.launchpad.useALayoutImage.description')}</Text>
</Flex>
<Flex position="absolute" right={3} bottom={3}>
<PiUploadBold />
<input {...uploadApi.getUploadInputProps()} />
</Flex>
<DndDropTarget dndTarget={newCanvasFromImageDndTarget} dndTargetData={dndTargetData} label="Drop" />
<DndDropTarget
dndTarget={newCanvasFromImageDndTarget}
dndTargetData={dndTargetData}
label="Drop"
isDisabled={isStaging}
/>
</LaunchpadButton>
);
});

View File

@@ -117,7 +117,7 @@ export const UpscalingLaunchpadPanel = memo(() => {
{/* Left Column: Creativity and Structural Defaults */}
<Box>
<Text fontWeight="semibold" fontSize="sm" mb={3}>
Creativity & Structure Defaults
{t('ui.launchpad.upscaling.creativityAndStructure.title')}
</Text>
<ButtonGroup size="sm" orientation="vertical" variant="outline" w="full">
<Button
@@ -126,7 +126,7 @@ export const UpscalingLaunchpadPanel = memo(() => {
onClick={onConservativeClick}
leftIcon={<PiShieldCheckBold />}
>
Conservative
{t('ui.launchpad.upscaling.creativityAndStructure.conservative')}
</Button>
<Button
colorScheme={creativity === 0 && structure === 0 ? 'invokeBlue' : undefined}
@@ -134,7 +134,7 @@ export const UpscalingLaunchpadPanel = memo(() => {
onClick={onBalancedClick}
leftIcon={<PiScalesBold />}
>
Balanced
{t('ui.launchpad.upscaling.creativityAndStructure.balanced')}
</Button>
<Button
colorScheme={creativity === 5 && structure === -2 ? 'invokeBlue' : undefined}
@@ -142,7 +142,7 @@ export const UpscalingLaunchpadPanel = memo(() => {
onClick={onCreativeClick}
leftIcon={<PiPaletteBold />}
>
Creative
{t('ui.launchpad.upscaling.creativityAndStructure.creative')}
</Button>
<Button
colorScheme={creativity === 8 && structure === -5 ? 'invokeBlue' : undefined}
@@ -150,7 +150,7 @@ export const UpscalingLaunchpadPanel = memo(() => {
onClick={onArtisticClick}
leftIcon={<PiSparkleBold />}
>
Artistic
{t('ui.launchpad.upscaling.creativityAndStructure.artistic')}
</Button>
</ButtonGroup>
</Box>

View File

@@ -16,6 +16,7 @@ import { AutoLayoutProvider, useAutoLayoutContext, withPanelContainer } from 'fe
import { CanvasLaunchpadPanel } from 'features/ui/layouts/CanvasLaunchpadPanel';
import type { TabName } from 'features/ui/store/uiTypes';
import { dockviewTheme } from 'features/ui/styles/theme';
import { t } from 'i18next';
import { memo, useCallback, useEffect } from 'react';
import { CanvasTabLeftPanel } from './CanvasTabLeftPanel';
@@ -65,7 +66,7 @@ const initializeCenterPanelLayout = (tab: TabName, api: DockviewApi) => {
const launchpad = api.addPanel<PanelParameters>({
id: LAUNCHPAD_PANEL_ID,
component: LAUNCHPAD_PANEL_ID,
title: 'Launchpad',
title: t('ui.panels.launchpad'),
tabComponent: DOCKVIEW_TAB_LAUNCHPAD_ID,
params: {
tab,
@@ -76,7 +77,7 @@ const initializeCenterPanelLayout = (tab: TabName, api: DockviewApi) => {
api.addPanel<PanelParameters>({
id: WORKSPACE_PANEL_ID,
component: WORKSPACE_PANEL_ID,
title: 'Canvas',
title: t('ui.panels.canvas'),
tabComponent: DOCKVIEW_TAB_CANVAS_WORKSPACE_ID,
params: {
tab,
@@ -91,7 +92,7 @@ const initializeCenterPanelLayout = (tab: TabName, api: DockviewApi) => {
api.addPanel<PanelParameters>({
id: VIEWER_PANEL_ID,
component: VIEWER_PANEL_ID,
title: 'Image Viewer',
title: t('ui.panels.imageViewer'),
tabComponent: DOCKVIEW_TAB_CANVAS_VIEWER_ID,
params: {
tab,

View File

@@ -14,6 +14,7 @@ import type {
import { AutoLayoutProvider, useAutoLayoutContext, withPanelContainer } from 'features/ui/layouts/auto-layout-context';
import type { TabName } from 'features/ui/store/uiTypes';
import { dockviewTheme } from 'features/ui/styles/theme';
import { t } from 'i18next';
import { memo, useCallback, useEffect } from 'react';
import { DockviewTab } from './DockviewTab';
@@ -59,7 +60,7 @@ const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
const launchpad = api.addPanel<PanelParameters>({
id: LAUNCHPAD_PANEL_ID,
component: LAUNCHPAD_PANEL_ID,
title: 'Launchpad',
title: t('ui.panels.launchpad'),
tabComponent: DOCKVIEW_TAB_LAUNCHPAD_ID,
params: {
tab,
@@ -70,7 +71,7 @@ const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
api.addPanel<PanelParameters>({
id: VIEWER_PANEL_ID,
component: VIEWER_PANEL_ID,
title: 'Image Viewer',
title: t('ui.panels.imageViewer'),
tabComponent: DOCKVIEW_TAB_PROGRESS_ID,
params: {
tab,

View File

@@ -15,6 +15,7 @@ import { AutoLayoutProvider, useAutoLayoutContext, withPanelContainer } from 'fe
import { DockviewTab } from 'features/ui/layouts/DockviewTab';
import type { TabName } from 'features/ui/store/uiTypes';
import { dockviewTheme } from 'features/ui/styles/theme';
import { t } from 'i18next';
import { memo, useCallback, useEffect } from 'react';
import { DockviewTabLaunchpad } from './DockviewTabLaunchpad';
@@ -59,7 +60,7 @@ const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
const launchpad = api.addPanel<PanelParameters>({
id: LAUNCHPAD_PANEL_ID,
component: LAUNCHPAD_PANEL_ID,
title: 'Launchpad',
title: t('ui.panels.launchpad'),
tabComponent: DOCKVIEW_TAB_LAUNCHPAD_ID,
params: {
tab,
@@ -70,7 +71,7 @@ const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
api.addPanel<PanelParameters>({
id: VIEWER_PANEL_ID,
component: VIEWER_PANEL_ID,
title: 'Image Viewer',
title: t('ui.panels.imageViewer'),
tabComponent: DOCKVIEW_TAB_PROGRESS_ID,
params: {
tab,

View File

@@ -17,6 +17,7 @@ import { AutoLayoutProvider, useAutoLayoutContext, withPanelContainer } from 'fe
import { DockviewTab } from 'features/ui/layouts/DockviewTab';
import type { TabName } from 'features/ui/store/uiTypes';
import { dockviewTheme } from 'features/ui/styles/theme';
import { t } from 'i18next';
import { memo, useCallback, useEffect } from 'react';
import { DockviewTabLaunchpad } from './DockviewTabLaunchpad';
@@ -62,7 +63,7 @@ const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
const launchpad = api.addPanel<PanelParameters>({
id: LAUNCHPAD_PANEL_ID,
component: LAUNCHPAD_PANEL_ID,
title: 'Launchpad',
title: t('ui.panels.launchpad'),
tabComponent: DOCKVIEW_TAB_LAUNCHPAD_ID,
params: {
tab,
@@ -73,7 +74,7 @@ const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
api.addPanel<PanelParameters>({
id: WORKSPACE_PANEL_ID,
component: WORKSPACE_PANEL_ID,
title: 'Workflow Editor',
title: t('ui.panels.workflowEditor'),
tabComponent: DOCKVIEW_TAB_ID,
params: {
tab,
@@ -88,7 +89,7 @@ const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
api.addPanel<PanelParameters>({
id: VIEWER_PANEL_ID,
component: VIEWER_PANEL_ID,
title: 'Image Viewer',
title: t('ui.panels.imageViewer'),
tabComponent: DOCKVIEW_TAB_PROGRESS_ID,
params: {
tab,

File diff suppressed because one or more lines are too long

View File

@@ -1 +1 @@
__version__ = "6.3.0a1"
__version__ = "6.3.0"