Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-15 14:18:04 -05:00)

Compare commits: bria-UI...bria-clone (13 commits)
| SHA1 |
|---|
| bd71459955 |
| a5542370a6 |
| c296fd2305 |
| c08a6a852d |
| e1139de551 |
| 44b7b9c29d |
| 2d55dbe67a |
| 04ea87b0bb |
| 7140f2ec72 |
| 9e5e1ec0da |
| a139885bf7 |
| f5423133a8 |
| 9c9265cdad |
@@ -28,14 +28,12 @@ from invokeai.invocation_api import Classification, ImageOutput

DEPTH_SMALL_V2_URL = "depth-anything/Depth-Anything-V2-Small-hf"
HF_LLLYASVIEL = "https://huggingface.co/lllyasviel/Annotators/resolve/main/"


class BriaControlNetField(BaseModel):
    image: ImageField = Field(description="The control image")
    model: ModelIdentifierField = Field(description="The ControlNet model to use")
    mode: BRIA_CONTROL_MODES = Field(description="The mode of the ControlNet")
    conditioning_scale: float = Field(description="The weight given to the ControlNet")


@invocation_output("bria_controlnet_output")
class BriaControlNetOutput(BaseInvocationOutput):
    """Bria ControlNet info"""

@@ -59,8 +57,12 @@ class BriaControlNetInvocation(BaseInvocation, WithMetadata, WithBoard):
    control_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.controlnet_model, ui_type=UIType.BriaControlNetModel
    )
    control_mode: BRIA_CONTROL_MODES = InputField(default="depth", description="The mode of the ControlNet")
    control_weight: float = InputField(default=1.0, ge=-1, le=2, description="The weight given to the ControlNet")
    control_mode: BRIA_CONTROL_MODES = InputField(
        default="depth", description="The mode of the ControlNet"
    )
    control_weight: float = InputField(
        default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"
    )

    def invoke(self, context: InvocationContext) -> BriaControlNetOutput:
        image_in = resize_img(context.images.get_pil(self.control_image.image_name))

@@ -105,7 +107,6 @@ RATIO_CONFIGS_1024 = {
    1.7708333333333333: {"width": 1360, "height": 768},
}


def extract_depth(image: Image.Image, context: InvocationContext):
    loaded_model = context.models.load_remote_model(DEPTH_SMALL_V2_URL, DepthAnythingPipeline.load_model)

@@ -114,7 +115,6 @@ def extract_depth(image: Image.Image, context: InvocationContext):
    depth_map = depth_anything_detector.generate_depth(image)
    return depth_map


def extract_openpose(image: Image.Image, context: InvocationContext):
    body_model = context.models.load_remote_model(f"{HF_LLLYASVIEL}body_pose_model.pth", Body)
    hand_model = context.models.load_remote_model(f"{HF_LLLYASVIEL}hand_pose_model.pth", Hand)

@@ -138,17 +138,13 @@ def extract_canny(input_image):


def convert_to_grayscale(image):
    gray_image = image.convert("L").convert("RGB")
    gray_image = image.convert('L').convert('RGB')
    return gray_image


def tile(downscale_factor, input_image):
    control_image = input_image.resize(
        (input_image.size[0] // downscale_factor, input_image.size[1] // downscale_factor)
    ).resize(input_image.size, Image.Resampling.NEAREST)
    control_image = input_image.resize((input_image.size[0] // downscale_factor, input_image.size[1] // downscale_factor)).resize(input_image.size, Image.Resampling.NEAREST)
    return control_image


def resize_img(control_image):
    image_ratio = control_image.width / control_image.height
    ratio = min(RATIO_CONFIGS_1024.keys(), key=lambda k: abs(k - image_ratio))
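The tile preprocessor in the hunk above builds its control image by downscaling and then upscaling with nearest-neighbor resampling, which flattens the image into coarse blocks. A minimal standalone sketch of the same trick (the file path and factor are illustrative):

```python
from PIL import Image

def tile_control_image(image: Image.Image, downscale_factor: int) -> Image.Image:
    # Shrink, then blow back up with NEAREST so each low-res pixel
    # becomes a flat block; only coarse structure survives.
    small = image.resize(
        (image.size[0] // downscale_factor, image.size[1] // downscale_factor)
    )
    return small.resize(image.size, Image.Resampling.NEAREST)

# Illustrative usage (path is hypothetical):
# control = tile_control_image(Image.open("input.png"), downscale_factor=8)
```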
@@ -33,7 +33,7 @@ class BriaDecoderInvocation(BaseInvocation):

        with context.models.load(self.vae.vae) as vae:
            assert isinstance(vae, AutoencoderKL)
            latents = latents / vae.config.scaling_factor
            latents = (latents / vae.config.scaling_factor)
            latents = latents.to(device=vae.device, dtype=vae.dtype)

            decoded_output = vae.decode(latents)
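The decoder change above touches the standard diffusers decode path: latents are divided by the VAE scaling factor, moved to the VAE's device and dtype, and decoded. A hedged sketch of that flow with a standalone AutoencoderKL (the checkpoint name is illustrative; the Bria node loads its VAE through InvokeAI's model manager instead):

```python
import torch
from diffusers import AutoencoderKL

# Illustrative checkpoint, not the one used by the Bria nodes.
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")

@torch.no_grad()
def decode_latents(latents: torch.Tensor) -> torch.Tensor:
    # Undo the scaling applied at encode time, then decode to pixel space.
    latents = latents / vae.config.scaling_factor
    latents = latents.to(device=vae.device, dtype=vae.dtype)
    image = vae.decode(latents).sample  # (B, 3, H, W), roughly in [-1, 1]
    return (image / 2 + 0.5).clamp(0, 1)
```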
@@ -81,7 +81,7 @@ class BriaDenoiseInvocation(BaseInvocation):
        description="ControlNet",
        input=Input.Connection,
        title="ControlNet",
        default=None,
        default = None,
    )

    @torch.no_grad()

@@ -151,9 +151,16 @@ class BriaDenoiseInvocation(BaseInvocation):
        latents_output = LatentsField(latents_name=saved_input_latents_tensor)
        return BriaDenoiseInvocationOutput(latents=latents_output)


    def _prepare_multi_control(
        self, context: InvocationContext, vae: AutoencoderKL, width: int, height: int, device: torch.device
        self,
        context: InvocationContext,
        vae: AutoencoderKL,
        width: int,
        height: int,
        device: torch.device
    ) -> Tuple[BriaMultiControlNetModel, List[torch.Tensor], List[torch.Tensor], List[float]]:

        control = self.control if isinstance(self.control, list) else [self.control]
        control_images, control_models, control_modes, control_scales = [], [], [], []
        for controlnet in control:

@@ -164,9 +171,7 @@ class BriaDenoiseInvocation(BaseInvocation):
            try:
                control_images.append(context.images.get_pil(controlnet.image.image_name))
            except Exception:
                raise FileNotFoundError(
                    f"Control image {controlnet.image.image_name} not found. Make sure not to delete the preprocessed image before finishing the pipeline."
                )
                raise FileNotFoundError(f"Control image {controlnet.image.image_name} not found. Make sure not to delete the preprocessed image before finishing the pipeline.")

        control_model = BriaMultiControlNetModel(control_models).to(device)
        tensored_control_images, tensored_control_modes = prepare_control_images(

@@ -176,5 +181,5 @@ class BriaDenoiseInvocation(BaseInvocation):
            width=width,
            height=height,
            device=device,
        )
            )
        return control_model, tensored_control_images, tensored_control_modes, control_scales

@@ -63,7 +63,7 @@ class BriaLatentSamplerInvocation(BaseInvocation):
            dtype=dtype,
            device=device,
            generator=generator,
        )
            )

        saved_latents_tensor = context.tensors.save(latents)
        saved_latent_image_ids_tensor = context.tensors.save(latent_image_ids)
@@ -51,7 +51,6 @@ from invokeai.backend.model_manager.metadata import (
from invokeai.backend.model_manager.metadata.metadata_base import HuggingFaceMetadata
from invokeai.backend.model_manager.search import ModelSearch
from invokeai.backend.model_manager.taxonomy import ModelRepoVariant, ModelSourceType
from invokeai.backend.model_manager.util.lora_metadata_extractor import apply_lora_metadata
from invokeai.backend.util import InvokeAILogger
from invokeai.backend.util.catch_sigint import catch_sigint
from invokeai.backend.util.devices import TorchDevice

@@ -668,10 +667,6 @@ class ModelInstallService(ModelInstallServiceBase):

        info = info or self._probe(model_path, config)

        # Apply LoRA metadata if applicable
        model_images_path = self.app_config.models_path / "model_images"
        apply_lora_metadata(info, model_path.resolve(), model_images_path)

        model_path = model_path.resolve()

        # Models in the Invoke-managed models dir should use relative paths.
@@ -8,16 +8,7 @@ from invokeai.backend.bria.controlnet_aux.util import HWC3, resize_image


class CannyDetector:
    def __call__(
        self,
        input_image=None,
        low_threshold=100,
        high_threshold=200,
        detect_resolution=512,
        image_resolution=512,
        output_type=None,
        **kwargs,
    ):
    def __call__(self, input_image=None, low_threshold=100, high_threshold=200, detect_resolution=512, image_resolution=512, output_type=None, **kwargs):
        if "img" in kwargs:
            warnings.warn("img is deprecated, please use `input_image=...` instead.", DeprecationWarning, stacklevel=2)
            input_image = kwargs.pop("img")
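The `img` handling in CannyDetector.__call__ is the usual recipe for renaming a keyword argument without breaking old callers: accept it through **kwargs, warn, and forward the value. A minimal sketch of the pattern with hypothetical names:

```python
import warnings

def process(input_image=None, **kwargs):
    # Accept the legacy keyword, warn, and forward it to the new name.
    if "img" in kwargs:
        warnings.warn(
            "img is deprecated, please use `input_image=...` instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        input_image = kwargs.pop("img")
    if kwargs:
        raise TypeError(f"unexpected keyword arguments: {sorted(kwargs)}")
    return input_image
```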
@@ -29,14 +29,12 @@ from invokeai.backend.bria.controlnet_aux.util import HWC3, resize_image
HandResult = List[Keypoint]
FaceResult = List[Keypoint]


class PoseResult(NamedTuple):
    body: BodyResult
    left_hand: Union[HandResult, None]
    right_hand: Union[HandResult, None]
    face: Union[FaceResult, None]


def draw_poses(poses: List[PoseResult], H, W, draw_body=True, draw_hand=True, draw_face=True):
    """
    Draw the detected poses on an empty canvas.

@@ -75,22 +73,14 @@ class OpenposeDetector:
    Attributes:
        model_dir (str): Path to the directory where the pose models are stored.
    """

    def __init__(self, body_estimation, hand_estimation=None, face_estimation=None):
        self.body_estimation = body_estimation
        self.hand_estimation = hand_estimation
        self.face_estimation = face_estimation

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_or_path,
        filename=None,
        hand_filename=None,
        face_filename=None,
        cache_dir=None,
        local_files_only=False,
    ):
    def from_pretrained(cls, pretrained_model_or_path, filename=None, hand_filename=None, face_filename=None, cache_dir=None, local_files_only=False):

        if pretrained_model_or_path == "lllyasviel/ControlNet":
            filename = filename or "annotator/ckpts/body_pose_model.pth"
            hand_filename = hand_filename or "annotator/ckpts/hand_pose_model.pth"

@@ -109,15 +99,9 @@ class OpenposeDetector:
            hand_model_path = os.path.join(pretrained_model_or_path, hand_filename)
            face_model_path = os.path.join(face_pretrained_model_or_path, face_filename)
        else:
            body_model_path = hf_hub_download(
                pretrained_model_or_path, filename, cache_dir=cache_dir, local_files_only=local_files_only
            )
            hand_model_path = hf_hub_download(
                pretrained_model_or_path, hand_filename, cache_dir=cache_dir, local_files_only=local_files_only
            )
            face_model_path = hf_hub_download(
                face_pretrained_model_or_path, face_filename, cache_dir=cache_dir, local_files_only=local_files_only
            )
            body_model_path = hf_hub_download(pretrained_model_or_path, filename, cache_dir=cache_dir, local_files_only=local_files_only)
            hand_model_path = hf_hub_download(pretrained_model_or_path, hand_filename, cache_dir=cache_dir, local_files_only=local_files_only)
            face_model_path = hf_hub_download(face_pretrained_model_or_path, face_filename, cache_dir=cache_dir, local_files_only=local_files_only)

        body_estimation = Body(body_model_path)
        hand_estimation = Hand(hand_model_path)

@@ -136,12 +120,15 @@ class OpenposeDetector:
        right_hand = None
        H, W, _ = oriImg.shape
        for x, y, w, is_left in util.handDetect(body, oriImg):
            peaks = self.hand_estimation(oriImg[y : y + w, x : x + w, :]).astype(np.float32)
            peaks = self.hand_estimation(oriImg[y:y+w, x:x+w, :]).astype(np.float32)
            if peaks.ndim == 2 and peaks.shape[1] == 2:
                peaks[:, 0] = np.where(peaks[:, 0] < 1e-6, -1, peaks[:, 0] + x) / float(W)
                peaks[:, 1] = np.where(peaks[:, 1] < 1e-6, -1, peaks[:, 1] + y) / float(H)

                hand_result = [Keypoint(x=peak[0], y=peak[1]) for peak in peaks]
                hand_result = [
                    Keypoint(x=peak[0], y=peak[1])
                    for peak in peaks
                ]

                if is_left:
                    left_hand = hand_result
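The `np.where` lines in the hand branch map crop-local peak coordinates into normalized full-image coordinates, with near-zero (undetected) peaks turned into a negative sentinel. A small numpy sketch with illustrative values:

```python
import numpy as np

# Peaks in crop-local pixel coordinates; (0, 0) means "not detected".
peaks = np.array([[12.0, 30.0], [0.0, 0.0]], dtype=np.float32)
x, y, W, H = 100, 50, 640, 480  # crop origin and full-image size (illustrative)

# Shift into full-image coordinates and normalize; misses stay negative
# (the -1 sentinel is divided too, which keeps it below zero).
peaks[:, 0] = np.where(peaks[:, 0] < 1e-6, -1, peaks[:, 0] + x) / float(W)
peaks[:, 1] = np.where(peaks[:, 1] < 1e-6, -1, peaks[:, 1] + y) / float(H)
print(peaks)  # [[0.175, 0.1667], [-0.0016, -0.0021]] approximately
```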
@@ -157,12 +144,15 @@ class OpenposeDetector:

        x, y, w = face
        H, W, _ = oriImg.shape
        heatmaps = self.face_estimation(oriImg[y : y + w, x : x + w, :])
        heatmaps = self.face_estimation(oriImg[y:y+w, x:x+w, :])
        peaks = self.face_estimation.compute_peaks_from_heatmaps(heatmaps).astype(np.float32)
        if peaks.ndim == 2 and peaks.shape[1] == 2:
            peaks[:, 0] = np.where(peaks[:, 0] < 1e-6, -1, peaks[:, 0] + x) / float(W)
            peaks[:, 1] = np.where(peaks[:, 1] < 1e-6, -1, peaks[:, 1] + y) / float(H)
            return [Keypoint(x=peak[0], y=peak[1]) for peak in peaks]
            return [
                Keypoint(x=peak[0], y=peak[1])
                for peak in peaks
            ]

        return None

@@ -191,44 +181,23 @@ class OpenposeDetector:
            if include_face:
                face = self.detect_face(body, oriImg)

            results.append(
                PoseResult(
                    BodyResult(
                        keypoints=[
                            Keypoint(x=keypoint.x / float(W), y=keypoint.y / float(H))
                            if keypoint is not None
                            else None
                            for keypoint in body.keypoints
                        ],
                        total_score=body.total_score,
                        total_parts=body.total_parts,
                    ),
                    left_hand,
                    right_hand,
                    face,
                )
            )
            results.append(PoseResult(BodyResult(
                keypoints=[
                    Keypoint(
                        x=keypoint.x / float(W),
                        y=keypoint.y / float(H)
                    ) if keypoint is not None else None
                    for keypoint in body.keypoints
                ],
                total_score=body.total_score,
                total_parts=body.total_parts
            ), left_hand, right_hand, face))

        return results

    def __call__(
        self,
        input_image,
        detect_resolution=512,
        image_resolution=512,
        include_body=True,
        include_hand=False,
        include_face=False,
        hand_and_face=None,
        output_type="pil",
        **kwargs,
    ):
    def __call__(self, input_image, detect_resolution=512, image_resolution=512, include_body=True, include_hand=False, include_face=False, hand_and_face=None, output_type="pil", **kwargs):
        if hand_and_face is not None:
            warnings.warn(
                "hand_and_face is deprecated. Use include_hand and include_face instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            warnings.warn("hand_and_face is deprecated. Use include_hand and include_face instead.", DeprecationWarning, stacklevel=2)
            include_hand = hand_and_face
            include_face = hand_and_face

@@ -236,10 +205,7 @@ class OpenposeDetector:
            warnings.warn("return_pil is deprecated. Use output_type instead.", DeprecationWarning, stacklevel=2)
            output_type = "pil" if kwargs["return_pil"] else "np"
        if type(output_type) is bool:
            warnings.warn(
                "Passing `True` or `False` to `output_type` is deprecated and will raise an error in future versions",
                stacklevel=2,
            )
            warnings.warn("Passing `True` or `False` to `output_type` is deprecated and will raise an error in future versions", stacklevel=2)
            if output_type:
                output_type = "pil"


@@ -70,17 +70,17 @@ class Body(object):
        # heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0)) # output 1 is heatmaps
        heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0)) # output 1 is heatmaps
        heatmap = util.smart_resize_k(heatmap, fx=stride, fy=stride)
        heatmap = heatmap[: imageToTest_padded.shape[0] - pad[2], : imageToTest_padded.shape[1] - pad[3], :]
        heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
        heatmap = util.smart_resize(heatmap, (oriImg.shape[0], oriImg.shape[1]))

        # paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs
        paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0)) # output 0 is PAFs
        paf = util.smart_resize_k(paf, fx=stride, fy=stride)
        paf = paf[: imageToTest_padded.shape[0] - pad[2], : imageToTest_padded.shape[1] - pad[3], :]
        paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
        paf = util.smart_resize(paf, (oriImg.shape[0], oriImg.shape[1]))

        heatmap_avg += heatmap_avg + heatmap / len(multiplier)
        paf_avg += +paf / len(multiplier)
        paf_avg += + paf / len(multiplier)

        all_peaks = []
        peak_counter = 0
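For orientation, this hunk sits inside the body estimator's multi-scale loop: the network runs at each scale in `multiplier`, outputs are resized back to the original image, and the per-scale maps are averaged. A schematic sketch of that loop, with the model call left abstract (the helper names and scale values are assumptions, not the upstream API):

```python
import cv2
import numpy as np

def multiscale_average(oriImg: np.ndarray, run_model, scales=(0.5, 1.0, 1.5, 2.0)):
    """Average per-scale heatmaps back at the original resolution.

    run_model is assumed to map an image to an (H', W', C) heatmap.
    """
    H, W = oriImg.shape[:2]
    heatmap_avg = None
    for s in scales:
        scaled = cv2.resize(oriImg, (int(W * s), int(H * s)), interpolation=cv2.INTER_CUBIC)
        heatmap = run_model(scaled)
        # Resize each scale's output back to the original image size.
        heatmap = cv2.resize(heatmap, (W, H), interpolation=cv2.INTER_CUBIC)
        contribution = heatmap / len(scales)
        heatmap_avg = contribution if heatmap_avg is None else heatmap_avg + contribution
    return heatmap_avg
```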
@@ -99,14 +99,7 @@ class Body(object):
            map_down[:, :-1] = one_heatmap[:, 1:]

            peaks_binary = np.logical_and.reduce(
                (
                    one_heatmap >= map_left,
                    one_heatmap >= map_right,
                    one_heatmap >= map_up,
                    one_heatmap >= map_down,
                    one_heatmap > thre1,
                )
            )
                (one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1))
            peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0], strict=False)) # note reverse
            peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
            peak_id = range(peak_counter, peak_counter + len(peaks))
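The peak search above is a 4-neighbour local-maximum test: a pixel counts as a peak when it is at least as large as its four shifted copies and above a threshold. A compact numpy sketch of the same construction (threshold value illustrative):

```python
import numpy as np

def find_peaks(heatmap: np.ndarray, thre1: float = 0.1):
    # Shifted copies of the map; borders stay zero.
    map_left = np.zeros_like(heatmap); map_left[1:, :] = heatmap[:-1, :]
    map_right = np.zeros_like(heatmap); map_right[:-1, :] = heatmap[1:, :]
    map_up = np.zeros_like(heatmap); map_up[:, 1:] = heatmap[:, :-1]
    map_down = np.zeros_like(heatmap); map_down[:, :-1] = heatmap[:, 1:]

    peaks_binary = np.logical_and.reduce(
        (heatmap >= map_left, heatmap >= map_right, heatmap >= map_up, heatmap >= map_down, heatmap > thre1)
    )
    # np.nonzero returns (rows, cols); reverse to get (x, y) pairs.
    ys, xs = np.nonzero(peaks_binary)
    return list(zip(xs, ys))
```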
@@ -116,49 +109,13 @@ class Body(object):
            peak_counter += len(peaks)

        # find connection in the specified sequence, center 29 is in the position 15
        limbSeq = [
            [2, 3],
            [2, 6],
            [3, 4],
            [4, 5],
            [6, 7],
            [7, 8],
            [2, 9],
            [9, 10],
            [10, 11],
            [2, 12],
            [12, 13],
            [13, 14],
            [2, 1],
            [1, 15],
            [15, 17],
            [1, 16],
            [16, 18],
            [3, 17],
            [6, 18],
        ]
        limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
                   [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
                   [1, 16], [16, 18], [3, 17], [6, 18]]
        # the middle joints heatmap correpondence
        mapIdx = [
            [31, 32],
            [39, 40],
            [33, 34],
            [35, 36],
            [41, 42],
            [43, 44],
            [19, 20],
            [21, 22],
            [23, 24],
            [25, 26],
            [27, 28],
            [29, 30],
            [47, 48],
            [49, 50],
            [53, 54],
            [51, 52],
            [55, 56],
            [37, 38],
            [45, 46],
        ]
        mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], \
                  [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], \
                  [55, 56], [37, 38], [45, 46]]

        connection_all = []
        special_k = []

@@ -171,7 +128,7 @@ class Body(object):
            nA = len(candA)
            nB = len(candB)
            indexA, indexB = limbSeq[k]
            if nA != 0 and nB != 0:
            if (nA != 0 and nB != 0):
                connection_candidate = []
                for i in range(nA):
                    for j in range(nB):

@@ -180,45 +137,30 @@ class Body(object):
                        norm = max(0.001, norm)
                        vec = np.divide(vec, norm)

                        startend = list(
                            zip(
                                np.linspace(candA[i][0], candB[j][0], num=mid_num),
                                np.linspace(candA[i][1], candB[j][1], num=mid_num),
                                strict=False,
                            )
                        )
                        startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
                                            np.linspace(candA[i][1], candB[j][1], num=mid_num), strict=False))

                        vec_x = np.array(
                            [
                                score_mid[int(round(startend[i][1])), int(round(startend[i][0])), 0]
                                for i in range(len(startend))
                            ]
                        )
                        vec_y = np.array(
                            [
                                score_mid[int(round(startend[i][1])), int(round(startend[i][0])), 1]
                                for i in range(len(startend))
                            ]
                        )
                        vec_x = np.array([score_mid[int(round(startend[i][1])), int(round(startend[i][0])), 0] \
                                          for i in range(len(startend))])
                        vec_y = np.array([score_mid[int(round(startend[i][1])), int(round(startend[i][0])), 1] \
                                          for i in range(len(startend))])

                        score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                        score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(
                            0.5 * oriImg.shape[0] / norm - 1, 0
                        )
                            0.5 * oriImg.shape[0] / norm - 1, 0)
                        criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts)
                        criterion2 = score_with_dist_prior > 0
                        if criterion1 and criterion2:
                            connection_candidate.append(
                                [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]]
                            )
                                [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]])

                connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
                connection = np.zeros((0, 5))
                for c in range(len(connection_candidate)):
                    i, j, s = connection_candidate[c][0:3]
                    if i not in connection[:, 3] and j not in connection[:, 4]:
                    if (i not in connection[:, 3] and j not in connection[:, 4]):
                        connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
                        if len(connection) >= min(nA, nB):
                        if (len(connection) >= min(nA, nB)):
                            break

                connection_all.append(connection)

@@ -255,7 +197,7 @@ class Body(object):
                    j1, j2 = subset_idx
                    membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
                    if len(np.nonzero(membership == 2)[0]) == 0: # merge
                        subset[j1][:-2] += subset[j2][:-2] + 1
                        subset[j1][:-2] += (subset[j2][:-2] + 1)
                        subset[j1][-2:] += subset[j2][-2:]
                        subset[j1][-2] += connection_all[k][i][2]
                        subset = np.delete(subset, j2, 0)

@@ -306,14 +248,12 @@ class Body(object):
                    x=candidate[candidate_index][0],
                    y=candidate[candidate_index][1],
                    score=candidate[candidate_index][2],
                    id=candidate[candidate_index][3],
                )
                if candidate_index != -1
                else None
                    id=candidate[candidate_index][3]
                ) if candidate_index != -1 else None
                for candidate_index in person[:18].astype(int)
            ],
            total_score=person[18],
            total_parts=person[19],
            total_parts=person[19]
        )
        for person in subset
    ]
@@ -10,77 +10,179 @@ from invokeai.backend.bria.controlnet_aux.open_pose import util


class FaceNet(Module):
    """Model the cascading heatmaps."""

    """Model the cascading heatmaps. """
    def __init__(self):
        super(FaceNet, self).__init__()
        # cnn to make feature map
        self.relu = ReLU()
        self.max_pooling_2d = MaxPool2d(kernel_size=2, stride=2)
        self.conv1_1 = Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1)
        self.conv1_2 = Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1)
        self.conv2_1 = Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1)
        self.conv2_2 = Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1)
        self.conv3_1 = Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1)
        self.conv3_2 = Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1)
        self.conv3_3 = Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1)
        self.conv3_4 = Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1)
        self.conv4_1 = Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1)
        self.conv4_2 = Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1)
        self.conv4_3 = Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1)
        self.conv4_4 = Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1)
        self.conv5_1 = Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1)
        self.conv5_2 = Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1)
        self.conv5_3_CPM = Conv2d(in_channels=512, out_channels=128, kernel_size=3, stride=1, padding=1)
        self.conv1_1 = Conv2d(in_channels=3, out_channels=64,
                              kernel_size=3, stride=1, padding=1)
        self.conv1_2 = Conv2d(
            in_channels=64, out_channels=64, kernel_size=3, stride=1,
            padding=1)
        self.conv2_1 = Conv2d(
            in_channels=64, out_channels=128, kernel_size=3, stride=1,
            padding=1)
        self.conv2_2 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=3, stride=1,
            padding=1)
        self.conv3_1 = Conv2d(
            in_channels=128, out_channels=256, kernel_size=3, stride=1,
            padding=1)
        self.conv3_2 = Conv2d(
            in_channels=256, out_channels=256, kernel_size=3, stride=1,
            padding=1)
        self.conv3_3 = Conv2d(
            in_channels=256, out_channels=256, kernel_size=3, stride=1,
            padding=1)
        self.conv3_4 = Conv2d(
            in_channels=256, out_channels=256, kernel_size=3, stride=1,
            padding=1)
        self.conv4_1 = Conv2d(
            in_channels=256, out_channels=512, kernel_size=3, stride=1,
            padding=1)
        self.conv4_2 = Conv2d(
            in_channels=512, out_channels=512, kernel_size=3, stride=1,
            padding=1)
        self.conv4_3 = Conv2d(
            in_channels=512, out_channels=512, kernel_size=3, stride=1,
            padding=1)
        self.conv4_4 = Conv2d(
            in_channels=512, out_channels=512, kernel_size=3, stride=1,
            padding=1)
        self.conv5_1 = Conv2d(
            in_channels=512, out_channels=512, kernel_size=3, stride=1,
            padding=1)
        self.conv5_2 = Conv2d(
            in_channels=512, out_channels=512, kernel_size=3, stride=1,
            padding=1)
        self.conv5_3_CPM = Conv2d(
            in_channels=512, out_channels=128, kernel_size=3, stride=1,
            padding=1)

        # stage1
        self.conv6_1_CPM = Conv2d(in_channels=128, out_channels=512, kernel_size=1, stride=1, padding=0)
        self.conv6_2_CPM = Conv2d(in_channels=512, out_channels=71, kernel_size=1, stride=1, padding=0)
        self.conv6_1_CPM = Conv2d(
            in_channels=128, out_channels=512, kernel_size=1, stride=1,
            padding=0)
        self.conv6_2_CPM = Conv2d(
            in_channels=512, out_channels=71, kernel_size=1, stride=1,
            padding=0)

        # stage2
        self.Mconv1_stage2 = Conv2d(in_channels=199, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv2_stage2 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv3_stage2 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv4_stage2 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv5_stage2 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv6_stage2 = Conv2d(in_channels=128, out_channels=128, kernel_size=1, stride=1, padding=0)
        self.Mconv7_stage2 = Conv2d(in_channels=128, out_channels=71, kernel_size=1, stride=1, padding=0)
        self.Mconv1_stage2 = Conv2d(
            in_channels=199, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv2_stage2 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv3_stage2 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv4_stage2 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv5_stage2 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv6_stage2 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=1, stride=1,
            padding=0)
        self.Mconv7_stage2 = Conv2d(
            in_channels=128, out_channels=71, kernel_size=1, stride=1,
            padding=0)

        # stage3
        self.Mconv1_stage3 = Conv2d(in_channels=199, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv2_stage3 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv3_stage3 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv4_stage3 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv5_stage3 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv6_stage3 = Conv2d(in_channels=128, out_channels=128, kernel_size=1, stride=1, padding=0)
        self.Mconv7_stage3 = Conv2d(in_channels=128, out_channels=71, kernel_size=1, stride=1, padding=0)
        self.Mconv1_stage3 = Conv2d(
            in_channels=199, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv2_stage3 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv3_stage3 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv4_stage3 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv5_stage3 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv6_stage3 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=1, stride=1,
            padding=0)
        self.Mconv7_stage3 = Conv2d(
            in_channels=128, out_channels=71, kernel_size=1, stride=1,
            padding=0)

        # stage4
        self.Mconv1_stage4 = Conv2d(in_channels=199, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv2_stage4 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv3_stage4 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv4_stage4 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv5_stage4 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv6_stage4 = Conv2d(in_channels=128, out_channels=128, kernel_size=1, stride=1, padding=0)
        self.Mconv7_stage4 = Conv2d(in_channels=128, out_channels=71, kernel_size=1, stride=1, padding=0)
        self.Mconv1_stage4 = Conv2d(
            in_channels=199, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv2_stage4 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv3_stage4 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv4_stage4 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv5_stage4 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv6_stage4 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=1, stride=1,
            padding=0)
        self.Mconv7_stage4 = Conv2d(
            in_channels=128, out_channels=71, kernel_size=1, stride=1,
            padding=0)

        # stage5
        self.Mconv1_stage5 = Conv2d(in_channels=199, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv2_stage5 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv3_stage5 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv4_stage5 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv5_stage5 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv6_stage5 = Conv2d(in_channels=128, out_channels=128, kernel_size=1, stride=1, padding=0)
        self.Mconv7_stage5 = Conv2d(in_channels=128, out_channels=71, kernel_size=1, stride=1, padding=0)
        self.Mconv1_stage5 = Conv2d(
            in_channels=199, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv2_stage5 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv3_stage5 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv4_stage5 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv5_stage5 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv6_stage5 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=1, stride=1,
            padding=0)
        self.Mconv7_stage5 = Conv2d(
            in_channels=128, out_channels=71, kernel_size=1, stride=1,
            padding=0)

        # stage6
        self.Mconv1_stage6 = Conv2d(in_channels=199, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv2_stage6 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv3_stage6 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv4_stage6 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv5_stage6 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.Mconv6_stage6 = Conv2d(in_channels=128, out_channels=128, kernel_size=1, stride=1, padding=0)
        self.Mconv7_stage6 = Conv2d(in_channels=128, out_channels=71, kernel_size=1, stride=1, padding=0)
        self.Mconv1_stage6 = Conv2d(
            in_channels=199, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv2_stage6 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv3_stage6 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv4_stage6 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv5_stage6 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=7, stride=1,
            padding=3)
        self.Mconv6_stage6 = Conv2d(
            in_channels=128, out_channels=128, kernel_size=1, stride=1,
            padding=0)
        self.Mconv7_stage6 = Conv2d(
            in_channels=128, out_channels=71, kernel_size=1, stride=1,
            padding=0)

        for m in self.modules():
            if isinstance(m, Conv2d):
@@ -179,74 +281,24 @@ TOPIL = ToPILImage()


params = {
    "gaussian_sigma": 2.5,
    "inference_img_size": 736, # 368, 736, 1312
    "heatmap_peak_thresh": 0.1,
    "crop_scale": 1.5,
    "line_indices": [
        [0, 1],
        [1, 2],
        [2, 3],
        [3, 4],
        [4, 5],
        [5, 6],
        [6, 7],
        [7, 8],
        [8, 9],
        [9, 10],
        [10, 11],
        [11, 12],
        [12, 13],
        [13, 14],
        [14, 15],
        [15, 16],
        [17, 18],
        [18, 19],
        [19, 20],
        [20, 21],
        [22, 23],
        [23, 24],
        [24, 25],
        [25, 26],
        [27, 28],
        [28, 29],
        [29, 30],
        [31, 32],
        [32, 33],
        [33, 34],
        [34, 35],
        [36, 37],
        [37, 38],
        [38, 39],
        [39, 40],
        [40, 41],
        [41, 36],
        [42, 43],
        [43, 44],
        [44, 45],
        [45, 46],
        [46, 47],
        [47, 42],
        [48, 49],
        [49, 50],
        [50, 51],
        [51, 52],
        [52, 53],
        [53, 54],
        [54, 55],
        [55, 56],
        [56, 57],
        [57, 58],
        [58, 59],
        [59, 48],
        [60, 61],
        [61, 62],
        [62, 63],
        [63, 64],
        [64, 65],
        [65, 66],
        [66, 67],
        [67, 60],
    'gaussian_sigma': 2.5,
    'inference_img_size': 736, # 368, 736, 1312
    'heatmap_peak_thresh': 0.1,
    'crop_scale': 1.5,
    'line_indices': [
        [0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6],
        [6, 7], [7, 8], [8, 9], [9, 10], [10, 11], [11, 12], [12, 13],
        [13, 14], [14, 15], [15, 16],
        [17, 18], [18, 19], [19, 20], [20, 21],
        [22, 23], [23, 24], [24, 25], [25, 26],
        [27, 28], [28, 29], [29, 30],
        [31, 32], [32, 33], [33, 34], [34, 35],
        [36, 37], [37, 38], [38, 39], [39, 40], [40, 41], [41, 36],
        [42, 43], [43, 44], [44, 45], [45, 46], [46, 47], [47, 42],
        [48, 49], [49, 50], [50, 51], [51, 52], [52, 53], [53, 54],
        [54, 55], [55, 56], [56, 57], [57, 58], [58, 59], [59, 48],
        [60, 61], [61, 62], [62, 63], [63, 64], [64, 65], [65, 66],
        [66, 67], [67, 60]
    ],
}

@@ -262,10 +314,12 @@ class Face(object):
        heatmap_peak_thresh: return landmark if over threshold, default 0.1

    """

    def __init__(self, face_model_path, inference_size=None, gaussian_sigma=None, heatmap_peak_thresh=None):
    def __init__(self, face_model_path,
                 inference_size=None,
                 gaussian_sigma=None,
                 heatmap_peak_thresh=None):
        self.inference_size = inference_size or params["inference_img_size"]
        self.sigma = gaussian_sigma or params["gaussian_sigma"]
        self.sigma = gaussian_sigma or params['gaussian_sigma']
        self.threshold = heatmap_peak_thresh or params["heatmap_peak_thresh"]
        self.model = FaceNet()
        self.model.load_state_dict(torch.load(face_model_path))

@@ -286,7 +340,10 @@ class Face(object):

        with torch.no_grad():
            hs = self.model(x_data[None, ...])
            heatmaps = F.interpolate(hs[-1], (H, W), mode="bilinear", align_corners=True).cpu().numpy()[0]
            heatmaps = F.interpolate(
                hs[-1],
                (H, W),
                mode='bilinear', align_corners=True).cpu().numpy()[0]
        return heatmaps

    def compute_peaks_from_heatmaps(self, heatmaps):

@@ -53,7 +53,7 @@ class Hand(object):
        # extract outputs, resize, and remove padding
        heatmap = np.transpose(np.squeeze(output), (1, 2, 0)) # output 1 is heatmaps
        heatmap = util.smart_resize_k(heatmap, fx=stride, fy=stride)
        heatmap = heatmap[: imageToTest_padded.shape[0] - pad[2], : imageToTest_padded.shape[1] - pad[3], :]
        heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
        heatmap = util.smart_resize(heatmap, (wsize, wsize))

        heatmap_avg += heatmap / len(multiplier)

@@ -78,14 +78,13 @@ class Hand(object):
        all_peaks.append([x, y])
    return np.array(all_peaks)


if __name__ == "__main__":
    hand_estimation = Hand("../model/hand_pose_model.pth")
    hand_estimation = Hand('../model/hand_pose_model.pth')

    # test_image = '../images/hand.jpg'
    test_image = "../images/hand.jpg"
    test_image = '../images/hand.jpg'
    oriImg = cv2.imread(test_image) # B,G,R order
    peaks = hand_estimation(oriImg)
    canvas = util.draw_handpose(oriImg, peaks, True)
    cv2.imshow("", canvas)
    cv2.imshow('', canvas)
    cv2.waitKey(0)
@@ -7,127 +7,112 @@ import torch.nn as nn
def make_layers(block, no_relu_layers):
    layers = []
    for layer_name, v in block.items():
        if "pool" in layer_name:
            layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])
        if 'pool' in layer_name:
            layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1],
                                 padding=v[2])
            layers.append((layer_name, layer))
        else:
            conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4])
            conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
                               kernel_size=v[2], stride=v[3],
                               padding=v[4])
            layers.append((layer_name, conv2d))
            if layer_name not in no_relu_layers:
                layers.append(("relu_" + layer_name, nn.ReLU(inplace=True)))
                layers.append(('relu_'+layer_name, nn.ReLU(inplace=True)))

    return nn.Sequential(OrderedDict(layers))

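make_layers, shown above, turns an OrderedDict spec of `name -> [in, out, kernel, stride, padding]` (or `name -> [kernel, stride, padding]` for pooling layers) into an nn.Sequential, appending a ReLU after every layer not listed in no_relu_layers. A small usage sketch with an illustrative block spec:

```python
from collections import OrderedDict
import torch

# Illustrative spec in the same format as block0 below.
block = OrderedDict([
    ("conv1_1", [3, 64, 3, 1, 1]),
    ("pool1_stage1", [2, 2, 0]),
    ("conv_out", [64, 19, 1, 1, 0]),
])
model = make_layers(block, no_relu_layers=["conv_out"])  # no ReLU on the head
print(model(torch.randn(1, 3, 64, 64)).shape)  # torch.Size([1, 19, 32, 32])
```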
class bodypose_model(nn.Module):
    def __init__(self):
        super(bodypose_model, self).__init__()

        # these layers have no relu layer
        no_relu_layers = [
            "conv5_5_CPM_L1",
            "conv5_5_CPM_L2",
            "Mconv7_stage2_L1",
            "Mconv7_stage2_L2",
            "Mconv7_stage3_L1",
            "Mconv7_stage3_L2",
            "Mconv7_stage4_L1",
            "Mconv7_stage4_L2",
            "Mconv7_stage5_L1",
            "Mconv7_stage5_L2",
            "Mconv7_stage6_L1",
            "Mconv7_stage6_L1",
        ]
        no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',\
                          'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',\
                          'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',\
                          'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L1']
        blocks = {}
        block0 = OrderedDict(
            [
                ("conv1_1", [3, 64, 3, 1, 1]),
                ("conv1_2", [64, 64, 3, 1, 1]),
                ("pool1_stage1", [2, 2, 0]),
                ("conv2_1", [64, 128, 3, 1, 1]),
                ("conv2_2", [128, 128, 3, 1, 1]),
                ("pool2_stage1", [2, 2, 0]),
                ("conv3_1", [128, 256, 3, 1, 1]),
                ("conv3_2", [256, 256, 3, 1, 1]),
                ("conv3_3", [256, 256, 3, 1, 1]),
                ("conv3_4", [256, 256, 3, 1, 1]),
                ("pool3_stage1", [2, 2, 0]),
                ("conv4_1", [256, 512, 3, 1, 1]),
                ("conv4_2", [512, 512, 3, 1, 1]),
                ("conv4_3_CPM", [512, 256, 3, 1, 1]),
                ("conv4_4_CPM", [256, 128, 3, 1, 1]),
            ]
        )
        block0 = OrderedDict([
            ('conv1_1', [3, 64, 3, 1, 1]),
            ('conv1_2', [64, 64, 3, 1, 1]),
            ('pool1_stage1', [2, 2, 0]),
            ('conv2_1', [64, 128, 3, 1, 1]),
            ('conv2_2', [128, 128, 3, 1, 1]),
            ('pool2_stage1', [2, 2, 0]),
            ('conv3_1', [128, 256, 3, 1, 1]),
            ('conv3_2', [256, 256, 3, 1, 1]),
            ('conv3_3', [256, 256, 3, 1, 1]),
            ('conv3_4', [256, 256, 3, 1, 1]),
            ('pool3_stage1', [2, 2, 0]),
            ('conv4_1', [256, 512, 3, 1, 1]),
            ('conv4_2', [512, 512, 3, 1, 1]),
            ('conv4_3_CPM', [512, 256, 3, 1, 1]),
            ('conv4_4_CPM', [256, 128, 3, 1, 1])
        ])


        # Stage 1
        block1_1 = OrderedDict(
            [
                ("conv5_1_CPM_L1", [128, 128, 3, 1, 1]),
                ("conv5_2_CPM_L1", [128, 128, 3, 1, 1]),
                ("conv5_3_CPM_L1", [128, 128, 3, 1, 1]),
                ("conv5_4_CPM_L1", [128, 512, 1, 1, 0]),
                ("conv5_5_CPM_L1", [512, 38, 1, 1, 0]),
            ]
        )
        block1_1 = OrderedDict([
            ('conv5_1_CPM_L1', [128, 128, 3, 1, 1]),
            ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]),
            ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]),
            ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]),
            ('conv5_5_CPM_L1', [512, 38, 1, 1, 0])
        ])

        block1_2 = OrderedDict(
            [
                ("conv5_1_CPM_L2", [128, 128, 3, 1, 1]),
                ("conv5_2_CPM_L2", [128, 128, 3, 1, 1]),
                ("conv5_3_CPM_L2", [128, 128, 3, 1, 1]),
                ("conv5_4_CPM_L2", [128, 512, 1, 1, 0]),
                ("conv5_5_CPM_L2", [512, 19, 1, 1, 0]),
            ]
        )
        blocks["block1_1"] = block1_1
        blocks["block1_2"] = block1_2
        block1_2 = OrderedDict([
            ('conv5_1_CPM_L2', [128, 128, 3, 1, 1]),
            ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]),
            ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]),
            ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]),
            ('conv5_5_CPM_L2', [512, 19, 1, 1, 0])
        ])
        blocks['block1_1'] = block1_1
        blocks['block1_2'] = block1_2

        self.model0 = make_layers(block0, no_relu_layers)

        # Stages 2 - 6
        for i in range(2, 7):
            blocks["block%d_1" % i] = OrderedDict(
                [
                    ("Mconv1_stage%d_L1" % i, [185, 128, 7, 1, 3]),
                    ("Mconv2_stage%d_L1" % i, [128, 128, 7, 1, 3]),
                    ("Mconv3_stage%d_L1" % i, [128, 128, 7, 1, 3]),
                    ("Mconv4_stage%d_L1" % i, [128, 128, 7, 1, 3]),
                    ("Mconv5_stage%d_L1" % i, [128, 128, 7, 1, 3]),
                    ("Mconv6_stage%d_L1" % i, [128, 128, 1, 1, 0]),
                    ("Mconv7_stage%d_L1" % i, [128, 38, 1, 1, 0]),
                ]
            )
            blocks['block%d_1' % i] = OrderedDict([
                ('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]),
                ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]),
                ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]),
                ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]),
                ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]),
                ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]),
                ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0])
            ])

            blocks["block%d_2" % i] = OrderedDict(
                [
                    ("Mconv1_stage%d_L2" % i, [185, 128, 7, 1, 3]),
                    ("Mconv2_stage%d_L2" % i, [128, 128, 7, 1, 3]),
                    ("Mconv3_stage%d_L2" % i, [128, 128, 7, 1, 3]),
                    ("Mconv4_stage%d_L2" % i, [128, 128, 7, 1, 3]),
                    ("Mconv5_stage%d_L2" % i, [128, 128, 7, 1, 3]),
                    ("Mconv6_stage%d_L2" % i, [128, 128, 1, 1, 0]),
                    ("Mconv7_stage%d_L2" % i, [128, 19, 1, 1, 0]),
                ]
            )
            blocks['block%d_2' % i] = OrderedDict([
                ('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]),
                ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]),
                ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]),
                ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]),
                ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]),
                ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]),
                ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0])
            ])

        for k in blocks.keys():
            blocks[k] = make_layers(blocks[k], no_relu_layers)

        self.model1_1 = blocks["block1_1"]
        self.model2_1 = blocks["block2_1"]
        self.model3_1 = blocks["block3_1"]
        self.model4_1 = blocks["block4_1"]
        self.model5_1 = blocks["block5_1"]
        self.model6_1 = blocks["block6_1"]
        self.model1_1 = blocks['block1_1']
        self.model2_1 = blocks['block2_1']
        self.model3_1 = blocks['block3_1']
        self.model4_1 = blocks['block4_1']
        self.model5_1 = blocks['block5_1']
        self.model6_1 = blocks['block6_1']

        self.model1_2 = blocks['block1_2']
        self.model2_2 = blocks['block2_2']
        self.model3_2 = blocks['block3_2']
        self.model4_2 = blocks['block4_2']
        self.model5_2 = blocks['block5_2']
        self.model6_2 = blocks['block6_2']

        self.model1_2 = blocks["block1_2"]
        self.model2_2 = blocks["block2_2"]
        self.model3_2 = blocks["block3_2"]
        self.model4_2 = blocks["block4_2"]
        self.model5_2 = blocks["block5_2"]
        self.model6_2 = blocks["block6_2"]

    def forward(self, x):

        out1 = self.model0(x)

        out1_1 = self.model1_1(out1)

@@ -155,74 +140,66 @@ class bodypose_model(nn.Module):

        return out6_1, out6_2


class handpose_model(nn.Module):
    def __init__(self):
        super(handpose_model, self).__init__()

        # these layers have no relu layer
        no_relu_layers = [
            "conv6_2_CPM",
            "Mconv7_stage2",
            "Mconv7_stage3",
            "Mconv7_stage4",
            "Mconv7_stage5",
            "Mconv7_stage6",
        ]
        no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',\
                          'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6']
        # stage 1
        block1_0 = OrderedDict(
            [
                ("conv1_1", [3, 64, 3, 1, 1]),
                ("conv1_2", [64, 64, 3, 1, 1]),
                ("pool1_stage1", [2, 2, 0]),
                ("conv2_1", [64, 128, 3, 1, 1]),
                ("conv2_2", [128, 128, 3, 1, 1]),
                ("pool2_stage1", [2, 2, 0]),
                ("conv3_1", [128, 256, 3, 1, 1]),
                ("conv3_2", [256, 256, 3, 1, 1]),
                ("conv3_3", [256, 256, 3, 1, 1]),
                ("conv3_4", [256, 256, 3, 1, 1]),
                ("pool3_stage1", [2, 2, 0]),
                ("conv4_1", [256, 512, 3, 1, 1]),
                ("conv4_2", [512, 512, 3, 1, 1]),
                ("conv4_3", [512, 512, 3, 1, 1]),
                ("conv4_4", [512, 512, 3, 1, 1]),
                ("conv5_1", [512, 512, 3, 1, 1]),
                ("conv5_2", [512, 512, 3, 1, 1]),
                ("conv5_3_CPM", [512, 128, 3, 1, 1]),
            ]
        )
        block1_0 = OrderedDict([
            ('conv1_1', [3, 64, 3, 1, 1]),
            ('conv1_2', [64, 64, 3, 1, 1]),
            ('pool1_stage1', [2, 2, 0]),
            ('conv2_1', [64, 128, 3, 1, 1]),
            ('conv2_2', [128, 128, 3, 1, 1]),
            ('pool2_stage1', [2, 2, 0]),
            ('conv3_1', [128, 256, 3, 1, 1]),
            ('conv3_2', [256, 256, 3, 1, 1]),
            ('conv3_3', [256, 256, 3, 1, 1]),
            ('conv3_4', [256, 256, 3, 1, 1]),
            ('pool3_stage1', [2, 2, 0]),
            ('conv4_1', [256, 512, 3, 1, 1]),
            ('conv4_2', [512, 512, 3, 1, 1]),
            ('conv4_3', [512, 512, 3, 1, 1]),
            ('conv4_4', [512, 512, 3, 1, 1]),
            ('conv5_1', [512, 512, 3, 1, 1]),
            ('conv5_2', [512, 512, 3, 1, 1]),
            ('conv5_3_CPM', [512, 128, 3, 1, 1])
        ])

        block1_1 = OrderedDict([("conv6_1_CPM", [128, 512, 1, 1, 0]), ("conv6_2_CPM", [512, 22, 1, 1, 0])])
        block1_1 = OrderedDict([
            ('conv6_1_CPM', [128, 512, 1, 1, 0]),
            ('conv6_2_CPM', [512, 22, 1, 1, 0])
        ])

        blocks = {}
        blocks["block1_0"] = block1_0
        blocks["block1_1"] = block1_1
        blocks['block1_0'] = block1_0
        blocks['block1_1'] = block1_1

        # stage 2-6
        for i in range(2, 7):
            blocks["block%d" % i] = OrderedDict(
                [
                    ("Mconv1_stage%d" % i, [150, 128, 7, 1, 3]),
                    ("Mconv2_stage%d" % i, [128, 128, 7, 1, 3]),
                    ("Mconv3_stage%d" % i, [128, 128, 7, 1, 3]),
                    ("Mconv4_stage%d" % i, [128, 128, 7, 1, 3]),
                    ("Mconv5_stage%d" % i, [128, 128, 7, 1, 3]),
                    ("Mconv6_stage%d" % i, [128, 128, 1, 1, 0]),
                    ("Mconv7_stage%d" % i, [128, 22, 1, 1, 0]),
                ]
            )
            blocks['block%d' % i] = OrderedDict([
                ('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]),
                ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]),
                ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]),
                ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]),
                ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]),
                ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]),
                ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0])
            ])

        for k in blocks.keys():
            blocks[k] = make_layers(blocks[k], no_relu_layers)

        self.model1_0 = blocks["block1_0"]
        self.model1_1 = blocks["block1_1"]
        self.model2 = blocks["block2"]
        self.model3 = blocks["block3"]
        self.model4 = blocks["block4"]
        self.model5 = blocks["block5"]
        self.model6 = blocks["block6"]
        self.model1_0 = blocks['block1_0']
        self.model1_1 = blocks['block1_1']
        self.model2 = blocks['block2']
        self.model3 = blocks['block3']
        self.model4 = blocks['block4']
        self.model5 = blocks['block5']
        self.model6 = blocks['block6']

    def forward(self, x):
        out1_0 = self.model1_0(x)
@@ -42,19 +42,19 @@ def padRightDownCorner(img, stride, padValue):
    w = img.shape[1]

    pad = 4 * [None]
    pad[0] = 0 # up
    pad[1] = 0 # left
    pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down
    pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right
    pad[0] = 0 # up
    pad[1] = 0 # left
    pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down
    pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right

    img_padded = img
    pad_up = np.tile(img_padded[0:1, :, :] * 0 + padValue, (pad[0], 1, 1))
    pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1))
    img_padded = np.concatenate((pad_up, img_padded), axis=0)
    pad_left = np.tile(img_padded[:, 0:1, :] * 0 + padValue, (1, pad[1], 1))
    pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1))
    img_padded = np.concatenate((pad_left, img_padded), axis=1)
    pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + padValue, (pad[2], 1, 1))
    pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1))
    img_padded = np.concatenate((img_padded, pad_down), axis=0)
    pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + padValue, (1, pad[3], 1))
    pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1))
    img_padded = np.concatenate((img_padded, pad_right), axis=1)

    return img_padded, pad
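padRightDownCorner pads the bottom and right edges so both dimensions become multiples of the network stride; `stride - (h % stride)` is exactly the number of rows needed to reach the next multiple. A quick worked sketch of the same arithmetic using np.pad:

```python
import numpy as np

stride, padValue = 8, 128
img = np.zeros((37, 50, 3), dtype=np.uint8)  # 37 % 8 == 5, 50 % 8 == 2

pad_down = 0 if 37 % stride == 0 else stride - (37 % stride)   # 3 -> height 40
pad_right = 0 if 50 % stride == 0 else stride - (50 % stride)  # 6 -> width 56

padded = np.pad(img, ((0, pad_down), (0, pad_right), (0, 0)), constant_values=padValue)
print(padded.shape)  # (40, 56, 3)
```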
@@ -63,7 +63,7 @@ def padRightDownCorner(img, stride, padValue):
def transfer(model, model_weights):
    transfered_model_weights = {}
    for weights_name in model.state_dict().keys():
        transfered_model_weights[weights_name] = model_weights[".".join(weights_name.split(".")[1:])]
        transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])]
    return transfered_model_weights

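transfer remaps checkpoint keys by dropping the first dot-separated component of each model key, so `model0.conv1_1.weight` looks up `conv1_1.weight` in the raw weights. A tiny sketch of that key surgery with hypothetical keys:

```python
# Hypothetical keys, mirroring the pattern transfer() relies on.
model_keys = ["model0.conv1_1.weight", "model0.conv1_1.bias"]
checkpoint = {"conv1_1.weight": "W", "conv1_1.bias": "b"}

# Drop the leading "model0." prefix to find each weight in the checkpoint.
remapped = {k: checkpoint[".".join(k.split(".")[1:])] for k in model_keys}
print(remapped)  # {'model0.conv1_1.weight': 'W', 'model0.conv1_1.bias': 'b'}
```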
@@ -85,45 +85,16 @@ def draw_bodypose(canvas: np.ndarray, keypoints: List[Keypoint]) -> np.ndarray:
    stickwidth = 4

    limbSeq = [
        [2, 3],
        [2, 6],
        [3, 4],
        [4, 5],
        [6, 7],
        [7, 8],
        [2, 9],
        [9, 10],
        [10, 11],
        [2, 12],
        [12, 13],
        [13, 14],
        [2, 1],
        [1, 15],
        [15, 17],
        [1, 16],
        [2, 3], [2, 6], [3, 4], [4, 5],
        [6, 7], [7, 8], [2, 9], [9, 10],
        [10, 11], [2, 12], [12, 13], [13, 14],
        [2, 1], [1, 15], [15, 17], [1, 16],
        [16, 18],
    ]

    colors = [
        [255, 0, 0],
        [255, 85, 0],
        [255, 170, 0],
        [255, 255, 0],
        [170, 255, 0],
        [85, 255, 0],
        [0, 255, 0],
        [0, 255, 85],
        [0, 255, 170],
        [0, 255, 255],
        [0, 170, 255],
        [0, 85, 255],
        [0, 0, 255],
        [85, 0, 255],
        [170, 0, 255],
        [255, 0, 255],
        [255, 0, 170],
        [255, 0, 85],
    ]
    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
              [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
              [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]

    for (k1_index, k2_index), color in zip(limbSeq, colors, strict=False):
        keypoint1 = keypoints[k1_index - 1]

@@ -155,7 +126,6 @@ def draw_bodypose(canvas: np.ndarray, keypoints: List[Keypoint]) -> np.ndarray:

def draw_handpose(canvas: np.ndarray, keypoints: Union[List[Keypoint], None]) -> np.ndarray:
    import matplotlib

    """
    Draw keypoints and connections representing hand pose on a given canvas.

@@ -175,28 +145,8 @@ def draw_handpose(canvas: np.ndarray, keypoints: Union[List[Keypoint], None]) ->

    H, W, C = canvas.shape

    edges = [
        [0, 1],
        [1, 2],
        [2, 3],
        [3, 4],
        [0, 5],
        [5, 6],
        [6, 7],
        [7, 8],
        [0, 9],
        [9, 10],
        [10, 11],
        [11, 12],
        [0, 13],
        [13, 14],
        [14, 15],
        [15, 16],
        [0, 17],
        [17, 18],
        [18, 19],
        [19, 20],
    ]
    edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \
             [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]

    for ie, (e1, e2) in enumerate(edges):
        k1 = keypoints[e1]
@@ -209,13 +159,7 @@ def draw_handpose(canvas: np.ndarray, keypoints: Union[List[Keypoint], None]) ->
        x2 = int(k2.x * W)
        y2 = int(k2.y * H)
        if x1 > eps and y1 > eps and x2 > eps and y2 > eps:
            cv2.line(
                canvas,
                (x1, y1),
                (x2, y2),
                matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255,
                thickness=2,
            )
            cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255, thickness=2)

    for keypoint in keypoints:
        x, y = keypoint.x, keypoint.y
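Each hand edge above gets its own hue by spreading edge indices evenly around the HSV color wheel and converting to RGB. A minimal sketch of the coloring scheme:

```python
import matplotlib.colors

num_edges = 20  # the hand skeleton above has 20 edges
for ie in range(num_edges):
    # Hue varies with the edge index; full saturation and value.
    rgb = matplotlib.colors.hsv_to_rgb([ie / float(num_edges), 1.0, 1.0]) * 255
    print(ie, rgb.astype(int))
```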
@@ -295,14 +239,22 @@ def handDetect(body: BodyResult, oriImg) -> List[Tuple[int, int, int, bool]]:
        return []

    hands = []
    # left hand
    #left hand
    if has_left:
        hands.append([left_shoulder.x, left_shoulder.y, left_elbow.x, left_elbow.y, left_wrist.x, left_wrist.y, True])
        hands.append([
            left_shoulder.x, left_shoulder.y,
            left_elbow.x, left_elbow.y,
            left_wrist.x, left_wrist.y,
            True
        ])
    # right hand
    if has_right:
        hands.append(
            [right_shoulder.x, right_shoulder.y, right_elbow.x, right_elbow.y, right_wrist.x, right_wrist.y, False]
        )
        hands.append([
            right_shoulder.x, right_shoulder.y,
            right_elbow.x, right_elbow.y,
            right_wrist.x, right_wrist.y,
            False
        ])

    for x1, y1, x2, y2, x3, y3, is_left in hands:
        # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbox) = (1 + ratio) * pos_wrist - ratio * pos_elbox
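The comment above is the hand-center heuristic: extend the elbow-to-wrist vector past the wrist by a fixed ratio, i.e. pos_hand = (1 + ratio) * pos_wrist - ratio * pos_elbow. A numeric sketch (the ratio value is illustrative):

```python
ratio = 0.33  # illustrative extension factor
elbow = (100.0, 200.0)
wrist = (140.0, 260.0)

# Extend the elbow -> wrist direction beyond the wrist.
hand_x = (1 + ratio) * wrist[0] - ratio * elbow[0]  # 153.2
hand_y = (1 + ratio) * wrist[1] - ratio * elbow[1]  # 279.8
```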
@@ -337,11 +289,11 @@ def handDetect(body: BodyResult, oriImg) -> List[Tuple[int, int, int, bool]]:
|
||||
if width >= 20:
|
||||
detect_result.append((int(x), int(y), int(width), is_left))
|
||||
|
||||
"""
|
||||
'''
|
||||
return value: [[x, y, w, True if left hand else False]].
|
||||
width=height since the network require squared input.
|
||||
x, y is the coordinate of top left.
|
||||
"""
|
||||
'''
|
||||
return detect_result
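
The extrapolation in the comment above is worth a worked example. A minimal sketch (editor's illustration, not part of the diff; the 0.33 ratio is an assumption, the annotator defines its own constant):

def estimate_hand_center(elbow_xy, wrist_xy, ratio=0.33):
    # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbow)
    #          = (1 + ratio) * pos_wrist - ratio * pos_elbow
    ex, ey = elbow_xy
    wx, wy = wrist_xy
    return ((1 + ratio) * wx - ratio * ex, (1 + ratio) * wy - ratio * ey)

# With the elbow at (100, 100) and the wrist at (130, 100), the hand center is
# pushed further along the forearm direction, to roughly (139.9, 100.0).
print(estimate_hand_center((100.0, 100.0), (130.0, 100.0)))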

@@ -5,7 +5,7 @@ import cv2
import numpy as np
import torch

annotator_ckpts_path = os.path.join(os.path.dirname(__file__), "ckpts")
annotator_ckpts_path = os.path.join(os.path.dirname(__file__), 'ckpts')


def HWC3(x):
@@ -30,7 +30,7 @@ def HWC3(x):
def make_noise_disk(H, W, C, F):
noise = np.random.uniform(low=0, high=1, size=((H // F) + 2, (W // F) + 2, C))
noise = cv2.resize(noise, (W + 2 * F, H + 2 * F), interpolation=cv2.INTER_CUBIC)
noise = noise[F : F + H, F : F + W]
noise = noise[F: F + H, F: F + W]
noise -= np.min(noise)
noise /= np.max(noise)
if C == 1:
@@ -55,7 +55,6 @@ def nms(x, t, s):
z[y > t] = 255
return z


def min_max_norm(x):
x -= np.min(x)
x /= np.maximum(np.max(x), 1e-5)
@@ -106,155 +105,42 @@ def torch_gc():

def ade_palette():
"""ADE20K palette that maps each class to RGB values."""
return [
[120, 120, 120],
[180, 120, 120],
[6, 230, 230],
[80, 50, 50],
[4, 200, 3],
[120, 120, 80],
[140, 140, 140],
[204, 5, 255],
[230, 230, 230],
[4, 250, 7],
[224, 5, 255],
[235, 255, 7],
[150, 5, 61],
[120, 120, 70],
[8, 255, 51],
[255, 6, 82],
[143, 255, 140],
[204, 255, 4],
[255, 51, 7],
[204, 70, 3],
[0, 102, 200],
[61, 230, 250],
[255, 6, 51],
[11, 102, 255],
[255, 7, 71],
[255, 9, 224],
[9, 7, 230],
[220, 220, 220],
[255, 9, 92],
[112, 9, 255],
[8, 255, 214],
[7, 255, 224],
[255, 184, 6],
[10, 255, 71],
[255, 41, 10],
[7, 255, 255],
[224, 255, 8],
[102, 8, 255],
[255, 61, 6],
[255, 194, 7],
[255, 122, 8],
[0, 255, 20],
[255, 8, 41],
[255, 5, 153],
[6, 51, 255],
[235, 12, 255],
[160, 150, 20],
[0, 163, 255],
[140, 140, 140],
[250, 10, 15],
[20, 255, 0],
[31, 255, 0],
[255, 31, 0],
[255, 224, 0],
[153, 255, 0],
[0, 0, 255],
[255, 71, 0],
[0, 235, 255],
[0, 173, 255],
[31, 0, 255],
[11, 200, 200],
[255, 82, 0],
[0, 255, 245],
[0, 61, 255],
[0, 255, 112],
[0, 255, 133],
[255, 0, 0],
[255, 163, 0],
[255, 102, 0],
[194, 255, 0],
[0, 143, 255],
[51, 255, 0],
[0, 82, 255],
[0, 255, 41],
[0, 255, 173],
[10, 0, 255],
[173, 255, 0],
[0, 255, 153],
[255, 92, 0],
[255, 0, 255],
[255, 0, 245],
[255, 0, 102],
[255, 173, 0],
[255, 0, 20],
[255, 184, 184],
[0, 31, 255],
[0, 255, 61],
[0, 71, 255],
[255, 0, 204],
[0, 255, 194],
[0, 255, 82],
[0, 10, 255],
[0, 112, 255],
[51, 0, 255],
[0, 194, 255],
[0, 122, 255],
[0, 255, 163],
[255, 153, 0],
[0, 255, 10],
[255, 112, 0],
[143, 255, 0],
[82, 0, 255],
[163, 255, 0],
[255, 235, 0],
[8, 184, 170],
[133, 0, 255],
[0, 255, 92],
[184, 0, 255],
[255, 0, 31],
[0, 184, 255],
[0, 214, 255],
[255, 0, 112],
[92, 255, 0],
[0, 224, 255],
[112, 224, 255],
[70, 184, 160],
[163, 0, 255],
[153, 0, 255],
[71, 255, 0],
[255, 0, 163],
[255, 204, 0],
[255, 0, 143],
[0, 255, 235],
[133, 255, 0],
[255, 0, 235],
[245, 0, 255],
[255, 0, 122],
[255, 245, 0],
[10, 190, 212],
[214, 255, 0],
[0, 204, 255],
[20, 0, 255],
[255, 255, 0],
[0, 153, 255],
[0, 41, 255],
[0, 255, 204],
[41, 0, 255],
[41, 255, 0],
[173, 0, 255],
[0, 245, 255],
[71, 0, 255],
[122, 0, 255],
[0, 255, 184],
[0, 92, 255],
[184, 255, 0],
[0, 133, 255],
[255, 214, 0],
[25, 194, 194],
[102, 255, 0],
[92, 0, 255],
]
return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
[4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
[230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
[150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
[143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
[0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
[255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
[255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
[255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
[224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
[255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
[6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
[140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
[255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
[255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
[11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
[0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
[255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
[0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
[173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
[255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
[255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
[255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
[0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
[0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
[143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
[8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
[255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
[92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
[163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
[255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
[255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
[10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
[255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
[41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
[71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
[184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
[102, 255, 0], [92, 0, 255]]
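
To make the palette's role concrete, a minimal usage sketch (editor's illustration; assumes ade_palette() as defined above and a toy class-index map):

import numpy as np

def colorize_segmentation(seg):
    # Map a (H, W) array of ADE20K class indices to an (H, W, 3) RGB image.
    palette = np.array(ade_palette(), dtype=np.uint8)  # shape (150, 3)
    return palette[seg]

seg = np.zeros((4, 4), dtype=np.int64)  # toy map: every pixel is class 0
print(colorize_segmentation(seg)[0, 0])  # [120 120 120], the first palette entry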

@@ -39,8 +39,6 @@ logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

BRIA_CONTROL_MODES = Literal["depth", "canny", "colorgrid", "recolor", "tile", "pose"]

class BriaControlModes(Enum):
depth = 0
canny = 1
@@ -90,7 +88,9 @@ class BriaControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin):
# self.time_text_embed = text_time_guidance_cls(
#     embedding_dim=self.inner_dim, pooled_projection_dim=pooled_projection_dim
# )
self.time_embed = TimestepProjEmbeddings(embedding_dim=self.inner_dim, time_theta=time_theta)
self.time_embed = TimestepProjEmbeddings(
embedding_dim=self.inner_dim, time_theta=time_theta
)

self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim)
self.x_embedder = torch.nn.Linear(in_channels, self.inner_dim)
@@ -328,16 +328,12 @@ class BriaControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin):

# Validate controlnet_mode values are within the valid range
if torch.any(controlnet_mode < 0) or torch.any(controlnet_mode >= self.num_mode):
raise ValueError(
f"`controlnet_mode` values must be in range [0, {self.num_mode - 1}], but got values outside this range"
)
raise ValueError(f"`controlnet_mode` values must be in range [0, {self.num_mode-1}], but got values outside this range")

# union mode emb
controlnet_mode_emb = self.controlnet_mode_embedder(controlnet_mode)
if controlnet_mode_emb.shape[0] < encoder_hidden_states.shape[0]:  # duplicate mode emb for each batch
controlnet_mode_emb = controlnet_mode_emb.expand(
encoder_hidden_states.shape[0], 1, encoder_hidden_states.shape[2]
)
if controlnet_mode_emb.shape[0] < encoder_hidden_states.shape[0]: # duplicate mode emb for each batch
controlnet_mode_emb = controlnet_mode_emb.expand(encoder_hidden_states.shape[0], 1, encoder_hidden_states.shape[2])
encoder_hidden_states = torch.cat([controlnet_mode_emb, encoder_hidden_states], dim=1)
txt_ids = torch.cat((txt_ids[0:1, :], txt_ids), dim=0)
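
A shape-only sketch of the union-mode step above (editor's illustration with made-up dimensions): one mode-embedding token is prepended to the text sequence, so txt_ids must also gain one row.

import torch

batch, seq_len, dim = 2, 128, 64
encoder_hidden_states = torch.randn(batch, seq_len, dim)
controlnet_mode_emb = torch.randn(1, 1, dim)  # embedding for a single mode id

# Duplicate the mode embedding across the batch, then prepend it as token 0.
controlnet_mode_emb = controlnet_mode_emb.expand(batch, 1, dim)
encoder_hidden_states = torch.cat([controlnet_mode_emb, encoder_hidden_states], dim=1)
print(encoder_hidden_states.shape)  # torch.Size([2, 129, 64])

# The positional ids grow the same way, by repeating the first row.
txt_ids = torch.zeros(seq_len, 3)
txt_ids = torch.cat((txt_ids[0:1, :], txt_ids), dim=0)
print(txt_ids.shape)  # torch.Size([129, 3])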
@@ -415,9 +411,7 @@ class BriaControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin):
controlnet_block_samples = controlnet_block_samples + (block_sample,)

controlnet_single_block_samples = ()
for single_block_sample, controlnet_block in zip(
single_block_samples, self.controlnet_single_blocks, strict=False
):
for single_block_sample, controlnet_block in zip(single_block_samples, self.controlnet_single_blocks, strict=False):
single_block_sample = controlnet_block(single_block_sample)
controlnet_single_block_samples = controlnet_single_block_samples + (single_block_sample,)

@@ -478,9 +472,7 @@ class BriaMultiControlNetModel(ModelMixin):
if len(self.nets) == 1 and self.nets[0].union:
controlnet = self.nets[0]

for i, (image, mode, scale) in enumerate(
zip(controlnet_cond, controlnet_mode, conditioning_scale, strict=False)
):
for i, (image, mode, scale) in enumerate(zip(controlnet_cond, controlnet_mode, conditioning_scale, strict=False)):
block_samples, single_block_samples = controlnet(
hidden_states=hidden_states,
controlnet_cond=image,
@@ -503,9 +495,7 @@ class BriaMultiControlNetModel(ModelMixin):
else:
control_block_samples = [
control_block_sample + block_sample
for control_block_sample, block_sample in zip(
control_block_samples, block_samples, strict=False
)
for control_block_sample, block_sample in zip(control_block_samples, block_samples, strict=False)
]

control_single_block_samples = [
@@ -544,9 +534,7 @@ class BriaMultiControlNetModel(ModelMixin):
if block_samples is not None and control_block_samples is not None:
control_block_samples = [
control_block_sample + block_sample
for control_block_sample, block_sample in zip(
control_block_samples, block_samples, strict=False
)
for control_block_sample, block_sample in zip(control_block_samples, block_samples, strict=False)
]
if single_block_samples is not None and control_single_block_samples is not None:
control_single_block_samples = [

@@ -15,6 +15,7 @@ def prepare_control_images(
height: int,
device: torch.device,
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:

tensored_control_images = []
tensored_control_modes = []
for idx, control_image_ in enumerate(control_images):
@@ -39,13 +40,11 @@ def prepare_control_images(
width_control_image,
)
tensored_control_images.append(tensored_control_image)
tensored_control_modes.append(
torch.tensor(control_modes[idx]).expand(tensored_control_image.shape[0]).to(device, dtype=torch.long)
)
tensored_control_modes.append(torch.tensor(control_modes[idx]).expand(
tensored_control_image.shape[0]).to(device, dtype=torch.long))
return tensored_control_images, tensored_control_modes
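
The mode-tensor construction in the loop above reduces to the following minimal sketch (editor's illustration; the mode index and batch size are made up):

import torch

control_mode_id = 1  # e.g. "canny" per the BriaControlModes enum above
batch = 2
mode = torch.tensor(control_mode_id).expand(batch).to("cpu", dtype=torch.long)
print(mode)  # tensor([1, 1]) -- one mode id per batch entry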


def _prepare_image(
image: Image.Image,
width: int,
@@ -59,10 +58,10 @@ def _prepare_image(
image = image.to(device=device, dtype=dtype)
return image


def _pack_latents(latents, height, width):
latents = latents.view(1, 4, height // 2, 2, width // 2, 2)
latents = latents.permute(0, 2, 4, 1, 3, 5)
latents = latents.reshape(1, (height // 2) * (width // 2), 16)
return latents
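
A shape sketch of the packing above (editor's illustration): a (1, 4, H, W) latent becomes a (1, (H/2)*(W/2), 16) token sequence by folding each 2x2 spatial patch into the channel dimension.

import torch

latents = torch.randn(1, 4, 8, 8)
packed = latents.view(1, 4, 4, 2, 4, 2).permute(0, 2, 4, 1, 3, 5).reshape(1, 16, 16)
print(packed.shape)  # torch.Size([1, 16, 16]) -- 16 tokens of 4 * 2 * 2 = 16 channels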

@@ -52,8 +52,6 @@ Based on FluxPipeline with several changes:
- We use zero padding for prompts
- No guidance embedding since this is not a distilled version
"""


class BriaPipeline(FluxPipeline):
r"""
Args:
@@ -75,10 +73,10 @@ class BriaPipeline(FluxPipeline):
def __init__(
self,
transformer: BriaTransformer2DModel,
scheduler: Union[FlowMatchEulerDiscreteScheduler, KarrasDiffusionSchedulers],
scheduler: Union[FlowMatchEulerDiscreteScheduler,KarrasDiffusionSchedulers],
vae: AutoencoderKL,
text_encoder: T5EncoderModel,
tokenizer: T5TokenizerFast,
tokenizer: T5TokenizerFast
):
self.register_modules(
vae=vae,
@@ -93,14 +91,15 @@ class BriaPipeline(FluxPipeline):
2 ** (len(self.vae.config.block_out_channels)) if hasattr(self, "vae") and self.vae is not None else 16
)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.default_sample_size = 64  # due to patchify=> 128,128 => res of 1k,1k
self.default_sample_size = 64 # due to patchify=> 128,128 => res of 1k,1k

# T5 is sensitive to precision so we use the precision used for precompute and cast as needed

if self.vae.config.shift_factor is None:
self.vae.config.shift_factor = 0
self.vae.config.shift_factor=0
self.vae.to(dtype=torch.float32)


def encode_prompt(
self,
prompt: Union[str, List[str]],
@@ -165,9 +164,7 @@ class BriaPipeline(FluxPipeline):

if do_classifier_free_guidance and negative_prompt_embeds is None:
if not is_ng_none(negative_prompt):
negative_prompt = (
batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
)
negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

if prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
@@ -207,6 +204,7 @@ class BriaPipeline(FluxPipeline):
def guidance_scale(self):
return self._guidance_scale

# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
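
As a one-line refresher of that weighting (editor's sketch; the same formula appears verbatim in the denoising loop further down):

import torch

def cfg(noise_uncond, noise_text, w):
    # w = 1 returns noise_text exactly, i.e. no classifier-free guidance.
    return noise_uncond + w * (noise_text - noise_uncond)

u, t = torch.zeros(2), torch.ones(2)
print(cfg(u, t, 1.0))  # tensor([1., 1.])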
@@ -248,8 +246,8 @@ class BriaPipeline(FluxPipeline):
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
callback_on_step_end_tensor_inputs: Optional[List[str]] = None,
max_sequence_length: int = 128,
clip_value: Union[None, float] = None,
normalize: bool = False,
clip_value:Union[None,float] = None,
normalize:bool = False
):
r"""
Function invoked when calling the pipeline for generation.
@@ -328,9 +326,7 @@ class BriaPipeline(FluxPipeline):
width = width or self.default_sample_size * self.vae_scale_factor

# 1. Check inputs. Raise error if not correct
callback_on_step_end_tensor_inputs = (
["latents"] if callback_on_step_end_tensor_inputs is None else callback_on_step_end_tensor_inputs
)
callback_on_step_end_tensor_inputs = ["latents"] if callback_on_step_end_tensor_inputs is None else callback_on_step_end_tensor_inputs
self.check_inputs(
prompt=prompt,
height=height,
@@ -354,9 +350,15 @@ class BriaPipeline(FluxPipeline):

device = self._execution_device

lora_scale = self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None
lora_scale = (
self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None
)

(prompt_embeds, negative_prompt_embeds, text_ids) = self.encode_prompt(
(
prompt_embeds,
negative_prompt_embeds,
text_ids
) = self.encode_prompt(
prompt=prompt,
negative_prompt=negative_prompt,
do_classifier_free_guidance=self.do_classifier_free_guidance,
@@ -371,8 +373,10 @@ class BriaPipeline(FluxPipeline):
if self.do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)

# 5. Prepare latent variables
num_channels_latents = self.transformer.config.in_channels // 4  # due to patch=2, we divide by 4
num_channels_latents = self.transformer.config.in_channels // 4 # due to patch=2, we divide by 4
latents, latent_image_ids = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
@@ -384,12 +388,9 @@ class BriaPipeline(FluxPipeline):
latents,
)

if (
isinstance(self.scheduler, FlowMatchEulerDiscreteScheduler)
and self.scheduler.config["use_dynamic_shifting"]
):
if isinstance(self.scheduler,FlowMatchEulerDiscreteScheduler) and self.scheduler.config['use_dynamic_shifting']:
sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
image_seq_len = latents.shape[1]  # Shift by height - Why just height?
image_seq_len = latents.shape[1] # Shift by height - Why just height?
print(f"Using dynamic shift in pipeline with sequence length {image_seq_len}")

mu = calculate_shift(
@@ -410,26 +411,19 @@ class BriaPipeline(FluxPipeline):
else:
# 4. Prepare timesteps
# Sample from training sigmas
if isinstance(self.scheduler, DDIMScheduler) or isinstance(self.scheduler, EulerAncestralDiscreteScheduler):
timesteps, num_inference_steps = retrieve_timesteps(
self.scheduler, num_inference_steps, device, None, None
)
if isinstance(self.scheduler,DDIMScheduler) or isinstance(self.scheduler,EulerAncestralDiscreteScheduler):
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, None, None)
else:
sigmas = get_original_sigmas(
num_train_timesteps=self.scheduler.config.num_train_timesteps,
num_inference_steps=num_inference_steps,
)
timesteps, num_inference_steps = retrieve_timesteps(
self.scheduler, num_inference_steps, device, timesteps, sigmas=sigmas
)
sigmas = get_original_sigmas(num_train_timesteps=self.scheduler.config.num_train_timesteps,num_inference_steps=num_inference_steps)
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps,sigmas=sigmas)
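
get_original_sigmas is not shown in this diff; a hedged sketch of what "sample from training sigmas" plausibly means (evenly subsampling the training schedule -- an assumption, not the helper's verified implementation):

import numpy as np

def original_sigmas_sketch(num_train_timesteps, num_inference_steps):
    # Take num_inference_steps sigmas, evenly spaced over the training schedule.
    train_sigmas = np.linspace(1.0, 1.0 / num_train_timesteps, num_train_timesteps)
    idx = np.linspace(0, num_train_timesteps - 1, num_inference_steps).round().astype(int)
    return train_sigmas[idx]

print(original_sigmas_sketch(1000, 4))  # 4 sigmas spanning the training range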

num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
self._num_timesteps = len(timesteps)

# Support different diffusers versions
if diffusers.__version__ >= "0.32.0":
latent_image_ids = latent_image_ids[0]
text_ids = text_ids[0]
if diffusers.__version__>='0.32.0':
latent_image_ids=latent_image_ids[0]
text_ids=text_ids[0]

# 6. Denoising loop
with self.progress_bar(total=num_inference_steps) as progress_bar:
@@ -463,11 +457,11 @@ class BriaPipeline(FluxPipeline):
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)

if normalize:
noise_pred = noise_pred * (0.7 * (cfg_noise_pred_text / noise_pred.std())) + 0.3 * noise_pred
noise_pred = noise_pred * (0.7 *(cfg_noise_pred_text/noise_pred.std())) + 0.3 * noise_pred

if clip_value:
assert clip_value > 0
noise_pred = noise_pred.clip(-clip_value, clip_value)
assert clip_value>0
noise_pred = noise_pred.clip(-clip_value,clip_value)

# compute the previous noisy sample x_t -> x_t-1
latents_dtype = latents.dtype
@@ -548,6 +542,7 @@ class BriaPipeline(FluxPipeline):
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)

if max_sequence_length is not None and max_sequence_length > 512:
raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")

@@ -558,11 +553,13 @@ class BriaPipeline(FluxPipeline):
for block in self.text_encoder.encoder.block:
block.layer[-1].DenseReluDense.wo.to(dtype=torch.float32)

if self.vae.config.shift_factor == 0 and self.vae.dtype != torch.float32:
if self.vae.config.shift_factor == 0 and self.vae.dtype!=torch.float32:
self.vae.to(dtype=torch.float32)

return self

def prepare_latents(
self,
batch_size,
@@ -577,7 +574,7 @@ class BriaPipeline(FluxPipeline):
# VAE applies 8x compression on images but we must also account for packing which requires
# latent height and width to be divisible by 2.
height = 2 * (int(height) // self.vae_scale_factor)
width = 2 * (int(width) // self.vae_scale_factor)
width = 2 * (int(width) // self.vae_scale_factor )
shape = (batch_size, num_channels_latents, height, width)
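
Worked example of the rounding above (editor's illustration; vae_scale_factor is 16 here, matching the hard-coded value in the standalone prepare_latents later in this diff):

vae_scale_factor = 16
height = 2 * (int(1000) // vae_scale_factor)  # a requested 1000 px edge
print(height)  # 124 latent cells -- always even, so the 2x2 packing stays valid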

@@ -634,3 +631,10 @@ class BriaPipeline(FluxPipeline):
)

return latent_image_ids.to(device=device, dtype=dtype)


@@ -144,9 +144,7 @@ class BriaControlNetPipeline(BriaPipeline):

return control_image, control_mode

def prepare_multi_control(
self, control_image, width, height, batch_size, num_images_per_prompt, device, control_mode
):
def prepare_multi_control(self, control_image, width, height, batch_size, num_images_per_prompt, device, control_mode):
num_channels_latents = self.transformer.config.in_channels // 4
control_images = []
for _, control_image_ in enumerate(control_image):
@@ -327,9 +325,7 @@ class BriaControlNetPipeline(BriaPipeline):
)

# 1. Check inputs. Raise error if not correct
callback_on_step_end_tensor_inputs = (
["latents"] if callback_on_step_end_tensor_inputs is None else callback_on_step_end_tensor_inputs
)
callback_on_step_end_tensor_inputs = ["latents"] if callback_on_step_end_tensor_inputs is None else callback_on_step_end_tensor_inputs
self.check_inputs(
prompt,
height,
@@ -347,13 +343,12 @@ class BriaControlNetPipeline(BriaPipeline):

device = self._execution_device

# 4. Prepare timesteps
if (
isinstance(self.scheduler, FlowMatchEulerDiscreteScheduler)
and self.scheduler.config["use_dynamic_shifting"]
):
if isinstance(self.scheduler,FlowMatchEulerDiscreteScheduler) and self.scheduler.config['use_dynamic_shifting']:
sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)

# Determine image sequence length
if control_image is not None:
if isinstance(control_image, list):
@@ -401,9 +396,9 @@ class BriaControlNetPipeline(BriaPipeline):
control_guidance_end=control_guidance_end,
)

if diffusers.__version__ >= "0.32.0":
latent_image_ids = latent_image_ids[0]
text_ids = text_ids[0]
if diffusers.__version__>='0.32.0':
latent_image_ids=latent_image_ids[0]
text_ids=text_ids[0]

if self.do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
@@ -430,9 +425,7 @@ class BriaControlNetPipeline(BriaPipeline):
if isinstance(controlnet_conditioning_scale, list):
cond_scale = controlnet_conditioning_scale
else:
cond_scale = [
c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i], strict=False)
]
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i], strict=False)]
else:
controlnet_cond_scale = controlnet_conditioning_scale
if isinstance(controlnet_cond_scale, list):
@@ -515,17 +508,17 @@ class BriaControlNetPipeline(BriaPipeline):

def encode_prompt(
prompt: Union[str, List[str]],
tokenizer: T5TokenizerFast,
text_encoder: T5EncoderModel,
device: Optional[torch.device] = None,
num_images_per_prompt: int = 1,
negative_prompt: Optional[Union[str, List[str]]] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
max_sequence_length: int = 128,
lora_scale: Optional[float] = None,
):
prompt: Union[str, List[str]],
tokenizer: T5TokenizerFast,
text_encoder: T5EncoderModel,
device: Optional[torch.device] = None,
num_images_per_prompt: int = 1,
negative_prompt: Optional[Union[str, List[str]]] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
max_sequence_length: int = 128,
lora_scale: Optional[float] = None,
):
r"""

Args:
@@ -626,7 +619,7 @@ def prepare_latents(
# latent height and width to be divisible by 2.
vae_scale_factor = 16
height = 2 * (int(height) // vae_scale_factor)
width = 2 * (int(width) // vae_scale_factor)
width = 2 * (int(width) // vae_scale_factor )

shape = (batch_size, num_channels_latents, height, width)

@@ -663,6 +656,8 @@ def _prepare_latent_image_ids(batch_size, height, width, device, dtype):
return latent_image_ids.to(device=device, dtype=dtype)


def _pack_latents(latents, batch_size, num_channels_latents, height, width):
latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
latents = latents.permute(0, 2, 4, 1, 3, 5)

@@ -56,7 +56,6 @@ class BriaControlNetDiffusersModel(GenericDiffusersLoader):

return result

@ModelLoaderRegistry.register(base=BaseModelType.Bria, type=ModelType.Main, format=ModelFormat.Diffusers)
class BriaDiffusersModel(GenericDiffusersLoader):
"""Class to load Bria main models."""

@@ -1,145 +0,0 @@
"""Utility functions for extracting metadata from LoRA model files."""

import json
import logging
from pathlib import Path
from typing import Any, Dict, Optional, Set, Tuple

from PIL import Image

from invokeai.app.util.thumbnails import make_thumbnail
from invokeai.backend.model_manager.config import AnyModelConfig, ModelType

logger = logging.getLogger(__name__)


def extract_lora_metadata(
model_path: Path, model_key: str, model_images_path: Path
) -> Tuple[Optional[str], Optional[Set[str]]]:
"""
Extract metadata for a LoRA model from associated JSON and image files.

Args:
model_path: Path to the LoRA model file
model_key: Unique key for the model
model_images_path: Path to the model images directory

Returns:
Tuple of (description, trigger_phrases)
"""
model_stem = model_path.stem
model_dir = model_path.parent

# Find and process preview image
_process_preview_image(model_stem, model_dir, model_key, model_images_path)

# Extract metadata from JSON
description, trigger_phrases = _extract_json_metadata(model_stem, model_dir)

return description, trigger_phrases


def _process_preview_image(model_stem: str, model_dir: Path, model_key: str, model_images_path: Path) -> bool:
"""Find and process a preview image for the model, saving it to the model images store."""
image_extensions = [".png", ".jpg", ".jpeg", ".webp"]

for ext in image_extensions:
image_path = model_dir / f"{model_stem}{ext}"
if image_path.exists():
try:
# Open the image
with Image.open(image_path) as img:
# Create thumbnail and save to model images directory
thumbnail = make_thumbnail(img, 256)
thumbnail_path = model_images_path / f"{model_key}.webp"
thumbnail.save(thumbnail_path, format="webp")

logger.info(f"Processed preview image {image_path.name} for model {model_key}")
return True

except Exception as e:
logger.warning(f"Failed to process preview image {image_path.name}: {e}")
return False

return False


def _extract_json_metadata(model_stem: str, model_dir: Path) -> Tuple[Optional[str], Optional[Set[str]]]:
"""Extract metadata from a JSON file with the same name as the model."""
json_path = model_dir / f"{model_stem}.json"

if not json_path.exists():
return None, None

try:
with open(json_path, "r", encoding="utf-8") as f:
metadata = json.load(f)

# Extract description
description = _build_description(metadata)

# Extract trigger phrases
trigger_phrases = _extract_trigger_phrases(metadata)

if description or trigger_phrases:
logger.info(f"Applied metadata from {json_path.name}")

return description, trigger_phrases

except (json.JSONDecodeError, IOError, Exception) as e:
logger.warning(f"Failed to read metadata from {json_path}: {e}")
return None, None


def _build_description(metadata: Dict[str, Any]) -> Optional[str]:
"""Build a description from metadata fields."""
description_parts = []

if description := metadata.get("description"):
description_parts.append(str(description).strip())

if notes := metadata.get("notes"):
description_parts.append(str(notes).strip())

return " | ".join(description_parts) if description_parts else None


def _extract_trigger_phrases(metadata: Dict[str, Any]) -> Optional[Set[str]]:
"""Extract trigger phrases from metadata."""
if not (activation_text := metadata.get("activation text")):
return None

activation_text = str(activation_text).strip()
if not activation_text:
return None

# Split on commas and clean up each phrase
phrases = [phrase.strip() for phrase in activation_text.split(",") if phrase.strip()]

return set(phrases) if phrases else None


def apply_lora_metadata(info: AnyModelConfig, model_path: Path, model_images_path: Path) -> None:
"""
Apply extracted metadata to a LoRA model configuration.

Args:
info: The model configuration to update
model_path: Path to the LoRA model file
model_images_path: Path to the model images directory
"""
# Only process LoRA models
if info.type != ModelType.LoRA:
return

# Extract and apply metadata
description, trigger_phrases = extract_lora_metadata(model_path, info.key, model_images_path)

# We don't set cover_image path in the config anymore since images are stored
# separately in the model images store by model key

if description:
info.description = description

if trigger_phrases:
info.trigger_phrases = trigger_phrases
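
For context on what this commit removes, a hypothetical call sketch (editor's illustration; the paths and model key are invented):

from pathlib import Path

description, triggers = extract_lora_metadata(
    model_path=Path("loras/my_style.safetensors"),
    model_key="abc123",
    model_images_path=Path("model_images"),
)
# description comes from the sidecar JSON's "description"/"notes" fields;
# triggers from its "activation text", split on commas.
print(description, triggers)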

@@ -711,8 +711,7 @@
"gaussianBlur": "Gaußsche Unschärfe",
"sendToUpscale": "An Hochskalieren senden",
"useCpuNoise": "CPU-Rauschen verwenden",
"sendToCanvas": "An Leinwand senden",
"disabledNoRasterContent": "Deaktiviert (kein Rasterinhalt)"
"sendToCanvas": "An Leinwand senden"
},
"settings": {
"displayInProgress": "Zwischenbilder anzeigen",
@@ -790,10 +789,7 @@
"pasteSuccess": "Eingefügt in {{destination}}",
"pasteFailed": "Einfügen fehlgeschlagen",
"unableToCopy": "Kopieren nicht möglich",
"unableToCopyDesc_theseSteps": "diese Schritte",
"noRasterLayers": "Keine Rasterebenen gefunden",
"noActiveRasterLayers": "Keine aktiven Rasterebenen",
"noVisibleRasterLayers": "Keine sichtbaren Rasterebenen"
"unableToCopyDesc_theseSteps": "diese Schritte"
},
"accessibility": {
"uploadImage": "Bild hochladen",
@@ -851,10 +847,7 @@
"assetsWithCount_one": "{{count}} in der Sammlung",
"assetsWithCount_other": "{{count}} in der Sammlung",
"deletedBoardsCannotbeRestored": "Gelöschte Ordner können nicht wiederhergestellt werden. Die Auswahl von \"Nur Ordner löschen\" verschiebt Bilder in einen unkategorisierten Zustand.",
"updateBoardError": "Fehler beim Aktualisieren des Ordners",
"uncategorizedImages": "Nicht kategorisierte Bilder",
"deleteAllUncategorizedImages": "Alle nicht kategorisierten Bilder löschen",
"deletedImagesCannotBeRestored": "Gelöschte Bilder können nicht wiederhergestellt werden."
"updateBoardError": "Fehler beim Aktualisieren des Ordners"
},
"queue": {
"status": "Status",
@@ -1201,9 +1194,6 @@
"Die Kantengröße des Kohärenzdurchlaufs."
],
"heading": "Kantengröße"
},
"rasterLayer": {
"heading": "Rasterebene"
}
},
"invocationCache": {
@@ -1441,10 +1431,7 @@
"autoLayout": "Auto Layout",
"copyShareLink": "Teilen-Link kopieren",
"download": "Herunterladen",
"convertGraph": "Graph konvertieren",
"filterByTags": "Nach Tags filtern",
"yourWorkflows": "Ihre Arbeitsabläufe",
"recentlyOpened": "Kürzlich geöffnet"
"convertGraph": "Graph konvertieren"
},
"sdxl": {
"concatPromptStyle": "Verknüpfen von Prompt & Stil",
@@ -1457,15 +1444,7 @@
"prompt": {
"noMatchingTriggers": "Keine passenden Trigger",
"addPromptTrigger": "Prompt-Trigger hinzufügen",
"compatibleEmbeddings": "Kompatible Einbettungen",
"replace": "Ersetzen",
"insert": "Einfügen",
"discard": "Verwerfen",
"generateFromImage": "Prompt aus Bild generieren",
"expandCurrentPrompt": "Aktuelle Prompt erweitern",
"uploadImageForPromptGeneration": "Bild zur Prompt-Generierung hochladen",
"expandingPrompt": "Prompt wird erweitert...",
"resultTitle": "Prompt-Erweiterung abgeschlossen"
"compatibleEmbeddings": "Kompatible Einbettungen"
},
"ui": {
"tabs": {
@@ -1594,30 +1573,30 @@
"newGlobalReferenceImage": "Neues globales Referenzbild",
"newRegionalReferenceImage": "Neues regionales Referenzbild",
"newControlLayer": "Neue Kontroll-Ebene",
"newRasterLayer": "Neue Rasterebene"
"newRasterLayer": "Neue Raster-Ebene"
},
"rectangle": "Rechteck",
"saveCanvasToGallery": "Leinwand in Galerie speichern",
"newRasterLayerError": "Problem beim Erstellen einer Rasterebene",
"newRasterLayerError": "Problem beim Erstellen einer Raster-Ebene",
"saveLayerToAssets": "Ebene in Galerie speichern",
"deleteReferenceImage": "Referenzbild löschen",
"referenceImage": "Referenzbild",
"opacity": "Opazität",
"removeBookmark": "Lesezeichen entfernen",
"rasterLayer": "Rasterebene",
"rasterLayers_withCount_visible": "Rasterebenen ({{count}})",
"rasterLayer": "Raster-Ebene",
"rasterLayers_withCount_visible": "Raster-Ebenen ({{count}})",
"controlLayers_withCount_visible": "Kontroll-Ebenen ({{count}})",
"deleteSelected": "Ausgewählte löschen",
"newRegionalReferenceImageError": "Problem beim Erstellen eines regionalen Referenzbilds",
"newControlLayerOk": "Kontroll-Ebene erstellt",
"newControlLayerError": "Problem beim Erstellen einer Kontroll-Ebene",
"newRasterLayerOk": "Rasterebene erstellt",
"newRasterLayerOk": "Raster-Layer erstellt",
"moveToFront": "Nach vorne bringen",
"copyToClipboard": "In die Zwischenablage kopieren",
"controlLayers_withCount_hidden": "Kontroll-Ebenen ({{count}} ausgeblendet)",
"clearCaches": "Cache leeren",
"controlLayer": "Kontroll-Ebene",
"rasterLayers_withCount_hidden": "Rasterebenen ({{count}} ausgeblendet)",
"rasterLayers_withCount_hidden": "Raster-Ebenen ({{count}} ausgeblendet)",
"transparency": "Transparenz",
"canvas": "Leinwand",
"global": "Global",
@@ -1703,14 +1682,7 @@
"filterType": "Filtertyp",
"filter": "Filter"
},
"bookmark": "Lesezeichen für Schnell-Umschalten",
"asRasterLayer": "Als $t(controlLayers.rasterLayer)",
"asRasterLayerResize": "Als $t(controlLayers.rasterLayer) (Größe anpassen)",
"rasterLayer_withCount_one": "$t(controlLayers.rasterLayer)",
"rasterLayer_withCount_other": "Rasterebenen",
"newRasterLayer": "Neue $t(controlLayers.rasterLayer)",
"showNonRasterLayers": "Nicht-Rasterebenen anzeigen (Umschalt+H)",
"hideNonRasterLayers": "Nicht-Rasterebenen ausblenden (Umschalt+H)"
"bookmark": "Lesezeichen für Schnell-Umschalten"
},
"upsell": {
"shareAccess": "Zugang teilen",

@@ -253,7 +253,6 @@
"cancel": "Cancel",
"cancelAllExceptCurrentQueueItemAlertDialog": "Canceling all queue items except the current one will stop pending items but allow the in-progress one to finish.",
"cancelAllExceptCurrentQueueItemAlertDialog2": "Are you sure you want to cancel all pending queue items?",
"cancelAllExceptCurrent": "Cancel All Except Current",
"cancelAllExceptCurrentTooltip": "Cancel All Except Current Item",
"cancelTooltip": "Cancel Current Item",
"cancelSucceeded": "Item Canceled",
@@ -274,7 +273,7 @@
"retryItem": "Retry Item",
"cancelBatchSucceeded": "Batch Canceled",
"cancelBatchFailed": "Problem Canceling Batch",
"clearQueueAlertDialog": "Clearing the queue immediately cancels any processing items and clears the queue entirely. Pending filters will be canceled and the Canvas Staging Area will be reset.",
"clearQueueAlertDialog": "Clearing the queue immediately cancels any processing items and clears the queue entirely. Pending filters will be canceled.",
"clearQueueAlertDialog2": "Are you sure you want to clear the queue?",
"current": "Current",
"next": "Next",
@@ -471,11 +470,6 @@
"togglePanels": {
"title": "Toggle Panels",
"desc": "Show or hide both left and right panels at once."
},
"selectGenerateTab": {
"title": "Select the Generate Tab",
"desc": "Selects the Generate tab.",
"key": "1"
}
},
"canvas": {
@@ -613,16 +607,6 @@
"fitBboxToMasks": {
"title": "Fit Bbox To Masks",
"desc": "Automatically adjust the generation bounding box to fit visible inpaint masks"
},
"applySegmentAnything": {
"title": "Apply Segment Anything",
"desc": "Apply the current Segment Anything mask.",
"key": "enter"
},
"cancelSegmentAnything": {
"title": "Cancel Segment Anything",
"desc": "Cancel the current Segment Anything operation.",
"key": "esc"
}
},
"workflows": {
@@ -752,10 +736,6 @@
"deleteSelection": {
"title": "Delete",
"desc": "Delete all selected images. By default, you will be prompted to confirm deletion. If the images are currently in use in the app, you will be warned."
},
"starImage": {
"title": "Star/Unstar Image",
"desc": "Star or unstar the selected image."
}
}
},
@@ -1234,8 +1214,6 @@
"modelIncompatibleBboxHeight": "Bbox height is {{height}} but {{model}} requires multiple of {{multiple}}",
"modelIncompatibleScaledBboxWidth": "Scaled bbox width is {{width}} but {{model}} requires multiple of {{multiple}}",
"modelIncompatibleScaledBboxHeight": "Scaled bbox height is {{height}} but {{model}} requires multiple of {{multiple}}",
"briaRequiresExactDimensions": "Bria requires exact {{size}}x{{size}} dimensions",
"briaRequiresExactScaledDimensions": "Bria requires exact {{size}}x{{size}} scaled dimensions",
"fluxModelMultipleControlLoRAs": "Can only use 1 Control LoRA at a time",
"fluxKontextMultipleReferenceImages": "Can only use 1 Reference Image at a time with Flux Kontext",
"canvasIsFiltering": "Canvas is busy (filtering)",
@@ -2187,13 +2165,7 @@
"balanced": "Balanced (recommended)",
"prompt": "Prompt",
"control": "Control",
"megaControl": "Mega Control",
"depth": "Depth",
"canny": "Canny",
"colorgrid": "Color Grid",
"recolor": "Recolor",
"tile": "Tile",
"pose": "Pose"
"megaControl": "Mega Control"
},
"ipAdapterMethod": {
"ipAdapterMethod": "Mode",
@@ -2639,10 +2611,9 @@
"whatsNew": {
"whatsNewInInvoke": "What's New in Invoke",
"items": [
"New setting to send all Canvas generations directly to the Gallery.",
"New Invert Mask (Shift+V) and Fit BBox to Mask (Shift+B) capabilities.",
"Expanded support for Model Thumbnails and configurations.",
"Various other quality of life updates and fixes"
"Generate images faster with new Launchpads and a simplified Generate tab.",
"Edit with prompts using Flux Kontext Dev.",
"Export to PSD, bulk-hide overlays, organize models & images — all in a reimagined interface built for control."
],
"readReleaseNotes": "Read Release Notes",
"watchRecentReleaseVideos": "Watch Recent Release Videos",

@@ -2375,8 +2375,65 @@
},
"supportVideos": {
"watch": "Regarder",
"videos": {
"upscaling": {
"description": "Comment améliorer la résolution des images avec les outils d'Invoke pour les agrandir.",
"title": "Upscaling"
},
"howDoIGenerateAndSaveToTheGallery": {
"description": "Étapes pour générer et enregistrer des images dans la galerie.",
"title": "Comment générer et enregistrer dans la galerie ?"
},
"usingControlLayersAndReferenceGuides": {
"title": "Utilisation des couches de contrôle et des guides de référence",
"description": "Apprenez à guider la création de vos images avec des couches de contrôle et des images de référence."
},
"exploringAIModelsAndConceptAdapters": {
"description": "Plongez dans les modèles d'IA et découvrez comment utiliser les adaptateurs de concepts pour un contrôle créatif.",
"title": "Exploration des modèles d'IA et des adaptateurs de concepts"
},
"howDoIUseControlNetsAndControlLayers": {
"title": "Comment utiliser les réseaux de contrôle et les couches de contrôle ?",
"description": "Apprenez à appliquer des couches de contrôle et des ControlNets à vos images."
},
"creatingAndComposingOnInvokesControlCanvas": {
"description": "Apprenez à composer des images en utilisant le canvas de contrôle d'Invoke.",
"title": "Créer et composer sur le canvas de contrôle d'Invoke"
},
"howDoIEditOnTheCanvas": {
"title": "Comment puis-je modifier sur la toile ?",
"description": "Guide pour éditer des images directement sur la toile."
},
"howDoIDoImageToImageTransformation": {
"title": "Comment effectuer une transformation d'image à image ?",
"description": "Tutoriel sur la réalisation de transformations d'image à image dans Invoke."
},
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
"title": "Comment utiliser les IP Adapters globaux et les images de référence ?",
"description": "Introduction à l'ajout d'images de référence et IP Adapters globaux."
},
"howDoIUseInpaintMasks": {
"title": "Comment utiliser les masques d'inpainting ?",
"description": "Comment appliquer des masques de retouche pour la correction et la variation d'image."
},
"creatingYourFirstImage": {
"title": "Créer votre première image",
"description": "Introduction à la création d'une image à partir de zéro en utilisant les outils d'Invoke."
},
"understandingImageToImageAndDenoising": {
"title": "Comprendre l'Image-à-Image et le Débruitage",
"description": "Aperçu des transformations d'image à image et du débruitage dans Invoke."
},
"howDoIOutpaint": {
"title": "Comment effectuer un outpainting ?",
"description": "Guide pour l'extension au-delà des bordures de l'image originale."
}
},
"gettingStarted": "Commencer",
"supportVideos": "Vidéos d'assistance"
"studioSessionsDesc1": "Consultez le <StudioSessionsPlaylistLink /> pour des approfondissements sur Invoke.",
"studioSessionsDesc2": "Rejoignez notre <DiscordLink /> pour participer aux sessions en direct et poser vos questions. Les sessions sont ajoutées dans la playlist la semaine suivante.",
"supportVideos": "Vidéos d'assistance",
"controlCanvas": "Contrôler la toile"
},
"modelCache": {
"clear": "Effacer le cache du modèle",

@@ -152,7 +152,7 @@
|
||||
"image": "immagine",
|
||||
"drop": "Rilascia",
|
||||
"unstarImage": "Rimuovi contrassegno immagine",
|
||||
"dropOrUpload": "Rilascia o carica",
|
||||
"dropOrUpload": "$t(gallery.drop) o carica",
|
||||
"starImage": "Contrassegna l'immagine",
|
||||
"dropToUpload": "$t(gallery.drop) per aggiornare",
|
||||
"bulkDownloadRequested": "Preparazione del download",
|
||||
@@ -197,8 +197,7 @@
|
||||
"boardsSettings": "Impostazioni Bacheche",
|
||||
"imagesSettings": "Impostazioni Immagini Galleria",
|
||||
"assets": "Risorse",
|
||||
"images": "Immagini",
|
||||
"useForPromptGeneration": "Usa per generare il prompt"
|
||||
"images": "Immagini"
|
||||
},
|
||||
"hotkeys": {
|
||||
"searchHotkeys": "Cerca tasti di scelta rapida",
|
||||
@@ -260,10 +259,6 @@
|
||||
"togglePanels": {
|
||||
"title": "Attiva/disattiva i pannelli",
|
||||
"desc": "Mostra o nascondi contemporaneamente i pannelli sinistro e destro."
|
||||
},
|
||||
"selectGenerateTab": {
|
||||
"title": "Seleziona la scheda Genera",
|
||||
"desc": "Seleziona la scheda Genera."
|
||||
}
|
||||
},
|
||||
"hotkeys": "Tasti di scelta rapida",
|
||||
@@ -384,32 +379,6 @@
|
||||
"applyTransform": {
|
||||
"title": "Applica trasformazione",
|
||||
"desc": "Applica la trasformazione in sospeso al livello selezionato."
|
||||
},
|
||||
"toggleNonRasterLayers": {
|
||||
"desc": "Mostra o nascondi tutte le categorie di livelli non raster (Livelli di controllo, Maschere di Inpaint, Guida regionale).",
|
||||
"title": "Attiva/disattiva livelli non raster"
|
||||
},
|
||||
"settings": {
|
||||
"behavior": "Comportamento",
|
||||
"display": "Mostra",
|
||||
"grid": "Griglia"
|
||||
},
|
||||
"invertMask": {
|
||||
"title": "Inverti maschera",
|
||||
"desc": "Inverte la maschera di inpaint selezionata, creando una nuova maschera con trasparenza opposta."
|
||||
},
|
||||
"fitBboxToMasks": {
|
||||
"title": "Adatta il riquadro di delimitazione alle maschere",
|
||||
"desc": "Regola automaticamente il riquadro di delimitazione della generazione per adattarlo alle maschere di inpaint visibili"
|
||||
},
|
||||
"applySegmentAnything": {
|
||||
"title": "Applica Segment Anything",
|
||||
"desc": "Applica la maschera Segment Anything corrente.",
|
||||
"key": "invio"
|
||||
},
|
||||
"cancelSegmentAnything": {
|
||||
"title": "Annulla Segment Anything",
|
||||
"desc": "Annulla l'operazione Segment Anything corrente."
|
||||
}
|
||||
},
|
||||
"workflows": {
|
||||
@@ -654,7 +623,7 @@
|
||||
"installingXModels_one": "Installazione di {{count}} modello",
|
||||
"installingXModels_many": "Installazione di {{count}} modelli",
|
||||
"installingXModels_other": "Installazione di {{count}} modelli",
|
||||
"includesNModels": "Include {{n}} modelli e le loro dipendenze.",
|
||||
"includesNModels": "Include {{n}} modelli e le loro dipendenze",
|
||||
"starterBundleHelpText": "Installa facilmente tutti i modelli necessari per iniziare con un modello base, tra cui un modello principale, controlnet, adattatori IP e altro. Selezionando un pacchetto salterai tutti i modelli che hai già installato.",
|
||||
"noDefaultSettings": "Nessuna impostazione predefinita configurata per questo modello. Visita Gestione Modelli per aggiungere impostazioni predefinite.",
|
||||
"defaultSettingsOutOfSync": "Alcune impostazioni non corrispondono a quelle predefinite del modello:",
|
||||
@@ -687,27 +656,7 @@
|
||||
"manageModels": "Gestione modelli",
|
||||
"hfTokenReset": "Ripristino del gettone HF",
|
||||
"relatedModels": "Modelli correlati",
|
||||
"showOnlyRelatedModels": "Correlati",
|
||||
"installedModelsCount": "{{installed}} di {{total}} modelli installati.",
|
||||
"allNModelsInstalled": "Tutti i {{count}} modelli installati",
|
||||
"nToInstall": "{{count}} da installare",
|
||||
"nAlreadyInstalled": "{{count}} già installati",
|
||||
"bundleAlreadyInstalled": "Pacchetto già installato",
|
||||
"bundleAlreadyInstalledDesc": "Tutti i modelli nel pacchetto {{bundleName}} sono già installati.",
|
||||
"launchpad": {
|
||||
"description": "Per utilizzare la maggior parte delle funzionalità della piattaforma, Invoke richiede l'installazione di modelli. Scegli tra le opzioni di installazione manuale o esplora i modelli di avvio selezionati.",
|
||||
"manualInstall": "Installazione manuale",
|
||||
"urlDescription": "Installa i modelli da un URL o da un percorso file locale. Perfetto per modelli specifici che desideri aggiungere.",
|
||||
"huggingFaceDescription": "Esplora e installa i modelli direttamente dai repository di HuggingFace.",
|
||||
"scanFolderDescription": "Esegui la scansione di una cartella locale per rilevare e installare automaticamente i modelli.",
|
||||
"recommendedModels": "Modelli consigliati",
|
||||
"exploreStarter": "Oppure sfoglia tutti i modelli iniziali disponibili",
|
||||
"welcome": "Benvenuti in Gestione Modelli",
|
||||
"quickStart": "Pacchetti di avvio rapido",
|
||||
"bundleDescription": "Ogni pacchetto include modelli essenziali per ogni famiglia di modelli e modelli base selezionati per iniziare.",
|
||||
"browseAll": "Oppure scopri tutti i modelli disponibili:"
|
||||
},
|
||||
"launchpadTab": "Rampa di lancio"
|
||||
"showOnlyRelatedModels": "Correlati"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Immagini",
|
||||
@@ -793,10 +742,7 @@
|
||||
"modelIncompatibleBboxHeight": "L'altezza del riquadro è {{height}} ma {{model}} richiede multipli di {{multiple}}",
|
||||
"modelIncompatibleScaledBboxWidth": "La larghezza scalata del riquadro è {{width}} ma {{model}} richiede multipli di {{multiple}}",
|
||||
"modelIncompatibleScaledBboxHeight": "L'altezza scalata del riquadro è {{height}} ma {{model}} richiede multipli di {{multiple}}",
|
||||
"modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Accedi alle impostazioni del tuo account per effettuare l'upgrade.",
|
||||
"fluxKontextMultipleReferenceImages": "È possibile utilizzare solo 1 immagine di riferimento alla volta con Flux Kontext",
|
||||
"promptExpansionResultPending": "Accetta o ignora il risultato dell'espansione del prompt",
|
||||
"promptExpansionPending": "Espansione del prompt in corso"
|
||||
"modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Accedi alle impostazioni del tuo account per effettuare l'upgrade."
|
||||
},
|
||||
"useCpuNoise": "Usa la CPU per generare rumore",
|
||||
"iterations": "Iterazioni",
|
||||
@@ -938,34 +884,7 @@
|
||||
"problemUnpublishingWorkflowDescription": "Si è verificato un problema durante l'annullamento della pubblicazione del flusso di lavoro. Riprova.",
|
||||
"workflowUnpublished": "Flusso di lavoro non pubblicato",
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supporta solo la conversione da testo a immagine e da immagine a immagine. Utilizza altri modelli per le attività di Inpainting e Outpainting.",
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} supporta solo la generazione da testo a immagine. Utilizza altri modelli per le attività di conversione da immagine a immagine, inpainting e outpainting.",
|
||||
"noRasterLayers": "Nessun livello raster trovato",
|
||||
"noRasterLayersDesc": "Crea almeno un livello raster da esportare in PSD",
|
||||
"noActiveRasterLayers": "Nessun livello raster attivo",
|
||||
"noActiveRasterLayersDesc": "Abilitare almeno un livello raster da esportare in PSD",
|
||||
"noVisibleRasterLayers": "Nessun livello raster visibile",
|
||||
"noVisibleRasterLayersDesc": "Abilitare almeno un livello raster da esportare in PSD",
|
||||
"invalidCanvasDimensions": "Dimensioni della tela non valide",
|
||||
"canvasTooLarge": "Tela troppo grande",
|
||||
"canvasTooLargeDesc": "Le dimensioni della tela superano le dimensioni massime consentite per l'esportazione in formato PSD. Riduci la larghezza e l'altezza totali della tela e riprova.",
|
||||
"failedToProcessLayers": "Impossibile elaborare i livelli",
|
||||
"psdExportSuccess": "Esportazione PSD completata",
|
||||
"psdExportSuccessDesc": "Esportazione riuscita di {{count}} livelli nel file PSD",
|
||||
"problemExportingPSD": "Problema durante l'esportazione PSD",
|
||||
"noValidLayerAdapters": "Nessun adattatore di livello valido trovato",
|
||||
"fluxKontextIncompatibleGenerationMode": "FLUX Kontext non supporta la generazione di immagini posizionate sulla tela. Riprova utilizzando la sezione Immagine di riferimento e disattiva tutti i livelli raster.",
|
||||
"canvasManagerNotAvailable": "Gestione tela non disponibile",
|
||||
"promptExpansionFailed": "Abbiamo riscontrato un problema. Riprova a eseguire l'espansione del prompt.",
|
||||
"uploadAndPromptGenerationFailed": "Impossibile caricare l'immagine e generare il prompt",
|
||||
"promptGenerationStarted": "Generazione del prompt avviata",
|
||||
"invalidBboxDesc": "Il riquadro di delimitazione non ha dimensioni valide",
|
||||
"invalidBbox": "Riquadro di delimitazione non valido",
|
||||
"noInpaintMaskSelectedDesc": "Seleziona una maschera di inpaint da invertire",
|
||||
"noInpaintMaskSelected": "Nessuna maschera di inpaint selezionata",
|
||||
"noVisibleMasksDesc": "Crea o abilita almeno una maschera inpaint da invertire",
|
||||
"noVisibleMasks": "Nessuna maschera visibile",
|
||||
"maskInvertFailed": "Impossibile invertire la maschera",
|
||||
"maskInverted": "Maschera invertita"
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} supporta solo la generazione da testo a immagine. Utilizza altri modelli per le attività di conversione da immagine a immagine, inpainting e outpainting."
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "Barra di avanzamento generazione",
|
||||
@@ -1160,10 +1079,7 @@
|
||||
"missingField_withName": "Campo \"{{name}}\" mancante",
|
||||
"unknownFieldEditWorkflowToFix_withName": "Il flusso di lavoro contiene un campo \"{{name}}\" sconosciuto .\nModifica il flusso di lavoro per risolvere il problema.",
|
||||
"unexpectedField_withName": "Campo \"{{name}}\" inaspettato",
|
||||
"missingSourceOrTargetHandle": "Identificatore del nodo sorgente o di destinazione mancante",
|
||||
"layout": {
|
||||
"alignmentDR": "In basso a destra"
|
||||
}
|
||||
"missingSourceOrTargetHandle": "Identificatore del nodo sorgente o di destinazione mancante"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Aggiungi automaticamente bacheca",
|
||||
@@ -1309,8 +1225,7 @@
"addLora": "Aggiungi LoRA",
"defaultVAE": "VAE predefinito",
"concepts": "Concetti",
"lora": "LoRA",
"noCompatibleLoRAs": "Nessun LoRA compatibile"
"lora": "LoRA"
},
"invocationCache": {
"disable": "Disabilita",
@@ -1768,20 +1683,6 @@
"paragraphs": [
"Controlla quale area viene modificata, in base all'intensità di riduzione del rumore."
]
},
"tileSize": {
"heading": "Dimensione riquadro",
"paragraphs": [
"Controlla la dimensione dei riquadri utilizzati durante il processo di ampliamento. Riquadri più grandi consumano più memoria, ma possono produrre risultati migliori.",
"I modelli SD1.5 hanno un valore predefinito di 768, mentre i modelli SDXL hanno un valore predefinito di 1024. Ridurre le dimensioni dei riquadri in caso di problemi di memoria."
]
},
"tileOverlap": {
"heading": "Sovrapposizione riquadri",
"paragraphs": [
"Controlla la sovrapposizione tra riquadri adiacenti durante l'ampliamento. Valori di sovrapposizione più elevati aiutano a ridurre le giunzioni visibili tra i riquadri, ma consuma più memoria.",
"Il valore predefinito di 128 è adatto alla maggior parte dei casi, ma è possibile modificarlo in base alle proprie esigenze specifiche e ai limiti di memoria."
]
}
},
"sdxl": {
@@ -1829,7 +1730,7 @@
"parameterSet": "Parametro {{parameter}} impostato",
"parsingFailed": "Analisi non riuscita",
"recallParameter": "Richiama {{label}}",
"canvasV2Metadata": "Livelli Tela",
"canvasV2Metadata": "Tela",
"guidance": "Guida",
"seamlessXAxis": "Asse X senza giunte",
"seamlessYAxis": "Asse Y senza giunte",
@@ -2000,16 +1901,7 @@
"prompt": {
"compatibleEmbeddings": "Incorporamenti compatibili",
"addPromptTrigger": "Aggiungi Trigger nel prompt",
"noMatchingTriggers": "Nessun Trigger corrispondente",
"discard": "Scarta",
"insert": "Inserisci",
"replace": "Sostituisci",
"resultSubtitle": "Scegli come gestire il prompt espanso:",
"resultTitle": "Espansione del prompt completata",
"expandingPrompt": "Espansione del prompt...",
"uploadImageForPromptGeneration": "Carica l'immagine per la generazione del prompt",
"expandCurrentPrompt": "Espandi il prompt corrente",
"generateFromImage": "Genera prompt dall'immagine"
"noMatchingTriggers": "Nessun Trigger corrispondente"
},
"controlLayers": {
"addLayer": "Aggiungi Livello",
@@ -2320,11 +2212,7 @@
"label": "Preserva la regione mascherata"
},
"isolatedLayerPreview": "Anteprima livello isolato",
"isolatedLayerPreviewDesc": "Se visualizzare solo questo livello quando si eseguono operazioni come il filtraggio o la trasformazione.",
"saveAllImagesToGallery": {
"alert": "Invia le nuove generazioni alla Galleria, bypassando la Tela",
"label": "Invia le nuove generazioni alla Galleria"
}
"isolatedLayerPreviewDesc": "Se visualizzare solo questo livello quando si eseguono operazioni come il filtraggio o la trasformazione."
},
"transform": {
"reset": "Reimposta",
@@ -2374,8 +2262,7 @@
"newRegionalGuidance": "Nuova Guida Regionale",
"copyToClipboard": "Copia negli appunti",
"copyCanvasToClipboard": "Copia la tela negli appunti",
"copyBboxToClipboard": "Copia il riquadro di delimitazione negli appunti",
"newResizedControlLayer": "Nuovo livello di controllo ridimensionato"
"copyBboxToClipboard": "Copia il riquadro di delimitazione negli appunti"
},
"newImg2ImgCanvasFromImage": "Nuova Immagine da immagine",
"copyRasterLayerTo": "Copia $t(controlLayers.rasterLayer) in",
@@ -2412,10 +2299,10 @@
"replaceCurrent": "Sostituisci corrente",
"mergeDown": "Unire in basso",
"mergingLayers": "Unione dei livelli",
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla galleria su questo livello, <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> oppure disegna sulla tela per iniziare.",
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello, <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> oppure disegna sulla tela per iniziare.",
"useImage": "Usa immagine",
"resetGenerationSettings": "Ripristina impostazioni di generazione",
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton> oppure trascina un'immagine dalla galleria su questa Immagine di riferimento.",
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton>, oppure <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> su questo livello.",
"asRasterLayer": "Come $t(controlLayers.rasterLayer)",
"asRasterLayerResize": "Come $t(controlLayers.rasterLayer) (Ridimensiona)",
"asControlLayer": "Come $t(controlLayers.controlLayer)",
@@ -2465,20 +2352,7 @@
"denoiseLimit": "Limite di riduzione del rumore",
"addImageNoise": "Aggiungi $t(controlLayers.imageNoise)",
"addDenoiseLimit": "Aggiungi $t(controlLayers.denoiseLimit)",
"imageNoise": "Rumore dell'immagine",
"exportCanvasToPSD": "Esporta la tela in PSD",
"ruleOfThirds": "Mostra la regola dei terzi",
"showNonRasterLayers": "Mostra livelli non raster (Shift+H)",
"hideNonRasterLayers": "Nascondi livelli non raster (Shift+H)",
"referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla galleria su questa immagine di riferimento o <PullBboxButton>trascina il riquadro di delimitazione in questa immagine di riferimento</PullBboxButton> per iniziare.",
"uploadOrDragAnImage": "Trascina un'immagine dalla galleria o <UploadButton>carica un'immagine</UploadButton>.",
"autoSwitch": {
"switchOnStart": "All'inizio",
"switchOnFinish": "Alla fine",
"off": "Spento"
},
"invertMask": "Inverti maschera",
"fitBboxToMasks": "Adatta il riquadro di delimitazione alle maschere"
"imageNoise": "Rumore dell'immagine"
},
"ui": {
"tabs": {
@@ -2492,55 +2366,6 @@
"upscaling": "Amplia",
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
"gallery": "Galleria"
},
"launchpad": {
"workflowsTitle": "Approfondisci i flussi di lavoro.",
"upscalingTitle": "Amplia e aggiungi dettagli.",
"canvasTitle": "Modifica e perfeziona sulla tela.",
"generateTitle": "Genera immagini da prompt testuali.",
"modelGuideText": "Vuoi scoprire quali prompt funzionano meglio per ciascun modello?",
"modelGuideLink": "Consulta la nostra guida ai modelli.",
"workflows": {
"description": "I flussi di lavoro sono modelli riutilizzabili che automatizzano le attività di generazione delle immagini, consentendo di eseguire rapidamente operazioni complesse e di ottenere risultati coerenti.",
"learnMoreLink": "Scopri di più sulla creazione di flussi di lavoro",
"browseTemplates": {
"title": "Sfoglia i modelli di flusso di lavoro",
"description": "Scegli tra flussi di lavoro predefiniti per le attività comuni"
},
"createNew": {
"title": "Crea un nuovo flusso di lavoro",
"description": "Avvia un nuovo flusso di lavoro da zero"
},
"loadFromFile": {
"title": "Carica flusso di lavoro da file",
"description": "Carica un flusso di lavoro per iniziare con una configurazione esistente"
}
},
"upscaling": {
|
||||
"uploadImage": {
|
||||
"title": "Carica l'immagine da ampliare",
|
||||
"description": "Fai clic o trascina un'immagine per ingrandirla (JPG, PNG, WebP fino a 100 MB)"
|
||||
},
|
||||
"replaceImage": {
|
||||
"title": "Sostituisci l'immagine corrente",
|
||||
"description": "Fai clic o trascina una nuova immagine per sostituire quella corrente"
|
||||
},
|
||||
"imageReady": {
|
||||
"title": "Immagine pronta",
|
||||
"description": "Premere Invoke per iniziare l'ampliamento"
|
||||
},
|
||||
"readyToUpscale": {
|
||||
"title": "Pronto per ampliare!",
|
||||
"description": "Configura le impostazioni qui sotto, quindi fai clic sul pulsante Invoke per iniziare ad ampliare l'immagine."
|
||||
},
|
||||
"upscaleModel": "Modello per l'ampliamento",
|
||||
"model": "Modello",
|
||||
"scale": "Scala",
|
||||
"helpText": {
|
||||
"promptAdvice": "Durante l'ampliamento, utilizza un prompt che descriva il mezzo e lo stile. Evita di descrivere dettagli specifici del contenuto dell'immagine.",
|
||||
"styleAdvice": "L'ampliamento funziona meglio con lo stile generale dell'immagine."
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"upscaling": {
|
||||
@@ -2561,10 +2386,7 @@
"exceedsMaxSizeDetails": "Il limite massimo di ampliamento è {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixel. Prova un'immagine più piccola o diminuisci la scala selezionata.",
"upscale": "Amplia",
"incompatibleBaseModel": "Architettura del modello principale non supportata per l'ampliamento",
"incompatibleBaseModelDesc": "L'ampliamento è supportato solo per i modelli di architettura SD1.5 e SDXL. Cambia il modello principale per abilitare l'ampliamento.",
"tileControl": "Controllo del riquadro",
"tileSize": "Dimensione del riquadro",
"tileOverlap": "Sovrapposizione riquadro"
"incompatibleBaseModelDesc": "L'ampliamento è supportato solo per i modelli di architettura SD1.5 e SDXL. Cambia il modello principale per abilitare l'ampliamento."
},
"upsell": {
"inviteTeammates": "Invita collaboratori",
@@ -2614,8 +2436,7 @@
"positivePromptColumn": "'prompt' o 'positive_prompt'",
"noTemplates": "Nessun modello",
"acceptedColumnsKeys": "Colonne/chiavi accettate:",
"promptTemplateCleared": "Modello di prompt cancellato",
"togglePromptPreviews": "Attiva/disattiva le anteprime dei prompt"
"promptTemplateCleared": "Modello di prompt cancellato"
},
"newUserExperience": {
"gettingStartedSeries": "Desideri maggiori informazioni? Consulta la nostra <LinkComponent>Getting Started Series</LinkComponent> per suggerimenti su come sfruttare appieno il potenziale di Invoke Studio.",
@@ -2631,9 +2452,8 @@
"watchRecentReleaseVideos": "Guarda i video su questa versione",
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
"items": [
"Genera immagini più velocemente con le nuove Rampe di lancio e una scheda Genera semplificata.",
"Modifica con prompt utilizzando Flux Kontext Dev.",
"Esporta in PSD, nascondi sovrapposizioni in blocco, organizza modelli e immagini: il tutto in un'interfaccia riprogettata e pensata per il controllo."
"Inpainting: livelli di rumore per maschera e limiti di denoise.",
"Canvas: proporzioni più intelligenti per SDXL e scorrimento e zoom migliorati."
]
},
"system": {
@@ -2665,18 +2485,64 @@
"supportVideos": {
"gettingStarted": "Iniziare",
"supportVideos": "Video di supporto",
"watch": "Guarda",
"studioSessionsDesc": "Unisciti al nostro <DiscordLink /> per partecipare alle sessioni live e porre domande. Le sessioni vengono caricate nella playlist la settimana successiva.",
"videos": {
"gettingStarted": {
"title": "Introduzione a Invoke",
"description": "Serie video completa che copre tutto ciò che devi sapere per iniziare a usare Invoke, dalla creazione della tua prima immagine alle tecniche avanzate."
"usingControlLayersAndReferenceGuides": {
"title": "Utilizzo di livelli di controllo e guide di riferimento",
"description": "Scopri come guidare la creazione delle tue immagini con livelli di controllo e immagini di riferimento."
},
"studioSessions": {
"title": "Sessioni in studio",
"description": "Sessioni approfondite che esplorano le funzionalità avanzate di Invoke, i flussi di lavoro creativi e le discussioni della community."
"creatingYourFirstImage": {
"description": "Introduzione alla creazione di un'immagine da zero utilizzando gli strumenti di Invoke.",
"title": "Creazione della tua prima immagine"
},
"understandingImageToImageAndDenoising": {
"description": "Panoramica delle trasformazioni immagine-a-immagine e della riduzione del rumore in Invoke.",
"title": "Comprendere immagine-a-immagine e riduzione del rumore"
},
"howDoIDoImageToImageTransformation": {
"description": "Tutorial su come eseguire trasformazioni da immagine a immagine in Invoke.",
"title": "Come si esegue la trasformazione da immagine-a-immagine?"
},
"howDoIUseInpaintMasks": {
|
||||
"title": "Come si usano le maschere Inpaint?",
|
||||
"description": "Come applicare maschere inpaint per la correzione e la variazione delle immagini."
|
||||
},
|
||||
"howDoIOutpaint": {
|
||||
"description": "Guida all'outpainting oltre i confini dell'immagine originale.",
|
||||
"title": "Come posso eseguire l'outpainting?"
|
||||
},
|
||||
"exploringAIModelsAndConceptAdapters": {
|
||||
"description": "Approfondisci i modelli di intelligenza artificiale e scopri come utilizzare gli adattatori concettuali per il controllo creativo.",
|
||||
"title": "Esplorazione dei modelli di IA e degli adattatori concettuali"
|
||||
},
|
||||
"upscaling": {
|
||||
"title": "Ampliamento",
|
||||
"description": "Come ampliare le immagini con gli strumenti di Invoke per migliorarne la risoluzione."
|
||||
},
|
||||
"creatingAndComposingOnInvokesControlCanvas": {
|
||||
"description": "Impara a comporre immagini utilizzando la tela di controllo di Invoke.",
|
||||
"title": "Creare e comporre sulla tela di controllo di Invoke"
|
||||
},
|
||||
"howDoIGenerateAndSaveToTheGallery": {
|
||||
"description": "Passaggi per generare e salvare le immagini nella galleria.",
|
||||
"title": "Come posso generare e salvare nella Galleria?"
|
||||
},
|
||||
"howDoIEditOnTheCanvas": {
|
||||
"title": "Come posso apportare modifiche sulla tela?",
|
||||
"description": "Guida alla modifica delle immagini direttamente sulla tela."
|
||||
},
|
||||
"howDoIUseControlNetsAndControlLayers": {
|
||||
"title": "Come posso utilizzare le Reti di Controllo e i Livelli di Controllo?",
|
||||
"description": "Impara ad applicare livelli di controllo e reti di controllo alle tue immagini."
|
||||
},
|
||||
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
|
||||
"title": "Come si utilizzano gli adattatori IP globali e le immagini di riferimento?",
|
||||
"description": "Introduzione all'aggiunta di immagini di riferimento e adattatori IP globali."
|
||||
}
|
||||
}
|
||||
},
|
||||
"controlCanvas": "Tela di Controllo",
|
||||
"watch": "Guarda",
|
||||
"studioSessionsDesc1": "Dai un'occhiata a <StudioSessionsPlaylistLink /> per approfondimenti su Invoke.",
|
||||
"studioSessionsDesc2": "Unisciti al nostro <DiscordLink /> per partecipare alle sessioni live e fare domande. Le sessioni vengono caricate sulla playlist la settimana successiva."
|
||||
},
|
||||
"modelCache": {
|
||||
"clear": "Cancella la cache del modello",
|
||||
|
||||
@@ -141,7 +141,7 @@
"loading": "ロード中",
"currentlyInUse": "この画像は現在下記の機能を使用しています:",
"drop": "ドロップ",
"dropOrUpload": "ドロップまたはアップロード",
"dropOrUpload": "$t(gallery.drop) またはアップロード",
"deleteImage_other": "画像 {{count}} 枚を削除",
"deleteImagePermanent": "削除された画像は復元できません。",
"download": "ダウンロード",
@@ -193,8 +193,7 @@
"images": "画像",
"assetsTab": "プロジェクトで使用するためにアップロードされたファイル。",
"imagesTab": "Invoke内で作成および保存された画像。",
"assets": "アセット",
"useForPromptGeneration": "プロンプト生成に使用する"
"assets": "アセット"
},
"hotkeys": {
"searchHotkeys": "ホットキーを検索",
@@ -364,16 +363,6 @@
"selectRectTool": {
"title": "矩形ツール",
"desc": "矩形ツールを選択します。"
},
"settings": {
"behavior": "行動",
"display": "ディスプレイ",
"grid": "グリッド",
"debug": "デバッグ"
},
"toggleNonRasterLayers": {
"title": "非ラスターレイヤーの切り替え",
"desc": "ラスター以外のレイヤー カテゴリ (コントロール レイヤー、インペイント マスク、地域ガイダンス) を表示または非表示にします。"
}
},
"workflows": {
@@ -641,7 +630,7 @@
"restoreDefaultSettings": "クリックするとモデルのデフォルト設定が使用されます.",
"hfTokenSaved": "ハギングフェイストークンを保存しました",
"imageEncoderModelId": "画像エンコーダーモデルID",
"includesNModels": "{{n}}個のモデルとこれらの依存関係を含みます。",
"includesNModels": "{{n}}個のモデルとこれらの依存関係を含みます",
"learnMoreAboutSupportedModels": "私たちのサポートしているモデルについて更に学ぶ",
"modelImageUpdateFailed": "モデル画像アップデート失敗",
"scanFolder": "スキャンフォルダ",
@@ -665,30 +654,7 @@
"manageModels": "モデル管理",
"hfTokenReset": "ハギングフェイストークンリセット",
"relatedModels": "関連のあるモデル",
"showOnlyRelatedModels": "関連している",
"installedModelsCount": "{{total}} モデルのうち {{installed}} 個がインストールされています。",
"allNModelsInstalled": "{{count}} 個のモデルがすべてインストールされています",
"nToInstall": "{{count}}個をインストールする",
"nAlreadyInstalled": "{{count}} 個すでにインストールされています",
"bundleAlreadyInstalled": "バンドルがすでにインストールされています",
"bundleAlreadyInstalledDesc": "{{bundleName}} バンドル内のすべてのモデルはすでにインストールされています。",
"launchpadTab": "ランチパッド",
"launchpad": {
"welcome": "モデルマネジメントへようこそ",
"description": "Invoke プラットフォームのほとんどの機能を利用するには、モデルのインストールが必要です。手動インストールオプションから選択するか、厳選されたスターターモデルをご覧ください。",
"manualInstall": "マニュアルインストール",
"urlDescription": "URLまたはローカルファイルパスからモデルをインストールします。特定のモデルを追加したい場合に最適です。",
"huggingFaceDescription": "HuggingFace リポジトリからモデルを直接参照してインストールします。",
"scanFolderDescription": "ローカルフォルダをスキャンしてモデルを自動的に検出し、インストールします。",
"recommendedModels": "推奨モデル",
"exploreStarter": "または、利用可能なすべてのスターターモデルを参照してください",
"quickStart": "クイックスタートバンドル",
"bundleDescription": "各バンドルには各モデルファミリーの必須モデルと、開始するための厳選されたベースモデルが含まれています。",
"browseAll": "または、利用可能なすべてのモデルを参照してください。",
"stableDiffusion15": "Stable Diffusion1.5",
"sdxl": "SDXL",
"fluxDev": "FLUX.1 dev"
}
"showOnlyRelatedModels": "関連している"
},
"parameters": {
"images": "画像",
@@ -754,10 +720,7 @@
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bboxの高さは{{height}}です",
"noFLUXVAEModelSelected": "FLUX生成にVAEモデルが選択されていません",
"noT5EncoderModelSelected": "FLUX生成にT5エンコーダモデルが選択されていません",
"modelDisabledForTrial": "{{modelName}} を使用した生成はトライアルアカウントではご利用いただけません.アカウント設定にアクセスしてアップグレードしてください。",
"fluxKontextMultipleReferenceImages": "Flux Kontext では一度に 1 つの参照画像しか使用できません",
"promptExpansionPending": "プロンプト拡張が進行中",
"promptExpansionResultPending": "プロンプト拡張結果を受け入れるか破棄してください"
"modelDisabledForTrial": "{{modelName}} を使用した生成はトライアルアカウントではご利用いただけません.アカウント設定にアクセスしてアップグレードしてください。"
},
"aspect": "縦横比",
"lockAspectRatio": "縦横比を固定",
@@ -912,26 +875,7 @@
"imageNotLoadedDesc": "画像を見つけられません",
"parameterNotSetDesc": "{{parameter}}を呼び出せません",
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4oは,テキストから画像への生成と画像から画像への生成のみをサポートしています.インペインティングおよび,アウトペインティングタスクには他のモデルを使用してください.",
"imagenIncompatibleGenerationMode": "Google {{model}} はテキストから画像への変換のみをサポートしています. 画像から画像への変換, インペインティング,アウトペインティングのタスクには他のモデルを使用してください.",
"noRasterLayers": "ラスターレイヤーが見つかりません",
"noRasterLayersDesc": "PSDにエクスポートするには、少なくとも1つのラスターレイヤーを作成します",
"noActiveRasterLayers": "アクティブなラスターレイヤーがありません",
"noActiveRasterLayersDesc": "PSD にエクスポートするには、少なくとも 1 つのラスター レイヤーを有効にします",
"noVisibleRasterLayers": "表示されるラスター レイヤーがありません",
"noVisibleRasterLayersDesc": "PSD にエクスポートするには、少なくとも 1 つのラスター レイヤーを有効にします",
"invalidCanvasDimensions": "キャンバスのサイズが無効です",
"canvasTooLarge": "キャンバスが大きすぎます",
"canvasTooLargeDesc": "キャンバスのサイズがPSDエクスポートの最大許容サイズを超えています。キャンバス全体の幅と高さを小さくしてから、もう一度お試しください。",
"failedToProcessLayers": "レイヤーの処理に失敗しました",
"psdExportSuccess": "PSDエクスポート完了",
"psdExportSuccessDesc": "{{count}} 個のレイヤーを PSD ファイルに正常にエクスポートしました",
"problemExportingPSD": "PSD のエクスポート中に問題が発生しました",
"canvasManagerNotAvailable": "キャンバスマネージャーは利用できません",
"noValidLayerAdapters": "有効なレイヤーアダプタが見つかりません",
"fluxKontextIncompatibleGenerationMode": "Flux Kontext はテキストから画像への変換のみをサポートしています。画像から画像への変換、インペインティング、アウトペインティングのタスクには他のモデルを使用してください。",
"promptGenerationStarted": "プロンプト生成が開始されました",
"uploadAndPromptGenerationFailed": "画像のアップロードとプロンプトの生成に失敗しました",
"promptExpansionFailed": "プロンプト拡張に失敗しました"
"imagenIncompatibleGenerationMode": "Google {{model}} はテキストから画像への変換のみをサポートしています. 画像から画像への変換, インペインティング,アウトペインティングのタスクには他のモデルを使用してください."
},
"accessibility": {
"invokeProgressBar": "進捗バー",
@@ -1070,8 +1014,7 @@
"lora": "LoRA",
"defaultVAE": "デフォルトVAE",
"noLoRAsInstalled": "インストールされているLoRAはありません",
"noRefinerModelsInstalled": "インストールされているSDXLリファイナーモデルはありません",
"noCompatibleLoRAs": "互換性のあるLoRAはありません"
"noRefinerModelsInstalled": "インストールされているSDXLリファイナーモデルはありません"
},
"nodes": {
"addNode": "ノードを追加",
@@ -1765,16 +1708,7 @@
"prompt": {
"addPromptTrigger": "プロンプトトリガーを追加",
"compatibleEmbeddings": "互換性のある埋め込み",
"noMatchingTriggers": "一致するトリガーがありません",
"generateFromImage": "画像からプロンプトを生成する",
"expandCurrentPrompt": "現在のプロンプトを展開",
"uploadImageForPromptGeneration": "プロンプト生成用の画像をアップロードする",
"expandingPrompt": "プロンプトを展開しています...",
"resultTitle": "プロンプト拡張完了",
"resultSubtitle": "拡張プロンプトの処理方法を選択します:",
"replace": "交換する",
"insert": "挿入する",
"discard": "破棄する"
"noMatchingTriggers": "一致するトリガーがありません"
},
"ui": {
"tabs": {
@@ -1782,61 +1716,7 @@
"canvas": "キャンバス",
"workflows": "ワークフロー",
"models": "モデル",
"gallery": "ギャラリー",
"generation": "生成",
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
"upscaling": "アップスケーリング",
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)"
},
"launchpad": {
"upscaling": {
"model": "モデル",
"scale": "スケール",
"helpText": {
"promptAdvice": "アップスケールする際は、媒体とスタイルを説明するプロンプトを使用してください。画像内の具体的なコンテンツの詳細を説明することは避けてください。",
"styleAdvice": "アップスケーリングは、画像の全体的なスタイルに最適です。"
},
"uploadImage": {
"title": "アップスケール用の画像をアップロードする",
"description": "アップスケールするには、画像をクリックまたはドラッグします(JPG、PNG、WebP、最大100MB)"
},
"replaceImage": {
"title": "現在の画像を置き換える",
"description": "新しい画像をクリックまたはドラッグして、現在の画像を置き換えます"
},
"imageReady": {
"title": "画像準備完了",
"description": "アップスケールを開始するにはInvokeを押してください"
},
"readyToUpscale": {
"title": "アップスケールの準備ができました!",
"description": "以下の設定を構成し、「Invoke」ボタンをクリックして画像のアップスケールを開始します。"
},
"upscaleModel": "アップスケールモデル"
},
"workflowsTitle": "ワークフローを詳しく見てみましょう。",
|
||||
"upscalingTitle": "アップスケールして詳細を追加します。",
|
||||
"canvasTitle": "キャンバス上で編集および調整します。",
|
||||
"generateTitle": "テキストプロンプトから画像を生成します。",
|
||||
"modelGuideText": "各モデルに最適なプロンプトを知りたいですか?",
|
||||
"modelGuideLink": "モデルガイドをご覧ください。",
|
||||
"workflows": {
|
||||
"description": "ワークフローは、画像生成タスクを自動化する再利用可能なテンプレートであり、複雑な操作を迅速に実行して一貫した結果を得ることができます。",
|
||||
"learnMoreLink": "ワークフローの作成について詳しく見る",
|
||||
"browseTemplates": {
|
||||
"title": "ワークフローテンプレートを参照する",
|
||||
"description": "一般的なタスク用にあらかじめ構築されたワークフローから選択する"
|
||||
},
|
||||
"createNew": {
|
||||
"title": "新規ワークフローを作成する",
|
||||
"description": "新しいワークフローをゼロから始める"
|
||||
},
|
||||
"loadFromFile": {
|
||||
"title": "ファイルからワークフローを読み込む",
|
||||
"description": "既存の設定から開始するためのワークフローをアップロードする"
|
||||
}
|
||||
}
|
||||
"gallery": "ギャラリー"
|
||||
}
|
||||
},
|
||||
"controlLayers": {
|
||||
@@ -1852,16 +1732,7 @@
"cropCanvasToBbox": "キャンバスをバウンディングボックスでクロップ",
"newGlobalReferenceImage": "新規全域参照画像",
"newRegionalReferenceImage": "新規領域参照画像",
"canvasGroup": "キャンバス",
"saveToGalleryGroup": "ギャラリーに保存",
"saveCanvasToGallery": "キャンバスをギャラリーに保存",
"saveBboxToGallery": "Bボックスをギャラリーに保存",
"newControlLayer": "新規コントロールレイヤー",
"newRasterLayer": "新規ラスターレイヤー",
"newInpaintMask": "新規インペイントマスク",
"copyToClipboard": "クリップボードにコピー",
"copyCanvasToClipboard": "キャンバスをクリップボードにコピー",
"copyBboxToClipboard": "Bボックスをクリップボードにコピー"
"canvasGroup": "キャンバス"
},
"regionalGuidance": "領域ガイダンス",
"globalReferenceImage": "全域参照画像",
@@ -1872,11 +1743,7 @@
"transform": "変形",
"apply": "適用",
"cancel": "キャンセル",
"reset": "リセット",
"fitMode": "フィットモード",
"fitModeContain": "含む",
"fitModeCover": "カバー",
"fitModeFill": "満たす"
"reset": "リセット"
},
"cropLayerToBbox": "レイヤーをバウンディングボックスでクロップ",
"convertInpaintMaskTo": "$t(controlLayers.inpaintMask)を変換",
@@ -1887,8 +1754,7 @@
"rectangle": "矩形",
"move": "移動",
"eraser": "消しゴム",
"bbox": "Bbox",
"view": "ビュー"
"bbox": "Bbox"
},
"saveCanvasToGallery": "キャンバスをギャラリーに保存",
"saveBboxToGallery": "バウンディングボックスをギャラリーへ保存",
@@ -1908,386 +1774,25 @@
"removeBookmark": "ブックマークを外す",
"savedToGalleryOk": "ギャラリーに保存しました",
"controlMode": {
"prompt": "プロンプト",
"controlMode": "コントロールモード",
"balanced": "バランス(推奨)",
"control": "コントロール",
"megaControl": "メガコントロール"
"prompt": "プロンプト"
},
"prompt": "プロンプト",
"settings": {
"snapToGrid": {
"off": "オフ",
"on": "オン",
"label": "グリッドにスナップ"
},
"preserveMask": {
"label": "マスクされた領域を保持",
"alert": "マスクされた領域の保存"
},
"isolatedStagingPreview": "分離されたステージングプレビュー",
"isolatedPreview": "分離されたプレビュー",
"isolatedLayerPreview": "分離されたレイヤーのプレビュー",
"isolatedLayerPreviewDesc": "フィルタリングや変換などの操作を実行するときに、このレイヤーのみを表示するかどうか。",
"invertBrushSizeScrollDirection": "ブラシサイズのスクロール反転",
"pressureSensitivity": "圧力感度"
"on": "オン"
}
},
"filter": {
|
||||
"filter": "フィルター",
|
||||
"spandrel_filter": {
|
||||
"model": "モデル",
|
||||
"label": "img2imgモデル",
|
||||
"description": "選択したレイヤーでimg2imgモデルを実行します。",
|
||||
"autoScale": "オートスケール",
|
||||
"autoScaleDesc": "選択したモデルは、目標スケールに達するまで実行されます。",
|
||||
"scale": "ターゲットスケール"
|
||||
"model": "モデル"
|
||||
},
|
||||
"apply": "適用",
|
||||
"reset": "リセット",
|
||||
"cancel": "キャンセル",
|
||||
"filters": "フィルター",
|
||||
"filterType": "フィルタータイプ",
|
||||
"autoProcess": "オートプロセス",
|
||||
"process": "プロセス",
|
||||
"advanced": "アドバンスド",
|
||||
"processingLayerWith": "{{type}} フィルターを使用した処理レイヤー。",
|
||||
"forMoreControl": "さらに細かく制御するには、以下の「詳細設定」をクリックしてください。",
|
||||
"canny_edge_detection": {
|
||||
"label": "キャニーエッジ検出",
|
||||
"description": "Canny エッジ検出アルゴリズムを使用して、選択したレイヤーからエッジ マップを生成します。",
|
||||
"low_threshold": "低閾値",
|
||||
"high_threshold": "高閾値"
|
||||
},
|
||||
"color_map": {
|
||||
"label": "カラーマップ",
|
||||
"description": "選択したレイヤーからカラーマップを作成します。",
|
||||
"tile_size": "タイルサイズ"
|
||||
},
|
||||
"content_shuffle": {
|
||||
"label": "コンテンツシャッフル",
|
||||
"description": "選択したレイヤーのコンテンツを、「液化」効果と同様にシャッフルします。",
|
||||
"scale_factor": "スケール係数"
|
||||
},
|
||||
"depth_anything_depth_estimation": {
|
||||
"label": "デプスエニシング",
|
||||
"description": "デプスエニシングモデルを使用して、選択したレイヤーから深度マップを生成します。",
|
||||
"model_size": "モデルサイズ",
|
||||
"model_size_small": "スモール",
|
||||
"model_size_small_v2": "スモールv2",
|
||||
"model_size_base": "ベース",
|
||||
"model_size_large": "ラージ"
|
||||
},
|
||||
"dw_openpose_detection": {
|
||||
"label": "DW オープンポーズ検出",
|
||||
"description": "DW Openpose モデルを使用して、選択したレイヤー内の人間のポーズを検出します。",
|
||||
"draw_hands": "手を描く",
|
||||
"draw_face": "顔を描く",
|
||||
"draw_body": "体を描く"
|
||||
},
|
||||
"hed_edge_detection": {
|
||||
"label": "HEDエッジ検出",
|
||||
"description": "HED エッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。",
|
||||
"scribble": "落書き"
|
||||
},
|
||||
"lineart_anime_edge_detection": {
|
||||
"label": "線画アニメのエッジ検出",
|
||||
"description": "線画アニメエッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。"
|
||||
},
|
||||
"lineart_edge_detection": {
|
||||
"label": "線画エッジ検出",
|
||||
"description": "線画エッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。",
|
||||
"coarse": "粗い"
|
||||
},
|
||||
"mediapipe_face_detection": {
|
||||
"label": "メディアパイプ顔検出",
|
||||
"description": "メディアパイプ顔検出モデルを使用して、選択したレイヤー内の顔を検出します。",
|
||||
"max_faces": "マックスフェイス",
|
||||
"min_confidence": "最小信頼度"
|
||||
},
|
||||
"mlsd_detection": {
|
||||
"label": "線分検出",
|
||||
"description": "MLSD 線分検出モデルを使用して、選択したレイヤーから線分マップを生成します。",
|
||||
"score_threshold": "スコア閾値",
|
||||
"distance_threshold": "距離閾値"
|
||||
},
|
||||
"normal_map": {
|
||||
"label": "ノーマルマップ",
|
||||
"description": "選択したレイヤーからノーマルマップを生成します。"
|
||||
},
|
||||
"pidi_edge_detection": {
|
||||
"label": "PiDiNetエッジ検出",
|
||||
"description": "PiDiNet エッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。",
|
||||
"scribble": "落書き",
|
||||
"quantize_edges": "エッジを量子化する"
|
||||
},
|
||||
"img_blur": {
|
||||
"label": "画像をぼかす",
|
||||
"description": "選択したレイヤーをぼかします。",
|
||||
"blur_type": "ぼかしの種類",
|
||||
"blur_radius": "半径",
|
||||
"gaussian_type": "ガウス分布",
|
||||
"box_type": "ボックス"
|
||||
},
|
||||
"img_noise": {
|
||||
"label": "ノイズ画像",
|
||||
"description": "選択したレイヤーにノイズを追加します。",
|
||||
"noise_type": "ノイズの種類",
|
||||
"noise_amount": "総計",
|
||||
"gaussian_type": "ガウス分布",
|
||||
"salt_and_pepper_type": "塩コショウ",
|
||||
"noise_color": "カラーノイズ",
|
||||
"size": "ノイズサイズ"
|
||||
},
|
||||
"adjust_image": {
|
||||
"label": "画像を調整する",
|
||||
"description": "画像の選択したチャンネルを調整します。",
|
||||
"channel": "チャンネル",
|
||||
"value_setting": "バリュー",
|
||||
"scale_values": "スケールバリュー",
|
||||
"red": "赤(RGBA)",
|
||||
"green": "緑(RGBA)",
|
||||
"blue": "青(RGBA)",
|
||||
"alpha": "アルファ(RGBA)",
|
||||
"cyan": "シアン(CMYK)",
|
||||
"magenta": "マゼンタ(CMYK)",
|
||||
"yellow": "黄色(CMYK)",
|
||||
"black": "黒(CMYK)",
|
||||
"hue": "色相(HSV)",
|
||||
"saturation": "彩度(HSV)",
|
||||
"value": "値(HSV)",
|
||||
"luminosity": "明度(LAB)",
|
||||
"a": "A(ラボ)",
|
||||
"b": "B(ラボ)",
|
||||
"y": "Y(YCbCr)",
|
||||
"cb": "Cb(YCbCr)",
|
||||
"cr": "Cr(YCbCr)"
|
||||
}
|
||||
"cancel": "キャンセル"
|
||||
},
|
||||
"weight": "重み",
|
||||
"bookmark": "クイックスイッチのブックマーク",
|
||||
"exportCanvasToPSD": "キャンバスをPSDにエクスポート",
|
||||
"savedToGalleryError": "ギャラリーへの保存中にエラーが発生しました",
|
||||
"regionCopiedToClipboard": "{{region}} をクリップボードにコピーしました",
|
||||
"copyRegionError": "{{region}} のコピー中にエラーが発生しました",
|
||||
"newGlobalReferenceImageOk": "作成されたグローバル参照画像",
|
||||
"newGlobalReferenceImageError": "グローバル参照イメージの作成中に問題が発生しました",
|
||||
"newRegionalReferenceImageOk": "地域参照画像の作成",
|
||||
"newRegionalReferenceImageError": "地域参照画像の作成中に問題が発生しました",
|
||||
"newControlLayerOk": "制御レイヤーの作成",
|
||||
"newControlLayerError": "制御層の作成中に問題が発生しました",
|
||||
"newRasterLayerOk": "ラスターレイヤーを作成しました",
|
||||
"newRasterLayerError": "ラスターレイヤーの作成中に問題が発生しました",
|
||||
"pullBboxIntoLayerOk": "Bbox をレイヤーにプル",
|
||||
"pullBboxIntoLayerError": "BBox をレイヤーにプルする際に問題が発生しました",
|
||||
"pullBboxIntoReferenceImageOk": "Bbox が ReferenceImage にプルされました",
|
||||
"pullBboxIntoReferenceImageError": "BBox を ReferenceImage にプルする際に問題が発生しました",
|
||||
"regionIsEmpty": "選択した領域は空です",
|
||||
"mergeVisible": "マージを可視化",
|
||||
"mergeVisibleOk": "マージされたレイヤー",
|
||||
"mergeVisibleError": "レイヤーの結合エラー",
|
||||
"mergingLayers": "レイヤーのマージ",
|
||||
"clearHistory": "履歴をクリア",
|
||||
"bboxOverlay": "Bboxオーバーレイを表示",
|
||||
"ruleOfThirds": "三分割法を表示",
|
||||
"newSession": "新しいセッション",
|
||||
"clearCaches": "キャッシュをクリア",
|
||||
"recalculateRects": "長方形を再計算する",
|
||||
"clipToBbox": "ストロークをBboxにクリップ",
|
||||
"outputOnlyMaskedRegions": "生成された領域のみを出力する",
|
||||
"width": "幅",
|
||||
"autoNegative": "オートネガティブ",
|
||||
"enableAutoNegative": "オートネガティブを有効にする",
|
||||
"disableAutoNegative": "オートネガティブを無効にする",
|
||||
"deletePrompt": "プロンプトを削除",
|
||||
"deleteReferenceImage": "参照画像を削除",
|
||||
"showHUD": "HUDを表示",
|
||||
"maskFill": "マスク塗りつぶし",
|
||||
"addPositivePrompt": "$t(controlLayers.prompt) を追加します",
|
||||
"addNegativePrompt": "$t(controlLayers.negativePrompt)を追加します",
|
||||
"addReferenceImage": "$t(controlLayers.referenceImage)を追加します",
|
||||
"addImageNoise": "$t(controlLayers.imageNoise)を追加します",
|
||||
"addRasterLayer": "$t(controlLayers.rasterLayer)を追加します",
|
||||
"addControlLayer": "$t(controlLayers.controlLayer)を追加します",
|
||||
"addInpaintMask": "$t(controlLayers.inpaintMask)を追加します",
|
||||
"addRegionalGuidance": "$t(controlLayers.regionalGuidance)を追加します",
|
||||
"addGlobalReferenceImage": "$t(controlLayers.globalReferenceImage)を追加します",
|
||||
"addDenoiseLimit": "$t(controlLayers.denoiseLimit)を追加します",
|
||||
"controlLayer": "コントロールレイヤー",
|
||||
"inpaintMask": "インペイントマスク",
|
||||
"referenceImageRegional": "参考画像(地域別)",
|
||||
"referenceImageGlobal": "参考画像(グローバル)",
|
||||
"asRasterLayer": "$t(controlLayers.rasterLayer) として",
|
||||
"asRasterLayerResize": "$t(controlLayers.rasterLayer) として (リサイズ)",
|
||||
"asControlLayer": "$t(controlLayers.controlLayer) として",
|
||||
"asControlLayerResize": "$t(controlLayers.controlLayer) として (リサイズ)",
|
||||
"referenceImage": "参照画像",
|
||||
"sendingToCanvas": "キャンバスに生成をのせる",
|
||||
"sendingToGallery": "生成をギャラリーに送る",
|
||||
"sendToGallery": "ギャラリーに送る",
|
||||
"sendToGalleryDesc": "Invokeを押すとユニークな画像が生成され、ギャラリーに保存されます。",
|
||||
"sendToCanvas": "キャンバスに送る",
|
||||
"newLayerFromImage": "画像から新規レイヤー",
|
||||
"newCanvasFromImage": "画像から新規キャンバス",
|
||||
"newImg2ImgCanvasFromImage": "画像からの新規 Img2Img",
|
||||
"copyToClipboard": "クリップボードにコピー",
|
||||
"sendToCanvasDesc": "Invokeを押すと、進行中の作品がキャンバス上にステージされます。",
|
||||
"viewProgressInViewer": "<Btn>画像ビューア</Btn>で進行状況と出力を表示します。",
|
||||
"viewProgressOnCanvas": "<Btn>キャンバス</Btn> で進行状況とステージ出力を表示します。",
|
||||
"rasterLayer_withCount_other": "ラスターレイヤー",
|
||||
"controlLayer_withCount_other": "コントロールレイヤー",
|
||||
"regionalGuidance_withCount_hidden": "地域ガイダンス({{count}} 件非表示)",
|
||||
"controlLayers_withCount_hidden": "コントロールレイヤー({{count}} 個非表示)",
|
||||
"rasterLayers_withCount_hidden": "ラスター レイヤー ({{count}} 個非表示)",
|
||||
"globalReferenceImages_withCount_hidden": "グローバル参照画像({{count}} 枚非表示)",
|
||||
"regionalGuidance_withCount_visible": "地域ガイダンス ({{count}})",
|
||||
"controlLayers_withCount_visible": "コントロールレイヤー ({{count}})",
|
||||
"rasterLayers_withCount_visible": "ラスターレイヤー({{count}})",
|
||||
"globalReferenceImages_withCount_visible": "グローバル参照画像 ({{count}})",
|
||||
"layer_other": "レイヤー",
|
||||
"layer_withCount_other": "レイヤー ({{count}})",
|
||||
"convertRasterLayerTo": "$t(controlLayers.rasterLayer) を変換する",
|
||||
"convertControlLayerTo": "$t(controlLayers.controlLayer) を変換する",
|
||||
"convertRegionalGuidanceTo": "$t(controlLayers.regionalGuidance) を変換する",
|
||||
"copyRasterLayerTo": "$t(controlLayers.rasterLayer)をコピーする",
|
||||
"copyControlLayerTo": "$t(controlLayers.controlLayer) をコピーする",
|
||||
"copyRegionalGuidanceTo": "$t(controlLayers.regionalGuidance)をコピーする",
|
||||
"newRasterLayer": "新しい $t(controlLayers.rasterLayer)",
|
||||
"newControlLayer": "新しい $t(controlLayers.controlLayer)",
|
||||
"newInpaintMask": "新しい $t(controlLayers.inpaintMask)",
|
||||
"newRegionalGuidance": "新しい $t(controlLayers.regionalGuidance)",
|
||||
"pasteTo": "貼り付け先",
|
||||
"pasteToAssets": "アセット",
|
||||
"pasteToAssetsDesc": "アセットに貼り付け",
|
||||
"pasteToBbox": "Bボックス",
|
||||
"pasteToBboxDesc": "新しいレイヤー(Bbox内)",
|
||||
"pasteToCanvas": "キャンバス",
|
||||
"pasteToCanvasDesc": "新しいレイヤー(キャンバス内)",
|
||||
"pastedTo": "{{destination}} に貼り付けました",
|
||||
"transparency": "透明性",
|
||||
"enableTransparencyEffect": "透明効果を有効にする",
|
||||
"disableTransparencyEffect": "透明効果を無効にする",
|
||||
"hidingType": "{{type}} を非表示",
|
||||
"showingType": "{{type}}を表示",
|
||||
"showNonRasterLayers": "非ラスターレイヤーを表示 (Shift+H)",
|
||||
"hideNonRasterLayers": "非ラスターレイヤーを非表示にする (Shift+H)",
|
||||
"dynamicGrid": "ダイナミックグリッド",
|
||||
"logDebugInfo": "デバッグ情報をログに記録する",
|
||||
"locked": "ロックされています",
|
||||
"unlocked": "ロック解除",
|
||||
"deleteSelected": "選択項目を削除",
|
||||
"stagingOnCanvas": "ステージング画像",
|
||||
"replaceLayer": "レイヤーの置き換え",
|
||||
"pullBboxIntoLayer": "Bboxをレイヤーに引き込む",
|
||||
"pullBboxIntoReferenceImage": "Bboxを参照画像に取り込む",
|
||||
"showProgressOnCanvas": "キャンバスに進捗状況を表示",
|
||||
"useImage": "画像を使う",
|
||||
"negativePrompt": "ネガティブプロンプト",
|
||||
"beginEndStepPercentShort": "開始/終了 %",
|
||||
"newGallerySession": "新しいギャラリーセッション",
|
||||
"newGallerySessionDesc": "これにより、キャンバスとモデル選択以外のすべての設定がクリアされます。生成した画像はギャラリーに送信されます。",
|
||||
"newCanvasSession": "新規キャンバスセッション",
|
||||
"newCanvasSessionDesc": "これにより、キャンバスとモデル選択以外のすべての設定がクリアされます。生成はキャンバス上でステージングされます。",
|
||||
"resetCanvasLayers": "キャンバスレイヤーをリセット",
|
||||
"resetGenerationSettings": "生成設定をリセット",
|
||||
"replaceCurrent": "現在のものを置き換える",
|
||||
"controlLayerEmptyState": "<UploadButton>画像をアップロード</UploadButton>、<GalleryButton>ギャラリー</GalleryButton>からこのレイヤーに画像をドラッグ、<PullBboxButton>境界ボックスをこのレイヤーにプル</PullBboxButton>、またはキャンバスに描画して開始します。",
|
||||
"referenceImageEmptyStateWithCanvasOptions": "開始するには、<UploadButton>画像をアップロード</UploadButton>するか、<GalleryButton>ギャラリー</GalleryButton>からこの参照画像に画像をドラッグするか、<PullBboxButton>境界ボックスをこの参照画像にプル</PullBboxButton>します。",
|
||||
"referenceImageEmptyState": "開始するには、<UploadButton>画像をアップロード</UploadButton>するか、<GalleryButton>ギャラリー</GalleryButton>からこの参照画像に画像をドラッグします。",
|
||||
"uploadOrDragAnImage": "ギャラリーから画像をドラッグするか、<UploadButton>画像をアップロード</UploadButton>します。",
|
||||
"imageNoise": "画像ノイズ",
|
||||
"denoiseLimit": "ノイズ除去制限",
|
||||
"warnings": {
|
||||
"problemsFound": "問題が見つかりました",
|
||||
"unsupportedModel": "選択したベースモデルではレイヤーがサポートされていません",
|
||||
"controlAdapterNoModelSelected": "制御レイヤーモデルが選択されていません",
|
||||
"controlAdapterIncompatibleBaseModel": "互換性のない制御レイヤーベースモデル",
|
||||
"controlAdapterNoControl": "コントロールが選択/描画されていません",
|
||||
"ipAdapterNoModelSelected": "参照画像モデルが選択されていません",
|
||||
"ipAdapterIncompatibleBaseModel": "互換性のない参照画像ベースモデル",
|
||||
"ipAdapterNoImageSelected": "参照画像が選択されていません",
|
||||
"rgNoPromptsOrIPAdapters": "テキストプロンプトや参照画像はありません",
|
||||
"rgNegativePromptNotSupported": "選択されたベースモデルでは否定プロンプトはサポートされていません",
|
||||
"rgReferenceImagesNotSupported": "選択されたベースモデルでは地域の参照画像はサポートされていません",
|
||||
"rgAutoNegativeNotSupported": "選択したベースモデルでは自動否定はサポートされていません",
|
||||
"rgNoRegion": "領域が描画されていません",
|
||||
"fluxFillIncompatibleWithControlLoRA": "コントロールLoRAはFLUX Fillと互換性がありません"
|
||||
},
|
||||
"errors": {
|
||||
"unableToFindImage": "画像が見つかりません",
|
||||
"unableToLoadImage": "画像を読み込めません"
|
||||
},
|
||||
"ipAdapterMethod": {
|
||||
"ipAdapterMethod": "モード",
|
||||
"full": "スタイルと構成",
|
||||
"fullDesc": "視覚スタイル (色、テクスチャ) と構成 (レイアウト、構造) を適用します。",
|
||||
"style": "スタイル(シンプル)",
|
||||
"styleDesc": "レイアウトを考慮せずに視覚スタイル(色、テクスチャ)を適用します。以前は「スタイルのみ」と呼ばれていました。",
|
||||
"composition": "構成のみ",
|
||||
"compositionDesc": "参照スタイルを無視してレイアウトと構造を複製します。",
|
||||
"styleStrong": "スタイル(ストロング)",
|
||||
"styleStrongDesc": "構成への影響をわずかに抑えて、強力なビジュアル スタイルを適用します。",
|
||||
"stylePrecise": "スタイル(正確)",
|
||||
"stylePreciseDesc": "被写体の影響を排除し、正確な視覚スタイルを適用します。"
|
||||
},
|
||||
"fluxReduxImageInfluence": {
|
||||
"imageInfluence": "イメージの影響力",
|
||||
"lowest": "最低",
|
||||
"low": "低",
|
||||
"medium": "中",
|
||||
"high": "高",
|
||||
"highest": "最高"
|
||||
},
|
||||
"fill": {
|
||||
"fillColor": "塗りつぶし色",
|
||||
"fillStyle": "塗りつぶしスタイル",
|
||||
"solid": "固体",
|
||||
"grid": "グリッド",
|
||||
"crosshatch": "クロスハッチ",
|
||||
"vertical": "垂直",
|
||||
"horizontal": "水平",
|
||||
"diagonal": "対角線"
|
||||
},
|
||||
"selectObject": {
|
||||
"selectObject": "オブジェクトを選択",
|
||||
"pointType": "ポイントタイプ",
|
||||
"invertSelection": "選択範囲を反転",
|
||||
"include": "含む",
|
||||
"exclude": "除外",
|
||||
"neutral": "ニュートラル",
|
||||
"apply": "適用",
|
||||
"reset": "リセット",
|
||||
"saveAs": "名前を付けて保存",
|
||||
"cancel": "キャンセル",
|
||||
"process": "プロセス",
|
||||
"help1": "ターゲットオブジェクトを1つ選択します。<Bold>含める</Bold>ポイントと<Bold>除外</Bold>ポイントを追加して、レイヤーのどの部分がターゲットオブジェクトの一部であるかを示します。",
|
||||
"help2": "対象オブジェクト内に<Bold>含める</Bold>ポイントを1つ選択するところから始めます。ポイントを追加して選択範囲を絞り込みます。ポイントが少ないほど、通常はより良い結果が得られます。",
|
||||
"help3": "選択を反転して、ターゲットオブジェクト以外のすべてを選択します。",
|
||||
"clickToAdd": "レイヤーをクリックしてポイントを追加します",
|
||||
"dragToMove": "ポイントをドラッグして移動します",
|
||||
"clickToRemove": "ポイントをクリックして削除します"
|
||||
},
|
||||
"HUD": {
|
||||
"bbox": "Bボックス",
|
||||
"scaledBbox": "スケールされたBボックス",
|
||||
"entityStatus": {
|
||||
"isFiltering": "{{title}} はフィルタリング中です",
|
||||
"isTransforming": "{{title}}は変化しています",
|
||||
"isLocked": "{{title}}はロックされています",
|
||||
"isHidden": "{{title}}は非表示になっています",
|
||||
"isDisabled": "{{title}}は無効です",
|
||||
"isEmpty": "{{title}} は空です"
|
||||
}
|
||||
},
|
||||
"stagingArea": {
|
||||
"accept": "受け入れる",
|
||||
"discardAll": "すべて破棄",
|
||||
"discard": "破棄する",
|
||||
"previous": "前へ",
|
||||
"next": "次へ",
|
||||
"saveToGallery": "ギャラリーに保存",
|
||||
"showResultsOn": "結果を表示",
|
||||
"showResultsOff": "結果を隠す"
|
||||
}
|
||||
"weight": "重み"
|
||||
},
|
||||
"stylePresets": {
|
||||
"clearTemplateSelection": "選択したテンプレートをクリア",
|
||||
@@ -2305,56 +1810,13 @@
"nameColumn": "'name'",
"type": "タイプ",
"private": "プライベート",
"name": "名称",
"active": "アクティブ",
"copyTemplate": "テンプレートをコピー",
"deleteImage": "画像を削除",
"deleteTemplate": "テンプレートを削除",
"deleteTemplate2": "このテンプレートを削除してもよろしいですか? 元に戻すことはできません。",
"exportPromptTemplates": "プロンプトテンプレートをエクスポートする(CSV)",
"editTemplate": "テンプレートを編集",
"exportDownloaded": "エクスポートをダウンロードしました",
"exportFailed": "生成とCSVのダウンロードができません",
"importTemplates": "プロンプトテンプレートのインポート(CSV/JSON)",
"acceptedColumnsKeys": "受け入れられる列/キー:",
"positivePromptColumn": "'プロンプト'または'ポジティブプロンプト'",
"insertPlaceholder": "プレースホルダーを挿入",
"negativePrompt": "ネガティブプロンプト",
"noTemplates": "テンプレートがありません",
"noMatchingTemplates": "マッチするテンプレートがありません",
"promptTemplatesDesc1": "プロンプトテンプレートは、プロンプトボックスに書き込むプロンプトにテキストを追加します。",
"promptTemplatesDesc2": "テンプレート内でプロンプトを含める場所を指定するには <Pre>{{placeholder}}</Pre> のプレースホルダーの文字列を使用します。",
"promptTemplatesDesc3": "プレースホルダーを省略すると、テンプレートはプロンプトの末尾に追加されます。",
"positivePrompt": "ポジティブプロンプト",
"shared": "共有",
|
||||
"sharedTemplates": "テンプレートを共有",
|
||||
"templateDeleted": "プロンプトテンプレートを削除しました",
|
||||
"unableToDeleteTemplate": "プロンプトテンプレートを削除できません",
|
||||
"updatePromptTemplate": "プロンプトテンプレートをアップデート",
|
||||
"useForTemplate": "プロンプトテンプレートに使用する",
|
||||
"viewList": "テンプレートリストを表示",
|
||||
"viewModeTooltip": "現在選択されているテンプレートでは、プロンプトはこのようになります。プロンプトを編集するには、テキストボックス内の任意の場所をクリックしてください。",
|
||||
"togglePromptPreviews": "プロンプトプレビューを切り替える"
|
||||
"name": "名称"
|
||||
},
|
||||
"upscaling": {
|
||||
"upscaleModel": "アップスケールモデル",
|
||||
"postProcessingModel": "ポストプロセスモデル",
|
||||
"upscale": "アップスケール",
|
||||
"scale": "スケール",
|
||||
"creativity": "創造性",
|
||||
"exceedsMaxSize": "アップスケール設定が最大サイズ制限を超えています",
|
||||
"exceedsMaxSizeDetails": "アップスケールの上限は{{max Upscale Dimension}} x {{max Upscale Dimension}}ピクセルです。画像を小さくするか、スケールの選択範囲を小さくしてください。",
|
||||
"structure": "構造",
|
||||
"postProcessingMissingModelWarning": "後処理 (img2img) モデルをインストールするには、<LinkComponent>モデル マネージャー</LinkComponent> にアクセスしてください。",
|
||||
"missingModelsWarning": "必要なモデルをインストールするには、<LinkComponent>モデル マネージャー</LinkComponent> にアクセスしてください。",
|
||||
"mainModelDesc": "メインモデル(SD1.5またはSDXLアーキテクチャ)",
|
||||
"tileControlNetModelDesc": "選択したメインモデルアーキテクチャのタイルコントロールネットモデル",
|
||||
"upscaleModelDesc": "アップスケール(img2img)モデル",
|
||||
"missingUpscaleInitialImage": "アップスケール用の初期画像がありません",
|
||||
"missingUpscaleModel": "アップスケールモデルがありません",
|
||||
"missingTileControlNetModel": "有効なタイル コントロールネットモデルがインストールされていません",
|
||||
"incompatibleBaseModel": "アップスケーリングにサポートされていないメインモデルアーキテクチャです",
|
||||
"incompatibleBaseModelDesc": "アップスケーリングはSD1.5およびSDXLアーキテクチャモデルでのみサポートされています。アップスケーリングを有効にするには、メインモデルを変更してください。"
|
||||
"scale": "スケール"
|
||||
},
|
||||
"sdxl": {
|
||||
"denoisingStrength": "ノイズ除去強度",
|
||||
@@ -2429,34 +1891,7 @@
"minimum": "最小",
"publish": "公開",
"unpublish": "非公開",
"publishedWorkflowInputs": "インプット",
"workflowLocked": "ワークフローがロックされました",
"workflowLockedPublished": "公開済みのワークフローは編集用にロックされています。\nワークフローを非公開にして編集したり、コピーを作成したりできます。",
"workflowLockedDuringPublishing": "公開の構成中にワークフローがロックされます。",
"selectOutputNode": "出力ノードを選択",
"changeOutputNode": "出力ノードの変更",
"unpublishableInputs": "これらの公開できない入力は省略されます",
"noPublishableInputs": "公開可能な入力はありません",
"noOutputNodeSelected": "出力ノードが選択されていません",
"cannotPublish": "ワークフローを公開できません",
"publishWarnings": "警告",
"errorWorkflowHasUnsavedChanges": "ワークフローに保存されていない変更があります",
"errorWorkflowHasUnpublishableNodes": "ワークフローにはバッチ、ジェネレータ、またはメタデータ抽出ノードがあります",
"errorWorkflowHasInvalidGraph": "ワークフロー グラフが無効です (詳細については [呼び出し] ボタンにマウスを移動してください)",
"errorWorkflowHasNoOutputNode": "出力ノードが選択されていません",
"warningWorkflowHasNoPublishableInputFields": "公開可能な入力フィールドが選択されていません - 公開されたワークフローはデフォルト値のみで実行されます",
"warningWorkflowHasUnpublishableInputFields": "ワークフローには公開できない入力がいくつかあります。これらは公開されたワークフローから省略されます",
"publishFailed": "公開失敗",
"publishFailedDesc": "ワークフローの公開中に問題が発生しました。もう一度お試しください。",
"publishSuccess": "ワークフローを公開しています",
"publishSuccessDesc": "<LinkComponent>プロジェクト ダッシュボード</LinkComponent> をチェックして進捗状況を確認してください。",
"publishInProgress": "公開中",
"publishedWorkflowIsLocked": "公開されたワークフローはロックされています",
"publishingValidationRun": "公開検証実行",
"publishingValidationRunInProgress": "公開検証の実行が進行中です。",
"publishedWorkflowsLocked": "公開済みのワークフローはロックされており、編集または実行できません。このワークフローを編集または実行するには、ワークフローを非公開にするか、コピーを保存してください。",
"selectingOutputNode": "出力ノードの選択",
"selectingOutputNodeDesc": "ノードをクリックして、ワークフローの出力ノードとして選択します。"
"publishedWorkflowInputs": "インプット"
},
"chooseWorkflowFromLibrary": "ライブラリからワークフローを選択",
"unnamedWorkflow": "名前のないワークフロー",
@@ -2519,23 +1954,15 @@
"models": "モデル",
"canvas": "キャンバス",
"metadata": "メタデータ",
"queue": "キュー",
"logNamespaces": "ログのネームスペース",
"dnd": "ドラッグ&ドロップ",
"config": "構成",
"generation": "生成",
"events": "イベント"
"queue": "キュー"
},
"logLevel": {
"debug": "Debug",
"info": "Info",
"error": "Error",
"fatal": "Fatal",
"warn": "Warn",
"logLevel": "ログレベル",
"trace": "追跡"
},
"enableLogging": "ログを有効にする"
"warn": "Warn"
}
},
"dynamicPrompts": {
"promptsPreview": "プロンプトプレビュー",
@@ -2551,34 +1978,5 @@
"dynamicPrompts": "ダイナミックプロンプト",
"loading": "ダイナミックプロンプトを生成...",
"maxPrompts": "最大プロンプト"
},
"upsell": {
"inviteTeammates": "チームメートを招待",
"professional": "プロフェッショナル",
"professionalUpsell": "InvokeのProfessional Editionでご利用いただけます。詳細については、こちらをクリックするか、invoke.com/pricingをご覧ください。",
"shareAccess": "共有アクセス"
},
"newUserExperience": {
"toGetStartedLocal": "始めるには、Invoke の実行に必要なモデルをダウンロードまたはインポートしてください。次に、ボックスにプロンプトを入力し、<StrongComponent>Invoke</StrongComponent> をクリックして最初の画像を生成します。プロンプトテンプレートを選択すると、結果が向上します。画像は <StrongComponent>Gallery</StrongComponent> に直接保存するか、<StrongComponent>Canvas</StrongComponent> で編集するかを選択できます。",
"toGetStarted": "開始するには、ボックスにプロンプトを入力し、<StrongComponent>Invoke</StrongComponent> をクリックして最初の画像を生成します。プロンプトテンプレートを選択すると、結果が向上します。画像は <StrongComponent>Gallery</StrongComponent> に直接保存するか、<StrongComponent>Canvas</StrongComponent> で編集するかを選択できます。",
"toGetStartedWorkflow": "開始するには、左側のフィールドに入力し、<StrongComponent>Invoke</StrongComponent> をクリックして画像を生成します。他のワークフローも試してみたい場合は、ワークフロータイトルの横にある<StrongComponent>フォルダアイコン</StrongComponent> をクリックすると、試せる他のテンプレートのリストが表示されます。",
"gettingStartedSeries": "さらに詳しいガイダンスが必要ですか? Invoke Studio の可能性を最大限に引き出すためのヒントについては、<LinkComponent>入門シリーズ</LinkComponent>をご覧ください。",
"lowVRAMMode": "最高のパフォーマンスを得るには、<LinkComponent>低 VRAM ガイド</LinkComponent>に従ってください。",
"noModelsInstalled": "モデルがインストールされていないようです。<DownloadStarterModelsButton>スターターモデルバンドルをダウンロード</DownloadStarterModelsButton>するか、<ImportModelsButton>モデルをインポート</ImportModelsButton>してください。"
},
"whatsNew": {
|
||||
"whatsNewInInvoke": "Invokeの新機能",
|
||||
"items": [
|
||||
"インペインティング: マスクごとのノイズ レベルとノイズ除去の制限。",
|
||||
"キャンバス: SDXL のアスペクト比がスマートになり、スクロールによるズームが改善されました。"
|
||||
],
|
||||
"readReleaseNotes": "リリースノートを読む",
|
||||
"watchRecentReleaseVideos": "最近のリリースビデオを見る",
|
||||
"watchUiUpdatesOverview": "Watch UI アップデートの概要"
|
||||
},
|
||||
"supportVideos": {
|
||||
"supportVideos": "サポートビデオ",
|
||||
"gettingStarted": "はじめる",
|
||||
"watch": "ウォッチ"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -74,7 +74,7 @@
"bulkDownloadFailed": "Tải Xuống Thất Bại",
"bulkDownloadRequestFailed": "Có Vấn Đề Khi Đang Chuẩn Bị Tải Xuống",
"download": "Tải Xuống",
"dropOrUpload": "Kéo Thả Hoặc Tải Lên",
"dropOrUpload": "$t(gallery.drop) Hoặc Tải Lên",
"currentlyInUse": "Hình ảnh này hiện đang sử dụng các tính năng sau:",
"deleteImagePermanent": "Ảnh đã xoá không thể phục hồi.",
"exitSearch": "Thoát Tìm Kiếm Hình Ảnh",
@@ -111,7 +111,7 @@
"noImageSelected": "Không Có Ảnh Được Chọn",
"noImagesInGallery": "Không Có Ảnh Để Hiển Thị",
"assetsTab": "Tài liệu bạn đã tải lên để dùng cho dự án của mình.",
"imagesTab": "Ảnh bạn vừa được tạo và lưu trong Invoke.",
"imagesTab": "hình bạn vừa được tạo và lưu trong Invoke.",
"loading": "Đang Tải",
"oldestFirst": "Cũ Nhất Trước",
"exitCompare": "Ngừng So Sánh",
@@ -122,8 +122,7 @@
"boardsSettings": "Thiết Lập Bảng",
"imagesSettings": "Cài Đặt Ảnh Trong Thư Viện Ảnh",
"assets": "Tài Nguyên",
"images": "Hình Ảnh",
"useForPromptGeneration": "Dùng Để Tạo Sinh Lệnh"
"images": "Hình Ảnh"
},
"common": {
"ipAdapter": "IP Adapter",
@@ -255,18 +254,9 @@
"options_withCount_other": "{{count}} thiết lập"
},
"prompt": {
"addPromptTrigger": "Thêm Trigger Cho Lệnh",
"addPromptTrigger": "Thêm Prompt Trigger",
"compatibleEmbeddings": "Embedding Tương Thích",
"noMatchingTriggers": "Không có trigger phù hợp",
"generateFromImage": "Tạo sinh lệnh từ ảnh",
"expandCurrentPrompt": "Mở Rộng Lệnh Hiện Tại",
"uploadImageForPromptGeneration": "Tải Ảnh Để Tạo Sinh Lệnh",
"expandingPrompt": "Đang mở rộng lệnh...",
"resultTitle": "Mở Rộng Lệnh Hoàn Tất",
"resultSubtitle": "Chọn phương thức mở rộng lệnh:",
"replace": "Thay Thế",
"insert": "Chèn",
"discard": "Huỷ Bỏ"
"noMatchingTriggers": "Không có trigger phù hợp"
},
"queue": {
"resume": "Tiếp Tục",
@@ -463,16 +453,6 @@
"applyFilter": {
"title": "Áp Dụng Bộ Lộc",
"desc": "Áp dụng bộ lọc đang chờ sẵn cho layer được chọn."
},
"settings": {
"behavior": "Hành Vi",
"display": "Hiển Thị",
"grid": "Lưới",
"debug": "Gỡ Lỗi"
},
"toggleNonRasterLayers": {
"title": "Bật/Tắt Layer Không Thuộc Dạng Raster",
"desc": "Hiện hoặc ẩn tất cả layer không thuộc dạng raster (Layer Điều Khiển Được, Lớp Phủ Inpaint, Chỉ Dẫn Khu Vực)."
}
},
"workflows": {
@@ -715,7 +695,7 @@
"cancel": "Huỷ",
"huggingFace": "HuggingFace (HF)",
"huggingFacePlaceholder": "chủ-sỡ-hữu/tên-model",
"includesNModels": "Thêm vào {{n}} model và dependency của nó.",
"includesNModels": "Thêm vào {{n}} model và dependency của nó",
"localOnly": "chỉ ở trên máy chủ",
"manual": "Thủ Công",
"convertToDiffusersHelpText4": "Đây là quá trình diễn ra chỉ một lần. Nó có thể tốn tầm 30-60 giây tuỳ theo thông số kỹ thuật của máy tính.",
@@ -762,7 +742,7 @@
"simpleModelPlaceholder": "Url hoặc đường đẫn đến tệp hoặc thư mục chứa diffusers trong máy chủ",
"selectModel": "Chọn Model",
"spandrelImageToImage": "Hình Ảnh Sang Hình Ảnh (Spandrel)",
"starterBundles": "Gói Khởi Đầu",
"starterBundles": "Quà Tân Thủ",
"vae": "VAE",
"urlOrLocalPath": "URL / Đường Dẫn",
"triggerPhrases": "Từ Ngữ Kích Hoạt",
@@ -814,30 +794,7 @@
"manageModels": "Quản Lý Model",
"hfTokenReset": "Làm Mới HF Token",
"relatedModels": "Model Liên Quan",
"showOnlyRelatedModels": "Liên Quan",
"installedModelsCount": "Đã tải {{installed}} trên {{total}} model.",
"allNModelsInstalled": "Đã tải tất cả {{count}} model",
"nToInstall": "Còn {{count}} để tải",
"nAlreadyInstalled": "Có {{count}} đã tải",
"bundleAlreadyInstalled": "Gói đã được cài sẵn",
"bundleAlreadyInstalledDesc": "Tất cả model trong gói {{bundleName}} đã được cài sẵn.",
"launchpadTab": "Launchpad",
"launchpad": {
"welcome": "Chào mừng đến Trình Quản Lý Model",
"description": "Invoke yêu cầu tải model nhằm tối ưu hoá các tính năng trên nền tảng. Chọn tải các phương án thủ công hoặc khám phá các model khởi đầu thích hợp.",
"manualInstall": "Tải Thủ Công",
"urlDescription": "Tải model bằng URL hoặc đường dẫn trên máy. Phù hợp để cụ thể model muốn thêm vào.",
"huggingFaceDescription": "Duyệt và cài đặt model từ các repository trên HuggingFace.",
"scanFolderDescription": "Quét một thư mục trên máy để tự động tra và tải model.",
"recommendedModels": "Model Khuyến Nghị",
"exploreStarter": "Hoặc duyệt tất cả model khởi đầu có sẵn",
"quickStart": "Gói Khởi Đầu Nhanh",
"bundleDescription": "Các gói đều bao gồm những model cần thiết cho từng nhánh model và những model cơ sở đã chọn lọc để bắt đầu.",
"browseAll": "Hoặc duyệt tất cả model có sẵn:",
"stableDiffusion15": "Stable Diffusion 1.5",
"sdxl": "SDXL",
"fluxDev": "FLUX.1 dev"
}
"showOnlyRelatedModels": "Liên Quan"
},
"metadata": {
|
||||
"guidance": "Hướng Dẫn",
|
||||
@@ -845,7 +802,7 @@
|
||||
"imageDetails": "Chi Tiết Ảnh",
|
||||
"createdBy": "Được Tạo Bởi",
|
||||
"parsingFailed": "Lỗi Cú Pháp",
|
||||
"canvasV2Metadata": "Layer Canvas",
|
||||
"canvasV2Metadata": "Canvas",
|
||||
"parameterSet": "Dữ liệu tham số {{parameter}}",
|
||||
"positivePrompt": "Lệnh Tích Cực",
|
||||
"recallParameter": "Gợi Nhớ {{label}}",
|
||||
@@ -1517,20 +1474,6 @@
"Lát khối liền mạch bức ảnh theo trục ngang."
],
"heading": "Lát Khối Liền Mạch Trục X"
},
"tileSize": {
"heading": "Kích Thước Khối",
"paragraphs": [
"Điều chỉnh kích thước của khối trong quá trình upscale. Khối càng lớn, bộ nhớ được sử dụng càng nhiều, nhưng có thể tạo sinh ảnh tốt hơn.",
"Model SD1.5 mặt định là 768, trong khi SDXL mặc định là 1024. Giảm kích thước khối nếu các gặp vấn đề bộ nhớ."
]
},
"tileOverlap": {
"heading": "Chồng Chéo Khối",
"paragraphs": [
"Điều chỉnh sự chồng chéo giữa các khối liền kề trong quá trình upscale. Giá trị chồng chép lớn giúp giảm sự rõ nét của các chỗ nối nhau, nhưng ngốn nhiều bộ nhớ hơn.",
"Giá trị mặc định (128) hoạt động tốt với đa số trường hợp, nhưng bạn có thể điều chỉnh cho phù hợp với nhu cầu cụ thể và hạn chế về bộ nhớ."
]
}
},
"models": {
@@ -1544,8 +1487,7 @@
"defaultVAE": "VAE Mặc Định",
"noMatchingModels": "Không có Model phù hợp",
"noModelsAvailable": "Không có model",
"selectModel": "Chọn Model",
"noCompatibleLoRAs": "Không Có LoRAs Tương Thích"
"selectModel": "Chọn Model"
},
"parameters": {
"postProcessing": "Xử Lý Hậu Kỳ (Shift + U)",
@@ -1596,10 +1538,7 @@
"modelIncompatibleBboxHeight": "Chiều dài hộp giới hạn là {{height}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
"modelIncompatibleScaledBboxHeight": "Chiều dài hộp giới hạn theo tỉ lệ là {{height}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
"modelIncompatibleScaledBboxWidth": "Chiều rộng hộp giới hạn theo tỉ lệ là {{width}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần thiết lập tài khoản để nâng cấp.",
"fluxKontextMultipleReferenceImages": "Chỉ có thể dùng 1 Ảnh Mẫu cùng lúc với Flux Kontext",
"promptExpansionPending": "Trong quá trình mở rộng lệnh",
"promptExpansionResultPending": "Hãy chấp thuận hoặc huỷ bỏ kết quả mở rộng lệnh của bạn"
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần thiết lập tài khoản để nâng cấp."
},
"cfgScale": "Thang CFG",
"useSeed": "Dùng Hạt Giống",
@@ -1930,8 +1869,7 @@
|
||||
"canvasGroup": "Canvas",
|
||||
"copyCanvasToClipboard": "Sao Chép Canvas Vào Clipboard",
|
||||
"copyToClipboard": "Sao Chép Vào Clipboard",
|
||||
"copyBboxToClipboard": "Sao Chép Hộp Giới Hạn Vào Clipboard",
|
||||
"newResizedControlLayer": "Layer Điều Khiển Được Đã Chỉnh Kích Thước Mới"
|
||||
"copyBboxToClipboard": "Sao Chép Hộp Giới Hạn Vào Clipboard"
|
||||
},
|
||||
"stagingArea": {
|
||||
"saveToGallery": "Lưu Vào Thư Viện Ảnh",
|
||||
@@ -2112,11 +2050,7 @@
|
||||
},
|
||||
"isolatedLayerPreviewDesc": "Có hay không hiển thị riêng layer này khi thực hiện các thao tác như lọc hay biến đổi.",
|
||||
"isolatedStagingPreview": "Xem Trước Tổng Quan Phần Cô Lập",
|
||||
"isolatedPreview": "Xem Trước Phần Cô Lập",
|
||||
"saveAllImagesToGallery": {
|
||||
"label": "Chuyển Sản Phẩm Tạo Sinh Mới Vào Thư Viện Ảnh",
|
||||
"alert": "Đang chuyển sản phẩm tạo sinh mới vào Thư Viện Ảnh, bỏ qua Canvas"
|
||||
}
|
||||
"isolatedPreview": "Xem Trước Phần Cô Lập"
|
||||
},
|
||||
"tool": {
|
||||
"eraser": "Tẩy",
|
||||
@@ -2128,8 +2062,8 @@
|
||||
"colorPicker": "Chọn Màu"
|
||||
},
|
||||
"mergingLayers": "Đang gộp layer",
|
||||
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ thư viện ảnh vào layer này, <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton>, hoặc vẽ trên canvas để bắt đầu.",
|
||||
"referenceImageEmptyState": "<UploadButton>Tải lên hình ảnh</UploadButton> hoặc kéo ảnh từ thư viện ảnh vào Ảnh Mẫu để bắt đầu.",
|
||||
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton>, hoặc vẽ trên canvas để bắt đầu.",
|
||||
"referenceImageEmptyState": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ <GalleryButton>thư viện ảnh</GalleryButton> vào layer này, hoặc <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton> để bắt đầu.",
|
||||
"useImage": "Dùng Hình Ảnh",
|
||||
"resetCanvasLayers": "Khởi Động Lại Layer Canvas",
|
||||
"asRasterLayer": "Như $t(controlLayers.rasterLayer)",
|
||||
@@ -2181,18 +2115,7 @@
|
||||
"addDenoiseLimit": "Thêm $t(controlLayers.denoiseLimit)",
|
||||
"imageNoise": "Độ Nhiễu Hình Ảnh",
|
||||
"denoiseLimit": "Giới Hạn Khử Nhiễu",
|
||||
"addImageNoise": "Thêm $t(controlLayers.imageNoise)",
|
||||
"referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ thư viện ảnh vào Ảnh Mẫu này, hoặc <PullBboxButton>kéo hộp giới hạn vào Ảnh Mẫu này</PullBboxButton> để bắt đầu.",
|
||||
"uploadOrDragAnImage": "Kéo ảnh từ thư viện ảnh hoặc <UploadButton>tải lên ảnh</UploadButton>.",
|
||||
"exportCanvasToPSD": "Xuất Canvas Thành File PSD",
|
||||
"ruleOfThirds": "Hiển Thị Quy Tắc Một Phần Ba",
|
||||
"showNonRasterLayers": "Hiển Thị Layer Không Thuộc Dạng Raster (Shift + H)",
|
||||
"hideNonRasterLayers": "Ẩn Layer Không Thuộc Dạng Raster (Shift + H)",
|
||||
"autoSwitch": {
|
||||
"off": "Tắt",
|
||||
"switchOnStart": "Khi Bắt Đầu",
|
||||
"switchOnFinish": "Khi Kết Thúc"
|
||||
}
|
||||
"addImageNoise": "Thêm $t(controlLayers.imageNoise)"
|
||||
},
|
||||
"stylePresets": {
|
||||
"negativePrompt": "Lệnh Tiêu Cực",
|
||||
@@ -2238,8 +2161,7 @@
|
||||
"deleteImage": "Xoá Hình Ảnh",
|
||||
"exportPromptTemplates": "Xuất Mẫu Trình Bày Cho Lệnh Ra (CSV)",
|
||||
"templateDeleted": "Mẫu trình bày cho lệnh đã được xoá",
|
||||
"unableToDeleteTemplate": "Không thể xoá mẫu trình bày cho lệnh",
|
||||
"togglePromptPreviews": "Bật/Tắt Xem Trước Lệnh"
|
||||
"unableToDeleteTemplate": "Không thể xoá mẫu trình bày cho lệnh"
|
||||
},
|
||||
"system": {
|
||||
"enableLogging": "Bật Chế Độ Ghi Log",
|
||||
@@ -2335,26 +2257,7 @@
|
||||
"workflowUnpublished": "Workflow Đã Được Ngừng Đăng Tải",
|
||||
"problemUnpublishingWorkflow": "Có Vấn Đề Khi Ngừng Đăng Tải Workflow",
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh và Hình Ảnh Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Inpaint và Outpaint.",
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Dùng các model khác cho Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint.",
|
||||
"fluxKontextIncompatibleGenerationMode": "FLUX Kontext không hỗ trợ tạo sinh từ hình ảnh từ canvas. Thử sử dụng Ảnh Mẫu và tắt các Layer Dạng Raster.",
|
||||
"noRasterLayers": "Không Tìm Thấy Layer Dạng Raster",
|
||||
"noRasterLayersDesc": "Tạo ít nhất một layer dạng raster để xuất file PSD",
|
||||
"noActiveRasterLayers": "Không Có Layer Dạng Raster Hoạt Động",
|
||||
"noActiveRasterLayersDesc": "Khởi động ít nhất một layer dạng raster để xuất file PSD",
|
||||
"noVisibleRasterLayers": "Không Có Layer Dạng Raster Hiển Thị",
|
||||
"noVisibleRasterLayersDesc": "Khởi động ít nhất một layer dạng raster để xuất file PSD",
|
||||
"invalidCanvasDimensions": "Kích Thước Canvas Không Phù Hợp",
|
||||
"canvasTooLarge": "Canvas Quá Lớn",
|
||||
"canvasTooLargeDesc": "Kích thước canvas vượt mức tối đa cho phép để xuất file PSD. Giảm cả chiều dài và chiều rộng chủa canvas và thử lại.",
|
||||
"failedToProcessLayers": "Thất Bại Khi Xử Lý Layer",
|
||||
"psdExportSuccess": "Xuất File PSD Hoàn Tất",
|
||||
"psdExportSuccessDesc": "Thành công xuất {{count}} layer sang file PSD",
|
||||
"problemExportingPSD": "Có Vấn Đề Khi Xuất File PSD",
|
||||
"canvasManagerNotAvailable": "Trình Quản Lý Canvas Không Có Sẵn",
|
||||
"noValidLayerAdapters": "Không có Layer Adaper Phù Hợp",
|
||||
"promptGenerationStarted": "Trình tạo sinh lệnh khởi động",
|
||||
"uploadAndPromptGenerationFailed": "Thất bại khi tải lên ảnh để tạo sinh lệnh",
|
||||
"promptExpansionFailed": "Có vấn đề xảy ra. Hãy thử mở rộng lệnh lại."
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Dùng các model khác cho Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint."
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
@@ -2368,55 +2271,6 @@
|
||||
"queue": "Queue (Hàng Đợi)",
|
||||
"workflows": "Workflow (Luồng Làm Việc)",
|
||||
"workflowsTab": "$t(common.tab) $t(ui.tabs.workflows)"
|
||||
},
|
||||
"launchpad": {
|
||||
"workflowsTitle": "Đi sâu hơn với Workflow.",
|
||||
"upscalingTitle": "Upscale và thêm chi tiết.",
|
||||
"canvasTitle": "Biên tập và làm đẹp trên Canvas.",
|
||||
"generateTitle": "Tạo sinh ảnh từ lệnh chữ.",
|
||||
"modelGuideText": "Muốn biết lệnh nào tốt nhất cho từng model chứ?",
|
||||
"modelGuideLink": "Xem thêm Hướng Dẫn Model.",
|
||||
"workflows": {
|
||||
"description": "Workflow là các template tái sử dụng được sẽ tự động hoá các tác vụ tạo sinh ảnh, cho phép bạn nhanh chóng thực hiện cách thao tác phức tạp và nhận được kết quả nhất quán.",
|
||||
"learnMoreLink": "Học thêm cách tạo ra workflow",
|
||||
"browseTemplates": {
|
||||
"title": "Duyệt Template Workflow",
|
||||
"description": "Chọn từ các workflow có sẵn cho những tác vụ cơ bản"
|
||||
},
|
||||
"createNew": {
|
||||
"title": "Tạo workflow mới",
|
||||
"description": "Tạo workflow mới từ ban đầu"
|
||||
},
|
||||
"loadFromFile": {
|
||||
"title": "Tải workflow từ tệp",
|
||||
"description": "Tải lên workflow để bắt đầu với những thiết lập sẵn có"
|
||||
}
|
||||
},
|
||||
"upscaling": {
|
||||
"uploadImage": {
|
||||
"title": "Tải Ảnh Để Upscale",
|
||||
"description": "Nhấp hoặc kéo ảnh để upscale (JPG, PNG, WebP lên đến 100MB)"
|
||||
},
|
||||
"replaceImage": {
|
||||
"title": "Thay Thế Ảnh Hiện Tại",
|
||||
"description": "Nhấp hoặc kéo ảnh mới để thay thế cái hiện tại"
|
||||
},
|
||||
"imageReady": {
|
||||
"title": "Ảnh Đã Sẵn Sàng",
|
||||
"description": "Bấm 'Kích Hoạt' để chuẩn bị upscale"
|
||||
},
|
||||
"readyToUpscale": {
|
||||
"title": "Chuẩn bị upscale!",
|
||||
"description": "Điều chỉnh thiết lập bên dưới, sau đó bấm vào nút 'Khởi Động' để chuẩn bị upscale ảnh."
|
||||
},
|
||||
"upscaleModel": "Model Upscale",
|
||||
"model": "Model",
|
||||
"helpText": {
|
||||
"promptAdvice": "Khi upscale, dùng lệnh để mô tả phương thức và phong cách. Tránh mô tả các chi tiết cụ thể trong ảnh.",
|
||||
"styleAdvice": "Upscale thích hợp nhất cho phong cách chung của ảnh."
|
||||
},
|
||||
"scale": "Kích Thước"
|
||||
}
|
||||
}
|
||||
},
|
||||
"workflows": {
|
||||
@@ -2569,10 +2423,7 @@
|
||||
"postProcessingMissingModelWarning": "Đến <LinkComponent>Trình Quản Lý Model</LinkComponent> để tải model xử lý hậu kỳ (ảnh sang ảnh).",
|
||||
"missingModelsWarning": "Đến <LinkComponent>Trình Quản Lý Model</LinkComponent> để tải model cần thiết:",
|
||||
"incompatibleBaseModel": "Phiên bản model chính không được hỗ trợ để upscale",
|
||||
"incompatibleBaseModelDesc": "Upscale chỉ hỗ trợ cho model phiên bản SD1.5 và SDXL. Đổi model chính để bật lại tính năng upscale.",
|
||||
"tileControl": "Điều Chỉnh Khối",
|
||||
"tileSize": "Kích Thước Khối",
|
||||
"tileOverlap": "Chồng Chéo Khối"
|
||||
"incompatibleBaseModelDesc": "Upscale chỉ hỗ trợ cho model phiên bản SD1.5 và SDXL. Đổi model chính để bật lại tính năng upscale."
|
||||
},
|
||||
"newUserExperience": {
|
||||
"toGetStartedLocal": "Để bắt đầu, hãy chắc chắn đã tải xuống hoặc thêm vào model cần để chạy Invoke. Sau đó, nhập lệnh vào hộp và nhấp chuột vào <StrongComponent>Kích Hoạt</StrongComponent> để tạo ra bức ảnh đầu tiên. Chọn một mẫu trình bày cho lệnh để cải thiện kết quả. Bạn có thể chọn để lưu ảnh trực tiếp vào <StrongComponent>Thư Viện Ảnh</StrongComponent> hoặc chỉnh sửa chúng ở <StrongComponent>Canvas</StrongComponent>.",
|
||||
@@ -2588,9 +2439,8 @@
|
||||
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
|
||||
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
|
||||
"items": [
|
||||
"Tạo sinh ảnh nhanh hơn với Launchpad và thẻ Tạo Sinh đã cơ bản hoá.",
|
||||
"Biên tập với lệnh bằng Flux Kontext Dev.",
|
||||
"Xuất ra file PSD, ẩn số lượng lớn lớp phủ, sắp xếp model & ảnh — tất cả cho một giao diện đã thiết kế lại để chuyên điều khiển."
|
||||
"Nvidia 50xx GPUs: Invoke sử dụng PyTorch 2.7.0, thứ tối quan trọng cho những GPU trên.",
|
||||
"Mối Quan Hệ Model: Kết nối LoRA với model chính, và LoRA đó sẽ được hiển thị đầu danh sách."
|
||||
]
|
||||
},
|
||||
"upsell": {
|
||||
@@ -2602,18 +2452,64 @@
|
||||
"supportVideos": {
|
||||
"supportVideos": "Video Hỗ Trợ",
|
||||
"gettingStarted": "Bắt Đầu Làm Quen",
|
||||
"watch": "Xem",
|
||||
"studioSessionsDesc": "Tham gia <DiscordLink /> để xem các buổi phát trực tiếp và đặt câu hỏi. Các phiên được đăng lên trên playlist các tuần tiếp theo.",
|
||||
"studioSessionsDesc1": "Xem thử <StudioSessionsPlaylistLink /> để hiểu rõ Invoke hơn.",
|
||||
"studioSessionsDesc2": "Đến <DiscordLink /> để tham gia vào phiên trực tiếp và hỏi câu hỏi. Các phiên được tải lên danh sách phát vào các tuần.",
|
||||
"videos": {
|
||||
"gettingStarted": {
|
||||
"title": "Bắt Đầu Với Invoke",
|
||||
"description": "Hoàn thành các video bao hàm mọi thứ bạn cần biết để bắt đầu với Invoke, từ tạo bức ảnh đầu tiên đến các kỹ thuật phức tạp khác."
|
||||
"howDoIDoImageToImageTransformation": {
|
||||
"title": "Làm Sao Để Tôi Dùng Trình Biến Đổi Hình Ảnh Sang Hình Ảnh?",
|
||||
"description": "Hướng dẫn cách thực hiện biến đổi ảnh sang ảnh trong Invoke."
|
||||
},
|
||||
"studioSessions": {
|
||||
"title": "Phiên Studio",
|
||||
"description": "Đào sâu vào các phiên họp để khám phá những tính năng nâng cao của Invoke, sáng tạo workflow, và thảo luận cộng đồng."
|
||||
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
|
||||
"description": "Giới thiệu về ảnh mẫu và IP adapter toàn vùng.",
|
||||
"title": "Làm Sao Để Tôi Dùng IP Adapter Toàn Vùng Và Ảnh Mẫu?"
|
||||
},
|
||||
"creatingAndComposingOnInvokesControlCanvas": {
|
||||
"description": "Học cách sáng tạo ảnh bằng trình điều khiển canvas của Invoke.",
|
||||
"title": "Sáng Tạo Trong Trình Kiểm Soát Canvas Của Invoke"
|
||||
},
|
||||
"upscaling": {
|
||||
"description": "Cách upscale ảnh bằng bộ công cụ của Invoke để nâng cấp độ phân giải.",
|
||||
"title": "Upscale (Nâng Cấp Chất Lượng Hình Ảnh)"
|
||||
},
|
||||
"howDoIGenerateAndSaveToTheGallery": {
|
||||
"title": "Làm Sao Để Tôi Tạo Sinh Và Lưu Vào Thư Viện Ảnh?",
|
||||
"description": "Các bước để tạo sinh và lưu ảnh vào thư viện ảnh."
|
||||
},
|
||||
"howDoIEditOnTheCanvas": {
|
||||
"description": "Hướng dẫn chỉnh sửa ảnh trực tiếp trên canvas.",
|
||||
"title": "Làm Sao Để Tôi Chỉnh Sửa Trên Canvas?"
|
||||
},
|
||||
"howDoIUseControlNetsAndControlLayers": {
|
||||
"title": "Làm Sao Để Tôi Dùng ControlNet và Layer Điều Khiển Được?",
|
||||
"description": "Học cách áp dụng layer điều khiển được và controlnet vào ảnh của bạn."
|
||||
},
|
||||
"howDoIUseInpaintMasks": {
|
||||
"title": "Làm Sao Để Tôi Dùng Lớp Phủ Inpaint?",
|
||||
"description": "Cách áp dụng lớp phủ inpaint vào chỉnh sửa và thay đổi ảnh."
|
||||
},
|
||||
"howDoIOutpaint": {
|
||||
"title": "Làm Sao Để Tôi Outpaint?",
|
||||
"description": "Hướng dẫn outpaint bên ngoài viền ảnh gốc."
|
||||
},
|
||||
"creatingYourFirstImage": {
|
||||
"description": "Giới thiệu về cách tạo ảnh từ ban đầu bằng công cụ Invoke.",
|
||||
"title": "Tạo Hình Ảnh Đầu Tiên Của Bạn"
|
||||
},
|
||||
"usingControlLayersAndReferenceGuides": {
|
||||
"description": "Học cách chỉ dẫn ảnh được tạo ra bằng layer điều khiển được và ảnh mẫu.",
|
||||
"title": "Dùng Layer Điều Khiển Được và Chỉ Dẫn Mẫu"
|
||||
},
|
||||
"understandingImageToImageAndDenoising": {
|
||||
"title": "Hiểu Rõ Trình Hình Ảnh Sang Hình Ảnh Và Trình Khử Nhiễu",
|
||||
"description": "Tổng quan về trình biến đổi ảnh sang ảnh và trình khử nhiễu trong Invoke."
|
||||
},
|
||||
"exploringAIModelsAndConceptAdapters": {
|
||||
"title": "Khám Phá Model AI Và Khái Niệm Về Adapter",
|
||||
"description": "Đào sâu vào model AI và cách dùng những adapter để điều khiển một cách sáng tạo."
|
||||
}
|
||||
}
|
||||
},
|
||||
"controlCanvas": "Điều Khiển Canvas",
|
||||
"watch": "Xem"
|
||||
},
|
||||
"modelCache": {
|
||||
"clearSucceeded": "Cache Model Đã Được Dọn",
|
||||
|
||||
@@ -31,7 +31,7 @@ import { diff } from 'jsondiffpatch';
import dynamicMiddlewares from 'redux-dynamic-middlewares';
import type { SerializeFunction, UnserializeFunction } from 'redux-remember';
import { rememberEnhancer, rememberReducer } from 'redux-remember';
import undoable, { newHistory } from 'redux-undo';
import undoable from 'redux-undo';
import { serializeError } from 'serialize-error';
import { api } from 'services/api';
import { authToastMiddleware } from 'services/api/authToastMiddleware';
@@ -118,7 +118,6 @@ const unserialize: UnserializeFunction = (data, key) => {
if (!persistConfig) {
throw new Error(`No persist config for slice "${key}"`);
}
let state;
try {
const { initialState, migrate } = persistConfig;
const parsed = JSON.parse(data);
@@ -142,21 +141,13 @@ const unserialize: UnserializeFunction = (data, key) => {
},
`Rehydrated slice "${key}"`
);
state = transformed;
return transformed;
} catch (err) {
log.warn(
{ error: serializeError(err as Error) },
`Error rehydrating slice "${key}", falling back to default initial state`
);
state = persistConfig.initialState;
}

// If the slice is undoable, we need to wrap it in a new history - only nodes and canvas are undoable at the moment.
// TODO(psyche): make this automatic & remove the hard-coding for specific slices.
if (key === nodesSlice.name || key === canvasSlice.name) {
return newHistory([], state, []);
} else {
return state;
return persistConfig.initialState;
}
};

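Note: after this change, unserialize simply returns the migrated state or, on any parse or migration failure, the slice's default initial state; the redux-undo history wrapping for the nodes and canvas slices is removed. A minimal sketch of the resulting flow, with hypothetical names:

// Sketch only (TypeScript): parse the persisted JSON, migrate it, and fall
// back to the default initial state if anything throws.
type PersistConfigSketch<T> = {
  initialState: T;
  migrate: (parsed: unknown) => T;
};

const unserializeSketch = <T,>(data: string, persistConfig: PersistConfigSketch<T>): T => {
  try {
    return persistConfig.migrate(JSON.parse(data));
  } catch {
    return persistConfig.initialState;
  }
};
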
@@ -1,8 +1,6 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { canvasReset } from 'features/controlLayers/store/actions';
import { inpaintMaskAdded } from 'features/controlLayers/store/canvasSlice';
import { $canvasManager } from 'features/controlLayers/store/ephemeral';
import { allEntitiesDeleted } from 'features/controlLayers/store/canvasSlice';
import { paramsReset } from 'features/controlLayers/store/paramsSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
@@ -13,9 +11,7 @@ export const SessionMenuItems = memo(() => {
const dispatch = useAppDispatch();

const resetCanvasLayers = useCallback(() => {
dispatch(canvasReset());
dispatch(inpaintMaskAdded({ isSelected: true, isBookmarked: true }));
$canvasManager.get()?.stage.fitBboxToStage();
dispatch(allEntitiesDeleted());
}, [dispatch]);
const resetGenerationSettings = useCallback(() => {
dispatch(paramsReset());

@@ -139,13 +139,4 @@ export const useGlobalHotkeys = () => {
},
dependencies: [getState, deleteImageModalApi],
});

useRegisteredHotkeys({
id: 'toggleViewer',
category: 'viewer',
callback: () => {
navigationApi.toggleViewerPanel();
},
dependencies: [],
});
};

@@ -165,9 +165,9 @@ export const CanvasEntityGroupList = memo(({ isSelected, type, children, entityI

<Spacer />
</Flex>
{type === 'raster_layer' && <RasterLayerExportPSDButton />}
<CanvasEntityMergeVisibleButton type={type} />
<CanvasEntityTypeIsHiddenToggle type={type} />
{type === 'raster_layer' && <RasterLayerExportPSDButton />}
<CanvasEntityAddOfTypeButton type={type} />
</Flex>
<Collapse in={collapse.isTrue} style={fixTooltipCloseOnScrollStyles}>

@@ -20,7 +20,7 @@ import {
import { getFilterForModel } from 'features/controlLayers/store/filters';
import { selectIsFLUX } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import type { CanvasEntityIdentifier, ControlMode } from 'features/controlLayers/store/types';
import type { CanvasEntityIdentifier, ControlModeV2 } from 'features/controlLayers/store/types';
import { replaceCanvasEntityObjectsWithImage } from 'features/imageActions/actions';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
@@ -56,7 +56,7 @@ export const ControlLayerControlAdapter = memo(() => {
);

const onChangeControlMode = useCallback(
(controlMode: ControlMode) => {
(controlMode: ControlModeV2) => {
dispatch(controlLayerControlModeChanged({ entityIdentifier, controlMode }));
},
[dispatch, entityIdentifier]
@@ -169,7 +169,6 @@ export const ControlLayerControlAdapter = memo(() => {
<ControlLayerControlAdapterControlMode
controlMode={controlAdapter.controlMode}
onChange={onChangeControlMode}
model={controlAdapter.model}
/>
)}
</Flex>

@@ -1,46 +1,32 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import type { ControlMode } from 'features/controlLayers/store/types';
import { isControlMode } from 'features/controlLayers/store/types';
import type { ControlModeV2 } from 'features/controlLayers/store/types';
import { isControlModeV2 } from 'features/controlLayers/store/types';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import type { ControlNetModelConfig } from 'services/api/types';
import { assert } from 'tsafe';

type Props = {
controlMode: ControlMode;
onChange: (controlMode: ControlMode) => void;
model: ControlNetModelConfig | null;
controlMode: ControlModeV2;
onChange: (controlMode: ControlModeV2) => void;
};

export const ControlLayerControlAdapterControlMode = memo(({ controlMode, onChange, model }: Props) => {
export const ControlLayerControlAdapterControlMode = memo(({ controlMode, onChange }: Props) => {
const { t } = useTranslation();

const CONTROL_MODE_DATA = useMemo(() => {
// Show BRIA-specific control modes if a BRIA model is selected
if (model?.base === 'bria') {
return [
{ label: t('controlLayers.controlMode.depth'), value: 'depth' },
{ label: t('controlLayers.controlMode.canny'), value: 'canny' },
{ label: t('controlLayers.controlMode.colorgrid'), value: 'colorgrid' },
{ label: t('controlLayers.controlMode.recolor'), value: 'recolor' },
{ label: t('controlLayers.controlMode.tile'), value: 'tile' },
{ label: t('controlLayers.controlMode.pose'), value: 'pose' },
];
}
// Show standard control modes for other models
return [
const CONTROL_MODE_DATA = useMemo(
() => [
{ label: t('controlLayers.controlMode.balanced'), value: 'balanced' },
{ label: t('controlLayers.controlMode.prompt'), value: 'more_prompt' },
{ label: t('controlLayers.controlMode.control'), value: 'more_control' },
{ label: t('controlLayers.controlMode.megaControl'), value: 'unbalanced' },
];
}, [t, model?.base]);
],
[t]
);

const handleControlModeChange = useCallback<ComboboxOnChange>(
(v) => {
assert(isControlMode(v?.value));
assert(isControlModeV2(v?.value));
onChange(v.value);
},
[onChange]

@@ -42,7 +42,7 @@ const DEFAULT_CONFIG: CanvasStageModuleConfig = {
SCALE_FACTOR: 0.999,
FIT_LAYERS_TO_STAGE_PADDING_PX: 48,
SCALE_SNAP_POINTS: [0.25, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5],
SCALE_SNAP_TOLERANCE: 0.02,
SCALE_SNAP_TOLERANCE: 0.05,
};

export class CanvasStageModule extends CanvasModuleBase {
@@ -366,22 +366,11 @@ export class CanvasStageModule extends CanvasModuleBase {
if (deltaT > 300) {
dynamicScaleFactor = this.config.SCALE_FACTOR + (1 - this.config.SCALE_FACTOR) / 2;
} else if (deltaT < 300) {
// Ensure dynamic scale factor stays below 1 to maintain zoom-out direction - if it goes over, we could end up
// zooming in the wrong direction with small scroll amounts
const maxScaleFactor = 0.9999;
dynamicScaleFactor = Math.min(
this.config.SCALE_FACTOR + (1 - this.config.SCALE_FACTOR) * (deltaT / 200),
maxScaleFactor
);
dynamicScaleFactor = this.config.SCALE_FACTOR + (1 - this.config.SCALE_FACTOR) * (deltaT / 200);
}

// Update the intended scale based on the last intended scale, creating a continuous zoom feel
// Handle the sign explicitly to prevent direction reversal with small scroll amounts
const scaleFactor =
scrollAmount > 0
? dynamicScaleFactor ** Math.abs(scrollAmount)
: (1 / dynamicScaleFactor) ** Math.abs(scrollAmount);
const newIntendedScale = this._intendedScale * scaleFactor;
const newIntendedScale = this._intendedScale * dynamicScaleFactor ** scrollAmount;
this._intendedScale = this.constrainScale(newIntendedScale);

// Pass control to the snapping logic
@@ -408,9 +397,6 @@ export class CanvasStageModule extends CanvasModuleBase {
// User has scrolled far enough to break the snap
this._activeSnapPoint = null;
this._applyScale(this._intendedScale, center);
} else {
// Reset intended scale to prevent drift while snapped
this._intendedScale = this._activeSnapPoint;
}
// Else, do nothing - we remain snapped at the current scale, creating a "dead zone"
return;

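Note: the wheel-zoom update here is exponential in the scroll amount: each tick multiplies the intended scale by SCALE_FACTOR (just under 1) when zooming out, or by its reciprocal when zooming in, and nearby snap points create a dead zone. A small sketch of that rule, treating the constants above as illustrative:

// Sketch only (TypeScript): scrollAmount > 0 zooms out, < 0 zooms in.
const SCALE_FACTOR = 0.999;
const SCALE_SNAP_POINTS = [0.25, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5];
const SCALE_SNAP_TOLERANCE = 0.05;

const nextIntendedScale = (intendedScale: number, scrollAmount: number): number =>
  intendedScale * SCALE_FACTOR ** scrollAmount;

// Snap to a nearby snap point when within tolerance, otherwise keep the raw scale.
const maybeSnap = (scale: number): number => {
  const snap = SCALE_SNAP_POINTS.find((p) => Math.abs(scale - p) / p <= SCALE_SNAP_TOLERANCE);
  return snap ?? scale;
};
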
@@ -20,7 +20,6 @@ import type {
CanvasInpaintMaskState,
CanvasMetadata,
ControlLoRAConfig,
ControlMode,
EntityMovedByPayload,
FillStyle,
FLUXReduxImageInfluence,
@@ -515,27 +514,10 @@ export const canvasSlice = createSlice({
default:
break;
}

// When switching to a BRIA controlnet model, set appropriate default control mode
if (layer.controlAdapter.type === 'controlnet' && modelConfig.base === 'bria') {
const currentMode = layer.controlAdapter.controlMode;
// Check if current mode is not a valid BRIA mode
if (!['depth', 'canny', 'colorgrid', 'recolor', 'tile', 'pose'].includes(currentMode)) {
layer.controlAdapter.controlMode = 'depth'; // Default BRIA mode
}
}
// When switching from BRIA to other controlnet models, set appropriate default control mode
else if (layer.controlAdapter.type === 'controlnet' && modelConfig.base !== 'bria') {
const currentMode = layer.controlAdapter.controlMode;
// Check if current mode is a BRIA-specific mode
if (['depth', 'canny', 'colorgrid', 'recolor', 'tile', 'pose'].includes(currentMode)) {
layer.controlAdapter.controlMode = 'balanced'; // Default standard mode
}
}
},
controlLayerControlModeChanged: (
state,
action: PayloadAction<EntityIdentifierPayload<{ controlMode: ControlMode }, 'control_layer'>>
action: PayloadAction<EntityIdentifierPayload<{ controlMode: ControlModeV2 }, 'control_layer'>>
) => {
const { entityIdentifier, controlMode } = action.payload;
const layer = selectEntity(state, entityIdentifier);
@@ -1636,6 +1618,7 @@ export const {
entityArrangedToBack,
entityOpacityChanged,
entitiesReordered,
allEntitiesDeleted,
allEntitiesOfTypeIsHiddenToggled,
allNonRasterLayersIsHiddenToggled,
// bbox

@@ -421,7 +421,6 @@ export const selectIsFLUX = createParamsSelector((params) => params.model?.base
export const selectIsSD3 = createParamsSelector((params) => params.model?.base === 'sd-3');
export const selectIsCogView4 = createParamsSelector((params) => params.model?.base === 'cogview4');
export const selectIsImagen3 = createParamsSelector((params) => params.model?.base === 'imagen3');
export const selectIsBria = createParamsSelector((params) => params.model?.base === 'bria');
export const selectIsImagen4 = createParamsSelector((params) => params.model?.base === 'imagen4');
export const selectIsFluxKontextApi = createParamsSelector((params) => params.model?.base === 'flux-kontext');
export const selectIsFluxKontext = createParamsSelector((params) => {

@@ -74,14 +74,6 @@ const zControlModeV2 = z.enum(['balanced', 'more_prompt', 'more_control', 'unbal
export type ControlModeV2 = z.infer<typeof zControlModeV2>;
export const isControlModeV2 = (v: unknown): v is ControlModeV2 => zControlModeV2.safeParse(v).success;

const zBriaControlMode = z.enum(['depth', 'canny', 'colorgrid', 'recolor', 'tile', 'pose']);
export type BriaControlMode = z.infer<typeof zBriaControlMode>;
export const isBriaControlMode = (v: unknown): v is BriaControlMode => zBriaControlMode.safeParse(v).success;

const zControlMode = z.union([zControlModeV2, zBriaControlMode]);
export type ControlMode = z.infer<typeof zControlMode>;
export const isControlMode = (v: unknown): v is ControlMode => zControlMode.safeParse(v).success;

const zCLIPVisionModelV2 = z.enum(['ViT-H', 'ViT-G', 'ViT-L']);
export type CLIPVisionModelV2 = z.infer<typeof zCLIPVisionModelV2>;
export const isCLIPVisionModelV2 = (v: unknown): v is CLIPVisionModelV2 => zCLIPVisionModelV2.safeParse(v).success;
@@ -371,7 +363,7 @@ const zControlNetConfig = z.object({
model: zServerValidatedModelIdentifierField.nullable(),
weight: z.number().gte(-1).lte(2),
beginEndStepPct: zBeginEndStepPct,
controlMode: zControlMode,
controlMode: zControlModeV2,
});
export type ControlNetConfig = z.infer<typeof zControlNetConfig>;

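Note: the single-enum pattern this file reverts to derives both the TypeScript type and the runtime guard from one zod schema, so the two cannot drift apart; for reference:

// Sketch only (TypeScript), mirroring the pattern in this file.
import { z } from 'zod';

const zControlModeV2 = z.enum(['balanced', 'more_prompt', 'more_control', 'unbalanced']);
type ControlModeV2 = z.infer<typeof zControlModeV2>;
// safeParse succeeds only for members of the enum, giving a sound type guard.
const isControlModeV2 = (v: unknown): v is ControlModeV2 => zControlModeV2.safeParse(v).success;
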
@@ -21,14 +21,7 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
const onClickNewCanvasWithRasterLayerFromImage = useCallback(async () => {
const { dispatch, getState } = store;
await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID);
await newCanvasFromImage({
imageDTO,
withResize: false,
withInpaintMask: true,
type: 'raster_layer',
dispatch,
getState,
});
await newCanvasFromImage({ imageDTO, withResize: false, type: 'raster_layer', dispatch, getState });
toast({
id: 'SENT_TO_CANVAS',
title: t('toast.sentToCanvas'),
@@ -39,14 +32,7 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
const onClickNewCanvasWithControlLayerFromImage = useCallback(async () => {
const { dispatch, getState } = store;
await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID);
await newCanvasFromImage({
imageDTO,
withResize: false,
withInpaintMask: true,
type: 'control_layer',
dispatch,
getState,
});
await newCanvasFromImage({ imageDTO, withResize: false, type: 'control_layer', dispatch, getState });
toast({
id: 'SENT_TO_CANVAS',
title: t('toast.sentToCanvas'),
@@ -57,14 +43,7 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
const onClickNewCanvasWithRasterLayerFromImageWithResize = useCallback(async () => {
const { dispatch, getState } = store;
await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID);
await newCanvasFromImage({
imageDTO,
withResize: true,
withInpaintMask: true,
type: 'raster_layer',
dispatch,
getState,
});
await newCanvasFromImage({ imageDTO, withResize: true, type: 'raster_layer', dispatch, getState });
toast({
id: 'SENT_TO_CANVAS',
title: t('toast.sentToCanvas'),
@@ -75,14 +54,7 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
const onClickNewCanvasWithControlLayerFromImageWithResize = useCallback(async () => {
const { dispatch, getState } = store;
await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID);
await newCanvasFromImage({
imageDTO,
withResize: true,
withInpaintMask: true,
type: 'control_layer',
dispatch,
getState,
});
await newCanvasFromImage({ imageDTO, withResize: true, type: 'control_layer', dispatch, getState });
toast({
id: 'SENT_TO_CANVAS',
title: t('toast.sentToCanvas'),

@@ -2,14 +2,13 @@ import { Box, Flex, forwardRef, Grid, GridItem, Spinner, Text } from '@invoke-ai
import { createSelector } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import { useAppSelector, useAppStore } from 'app/store/storeHooks';
import { getFocusedRegion, useIsRegionFocused } from 'common/hooks/focus';
import { getFocusedRegion } from 'common/hooks/focus';
import { useRangeBasedImageFetching } from 'features/gallery/hooks/useRangeBasedImageFetching';
import type { selectGetImageNamesQueryArgs } from 'features/gallery/store/gallerySelectors';
import {
selectGalleryImageMinimumWidth,
selectImageToCompare,
selectLastSelectedImage,
selectSelectionCount,
} from 'features/gallery/store/gallerySelectors';
import { imageToCompareChanged, selectionChanged } from 'features/gallery/store/gallerySlice';
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
@@ -25,7 +24,7 @@ import type {
VirtuosoGridHandle,
} from 'react-virtuoso';
import { VirtuosoGrid } from 'react-virtuoso';
import { imagesApi, useImageDTO, useStarImagesMutation, useUnstarImagesMutation } from 'services/api/endpoints/images';
import { imagesApi } from 'services/api/endpoints/images';
import { useDebounce } from 'use-debounce';

import { GalleryImage, GalleryImagePlaceholder } from './ImageGrid/GalleryImage';
@@ -451,37 +450,6 @@ const useScrollableGallery = (rootRef: RefObject<HTMLDivElement>) => {
return scrollerRef;
};

const useStarImageHotkey = () => {
const lastSelectedImage = useAppSelector(selectLastSelectedImage);
const selectionCount = useAppSelector(selectSelectionCount);
const isGalleryFocused = useIsRegionFocused('gallery');
const imageDTO = useImageDTO(lastSelectedImage);
const [starImages] = useStarImagesMutation();
const [unstarImages] = useUnstarImagesMutation();

const handleStarHotkey = useCallback(() => {
if (!imageDTO) {
return;
}
if (!isGalleryFocused) {
return;
}
if (imageDTO.starred) {
unstarImages({ image_names: [imageDTO.image_name] });
} else {
starImages({ image_names: [imageDTO.image_name] });
}
}, [imageDTO, isGalleryFocused, starImages, unstarImages]);

useRegisteredHotkeys({
id: 'starImage',
category: 'gallery',
callback: handleStarHotkey,
options: { enabled: !!imageDTO && selectionCount === 1 && isGalleryFocused },
dependencies: [imageDTO, selectionCount, isGalleryFocused, handleStarHotkey],
});
};

export const NewGallery = memo(() => {
const virtuosoRef = useRef<VirtuosoGridHandle>(null);
const rangeRef = useRef<ListRange>({ startIndex: 0, endIndex: 0 });
@@ -496,7 +464,6 @@ export const NewGallery = memo(() => {
enabled: !isLoading,
});

useStarImageHotkey();
useKeepSelectedImageInView(imageNames, virtuosoRef, rootRef, rangeRef);
useKeyboardNavigation(imageNames, virtuosoRef, rootRef);
const scrollerRef = useScrollableGallery(rootRef);

@@ -263,7 +263,7 @@ export const Flow = memo(() => {
noWheelClassName={NO_WHEEL_CLASS}
noPanClassName={NO_PAN_CLASS}
>
<Background gap={snapGrid} offset={snapGrid} />
<Background />
</ReactFlow>
<HotkeyIsolator />
</>

@@ -33,8 +33,8 @@ const initialState: WorkflowSettingsState = {
_version: 1,
shouldShowMinimapPanel: true,
layeringStrategy: 'network-simplex',
nodeSpacing: 30,
layerSpacing: 30,
nodeSpacing: 32,
layerSpacing: 32,
layoutDirection: 'LR',
nodeAlignment: 'UL',
shouldValidateGraph: true,

@@ -4,7 +4,6 @@ import { range } from 'es-toolkit/compat';
import type { SeedBehaviour } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import type { ModelIdentifierField } from 'features/nodes/types/common';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { API_BASE_MODELS } from 'features/parameters/types/constants';
import type { components } from 'services/api/schema';
import type { Batch, EnqueueBatchArg, Invocation } from 'services/api/types';
import { assert } from 'tsafe';
@@ -19,7 +18,7 @@ const getExtendedPrompts = (arg: {
// Normally, the seed behaviour implicitly determines the batch size. But when we use models without seeds (like
// ChatGPT 4o) in conjunction with the per-prompt seed behaviour, we lose out on that implicit batch size. To rectify
// this, we need to create a batch of the right size by repeating the prompts.
if (seedBehaviour === 'PER_PROMPT' || API_BASE_MODELS.includes(model.base)) {
if (seedBehaviour === 'PER_PROMPT' || model.base === 'chatgpt-4o' || model.base === 'flux-kontext') {
return range(iterations).flatMap(() => prompts);
}
return prompts;

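Note: for per-prompt seeds or models with no seed input, the batch size is recovered by repeating the prompt list once per iteration; a minimal sketch of the same idea:

// Sketch only (TypeScript): repeat the prompt list `iterations` times when no
// seed drives the batch size.
const extendPrompts = (prompts: string[], iterations: number): string[] =>
  Array.from({ length: iterations }, () => prompts).flat();

// e.g. extendPrompts(['a cat', 'a dog'], 2) -> ['a cat', 'a dog', 'a cat', 'a dog']
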
@@ -158,33 +158,18 @@ const addControlNetToGraph = (
assert(model !== null);
const { image_name } = imageDTO;

if (model.base === 'bria') {
// BRIA uses a different node type and parameters
const controlNet = g.addNode({
id: `control_net_${id}`,
type: 'bria_controlnet',
control_model: model,
control_weight: weight,
control_image: { image_name },
// BRIA uses control_mode instead of controlMode
control_mode: controlMode || 'pose', // Default to 'pose' if not specified
});
g.addEdge(controlNet, 'control', collector, 'item');
} else {
// Standard controlnet for other models
const controlNet = g.addNode({
id: `control_net_${id}`,
type: model.base === 'flux' ? 'flux_controlnet' : 'controlnet',
begin_step_percent: beginEndStepPct[0],
end_step_percent: beginEndStepPct[1],
control_mode: model.base === 'flux' ? undefined : controlMode,
resize_mode: 'just_resize',
control_model: model,
control_weight: weight,
image: { image_name },
});
g.addEdge(controlNet, 'control', collector, 'item');
}
const controlNet = g.addNode({
id: `control_net_${id}`,
type: model.base === 'flux' ? 'flux_controlnet' : 'controlnet',
begin_step_percent: beginEndStepPct[0],
end_step_percent: beginEndStepPct[1],
control_mode: model.base === 'flux' ? undefined : controlMode,
resize_mode: 'just_resize',
control_model: model,
control_weight: weight,
image: { image_name },
});
g.addEdge(controlNet, 'control', collector, 'item');
};

const addT2IAdapterToGraph = (

@@ -1,184 +0,0 @@
import { logger } from 'app/logging/logger';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { selectMainModelConfig, selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import { selectRefImagesSlice } from 'features/controlLayers/store/refImagesSlice';
import { selectCanvasMetadata, selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { getGlobalReferenceImageWarnings } from 'features/controlLayers/store/validators';
import { addNSFWChecker } from 'features/nodes/util/graph/generation/addNSFWChecker';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { selectCanvasOutputFields } from 'features/nodes/util/graph/graphBuilderUtils';
import type { GraphBuilderArg, GraphBuilderReturn, ImageOutputNodes } from 'features/nodes/util/graph/types';
import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { t } from 'i18next';
import type { Invocation } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';

import { addControlNets } from './addControlAdapters';

const log = logger('system');

export const buildBriaGraph = async (arg: GraphBuilderArg): Promise<GraphBuilderReturn> => {
const { generationMode, state, manager } = arg;
log.debug({ generationMode, manager: manager?.id }, 'Building Bria graph');

const model = selectMainModelConfig(state);
assert(model, 'No model selected');
assert(model.base === 'bria', 'Selected model is not a Bria model');

const params = selectParamsSlice(state);
const canvas = selectCanvasSlice(state);
const refImages = selectRefImagesSlice(state);

const { guidance, steps, seed } = params;

// Bria only supports txt2img for now
if (generationMode !== 'txt2img') {
throw new UnsupportedGenerationModeError(t('toast.briaIncompatibleGenerationMode'));
}

const g = new Graph(getPrefixedId('bria_graph'));

// Add model loader
const modelLoader = g.addNode({
type: 'bria_model_loader',
id: getPrefixedId('bria_model_loader'),
model,
} as Invocation<'bria_model_loader'>);

// Add positive prompt
const positivePrompt = g.addNode({
id: getPrefixedId('positive_prompt'),
type: 'string',
});

// Add text encoder
const textEncoder = g.addNode({
type: 'bria_text_encoder',
id: getPrefixedId('bria_text_encoder'),
prompt: positivePrompt,
negative_prompt: params.negativePrompt,
max_length: 128,
} as Invocation<'bria_text_encoder'>);

// Add latent sampler for initial noise
const latentSampler = g.addNode({
type: 'bria_latent_sampler',
id: getPrefixedId('bria_latent_sampler'),
width: params.width,
height: params.height,
seed: seed,
} as Invocation<'bria_latent_sampler'>);

// Add denoise node
const denoise = g.addNode({
type: 'bria_denoise',
id: getPrefixedId('bria_denoise'),
num_steps: steps,
guidance_scale: guidance,
} as Invocation<'bria_denoise'>);

// Add decoder
const decoder = g.addNode({
type: 'bria_decoder',
id: getPrefixedId('bria_decoder'),
} as Invocation<'bria_decoder'>);

// Connect model components to text encoder
g.addEdge(modelLoader, 't5_encoder', textEncoder, 't5_encoder');

// Connect model components to latent sampler
g.addEdge(modelLoader, 'transformer', latentSampler, 'transformer');

// Connect model components to denoise
g.addEdge(modelLoader, 'transformer', denoise, 'transformer');
g.addEdge(modelLoader, 't5_encoder', denoise, 't5_encoder');
g.addEdge(modelLoader, 'vae', denoise, 'vae');

// Connect text encoder to denoise
g.addEdge(textEncoder, 'pos_embeds', denoise, 'pos_embeds');
g.addEdge(textEncoder, 'neg_embeds', denoise, 'neg_embeds');
g.addEdge(textEncoder, 'text_ids', denoise, 'text_ids');

// Connect latent sampler to denoise
g.addEdge(latentSampler, 'latents', denoise, 'latents');
g.addEdge(latentSampler, 'latent_image_ids', denoise, 'latent_image_ids');

// Connect model components to decoder
g.addEdge(modelLoader, 'vae', decoder, 'vae');

// Connect denoise to decoder
g.addEdge(denoise, 'latents', decoder, 'latents');

// Add ControlNet support
if (manager !== null) {
const controlNetCollector = g.addNode({
type: 'collect',
id: getPrefixedId('control_net_collector'),
});

const controlNetResult = await addControlNets({
manager,
entities: canvas.controlLayers.entities,
g,
rect: canvas.bbox.rect,
collector: controlNetCollector,
model,
});

if (controlNetResult.addedControlNets > 0) {
// Connect the collector to the denoise node's control input
g.addEdge(controlNetCollector, 'collection', denoise, 'control');
} else {
// Remove the collector if no control nets were added
g.deleteNode(controlNetCollector.id);
}
}

// Add metadata
g.upsertMetadata({
guidance_scale: guidance,
model: Graph.getModelMetadataField(model),
steps,
generation_mode: 'bria_txt2img',
});
g.addEdgeToMetadata(latentSampler, 'seed', 'seed');
g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt');

let canvasOutput: Invocation<ImageOutputNodes> = decoder;

// Add text to image handling
canvasOutput = addTextToImage({
g,
state,
denoise: decoder, // Use decoder as the denoise equivalent
l2i: decoder,
});

// Add NSFW checker
if (state.system.shouldUseNSFWChecker) {
canvasOutput = addNSFWChecker(g, canvasOutput);
}

// Add watermarker
if (state.system.shouldUseWatermarker) {
canvasOutput = addWatermarker(g, canvasOutput);
}

g.updateNode(canvasOutput, selectCanvasOutputFields(state));

if (selectActiveTab(state) === 'canvas') {
g.upsertMetadata(selectCanvasMetadata(state));
}

g.setMetadataReceivingNode(canvasOutput);

return {
g,
seed: latentSampler,
positivePrompt,
};
};
@@ -3,7 +3,7 @@ import type { BaseModelType } from 'services/api/types';
/**
* Gets the optimal dimension for a given base model:
* - sd-1, sd-2: 512
* - sdxl, flux, sd-3, cogview4, bria: 1024
* - sdxl, flux, sd-3, cogview4: 1024
* - default: 1024
* @param base The base model
* @returns The optimal dimension for the model, defaulting to 1024
@@ -21,7 +21,6 @@ export const getOptimalDimension = (base?: BaseModelType | null): number => {
case 'imagen4':
case 'chatgpt-4o':
case 'flux-kontext':
case 'bria':
default:
return 1024;
}
@@ -64,7 +63,7 @@ export const isInSDXLTrainingDimensions = (width: number, height: number): boole
/**
* Gets the grid size for a given base model. For Flux, the grid size is 16, otherwise it is 8.
* - sd-1, sd-2, sdxl: 8
* - flux, sd-3, bria: 16
* - flux, sd-3: 16
* - cogview4: 32
* - default: 8
* @param base The base model
@@ -76,7 +75,6 @@ export const getGridSize = (base?: BaseModelType | null): number => {
return 32;
case 'flux':
case 'sd-3':
case 'bria':
return 16;
case 'sd-1':
case 'sd-2':

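Note: callers are expected to keep generation dimensions on multiples of the model's grid size; a hedged sketch of rounding to the grid, where roundToGrid is a hypothetical helper rather than part of this file:

// Sketch only (TypeScript): round a requested dimension to the nearest
// multiple of the grid size, never going below one grid cell.
const roundToGrid = (value: number, gridSize: number): number =>
  Math.max(gridSize, Math.round(value / gridSize) * gridSize);

// e.g. roundToGrid(1000, 16) -> 1008
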
@@ -13,13 +13,14 @@ export const CancelAllExceptCurrentButton = memo((props: ButtonProps) => {
<Button
isDisabled={api.isDisabled}
isLoading={api.isLoading}
aria-label={t('queue.clear')}
tooltip={t('queue.cancelAllExceptCurrentTooltip')}
leftIcon={<PiXCircle />}
colorScheme="error"
onClick={api.openDialog}
{...props}
>
{t('queue.cancelAllExceptCurrentTooltip')}
{t('queue.clear')}
</Button>
);
});

@@ -1,29 +0,0 @@
import type { ButtonProps } from '@invoke-ai/ui-library';
import { Button } from '@invoke-ai/ui-library';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiTrashBold } from 'react-icons/pi';

import { useClearQueueDialog } from './ClearQueueConfirmationAlertDialog';

export const ClearQueueButton = memo((props: ButtonProps) => {
const { t } = useTranslation();
const api = useClearQueueDialog();

return (
<Button
isDisabled={api.isDisabled}
isLoading={api.isLoading}
aria-label={t('queue.clear')}
tooltip={t('queue.clearTooltip')}
leftIcon={<PiTrashBold />}
colorScheme="error"
onClick={api.openDialog}
{...props}
>
{t('queue.clear')}
</Button>
);
});

ClearQueueButton.displayName = 'ClearQueueButton';
@@ -7,7 +7,7 @@ import { useTranslation } from 'react-i18next';

const [useClearQueueConfirmationAlertDialog] = buildUseBoolean(false);

export const useClearQueueDialog = () => {
const useClearQueueDialog = () => {
const dialog = useClearQueueConfirmationAlertDialog();
const clearQueue = useClearQueue();

@@ -9,19 +9,15 @@ import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { PiListBold, PiPauseFill, PiPlayFill, PiQueueBold, PiTrashBold, PiXBold, PiXCircle } from 'react-icons/pi';

import { useClearQueueDialog } from './ClearQueueConfirmationAlertDialog';
import { PiListBold, PiPauseFill, PiPlayFill, PiQueueBold, PiXBold, PiXCircle } from 'react-icons/pi';

export const QueueActionsMenuButton = memo(() => {
const ref = useRef<HTMLDivElement>(null);
const { t } = useTranslation();
const isPauseEnabled = useFeatureStatus('pauseQueue');
const isResumeEnabled = useFeatureStatus('resumeQueue');
const isClearAllEnabled = useFeatureStatus('cancelAndClearAll');
const cancelAllExceptCurrent = useCancelAllExceptCurrentQueueItemDialog();
const cancelCurrentQueueItem = useCancelCurrentQueueItem();
const clearQueue = useClearQueueDialog();
const resumeProcessor = useResumeProcessor();
const pauseProcessor = usePauseProcessor();
const openQueue = useCallback(() => {
@@ -59,17 +55,6 @@ export const QueueActionsMenuButton = memo(() => {
>
{t('queue.cancelAllExceptCurrentTooltip')}
</MenuItem>
{isClearAllEnabled && (
<MenuItem
isDestructive
icon={<PiTrashBold />}
onClick={clearQueue.openDialog}
isLoading={clearQueue.isLoading}
isDisabled={clearQueue.isDisabled}
>
{t('queue.clearTooltip')}
</MenuItem>
)}
{isResumeEnabled && (
<MenuItem
icon={<PiPlayFill />}

@@ -4,7 +4,6 @@ import { memo } from 'react';

import { CancelAllExceptCurrentButton } from './CancelAllExceptCurrentButton';
import ClearModelCacheButton from './ClearModelCacheButton';
import { ClearQueueButton } from './ClearQueueButton';
import PauseProcessorButton from './PauseProcessorButton';
import PruneQueueButton from './PruneQueueButton';
import ResumeProcessorButton from './ResumeProcessorButton';
@@ -12,20 +11,19 @@ import ResumeProcessorButton from './ResumeProcessorButton';
const QueueTabQueueControls = () => {
const isPauseEnabled = useFeatureStatus('pauseQueue');
const isResumeEnabled = useFeatureStatus('resumeQueue');
const isClearQueueEnabled = useFeatureStatus('cancelAndClearAll');

return (
<Flex flexDir="column" layerStyle="first" borderRadius="base" p={2} gap={2}>
<Flex gap={2}>
{(isPauseEnabled || isResumeEnabled) && (
<ButtonGroup orientation="vertical" size="sm">
<ButtonGroup w={28} orientation="vertical" size="sm">
{isResumeEnabled && <ResumeProcessorButton />}
{isPauseEnabled && <PauseProcessorButton />}
</ButtonGroup>
)}
<ButtonGroup orientation="vertical" size="sm">
<ButtonGroup w={28} orientation="vertical" size="sm">
<PruneQueueButton />
{isClearQueueEnabled ? <ClearQueueButton /> : <CancelAllExceptCurrentButton />}
<CancelAllExceptCurrentButton />
</ButtonGroup>
</Flex>
<ClearModelCacheButton />

@@ -267,13 +267,6 @@ const getReasonsWhyCannotEnqueueGenerateTab = (arg: {
}
}

if (model?.base === 'bria') {
if (!params.t5EncoderModel) {
reasons.push({ content: i18n.t('parameters.invoke.noT5EncoderModelSelected') });
}
// Bria uses fixed 1024x1024 dimensions, no need to validate dimensions
}

if (model && isChatGPT4oHighModelDisabled(model)) {
reasons.push({ content: i18n.t('parameters.invoke.modelDisabledForTrial', { modelName: model.name }) });
}
@@ -608,34 +601,6 @@ const getReasonsWhyCannotEnqueueCanvasTab = (arg: {
}
}

if (model?.base === 'bria') {
if (!params.t5EncoderModel) {
reasons.push({ content: i18n.t('parameters.invoke.noT5EncoderModelSelected') });
}

// Bria requires fixed 1024x1024 dimensions
const { bbox } = canvas;
const requiredSize = 1024;

if (bbox.scaleMethod === 'none') {
if (bbox.rect.width !== requiredSize || bbox.rect.height !== requiredSize) {
reasons.push({
content: i18n.t('parameters.invoke.briaRequiresExactDimensions', {
size: requiredSize,
}),
});
}
} else {
if (bbox.scaledSize.width !== requiredSize || bbox.scaledSize.height !== requiredSize) {
reasons.push({
content: i18n.t('parameters.invoke.briaRequiresExactScaledDimensions', {
size: requiredSize,
}),
});
}
}
}

if (model && isChatGPT4oHighModelDisabled(model)) {
reasons.push({ content: i18n.t('parameters.invoke.modelDisabledForTrial', { modelName: model.name }) });
}

@@ -160,7 +160,6 @@ export const useHotkeyData = (): HotkeysData => {
addHotkey('gallery', 'galleryNavDownAlt', ['alt+down']);
addHotkey('gallery', 'galleryNavLeftAlt', ['alt+left']);
addHotkey('gallery', 'deleteSelection', ['delete', 'backspace']);
addHotkey('gallery', 'starImage', ['.']);

return data;
}, [isMacOS, isModelManagerEnabled, t]);

@@ -1,5 +1,5 @@
import type { DockviewApi, GridviewApi } from 'dockview';
import { DockviewApi as MockedDockviewApi, DockviewPanel, GridviewPanel } from 'dockview';
import { DockviewPanel, GridviewPanel } from 'dockview';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

import type { NavigationAppApi } from './navigation-api';
@@ -12,7 +12,6 @@ import {
RIGHT_PANEL_MIN_SIZE_PX,
SETTINGS_PANEL_ID,
SWITCH_TABS_FAKE_DELAY_MS,
VIEWER_PANEL_ID,
WORKSPACE_PANEL_ID,
} from './shared';

@@ -49,7 +48,7 @@ vi.mock('dockview', async () => {
}
}

// Mock DockviewPanel class for instanceof checks
// Mock GridviewPanel class for instanceof checks
class MockDockviewPanel {
api = {
setActive: vi.fn(),
@@ -59,21 +58,10 @@ vi.mock('dockview', async () => {
};
}

// Mock DockviewApi class for instanceof checks
class MockDockviewApi {
panels = [];
activePanel = null;
toJSON = vi.fn();
fromJSON = vi.fn();
onDidLayoutChange = vi.fn();
onDidActivePanelChange = vi.fn();
}

return {
...actual,
GridviewPanel: MockGridviewPanel,
DockviewPanel: MockDockviewPanel,
DockviewApi: MockDockviewApi,
};
});

@@ -1117,393 +1105,4 @@ describe('AppNavigationApi', () => {
expect(initialize).not.toHaveBeenCalled();
});
});

describe('toggleViewerPanel', () => {
beforeEach(() => {
navigationApi.connectToApp(mockAppApi);
});

it('should switch to viewer panel when not currently on viewer', async () => {
const mockViewerPanel = createMockDockPanel();
navigationApi._registerPanel('generate', VIEWER_PANEL_ID, mockViewerPanel);
mockGetAppTab.mockReturnValue('generate');

// Set current panel to something other than viewer
navigationApi._currentActiveDockviewPanel.set('generate', SETTINGS_PANEL_ID);

const result = await navigationApi.toggleViewerPanel();

expect(result).toBe(true);
expect(mockViewerPanel.api.setActive).toHaveBeenCalledOnce();
});

it('should switch to previous panel when on viewer and previous panel exists', async () => {
const mockPreviousPanel = createMockDockPanel();
const mockViewerPanel = createMockDockPanel();

navigationApi._registerPanel('generate', SETTINGS_PANEL_ID, mockPreviousPanel);
navigationApi._registerPanel('generate', VIEWER_PANEL_ID, mockViewerPanel);
mockGetAppTab.mockReturnValue('generate');

// Set current panel to viewer and previous to settings
navigationApi._currentActiveDockviewPanel.set('generate', VIEWER_PANEL_ID);
navigationApi._prevActiveDockviewPanel.set('generate', SETTINGS_PANEL_ID);

const result = await navigationApi.toggleViewerPanel();

expect(result).toBe(true);
expect(mockPreviousPanel.api.setActive).toHaveBeenCalledOnce();
expect(mockViewerPanel.api.setActive).not.toHaveBeenCalled();
});

it('should switch to launchpad when on viewer and no valid previous panel', async () => {
const mockLaunchpadPanel = createMockDockPanel();
const mockViewerPanel = createMockDockPanel();

navigationApi._registerPanel('generate', LAUNCHPAD_PANEL_ID, mockLaunchpadPanel);
navigationApi._registerPanel('generate', VIEWER_PANEL_ID, mockViewerPanel);
mockGetAppTab.mockReturnValue('generate');

// Set current panel to viewer and no previous panel
navigationApi._currentActiveDockviewPanel.set('generate', VIEWER_PANEL_ID);
navigationApi._prevActiveDockviewPanel.set('generate', null);

const result = await navigationApi.toggleViewerPanel();

expect(result).toBe(true);
expect(mockLaunchpadPanel.api.setActive).toHaveBeenCalledOnce();
expect(mockViewerPanel.api.setActive).not.toHaveBeenCalled();
});

it('should switch to launchpad when on viewer and previous panel is also viewer', async () => {
const mockLaunchpadPanel = createMockDockPanel();
const mockViewerPanel = createMockDockPanel();

navigationApi._registerPanel('generate', LAUNCHPAD_PANEL_ID, mockLaunchpadPanel);
navigationApi._registerPanel('generate', VIEWER_PANEL_ID, mockViewerPanel);
mockGetAppTab.mockReturnValue('generate');

// Set current panel to viewer and previous panel was also viewer
navigationApi._currentActiveDockviewPanel.set('generate', VIEWER_PANEL_ID);
navigationApi._prevActiveDockviewPanel.set('generate', VIEWER_PANEL_ID);

const result = await navigationApi.toggleViewerPanel();

expect(result).toBe(true);
expect(mockLaunchpadPanel.api.setActive).toHaveBeenCalledOnce();
expect(mockViewerPanel.api.setActive).not.toHaveBeenCalled();
});

it('should return false when no active tab', async () => {
mockGetAppTab.mockReturnValue(null);

const result = await navigationApi.toggleViewerPanel();

expect(result).toBe(false);
});

it('should return false when viewer panel is not registered', async () => {
mockGetAppTab.mockReturnValue('generate');
|
||||
navigationApi._currentActiveDockviewPanel.set('generate', SETTINGS_PANEL_ID);
|
||||
|
||||
// Don't register viewer panel
|
||||
const result = await navigationApi.toggleViewerPanel();
|
||||
|
||||
expect(result).toBe(false);
|
||||
});
|
||||
|
||||
it('should return false when previous panel is not registered', async () => {
|
||||
const mockViewerPanel = createMockDockPanel();
|
||||
|
||||
navigationApi._registerPanel('generate', VIEWER_PANEL_ID, mockViewerPanel);
|
||||
mockGetAppTab.mockReturnValue('generate');
|
||||
|
||||
// Set current to viewer and previous to unregistered panel
|
||||
navigationApi._currentActiveDockviewPanel.set('generate', VIEWER_PANEL_ID);
|
||||
navigationApi._prevActiveDockviewPanel.set('generate', 'unregistered-panel');
|
||||
|
||||
const result = await navigationApi.toggleViewerPanel();
|
||||
|
||||
expect(result).toBe(false);
|
||||
});
|
||||
|
||||
it('should return false when launchpad panel is not registered as fallback', async () => {
|
||||
const mockViewerPanel = createMockDockPanel();
|
||||
|
||||
navigationApi._registerPanel('generate', VIEWER_PANEL_ID, mockViewerPanel);
|
||||
mockGetAppTab.mockReturnValue('generate');
|
||||
|
||||
// Set current to viewer and no previous panel, but don't register launchpad
|
||||
navigationApi._currentActiveDockviewPanel.set('generate', VIEWER_PANEL_ID);
|
||||
navigationApi._prevActiveDockviewPanel.set('generate', null);
|
||||
|
||||
const result = await navigationApi.toggleViewerPanel();
|
||||
|
||||
expect(result).toBe(false);
|
||||
});
|
||||
|
||||
it('should work across different tabs independently', async () => {
|
||||
const mockViewerPanel1 = createMockDockPanel();
|
||||
const mockViewerPanel2 = createMockDockPanel();
|
||||
const mockSettingsPanel1 = createMockDockPanel();
|
||||
const mockSettingsPanel2 = createMockDockPanel();
|
||||
const mockLaunchpadPanel = createMockDockPanel();
|
||||
|
||||
navigationApi._registerPanel('generate', VIEWER_PANEL_ID, mockViewerPanel1);
|
||||
navigationApi._registerPanel('generate', SETTINGS_PANEL_ID, mockSettingsPanel1);
|
||||
navigationApi._registerPanel('canvas', VIEWER_PANEL_ID, mockViewerPanel2);
|
||||
navigationApi._registerPanel('canvas', SETTINGS_PANEL_ID, mockSettingsPanel2);
|
||||
navigationApi._registerPanel('canvas', LAUNCHPAD_PANEL_ID, mockLaunchpadPanel);
|
||||
|
||||
// Set up different states for different tabs
|
||||
navigationApi._currentActiveDockviewPanel.set('generate', SETTINGS_PANEL_ID);
|
||||
navigationApi._currentActiveDockviewPanel.set('canvas', VIEWER_PANEL_ID);
|
||||
navigationApi._prevActiveDockviewPanel.set('canvas', SETTINGS_PANEL_ID);
|
||||
|
||||
// Test generate tab (should switch to viewer)
|
||||
mockGetAppTab.mockReturnValue('generate');
|
||||
const result1 = await navigationApi.toggleViewerPanel();
|
||||
expect(result1).toBe(true);
|
||||
expect(mockViewerPanel1.api.setActive).toHaveBeenCalledOnce();
|
||||
|
||||
// Test canvas tab (should switch to previous panel - settings panel in canvas)
|
||||
mockGetAppTab.mockReturnValue('canvas');
|
||||
const result2 = await navigationApi.toggleViewerPanel();
|
||||
expect(result2).toBe(true);
|
||||
expect(mockSettingsPanel2.api.setActive).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it('should handle sequence of viewer toggles correctly', async () => {
|
||||
const mockViewerPanel = createMockDockPanel();
|
||||
const mockSettingsPanel = createMockDockPanel();
|
||||
const mockLaunchpadPanel = createMockDockPanel();
|
||||
|
||||
navigationApi._registerPanel('generate', VIEWER_PANEL_ID, mockViewerPanel);
|
||||
navigationApi._registerPanel('generate', SETTINGS_PANEL_ID, mockSettingsPanel);
|
||||
navigationApi._registerPanel('generate', LAUNCHPAD_PANEL_ID, mockLaunchpadPanel);
|
||||
mockGetAppTab.mockReturnValue('generate');
|
||||
|
||||
// Start on settings panel
|
||||
navigationApi._currentActiveDockviewPanel.set('generate', SETTINGS_PANEL_ID);
|
||||
navigationApi._prevActiveDockviewPanel.set('generate', null);
|
||||
|
||||
// First toggle: settings -> viewer
|
||||
const result1 = await navigationApi.toggleViewerPanel();
|
||||
expect(result1).toBe(true);
|
||||
expect(mockViewerPanel.api.setActive).toHaveBeenCalledOnce();
|
||||
|
||||
// Simulate panel change tracking (normally done by dockview listener)
|
||||
navigationApi._prevActiveDockviewPanel.set('generate', SETTINGS_PANEL_ID);
|
||||
navigationApi._currentActiveDockviewPanel.set('generate', VIEWER_PANEL_ID);
|
||||
|
||||
// Second toggle: viewer -> settings (previous panel)
|
||||
const result2 = await navigationApi.toggleViewerPanel();
|
||||
expect(result2).toBe(true);
|
||||
expect(mockSettingsPanel.api.setActive).toHaveBeenCalledOnce();
|
||||
|
||||
// Simulate panel change tracking again
|
||||
navigationApi._prevActiveDockviewPanel.set('generate', VIEWER_PANEL_ID);
|
||||
navigationApi._currentActiveDockviewPanel.set('generate', SETTINGS_PANEL_ID);
|
||||
|
||||
// Third toggle: settings -> viewer again
|
||||
const result3 = await navigationApi.toggleViewerPanel();
|
||||
expect(result3).toBe(true);
|
||||
expect(mockViewerPanel.api.setActive).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
});
|
||||
|
||||
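Taken together, the toggleViewerPanel tests above pin down a three-way decision rule, which the implementation later in this diff follows. Distilled into a standalone pure function (the names here are illustrative, not the class API):

// Not on viewer -> go to viewer; on viewer with a usable previous
// panel -> go back; otherwise -> fall back to the launchpad.
const pickTargetPanel = (current: string | null, prev: string | null): string => {
  if (current !== 'viewer') {
    return 'viewer';
  }
  return prev && prev !== 'viewer' ? prev : 'launchpad';
};
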
  describe('Disposable Cleanup', () => {
    beforeEach(() => {
      navigationApi.connectToApp(mockAppApi);
    });

    it('should add disposable functions for a tab', () => {
      const dispose1 = vi.fn();
      const dispose2 = vi.fn();

      navigationApi._addDisposeForTab('generate', dispose1);
      navigationApi._addDisposeForTab('generate', dispose2);

      // Check that disposables are stored
      const disposables = navigationApi._disposablesForTab.get('generate');
      expect(disposables).toBeDefined();
      expect(disposables?.size).toBe(2);
      expect(disposables?.has(dispose1)).toBe(true);
      expect(disposables?.has(dispose2)).toBe(true);
    });

    it('should handle multiple tabs independently', () => {
      const dispose1 = vi.fn();
      const dispose2 = vi.fn();
      const dispose3 = vi.fn();

      navigationApi._addDisposeForTab('generate', dispose1);
      navigationApi._addDisposeForTab('generate', dispose2);
      navigationApi._addDisposeForTab('canvas', dispose3);

      const generateDisposables = navigationApi._disposablesForTab.get('generate');
      const canvasDisposables = navigationApi._disposablesForTab.get('canvas');

      expect(generateDisposables?.size).toBe(2);
      expect(canvasDisposables?.size).toBe(1);
      expect(generateDisposables?.has(dispose1)).toBe(true);
      expect(generateDisposables?.has(dispose2)).toBe(true);
      expect(canvasDisposables?.has(dispose3)).toBe(true);
    });

    it('should call all dispose functions when unregistering a tab', () => {
      const dispose1 = vi.fn();
      const dispose2 = vi.fn();
      const dispose3 = vi.fn();

      // Add disposables for generate tab
      navigationApi._addDisposeForTab('generate', dispose1);
      navigationApi._addDisposeForTab('generate', dispose2);

      // Add disposable for canvas tab (should not be called)
      navigationApi._addDisposeForTab('canvas', dispose3);

      // Unregister generate tab
      navigationApi.unregisterTab('generate');

      // Check that generate tab disposables were called
      expect(dispose1).toHaveBeenCalledOnce();
      expect(dispose2).toHaveBeenCalledOnce();

      // Check that canvas tab disposable was not called
      expect(dispose3).not.toHaveBeenCalled();

      // Check that generate tab disposables are cleared
      expect(navigationApi._disposablesForTab.has('generate')).toBe(false);

      // Check that canvas tab disposables remain
      expect(navigationApi._disposablesForTab.has('canvas')).toBe(true);
    });

    it('should handle unregistering tab with no disposables gracefully', () => {
      // Should not throw when unregistering tab with no disposables
      expect(() => navigationApi.unregisterTab('generate')).not.toThrow();
    });

    it('should handle duplicate dispose functions', () => {
      const dispose1 = vi.fn();

      // Add the same dispose function twice
      navigationApi._addDisposeForTab('generate', dispose1);
      navigationApi._addDisposeForTab('generate', dispose1);

      const disposables = navigationApi._disposablesForTab.get('generate');
      // Set should contain only one instance (sets don't allow duplicates)
      expect(disposables?.size).toBe(1);

      navigationApi.unregisterTab('generate');

      // Should be called only once despite being added twice
      expect(dispose1).toHaveBeenCalledOnce();
    });

    it('should automatically add dispose functions during container registration with DockviewApi', () => {
      const tab = 'generate';
      const viewId = 'myView';
      mockGetStorage.mockReturnValue(undefined);

      const initialize = vi.fn();
      const panel = { id: 'p1' };
      const mockDispose = vi.fn();

      // Create a mock that will pass the instanceof DockviewApi check
      const mockApi = Object.create(MockedDockviewApi.prototype);
      Object.assign(mockApi, {
        panels: [panel],
        activePanel: { id: 'p1' },
        toJSON: vi.fn(() => ({ foo: 'bar' })),
        onDidLayoutChange: vi.fn(() => ({ dispose: vi.fn() })),
        onDidActivePanelChange: vi.fn(() => ({ dispose: mockDispose })),
      });

      navigationApi.registerContainer(tab, viewId, mockApi, initialize);

      // Check that dispose function was added to disposables
      const disposables = navigationApi._disposablesForTab.get(tab);
      expect(disposables).toBeDefined();
      expect(disposables?.size).toBe(1);

      // Unregister tab and check dispose was called
      navigationApi.unregisterTab(tab);
      expect(mockDispose).toHaveBeenCalledOnce();
    });

    it('should not add dispose functions for GridviewApi during container registration', () => {
      const tab = 'generate';
      const viewId = 'myView';
      mockGetStorage.mockReturnValue(undefined);

      const initialize = vi.fn();
      const panel = { id: 'p1' };

      // Mock GridviewApi (not DockviewApi)
      const mockApi = {
        panels: [panel],
        toJSON: vi.fn(() => ({ foo: 'bar' })),
        onDidLayoutChange: vi.fn(() => ({ dispose: vi.fn() })),
      } as unknown as GridviewApi;

      navigationApi.registerContainer(tab, viewId, mockApi, initialize);

      // Check that no dispose function was added for GridviewApi
      const disposables = navigationApi._disposablesForTab.get(tab);
      expect(disposables).toBeUndefined();
    });

    it('should handle dispose function errors gracefully', () => {
      const goodDispose = vi.fn();
      const errorDispose = vi.fn(() => {
        throw new Error('Dispose error');
      });
      const anotherGoodDispose = vi.fn();

      navigationApi._addDisposeForTab('generate', goodDispose);
      navigationApi._addDisposeForTab('generate', errorDispose);
      navigationApi._addDisposeForTab('generate', anotherGoodDispose);

      // Should not throw even if one dispose function throws
      expect(() => navigationApi.unregisterTab('generate')).not.toThrow();

      // All dispose functions should have been called
      expect(goodDispose).toHaveBeenCalledOnce();
      expect(errorDispose).toHaveBeenCalledOnce();
      expect(anotherGoodDispose).toHaveBeenCalledOnce();
    });

    it('should clear panel tracking state when unregistering tab', () => {
      const tab = 'generate';

      // Set up some panel tracking state
      navigationApi._currentActiveDockviewPanel.set(tab, VIEWER_PANEL_ID);
      navigationApi._prevActiveDockviewPanel.set(tab, SETTINGS_PANEL_ID);

      // Add some disposables
      const dispose1 = vi.fn();
      const dispose2 = vi.fn();
      navigationApi._addDisposeForTab(tab, dispose1);
      navigationApi._addDisposeForTab(tab, dispose2);

      // Verify state exists before unregistering
      expect(navigationApi._currentActiveDockviewPanel.has(tab)).toBe(true);
      expect(navigationApi._prevActiveDockviewPanel.has(tab)).toBe(true);
      expect(navigationApi._disposablesForTab.has(tab)).toBe(true);

      // Unregister tab
      navigationApi.unregisterTab(tab);

      // Verify all state is cleared
      expect(navigationApi._currentActiveDockviewPanel.has(tab)).toBe(false);
      expect(navigationApi._prevActiveDockviewPanel.has(tab)).toBe(false);
      expect(navigationApi._disposablesForTab.has(tab)).toBe(false);

      // Verify dispose functions were called
      expect(dispose1).toHaveBeenCalledOnce();
      expect(dispose2).toHaveBeenCalledOnce();
    });
  });
});

@@ -1,21 +1,19 @@
import { logger } from 'app/logging/logger';
import { createDeferredPromise, type Deferred } from 'common/util/createDeferredPromise';
import { parseify } from 'common/util/serialize';
import type { GridviewApi, IDockviewPanel, IGridviewPanel } from 'dockview';
import { DockviewApi, GridviewPanel } from 'dockview';
import type { DockviewApi, GridviewApi, IDockviewPanel, IGridviewPanel } from 'dockview';
import { GridviewPanel } from 'dockview';
import { debounce } from 'es-toolkit';
import type { Serializable, TabName } from 'features/ui/store/uiTypes';
import type { Atom } from 'nanostores';
import { atom } from 'nanostores';

import {
  LAUNCHPAD_PANEL_ID,
  LEFT_PANEL_ID,
  LEFT_PANEL_MIN_SIZE_PX,
  RIGHT_PANEL_ID,
  RIGHT_PANEL_MIN_SIZE_PX,
  SWITCH_TABS_FAKE_DELAY_MS,
  VIEWER_PANEL_ID,
} from './shared';

const log = logger('system');
@@ -71,37 +69,6 @@ export class NavigationApi {
  private _$isLoading = atom(false);
  $isLoading: Atom<boolean> = this._$isLoading;

  /**
   * Track the _previous_ active dockview panel for each tab.
   */
  _prevActiveDockviewPanel: Map<TabName, string | null> = new Map();

  /**
   * Track the _current_ active dockview panel for each tab.
   */
  _currentActiveDockviewPanel: Map<TabName, string | null> = new Map();

  /**
   * Map of disposables for each tab.
   * This is used to clean up resources when a tab is unregistered.
   */
  _disposablesForTab: Map<TabName, Set<() => void>> = new Map();

  /**
   * Convenience method to add a dispose function for a specific tab.
   */
  _addDisposeForTab = (tab: TabName, disposeFn: () => void): void => {
    let disposables = this._disposablesForTab.get(tab);
    if (!disposables) {
      disposables = new Set<() => void>();
      this._disposablesForTab.set(tab, disposables);
    }
    disposables.add(disposeFn);
  };

  /**
   * Separator used to create unique keys for panels. Typo protection.
   */
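`_addDisposeForTab` is the cleanup half of a subscribe/unsubscribe pair: anything that attaches a listener while a tab is being set up records how to detach it. A hedged usage sketch with a homemade, dockview-style disposable emitter (none of these names are part of the class above):

// Minimal emitter whose subscriptions return { dispose }, mirroring
// dockview's event API shape.
const makeEmitter = () => {
  const listeners = new Set<() => void>();
  return {
    onDidChange: (fn: () => void) => {
      listeners.add(fn);
      return { dispose: () => { listeners.delete(fn); } };
    },
    fire: () => listeners.forEach((fn) => fn()),
  };
};

const emitter = makeEmitter();
const { dispose } = emitter.onDidChange(() => console.log('changed'));
// Recording the cleanup against the owning tab means a later
// unregisterTab('generate') runs it exactly once.
navigationApi._addDisposeForTab('generate', dispose);
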
@@ -242,18 +209,6 @@ export class NavigationApi {
      this._registerPanel(tab, panel.id, panel);
    }

    // Set up tracking for active tab for this panel - needed for viewer toggle functionality
    if (api instanceof DockviewApi) {
      this._currentActiveDockviewPanel.set(tab, api.activePanel?.id ?? null);
      this._prevActiveDockviewPanel.set(tab, null);
      const { dispose } = api.onDidActivePanelChange((panel) => {
        const previousPanelId = this._currentActiveDockviewPanel.get(tab);
        this._prevActiveDockviewPanel.set(tab, previousPanelId ?? null);
        this._currentActiveDockviewPanel.set(tab, panel?.id ?? null);
      });
      this._addDisposeForTab(tab, dispose);
    }

    api.onDidLayoutChange(
      debounce(() => {
        this._app?.storage.set(key, api.toJSON());
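The listener registered above keeps a two-slot history per tab: on every active-panel change, the current id slides into the previous slot and the new id becomes current. That is exactly enough state for a toggle and nothing more. The update rule in isolation (variables are illustrative):

let prev: string | null = null;
let current: string | null = null;

// Called on each active-panel change with the new panel id.
const onPanelChange = (id: string | null) => {
  prev = current;
  current = id;
};
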
@@ -590,42 +545,6 @@ export class NavigationApi {
    return true;
  };

  /**
   * Toggle between the viewer panel and the previously focused dockview panel in the current tab.
   * If currently on viewer and a previous panel exists, switch to the previous panel.
   * If not on viewer, switch to viewer.
   * If no previous panel exists, defaults to launchpad panel.
   * Only operates on dockview panels (panels with tabs), not gridview panels.
   *
   * @returns Promise that resolves to true if successful, false otherwise
   */
  toggleViewerPanel = (): Promise<boolean> => {
    const activeTab = this._app?.activeTab.get() ?? null;
    if (!activeTab) {
      log.warn('No active tab found for viewer toggle');
      return Promise.resolve(false);
    }

    const prevActiveDockviewPanel = this._prevActiveDockviewPanel.get(activeTab);
    const currentActiveDockviewPanel = this._currentActiveDockviewPanel.get(activeTab);

    let targetPanel;

    if (currentActiveDockviewPanel !== VIEWER_PANEL_ID) {
      targetPanel = VIEWER_PANEL_ID;
    } else if (prevActiveDockviewPanel && prevActiveDockviewPanel !== VIEWER_PANEL_ID) {
      targetPanel = prevActiveDockviewPanel;
    } else {
      targetPanel = LAUNCHPAD_PANEL_ID;
    }

    if (this.getRegisteredPanels(activeTab).includes(targetPanel)) {
      return this.focusPanel(activeTab, targetPanel);
    }

    return Promise.resolve(false);
  };

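A hypothetical call site, not part of this diff: a keydown handler could drive the toggle and treat a false result as a benign no-op (the key choice is arbitrary):

// Illustrative wiring only.
window.addEventListener('keydown', async (e) => {
  if (e.key === 'z' && !e.repeat) {
    const switched = await navigationApi.toggleViewerPanel();
    if (!switched) {
      // No viewer (or fallback) panel registered for the active tab.
      console.debug('viewer toggle was a no-op');
    }
  }
});
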
  /**
   * Check if a panel is registered.
   * @param tab - The tab the panel belongs to
@@ -674,18 +593,6 @@ export class NavigationApi {
      this.waiters.delete(key);
    }

    // Clear previous panel tracking for this tab
    this._prevActiveDockviewPanel.delete(tab);
    this._currentActiveDockviewPanel.delete(tab);
    this._disposablesForTab.get(tab)?.forEach((disposeFn) => {
      try {
        disposeFn();
      } catch (error) {
        log.error({ error: parseify(error) }, `Error disposing resource for tab ${tab}`);
      }
    });
    this._disposablesForTab.delete(tab);

    log.trace(`Unregistered all panels for tab ${tab}`);
  };
}

File diff suppressed because one or more lines are too long
@@ -1 +1 @@
__version__ = "6.1.0"
__version__ = "6.1.0rc2"