Mirror of https://github.com/invoke-ai/InvokeAI.git

Compare commits: controlnet...psyche/fea (4 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 37b75e75ca | |
| | 99f070689c | |
| | 0915221ad6 | |
| | 79f52a62d9 | |
```diff
@@ -63,7 +63,7 @@ def slerp(
     title="Blend Latents",
     tags=["latents", "blend", "mask"],
     category="latents",
-    version="1.1.0",
+    version="1.1.1",
 )
 class BlendLatentsInvocation(BaseInvocation):
     """Blend two latents using a given alpha. If a mask is provided, the second latents will be masked before blending.
```
```diff
@@ -72,7 +72,7 @@ class BlendLatentsInvocation(BaseInvocation):
     latents_a: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
     latents_b: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
     mask: Optional[ImageField] = InputField(default=None, description="Mask for blending in latents B")
-    alpha: float = InputField(ge=0, default=0.5, description=FieldDescriptions.blend_alpha)
+    alpha: float = InputField(ge=0.0, default=0.5, le=1.0, description=FieldDescriptions.blend_alpha)
 
     def prep_mask_tensor(self, mask_image: Image.Image) -> torch.Tensor:
         if mask_image.mode != "L":
```
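The hunk above tightens `alpha` on `BlendLatentsInvocation` to the closed interval [0.0, 1.0]. For context, here is a minimal sketch of the spherical linear interpolation (slerp) that the hunk header references; this is an illustration only, assuming the usual lerp fallback for near-parallel inputs, and InvokeAI's own `slerp` may differ in detail:

```python
import torch

def slerp(t: float, v0: torch.Tensor, v1: torch.Tensor, dot_threshold: float = 0.9995) -> torch.Tensor:
    """Spherical linear interpolation between two latent tensors (illustrative sketch)."""
    v0_flat, v1_flat = v0.flatten(), v1.flatten()
    dot = torch.dot(v0_flat / v0_flat.norm(), v1_flat / v1_flat.norm())
    if dot.abs() > dot_threshold:
        # Nearly parallel latents: fall back to plain linear interpolation.
        return (1.0 - t) * v0 + t * v1
    theta = torch.acos(dot)              # angle between the two latents
    sin_theta = torch.sin(theta)
    s0 = torch.sin((1.0 - t) * theta) / sin_theta
    s1 = torch.sin(t * theta) / sin_theta
    return s0 * v0 + s1 * v1
```

With `alpha` bounded to [0.0, 1.0], `slerp(alpha, latents_a, latents_b)` always interpolates along the arc between the two inputs rather than extrapolating past either one.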
```diff
@@ -236,7 +236,7 @@ class SDXLPromptInvocationBase:
     title="SDXL Prompt",
     tags=["sdxl", "compel", "prompt"],
     category="conditioning",
-    version="1.2.0",
+    version="1.2.1",
 )
 class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
     """Parse prompt using compel package to conditioning."""
```
```diff
@@ -251,12 +251,12 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
         description=FieldDescriptions.compel_prompt,
         ui_component=UIComponent.Textarea,
     )
-    original_width: int = InputField(default=1024, description="")
-    original_height: int = InputField(default=1024, description="")
-    crop_top: int = InputField(default=0, description="")
-    crop_left: int = InputField(default=0, description="")
-    target_width: int = InputField(default=1024, description="")
-    target_height: int = InputField(default=1024, description="")
+    original_width: int = InputField(ge=8, default=1024, description="")
+    original_height: int = InputField(ge=8, default=1024, description="")
+    crop_top: int = InputField(default=0, ge=0, description="")
+    crop_left: int = InputField(default=0, ge=0, description="")
+    target_width: int = InputField(ge=8, default=1024, description="")
+    target_height: int = InputField(ge=8, default=1024, description="")
     clip: CLIPField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 1")
     clip2: CLIPField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 2")
     mask: Optional[TensorField] = InputField(
```
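The `ge`/`le` keywords read like pydantic constraints, so presumably `InputField` forwards them to pydantic's `Field`. A self-contained sketch of how such bounds reject bad input at construction time (the `SDXLSize` model is hypothetical, not InvokeAI code):

```python
from pydantic import BaseModel, Field, ValidationError

# Hypothetical stand-in for the invocation's validated fields.
class SDXLSize(BaseModel):
    original_width: int = Field(default=1024, ge=8)
    original_height: int = Field(default=1024, ge=8)
    crop_top: int = Field(default=0, ge=0)
    crop_left: int = Field(default=0, ge=0)

try:
    SDXLSize(original_width=0)  # below ge=8
except ValidationError as err:
    print(err)  # reports that original_width must be >= 8
```

Under that assumption, an out-of-range value now fails at node validation instead of reaching the model as a nonsensical conditioning size.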
```diff
@@ -340,11 +340,11 @@ class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase
         description=FieldDescriptions.compel_prompt,
         ui_component=UIComponent.Textarea,
     ) # TODO: ?
-    original_width: int = InputField(default=1024, description="")
-    original_height: int = InputField(default=1024, description="")
-    crop_top: int = InputField(default=0, description="")
-    crop_left: int = InputField(default=0, description="")
-    aesthetic_score: float = InputField(default=6.0, description=FieldDescriptions.sdxl_aesthetic)
+    original_width: int = InputField(ge=8, default=1024, description="")
+    original_height: int = InputField(ge=8, default=1024, description="")
+    crop_top: int = InputField(ge=0, default=0, description="")
+    crop_left: int = InputField(ge=0, default=0, description="")
+    aesthetic_score: float = InputField(ge=1.0, le=10.0, default=6.0, description=FieldDescriptions.sdxl_aesthetic)
     clip2: CLIPField = InputField(description=FieldDescriptions.clip, input=Input.Connection)
 
     @torch.no_grad()
```
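For context on why these refiner fields carry numeric bounds: SDXL-style refiners typically pack the original size, crop coordinates, and aesthetic score into a single micro-conditioning tensor, as diffusers' SDXL refiner pipeline does. A hypothetical helper showing the shape of that packing (`build_refiner_time_ids` is illustrative, not InvokeAI's API):

```python
import torch

# Hypothetical helper: pack the validated refiner fields into one
# "time ids" conditioning tensor, diffusers-refiner style.
def build_refiner_time_ids(
    original_height: int = 1024,
    original_width: int = 1024,
    crop_top: int = 0,
    crop_left: int = 0,
    aesthetic_score: float = 6.0,  # now validated to 1.0 <= score <= 10.0
) -> torch.Tensor:
    return torch.tensor(
        [[original_height, original_width, crop_top, crop_left, aesthetic_score]],
        dtype=torch.float32,
    )
```

Because every element of this tensor feeds the model directly, bounding them at the node level keeps negative crops and out-of-scale aesthetic scores out of the conditioning.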
```diff
@@ -379,13 +379,13 @@ class CLIPSkipInvocationOutput(BaseInvocationOutput):
     title="CLIP Skip",
     tags=["clipskip", "clip", "skip"],
     category="conditioning",
-    version="1.1.0",
+    version="1.1.1",
 )
 class CLIPSkipInvocation(BaseInvocation):
     """Skip layers in clip text_encoder model."""
 
     clip: CLIPField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP")
-    skipped_layers: int = InputField(default=0, ge=0, description=FieldDescriptions.skipped_layers)
+    skipped_layers: int = InputField(default=0, ge=0, le=24, description=FieldDescriptions.skipped_layers)
 
     def invoke(self, context: InvocationContext) -> CLIPSkipInvocationOutput:
         self.clip.skipped_layers += self.skipped_layers
```
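The new `le=24` bounds `skipped_layers`. Conventionally, "CLIP skip" means conditioning on an earlier hidden state of the text encoder instead of its final layer. A rough sketch of that technique with Hugging Face transformers (the checkpoint and variable names are illustrative, not InvokeAI's implementation):

```python
import torch
from transformers import CLIPTextModel, CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

skipped_layers = 2  # must now satisfy 0 <= skipped_layers <= 24

tokens = tokenizer("a photo of an astronaut", return_tensors="pt")
with torch.no_grad():
    out = text_encoder(**tokens, output_hidden_states=True)

# hidden_states[-1] is the final layer; step back by skipped_layers.
hidden = out.hidden_states[-(skipped_layers + 1)]
# Re-apply the final layer norm so downstream consumers see normalized features.
hidden = text_encoder.text_model.final_layer_norm(hidden)
```

An upper bound on the skip count keeps the index within the encoder's actual layer stack, which is why a `le` constraint makes sense here.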