Use a Textarea component for the FluxTextEncoderInvocation prompt field.

Ryan Dick
2024-11-22 16:17:37 +00:00
committed by psychedelicious
parent ed46acee79
commit 06a9d4e2b2


@@ -5,7 +5,7 @@ import torch
 from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer
 from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
-from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
+from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, UIComponent
 from invokeai.app.invocations.model import CLIPField, T5EncoderField
 from invokeai.app.invocations.primitives import FluxConditioningOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
@@ -41,7 +41,10 @@ class FluxTextEncoderInvocation(BaseInvocation):
     t5_max_seq_len: Literal[256, 512] = InputField(
         description="Max sequence length for the T5 encoder. Expected to be 256 for FLUX schnell models and 512 for FLUX dev models."
     )
-    prompt: str = InputField(description="Text prompt to encode.")
+    prompt: str = InputField(
+        description="Text prompt to encode.",
+        ui_component=UIComponent.Textarea,
+    )
 
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> FluxConditioningOutput:
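
For context, ui_component=UIComponent.Textarea is the general InputField option this commit uses: it asks the workflow editor to render a string field as a multi-line textarea rather than the default single-line input. A minimal sketch of the same pattern in a standalone node follows; the invocation name, title, and the StringOutput pass-through are illustrative assumptions, not code from this commit.

    from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
    from invokeai.app.invocations.fields import InputField, UIComponent
    from invokeai.app.invocations.primitives import StringOutput
    from invokeai.app.services.shared.invocation_context import InvocationContext


    # Hypothetical node name/title, used only to illustrate the Textarea pattern.
    @invocation("textarea_prompt_example", title="Textarea Prompt Example", version="1.0.0")
    class TextareaPromptExampleInvocation(BaseInvocation):
        """Illustrative node whose prompt field renders as a multi-line textarea."""

        # ui_component=UIComponent.Textarea switches the workflow editor widget for
        # this string field from a single-line text input to a textarea.
        prompt: str = InputField(
            description="Text prompt to encode.",
            ui_component=UIComponent.Textarea,
        )

        def invoke(self, context: InvocationContext) -> StringOutput:
            # Pass the prompt through unchanged; a real node would do actual work here.
            return StringOutput(value=self.prompt)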