Mirror of https://github.com/invoke-ai/InvokeAI.git, synced 2026-04-23 03:00:31 -04:00
feat(ui): add Qwen-Image UI types and update frontend schema

Add UI type definitions for Qwen-Image models:
- QwenImageMainModel for the main transformer model
- QwenImageVAEModel for the VAE
- Qwen2_5VLModel for the text encoder
- Update model loader to use proper UI types
- Regenerate frontend types

This enables proper UI support for selecting and using Qwen-Image models.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -1,18 +1,16 @@
|
||||
from typing import Literal
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import (
|
||||
BaseInvocation,
|
||||
BaseInvocationOutput,
|
||||
invocation,
|
||||
invocation_output,
|
||||
)
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
|
||||
from invokeai.app.invocations.fields import Input, InputField, OutputField
|
||||
from invokeai.app.invocations.model import ModelIdentifierField, Qwen2_5VLField, TransformerField, VAEField
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.model_manager.config import (
|
||||
CheckpointConfigBase,
|
||||
)
|
||||
from invokeai.backend.model_manager.taxonomy import SubModelType
|
||||
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType
|
||||
|
||||
|
||||
@invocation_output("qwen_image_model_loader_output")
|
||||
@@ -36,21 +34,24 @@ class QwenImageModelLoaderInvocation(BaseInvocation):
|
||||
|
||||
model: ModelIdentifierField = InputField(
|
||||
description="Qwen-Image main model",
|
||||
ui_type=UIType.MainModel, # Using MainModel as we haven't defined QwenImageMainModel UI type yet
|
||||
input=Input.Direct,
|
||||
ui_model_base=BaseModelType.QwenImage,
|
||||
ui_model_type=ModelType.Main,
|
||||
)
|
||||
|
||||
qwen2_5_vl_model: ModelIdentifierField = InputField(
|
||||
description="Qwen2.5-VL vision-language model",
|
||||
ui_type=UIType.MainModel, # Using MainModel for now
|
||||
input=Input.Direct,
|
||||
title="Qwen2.5-VL Model"
|
||||
title="Qwen2.5-VL Model",
|
||||
ui_model_base=BaseModelType.QwenImage,
|
||||
# ui_model_type=ModelType.VL
|
||||
)
|
||||
|
||||
vae_model: ModelIdentifierField = InputField(
|
||||
description="VAE model for Qwen-Image",
|
||||
ui_type=UIType.VAEModel,
|
||||
title="VAE"
|
||||
title="VAE",
|
||||
ui_model_base=BaseModelType.QwenImage,
|
||||
ui_model_type=ModelType.VAE,
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> QwenImageModelLoaderOutput:
|
||||
@@ -62,7 +63,7 @@ class QwenImageModelLoaderInvocation(BaseInvocation):
|
||||
# Create submodel references
|
||||
transformer = self.model.model_copy(update={"submodel_type": SubModelType.Transformer})
|
||||
vae = self.vae_model.model_copy(update={"submodel_type": SubModelType.VAE})
|
||||
|
||||
|
||||
# For Qwen-Image, we use Qwen2.5-VL as the text encoder
|
||||
tokenizer = self.qwen2_5_vl_model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
|
||||
text_encoder = self.qwen2_5_vl_model.model_copy(update={"submodel_type": SubModelType.TextEncoder})
|
||||
@@ -75,4 +76,4 @@ class QwenImageModelLoaderInvocation(BaseInvocation):
|
||||
transformer=TransformerField(transformer=transformer, loras=[]),
|
||||
qwen2_5_vl=Qwen2_5VLField(tokenizer=tokenizer, text_encoder=text_encoder, loras=[]),
|
||||
vae=VAEField(vae=vae),
|
||||
)
|
||||
)
|
||||
|
||||
File diff suppressed because one or more lines are too long
Reference in New Issue
Block a user