Compare commits

1 Commit

Author           SHA1        Message                                                  Date
psychedelicious  59e59e98f6  experiment: use T5Tokenizer instead of T5TokenizerFast   2025-08-28 17:31:33 +10:00

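The single commit makes a one-line substitution in InvokeAI's model loaders: the fast, Rust-backed T5TokenizerFast from transformers is swapped for the slow, SentencePiece-backed T5Tokenizer, once at the import site and once in each of the two loader classes shown below.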

@@ -13,7 +13,7 @@ from transformers import (
     CLIPTextModel,
     CLIPTokenizer,
     T5EncoderModel,
-    T5TokenizerFast,
+    T5Tokenizer,
 )
 from invokeai.app.services.config.config_default import get_config
@@ -146,7 +146,7 @@ class BnbQuantizedLlmInt8bCheckpointModel(ModelLoader):
         )
         match submodel_type:
             case SubModelType.Tokenizer2 | SubModelType.Tokenizer3:
-                return T5TokenizerFast.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
+                return T5Tokenizer.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
             case SubModelType.TextEncoder2 | SubModelType.TextEncoder3:
                 te2_model_path = Path(config.path) / "text_encoder_2"
                 model_config = AutoConfig.from_pretrained(te2_model_path)
@@ -190,7 +190,7 @@ class T5EncoderCheckpointModel(ModelLoader):
         match submodel_type:
             case SubModelType.Tokenizer2 | SubModelType.Tokenizer3:
-                return T5TokenizerFast.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
+                return T5Tokenizer.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
             case SubModelType.TextEncoder2 | SubModelType.TextEncoder3:
                 return T5EncoderModel.from_pretrained(
                     Path(config.path) / "text_encoder_2", torch_dtype="auto", low_cpu_mem_usage=True
                 )
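
For context, here is a minimal sketch (not part of the commit) of the behavioral difference this experiment probes. T5Tokenizer wraps the original SentencePiece model directly and requires the sentencepiece package at runtime, while T5TokenizerFast is backed by the Rust tokenizers library and uses a converted vocabulary, so the two can disagree on some inputs. The Hugging Face repo id below is a hypothetical stand-in for the local tokenizer_2 directory used in the diff.

```python
# Sketch: compare the slow and fast T5 tokenizer classes this commit swaps.
# Assumes the transformers and sentencepiece packages are installed.
from transformers import T5Tokenizer, T5TokenizerFast

# Hypothetical stand-in for Path(config.path) / "tokenizer_2" in the diff.
repo = "google/t5-v1_1-xxl"

# "Slow" tokenizer: wraps the original SentencePiece model.
slow = T5Tokenizer.from_pretrained(repo)

# "Fast" tokenizer: Rust-backed, built from a converted vocabulary.
fast = T5TokenizerFast.from_pretrained(repo)

prompt = "a photo of an astronaut riding a horse"
slow_ids = slow(prompt, max_length=512, truncation=True).input_ids
fast_ids = fast(prompt, max_length=512, truncation=True).input_ids

# Any prompt-dependent divergence between the two implementations would
# show up as a mismatch in the token ids produced here.
print(slow_ids == fast_ids)
```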