Mirror of https://github.com/nod-ai/SHARK-Studio.git (synced 2026-01-09 22:07:55 -05:00)
Fix an issue with diffusers>0.19.3 (#1775)
@@ -34,7 +34,7 @@ from PIL import Image
 from tqdm.auto import tqdm
 from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
 from diffusers.loaders import AttnProcsLayers
-from diffusers.models.cross_attention import LoRACrossAttnProcessor
+from diffusers.models.attention_processor import LoRAXFormersAttnProcessor

 import torch_mlir
 from torch_mlir.dynamo import make_simple_dynamo_backend
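The old module path stops resolving on diffusers releases newer than 0.19.3, which is what this import rename addresses. If both old and new releases had to keep working, a try/except import guard is one option; the sketch below is not part of the commit, assumes only the two module paths shown in the diff, and simply aliases whichever processor class is available.

# Hedged sketch, not part of the commit: tolerate both diffusers layouts.
try:
    # diffusers > 0.19.3: LoRA processors live under attention_processor.
    from diffusers.models.attention_processor import (
        LoRAXFormersAttnProcessor as LoRAProcessorCls,
    )
except ImportError:
    # Older diffusers: the LoRA processor lived under cross_attention.
    from diffusers.models.cross_attention import (
        LoRACrossAttnProcessor as LoRAProcessorCls,
    )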
@@ -287,7 +287,7 @@ def lora_train(
             block_id = int(name[len("down_blocks.")])
             hidden_size = unet.config.block_out_channels[block_id]

-        lora_attn_procs[name] = LoRACrossAttnProcessor(
+        lora_attn_procs[name] = LoRAXFormersAttnProcessor(
             hidden_size=hidden_size,
             cross_attention_dim=cross_attention_dim,
         )
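For context on where these names come from: in the standard diffusers LoRA training recipe, lora_attn_procs is filled in a loop over the UNet's attention processors, and hidden_size / cross_attention_dim are derived from the UNet config. The sketch below follows that recipe and is an assumption about the surrounding code in lora_train, not a copy of it; only the lines shown in the diff are certain.

# Hedged sketch: the surrounding loop is assumed to follow the standard
# diffusers LoRA attention-processor setup; only the names in the diff are certain.
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAXFormersAttnProcessor


def build_lora_attn_procs(unet):
    lora_attn_procs = {}
    for name in unet.attn_processors.keys():
        # Self-attention layers (attn1) have no cross-attention dimension.
        cross_attention_dim = (
            None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
        )
        if name.startswith("mid_block"):
            hidden_size = unet.config.block_out_channels[-1]
        elif name.startswith("up_blocks"):
            block_id = int(name[len("up_blocks.")])
            hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
        elif name.startswith("down_blocks"):
            block_id = int(name[len("down_blocks.")])
            hidden_size = unet.config.block_out_channels[block_id]

        lora_attn_procs[name] = LoRAXFormersAttnProcessor(
            hidden_size=hidden_size,
            cross_attention_dim=cross_attention_dim,
        )

    unet.set_attn_processor(lora_attn_procs)
    # AttnProcsLayers wraps just the LoRA parameters as one trainable module,
    # so the optimizer can update them while the base UNet weights stay frozen.
    return AttnProcsLayers(unet.attn_processors)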
@@ -19,7 +19,7 @@ parameterized

 # Add transformers, diffusers and scipy since it most commonly used
 transformers
-diffusers==0.19.3
+diffusers
 #accelerate is now required for diffusers import from ckpt.
 accelerate
 scipy
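Dropping the ==0.19.3 pin allows newer diffusers releases to be installed, which is what the renamed import above requires. As a hedged sketch that is not part of the commit, a quick runtime check of the installed version could look like:

# Hedged sketch, not part of the commit: confirm the installed diffusers release
# is newer than the old 0.19.3 pin before relying on the renamed import path.
import diffusers
from packaging import version

assert version.parse(diffusers.__version__) > version.parse("0.19.3"), (
    f"found diffusers {diffusers.__version__}; expected a release newer than 0.19.3"
)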