mirror of
https://github.com/nod-ai/SHARK-Studio.git
synced 2026-01-09 13:57:54 -05:00
* merge conflict resolution * add support to other scripts --------- Co-authored-by: dan <dan@nod-labs.com>
248 lines
7.6 KiB
Python
248 lines
7.6 KiB
Python
import torch
|
|
import time
|
|
from dataclasses import dataclass
|
|
from apps.stable_diffusion.web.ui.utils import get_custom_model_pathfile
|
|
from apps.stable_diffusion.src import (
|
|
args,
|
|
Text2ImagePipeline,
|
|
get_schedulers,
|
|
set_init_device_flags,
|
|
utils,
|
|
clear_all,
|
|
save_output_img,
|
|
)
|
|
|
|
|
|
@dataclass
class Config:
    """Snapshot of the settings a Text2ImagePipeline was built with.

    Instances are compared via the generated dataclass ``__eq__`` against
    the previously cached configuration to decide whether the compiled
    pipeline must be rebuilt.
    """

    model_id: str  # HuggingFace model ID ("" when a local checkpoint is used)
    ckpt_loc: str  # path to a local .ckpt/.safetensors file ("" when using HF)
    precision: str  # e.g. "fp32" or "fp16"
    batch_size: int
    max_length: int
    height: int
    width: int
    device: str
|
|
|
|
|
|
# Module-level cache: the compiled pipeline, the Config it was built with,
# and the scheduler table for the current model.  Rebuilt lazily inside
# txt2img_inf() whenever the requested configuration changes.
txt2img_obj = None
config_obj = None
schedulers = None

# Preserve the command-line values of flags that set_init_device_flags()
# may overwrite, so every UI invocation starts from the original settings.
init_iree_vulkan_target_triple = args.iree_vulkan_target_triple
init_use_tuned = args.use_tuned
init_import_mlir = args.import_mlir
|
|
|
|
|
|
# Exposed to UI.
|
|
def txt2img_inf(
    prompt: str,
    negative_prompt: str,
    height: int,
    width: int,
    steps: int,
    guidance_scale: float,
    seed: int,
    batch_count: int,
    batch_size: int,
    scheduler: str,
    custom_model: str,
    hf_model_id: str,
    precision: str,
    device: str,
    max_length: int,
    save_metadata_to_json: bool,
    save_metadata_to_png: bool,
):
    """Generate images from `prompt`; exposed to the web UI as a generator.

    After every batch it yields (all images so far, first image, pipeline
    log); the final yield is (all images, textual summary).  The compiled
    pipeline is cached in module globals and rebuilt only when the
    requested configuration differs from the cached one.
    """
    global txt2img_obj
    global config_obj
    global schedulers

    args.prompts = [prompt]
    args.negative_prompts = [negative_prompt]
    args.guidance_scale = guidance_scale
    args.steps = steps
    args.scheduler = scheduler

    # Resolve the model source: exactly one of ckpt_loc / hf_model_id is set.
    args.ckpt_loc = ""
    args.hf_model_id = ""
    if custom_model == "None":
        if not hf_model_id:
            # BUGFIX: this function is a generator, so a plain
            # `return (None, msg)` is swallowed by StopIteration and the
            # UI never sees the message -- it must be yielded instead.
            yield (
                None,
                "Please provide either custom model or huggingface model ID, both must not be empty",
            )
            return
        args.hf_model_id = hf_model_id
    elif ".ckpt" in custom_model or ".safetensors" in custom_model:
        args.ckpt_loc = get_custom_model_pathfile(custom_model)
    else:
        args.hf_model_id = custom_model

    args.save_metadata_to_json = save_metadata_to_json
    args.write_metadata_to_png = save_metadata_to_png

    dtype = torch.float32 if precision == "fp32" else torch.half
    # Schedulers whose names start with "Shark" run on-device; everything
    # else is stepped on the CPU.
    cpu_scheduling = not scheduler.startswith("Shark")
    new_config_obj = Config(
        args.hf_model_id,
        args.ckpt_loc,
        precision,
        batch_size,
        max_length,
        height,
        width,
        device,
    )
    if not txt2img_obj or config_obj != new_config_obj:
        # Configuration changed (or first call): recompile the pipeline.
        config_obj = new_config_obj
        args.precision = precision
        args.batch_size = batch_size
        args.max_length = max_length
        args.height = height
        args.width = width
        # BUGFIX: the UI device string looks like "label => name";
        # using [-1] instead of [1] keeps this working (instead of raising
        # IndexError) when no "=>" separator is present.
        args.device = device.split("=>", 1)[-1].strip()
        args.iree_vulkan_target_triple = init_iree_vulkan_target_triple
        args.use_tuned = init_use_tuned
        args.import_mlir = init_import_mlir
        args.img_path = None
        set_init_device_flags()
        model_id = (
            args.hf_model_id
            if args.hf_model_id
            else "stabilityai/stable-diffusion-2-1-base"
        )
        schedulers = get_schedulers(model_id)
        scheduler_obj = schedulers[scheduler]
        txt2img_obj = Text2ImagePipeline.from_pretrained(
            scheduler=scheduler_obj,
            import_mlir=args.import_mlir,
            model_id=args.hf_model_id,
            ckpt_loc=args.ckpt_loc,
            precision=args.precision,
            max_length=args.max_length,
            batch_size=args.batch_size,
            height=args.height,
            width=args.width,
            use_base_vae=args.use_base_vae,
            use_tuned=args.use_tuned,
            custom_vae=args.custom_vae,
            low_cpu_mem_usage=args.low_cpu_mem_usage,
            debug=args.import_debug if args.import_mlir else False,
        )

    # Always honour the scheduler chosen for this call, even when the
    # cached pipeline was built with a different one.
    txt2img_obj.scheduler = schedulers[scheduler]

    start_time = time.time()
    txt2img_obj.log = ""
    generated_imgs = []
    seeds = []
    img_seed = utils.sanitize_seed(seed)
    for i in range(batch_count):
        if i > 0:
            # Only the first batch honours the requested seed; later
            # batches draw a fresh random seed.
            img_seed = utils.sanitize_seed(-1)
        out_imgs = txt2img_obj.generate_images(
            prompt,
            negative_prompt,
            batch_size,
            height,
            width,
            steps,
            guidance_scale,
            img_seed,
            args.max_length,
            dtype,
            args.use_base_vae,
            cpu_scheduling,
        )
        # NOTE(review): only the first image of each batch is written to
        # disk, even when batch_size > 1 -- confirm this is intended.
        save_output_img(out_imgs[0], img_seed)
        generated_imgs.extend(out_imgs)
        seeds.append(img_seed)
        txt2img_obj.log += "\n"
        yield generated_imgs, generated_imgs[0], txt2img_obj.log

    total_time = time.time() - start_time
    text_output = f"prompt={args.prompts}"
    text_output += f"\nnegative prompt={args.negative_prompts}"
    text_output += f"\nmodel_id={args.hf_model_id}, ckpt_loc={args.ckpt_loc}"
    text_output += f"\nscheduler={args.scheduler}, device={device}"
    text_output += (
        f"\nsteps={steps}, guidance_scale={guidance_scale}, seed={seeds}"
    )
    text_output += f"\nsize={height}x{width}, batch_count={batch_count}, batch_size={batch_size}, max_length={args.max_length}"
    # text_output += txt2img_obj.log
    text_output += f"\nTotal image generation time: {total_time:.4f}sec"

    yield generated_imgs, text_output
|
|
|
|
|
|
if __name__ == "__main__":
    # Standalone CLI entry point; everything is driven by the global
    # `args` namespace populated from the command line.
    if args.clear_all:
        clear_all()

    dtype = torch.float32 if args.precision == "fp32" else torch.half
    # Schedulers whose names start with "Shark" run on-device; everything
    # else is stepped on the CPU.
    cpu_scheduling = not args.scheduler.startswith("Shark")
    set_init_device_flags()
    schedulers = get_schedulers(args.hf_model_id)
    scheduler_obj = schedulers[args.scheduler]
    seed = args.seed
    txt2img_obj = Text2ImagePipeline.from_pretrained(
        scheduler=scheduler_obj,
        import_mlir=args.import_mlir,
        model_id=args.hf_model_id,
        ckpt_loc=args.ckpt_loc,
        precision=args.precision,
        max_length=args.max_length,
        batch_size=args.batch_size,
        height=args.height,
        width=args.width,
        use_base_vae=args.use_base_vae,
        use_tuned=args.use_tuned,
        custom_vae=args.custom_vae,
        low_cpu_mem_usage=args.low_cpu_mem_usage,
        debug=args.import_debug if args.import_mlir else False,
    )

    for current_batch in range(args.batch_count):
        if current_batch > 0:
            # Only the first batch honours the requested seed; later
            # batches draw a fresh random seed.
            seed = -1
        seed = utils.sanitize_seed(seed)

        start_time = time.time()
        generated_imgs = txt2img_obj.generate_images(
            args.prompts,
            args.negative_prompts,
            args.batch_size,
            args.height,
            args.width,
            args.steps,
            args.guidance_scale,
            seed,
            args.max_length,
            dtype,
            args.use_base_vae,
            cpu_scheduling,
        )
        total_time = time.time() - start_time
        text_output = f"prompt={args.prompts}"
        text_output += f"\nnegative prompt={args.negative_prompts}"
        text_output += (
            f"\nmodel_id={args.hf_model_id}, ckpt_loc={args.ckpt_loc}"
        )
        text_output += f"\nscheduler={args.scheduler}, device={args.device}"
        text_output += f"\nsteps={args.steps}, guidance_scale={args.guidance_scale}, seed={seed}, size={args.height}x{args.width}"
        text_output += (
            f", batch size={args.batch_size}, max_length={args.max_length}"
        )
        # TODO: if using --batch_count=x txt2img_obj.log will output on each display every iteration infos from the start
        text_output += txt2img_obj.log
        text_output += f"\nTotal image generation time: {total_time:.4f}sec"

        # NOTE(review): only the first generated image is saved, even when
        # args.batch_size > 1 -- confirm this is intended.
        save_output_img(generated_imgs[0], seed)
        print(text_output)