[SD][web] Populate checkpoints as dropdown UI (#918)

Signed-off-by: Gaurav Shukla <gaurav@nod-labs.com>
Authored by Gaurav Shukla on 2023-02-03 03:29:50 +05:30; committed by GitHub.
parent 9b90672f63
commit 339738f8a3
3 changed files with 42 additions and 28 deletions

View File

@@ -114,7 +114,8 @@ def save_output_img(output_img):
if args.save_metadata_to_json:
del new_entry["OUTPUT"]
with open(f"{output_path}/{out_img_name}.json", "w") as f:
json_path = Path(generated_imgs_path, f"{out_img_name}.json")
with open(json_path, "w") as f:
json.dump(new_entry, f, indent=4)
@@ -136,7 +137,7 @@ def txt2img_inf(
scheduler: str,
model_id: str,
custom_model_id: str,
ckpt_file_obj,
ckpt_loc: str,
precision: str,
device: str,
max_length: int,
@@ -154,7 +155,7 @@ def txt2img_inf(
args.steps = steps
args.scheduler = scheduler
args.hf_model_id = custom_model_id if custom_model_id else model_id
args.ckpt_loc = ckpt_file_obj.name if ckpt_file_obj else ""
args.ckpt_loc = "" if ckpt_loc == "None" else ckpt_loc
args.save_metadata_to_json = save_metadata_to_json
args.write_metadata_to_png = save_metadata_to_png
dtype = torch.float32 if precision == "fp32" else torch.half

View File

@@ -271,7 +271,10 @@ def set_init_device_flags():
print("Tuned models are currently not supported for this setting.")
# set import_mlir to True for unuploaded models.
if args.hf_model_id not in [
if args.ckpt_loc != "":
args.import_mlir = True
elif args.hf_model_id not in [
"Linaqruf/anything-v3.0",
"dreamlike-art/dreamlike-diffusion-1.0",
"prompthero/openjourney",
@@ -282,7 +285,7 @@ def set_init_device_flags():
]:
args.import_mlir = True
if args.height != 512 or args.width != 512 or args.batch_size != 1:
elif args.height != 512 or args.width != 512 or args.batch_size != 1:
args.import_mlir = True

View File

@@ -1,6 +1,7 @@
import os
import sys
from pathlib import Path
import glob
if "AMD_ENABLE_LLPC" not in os.environ:
os.environ["AMD_ENABLE_LLPC"] = "1"
@@ -57,29 +58,38 @@ with gr.Blocks(title="Stable Diffusion", css=demo_css) as shark_web:
with gr.Row():
with gr.Column(scale=1, min_width=600):
with gr.Row():
with gr.Group():
model_id = gr.Dropdown(
label="Model ID",
value="stabilityai/stable-diffusion-2-1-base",
choices=[
"Linaqruf/anything-v3.0",
"prompthero/openjourney",
"wavymulder/Analog-Diffusion",
"stabilityai/stable-diffusion-2-1",
"stabilityai/stable-diffusion-2-1-base",
"CompVis/stable-diffusion-v1-4",
],
)
custom_model_id = gr.Textbox(
placeholder="check here: https://huggingface.co/models eg. runwayml/stable-diffusion-v1-5",
value="",
label="HuggingFace Model ID",
)
with gr.Group():
ckpt_loc = gr.File(
label="Upload checkpoint",
file_types=[".ckpt", ".safetensors"],
)
model_id = gr.Dropdown(
label="Model ID",
value="stabilityai/stable-diffusion-2-1-base",
choices=[
"Linaqruf/anything-v3.0",
"prompthero/openjourney",
"wavymulder/Analog-Diffusion",
"stabilityai/stable-diffusion-2-1",
"stabilityai/stable-diffusion-2-1-base",
"CompVis/stable-diffusion-v1-4",
],
)
custom_model_id = gr.Textbox(
placeholder="check here: https://huggingface.co/models eg. runwayml/stable-diffusion-v1-5",
value="",
label="HuggingFace Model ID",
)
with gr.Group():
ckpt_path = resource_path("models")
types = (
"*.ckpt",
"*.safetensors",
) # the tuple of file types
ckpt_files = ["None"]
for extn in types:
files = glob.glob(os.path.join(ckpt_path, extn))
ckpt_files.extend(files)
ckpt_loc = gr.Dropdown(
label="Place all checkpoints at SHARK/apps/stable_diffusion/web/models/",
value="None",
choices=ckpt_files,
)
with gr.Group(elem_id="prompt_box_outer"):
prompt = gr.Textbox(