From a504903dd55193f14e181d09c1e1d00b331dffe4 Mon Sep 17 00:00:00 2001
From: Ean Garvey <87458719+monorimet@users.noreply.github.com>
Date: Tue, 31 Jan 2023 21:42:45 -0600
Subject: [PATCH] Fix formatting issues. (#903)

---
 apps/stable_diffusion/scripts/txt2img.py | 1 +
 apps/stable_diffusion/src/models/model_wrappers.py | 1 -
 .../pipelines/pipeline_shark_stable_diffusion_utils.py | 1 -
 apps/stable_diffusion/src/utils/profiler.py | 1 +
 apps/stable_diffusion/src/utils/utils.py | 2 --
 apps/stable_diffusion/web/index.py | 2 --
 dataset/annotation_tool.py | 1 -
 generate_sharktank.py | 1 -
 shark/examples/shark_inference/ESRGAN/esrgan.py | 1 -
 shark/examples/shark_inference/simple_dlrm.py | 2 --
 shark/examples/shark_inference/sparse_arch.py | 3 ---
 shark/examples/shark_inference/stable_diff.py | 3 ---
 shark/examples/shark_inference/stable_diff_f16.py | 3 ---
 shark/examples/shark_inference/stable_diffusion/main.py | 2 +-
 shark/examples/shark_inference/stable_diffusion/utils.py | 7 ++++---
 shark/examples/shark_inference/upscaler/model_wrappers.py | 1 -
 .../upscaler/pipeline_shark_stable_diffusion_upscale.py | 1 -
 shark/examples/shark_inference/upscaler/utils.py | 2 --
 .../stable-diffusion-img2img/stable_diffusion_img2img.py | 7 +++++--
 shark/iree_utils/compile_utils.py | 2 --
 shark/iree_utils/gpu_utils.py | 1 +
 shark/iree_utils/vulkan_target_env_utils.py | 8 --------
 shark/shark_benchmark_runner.py | 5 ++++-
 shark/shark_downloader.py | 1 -
 shark/shark_importer.py | 1 -
 shark/shark_inference.py | 1 -
 shark/sharkdynamo/utils.py | 1 +
 tank/examples/opt/hacked_hf_opt.py | 3 ---
 tank/test_models.py | 1 -
 .../albert_lite_base/albert_lite_base_tflite_test.py | 1 +
 web/index.py | 2 --
 web/models/albert_maskfill.py | 3 ---
 web/models/diffusion/v_diffusion.py | 1 -
 web/models/resnet50.py | 2 --
 web/models/stable_diffusion/main.py | 1 -
 web/models/stable_diffusion/utils.py | 2 --
 36 files changed, 19 insertions(+), 59 deletions(-)

diff --git a/apps/stable_diffusion/scripts/txt2img.py b/apps/stable_diffusion/scripts/txt2img.py
index cb36e72c..72219b92 100644
--- a/apps/stable_diffusion/scripts/txt2img.py
+++ b/apps/stable_diffusion/scripts/txt2img.py
@@ -88,6 +88,7 @@
 txt2img_obj = None
 config_obj = None
 schedulers = None
+
 # Exposed to UI.
 def txt2img_inf(
     prompt: str,
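A note on the hunk above: the inserted blank line gives the top-level comment-and-def pair the two blank lines of separation that PEP 8 calls for. The formatter is presumably black, which the SHARK repo lints with, though the patch itself never names the tool. A minimal sketch of the target layout, reusing the names from the hunk with a placeholder body:

    schedulers = None


    # Exposed to UI.  (black keeps the comment attached to the def and puts the
    # two required blank lines before the comment, not between it and the def)
    def txt2img_inf(prompt: str):
        return prompt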
diff --git a/apps/stable_diffusion/src/models/model_wrappers.py b/apps/stable_diffusion/src/models/model_wrappers.py
index f9d33fea..f3682306 100644
--- a/apps/stable_diffusion/src/models/model_wrappers.py
+++ b/apps/stable_diffusion/src/models/model_wrappers.py
@@ -199,7 +199,6 @@ class SharkifyStableDiffusionModel:
         return shark_clip
 
     def __call__(self):
-
         for model_id in base_models:
             self.inputs = get_input_info(
                 base_models[model_id],
diff --git a/apps/stable_diffusion/src/pipelines/pipeline_shark_stable_diffusion_utils.py b/apps/stable_diffusion/src/pipelines/pipeline_shark_stable_diffusion_utils.py
index a3408ec2..45f46ddd 100644
--- a/apps/stable_diffusion/src/pipelines/pipeline_shark_stable_diffusion_utils.py
+++ b/apps/stable_diffusion/src/pipelines/pipeline_shark_stable_diffusion_utils.py
@@ -114,7 +114,6 @@ class StableDiffusionPipeline:
         cpu_scheduling,
         return_all_latents=False,
     ):
-
         step_time_sum = 0
         latent_history = [latents]
         text_embeddings = torch.from_numpy(text_embeddings).to(dtype)
diff --git a/apps/stable_diffusion/src/utils/profiler.py b/apps/stable_diffusion/src/utils/profiler.py
index 34ccff02..c2045576 100644
--- a/apps/stable_diffusion/src/utils/profiler.py
+++ b/apps/stable_diffusion/src/utils/profiler.py
@@ -1,5 +1,6 @@
 from .stable_args import args
 
+
 # Helper function to profile the vulkan device.
 def start_profiling(file_path="foo.rdc", profiling_mode="queue"):
     if args.vulkan_debug_utils and "vulkan" in args.device:
diff --git a/apps/stable_diffusion/src/utils/utils.py b/apps/stable_diffusion/src/utils/utils.py
index 7c24188d..24b0a282 100644
--- a/apps/stable_diffusion/src/utils/utils.py
+++ b/apps/stable_diffusion/src/utils/utils.py
@@ -75,7 +75,6 @@ def compile_through_fx(
     f16_input_mask=None,
     extra_args=[],
 ):
-
     mlir_module, func_name = import_with_fx(
         model, inputs, is_f16, f16_input_mask
     )
@@ -89,7 +88,6 @@ def compile_through_fx(
 
 
 def set_iree_runtime_flags():
-
     vulkan_runtime_flags = [
         f"--vulkan_large_heap_block_size={args.vulkan_large_heap_block_size}",
         f"--vulkan_validation_layers={'true' if args.vulkan_validation_layers else 'false'}",
diff --git a/apps/stable_diffusion/web/index.py b/apps/stable_diffusion/web/index.py
index 6c9009f5..9dc69ccb 100644
--- a/apps/stable_diffusion/web/index.py
+++ b/apps/stable_diffusion/web/index.py
@@ -34,7 +34,6 @@ demo_css = resource_path("css/sd_dark_theme.css")
 
 
 with gr.Blocks(title="Stable Diffusion", css=demo_css) as shark_web:
-
     with gr.Row(elem_id="ui_title"):
         nod_logo = Image.open(nodlogo_loc)
         logo2 = Image.open(sdlogo_loc)
@@ -55,7 +54,6 @@ with gr.Blocks(title="Stable Diffusion", css=demo_css) as shark_web:
             ).style(width=150, height=100)
 
     with gr.Row(elem_id="ui_body"):
-
         with gr.Row():
             with gr.Column(scale=1, min_width=600):
                 with gr.Row():
diff --git a/dataset/annotation_tool.py b/dataset/annotation_tool.py
index c9db6261..4018017a 100644
--- a/dataset/annotation_tool.py
+++ b/dataset/annotation_tool.py
@@ -16,7 +16,6 @@ nodlogo_loc = shark_root.joinpath(
 
 
 with gr.Blocks(title="Dataset Annotation Tool", css=demo_css) as shark_web:
-
     with gr.Row(elem_id="ui_title"):
         nod_logo = Image.open(nodlogo_loc)
         with gr.Column(scale=1, elem_id="demo_title_outer"):
diff --git a/generate_sharktank.py b/generate_sharktank.py
index 1e73757b..0a0e401c 100644
--- a/generate_sharktank.py
+++ b/generate_sharktank.py
@@ -58,7 +58,6 @@ def save_torch_model(torch_model_list):
         model = None
         input = None
         if model_type == "stable_diffusion":
-
            args.use_tuned = False
            args.import_mlir = True
            args.use_tuned = False
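The deletions so far are all one pattern: the formatter (assumed to be black, as above) removes a blank line sitting directly after a def signature or a block-opening statement, so the body starts on the first line under the colon. A small sketch of the post-patch shape, with hypothetical names:

    def save_model(model_type):
        # The pre-patch style had a blank line here, right under the
        # signature; the formatter deletes it so the body starts immediately.
        if model_type == "stable_diffusion":
            return True
        return False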
diff --git a/shark/examples/shark_inference/ESRGAN/esrgan.py b/shark/examples/shark_inference/ESRGAN/esrgan.py
index c82e389e..0774b5ab 100644
--- a/shark/examples/shark_inference/ESRGAN/esrgan.py
+++ b/shark/examples/shark_inference/ESRGAN/esrgan.py
@@ -128,7 +128,6 @@ def load_mlir(mlir_loc):
 
 
 def compile_through_fx(model, inputs, mlir_loc=None):
-
     module = load_mlir(mlir_loc)
     if module == None:
         fx_g = make_fx(
diff --git a/shark/examples/shark_inference/simple_dlrm.py b/shark/examples/shark_inference/simple_dlrm.py
index b5882398..e12e39b9 100644
--- a/shark/examples/shark_inference/simple_dlrm.py
+++ b/shark/examples/shark_inference/simple_dlrm.py
@@ -151,7 +151,6 @@ class DLRM_Net(nn.Module):
             and (ln_top is not None)
             and (arch_interaction_op is not None)
         ):
-
             # save arguments
             self.output_d = 0
             self.arch_interaction_op = arch_interaction_op
@@ -216,7 +215,6 @@ class DLRM_Net(nn.Module):
         return ly
 
     def interact_features(self, x, ly):
-
         if self.arch_interaction_op == "dot":
             # concatenate dense and sparse features
             (batch_size, d) = x.shape
diff --git a/shark/examples/shark_inference/sparse_arch.py b/shark/examples/shark_inference/sparse_arch.py
index 3607b746..f3b0a5b0 100644
--- a/shark/examples/shark_inference/sparse_arch.py
+++ b/shark/examples/shark_inference/sparse_arch.py
@@ -99,7 +99,6 @@ class SparseArchShark(nn.Module):
         )
 
     def forward(self, *batched_inputs):
-
         concatenated_list = []
         input_enum, embedding_enum = 0, 0
 
@@ -121,7 +120,6 @@ class SparseArchShark(nn.Module):
 
 
 def test_sparse_arch() -> None:
-
     D = 3
     eb1_config = EmbeddingBagConfig(
         name="t1",
@@ -211,7 +209,6 @@ class DLRMShark(nn.Module):
     def forward(
         self, dense_features: torch.Tensor, *sparse_features
     ) -> torch.Tensor:
-
         embedded_dense = self.dense_arch(dense_features)
         embedded_sparse = self.sparse_arch(*sparse_features)
         concatenated_dense = self.inter_arch(
diff --git a/shark/examples/shark_inference/stable_diff.py b/shark/examples/shark_inference/stable_diff.py
index 52cc7c8d..623f7056 100644
--- a/shark/examples/shark_inference/stable_diff.py
+++ b/shark/examples/shark_inference/stable_diff.py
@@ -48,7 +48,6 @@ def load_mlir(mlir_loc):
 
 
 def compile_through_fx(model, inputs, mlir_loc=None, extra_args=[]):
-
     module = load_mlir(mlir_loc)
     if mlir_loc == None:
         fx_g = make_fx(
@@ -109,7 +108,6 @@ def compile_through_fx(model, inputs, mlir_loc=None, extra_args=[]):
 
 
 if __name__ == "__main__":
-
     YOUR_TOKEN = "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"
 
     # 1. Load the autoencoder model which will be used to decode the latents into image space.
@@ -224,7 +222,6 @@ if __name__ == "__main__":
 
     # print(latents, latents.shape)
     for i, t in tqdm(enumerate(scheduler.timesteps)):
-
         print(f"i = {i} t = {t}")
         # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
         latent_model_input = torch.cat([latents] * 2)
diff --git a/shark/examples/shark_inference/stable_diff_f16.py b/shark/examples/shark_inference/stable_diff_f16.py
index 5d8fdb47..5cbeec3c 100644
--- a/shark/examples/shark_inference/stable_diff_f16.py
+++ b/shark/examples/shark_inference/stable_diff_f16.py
@@ -63,7 +63,6 @@ def load_mlir(mlir_loc):
 
 
 def compile_through_fx(model, inputs, mlir_loc=None):
-
     module = load_mlir(mlir_loc)
     if mlir_loc == None:
         fx_g = make_fx(
@@ -121,7 +120,6 @@ def compile_through_fx(model, inputs, mlir_loc=None):
 
 
 if __name__ == "__main__":
-
     YOUR_TOKEN = "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"
 
     # 1. Load the autoencoder model which will be used to decode the latents into image space.
@@ -228,7 +226,6 @@ if __name__ == "__main__":
 
     # print(latents, latents.shape)
    for i, t in tqdm(enumerate(scheduler.timesteps)):
-
        print(f"i = {i} t = {t}")
        # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
        latent_model_input = torch.cat([latents] * 2)
diff --git a/shark/examples/shark_inference/stable_diffusion/main.py b/shark/examples/shark_inference/stable_diffusion/main.py
index fa451af2..598eea6c 100644
--- a/shark/examples/shark_inference/stable_diffusion/main.py
+++ b/shark/examples/shark_inference/stable_diffusion/main.py
@@ -61,6 +61,7 @@ from schedulers import (
 import time
 from shark.iree_utils.compile_utils import dump_isas
 
+
 # Helper function to profile the vulkan device.
 def start_profiling(file_path="foo.rdc", profiling_mode="queue"):
     if args.vulkan_debug_utils and "vulkan" in args.device:
@@ -79,7 +80,6 @@ def end_profiling(device):
 
 
 if __name__ == "__main__":
-
     dtype = torch.float32 if args.precision == "fp32" else torch.half
 
     # Make it as default prompt
diff --git a/shark/examples/shark_inference/stable_diffusion/utils.py b/shark/examples/shark_inference/stable_diffusion/utils.py
index de2652d5..28f28bbc 100644
--- a/shark/examples/shark_inference/stable_diffusion/utils.py
+++ b/shark/examples/shark_inference/stable_diffusion/utils.py
@@ -87,7 +87,6 @@ def compile_through_fx(
     debug=False,
     generate_vmfb=True,
 ):
-
     from shark.parser import shark_args
 
     if "cuda" in args.device:
@@ -116,7 +115,10 @@ def compile_through_fx(
 
     save_dir = os.path.join(args.local_tank_cache, model_name)
 
-    mlir_module, func_name, = import_with_fx(
+    (
+        mlir_module,
+        func_name,
+    ) = import_with_fx(
         model=model,
         inputs=inputs,
         is_f16=is_f16,
@@ -136,7 +138,6 @@ def compile_through_fx(
 
 
 def set_iree_runtime_flags():
-
     vulkan_runtime_flags = [
         f"--vulkan_large_heap_block_size={args.vulkan_large_heap_block_size}",
         f"--vulkan_validation_layers={'true' if args.vulkan_validation_layers else 'false'}",
diff --git a/shark/examples/shark_inference/upscaler/model_wrappers.py b/shark/examples/shark_inference/upscaler/model_wrappers.py
index 3b9f6a93..1de747c4 100644
--- a/shark/examples/shark_inference/upscaler/model_wrappers.py
+++ b/shark/examples/shark_inference/upscaler/model_wrappers.py
@@ -18,7 +18,6 @@ model_input = {
 
 
 def get_clip_mlir(model_name="clip_text", extra_args=[]):
-
     text_encoder = CLIPTextModel.from_pretrained(
         model_id,
         subfolder="text_encoder",
diff --git a/shark/examples/shark_inference/upscaler/pipeline_shark_stable_diffusion_upscale.py b/shark/examples/shark_inference/upscaler/pipeline_shark_stable_diffusion_upscale.py
index a37edb74..d1aeb538 100644
--- a/shark/examples/shark_inference/upscaler/pipeline_shark_stable_diffusion_upscale.py
+++ b/shark/examples/shark_inference/upscaler/pipeline_shark_stable_diffusion_upscale.py
@@ -339,7 +339,6 @@ class SharkStableDiffusionUpscalePipeline:
         ] = None,
         callback_steps: Optional[int] = 1,
     ):
-
         # 1. Check inputs
         self.check_inputs(prompt, image, noise_level, callback_steps)
 
diff --git a/shark/examples/shark_inference/upscaler/utils.py b/shark/examples/shark_inference/upscaler/utils.py
index 4642fd3f..54d8b739 100644
--- a/shark/examples/shark_inference/upscaler/utils.py
+++ b/shark/examples/shark_inference/upscaler/utils.py
@@ -62,7 +62,6 @@ def get_shark_model(tank_url, model_name, extra_args=[]):
 def compile_through_fx(
     model, inputs, model_name, is_f16=False, f16_input_mask=None, extra_args=[]
 ):
-
     mlir_module, func_name = import_with_fx(
         model, inputs, is_f16, f16_input_mask
     )
@@ -76,7 +75,6 @@ def compile_through_fx(
 
 
 def set_iree_runtime_flags():
-
     vulkan_runtime_flags = [
         f"--vulkan_large_heap_block_size={args.vulkan_large_heap_block_size}",
         f"--vulkan_validation_layers={'true' if args.vulkan_validation_layers else 'false'}",
diff --git a/shark/examples/shark_training/stable-diffusion-img2img/stable_diffusion_img2img.py b/shark/examples/shark_training/stable-diffusion-img2img/stable_diffusion_img2img.py
index aea4c9ff..00ba8ffb 100644
--- a/shark/examples/shark_training/stable-diffusion-img2img/stable_diffusion_img2img.py
+++ b/shark/examples/shark_training/stable-diffusion-img2img/stable_diffusion_img2img.py
@@ -169,6 +169,7 @@ imagenet_style_templates_small = [
     "a large painting in the style of {}",
 ]
 
+
 # Setup the dataset
 class TextualInversionDataset(Dataset):
     def __init__(
@@ -184,7 +185,6 @@ class TextualInversionDataset(Dataset):
         placeholder_token="*",
         center_crop=False,
     ):
-
         self.data_root = data_root
         self.tokenizer = tokenizer
         self.learnable_property = learnable_property
@@ -244,7 +244,10 @@ class TextualInversionDataset(Dataset):
 
         if self.center_crop:
             crop = min(img.shape[0], img.shape[1])
-            h, w, = (
+            (
+                h,
+                w,
+            ) = (
                 img.shape[0],
                 img.shape[1],
             )
diff --git a/shark/iree_utils/compile_utils.py b/shark/iree_utils/compile_utils.py
index bbca0d73..dbb7c36b 100644
--- a/shark/iree_utils/compile_utils.py
+++ b/shark/iree_utils/compile_utils.py
@@ -143,7 +143,6 @@ def compile_benchmark_dirs(bench_dir, device, dispatch_benchmarks):
                 in_dispatches = True
         if all_dispatches or in_dispatches:
             for f_ in os.listdir(f"{bench_dir}/{d_}"):
-
                 if "benchmark.mlir" in f_:
                     dispatch_file = open(f"{bench_dir}/{d_}/{f_}", "r")
                     module = dispatch_file.read()
@@ -314,7 +313,6 @@ def get_iree_compiled_module(
 
 
 def load_flatbuffer(flatbuffer_path: str, device: str, device_idx: int = None):
-
     with open(os.path.join(flatbuffer_path), "rb") as f:
         flatbuffer_blob = f.read()
 
diff --git a/shark/iree_utils/gpu_utils.py b/shark/iree_utils/gpu_utils.py
index 37728d70..3dd2b3c7 100644
--- a/shark/iree_utils/gpu_utils.py
+++ b/shark/iree_utils/gpu_utils.py
@@ -18,6 +18,7 @@ import iree.runtime as ireert
 import ctypes
 from shark.parser import shark_args
 
+
 # Get the default gpu args given the architecture.
 def get_iree_gpu_args():
     ireert.flags.FUNCTION_INPUT_VALIDATION = False
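stable_diffusion/utils.py and the img2img trainer above (and shark_benchmark_runner.py below) show the one rewrite in this patch that isn't about blank lines: an assignment target ending in a comma gets wrapped in parentheses and exploded one name per line. That trailing comma is black's "magic trailing comma" behavior (again an assumption about the tool). A runnable sketch with hypothetical names:

    def get_tf_model(name):
        return name + "_graph", name + "_inputs"


    # `model, inputs_, = get_tf_model("resnet50")` is the pre-patch spelling;
    # the trailing comma makes the formatter keep the target exploded:
    (
        model,
        inputs_,
    ) = get_tf_model("resnet50")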
diff --git a/shark/iree_utils/vulkan_target_env_utils.py b/shark/iree_utils/vulkan_target_env_utils.py
index a7b89890..86c9dc68 100644
--- a/shark/iree_utils/vulkan_target_env_utils.py
+++ b/shark/iree_utils/vulkan_target_env_utils.py
@@ -16,7 +16,6 @@ from collections import OrderedDict
 
 
 def get_vulkan_target_env(vulkan_target_triple):
-
     arch, product, os = vulkan_target_triple.split("=")[1].split("-")
     triple = (arch, product, os)
     # get version
@@ -37,7 +36,6 @@
 
 
 def get_vulkan_target_env_flag(vulkan_target_triple):
-
     target_env = get_vulkan_target_env(vulkan_target_triple)
     target_env_flag = f"--iree-vulkan-target-env={target_env}"
     return target_env_flag
@@ -124,7 +122,6 @@ def get_extensions(triple):
 
 
 def get_vendor(triple):
-
     arch, product, os = triple
     if arch == "unknown":
         return "Unknown"
@@ -206,7 +203,6 @@ def get_vulkan_target_capabilities(triple):
         cap["coopmatCases"] = None
 
     if arch in ["rdna1", "rdna2", "rdna3"]:
-
         cap["maxComputeSharedMemorySize"] = 65536
         cap["maxComputeWorkGroupInvocations"] = 1024
         cap["maxComputeWorkGroupSize"] = [1024, 1024, 1024]
@@ -287,7 +283,6 @@ def get_vulkan_target_capabilities(triple):
         cap["variablePointersStorageBuffer"] = True
 
     elif arch == "m1":
-
         cap["maxComputeSharedMemorySize"] = 32768
         cap["maxComputeWorkGroupInvocations"] = 1024
         cap["maxComputeWorkGroupSize"] = [1024, 1024, 1024]
@@ -362,7 +357,6 @@ def get_vulkan_target_capabilities(triple):
         ]
 
     elif arch in ["ampere", "turing"]:
-
         cap["maxComputeSharedMemorySize"] = 49152
         cap["maxComputeWorkGroupInvocations"] = 1024
         cap["maxComputeWorkGroupSize"] = [1024, 1024, 1024]
@@ -402,7 +396,6 @@ def get_vulkan_target_capabilities(triple):
         ]
 
     elif arch == "adreno":
-
         cap["maxComputeSharedMemorySize"] = 32768
         cap["maxComputeWorkGroupInvocations"] = 1024
         cap["maxComputeWorkGroupSize"] = [1024, 1024, 64]
@@ -447,7 +440,6 @@ def get_vulkan_target_capabilities(triple):
     res = ""
 
     for k, v in cap.items():
-
         if v is None or v == False:
             continue
         if isinstance(v, bool):
diff --git a/shark/shark_benchmark_runner.py b/shark/shark_benchmark_runner.py
index 0ef1b3e0..4f060bf9 100644
--- a/shark/shark_benchmark_runner.py
+++ b/shark/shark_benchmark_runner.py
@@ -158,7 +158,10 @@ class SharkBenchmarkRunner(SharkRunner):
         # tf_device = "/GPU:0" if self.device == "cuda" else "/CPU:0"
         tf_device = "/CPU:0"
         with tf.device(tf_device):
-            model, input, = get_tf_model(
+            (
+                model,
+                input,
+            ) = get_tf_model(
                 modelname
             )[:2]
             frontend_model = model
diff --git a/shark/shark_downloader.py b/shark/shark_downloader.py
index 6e784878..8750efd6 100644
--- a/shark/shark_downloader.py
+++ b/shark/shark_downloader.py
@@ -34,7 +34,6 @@ def download_public_file(
     dest_filename = None
     desired_file = None
     if single_file:
-
         desired_file = full_gs_url.split("/")[-1]
         source_blob_name = "/".join(full_gs_url.split("/")[3:-1])
         destination_folder_name, dest_filename = os.path.split(
diff --git a/shark/shark_importer.py b/shark/shark_importer.py
index 389c74ac..0825573f 100644
--- a/shark/shark_importer.py
+++ b/shark/shark_importer.py
@@ -257,7 +257,6 @@ class SharkImporter:
 
 
 def get_f16_inputs(inputs, is_f16, f16_input_mask):
-
     if is_f16 == False:
         return inputs
     if f16_input_mask == None:
diff --git a/shark/shark_inference.py b/shark/shark_inference.py
index 306632d4..b4b55411 100644
--- a/shark/shark_inference.py
+++ b/shark/shark_inference.py
@@ -90,7 +90,6 @@ class SharkInference:
         self.shark_runner = None
 
     def compile(self, extra_args=[]):
-
         if self.dispatch_benchmarks is not None:
             extra_args.append(
                 f"--iree-hal-dump-executable-sources-to={self.dispatch_benchmarks_dir}"
diff --git a/shark/sharkdynamo/utils.py b/shark/sharkdynamo/utils.py
index cdb59dcc..b2f29cca 100644
--- a/shark/sharkdynamo/utils.py
+++ b/shark/sharkdynamo/utils.py
@@ -9,6 +9,7 @@ from torch._decomp import get_decompositions
 
 import torch_mlir
 
+
 # TODO: Control decompositions.
 def default_decompositions():
     return get_decompositions(
diff --git a/tank/examples/opt/hacked_hf_opt.py b/tank/examples/opt/hacked_hf_opt.py
index d85d4a81..d7c26458 100644
--- a/tank/examples/opt/hacked_hf_opt.py
+++ b/tank/examples/opt/hacked_hf_opt.py
@@ -338,7 +338,6 @@ class OPTDecoderLayer(nn.Module):
         torch.FloatTensor,
         Optional[Tuple[torch.FloatTensor, torch.FloatTensor]],
     ]:
-
         # TODO: Refactor this function
 
         residual = hidden_states
@@ -509,7 +508,6 @@ class OPTDecoder(OPTPreTrainedModel):
         output_hidden_states: Optional[bool] = None,
         return_dict: Optional[bool] = None,
     ) -> Union[Tuple, BaseModelOutputWithPast]:
-
         # TODO: Refactor this function
 
         output_attentions = (
@@ -788,7 +786,6 @@ class OPTForCausalLM(OPTPreTrainedModel):
         output_hidden_states: Optional[bool] = None,
         return_dict: Optional[bool] = None,
     ) -> Union[Tuple, CausalLMOutputWithPast]:
-
         # TODO: Refactor this function
 
         output_attentions = (
diff --git a/tank/test_models.py b/tank/test_models.py
index fcd22d4c..00d22231 100644
--- a/tank/test_models.py
+++ b/tank/test_models.py
@@ -132,7 +132,6 @@ class SharkModuleTester:
         self.config = config
 
     def create_and_check_module(self, dynamic, device):
-
         shark_args.local_tank_cache = self.local_tank_cache
         shark_args.update_tank = self.update_tank
         if "nhcw-nhwc" in self.config["flags"] and not os.path.isfile(
diff --git a/tank/tflite/albert_lite_base/albert_lite_base_tflite_test.py b/tank/tflite/albert_lite_base/albert_lite_base_tflite_test.py
index a226d2f4..0c3d1f4f 100644
--- a/tank/tflite/albert_lite_base/albert_lite_base_tflite_test.py
+++ b/tank/tflite/albert_lite_base/albert_lite_base_tflite_test.py
@@ -9,6 +9,7 @@ from shark.parser import shark_args
 # model_path = "https://tfhub.dev/tensorflow/lite-model/albert_lite_base/squadv1/1?lite-format=tflite"
 # model_path = model_path
 
+
 # Inputs modified to be useful albert inputs.
 def generate_inputs(input_details):
     for input in input_details:
diff --git a/web/index.py b/web/index.py
index 5cfa8db8..aed02e9f 100644
--- a/web/index.py
+++ b/web/index.py
@@ -23,7 +23,6 @@ demo_css = Path(__file__).parent.joinpath("demo.css").resolve()
 
 
 with gr.Blocks(title="Stable Diffusion", css=demo_css) as shark_web:
-
     with gr.Row(elem_id="ui_title"):
         nod_logo = Image.open(nodlogo_loc)
         logo2 = Image.open(sdlogo_loc)
@@ -44,7 +43,6 @@ with gr.Blocks(title="Stable Diffusion", css=demo_css) as shark_web:
             ).style(width=150, height=100)
 
     with gr.Row(elem_id="ui_body"):
-
         with gr.Row():
             with gr.Column(scale=1, min_width=600):
                 with gr.Group(elem_id="prompt_box_outer"):
diff --git a/web/models/albert_maskfill.py b/web/models/albert_maskfill.py
index e0a1573d..3f4973e0 100644
--- a/web/models/albert_maskfill.py
+++ b/web/models/albert_maskfill.py
@@ -27,7 +27,6 @@ compiled_module["tokenizer"] = AutoTokenizer.from_pretrained("albert-base-v2")
 
 
 def preprocess_data(text):
-
     global compiled_module
 
     # Preparing Data
@@ -44,7 +43,6 @@ def preprocess_data(text):
 
 
 def top5_possibilities(text, inputs, token_logits, log_write):
-
     global DEBUG
     global compiled_module
 
@@ -68,7 +66,6 @@ def top5_possibilities(text, inputs, token_logits, log_write):
 
 
 def albert_maskfill_inf(masked_text, device):
-
     global DEBUG
     global compiled_module
 
diff --git a/web/models/diffusion/v_diffusion.py b/web/models/diffusion/v_diffusion.py
index c47b0912..e14acf51 100755
--- a/web/models/diffusion/v_diffusion.py
+++ b/web/models/diffusion/v_diffusion.py
@@ -103,7 +103,6 @@ def cache_model():
 
 
 def vdiff_inf(prompts: str, n, bs, steps, _device):
-
     global device
     global model
     global checkpoint
diff --git a/web/models/resnet50.py b/web/models/resnet50.py
index 0cb4f11b..80377a51 100644
--- a/web/models/resnet50.py
+++ b/web/models/resnet50.py
@@ -37,7 +37,6 @@ def load_labels():
 
 
 def top3_possibilities(res, log_write):
-
     global DEBUG
 
     if DEBUG:
@@ -57,7 +56,6 @@ def top3_possibilities(res, log_write):
 
 
 def resnet_inf(numpy_img, device):
-
     global DEBUG
     global compiled_module
 
diff --git a/web/models/stable_diffusion/main.py b/web/models/stable_diffusion/main.py
index 61a1a03f..c9d20d84 100644
--- a/web/models/stable_diffusion/main.py
+++ b/web/models/stable_diffusion/main.py
@@ -209,7 +209,6 @@ def stable_diff_inf(
     avg_ms = 0
 
     for i, t in tqdm(enumerate(scheduler.timesteps)):
-
         step_start = time.time()
         timestep = torch.tensor([t]).to(dtype).detach().numpy()
         latent_model_input = scheduler.scale_model_input(latents, t)
diff --git a/web/models/stable_diffusion/utils.py b/web/models/stable_diffusion/utils.py
index 69e7ab65..7c9628e6 100644
--- a/web/models/stable_diffusion/utils.py
+++ b/web/models/stable_diffusion/utils.py
@@ -60,7 +60,6 @@ def get_shark_model(tank_url, model_name, extra_args=[]):
 
 # Converts the torch-module into a shark_module.
 def compile_through_fx(model, inputs, model_name, extra_args=[]):
-
     mlir_module, func_name = import_with_fx(model, inputs)
 
     shark_module = SharkInference(
@@ -73,7 +72,6 @@ def compile_through_fx(model, inputs, model_name, extra_args=[]):
 
 
 def set_iree_runtime_flags():
-
     vulkan_runtime_flags = [
         f"--vulkan_large_heap_block_size={args.vulkan_large_heap_block_size}",
         f"--vulkan_validation_layers={'true' if args.vulkan_validation_layers else 'false'}",
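Every hunk in this patch is mechanical, so the result can be cross-checked by running the formatter instead of eyeballing each file. A minimal sketch using black's Python API, assuming black is the tool behind this commit and that line_length=79 explains the wrapping seen above (both are inferences; the patch states neither):

    import black

    # The pre-patch form of the shark_benchmark_runner.py assignment.
    source = "model, input, = get_tf_model(modelname)[:2]\n"

    # format_str applies the same rewrites recorded in the hunks: the
    # trailing-comma target is parenthesized and exploded, one name per line.
    print(black.format_str(source, mode=black.Mode(line_length=79)))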