From ce6d82eab25c032ec8c8daee27428ff7690f830b Mon Sep 17 00:00:00 2001
From: Anush Elangovan
Date: Fri, 10 Mar 2023 11:52:40 -0800
Subject: [PATCH] Fix bloom lint

---
 shark/examples/shark_inference/sharded_bloom.py              | 4 +---
 shark/examples/shark_inference/sharded_bloom_large_models.py | 2 +-
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/shark/examples/shark_inference/sharded_bloom.py b/shark/examples/shark_inference/sharded_bloom.py
index f9111548..3e8df14f 100644
--- a/shark/examples/shark_inference/sharded_bloom.py
+++ b/shark/examples/shark_inference/sharded_bloom.py
@@ -731,7 +731,6 @@ if __name__ == "__main__":
             "Warning: If you need to use memory efficient mode, you probably want to use 'download' instead"
         )
 
-
     if not os.path.isdir(args.model_path):
         os.mkdir(args.model_path)
 
@@ -760,7 +759,6 @@ if __name__ == "__main__":
         config = json.load(f)
         f.close()
 
-
     self_path = os.path.dirname(os.path.abspath(__file__))
     script_path = os.path.join(self_path, "sharded_bloom_large_models.py")
 
@@ -841,4 +839,4 @@ if __name__ == "__main__":
                 [input_ids, next_token.unsqueeze(-1)], dim=-1
             )
 
-    print(tokenizer.decode(input_ids.squeeze()))
\ No newline at end of file
+    print(tokenizer.decode(input_ids.squeeze()))
diff --git a/shark/examples/shark_inference/sharded_bloom_large_models.py b/shark/examples/shark_inference/sharded_bloom_large_models.py
index 6d35d6b8..1635ac13 100644
--- a/shark/examples/shark_inference/sharded_bloom_large_models.py
+++ b/shark/examples/shark_inference/sharded_bloom_large_models.py
@@ -378,4 +378,4 @@ if __name__ == "__main__":
 
         f = open(f"{working_dir}/prompt.txt", "w+")
         f.write(prompt + next_token)
-        f.close()
\ No newline at end of file
+        f.close()