Gpt2 benchmark with HALF and BEAM (#2636)

* benchmark gpt2 with half and beam

* BEAM=4

* optional validation

* green is good

* we care
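
A note on the flags (my gloss, not part of the commit message): HALF=1 runs the model in float16, BEAM=4 enables tinygrad's beam search over kernel optimizations with a width of 4, and CACHELEVEL=0 disables the compilation cache so the BEAM search is actually exercised on each run. A minimal sketch of how integer flags like these can be read from the environment (a hypothetical standalone helper, not tinygrad's own getenv):

    import os

    def getenv(key: str, default: int = 0) -> int:
      # read an integer feature flag such as HALF, BEAM, or CACHELEVEL from the environment
      return int(os.environ.get(key, default))

    HALF, BEAM = getenv("HALF"), getenv("BEAM")
    if HALF: print("would cast the model weights to float16")
    if BEAM: print(f"would beam-search kernel optimizations with width {BEAM}")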
Author: chenyu
Date: 2023-12-05 22:15:16 -05:00
Committed by: GitHub
Parent: a73579919f
Commit: 229ada5fe5
2 changed files with 18 additions and 2 deletions

.github/workflows/benchmark.yml

@@ -81,6 +81,8 @@ jobs:
       run: |
         CUDA=1 JIT=0 python3 examples/gpt2.py --prompt "Hello." --count 10 --temperature 0 --timing | tee gpt2_unjitted.txt
         CUDA=1 JIT=1 python3 examples/gpt2.py --prompt "Hello." --count 10 --temperature 0 --timing | tee gpt2_jitted.txt
+    - name: Run GPT2 w HALF/BEAM
+      run: CUDA=1 JIT=1 HALF=1 BEAM=4 CACHELEVEL=0 python3 examples/gpt2.py --count 10 --temperature 0 --timing | tee gpt2_half_beam.txt
     - uses: actions/upload-artifact@v3
       with:
         name: Speed (NVIDIA)
@@ -89,6 +91,7 @@ jobs:
           torch_speed.txt
           gpt2_unjitted.txt
           gpt2_jitted.txt
+          gpt2_half_beam.txt
 
   testamdbenchmark:
     name: AMD Benchmark
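
To try the new CI step locally (a sketch assuming a CUDA device and a tinygrad checkout; the command is the one from the workflow above):

    import subprocess

    # run the HALF/BEAM benchmark the same way the CI step does and capture its timing output
    cmd = 'CUDA=1 JIT=1 HALF=1 BEAM=4 CACHELEVEL=0 python3 examples/gpt2.py --count 10 --temperature 0 --timing'
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    print(result.stdout)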

examples/gpt2.py

@@ -159,9 +159,10 @@ class GPT2:
 if __name__ == "__main__":
   Tensor.no_grad = True
   print(f"using {Device.DEFAULT} backend")
+  default_prompt = "What is the answer to life, the universe, and everything?"
 
   parser = argparse.ArgumentParser(description='Run GPT2 in tinygrad', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-  parser.add_argument('--prompt', type=str, default="What is the answer to life, the universe, and everything?", help="Phrase to start with")
+  parser.add_argument('--prompt', type=str, default=default_prompt, help="Phrase to start with")
   parser.add_argument('--count', type=int, default=100, help="Max number of tokens to generate")
   parser.add_argument('--temperature', type=float, default=0.8, help="Temperature in the softmax")
   parser.add_argument('--model_size', type=str, default="gpt2-medium", help="Size of model to use [gpt2, gpt2-medium, gpt2-large, gpt2-xl]")
@@ -191,4 +192,16 @@ if __name__ == "__main__":
   print('Generating text...')
   if len(texts) == 1: print(texts[0])
   else:
-    for i,text in enumerate(texts): print(colored(f"Response {i}:", "green"), text)
+    for i,text in enumerate(texts): print(colored(f"Response {i}:", "green"), text)
+
+  # validate output!
+  if args.temperature == 0 and args.model_size == "gpt2-medium" and args.count == 10:
+    expected = {
+      default_prompt: "What is the answer to life, the universe, and everything?\n\nThe answer is that we are all one",
+      "Hello.": "Hello. I'm a little late to the party, but",
+    }
+    try:
+      assert texts[0] == expected[args.prompt]
+      print(colored("output validated", "green"))
+    except KeyError:
+      pass
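
The validation above works because temperature 0 makes decoding greedy and therefore deterministic: the same model, prompt, and token count always yield the same text, so the output can be compared against a known-good string, and unknown prompts simply skip the check via the KeyError branch. A minimal NumPy sketch of the idea (illustrative only, not tinygrad's sampling code):

    import numpy as np

    def sample(logits: np.ndarray, temperature: float, rng: np.random.Generator) -> int:
      if temperature == 0:
        # greedy decoding: always the argmax token, so the output is fully deterministic
        return int(np.argmax(logits))
      # otherwise sample from the temperature-scaled softmax, which is stochastic
      scaled = logits / temperature
      probs = np.exp(scaled - scaled.max())
      probs /= probs.sum()
      return int(rng.choice(len(logits), p=probs))

    logits = np.array([0.1, 2.0, 0.5])
    assert sample(logits, 0, np.random.default_rng()) == 1  # token 1 wins every time at temperature 0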