Request for longer response to make cache test more robust. (#2043)

* Request for longer response to make cache test more robust.

* 1000 words --> 100 words
Eric Zhu, 2024-03-17 15:49:40 -07:00 (committed by GitHub)
parent 448736e2ad
commit 2cefff9206

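All of the hunks below serve one fix: these tests time a cold-cache call (a real completion request) against a warm-cache call (a disk-cache hit) and assert that the warm call is faster. With the old throwaway prompt "random()", the completion could return almost instantly, leaving the two durations indistinguishable and the timing assertion flaky; requesting a 100-word summary keeps the cold call reliably slower. Below is a minimal sketch of that timing pattern, with a stub standing in for OpenAIWrapper (the stub create function and its dict cache are illustrative, not autogen's implementation):

import time

_cache: dict[str, str] = {}

def create(prompt: str) -> str:
    # Warm path: a cache hit returns almost instantly.
    if prompt in _cache:
        return _cache[prompt]
    # Cold path: stand-in for a real completion. A longer requested
    # response means more generated tokens and more wall-clock time.
    time.sleep(0.5)
    _cache[prompt] = f"response to: {prompt!r}"
    return _cache[prompt]

prompt = "Write a 100 word summary on the topic of the history of human civilization."

start_time = time.time()
cold_cache_response = create(prompt)
duration_with_cold_cache = time.time() - start_time

start_time = time.time()
warm_cache_response = create(prompt)
duration_with_warm_cache = time.time() - start_time

assert cold_cache_response == warm_cache_response
# This is the assertion that flakes when the cold call is too quick.
assert duration_with_warm_cache < duration_with_cold_cache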
@@ -167,6 +167,9 @@ def test_legacy_cache():
         filter_dict={"model": ["gpt-3.5-turbo", "gpt-35-turbo"]},
     )
+    # Prompt to use for testing.
+    prompt = "Write a 100 word summary on the topic of the history of human civilization."
+
     # Clear cache.
     if os.path.exists(LEGACY_CACHE_DIR):
         shutil.rmtree(LEGACY_CACHE_DIR)
@@ -174,12 +177,12 @@ def test_legacy_cache():
     # Test default cache seed.
     client = OpenAIWrapper(config_list=config_list)
     start_time = time.time()
-    cold_cache_response = client.create(messages=[{"role": "user", "content": "random()"}])
+    cold_cache_response = client.create(messages=[{"role": "user", "content": prompt}])
     end_time = time.time()
     duration_with_cold_cache = end_time - start_time
     start_time = time.time()
-    warm_cache_response = client.create(messages=[{"role": "user", "content": "random()"}])
+    warm_cache_response = client.create(messages=[{"role": "user", "content": prompt}])
     end_time = time.time()
     duration_with_warm_cache = end_time - start_time
     assert cold_cache_response == warm_cache_response
@@ -189,12 +192,12 @@ def test_legacy_cache():
     # Test with cache seed set through constructor
     client = OpenAIWrapper(config_list=config_list, cache_seed=13)
     start_time = time.time()
-    cold_cache_response = client.create(messages=[{"role": "user", "content": "random()"}])
+    cold_cache_response = client.create(messages=[{"role": "user", "content": prompt}])
     end_time = time.time()
     duration_with_cold_cache = end_time - start_time
     start_time = time.time()
-    warm_cache_response = client.create(messages=[{"role": "user", "content": "random()"}])
+    warm_cache_response = client.create(messages=[{"role": "user", "content": prompt}])
     end_time = time.time()
     duration_with_warm_cache = end_time - start_time
     assert cold_cache_response == warm_cache_response
@@ -204,12 +207,12 @@ def test_legacy_cache():
     # Test with cache seed set through create method
     client = OpenAIWrapper(config_list=config_list)
     start_time = time.time()
-    cold_cache_response = client.create(messages=[{"role": "user", "content": "random()"}], cache_seed=17)
+    cold_cache_response = client.create(messages=[{"role": "user", "content": prompt}], cache_seed=17)
     end_time = time.time()
     duration_with_cold_cache = end_time - start_time
     start_time = time.time()
-    warm_cache_response = client.create(messages=[{"role": "user", "content": "random()"}], cache_seed=17)
+    warm_cache_response = client.create(messages=[{"role": "user", "content": prompt}], cache_seed=17)
     end_time = time.time()
     duration_with_warm_cache = end_time - start_time
     assert cold_cache_response == warm_cache_response
@@ -218,7 +221,7 @@ def test_legacy_cache():
     # Test using a different cache seed through create method.
     start_time = time.time()
-    cold_cache_response = client.create(messages=[{"role": "user", "content": "random()"}], cache_seed=21)
+    cold_cache_response = client.create(messages=[{"role": "user", "content": prompt}], cache_seed=21)
     end_time = time.time()
     duration_with_cold_cache = end_time - start_time
     assert duration_with_warm_cache < duration_with_cold_cache
@@ -233,6 +236,9 @@ def test_cache():
         filter_dict={"model": ["gpt-3.5-turbo", "gpt-35-turbo"]},
     )
+    # Prompt to use for testing.
+    prompt = "Write a 100 word summary on the topic of the history of artificial intelligence."
+
     # Clear cache.
     if os.path.exists(LEGACY_CACHE_DIR):
         shutil.rmtree(LEGACY_CACHE_DIR)
@@ -245,12 +251,12 @@ def test_cache():
     with Cache.disk(cache_seed=49, cache_path_root=cache_dir) as cache:
         client = OpenAIWrapper(config_list=config_list, cache=cache)
         start_time = time.time()
-        cold_cache_response = client.create(messages=[{"role": "user", "content": "random()"}])
+        cold_cache_response = client.create(messages=[{"role": "user", "content": prompt}])
         end_time = time.time()
         duration_with_cold_cache = end_time - start_time
         start_time = time.time()
-        warm_cache_response = client.create(messages=[{"role": "user", "content": "random()"}])
+        warm_cache_response = client.create(messages=[{"role": "user", "content": prompt}])
         end_time = time.time()
         duration_with_warm_cache = end_time - start_time
         assert cold_cache_response == warm_cache_response
@@ -264,12 +270,12 @@ def test_cache():
     client = OpenAIWrapper(config_list=config_list)
     with Cache.disk(cache_seed=312, cache_path_root=cache_dir) as cache:
         start_time = time.time()
-        cold_cache_response = client.create(messages=[{"role": "user", "content": "random()"}], cache=cache)
+        cold_cache_response = client.create(messages=[{"role": "user", "content": prompt}], cache=cache)
         end_time = time.time()
         duration_with_cold_cache = end_time - start_time
         start_time = time.time()
-        warm_cache_response = client.create(messages=[{"role": "user", "content": "random()"}], cache=cache)
+        warm_cache_response = client.create(messages=[{"role": "user", "content": prompt}], cache=cache)
         end_time = time.time()
         duration_with_warm_cache = end_time - start_time
         assert cold_cache_response == warm_cache_response
@@ -282,7 +288,7 @@ def test_cache():
     # Test different cache seed.
     with Cache.disk(cache_seed=123, cache_path_root=cache_dir) as cache:
         start_time = time.time()
-        cold_cache_response = client.create(messages=[{"role": "user", "content": "random()"}], cache=cache)
+        cold_cache_response = client.create(messages=[{"role": "user", "content": prompt}], cache=cache)
         end_time = time.time()
         duration_with_cold_cache = end_time - start_time
         assert duration_with_warm_cache < duration_with_cold_cache
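For reference, the hunks above exercise all four ways these tests wire up caching. A condensed sketch, assuming only the constructor and create signatures visible in this diff (config_list, the cache_path_root, and the prompt value are placeholders; the real tests fill them from OAI_CONFIG_LIST and a temp directory):

from autogen import OpenAIWrapper
from autogen.cache import Cache

config_list = [{"model": "gpt-3.5-turbo"}]  # placeholder config
prompt = "Write a 100 word summary on the topic of the history of artificial intelligence."
messages = [{"role": "user", "content": prompt}]

# Legacy disk cache: seed fixed at construction time...
client = OpenAIWrapper(config_list=config_list, cache_seed=13)
client.create(messages=messages)

# ...or chosen per call.
client = OpenAIWrapper(config_list=config_list)
client.create(messages=messages, cache_seed=17)

# New-style Cache object: passed to the constructor...
with Cache.disk(cache_seed=49, cache_path_root=".cache") as cache:
    client = OpenAIWrapper(config_list=config_list, cache=cache)
    client.create(messages=messages)

# ...or to an individual create call.
with Cache.disk(cache_seed=312, cache_path_root=".cache") as cache:
    client.create(messages=messages, cache=cache)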