Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com>
Author: Konrad
Date: 2023-05-20 19:45:27 -04:00
Committer: GitHub
parent c30f5b7d5e
commit 57ea7b5216
7 changed files with 122 additions and 3 deletions


@@ -118,3 +118,13 @@ class TestApiManager:
        assert api_manager.get_total_prompt_tokens() == 50
        assert api_manager.get_total_completion_tokens() == 100
        assert api_manager.get_total_cost() == (50 * 0.002 + 100 * 0.002) / 1000

    @staticmethod
    def test_get_models():
        """Test if getting models works correctly."""
        with patch("openai.Model.list") as mock_list_models:
            mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]}
            result = api_manager.get_models()

            assert result[0]["id"] == "gpt-3.5-turbo"
            assert api_manager.models[0]["id"] == "gpt-3.5-turbo"
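For context, a minimal sketch of the get_models() behavior this test implies: fetch the model list from openai.Model.list() once and cache it on the manager. This is inferred from the assertions, not copied from the actual autogpt.llm.api_manager source, which may filter or structure the list differently.

    # Hypothetical sketch, inferred from the test above (openai < 1.0 API).
    import openai

    class ApiManager:
        def __init__(self):
            self.models = None

        def get_models(self):
            """Fetch the available models once and cache them on the instance."""
            if self.models is None:
                self.models = openai.Model.list()["data"]
            return self.models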


@@ -2,10 +2,11 @@
Test cases for the Config class, which handles the configuration settings
for the AI and ensures it behaves as a singleton.
"""
from unittest.mock import patch

import pytest
from openai import InvalidRequestError

from autogpt.config import Config
from autogpt.configurator import create_config


def test_initial_values(config):
@@ -117,3 +118,40 @@ def test_set_debug_mode(config):

    # Reset debug mode
    config.set_debug_mode(debug_mode)


@patch("openai.Model.list")
def test_smart_and_fast_llm_models_set_to_gpt4(mock_list_models, config):
    """
    Test that both models fall back to gpt-3.5-turbo when they are set to
    gpt-4 but gpt-4 is not in the API's model list.
    """
    fast_llm_model = config.fast_llm_model
    smart_llm_model = config.smart_llm_model
    config.fast_llm_model = "gpt-4"
    config.smart_llm_model = "gpt-4"
    mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]}

    create_config(
        continuous=False,
        continuous_limit=False,
        ai_settings_file="",
        prompt_settings_file="",
        skip_reprompt=False,
        speak=False,
        debug=False,
        gpt3only=False,
        gpt4only=False,
        memory_type="",
        browser_name="",
        allow_downloads=False,
        skip_news=False,
    )

    assert config.fast_llm_model == "gpt-3.5-turbo"
    assert config.smart_llm_model == "gpt-3.5-turbo"

    # Reset config
    config.set_fast_llm_model(fast_llm_model)
    config.set_smart_llm_model(smart_llm_model)


@@ -1,7 +1,11 @@
from unittest.mock import patch

import pytest
from openai import InvalidRequestError
from openai.error import APIError, RateLimitError

from autogpt.llm import llm_utils
from autogpt.llm.llm_utils import check_model


@pytest.fixture(params=[RateLimitError, APIError])
@@ -131,3 +135,26 @@ def test_chunked_tokens():
    ]
    output = list(llm_utils.chunked_tokens(text, "cl100k_base", 8191))
    assert output == expected_output


def test_check_model(api_manager):
    """
    Test that check_model() returns the original model when it is available,
    and falls back to gpt-3.5-turbo when it is not.
    """
    with patch("openai.Model.list") as mock_list_models:
        # Test when the requested model is available
        mock_list_models.return_value = {"data": [{"id": "gpt-4"}]}
        result = check_model("gpt-4", "smart_llm_model")
        assert result == "gpt-4"

        # Reset the api manager's cached model list
        api_manager.models = None

        # Test when the requested model is not available
        mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]}
        result = check_model("gpt-4", "fast_llm_model")
        assert result == "gpt-3.5-turbo"

        # Reset the api manager's cached model list
        api_manager.models = None
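From the two assertions, a plausible shape for check_model() follows. This sketch is inferred from the test, not copied from autogpt.llm.llm_utils; the warning text is an assumption, and ApiManager refers to the cached-model-list sketch above:

    # Hedged sketch of check_model(), inferred from the assertions above.
    def check_model(model_name: str, model_type: str) -> str:
        """Return model_name if the API lists it; otherwise fall back."""
        models = ApiManager().get_models()  # cached list of {"id": ...} dicts
        if any(m["id"] == model_name for m in models):
            return model_name
        print(f"You do not have access to {model_name}. "
              f"Setting {model_type} to gpt-3.5-turbo.")
        return "gpt-3.5-turbo"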