Files
OpenHands/tests/unit/test_llm.py
Robert Brennan 01ae22ef57 Rename OpenDevin to OpenHands (#3472)
* Replace OpenDevin with OpenHands

* Update CONTRIBUTING.md

* Update README.md

* Update README.md

* update poetry lock; move opendevin folder to openhands

* fix env var

* revert image references in docs

* revert permissions

* revert permissions

---------

Co-authored-by: Xingyao Wang <xingyao6@illinois.edu>
2024-08-20 00:44:54 +08:00

85 lines
2.6 KiB
Python

from unittest.mock import patch
import pytest
from openhands.core.config import LLMConfig
from openhands.core.metrics import Metrics
from openhands.llm.llm import LLM
@pytest.fixture
def default_config():
    """Provide a baseline LLMConfig shared by most tests in this module."""
    config = LLMConfig(model='gpt-3.5-turbo', api_key='test_key')
    return config
def test_llm_init_with_default_config(default_config):
    """An LLM built from the fixture keeps its config values and owns a Metrics object."""
    instance = LLM(default_config)
    assert isinstance(instance.metrics, Metrics)
    assert instance.config.api_key == 'test_key'
    assert instance.config.model == 'gpt-3.5-turbo'
@patch('openhands.llm.llm.litellm.get_model_info')
def test_llm_init_with_model_info(mock_get_model_info, default_config):
    """Token limits reported by litellm's model-info lookup end up on the config."""
    model_info = {
        'max_input_tokens': 8000,
        'max_output_tokens': 2000,
    }
    mock_get_model_info.return_value = model_info
    instance = LLM(default_config)
    assert instance.config.max_input_tokens == 8000
    assert instance.config.max_output_tokens == 2000
@patch('openhands.llm.llm.litellm.get_model_info')
def test_llm_init_without_model_info(mock_get_model_info, default_config):
    """If the model-info lookup raises, the config falls back to default token limits."""
    mock_get_model_info.side_effect = Exception('Model info not available')
    instance = LLM(default_config)
    assert instance.config.max_output_tokens == 1024
    assert instance.config.max_input_tokens == 4096
def test_llm_init_with_custom_config():
    """Every explicitly supplied config field survives LLM construction untouched."""
    custom_config = LLMConfig(
        model='custom-model',
        api_key='custom_key',
        max_input_tokens=5000,
        max_output_tokens=1500,
        temperature=0.8,
        top_p=0.9,
    )
    instance = LLM(custom_config)
    # Compare each field against the value it was constructed with.
    expected_fields = {
        'model': 'custom-model',
        'api_key': 'custom_key',
        'max_input_tokens': 5000,
        'max_output_tokens': 1500,
        'temperature': 0.8,
        'top_p': 0.9,
    }
    for attr, expected in expected_fields.items():
        assert getattr(instance.config, attr) == expected
def test_llm_init_with_metrics():
    """A caller-supplied Metrics instance is adopted as-is, not copied or replaced."""
    shared_metrics = Metrics()
    instance = LLM(
        LLMConfig(model='gpt-3.5-turbo', api_key='test_key'),
        metrics=shared_metrics,
    )
    assert instance.metrics is shared_metrics
def test_llm_reset():
    """reset() must discard the old Metrics object and install a fresh one."""
    instance = LLM(LLMConfig(model='gpt-3.5-turbo', api_key='test_key'))
    metrics_before_reset = instance.metrics
    instance.reset()
    assert isinstance(instance.metrics, Metrics)
    assert instance.metrics is not metrics_before_reset
@patch('openhands.llm.llm.litellm.get_model_info')
def test_llm_init_with_openrouter_model(mock_get_model_info, default_config):
    """An openrouter-prefixed model passes its full name through to get_model_info."""
    default_config.model = 'openrouter:gpt-3.5-turbo'
    reported_limits = {
        'max_input_tokens': 7000,
        'max_output_tokens': 1500,
    }
    mock_get_model_info.return_value = reported_limits
    instance = LLM(default_config)
    assert instance.config.max_input_tokens == 7000
    assert instance.config.max_output_tokens == 1500
    # The lookup must receive the untranslated model identifier exactly once.
    mock_get_model_info.assert_called_once_with('openrouter:gpt-3.5-turbo')