From 6e7de30434a0f8b78267e0a9157098980752e9e7 Mon Sep 17 00:00:00 2001
From: Goran Peretin
Date: Sat, 25 May 2024 08:54:27 +0000
Subject: [PATCH] Fix integ tests.

---
 tests/integration/llm/test_anthropic.py |  5 +++--
 tests/integration/llm/test_groq.py      | 10 ++++------
 tests/integration/llm/test_openai.py    | 10 ++++------
 3 files changed, 11 insertions(+), 14 deletions(-)

diff --git a/tests/integration/llm/test_anthropic.py b/tests/integration/llm/test_anthropic.py
index cdb06057..b63a7e69 100644
--- a/tests/integration/llm/test_anthropic.py
+++ b/tests/integration/llm/test_anthropic.py
@@ -5,6 +5,7 @@ import pytest
 
 from core.config import LLMConfig, LLMProvider
 from core.llm.anthropic_client import AnthropicClient
+from core.llm.base import APIError
 from core.llm.convo import Convo
 from core.llm.request_log import LLMRequestStatus
 
@@ -34,7 +35,7 @@ async def test_incorrect_key():
     llm = AnthropicClient(cfg, stream_handler=print_handler)
     convo = Convo("you're a friendly assistant").user("tell me joke")
 
-    with pytest.raises(ValueError, match="invalid x-api-key"):
+    with pytest.raises(APIError, match="invalid x-api-key"):
         await llm(convo)
 
 
@@ -49,7 +50,7 @@ async def test_unknown_model():
     llm = AnthropicClient(cfg)
     convo = Convo("you're a friendly assistant").user("tell me joke")
 
-    with pytest.raises(ValueError, match="model: gpt-3.6-nonexistent"):
+    with pytest.raises(APIError, match="model: gpt-3.6-nonexistent"):
         await llm(convo)
 
 
diff --git a/tests/integration/llm/test_groq.py b/tests/integration/llm/test_groq.py
index c988a37a..04e8ed25 100644
--- a/tests/integration/llm/test_groq.py
+++ b/tests/integration/llm/test_groq.py
@@ -4,6 +4,7 @@ from os import getenv
 import pytest
 
 from core.config import LLMConfig, LLMProvider
+from core.llm.base import APIError
 from core.llm.convo import Convo
 from core.llm.groq_client import GroqClient
 
@@ -33,7 +34,7 @@ async def test_incorrect_key():
     llm = GroqClient(cfg, stream_handler=print_handler)
     convo = Convo("you're a friendly assistant").user("tell me joke")
 
-    with pytest.raises(ValueError, match="Invalid API Key"):
+    with pytest.raises(APIError, match="Invalid API Key"):
         await llm(convo)
 
 
@@ -48,7 +49,7 @@ async def test_unknown_model():
     llm = GroqClient(cfg)
     convo = Convo("you're a friendly assistant").user("tell me joke")
 
-    with pytest.raises(ValueError, match="does not exist"):
+    with pytest.raises(APIError, match="does not exist"):
         await llm(convo)
 
 
@@ -117,8 +118,5 @@ async def test_context_too_large():
     large_convo = " ".join(["lorem ipsum dolor sit amet"] * 30000)
     convo = Convo(large_convo)
 
-    with pytest.raises(ValueError, match="Context limit exceeded."):
+    with pytest.raises(APIError, match="We sent too large request to the LLM"):
         await llm(convo)
-
-    streamed = "".join(streamed_response)
-    assert "We sent too large request to the LLM" in streamed
diff --git a/tests/integration/llm/test_openai.py b/tests/integration/llm/test_openai.py
index 7d9ed2a4..55e338b7 100644
--- a/tests/integration/llm/test_openai.py
+++ b/tests/integration/llm/test_openai.py
@@ -4,6 +4,7 @@ from os import getenv
 import pytest
 
 from core.config import LLMConfig, LLMProvider
+from core.llm.base import APIError
 from core.llm.convo import Convo
 from core.llm.openai_client import OpenAIClient
 
@@ -33,7 +34,7 @@ async def test_incorrect_key():
     llm = OpenAIClient(cfg, stream_handler=print_handler)
     convo = Convo("you're a friendly assistant").user("tell me joke")
 
-    with pytest.raises(ValueError, match="Incorrect API key provided: sk-inc"):
+    with pytest.raises(APIError, match="Incorrect API key provided: sk-inc"):
         await llm(convo)
 
 
@@ -48,7 +49,7 @@ async def test_unknown_model():
     llm = OpenAIClient(cfg)
    convo = Convo("you're a friendly assistant").user("tell me joke")
 
-    with pytest.raises(ValueError, match="does not exist"):
+    with pytest.raises(APIError, match="does not exist"):
         await llm(convo)
 
 
@@ -114,8 +115,5 @@ async def test_context_too_large():
     convo = Convo("you're a friendly assistant")
     large_convo = " ".join(["lorem ipsum dolor sit amet"] * 30000)
     convo.user(large_convo)
-    with pytest.raises(ValueError, match="Context limit exceeded."):
+    with pytest.raises(APIError, match="We sent too large request to the LLM"):
         await llm(convo)
-
-    streamed = "".join(streamed_response)
-    assert "We sent too large request to the LLM" in streamed