ENG-509 (Remove the LLM availability check)

This commit is contained in:
mijauexe
2025-03-06 14:19:33 +01:00
parent a7bc58d3c3
commit 3647c12ffc
4 changed files with 3 additions and 88 deletions

View File

@@ -95,7 +95,6 @@ def parse_arguments() -> Namespace:
--import-v0: Import data from a v0 (gpt-pilot) database with the given path
--email: User's email address, if provided
--extension-version: Version of the VSCode extension, if used
--no-check: Disable initial LLM API check
--use-git: Use Git for version control
--access-token: Access token
:return: Parsed arguments object.
@@ -137,7 +136,6 @@ def parse_arguments() -> Namespace:
)
parser.add_argument("--email", help="User's email address", required=False)
parser.add_argument("--extension-version", help="Version of the VSCode extension", required=False)
parser.add_argument("--no-check", help="Disable initial LLM API check", action="store_true")
parser.add_argument("--use-git", help="Use Git for version control", action="store_true", required=False)
parser.add_argument("--access-token", help="Access token", required=False)
return parser.parse_args()

View File

@@ -15,11 +15,10 @@ except ImportError:
from core.agents.orchestrator import Orchestrator
from core.cli.helpers import delete_project, init, list_projects, list_projects_json, load_project, show_config
from core.config import LLMProvider, get_config
from core.db.session import SessionManager
from core.db.v0importer import LegacyDatabaseImporter
from core.llm.anthropic_client import CustomAssertionError
from core.llm.base import APIError, BaseLLMClient
from core.llm.base import APIError
from core.log import get_logger
from core.state.state_manager import StateManager
from core.telemetry import telemetry
@@ -118,62 +117,6 @@ async def run_project(sm: StateManager, ui: UIBase, args) -> bool:
return success
async def llm_api_check(ui: UIBase, sm: StateManager) -> bool:
"""
Check whether the configured LLMs are reachable in parallel.
:param ui: UI we'll use to report any issues
:return: True if all the LLMs are reachable.
"""
config = get_config()
async def handler(*args, **kwargs):
pass
checked_llms: set[LLMProvider] = set()
tasks = []
async def check_llm(llm_config):
if llm_config.provider + llm_config.model in checked_llms:
return True
checked_llms.add(llm_config.provider + llm_config.model)
client_class = BaseLLMClient.for_provider(llm_config.provider)
llm_client = client_class(llm_config, stream_handler=handler, error_handler=handler, ui=ui, state_manager=sm)
try:
resp = await llm_client.api_check()
if not resp:
await ui.send_message(
f"API check for {llm_config.provider.value} {llm_config.model} failed.",
source=pythagora_source,
)
log.warning(f"API check for {llm_config.provider.value} {llm_config.model} failed.")
return False
else:
log.info(f"API check for {llm_config.provider.value} {llm_config.model} succeeded.")
return True
except APIError as err:
await ui.send_message(
f"API check for {llm_config.provider.value} {llm_config.model} failed with: {err}",
source=pythagora_source,
)
log.warning(f"API check for {llm_config.provider.value} failed with: {err}")
return False
for llm_config in config.all_llms():
tasks.append(check_llm(llm_config))
results = await asyncio.gather(*tasks)
success = all(results)
if not success:
telemetry.set("end_result", "failure:api-error")
return success
async def start_new_project(sm: StateManager, ui: UIBase) -> bool:
"""
Start a new project.
@@ -253,14 +196,6 @@ async def run_pythagora_session(sm: StateManager, ui: UIBase, args: Namespace):
:return: True if the application ran successfully, False otherwise.
"""
if not args.no_check:
if not await llm_api_check(ui, sm):
await ui.send_message(
"Pythagora cannot start because the LLM API is not reachable.",
source=pythagora_source,
)
return False
if args.project or args.branch or args.step:
telemetry.set("is_continuation", True)
success = await load_project(sm, args.project, args.branch, args.step)

View File

@@ -378,19 +378,6 @@ class BaseLLMClient:
return response, request_log
async def api_check(self) -> bool:
"""
Perform an LLM API check.
:return: True if the check was successful, False otherwise.
"""
convo = Convo()
msg = "This is a connection test. If you can see this, please respond only with 'START' and nothing else."
convo.user(msg)
resp, _log = await self(convo)
return bool(resp)
@staticmethod
def for_provider(provider: LLMProvider) -> type["BaseLLMClient"]:
"""

View File

@@ -56,7 +56,6 @@ def test_parse_arguments(mock_ArgumentParser):
"--import-v0",
"--email",
"--extension-version",
"--no-check",
"--use-git",
"--access-token",
}
@@ -294,10 +293,8 @@ def test_init(tmp_path):
([], True, True),
],
)
@patch("core.cli.main.llm_api_check")
@patch("core.cli.main.Orchestrator")
async def test_main(mock_Orchestrator, mock_llm_check, args, run_orchestrator, retval, tmp_path):
mock_llm_check.return_value = True
async def test_main(mock_Orchestrator, args, run_orchestrator, retval, tmp_path):
config_file = write_test_config(tmp_path)
class MockArgumentParser(ArgumentParser):
@@ -322,10 +319,8 @@ async def test_main(mock_Orchestrator, mock_llm_check, args, run_orchestrator, r
@pytest.mark.asyncio
@patch("core.cli.main.llm_api_check")
@patch("core.cli.main.Orchestrator")
async def test_main_handles_crash(mock_Orchestrator, mock_llm_check, tmp_path):
mock_llm_check.return_value = True
async def test_main_handles_crash(mock_Orchestrator, tmp_path):
config_file = write_test_config(tmp_path)
class MockArgumentParser(ArgumentParser):