mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-04-08 03:00:28 -04:00
## Summary - Remove ~1200 lines of broken/unmaintained non-SDK copilot streaming code (retry logic, parallel tool calls, context window management) - Add `stream_chat_completion_baseline()` as a clean fallback LLM path with full tool-calling support when `CHAT_USE_CLAUDE_AGENT_SDK=false` (e.g. when Anthropic is down) - Baseline reuses the same shared `TOOL_REGISTRY`, `get_available_tools()`, and `execute_tool()` as the SDK path - Move baseline code to dedicated `baseline/` folder (mirrors `sdk/` structure) - Clean up SDK service: remove unused params, fix model/env resolution, fix stream error persistence - Clean up config: remove `max_retries`, `thinking_enabled` fields (non-SDK only) ## Changes | File | Action | |------|--------| | `backend/copilot/baseline/__init__.py` | New — package export | | `backend/copilot/baseline/service.py` | New — baseline streaming with tool-call loop | | `backend/copilot/baseline/service_test.py` | New — multi-turn keyword recall test | | `backend/copilot/service.py` | Remove ~1200 lines of legacy code, keep shared helpers only | | `backend/copilot/executor/processor.py` | Simplify branching to SDK vs baseline | | `backend/copilot/sdk/service.py` | Remove unused params, fix model/env separation, fix stream error persistence | | `backend/copilot/config.py` | Remove `max_retries`, `thinking_enabled` | | `backend/copilot/service_test.py` | Keep SDK test only (baseline test moved) | | `backend/copilot/parallel_tool_calls_test.py` | Deleted (tested removed code) | ## Test plan - [x] `poetry run format` passes - [x] CI passes (all 3 Python versions, types, CodeQL) - [ ] SDK path works unchanged in production - [x] Baseline path (`CHAT_USE_CLAUDE_AGENT_SDK=false`) streams responses with tool calling - [x] Baseline emits correct Vercel AI SDK stream protocol events
104 lines
3.7 KiB
Python
104 lines
3.7 KiB
Python
import asyncio
|
|
import logging
|
|
from os import getenv
|
|
|
|
import pytest
|
|
|
|
from .model import create_chat_session, get_chat_session, upsert_chat_session
|
|
from .response_model import StreamError, StreamTextDelta
|
|
from .sdk import service as sdk_service
|
|
from .sdk.transcript import download_transcript
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
@pytest.mark.asyncio(loop_scope="session")
async def test_sdk_resume_multi_turn(setup_test_user, test_user_id):
    """Test that the SDK --resume path captures and uses transcripts across turns.

    Turn 1: Send a message containing a unique keyword.
    Turn 2: Ask the model to recall that keyword — proving the transcript was
    persisted and restored via --resume.
    """
    # Requires a live LLM backend; skip when no key is configured.
    # NOTE: pytest.skip() raises Skipped and never returns, so it is called
    # as a plain statement (the previous `return pytest.skip(...)` was dead code).
    api_key: str | None = getenv("OPEN_ROUTER_API_KEY")
    if not api_key:
        pytest.skip("OPEN_ROUTER_API_KEY is not set, skipping test")

    # Imported lazily so test collection doesn't require a full config environment.
    from .config import ChatConfig

    cfg = ChatConfig()
    if not cfg.claude_agent_use_resume:
        pytest.skip("CLAUDE_AGENT_USE_RESUME is not enabled, skipping test")

    session = await create_chat_session(test_user_id)
    session = await upsert_chat_session(session)

    # --- Turn 1: send a message with a unique keyword ---
    keyword = "ZEPHYR42"
    turn1_msg = (
        f"Please remember this special keyword: {keyword}. "
        "Just confirm you've noted it, keep your response brief."
    )
    turn1_text = ""
    turn1_errors: list[str] = []

    async for chunk in sdk_service.stream_chat_completion_sdk(
        session.session_id,
        turn1_msg,
        user_id=test_user_id,
    ):
        if isinstance(chunk, StreamTextDelta):
            turn1_text += chunk.delta
        elif isinstance(chunk, StreamError):
            turn1_errors.append(chunk.errorText)

    assert not turn1_errors, f"Turn 1 errors: {turn1_errors}"
    assert turn1_text, "Turn 1 produced no text"

    # Wait for background upload task to complete (retry up to 5s).
    # The CLI may not produce a usable transcript for very short
    # conversations (only metadata entries) — this is environment-dependent
    # (CLI version, platform). When that happens, multi-turn still works
    # via conversation compression (non-resume path), but we can't test
    # the --resume round-trip.
    transcript = None
    for _ in range(10):
        await asyncio.sleep(0.5)
        transcript = await download_transcript(test_user_id, session.session_id)
        if transcript:
            break
    if not transcript:
        pytest.skip(
            "CLI did not produce a usable transcript — "
            "cannot test --resume round-trip in this environment"
        )
    logger.info(f"Turn 1 transcript uploaded: {len(transcript.content)} bytes")

    # Reload session for turn 2 so it carries the persisted transcript reference.
    session = await get_chat_session(session.session_id, test_user_id)
    assert session, "Session not found after turn 1"

    # --- Turn 2: ask model to recall the keyword ---
    turn2_msg = "What was the special keyword I asked you to remember?"
    turn2_text = ""
    turn2_errors: list[str] = []

    async for chunk in sdk_service.stream_chat_completion_sdk(
        session.session_id,
        turn2_msg,
        user_id=test_user_id,
        session=session,
    ):
        if isinstance(chunk, StreamTextDelta):
            turn2_text += chunk.delta
        elif isinstance(chunk, StreamError):
            turn2_errors.append(chunk.errorText)

    assert not turn2_errors, f"Turn 2 errors: {turn2_errors}"
    assert turn2_text, "Turn 2 produced no text"
    # The keyword appearing in turn 2's output proves the turn-1 transcript
    # was persisted and restored via --resume.
    assert keyword in turn2_text, (
        f"Model did not recall keyword '{keyword}' in turn 2. "
        f"Response: {turn2_text[:200]}"
    )
    logger.info(f"Turn 2 recalled keyword successfully: {turn2_text[:100]}")