mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-02-10 23:05:17 -05:00
Compare commits
48 Commits
fix/copilo
...
feat/copit
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
82c483d6c8 | ||
|
|
7cffa1895f | ||
|
|
9791bdd724 | ||
|
|
750a674c78 | ||
|
|
960c7980a3 | ||
|
|
e85d437bb2 | ||
|
|
2cd0d4fe0f | ||
|
|
44f9536bd6 | ||
|
|
1c1085a227 | ||
|
|
d7ef70469e | ||
|
|
1926127ddd | ||
|
|
8b509e56de | ||
|
|
1ecae8c87e | ||
|
|
659338f90c | ||
|
|
4df5b7bde7 | ||
|
|
acb2d0bd1b | ||
|
|
51aa369c80 | ||
|
|
017a00af46 | ||
|
|
6403ffe353 | ||
|
|
52650eed1d | ||
|
|
c40a98ba3c | ||
|
|
a31fc8b162 | ||
|
|
0f2d1a6553 | ||
|
|
87d817b83b | ||
|
|
acf932bf4f | ||
|
|
f562d9a277 | ||
|
|
3c92a96504 | ||
|
|
8b8e1df739 | ||
|
|
602a0a4fb1 | ||
|
|
8d7d531ae0 | ||
|
|
43153a12e0 | ||
|
|
587e11c60a | ||
|
|
57da545e02 | ||
|
|
626980bf27 | ||
|
|
e42b27af3c | ||
|
|
34face15d2 | ||
|
|
7d32c83f95 | ||
|
|
6e2a45b84e | ||
|
|
32f6532e9c | ||
|
|
0bbe8a184d | ||
|
|
7592deed63 | ||
|
|
b9c759ce4f | ||
|
|
5efb80d47b | ||
|
|
b49d8e2cba | ||
|
|
452544530d | ||
|
|
32ee7e6cf8 | ||
|
|
670663c406 | ||
|
|
0dbe4cf51e |
@@ -22,7 +22,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
ref: ${{ github.event.workflow_run.head_branch }}
|
ref: ${{ github.event.workflow_run.head_branch }}
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|||||||
2
.github/workflows/claude-dependabot.yml
vendored
2
.github/workflows/claude-dependabot.yml
vendored
@@ -30,7 +30,7 @@ jobs:
|
|||||||
actions: read # Required for CI access
|
actions: read # Required for CI access
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
fetch-depth: 1
|
fetch-depth: 1
|
||||||
|
|
||||||
|
|||||||
2
.github/workflows/claude.yml
vendored
2
.github/workflows/claude.yml
vendored
@@ -40,7 +40,7 @@ jobs:
|
|||||||
actions: read # Required for CI access
|
actions: read # Required for CI access
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
fetch-depth: 1
|
fetch-depth: 1
|
||||||
|
|
||||||
|
|||||||
2
.github/workflows/codeql.yml
vendored
2
.github/workflows/codeql.yml
vendored
@@ -58,7 +58,7 @@ jobs:
|
|||||||
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
|
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
|
|
||||||
# Initializes the CodeQL tools for scanning.
|
# Initializes the CodeQL tools for scanning.
|
||||||
- name: Initialize CodeQL
|
- name: Initialize CodeQL
|
||||||
|
|||||||
2
.github/workflows/copilot-setup-steps.yml
vendored
2
.github/workflows/copilot-setup-steps.yml
vendored
@@ -27,7 +27,7 @@ jobs:
|
|||||||
# If you do not check out your code, Copilot will do this for you.
|
# If you do not check out your code, Copilot will do this for you.
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
submodules: true
|
submodules: true
|
||||||
|
|||||||
2
.github/workflows/docs-block-sync.yml
vendored
2
.github/workflows/docs-block-sync.yml
vendored
@@ -23,7 +23,7 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
fetch-depth: 1
|
fetch-depth: 1
|
||||||
|
|
||||||
|
|||||||
2
.github/workflows/docs-claude-review.yml
vendored
2
.github/workflows/docs-claude-review.yml
vendored
@@ -23,7 +23,7 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
|
|||||||
2
.github/workflows/docs-enhance.yml
vendored
2
.github/workflows/docs-enhance.yml
vendored
@@ -28,7 +28,7 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
fetch-depth: 1
|
fetch-depth: 1
|
||||||
|
|
||||||
|
|||||||
@@ -25,7 +25,7 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
ref: ${{ github.event.inputs.git_ref || github.ref_name }}
|
ref: ${{ github.event.inputs.git_ref || github.ref_name }}
|
||||||
|
|
||||||
@@ -52,7 +52,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Trigger deploy workflow
|
- name: Trigger deploy workflow
|
||||||
uses: peter-evans/repository-dispatch@v3
|
uses: peter-evans/repository-dispatch@v4
|
||||||
with:
|
with:
|
||||||
token: ${{ secrets.DEPLOY_TOKEN }}
|
token: ${{ secrets.DEPLOY_TOKEN }}
|
||||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
ref: ${{ github.ref_name || 'master' }}
|
ref: ${{ github.ref_name || 'master' }}
|
||||||
|
|
||||||
@@ -45,7 +45,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Trigger deploy workflow
|
- name: Trigger deploy workflow
|
||||||
uses: peter-evans/repository-dispatch@v3
|
uses: peter-evans/repository-dispatch@v4
|
||||||
with:
|
with:
|
||||||
token: ${{ secrets.DEPLOY_TOKEN }}
|
token: ${{ secrets.DEPLOY_TOKEN }}
|
||||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||||
|
|||||||
2
.github/workflows/platform-backend-ci.yml
vendored
2
.github/workflows/platform-backend-ci.yml
vendored
@@ -68,7 +68,7 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
submodules: true
|
submodules: true
|
||||||
|
|||||||
@@ -82,7 +82,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Dispatch Deploy Event
|
- name: Dispatch Deploy Event
|
||||||
if: steps.check_status.outputs.should_deploy == 'true'
|
if: steps.check_status.outputs.should_deploy == 'true'
|
||||||
uses: peter-evans/repository-dispatch@v3
|
uses: peter-evans/repository-dispatch@v4
|
||||||
with:
|
with:
|
||||||
token: ${{ secrets.DISPATCH_TOKEN }}
|
token: ${{ secrets.DISPATCH_TOKEN }}
|
||||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||||
@@ -110,7 +110,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Dispatch Undeploy Event (from comment)
|
- name: Dispatch Undeploy Event (from comment)
|
||||||
if: steps.check_status.outputs.should_undeploy == 'true'
|
if: steps.check_status.outputs.should_undeploy == 'true'
|
||||||
uses: peter-evans/repository-dispatch@v3
|
uses: peter-evans/repository-dispatch@v4
|
||||||
with:
|
with:
|
||||||
token: ${{ secrets.DISPATCH_TOKEN }}
|
token: ${{ secrets.DISPATCH_TOKEN }}
|
||||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||||
@@ -168,7 +168,7 @@ jobs:
|
|||||||
github.event_name == 'pull_request' &&
|
github.event_name == 'pull_request' &&
|
||||||
github.event.action == 'closed' &&
|
github.event.action == 'closed' &&
|
||||||
steps.check_pr_close.outputs.should_undeploy == 'true'
|
steps.check_pr_close.outputs.should_undeploy == 'true'
|
||||||
uses: peter-evans/repository-dispatch@v3
|
uses: peter-evans/repository-dispatch@v4
|
||||||
with:
|
with:
|
||||||
token: ${{ secrets.DISPATCH_TOKEN }}
|
token: ${{ secrets.DISPATCH_TOKEN }}
|
||||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||||
|
|||||||
10
.github/workflows/platform-frontend-ci.yml
vendored
10
.github/workflows/platform-frontend-ci.yml
vendored
@@ -31,7 +31,7 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
|
|
||||||
- name: Check for component changes
|
- name: Check for component changes
|
||||||
uses: dorny/paths-filter@v3
|
uses: dorny/paths-filter@v3
|
||||||
@@ -71,7 +71,7 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
|
|
||||||
- name: Set up Node.js
|
- name: Set up Node.js
|
||||||
uses: actions/setup-node@v6
|
uses: actions/setup-node@v6
|
||||||
@@ -107,7 +107,7 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
@@ -148,7 +148,7 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
|
|
||||||
@@ -277,7 +277,7 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
|
|
||||||
|
|||||||
4
.github/workflows/platform-fullstack-ci.yml
vendored
4
.github/workflows/platform-fullstack-ci.yml
vendored
@@ -29,7 +29,7 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
|
|
||||||
- name: Set up Node.js
|
- name: Set up Node.js
|
||||||
uses: actions/setup-node@v6
|
uses: actions/setup-node@v6
|
||||||
@@ -63,7 +63,7 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
|
|
||||||
|
|||||||
2
.github/workflows/repo-workflow-checker.yml
vendored
2
.github/workflows/repo-workflow-checker.yml
vendored
@@ -11,7 +11,7 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
# - name: Wait some time for all actions to start
|
# - name: Wait some time for all actions to start
|
||||||
# run: sleep 30
|
# run: sleep 30
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v6
|
||||||
# with:
|
# with:
|
||||||
# fetch-depth: 0
|
# fetch-depth: 0
|
||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
|
|||||||
@@ -27,12 +27,11 @@ class ChatConfig(BaseSettings):
|
|||||||
session_ttl: int = Field(default=43200, description="Session TTL in seconds")
|
session_ttl: int = Field(default=43200, description="Session TTL in seconds")
|
||||||
|
|
||||||
# Streaming Configuration
|
# Streaming Configuration
|
||||||
max_context_messages: int = Field(
|
|
||||||
default=50, ge=1, le=200, description="Maximum context messages"
|
|
||||||
)
|
|
||||||
|
|
||||||
stream_timeout: int = Field(default=300, description="Stream timeout in seconds")
|
stream_timeout: int = Field(default=300, description="Stream timeout in seconds")
|
||||||
max_retries: int = Field(default=3, description="Maximum number of retries")
|
max_retries: int = Field(
|
||||||
|
default=3,
|
||||||
|
description="Max retries for fallback path (SDK handles retries internally)",
|
||||||
|
)
|
||||||
max_agent_runs: int = Field(default=30, description="Maximum number of agent runs")
|
max_agent_runs: int = Field(default=30, description="Maximum number of agent runs")
|
||||||
max_agent_schedules: int = Field(
|
max_agent_schedules: int = Field(
|
||||||
default=30, description="Maximum number of agent schedules"
|
default=30, description="Maximum number of agent schedules"
|
||||||
@@ -93,6 +92,18 @@ class ChatConfig(BaseSettings):
|
|||||||
description="Name of the prompt in Langfuse to fetch",
|
description="Name of the prompt in Langfuse to fetch",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Claude Agent SDK Configuration
|
||||||
|
use_claude_agent_sdk: bool = Field(
|
||||||
|
default=True,
|
||||||
|
description="Use Claude Agent SDK for chat completions",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Extended thinking configuration for Claude models
|
||||||
|
thinking_enabled: bool = Field(
|
||||||
|
default=True,
|
||||||
|
description="Enable adaptive thinking for Claude models via OpenRouter",
|
||||||
|
)
|
||||||
|
|
||||||
@field_validator("api_key", mode="before")
|
@field_validator("api_key", mode="before")
|
||||||
@classmethod
|
@classmethod
|
||||||
def get_api_key(cls, v):
|
def get_api_key(cls, v):
|
||||||
@@ -132,6 +143,17 @@ class ChatConfig(BaseSettings):
|
|||||||
v = os.getenv("CHAT_INTERNAL_API_KEY")
|
v = os.getenv("CHAT_INTERNAL_API_KEY")
|
||||||
return v
|
return v
|
||||||
|
|
||||||
|
@field_validator("use_claude_agent_sdk", mode="before")
|
||||||
|
@classmethod
|
||||||
|
def get_use_claude_agent_sdk(cls, v):
|
||||||
|
"""Get use_claude_agent_sdk from environment if not provided."""
|
||||||
|
# Check environment variable - default to True if not set
|
||||||
|
env_val = os.getenv("CHAT_USE_CLAUDE_AGENT_SDK", "").lower()
|
||||||
|
if env_val:
|
||||||
|
return env_val in ("true", "1", "yes", "on")
|
||||||
|
# Default to True (SDK enabled by default)
|
||||||
|
return True if v is None else v
|
||||||
|
|
||||||
# Prompt paths for different contexts
|
# Prompt paths for different contexts
|
||||||
PROMPT_PATHS: dict[str, str] = {
|
PROMPT_PATHS: dict[str, str] = {
|
||||||
"default": "prompts/chat_system.md",
|
"default": "prompts/chat_system.md",
|
||||||
|
|||||||
@@ -273,9 +273,8 @@ async def _get_session_from_cache(session_id: str) -> ChatSession | None:
|
|||||||
try:
|
try:
|
||||||
session = ChatSession.model_validate_json(raw_session)
|
session = ChatSession.model_validate_json(raw_session)
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Loading session {session_id} from cache: "
|
f"[CACHE] Loaded session {session_id}: {len(session.messages)} messages, "
|
||||||
f"message_count={len(session.messages)}, "
|
f"last_roles={[m.role for m in session.messages[-3:]]}" # Last 3 roles
|
||||||
f"roles={[m.role for m in session.messages]}"
|
|
||||||
)
|
)
|
||||||
return session
|
return session
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@@ -317,11 +316,9 @@ async def _get_session_from_db(session_id: str) -> ChatSession | None:
|
|||||||
return None
|
return None
|
||||||
|
|
||||||
messages = prisma_session.Messages
|
messages = prisma_session.Messages
|
||||||
logger.info(
|
logger.debug(
|
||||||
f"Loading session {session_id} from DB: "
|
f"[DB] Loaded session {session_id}: {len(messages) if messages else 0} messages, "
|
||||||
f"has_messages={messages is not None}, "
|
f"roles={[m.role for m in messages[-3:]] if messages else []}" # Last 3 roles
|
||||||
f"message_count={len(messages) if messages else 0}, "
|
|
||||||
f"roles={[m.role for m in messages] if messages else []}"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
return ChatSession.from_db(prisma_session, messages)
|
return ChatSession.from_db(prisma_session, messages)
|
||||||
@@ -372,10 +369,9 @@ async def _save_session_to_db(
|
|||||||
"function_call": msg.function_call,
|
"function_call": msg.function_call,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
logger.info(
|
logger.debug(
|
||||||
f"Saving {len(new_messages)} new messages to DB for session {session.session_id}: "
|
f"[DB] Saving {len(new_messages)} messages to session {session.session_id}, "
|
||||||
f"roles={[m['role'] for m in messages_data]}, "
|
f"roles={[m['role'] for m in messages_data]}"
|
||||||
f"start_sequence={existing_message_count}"
|
|
||||||
)
|
)
|
||||||
await chat_db.add_chat_messages_batch(
|
await chat_db.add_chat_messages_batch(
|
||||||
session_id=session.session_id,
|
session_id=session.session_id,
|
||||||
@@ -415,7 +411,7 @@ async def get_chat_session(
|
|||||||
logger.warning(f"Unexpected cache error for session {session_id}: {e}")
|
logger.warning(f"Unexpected cache error for session {session_id}: {e}")
|
||||||
|
|
||||||
# Fall back to database
|
# Fall back to database
|
||||||
logger.info(f"Session {session_id} not in cache, checking database")
|
logger.debug(f"Session {session_id} not in cache, checking database")
|
||||||
session = await _get_session_from_db(session_id)
|
session = await _get_session_from_db(session_id)
|
||||||
|
|
||||||
if session is None:
|
if session is None:
|
||||||
@@ -432,7 +428,6 @@ async def get_chat_session(
|
|||||||
# Cache the session from DB
|
# Cache the session from DB
|
||||||
try:
|
try:
|
||||||
await _cache_session(session)
|
await _cache_session(session)
|
||||||
logger.info(f"Cached session {session_id} from database")
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.warning(f"Failed to cache session {session_id}: {e}")
|
logger.warning(f"Failed to cache session {session_id}: {e}")
|
||||||
|
|
||||||
@@ -603,13 +598,19 @@ async def update_session_title(session_id: str, title: str) -> bool:
|
|||||||
logger.warning(f"Session {session_id} not found for title update")
|
logger.warning(f"Session {session_id} not found for title update")
|
||||||
return False
|
return False
|
||||||
|
|
||||||
# Invalidate cache so next fetch gets updated title
|
# Update title in cache if it exists (instead of invalidating).
|
||||||
|
# This prevents race conditions where cache invalidation causes
|
||||||
|
# the frontend to see stale DB data while streaming is still in progress.
|
||||||
try:
|
try:
|
||||||
redis_key = _get_session_cache_key(session_id)
|
cached = await _get_session_from_cache(session_id)
|
||||||
async_redis = await get_redis_async()
|
if cached:
|
||||||
await async_redis.delete(redis_key)
|
cached.title = title
|
||||||
|
await _cache_session(cached)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.warning(f"Failed to invalidate cache for session {session_id}: {e}")
|
# Not critical - title will be correct on next full cache refresh
|
||||||
|
logger.warning(
|
||||||
|
f"Failed to update title in cache for session {session_id}: {e}"
|
||||||
|
)
|
||||||
|
|
||||||
return True
|
return True
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
"""Chat API routes for chat session management and streaming via SSE."""
|
"""Chat API routes for chat session management and streaming via SSE."""
|
||||||
|
|
||||||
|
import asyncio
|
||||||
import logging
|
import logging
|
||||||
import uuid as uuid_module
|
import uuid as uuid_module
|
||||||
from collections.abc import AsyncGenerator
|
from collections.abc import AsyncGenerator
|
||||||
@@ -16,8 +17,16 @@ from . import service as chat_service
|
|||||||
from . import stream_registry
|
from . import stream_registry
|
||||||
from .completion_handler import process_operation_failure, process_operation_success
|
from .completion_handler import process_operation_failure, process_operation_success
|
||||||
from .config import ChatConfig
|
from .config import ChatConfig
|
||||||
from .model import ChatSession, create_chat_session, get_chat_session, get_user_sessions
|
from .model import (
|
||||||
from .response_model import StreamFinish, StreamHeartbeat
|
ChatMessage,
|
||||||
|
ChatSession,
|
||||||
|
create_chat_session,
|
||||||
|
get_chat_session,
|
||||||
|
get_user_sessions,
|
||||||
|
upsert_chat_session,
|
||||||
|
)
|
||||||
|
from .response_model import StreamError, StreamFinish, StreamHeartbeat, StreamStart
|
||||||
|
from .sdk import service as sdk_service
|
||||||
from .tools.models import (
|
from .tools.models import (
|
||||||
AgentDetailsResponse,
|
AgentDetailsResponse,
|
||||||
AgentOutputResponse,
|
AgentOutputResponse,
|
||||||
@@ -40,6 +49,7 @@ from .tools.models import (
|
|||||||
SetupRequirementsResponse,
|
SetupRequirementsResponse,
|
||||||
UnderstandingUpdatedResponse,
|
UnderstandingUpdatedResponse,
|
||||||
)
|
)
|
||||||
|
from .tracking import track_user_message
|
||||||
|
|
||||||
config = ChatConfig()
|
config = ChatConfig()
|
||||||
|
|
||||||
@@ -231,6 +241,10 @@ async def get_session(
|
|||||||
active_task, last_message_id = await stream_registry.get_active_task_for_session(
|
active_task, last_message_id = await stream_registry.get_active_task_for_session(
|
||||||
session_id, user_id
|
session_id, user_id
|
||||||
)
|
)
|
||||||
|
logger.info(
|
||||||
|
f"[GET_SESSION] session={session_id}, active_task={active_task is not None}, "
|
||||||
|
f"msg_count={len(messages)}, last_role={messages[-1].get('role') if messages else 'none'}"
|
||||||
|
)
|
||||||
if active_task:
|
if active_task:
|
||||||
# Filter out the in-progress assistant message from the session response.
|
# Filter out the in-progress assistant message from the session response.
|
||||||
# The client will receive the complete assistant response through the SSE
|
# The client will receive the complete assistant response through the SSE
|
||||||
@@ -300,10 +314,9 @@ async def stream_chat_post(
|
|||||||
f"user={user_id}, message_len={len(request.message)}",
|
f"user={user_id}, message_len={len(request.message)}",
|
||||||
extra={"json_fields": log_meta},
|
extra={"json_fields": log_meta},
|
||||||
)
|
)
|
||||||
|
|
||||||
session = await _validate_and_get_session(session_id, user_id)
|
session = await _validate_and_get_session(session_id, user_id)
|
||||||
logger.info(
|
logger.info(
|
||||||
f"[TIMING] session validated in {(time.perf_counter() - stream_start_time)*1000:.1f}ms",
|
f"[TIMING] session validated in {(time.perf_counter() - stream_start_time) * 1000:.1f}ms",
|
||||||
extra={
|
extra={
|
||||||
"json_fields": {
|
"json_fields": {
|
||||||
**log_meta,
|
**log_meta,
|
||||||
@@ -312,6 +325,28 @@ async def stream_chat_post(
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Add user message to session BEFORE creating task to avoid race condition
|
||||||
|
# where GET_SESSION sees the task as "running" but the message isn't saved yet
|
||||||
|
if request.message:
|
||||||
|
session.messages.append(
|
||||||
|
ChatMessage(
|
||||||
|
role="user" if request.is_user_message else "assistant",
|
||||||
|
content=request.message,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if request.is_user_message:
|
||||||
|
track_user_message(
|
||||||
|
user_id=user_id,
|
||||||
|
session_id=session_id,
|
||||||
|
message_length=len(request.message),
|
||||||
|
)
|
||||||
|
logger.info(
|
||||||
|
f"[STREAM] Saving user message to session {session_id}, "
|
||||||
|
f"msg_count={len(session.messages)}"
|
||||||
|
)
|
||||||
|
session = await upsert_chat_session(session)
|
||||||
|
logger.info(f"[STREAM] User message saved for session {session_id}")
|
||||||
|
|
||||||
# Create a task in the stream registry for reconnection support
|
# Create a task in the stream registry for reconnection support
|
||||||
task_id = str(uuid_module.uuid4())
|
task_id = str(uuid_module.uuid4())
|
||||||
operation_id = str(uuid_module.uuid4())
|
operation_id = str(uuid_module.uuid4())
|
||||||
@@ -327,7 +362,7 @@ async def stream_chat_post(
|
|||||||
operation_id=operation_id,
|
operation_id=operation_id,
|
||||||
)
|
)
|
||||||
logger.info(
|
logger.info(
|
||||||
f"[TIMING] create_task completed in {(time.perf_counter() - task_create_start)*1000:.1f}ms",
|
f"[TIMING] create_task completed in {(time.perf_counter() - task_create_start) * 1000:.1f}ms",
|
||||||
extra={
|
extra={
|
||||||
"json_fields": {
|
"json_fields": {
|
||||||
**log_meta,
|
**log_meta,
|
||||||
@@ -348,15 +383,43 @@ async def stream_chat_post(
|
|||||||
first_chunk_time, ttfc = None, None
|
first_chunk_time, ttfc = None, None
|
||||||
chunk_count = 0
|
chunk_count = 0
|
||||||
try:
|
try:
|
||||||
async for chunk in chat_service.stream_chat_completion(
|
# Emit a start event with task_id for reconnection
|
||||||
|
start_chunk = StreamStart(messageId=task_id, taskId=task_id)
|
||||||
|
await stream_registry.publish_chunk(task_id, start_chunk)
|
||||||
|
logger.info(
|
||||||
|
f"[TIMING] StreamStart published at {(time_module.perf_counter() - gen_start_time) * 1000:.1f}ms",
|
||||||
|
extra={
|
||||||
|
"json_fields": {
|
||||||
|
**log_meta,
|
||||||
|
"elapsed_ms": (time_module.perf_counter() - gen_start_time)
|
||||||
|
* 1000,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
# Choose service based on configuration
|
||||||
|
use_sdk = config.use_claude_agent_sdk
|
||||||
|
stream_fn = (
|
||||||
|
sdk_service.stream_chat_completion_sdk
|
||||||
|
if use_sdk
|
||||||
|
else chat_service.stream_chat_completion
|
||||||
|
)
|
||||||
|
logger.info(
|
||||||
|
f"[TIMING] Calling {'sdk' if use_sdk else 'standard'} stream_chat_completion",
|
||||||
|
extra={"json_fields": log_meta},
|
||||||
|
)
|
||||||
|
# Pass message=None since we already added it to the session above
|
||||||
|
async for chunk in stream_fn(
|
||||||
session_id,
|
session_id,
|
||||||
request.message,
|
None, # Message already in session
|
||||||
is_user_message=request.is_user_message,
|
is_user_message=request.is_user_message,
|
||||||
user_id=user_id,
|
user_id=user_id,
|
||||||
session=session, # Pass pre-fetched session to avoid double-fetch
|
session=session, # Pass session with message already added
|
||||||
context=request.context,
|
context=request.context,
|
||||||
_task_id=task_id, # Pass task_id so service emits start with taskId for reconnection
|
|
||||||
):
|
):
|
||||||
|
# Skip duplicate StreamStart — we already published one above
|
||||||
|
if isinstance(chunk, StreamStart):
|
||||||
|
continue
|
||||||
chunk_count += 1
|
chunk_count += 1
|
||||||
if first_chunk_time is None:
|
if first_chunk_time is None:
|
||||||
first_chunk_time = time_module.perf_counter()
|
first_chunk_time = time_module.perf_counter()
|
||||||
@@ -377,7 +440,7 @@ async def stream_chat_post(
|
|||||||
gen_end_time = time_module.perf_counter()
|
gen_end_time = time_module.perf_counter()
|
||||||
total_time = (gen_end_time - gen_start_time) * 1000
|
total_time = (gen_end_time - gen_start_time) * 1000
|
||||||
logger.info(
|
logger.info(
|
||||||
f"[TIMING] run_ai_generation FINISHED in {total_time/1000:.1f}s; "
|
f"[TIMING] run_ai_generation FINISHED in {total_time / 1000:.1f}s; "
|
||||||
f"task={task_id}, session={session_id}, "
|
f"task={task_id}, session={session_id}, "
|
||||||
f"ttfc={ttfc or -1:.2f}s, n_chunks={chunk_count}",
|
f"ttfc={ttfc or -1:.2f}s, n_chunks={chunk_count}",
|
||||||
extra={
|
extra={
|
||||||
@@ -404,6 +467,17 @@ async def stream_chat_post(
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
# Publish a StreamError so the frontend can display an error message
|
||||||
|
try:
|
||||||
|
await stream_registry.publish_chunk(
|
||||||
|
task_id,
|
||||||
|
StreamError(
|
||||||
|
errorText="An error occurred. Please try again.",
|
||||||
|
code="stream_error",
|
||||||
|
),
|
||||||
|
)
|
||||||
|
except Exception:
|
||||||
|
pass # Best-effort; mark_task_completed will publish StreamFinish
|
||||||
await stream_registry.mark_task_completed(task_id, "failed")
|
await stream_registry.mark_task_completed(task_id, "failed")
|
||||||
|
|
||||||
# Start the AI generation in a background task
|
# Start the AI generation in a background task
|
||||||
@@ -506,8 +580,14 @@ async def stream_chat_post(
|
|||||||
"json_fields": {**log_meta, "elapsed_ms": elapsed, "error": str(e)}
|
"json_fields": {**log_meta, "elapsed_ms": elapsed, "error": str(e)}
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
# Surface error to frontend so it doesn't appear stuck
|
||||||
|
yield StreamError(
|
||||||
|
errorText="An error occurred. Please try again.",
|
||||||
|
code="stream_error",
|
||||||
|
).to_sse()
|
||||||
|
yield StreamFinish().to_sse()
|
||||||
finally:
|
finally:
|
||||||
# Unsubscribe when client disconnects or stream ends to prevent resource leak
|
# Unsubscribe when client disconnects or stream ends
|
||||||
if subscriber_queue is not None:
|
if subscriber_queue is not None:
|
||||||
try:
|
try:
|
||||||
await stream_registry.unsubscribe_from_task(
|
await stream_registry.unsubscribe_from_task(
|
||||||
@@ -751,8 +831,6 @@ async def stream_task(
|
|||||||
)
|
)
|
||||||
|
|
||||||
async def event_generator() -> AsyncGenerator[str, None]:
|
async def event_generator() -> AsyncGenerator[str, None]:
|
||||||
import asyncio
|
|
||||||
|
|
||||||
heartbeat_interval = 15.0 # Send heartbeat every 15 seconds
|
heartbeat_interval = 15.0 # Send heartbeat every 15 seconds
|
||||||
try:
|
try:
|
||||||
while True:
|
while True:
|
||||||
|
|||||||
@@ -0,0 +1,14 @@
|
|||||||
|
"""Claude Agent SDK integration for CoPilot.
|
||||||
|
|
||||||
|
This module provides the integration layer between the Claude Agent SDK
|
||||||
|
and the existing CoPilot tool system, enabling drop-in replacement of
|
||||||
|
the current LLM orchestration with the battle-tested Claude Agent SDK.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from .service import stream_chat_completion_sdk
|
||||||
|
from .tool_adapter import create_copilot_mcp_server
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"stream_chat_completion_sdk",
|
||||||
|
"create_copilot_mcp_server",
|
||||||
|
]
|
||||||
@@ -0,0 +1,354 @@
|
|||||||
|
"""Anthropic SDK fallback implementation.
|
||||||
|
|
||||||
|
This module provides the fallback streaming implementation using the Anthropic SDK
|
||||||
|
directly when the Claude Agent SDK is not available.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import uuid
|
||||||
|
from collections.abc import AsyncGenerator
|
||||||
|
from typing import Any, cast
|
||||||
|
|
||||||
|
from ..config import ChatConfig
|
||||||
|
from ..model import ChatMessage, ChatSession
|
||||||
|
from ..response_model import (
|
||||||
|
StreamBaseResponse,
|
||||||
|
StreamError,
|
||||||
|
StreamFinish,
|
||||||
|
StreamTextDelta,
|
||||||
|
StreamTextEnd,
|
||||||
|
StreamTextStart,
|
||||||
|
StreamToolInputAvailable,
|
||||||
|
StreamToolInputStart,
|
||||||
|
StreamToolOutputAvailable,
|
||||||
|
StreamUsage,
|
||||||
|
)
|
||||||
|
from .tool_adapter import get_tool_definitions, get_tool_handlers
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
config = ChatConfig()
|
||||||
|
|
||||||
|
# Maximum tool-call iterations before stopping to prevent infinite loops
|
||||||
|
_MAX_TOOL_ITERATIONS = 10
|
||||||
|
|
||||||
|
|
||||||
|
async def stream_with_anthropic(
    session: ChatSession,
    system_prompt: str,
    text_block_id: str,
) -> AsyncGenerator[StreamBaseResponse, None]:
    """Stream a chat completion via the Anthropic SDK, with tool-calling support.

    Messages produced along the way are accumulated into ``session`` for
    persistence.  A terminating ``StreamFinish`` is always emitted here, so
    the caller must NOT yield one of its own.
    """
    import anthropic

    # Only use ANTHROPIC_API_KEY - don't fall back to OpenRouter keys
    api_key = os.getenv("ANTHROPIC_API_KEY")
    if not api_key:
        yield StreamError(
            errorText="ANTHROPIC_API_KEY not configured for fallback",
            code="config_error",
        )
        yield StreamFinish()
        return

    client = anthropic.AsyncAnthropic(api_key=api_key)
    definitions = get_tool_definitions()
    handlers = get_tool_handlers()

    # Translate MCP-style tool definitions into Anthropic's tool schema.
    tools_payload = [
        {
            "name": d["name"],
            "description": d["description"],
            "input_schema": d["inputSchema"],
        }
        for d in definitions
    ]

    convo = _convert_session_to_anthropic(session)

    # Anthropic requires the transcript to end on a user turn.
    if not convo or convo[-1]["role"] != "user":
        convo.append({"role": "user", "content": "Continue with the task."})

    has_started_text = False
    accumulated_text = ""
    accumulated_tool_calls: list[dict[str, Any]] = []

    # Bounded loop: each pass is one model turn; tool_use turns feed results
    # back in and continue, any other stop reason finishes the stream.
    for _ in range(_MAX_TOOL_ITERATIONS):
        try:
            async with client.messages.stream(
                # Strip any provider prefix (e.g. "anthropic/claude-x" -> "claude-x").
                model=config.model.rsplit("/", 1)[-1],
                max_tokens=4096,
                system=system_prompt,
                messages=cast(Any, convo),
                tools=cast(Any, tools_payload) if tools_payload else [],
            ) as stream:
                async for event in stream:
                    if event.type == "content_block_start":
                        block = event.content_block
                        if hasattr(block, "type"):
                            if block.type == "text" and not has_started_text:
                                yield StreamTextStart(id=text_block_id)
                                has_started_text = True
                            elif block.type == "tool_use":
                                yield StreamToolInputStart(
                                    toolCallId=block.id, toolName=block.name
                                )

                    elif event.type == "content_block_delta":
                        delta = event.delta
                        if hasattr(delta, "type") and delta.type == "text_delta":
                            accumulated_text += delta.text
                            yield StreamTextDelta(id=text_block_id, delta=delta.text)

                final_message = await stream.get_final_message()

                if final_message.stop_reason == "tool_use":
                    if has_started_text:
                        # Close the current text block; later text gets a new ID.
                        yield StreamTextEnd(id=text_block_id)
                        has_started_text = False
                        text_block_id = str(uuid.uuid4())

                    tool_results = []
                    assistant_content: list[dict[str, Any]] = []

                    for block in final_message.content:
                        if block.type == "text":
                            assistant_content.append(
                                {"type": "text", "text": block.text}
                            )
                        elif block.type == "tool_use":
                            assistant_content.append(
                                {
                                    "type": "tool_use",
                                    "id": block.id,
                                    "name": block.name,
                                    "input": block.input,
                                }
                            )

                            # Track tool call for session persistence
                            accumulated_tool_calls.append(
                                {
                                    "id": block.id,
                                    "type": "function",
                                    "function": {
                                        "name": block.name,
                                        "arguments": json.dumps(
                                            block.input
                                            if isinstance(block.input, dict)
                                            else {}
                                        ),
                                    },
                                }
                            )

                            yield StreamToolInputAvailable(
                                toolCallId=block.id,
                                toolName=block.name,
                                input=(
                                    block.input
                                    if isinstance(block.input, dict)
                                    else {}
                                ),
                            )

                            output, is_error = await _execute_tool(
                                block.name, block.input, handlers
                            )

                            yield StreamToolOutputAvailable(
                                toolCallId=block.id,
                                toolName=block.name,
                                output=output,
                                success=not is_error,
                            )

                            # Save tool result to session
                            session.messages.append(
                                ChatMessage(
                                    role="tool",
                                    content=output,
                                    tool_call_id=block.id,
                                )
                            )

                            tool_results.append(
                                {
                                    "type": "tool_result",
                                    "tool_use_id": block.id,
                                    "content": output,
                                    "is_error": is_error,
                                }
                            )

                    # Save assistant message with tool calls to session
                    session.messages.append(
                        ChatMessage(
                            role="assistant",
                            content=accumulated_text or None,
                            tool_calls=accumulated_tool_calls or None,
                        )
                    )
                    # Reset for next iteration
                    accumulated_text = ""
                    accumulated_tool_calls = []

                    convo.append(
                        {"role": "assistant", "content": assistant_content}
                    )
                    convo.append({"role": "user", "content": tool_results})
                    continue

                # Any other stop reason: finish the stream.
                if has_started_text:
                    yield StreamTextEnd(id=text_block_id)

                # Save final assistant response to session
                if accumulated_text:
                    session.messages.append(
                        ChatMessage(role="assistant", content=accumulated_text)
                    )

                usage = final_message.usage
                yield StreamUsage(
                    promptTokens=usage.input_tokens,
                    completionTokens=usage.output_tokens,
                    totalTokens=usage.input_tokens + usage.output_tokens,
                )
                yield StreamFinish()
                return

        except Exception as e:
            logger.error(f"[Anthropic Fallback] Error: {e}", exc_info=True)
            yield StreamError(
                errorText="An error occurred. Please try again.",
                code="anthropic_error",
            )
            yield StreamFinish()
            return

    yield StreamError(errorText="Max tool iterations reached", code="max_iterations")
    yield StreamFinish()
|
||||||
|
|
||||||
|
|
||||||
|
def _convert_session_to_anthropic(session: ChatSession) -> list[dict[str, Any]]:
    """Translate session history into Anthropic's message format.

    Consecutive same-role messages are merged afterwards, because the
    Anthropic API requires strictly alternating user/assistant roles.
    """
    converted: list[dict[str, Any]] = []

    for msg in session.messages:
        if msg.role == "user":
            entry: dict[str, Any] = {"role": "user", "content": msg.content or ""}
        elif msg.role == "assistant":
            blocks: list[dict[str, Any]] = []
            if msg.content:
                blocks.append({"type": "text", "text": msg.content})
            for tc in msg.tool_calls or []:
                func = tc.get("function", {})
                args = func.get("arguments", {})
                if isinstance(args, str):
                    # Arguments are persisted as JSON text; tolerate bad JSON.
                    try:
                        args = json.loads(args)
                    except json.JSONDecodeError:
                        args = {}
                blocks.append(
                    {
                        "type": "tool_use",
                        "id": tc.get("id", str(uuid.uuid4())),
                        "name": func.get("name", ""),
                        "input": args,
                    }
                )
            if not blocks:
                continue  # Skip empty assistant messages
            entry = {"role": "assistant", "content": blocks}
        elif msg.role == "tool":
            # Tool results are represented as user-role tool_result blocks.
            entry = {
                "role": "user",
                "content": [
                    {
                        "type": "tool_result",
                        "tool_use_id": msg.tool_call_id or "",
                        "content": msg.content or "",
                    }
                ],
            }
        else:
            continue

        converted.append(entry)

    # Merge consecutive same-role messages (Anthropic requires alternating roles)
    return _merge_consecutive_roles(converted)
|
||||||
|
|
||||||
|
|
||||||
|
def _merge_consecutive_roles(messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
|
||||||
|
"""Merge consecutive messages with the same role.
|
||||||
|
|
||||||
|
Anthropic API requires alternating user/assistant roles.
|
||||||
|
"""
|
||||||
|
if not messages:
|
||||||
|
return []
|
||||||
|
|
||||||
|
merged: list[dict[str, Any]] = []
|
||||||
|
for msg in messages:
|
||||||
|
if merged and merged[-1]["role"] == msg["role"]:
|
||||||
|
# Merge with previous message
|
||||||
|
prev_content = merged[-1]["content"]
|
||||||
|
new_content = msg["content"]
|
||||||
|
|
||||||
|
# Normalize both to list-of-blocks form
|
||||||
|
if isinstance(prev_content, str):
|
||||||
|
prev_content = [{"type": "text", "text": prev_content}]
|
||||||
|
if isinstance(new_content, str):
|
||||||
|
new_content = [{"type": "text", "text": new_content}]
|
||||||
|
|
||||||
|
# Ensure both are lists
|
||||||
|
if not isinstance(prev_content, list):
|
||||||
|
prev_content = [prev_content]
|
||||||
|
if not isinstance(new_content, list):
|
||||||
|
new_content = [new_content]
|
||||||
|
|
||||||
|
merged[-1]["content"] = prev_content + new_content
|
||||||
|
else:
|
||||||
|
merged.append(msg)
|
||||||
|
|
||||||
|
return merged
|
||||||
|
|
||||||
|
|
||||||
|
async def _execute_tool(
|
||||||
|
tool_name: str, tool_input: Any, handlers: dict[str, Any]
|
||||||
|
) -> tuple[str, bool]:
|
||||||
|
"""Execute a tool and return (output, is_error)."""
|
||||||
|
handler = handlers.get(tool_name)
|
||||||
|
if not handler:
|
||||||
|
return f"Unknown tool: {tool_name}", True
|
||||||
|
|
||||||
|
try:
|
||||||
|
result = await handler(tool_input)
|
||||||
|
# Safely extract output - handle empty or missing content
|
||||||
|
content = result.get("content") or []
|
||||||
|
if content and isinstance(content, list) and len(content) > 0:
|
||||||
|
first_item = content[0]
|
||||||
|
output = first_item.get("text", "") if isinstance(first_item, dict) else ""
|
||||||
|
else:
|
||||||
|
output = ""
|
||||||
|
is_error = result.get("isError", False)
|
||||||
|
return output, is_error
|
||||||
|
except Exception as e:
|
||||||
|
return f"Error: {str(e)}", True
|
||||||
@@ -0,0 +1,160 @@
|
|||||||
|
"""Response adapter for converting Claude Agent SDK messages to Vercel AI SDK format.
|
||||||
|
|
||||||
|
This module provides the adapter layer that converts streaming messages from
|
||||||
|
the Claude Agent SDK into the Vercel AI SDK UI Stream Protocol format that
|
||||||
|
the frontend expects.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
from claude_agent_sdk import (
|
||||||
|
AssistantMessage,
|
||||||
|
Message,
|
||||||
|
ResultMessage,
|
||||||
|
SystemMessage,
|
||||||
|
TextBlock,
|
||||||
|
ToolResultBlock,
|
||||||
|
ToolUseBlock,
|
||||||
|
UserMessage,
|
||||||
|
)
|
||||||
|
|
||||||
|
from backend.api.features.chat.response_model import (
|
||||||
|
StreamBaseResponse,
|
||||||
|
StreamError,
|
||||||
|
StreamFinish,
|
||||||
|
StreamStart,
|
||||||
|
StreamTextDelta,
|
||||||
|
StreamTextEnd,
|
||||||
|
StreamTextStart,
|
||||||
|
StreamToolInputAvailable,
|
||||||
|
StreamToolInputStart,
|
||||||
|
StreamToolOutputAvailable,
|
||||||
|
)
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class SDKResponseAdapter:
    """Converts Claude Agent SDK messages into Vercel AI SDK stream parts.

    Instances are stateful for the duration of one streaming session: they
    track the current text block, open tool calls, and message lifecycle so
    the emitted protocol events are well-formed.
    """

    def __init__(self, message_id: str | None = None):
        self.message_id = message_id or str(uuid.uuid4())
        self.text_block_id = str(uuid.uuid4())
        self.has_started_text = False
        self.has_ended_text = False
        self.current_tool_calls: dict[str, dict[str, str]] = {}
        self.task_id: str | None = None

    def set_task_id(self, task_id: str) -> None:
        """Set the task ID for reconnection support."""
        self.task_id = task_id

    def convert_message(self, sdk_message: Message) -> list[StreamBaseResponse]:
        """Convert a single SDK message to Vercel AI SDK format."""
        responses: list[StreamBaseResponse] = []

        if isinstance(sdk_message, SystemMessage):
            if sdk_message.subtype == "init":
                responses.append(
                    StreamStart(messageId=self.message_id, taskId=self.task_id)
                )
        elif isinstance(sdk_message, AssistantMessage):
            self._convert_assistant(sdk_message, responses)
        elif isinstance(sdk_message, UserMessage):
            self._convert_user(sdk_message, responses)
        elif isinstance(sdk_message, ResultMessage):
            self._convert_result(sdk_message, responses)
        else:
            logger.debug(f"Unhandled SDK message type: {type(sdk_message).__name__}")

        return responses

    def _convert_assistant(
        self, msg: AssistantMessage, responses: list[StreamBaseResponse]
    ) -> None:
        # Assistant messages carry text blocks and/or tool-use blocks.
        for block in msg.content:
            if isinstance(block, TextBlock):
                if block.text:
                    self._ensure_text_started(responses)
                    responses.append(
                        StreamTextDelta(id=self.text_block_id, delta=block.text)
                    )
            elif isinstance(block, ToolUseBlock):
                # A tool call closes any open text block.
                self._end_text_if_open(responses)
                responses.append(
                    StreamToolInputStart(toolCallId=block.id, toolName=block.name)
                )
                responses.append(
                    StreamToolInputAvailable(
                        toolCallId=block.id,
                        toolName=block.name,
                        input=block.input,
                    )
                )
                # Remember the tool name so its result can be labelled later.
                self.current_tool_calls[block.id] = {"name": block.name}

    def _convert_user(
        self, msg: UserMessage, responses: list[StreamBaseResponse]
    ) -> None:
        # UserMessage carries tool results back from tool execution
        content = msg.content
        for block in content if isinstance(content, list) else []:
            if isinstance(block, ToolResultBlock) and block.tool_use_id:
                info = self.current_tool_calls.get(block.tool_use_id, {})
                responses.append(
                    StreamToolOutputAvailable(
                        toolCallId=block.tool_use_id,
                        toolName=info.get("name", "unknown"),
                        output=_extract_tool_output(block.content),
                        success=not (block.is_error or False),
                    )
                )

    def _convert_result(
        self, msg: ResultMessage, responses: list[StreamBaseResponse]
    ) -> None:
        if msg.subtype == "success":
            self._end_text_if_open(responses)
            responses.append(StreamFinish())
        elif msg.subtype in ("error", "error_during_execution"):
            error_msg = getattr(msg, "result", None) or "Unknown error"
            responses.append(StreamError(errorText=str(error_msg), code="sdk_error"))
            responses.append(StreamFinish())

    def _ensure_text_started(self, responses: list[StreamBaseResponse]) -> None:
        """Start (or restart) a text block if needed."""
        if self.has_started_text and not self.has_ended_text:
            return
        if self.has_ended_text:
            # Text resumes after a tool call: allocate a fresh block ID.
            self.text_block_id = str(uuid.uuid4())
            self.has_ended_text = False
        responses.append(StreamTextStart(id=self.text_block_id))
        self.has_started_text = True

    def _end_text_if_open(self, responses: list[StreamBaseResponse]) -> None:
        """End the current text block if one is open."""
        if self.has_started_text and not self.has_ended_text:
            responses.append(StreamTextEnd(id=self.text_block_id))
            self.has_ended_text = True
|
||||||
|
|
||||||
|
|
||||||
|
def _extract_tool_output(content: str | list[dict[str, str]] | None) -> str:
|
||||||
|
"""Extract a string output from a ToolResultBlock's content field."""
|
||||||
|
if isinstance(content, str):
|
||||||
|
return content
|
||||||
|
if isinstance(content, list):
|
||||||
|
parts = [item.get("text", "") for item in content if item.get("type") == "text"]
|
||||||
|
if parts:
|
||||||
|
return "".join(parts)
|
||||||
|
try:
|
||||||
|
return json.dumps(content)
|
||||||
|
except (TypeError, ValueError):
|
||||||
|
return str(content)
|
||||||
|
if content is None:
|
||||||
|
return ""
|
||||||
|
try:
|
||||||
|
return json.dumps(content)
|
||||||
|
except (TypeError, ValueError):
|
||||||
|
return str(content)
|
||||||
@@ -0,0 +1,324 @@
|
|||||||
|
"""Unit tests for the SDK response adapter."""
|
||||||
|
|
||||||
|
from claude_agent_sdk import (
|
||||||
|
AssistantMessage,
|
||||||
|
ResultMessage,
|
||||||
|
SystemMessage,
|
||||||
|
TextBlock,
|
||||||
|
ToolResultBlock,
|
||||||
|
ToolUseBlock,
|
||||||
|
UserMessage,
|
||||||
|
)
|
||||||
|
|
||||||
|
from backend.api.features.chat.response_model import (
|
||||||
|
StreamBaseResponse,
|
||||||
|
StreamError,
|
||||||
|
StreamFinish,
|
||||||
|
StreamStart,
|
||||||
|
StreamTextDelta,
|
||||||
|
StreamTextEnd,
|
||||||
|
StreamTextStart,
|
||||||
|
StreamToolInputAvailable,
|
||||||
|
StreamToolInputStart,
|
||||||
|
StreamToolOutputAvailable,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .response_adapter import SDKResponseAdapter
|
||||||
|
|
||||||
|
|
||||||
|
def _adapter() -> SDKResponseAdapter:
|
||||||
|
a = SDKResponseAdapter(message_id="msg-1")
|
||||||
|
a.set_task_id("task-1")
|
||||||
|
return a
|
||||||
|
|
||||||
|
|
||||||
|
# -- SystemMessage -----------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_system_init_emits_start():
|
||||||
|
adapter = _adapter()
|
||||||
|
results = adapter.convert_message(SystemMessage(subtype="init", data={}))
|
||||||
|
assert len(results) == 1
|
||||||
|
assert isinstance(results[0], StreamStart)
|
||||||
|
assert results[0].messageId == "msg-1"
|
||||||
|
assert results[0].taskId == "task-1"
|
||||||
|
|
||||||
|
|
||||||
|
def test_system_non_init_emits_nothing():
|
||||||
|
adapter = _adapter()
|
||||||
|
results = adapter.convert_message(SystemMessage(subtype="other", data={}))
|
||||||
|
assert results == []
|
||||||
|
|
||||||
|
|
||||||
|
# -- AssistantMessage with TextBlock -----------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_text_block_emits_start_and_delta():
|
||||||
|
adapter = _adapter()
|
||||||
|
msg = AssistantMessage(content=[TextBlock(text="hello")], model="test")
|
||||||
|
results = adapter.convert_message(msg)
|
||||||
|
assert len(results) == 2
|
||||||
|
assert isinstance(results[0], StreamTextStart)
|
||||||
|
assert isinstance(results[1], StreamTextDelta)
|
||||||
|
assert results[1].delta == "hello"
|
||||||
|
|
||||||
|
|
||||||
|
def test_empty_text_block_is_skipped():
|
||||||
|
adapter = _adapter()
|
||||||
|
msg = AssistantMessage(content=[TextBlock(text="")], model="test")
|
||||||
|
results = adapter.convert_message(msg)
|
||||||
|
assert results == []
|
||||||
|
|
||||||
|
|
||||||
|
def test_multiple_text_deltas_reuse_block_id():
|
||||||
|
adapter = _adapter()
|
||||||
|
msg1 = AssistantMessage(content=[TextBlock(text="a")], model="test")
|
||||||
|
msg2 = AssistantMessage(content=[TextBlock(text="b")], model="test")
|
||||||
|
r1 = adapter.convert_message(msg1)
|
||||||
|
r2 = adapter.convert_message(msg2)
|
||||||
|
# First gets start+delta, second only delta (block already started)
|
||||||
|
assert len(r1) == 2
|
||||||
|
assert len(r2) == 1
|
||||||
|
assert isinstance(r2[0], StreamTextDelta)
|
||||||
|
assert isinstance(r1[0], StreamTextStart)
|
||||||
|
assert r1[0].id == r2[0].id # same block ID
|
||||||
|
|
||||||
|
|
||||||
|
# -- AssistantMessage with ToolUseBlock --------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_tool_use_emits_input_start_and_available():
|
||||||
|
adapter = _adapter()
|
||||||
|
msg = AssistantMessage(
|
||||||
|
content=[ToolUseBlock(id="tool-1", name="find_agent", input={"q": "x"})],
|
||||||
|
model="test",
|
||||||
|
)
|
||||||
|
results = adapter.convert_message(msg)
|
||||||
|
assert len(results) == 2
|
||||||
|
assert isinstance(results[0], StreamToolInputStart)
|
||||||
|
assert results[0].toolCallId == "tool-1"
|
||||||
|
assert results[0].toolName == "find_agent"
|
||||||
|
assert isinstance(results[1], StreamToolInputAvailable)
|
||||||
|
assert results[1].input == {"q": "x"}
|
||||||
|
|
||||||
|
|
||||||
|
def test_text_then_tool_ends_text_block():
|
||||||
|
adapter = _adapter()
|
||||||
|
text_msg = AssistantMessage(content=[TextBlock(text="thinking...")], model="test")
|
||||||
|
tool_msg = AssistantMessage(
|
||||||
|
content=[ToolUseBlock(id="t1", name="tool", input={})], model="test"
|
||||||
|
)
|
||||||
|
adapter.convert_message(text_msg)
|
||||||
|
results = adapter.convert_message(tool_msg)
|
||||||
|
# Should have: TextEnd, ToolInputStart, ToolInputAvailable
|
||||||
|
assert len(results) == 3
|
||||||
|
assert isinstance(results[0], StreamTextEnd)
|
||||||
|
assert isinstance(results[1], StreamToolInputStart)
|
||||||
|
|
||||||
|
|
||||||
|
# -- UserMessage with ToolResultBlock ----------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_tool_result_emits_output():
|
||||||
|
adapter = _adapter()
|
||||||
|
# First register the tool call
|
||||||
|
tool_msg = AssistantMessage(
|
||||||
|
content=[ToolUseBlock(id="t1", name="find_agent", input={})], model="test"
|
||||||
|
)
|
||||||
|
adapter.convert_message(tool_msg)
|
||||||
|
|
||||||
|
# Now send tool result
|
||||||
|
result_msg = UserMessage(
|
||||||
|
content=[ToolResultBlock(tool_use_id="t1", content="found 3 agents")]
|
||||||
|
)
|
||||||
|
results = adapter.convert_message(result_msg)
|
||||||
|
assert len(results) == 1
|
||||||
|
assert isinstance(results[0], StreamToolOutputAvailable)
|
||||||
|
assert results[0].toolCallId == "t1"
|
||||||
|
assert results[0].toolName == "find_agent"
|
||||||
|
assert results[0].output == "found 3 agents"
|
||||||
|
assert results[0].success is True
|
||||||
|
|
||||||
|
|
||||||
|
def test_tool_result_error():
|
||||||
|
adapter = _adapter()
|
||||||
|
adapter.convert_message(
|
||||||
|
AssistantMessage(
|
||||||
|
content=[ToolUseBlock(id="t1", name="run_agent", input={})], model="test"
|
||||||
|
)
|
||||||
|
)
|
||||||
|
result_msg = UserMessage(
|
||||||
|
content=[ToolResultBlock(tool_use_id="t1", content="timeout", is_error=True)]
|
||||||
|
)
|
||||||
|
results = adapter.convert_message(result_msg)
|
||||||
|
assert isinstance(results[0], StreamToolOutputAvailable)
|
||||||
|
assert results[0].success is False
|
||||||
|
|
||||||
|
|
||||||
|
def test_tool_result_list_content():
|
||||||
|
adapter = _adapter()
|
||||||
|
adapter.convert_message(
|
||||||
|
AssistantMessage(
|
||||||
|
content=[ToolUseBlock(id="t1", name="tool", input={})], model="test"
|
||||||
|
)
|
||||||
|
)
|
||||||
|
result_msg = UserMessage(
|
||||||
|
content=[
|
||||||
|
ToolResultBlock(
|
||||||
|
tool_use_id="t1",
|
||||||
|
content=[
|
||||||
|
{"type": "text", "text": "line1"},
|
||||||
|
{"type": "text", "text": "line2"},
|
||||||
|
],
|
||||||
|
)
|
||||||
|
]
|
||||||
|
)
|
||||||
|
results = adapter.convert_message(result_msg)
|
||||||
|
assert isinstance(results[0], StreamToolOutputAvailable)
|
||||||
|
assert results[0].output == "line1line2"
|
||||||
|
|
||||||
|
|
||||||
|
def test_string_user_message_ignored():
|
||||||
|
"""A plain string UserMessage (not tool results) produces no output."""
|
||||||
|
adapter = _adapter()
|
||||||
|
results = adapter.convert_message(UserMessage(content="hello"))
|
||||||
|
assert results == []
|
||||||
|
|
||||||
|
|
||||||
|
# -- ResultMessage -----------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_result_success_emits_finish():
|
||||||
|
adapter = _adapter()
|
||||||
|
# Start some text first
|
||||||
|
adapter.convert_message(
|
||||||
|
AssistantMessage(content=[TextBlock(text="done")], model="test")
|
||||||
|
)
|
||||||
|
msg = ResultMessage(
|
||||||
|
subtype="success",
|
||||||
|
duration_ms=100,
|
||||||
|
duration_api_ms=50,
|
||||||
|
is_error=False,
|
||||||
|
num_turns=1,
|
||||||
|
session_id="s1",
|
||||||
|
)
|
||||||
|
results = adapter.convert_message(msg)
|
||||||
|
# TextEnd + StreamFinish
|
||||||
|
assert len(results) == 2
|
||||||
|
assert isinstance(results[0], StreamTextEnd)
|
||||||
|
assert isinstance(results[1], StreamFinish)
|
||||||
|
|
||||||
|
|
||||||
|
def test_result_error_emits_error_and_finish():
|
||||||
|
adapter = _adapter()
|
||||||
|
msg = ResultMessage(
|
||||||
|
subtype="error",
|
||||||
|
duration_ms=100,
|
||||||
|
duration_api_ms=50,
|
||||||
|
is_error=True,
|
||||||
|
num_turns=0,
|
||||||
|
session_id="s1",
|
||||||
|
result="API rate limited",
|
||||||
|
)
|
||||||
|
results = adapter.convert_message(msg)
|
||||||
|
assert len(results) == 2
|
||||||
|
assert isinstance(results[0], StreamError)
|
||||||
|
assert "API rate limited" in results[0].errorText
|
||||||
|
assert isinstance(results[1], StreamFinish)
|
||||||
|
|
||||||
|
|
||||||
|
# -- Text after tools (new block ID) ----------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_text_after_tool_gets_new_block_id():
|
||||||
|
adapter = _adapter()
|
||||||
|
# Text -> Tool -> Text should get a new text block ID
|
||||||
|
adapter.convert_message(
|
||||||
|
AssistantMessage(content=[TextBlock(text="before")], model="test")
|
||||||
|
)
|
||||||
|
adapter.convert_message(
|
||||||
|
AssistantMessage(
|
||||||
|
content=[ToolUseBlock(id="t1", name="tool", input={})], model="test"
|
||||||
|
)
|
||||||
|
)
|
||||||
|
results = adapter.convert_message(
|
||||||
|
AssistantMessage(content=[TextBlock(text="after")], model="test")
|
||||||
|
)
|
||||||
|
# Should get StreamTextStart (new block) + StreamTextDelta
|
||||||
|
assert len(results) == 2
|
||||||
|
assert isinstance(results[0], StreamTextStart)
|
||||||
|
assert isinstance(results[1], StreamTextDelta)
|
||||||
|
assert results[1].delta == "after"
|
||||||
|
|
||||||
|
|
||||||
|
# -- Full conversation flow --------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_full_conversation_flow():
|
||||||
|
"""Simulate a complete conversation: init -> text -> tool -> result -> text -> finish."""
|
||||||
|
adapter = _adapter()
|
||||||
|
all_responses: list[StreamBaseResponse] = []
|
||||||
|
|
||||||
|
# 1. Init
|
||||||
|
all_responses.extend(
|
||||||
|
adapter.convert_message(SystemMessage(subtype="init", data={}))
|
||||||
|
)
|
||||||
|
# 2. Assistant text
|
||||||
|
all_responses.extend(
|
||||||
|
adapter.convert_message(
|
||||||
|
AssistantMessage(content=[TextBlock(text="Let me search")], model="test")
|
||||||
|
)
|
||||||
|
)
|
||||||
|
# 3. Tool use
|
||||||
|
all_responses.extend(
|
||||||
|
adapter.convert_message(
|
||||||
|
AssistantMessage(
|
||||||
|
content=[
|
||||||
|
ToolUseBlock(id="t1", name="find_agent", input={"query": "email"})
|
||||||
|
],
|
||||||
|
model="test",
|
||||||
|
)
|
||||||
|
)
|
||||||
|
)
|
||||||
|
# 4. Tool result
|
||||||
|
all_responses.extend(
|
||||||
|
adapter.convert_message(
|
||||||
|
UserMessage(
|
||||||
|
content=[ToolResultBlock(tool_use_id="t1", content="Found 2 agents")]
|
||||||
|
)
|
||||||
|
)
|
||||||
|
)
|
||||||
|
# 5. More text
|
||||||
|
all_responses.extend(
|
||||||
|
adapter.convert_message(
|
||||||
|
AssistantMessage(content=[TextBlock(text="I found 2")], model="test")
|
||||||
|
)
|
||||||
|
)
|
||||||
|
# 6. Result
|
||||||
|
all_responses.extend(
|
||||||
|
adapter.convert_message(
|
||||||
|
ResultMessage(
|
||||||
|
subtype="success",
|
||||||
|
duration_ms=500,
|
||||||
|
duration_api_ms=400,
|
||||||
|
is_error=False,
|
||||||
|
num_turns=2,
|
||||||
|
session_id="s1",
|
||||||
|
)
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
types = [type(r).__name__ for r in all_responses]
|
||||||
|
assert types == [
|
||||||
|
"StreamStart",
|
||||||
|
"StreamTextStart",
|
||||||
|
"StreamTextDelta", # "Let me search"
|
||||||
|
"StreamTextEnd", # closed before tool
|
||||||
|
"StreamToolInputStart",
|
||||||
|
"StreamToolInputAvailable",
|
||||||
|
"StreamToolOutputAvailable", # tool result
|
||||||
|
"StreamTextStart", # new block after tool
|
||||||
|
"StreamTextDelta", # "I found 2"
|
||||||
|
"StreamTextEnd", # closed by result
|
||||||
|
"StreamFinish",
|
||||||
|
]
|
||||||
@@ -0,0 +1,212 @@
|
|||||||
|
"""Security hooks for Claude Agent SDK integration.
|
||||||
|
|
||||||
|
This module provides security hooks that validate tool calls before execution,
|
||||||
|
ensuring multi-user isolation and preventing unauthorized operations.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import re
|
||||||
|
from typing import Any, cast
|
||||||
|
|
||||||
|
from backend.api.features.chat.sdk.tool_adapter import MCP_TOOL_PREFIX
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Tools that are blocked entirely (CLI/system access)
BLOCKED_TOOLS = {
    "Bash",
    "bash",
    "shell",
    "exec",
    "terminal",
    "command",
    "Read",  # Block raw file read - use workspace tools instead
    "Write",  # Block raw file write - use workspace tools instead
    "Edit",  # Block raw file edit - use workspace tools instead
    "Glob",  # Block raw file glob - use workspace tools instead
    "Grep",  # Block raw file grep - use workspace tools instead
}

# Dangerous patterns in tool inputs
DANGEROUS_PATTERNS = [
    r"sudo",
    r"rm\s+-rf",
    r"dd\s+if=",
    r"/etc/passwd",
    r"/etc/shadow",
    r"chmod\s+777",
    r"curl\s+.*\|.*sh",
    r"wget\s+.*\|.*sh",
    r"eval\s*\(",
    r"exec\s*\(",
    r"__import__",
    r"os\.system",
    r"subprocess",
]
|
||||||
|
|
||||||
|
|
||||||
|
def _validate_tool_access(tool_name: str, tool_input: dict[str, Any]) -> dict[str, Any]:
    """Validate that a tool call is allowed.

    Returns:
        Empty dict to allow, or dict with hookSpecificOutput to deny
    """

    def _deny(reason: str) -> dict[str, Any]:
        # Shape required by the SDK's PreToolUse hook protocol.
        return {
            "hookSpecificOutput": {
                "hookEventName": "PreToolUse",
                "permissionDecision": "deny",
                "permissionDecisionReason": reason,
            }
        }

    # Block forbidden tools
    if tool_name in BLOCKED_TOOLS:
        logger.warning(f"Blocked tool access attempt: {tool_name}")
        return _deny(
            f"Tool '{tool_name}' is not available. "
            "Use the CoPilot-specific tools instead."
        )

    # Check for dangerous patterns in tool input
    input_str = str(tool_input)
    for pattern in DANGEROUS_PATTERNS:
        if re.search(pattern, input_str, re.IGNORECASE):
            logger.warning(
                f"Blocked dangerous pattern in tool input: {pattern} in {tool_name}"
            )
            return _deny("Input contains blocked pattern")

    return {}
|
||||||
|
|
||||||
|
|
||||||
|
def _validate_user_isolation(
|
||||||
|
tool_name: str, tool_input: dict[str, Any], user_id: str | None
|
||||||
|
) -> dict[str, Any]:
|
||||||
|
"""Validate that tool calls respect user isolation."""
|
||||||
|
# For workspace file tools, ensure path doesn't escape
|
||||||
|
if "workspace" in tool_name.lower():
|
||||||
|
path = tool_input.get("path", "") or tool_input.get("file_path", "")
|
||||||
|
if path:
|
||||||
|
# Check for path traversal
|
||||||
|
if ".." in path or path.startswith("/"):
|
||||||
|
logger.warning(
|
||||||
|
f"Blocked path traversal attempt: {path} by user {user_id}"
|
||||||
|
)
|
||||||
|
return {
|
||||||
|
"hookSpecificOutput": {
|
||||||
|
"hookEventName": "PreToolUse",
|
||||||
|
"permissionDecision": "deny",
|
||||||
|
"permissionDecisionReason": "Path traversal not allowed",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
def create_security_hooks(user_id: str | None) -> dict[str, Any]:
    """Create the security hooks configuration for Claude Agent SDK.

    Includes security validation and observability hooks:
    - PreToolUse: Security validation before tool execution
    - PostToolUse: Log successful tool executions
    - PostToolUseFailure: Log and handle failed tool executions
    - PreCompact: Log context compaction events (SDK handles compaction automatically)

    Args:
        user_id: Current user ID for isolation validation

    Returns:
        Hooks configuration dict for ClaudeAgentOptions
    """
    try:
        # Imported lazily so environments without the SDK still load this
        # module; the ImportError branch below returns empty hooks.
        from claude_agent_sdk import HookMatcher
        from claude_agent_sdk.types import HookContext, HookInput, SyncHookJSONOutput

        async def pre_tool_use_hook(
            input_data: HookInput,
            tool_use_id: str | None,
            context: HookContext,
        ) -> SyncHookJSONOutput:
            """Combined pre-tool-use validation hook."""
            _ = context  # unused but required by signature
            tool_name = cast(str, input_data.get("tool_name", ""))
            tool_input = cast(dict[str, Any], input_data.get("tool_input", {}))

            # Strip MCP prefix for consistent validation
            is_copilot_tool = tool_name.startswith(MCP_TOOL_PREFIX)
            clean_name = tool_name.removeprefix(MCP_TOOL_PREFIX)

            # Only block non-CoPilot tools; our MCP-registered tools
            # (including Read for oversized results) are already sandboxed.
            if not is_copilot_tool:
                result = _validate_tool_access(clean_name, tool_input)
                if result:
                    # Non-empty result is a deny decision — return it as-is.
                    return cast(SyncHookJSONOutput, result)

            # Validate user isolation (applies to all tools, CoPilot or not)
            result = _validate_user_isolation(clean_name, tool_input, user_id)
            if result:
                return cast(SyncHookJSONOutput, result)

            logger.debug(f"[SDK] Tool start: {tool_name}, user={user_id}")
            return cast(SyncHookJSONOutput, {})

        async def post_tool_use_hook(
            input_data: HookInput,
            tool_use_id: str | None,
            context: HookContext,
        ) -> SyncHookJSONOutput:
            """Log successful tool executions for observability."""
            _ = context
            tool_name = cast(str, input_data.get("tool_name", ""))
            logger.debug(f"[SDK] Tool success: {tool_name}, tool_use_id={tool_use_id}")
            return cast(SyncHookJSONOutput, {})

        async def post_tool_failure_hook(
            input_data: HookInput,
            tool_use_id: str | None,
            context: HookContext,
        ) -> SyncHookJSONOutput:
            """Log failed tool executions for debugging."""
            _ = context
            tool_name = cast(str, input_data.get("tool_name", ""))
            error = input_data.get("error", "Unknown error")
            logger.warning(
                f"[SDK] Tool failed: {tool_name}, error={error}, "
                f"user={user_id}, tool_use_id={tool_use_id}"
            )
            return cast(SyncHookJSONOutput, {})

        async def pre_compact_hook(
            input_data: HookInput,
            tool_use_id: str | None,
            context: HookContext,
        ) -> SyncHookJSONOutput:
            """Log when SDK triggers context compaction.

            The SDK automatically compacts conversation history when it grows too large.
            This hook provides visibility into when compaction happens.
            """
            _ = context, tool_use_id
            trigger = input_data.get("trigger", "auto")
            logger.info(
                f"[SDK] Context compaction triggered: {trigger}, user={user_id}"
            )
            return cast(SyncHookJSONOutput, {})

        # matcher="*" applies each hook to every tool.
        return {
            "PreToolUse": [HookMatcher(matcher="*", hooks=[pre_tool_use_hook])],
            "PostToolUse": [HookMatcher(matcher="*", hooks=[post_tool_use_hook])],
            "PostToolUseFailure": [
                HookMatcher(matcher="*", hooks=[post_tool_failure_hook])
            ],
            "PreCompact": [HookMatcher(matcher="*", hooks=[pre_compact_hook])],
        }
    except ImportError:
        # Fallback for when SDK isn't available - return empty hooks
        return {}
|
||||||
@@ -0,0 +1,438 @@
|
|||||||
|
"""Claude Agent SDK service layer for CoPilot chat completions."""
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import uuid
|
||||||
|
from collections.abc import AsyncGenerator
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
from backend.util.exceptions import NotFoundError
|
||||||
|
|
||||||
|
from ..config import ChatConfig
|
||||||
|
from ..model import (
|
||||||
|
ChatMessage,
|
||||||
|
ChatSession,
|
||||||
|
get_chat_session,
|
||||||
|
update_session_title,
|
||||||
|
upsert_chat_session,
|
||||||
|
)
|
||||||
|
from ..response_model import (
|
||||||
|
StreamBaseResponse,
|
||||||
|
StreamError,
|
||||||
|
StreamFinish,
|
||||||
|
StreamStart,
|
||||||
|
StreamTextDelta,
|
||||||
|
StreamToolInputAvailable,
|
||||||
|
StreamToolOutputAvailable,
|
||||||
|
)
|
||||||
|
from ..service import _build_system_prompt, _generate_session_title
|
||||||
|
from ..tracking import track_user_message
|
||||||
|
from .anthropic_fallback import stream_with_anthropic
|
||||||
|
from .response_adapter import SDKResponseAdapter
|
||||||
|
from .security_hooks import create_security_hooks
|
||||||
|
from .tool_adapter import (
|
||||||
|
COPILOT_TOOL_NAMES,
|
||||||
|
create_copilot_mcp_server,
|
||||||
|
set_execution_context,
|
||||||
|
)
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
config = ChatConfig()
|
||||||
|
|
||||||
|
# Set to hold background tasks to prevent garbage collection
|
||||||
|
_background_tasks: set[asyncio.Task[Any]] = set()
|
||||||
|
|
||||||
|
|
||||||
|
_SDK_CWD_PREFIX = "/tmp/copilot-"
|
||||||
|
|
||||||
|
|
||||||
|
def _make_sdk_cwd(session_id: str) -> str:
|
||||||
|
"""Create a safe, session-specific working directory path.
|
||||||
|
|
||||||
|
Sanitizes session_id, then validates the resulting path stays under /tmp/
|
||||||
|
using normpath + startswith (the pattern CodeQL recognises as a sanitizer).
|
||||||
|
"""
|
||||||
|
safe_id = re.sub(r"[^A-Za-z0-9-]", "", session_id)
|
||||||
|
cwd = os.path.normpath(f"{_SDK_CWD_PREFIX}{safe_id}")
|
||||||
|
if not cwd.startswith(_SDK_CWD_PREFIX):
|
||||||
|
raise ValueError(f"Session path escaped prefix: {cwd}")
|
||||||
|
return cwd
|
||||||
|
|
||||||
|
|
||||||
|
def _cleanup_sdk_tool_results(cwd: str) -> None:
    """Remove SDK tool-result files for one session's working directory.

    The SDK writes oversized tool results under
    ``~/.claude/projects/<encoded-cwd>/tool-results/``. Only this session's
    files (plus its temp cwd) are removed, so concurrent sessions never
    race each other's cleanup.
    """
    import glob as _glob
    import shutil

    # Sanitizer: refuse anything outside the copilot temp prefix
    # (normpath + startswith — the pattern CodeQL recognises).
    safe_cwd = os.path.normpath(cwd)
    if not safe_cwd.startswith(_SDK_CWD_PREFIX):
        return

    # The SDK derives its project folder name from the cwd with '/' -> '-'.
    project_root = os.path.expanduser(
        "~/.claude/projects/" + safe_cwd.replace("/", "-")
    )
    for result_file in _glob.glob(os.path.join(project_root, "tool-results", "*")):
        try:
            os.remove(result_file)
        except OSError:
            pass  # best-effort cleanup

    # Also drop the session's temp working directory itself.
    try:
        shutil.rmtree(safe_cwd, ignore_errors=True)
    except OSError:
        pass
|
||||||
|
|
||||||
|
|
||||||
|
async def _compress_conversation_history(
    session: ChatSession,
) -> list[ChatMessage]:
    """Compress prior conversation messages if they exceed the token threshold.

    Uses the shared compress_context() from prompt.py which supports:
    - LLM summarization of old messages (keeps recent ones intact)
    - Progressive content truncation as fallback
    - Middle-out deletion as last resort

    Returns the compressed prior messages (everything except the current message).
    """
    # Everything except the just-appended current message.
    prior = session.messages[:-1]
    if len(prior) < 2:
        # Too little history to be worth compressing.
        return prior

    from backend.util.prompt import compress_context

    # Convert ChatMessages to dicts for compress_context
    # (only fields that are actually set are included).
    messages_dict = []
    for msg in prior:
        msg_dict: dict[str, Any] = {"role": msg.role}
        if msg.content:
            msg_dict["content"] = msg.content
        if msg.tool_calls:
            msg_dict["tool_calls"] = msg.tool_calls
        if msg.tool_call_id:
            msg_dict["tool_call_id"] = msg.tool_call_id
        messages_dict.append(msg_dict)

    try:
        import openai

        # LLM-backed summarization path.
        async with openai.AsyncOpenAI(
            api_key=config.api_key, base_url=config.base_url, timeout=30.0
        ) as client:
            result = await compress_context(
                messages=messages_dict,
                model=config.model,
                client=client,
            )
    except Exception as e:
        logger.warning(f"[SDK] Context compression with LLM failed: {e}")
        # Fall back to truncation-only (no LLM summarization)
        result = await compress_context(
            messages=messages_dict,
            model=config.model,
            client=None,
        )

    if result.was_compacted:
        logger.info(
            f"[SDK] Context compacted: {result.original_token_count} -> "
            f"{result.token_count} tokens "
            f"({result.messages_summarized} summarized, "
            f"{result.messages_dropped} dropped)"
        )
        # Convert compressed dicts back to ChatMessages
        return [
            ChatMessage(
                role=m["role"],
                content=m.get("content"),
                tool_calls=m.get("tool_calls"),
                tool_call_id=m.get("tool_call_id"),
            )
            for m in result.messages
        ]

    # Nothing was compacted — return the prior messages untouched.
    return prior
|
||||||
|
|
||||||
|
|
||||||
|
def _format_conversation_context(messages: list[ChatMessage]) -> str | None:
    """Render prior messages as a <conversation_history> prefix block.

    User turns become "User: ..." lines and assistant turns become
    "You responded: ..." lines. Tool messages (internal details) and
    empty-content messages are skipped. Returns None when nothing remains
    to show.
    """
    prefixes = {"user": "User: ", "assistant": "You responded: "}
    rendered = [
        prefixes[m.role] + m.content
        for m in messages
        if m.content and m.role in prefixes
    ]
    if not rendered:
        return None
    return "<conversation_history>\n" + "\n".join(rendered) + "\n</conversation_history>"
|
||||||
|
|
||||||
|
|
||||||
|
async def stream_chat_completion_sdk(
    session_id: str,
    message: str | None = None,
    tool_call_response: str | None = None,  # noqa: ARG001
    is_user_message: bool = True,
    user_id: str | None = None,
    retry_count: int = 0,  # noqa: ARG001
    session: ChatSession | None = None,
    context: dict[str, str] | None = None,  # noqa: ARG001
) -> AsyncGenerator[StreamBaseResponse, None]:
    """Stream chat completion using Claude Agent SDK.

    Drop-in replacement for stream_chat_completion with improved reliability.

    Args:
        session_id: Chat session to stream into (must already exist).
        message: New message text to append before querying, if any.
        tool_call_response: Unused; kept for signature parity with the
            non-SDK path.
        is_user_message: Whether ``message`` is a user turn (vs assistant).
        user_id: Owner of the session; used for isolation and tracking.
        retry_count: Unused; kept for signature parity.
        session: Pre-loaded session to reuse; fetched by id when None.
        context: Unused; kept for signature parity.

    Yields:
        Stream* events: StreamStart, text deltas, tool input/output events,
        StreamError on failure, and a terminating StreamFinish.

    Raises:
        NotFoundError: If the session does not exist.
    """

    if session is None:
        session = await get_chat_session(session_id, user_id)

    if not session:
        raise NotFoundError(
            f"Session {session_id} not found. Please create a new session first."
        )

    # Append the incoming message (user or injected-assistant) to history.
    if message:
        session.messages.append(
            ChatMessage(
                role="user" if is_user_message else "assistant", content=message
            )
        )
        if is_user_message:
            track_user_message(
                user_id=user_id, session_id=session_id, message_length=len(message)
            )

    session = await upsert_chat_session(session)

    # Generate title for new sessions (first user message)
    if is_user_message and not session.title:
        user_messages = [m for m in session.messages if m.role == "user"]
        if len(user_messages) == 1:
            first_message = user_messages[0].content or message or ""
            if first_message:
                # Keep a strong reference so the task isn't GC'd mid-flight.
                task = asyncio.create_task(
                    _update_title_async(session_id, first_message, user_id)
                )
                _background_tasks.add(task)
                task.add_done_callback(_background_tasks.discard)

    # Build system prompt (reuses non-SDK path with Langfuse support)
    has_history = len(session.messages) > 1
    system_prompt, _ = await _build_system_prompt(
        user_id, has_conversation_history=has_history
    )
    # Make user/session visible to the MCP tool handlers via contextvars.
    set_execution_context(user_id, session, None)

    message_id = str(uuid.uuid4())
    text_block_id = str(uuid.uuid4())
    task_id = str(uuid.uuid4())

    yield StreamStart(messageId=message_id, taskId=task_id)

    stream_completed = False
    # Use a session-specific temp dir to avoid cleanup race conditions
    # between concurrent sessions.
    sdk_cwd = _make_sdk_cwd(session_id)
    os.makedirs(sdk_cwd, exist_ok=True)

    try:
        try:
            from claude_agent_sdk import ClaudeAgentOptions, ClaudeSDKClient

            mcp_server = create_copilot_mcp_server()

            options = ClaudeAgentOptions(
                system_prompt=system_prompt,
                mcp_servers={"copilot": mcp_server},  # type: ignore[arg-type]
                allowed_tools=COPILOT_TOOL_NAMES,
                hooks=create_security_hooks(user_id),  # type: ignore[arg-type]
                cwd=sdk_cwd,
            )

            # Adapter translates SDK messages into our Stream* responses.
            adapter = SDKResponseAdapter(message_id=message_id)
            adapter.set_task_id(task_id)

            async with ClaudeSDKClient(options=options) as client:
                # Resolve the text to send: explicit message, else the most
                # recent user turn already in the session.
                current_message = message or ""
                if not current_message and session.messages:
                    last_user = [m for m in session.messages if m.role == "user"]
                    if last_user:
                        current_message = last_user[-1].content or ""

                if not current_message.strip():
                    yield StreamError(
                        errorText="Message cannot be empty.",
                        code="empty_prompt",
                    )
                    yield StreamFinish()
                    return

                # Build query with conversation history context.
                # Compress history first to handle long conversations.
                query_message = current_message
                if len(session.messages) > 1:
                    compressed = await _compress_conversation_history(session)
                    history_context = _format_conversation_context(compressed)
                    if history_context:
                        query_message = (
                            f"{history_context}\n\n"
                            f"Now, the user says:\n{current_message}"
                        )

                logger.info(
                    f"[SDK] Sending query: {current_message[:80]!r}"
                    f" ({len(session.messages)} msgs in session)"
                )
                await client.query(query_message, session_id=session_id)

                # State for reassembling SDK stream events into ChatMessages.
                assistant_response = ChatMessage(role="assistant", content="")
                accumulated_tool_calls: list[dict[str, Any]] = []
                has_appended_assistant = False
                has_tool_results = False

                async for sdk_msg in client.receive_messages():
                    logger.debug(
                        f"[SDK] Received: {type(sdk_msg).__name__} "
                        f"{getattr(sdk_msg, 'subtype', '')}"
                    )
                    for response in adapter.convert_message(sdk_msg):
                        # StreamStart was already emitted above — drop dupes.
                        if isinstance(response, StreamStart):
                            continue
                        yield response

                        if isinstance(response, StreamTextDelta):
                            delta = response.delta or ""
                            # After tool results, start a new assistant
                            # message for the post-tool text.
                            if has_tool_results and has_appended_assistant:
                                assistant_response = ChatMessage(
                                    role="assistant", content=delta
                                )
                                accumulated_tool_calls = []
                                has_appended_assistant = False
                                has_tool_results = False
                                session.messages.append(assistant_response)
                                has_appended_assistant = True
                            else:
                                # Accumulate delta into the current message.
                                assistant_response.content = (
                                    assistant_response.content or ""
                                ) + delta
                                if not has_appended_assistant:
                                    session.messages.append(assistant_response)
                                    has_appended_assistant = True

                        elif isinstance(response, StreamToolInputAvailable):
                            # Record the tool call in OpenAI-style format.
                            accumulated_tool_calls.append(
                                {
                                    "id": response.toolCallId,
                                    "type": "function",
                                    "function": {
                                        "name": response.toolName,
                                        "arguments": json.dumps(response.input or {}),
                                    },
                                }
                            )
                            assistant_response.tool_calls = accumulated_tool_calls
                            if not has_appended_assistant:
                                session.messages.append(assistant_response)
                                has_appended_assistant = True

                        elif isinstance(response, StreamToolOutputAvailable):
                            session.messages.append(
                                ChatMessage(
                                    role="tool",
                                    content=(
                                        response.output
                                        if isinstance(response.output, str)
                                        else str(response.output)
                                    ),
                                    tool_call_id=response.toolCallId,
                                )
                            )
                            has_tool_results = True

                        elif isinstance(response, StreamFinish):
                            stream_completed = True

                    if stream_completed:
                        break

                # Flush a final assistant message that never got appended.
                if (
                    assistant_response.content or assistant_response.tool_calls
                ) and not has_appended_assistant:
                    session.messages.append(assistant_response)

        except ImportError:
            logger.warning(
                "[SDK] claude-agent-sdk not available, using Anthropic fallback"
            )
            async for response in stream_with_anthropic(
                session, system_prompt, text_block_id
            ):
                if isinstance(response, StreamFinish):
                    stream_completed = True
                yield response

        await upsert_chat_session(session)
        logger.debug(
            f"[SDK] Session {session_id} saved with {len(session.messages)} messages"
        )
        if not stream_completed:
            yield StreamFinish()

    except Exception as e:
        logger.error(f"[SDK] Error: {e}", exc_info=True)
        # Best-effort save so partial progress isn't lost on error.
        try:
            await upsert_chat_session(session)
        except Exception as save_err:
            logger.error(f"[SDK] Failed to save session on error: {save_err}")
        yield StreamError(
            errorText="An error occurred. Please try again.",
            code="sdk_error",
        )
        yield StreamFinish()
    finally:
        # Always clean up this session's temp dir and tool-result files.
        _cleanup_sdk_tool_results(sdk_cwd)
|
||||||
|
|
||||||
|
|
||||||
|
async def _update_title_async(
    session_id: str, message: str, user_id: str | None = None
) -> None:
    """Fire-and-forget helper: derive and persist a session title.

    Failures are logged rather than raised, since this runs as a detached
    background task.
    """
    try:
        generated = await _generate_session_title(
            message, user_id=user_id, session_id=session_id
        )
        if not generated:
            return
        await update_session_title(session_id, generated)
        logger.debug(f"[SDK] Generated title for {session_id}: {generated}")
    except Exception as e:
        logger.warning(f"[SDK] Failed to update session title: {e}")
|
||||||
@@ -0,0 +1,284 @@
|
|||||||
|
"""Tool adapter for wrapping existing CoPilot tools as Claude Agent SDK MCP tools.
|
||||||
|
|
||||||
|
This module provides the adapter layer that converts existing BaseTool implementations
|
||||||
|
into in-process MCP tools that can be used with the Claude Agent SDK.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import uuid
|
||||||
|
from contextvars import ContextVar
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
from backend.api.features.chat.model import ChatSession
|
||||||
|
from backend.api.features.chat.tools import TOOL_REGISTRY
|
||||||
|
from backend.api.features.chat.tools.base import BaseTool
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Allowed base directory for the Read tool (SDK saves oversized tool results here)
_SDK_TOOL_RESULTS_DIR = os.path.expanduser("~/.claude/")

# MCP server naming - the SDK prefixes tool names as "mcp__{server_name}__{tool}"
MCP_SERVER_NAME = "copilot"
MCP_TOOL_PREFIX = f"mcp__{MCP_SERVER_NAME}__"

# Context variables to pass user/session info to tool execution.
# ContextVar keeps these values isolated per async task/request, so
# concurrent chat streams don't see each other's user or session.
_current_user_id: ContextVar[str | None] = ContextVar("current_user_id", default=None)
_current_session: ContextVar[ChatSession | None] = ContextVar(
    "current_session", default=None
)
_current_tool_call_id: ContextVar[str | None] = ContextVar(
    "current_tool_call_id", default=None
)
|
||||||
|
|
||||||
|
def set_execution_context(
    user_id: str | None,
    session: ChatSession,
    tool_call_id: str | None = None,
) -> None:
    """Seed the per-context state that wrapped tools read at execution time.

    Must be called before streaming begins so each tool handler can resolve
    the current user, session, and (optionally) tool-call id.
    """
    for var, value in (
        (_current_user_id, user_id),
        (_current_session, session),
        (_current_tool_call_id, tool_call_id),
    ):
        var.set(value)
|
||||||
|
|
||||||
|
|
||||||
|
def get_execution_context() -> tuple[str | None, ChatSession | None, str | None]:
    """Return the active (user_id, session, tool_call_id) triple."""
    user_id = _current_user_id.get()
    session = _current_session.get()
    tool_call_id = _current_tool_call_id.get()
    return user_id, session, tool_call_id
|
||||||
|
|
||||||
|
|
||||||
|
def create_tool_handler(base_tool: BaseTool):
    """Create an async handler function for a BaseTool.

    This wraps the existing BaseTool._execute method to be compatible
    with the Claude Agent SDK MCP tool format.
    """

    async def tool_handler(args: dict[str, Any]) -> dict[str, Any]:
        """Execute the wrapped tool and return MCP-formatted response."""
        # Context comes from contextvars seeded by set_execution_context.
        user_id, session, tool_call_id = get_execution_context()

        # Tools cannot run without a session in context.
        if session is None:
            return {
                "content": [
                    {
                        "type": "text",
                        "text": json.dumps(
                            {
                                "error": "No session context available",
                                "type": "error",
                            }
                        ),
                    }
                ],
                "isError": True,
            }

        try:
            # Call the existing tool's execute method
            # Generate unique tool_call_id per invocation for proper correlation
            effective_id = tool_call_id or f"sdk-{uuid.uuid4().hex[:12]}"
            result = await base_tool.execute(
                user_id=user_id,
                session=session,
                tool_call_id=effective_id,
                **args,
            )

            # The result is a StreamToolOutputAvailable, extract the output
            text = (
                result.output
                if isinstance(result.output, str)
                else json.dumps(result.output)
            )

            return {
                "content": [{"type": "text", "text": text}],
                "isError": not result.success,
            }

        except Exception as e:
            logger.error(f"Error executing tool {base_tool.name}: {e}", exc_info=True)
            # Surface the failure to the model as a structured error payload.
            return {
                "content": [
                    {
                        "type": "text",
                        "text": json.dumps(
                            {
                                "error": str(e),
                                "type": "error",
                                "message": f"Failed to execute {base_tool.name}",
                            }
                        ),
                    }
                ],
                "isError": True,
            }

    return tool_handler
|
||||||
|
|
||||||
|
|
||||||
|
def _build_input_schema(base_tool: BaseTool) -> dict[str, Any]:
    """Build a JSON Schema input schema for a tool."""
    params = base_tool.parameters
    return {
        "type": "object",
        "properties": params.get("properties", {}),
        "required": params.get("required", []),
    }
|
||||||
|
|
||||||
|
|
||||||
|
def get_tool_definitions() -> list[dict[str, Any]]:
    """Get all tool definitions in MCP format.

    Returns a list of tool definitions that can be used with
    create_sdk_mcp_server or as raw tool definitions.
    """
    return [
        {
            "name": name,
            "description": impl.description,
            "inputSchema": _build_input_schema(impl),
        }
        for name, impl in TOOL_REGISTRY.items()
    ]
|
||||||
|
|
||||||
|
|
||||||
|
def get_tool_handlers() -> dict[str, Any]:
    """Map every registered tool name to its SDK-compatible handler."""
    return {
        name: create_tool_handler(impl) for name, impl in TOOL_REGISTRY.items()
    }
|
||||||
|
|
||||||
|
|
||||||
|
async def _read_file_handler(args: dict[str, Any]) -> dict[str, Any]:
    """Read a file with optional offset/limit, restricted to ~/.claude/.

    The SDK stores oversized tool results under the Claude home directory;
    this handler lets the model page through them with offset/limit.
    NOTE: files are NOT deleted after reading (paged reads need them to
    persist); per-session cleanup is handled by the service layer.

    Args (in ``args``):
        file_path: Absolute path to the file (must resolve under the
            allowed base directory).
        offset: First line to read, 0-indexed. Default 0; negative values
            are clamped to 0 to avoid surprising tail-slicing.
        limit: Maximum number of lines to return. Default 2000.

    Returns:
        MCP-style content dict; ``isError`` is True on denial or failure.
    """
    file_path = args.get("file_path", "")
    # Clamp negative offsets — a negative slice start would silently read
    # from the end of the file instead of the beginning.
    offset = max(0, args.get("offset", 0))
    limit = args.get("limit", 2000)

    # Security: resolve symlinks first, then require the real path to stay
    # under the SDK results directory (realpath + startswith sanitizer).
    real_path = os.path.realpath(file_path)
    if not real_path.startswith(_SDK_TOOL_RESULTS_DIR):
        return {
            "content": [{"type": "text", "text": f"Access denied: {file_path}"}],
            "isError": True,
        }

    try:
        with open(real_path) as f:
            lines = f.readlines()
        selected = lines[offset : offset + limit]
        content = "".join(selected)
        return {"content": [{"type": "text", "text": content}], "isError": False}
    except FileNotFoundError:
        return {
            "content": [{"type": "text", "text": f"File not found: {file_path}"}],
            "isError": True,
        }
    except Exception as e:
        return {
            "content": [{"type": "text", "text": f"Error reading file: {e}"}],
            "isError": True,
        }
|
||||||
|
|
||||||
|
|
||||||
|
# Metadata for the built-in Read tool exposed to the SDK.
_READ_TOOL_NAME = "Read"
_READ_TOOL_DESCRIPTION = (
    "Read a file from the local filesystem. "
    "Use offset and limit to read specific line ranges for large files."
)
# JSON Schema for the Read tool's arguments (mirrors _read_file_handler).
_READ_TOOL_SCHEMA = {
    "type": "object",
    "properties": {
        "file_path": {
            "type": "string",
            "description": "The absolute path to the file to read",
        },
        "offset": {
            "type": "integer",
            "description": "Line number to start reading from (0-indexed). Default: 0",
        },
        "limit": {
            "type": "integer",
            "description": "Number of lines to read. Default: 2000",
        },
    },
    "required": ["file_path"],
}
|
||||||
|
|
||||||
|
|
||||||
|
# Create the MCP server configuration
def create_copilot_mcp_server():
    """Create an in-process MCP server configuration for CoPilot tools.

    This can be passed to ClaudeAgentOptions.mcp_servers.

    Note: The actual SDK MCP server creation depends on the claude-agent-sdk
    package being available. This function returns the configuration that
    can be used with the SDK.
    """
    try:
        from claude_agent_sdk import create_sdk_mcp_server, tool

        # Create decorated tool functions
        sdk_tools = []

        for tool_name, base_tool in TOOL_REGISTRY.items():
            handler = create_tool_handler(base_tool)
            # tool(...) returns a decorator; apply it to the wrapped handler.
            decorated = tool(
                tool_name,
                base_tool.description,
                _build_input_schema(base_tool),
            )(handler)
            sdk_tools.append(decorated)

        # Add the Read tool so the SDK can read back oversized tool results
        read_tool = tool(
            _READ_TOOL_NAME,
            _READ_TOOL_DESCRIPTION,
            _READ_TOOL_SCHEMA,
        )(_read_file_handler)
        sdk_tools.append(read_tool)

        server = create_sdk_mcp_server(
            name=MCP_SERVER_NAME,
            version="1.0.0",
            tools=sdk_tools,
        )

        return server

    except ImportError:
        # Let ImportError propagate so service.py handles the fallback
        raise
|
||||||
|
|
||||||
|
|
||||||
|
# Fully-qualified tool names for ClaudeAgentOptions.allowed_tools.
# Includes the Read tool so the SDK can page through oversized tool results.
COPILOT_TOOL_NAMES = [
    MCP_TOOL_PREFIX + name
    for name in [*TOOL_REGISTRY.keys(), _READ_TOOL_NAME]
]
|
||||||
@@ -245,12 +245,16 @@ async def _get_system_prompt_template(context: str) -> str:
|
|||||||
return DEFAULT_SYSTEM_PROMPT.format(users_information=context)
|
return DEFAULT_SYSTEM_PROMPT.format(users_information=context)
|
||||||
|
|
||||||
|
|
||||||
async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]:
|
async def _build_system_prompt(
|
||||||
|
user_id: str | None, has_conversation_history: bool = False
|
||||||
|
) -> tuple[str, Any]:
|
||||||
"""Build the full system prompt including business understanding if available.
|
"""Build the full system prompt including business understanding if available.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
user_id: The user ID for fetching business understanding
|
user_id: The user ID for fetching business understanding.
|
||||||
If "default" and this is the user's first session, will use "onboarding" instead.
|
has_conversation_history: Whether there's existing conversation history.
|
||||||
|
If True, we don't tell the model to greet/introduce (since they're
|
||||||
|
already in a conversation).
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Tuple of (compiled prompt string, business understanding object)
|
Tuple of (compiled prompt string, business understanding object)
|
||||||
@@ -266,6 +270,8 @@ async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]:
|
|||||||
|
|
||||||
if understanding:
|
if understanding:
|
||||||
context = format_understanding_for_prompt(understanding)
|
context = format_understanding_for_prompt(understanding)
|
||||||
|
elif has_conversation_history:
|
||||||
|
context = "No prior understanding saved yet. Continue the existing conversation naturally."
|
||||||
else:
|
else:
|
||||||
context = "This is the first time you are meeting the user. Greet them and introduce them to the platform"
|
context = "This is the first time you are meeting the user. Greet them and introduce them to the platform"
|
||||||
|
|
||||||
@@ -374,7 +380,6 @@ async def stream_chat_completion(
|
|||||||
|
|
||||||
Raises:
|
Raises:
|
||||||
NotFoundError: If session_id is invalid
|
NotFoundError: If session_id is invalid
|
||||||
ValueError: If max_context_messages is exceeded
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
completion_start = time.monotonic()
|
completion_start = time.monotonic()
|
||||||
@@ -459,8 +464,9 @@ async def stream_chat_completion(
|
|||||||
|
|
||||||
# Generate title for new sessions on first user message (non-blocking)
|
# Generate title for new sessions on first user message (non-blocking)
|
||||||
# Check: is_user_message, no title yet, and this is the first user message
|
# Check: is_user_message, no title yet, and this is the first user message
|
||||||
if is_user_message and message and not session.title:
|
user_messages = [m for m in session.messages if m.role == "user"]
|
||||||
user_messages = [m for m in session.messages if m.role == "user"]
|
first_user_msg = message or (user_messages[0].content if user_messages else None)
|
||||||
|
if is_user_message and first_user_msg and not session.title:
|
||||||
if len(user_messages) == 1:
|
if len(user_messages) == 1:
|
||||||
# First user message - generate title in background
|
# First user message - generate title in background
|
||||||
import asyncio
|
import asyncio
|
||||||
@@ -468,7 +474,7 @@ async def stream_chat_completion(
|
|||||||
# Capture only the values we need (not the session object) to avoid
|
# Capture only the values we need (not the session object) to avoid
|
||||||
# stale data issues when the main flow modifies the session
|
# stale data issues when the main flow modifies the session
|
||||||
captured_session_id = session_id
|
captured_session_id = session_id
|
||||||
captured_message = message
|
captured_message = first_user_msg
|
||||||
captured_user_id = user_id
|
captured_user_id = user_id
|
||||||
|
|
||||||
async def _update_title():
|
async def _update_title():
|
||||||
@@ -1066,6 +1072,10 @@ async def _stream_chat_chunks(
|
|||||||
:128
|
:128
|
||||||
] # OpenRouter limit
|
] # OpenRouter limit
|
||||||
|
|
||||||
|
# Enable adaptive thinking for Anthropic models via OpenRouter
|
||||||
|
if config.thinking_enabled and "anthropic" in model.lower():
|
||||||
|
extra_body["reasoning"] = {"enabled": True}
|
||||||
|
|
||||||
api_call_start = time_module.perf_counter()
|
api_call_start = time_module.perf_counter()
|
||||||
stream = await client.chat.completions.create(
|
stream = await client.chat.completions.create(
|
||||||
model=model,
|
model=model,
|
||||||
@@ -1229,7 +1239,7 @@ async def _stream_chat_chunks(
|
|||||||
|
|
||||||
total_time = (time_module.perf_counter() - stream_chunks_start) * 1000
|
total_time = (time_module.perf_counter() - stream_chunks_start) * 1000
|
||||||
logger.info(
|
logger.info(
|
||||||
f"[TIMING] _stream_chat_chunks COMPLETED in {total_time/1000:.1f}s; "
|
f"[TIMING] _stream_chat_chunks COMPLETED in {total_time / 1000:.1f}s; "
|
||||||
f"session={session.session_id}, user={session.user_id}",
|
f"session={session.session_id}, user={session.user_id}",
|
||||||
extra={"json_fields": {**log_meta, "total_time_ms": total_time}},
|
extra={"json_fields": {**log_meta, "total_time_ms": total_time}},
|
||||||
)
|
)
|
||||||
@@ -1829,6 +1839,10 @@ async def _generate_llm_continuation(
|
|||||||
if session_id:
|
if session_id:
|
||||||
extra_body["session_id"] = session_id[:128]
|
extra_body["session_id"] = session_id[:128]
|
||||||
|
|
||||||
|
# Enable adaptive thinking for Anthropic models via OpenRouter
|
||||||
|
if config.thinking_enabled and "anthropic" in config.model.lower():
|
||||||
|
extra_body["reasoning"] = {"enabled": True}
|
||||||
|
|
||||||
retry_count = 0
|
retry_count = 0
|
||||||
last_error: Exception | None = None
|
last_error: Exception | None = None
|
||||||
response = None
|
response = None
|
||||||
@@ -1959,6 +1973,10 @@ async def _generate_llm_continuation_with_streaming(
|
|||||||
if session_id:
|
if session_id:
|
||||||
extra_body["session_id"] = session_id[:128]
|
extra_body["session_id"] = session_id[:128]
|
||||||
|
|
||||||
|
# Enable adaptive thinking for Anthropic models via OpenRouter
|
||||||
|
if config.thinking_enabled and "anthropic" in config.model.lower():
|
||||||
|
extra_body["reasoning"] = {"enabled": True}
|
||||||
|
|
||||||
# Make streaming LLM call (no tools - just text response)
|
# Make streaming LLM call (no tools - just text response)
|
||||||
from typing import cast
|
from typing import cast
|
||||||
|
|
||||||
|
|||||||
@@ -814,6 +814,28 @@ async def get_active_task_for_session(
|
|||||||
if task_user_id and user_id != task_user_id:
|
if task_user_id and user_id != task_user_id:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
# Auto-expire stale tasks that exceeded stream_timeout
|
||||||
|
created_at_str = meta.get("created_at", "")
|
||||||
|
if created_at_str:
|
||||||
|
try:
|
||||||
|
created_at = datetime.fromisoformat(created_at_str)
|
||||||
|
age_seconds = (
|
||||||
|
datetime.now(timezone.utc) - created_at
|
||||||
|
).total_seconds()
|
||||||
|
if age_seconds > config.stream_timeout:
|
||||||
|
logger.warning(
|
||||||
|
f"[TASK_LOOKUP] Auto-expiring stale task {task_id[:8]}... "
|
||||||
|
f"(age={age_seconds:.0f}s > timeout={config.stream_timeout}s)"
|
||||||
|
)
|
||||||
|
await mark_task_completed(task_id, "failed")
|
||||||
|
continue
|
||||||
|
except (ValueError, TypeError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
f"[TASK_LOOKUP] Found running task {task_id[:8]}... for session {session_id[:8]}..."
|
||||||
|
)
|
||||||
|
|
||||||
# Get the last message ID from Redis Stream
|
# Get the last message ID from Redis Stream
|
||||||
stream_key = _get_task_stream_key(task_id)
|
stream_key = _get_task_stream_key(task_id)
|
||||||
last_id = "0-0"
|
last_id = "0-0"
|
||||||
|
|||||||
102
autogpt_platform/backend/poetry.lock
generated
102
autogpt_platform/backend/poetry.lock
generated
@@ -46,14 +46,14 @@ pycares = ">=4.9.0,<5"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "aiofiles"
|
name = "aiofiles"
|
||||||
version = "24.1.0"
|
version = "25.1.0"
|
||||||
description = "File support for asyncio."
|
description = "File support for asyncio."
|
||||||
optional = false
|
optional = false
|
||||||
python-versions = ">=3.8"
|
python-versions = ">=3.9"
|
||||||
groups = ["main"]
|
groups = ["main"]
|
||||||
files = [
|
files = [
|
||||||
{file = "aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5"},
|
{file = "aiofiles-25.1.0-py3-none-any.whl", hash = "sha256:abe311e527c862958650f9438e859c1fa7568a141b22abcd015e120e86a85695"},
|
||||||
{file = "aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c"},
|
{file = "aiofiles-25.1.0.tar.gz", hash = "sha256:a8d728f0a29de45dc521f18f07297428d56992a742f0cd2701ba86e44d23d5b2"},
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -897,6 +897,29 @@ files = [
|
|||||||
{file = "charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a"},
|
{file = "charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a"},
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "claude-agent-sdk"
|
||||||
|
version = "0.1.35"
|
||||||
|
description = "Python SDK for Claude Code"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.10"
|
||||||
|
groups = ["main"]
|
||||||
|
files = [
|
||||||
|
{file = "claude_agent_sdk-0.1.35-py3-none-macosx_11_0_arm64.whl", hash = "sha256:df67f4deade77b16a9678b3a626c176498e40417f33b04beda9628287f375591"},
|
||||||
|
{file = "claude_agent_sdk-0.1.35-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:14963944f55ded7c8ed518feebfa5b4284aa6dd8d81aeff2e5b21a962ce65097"},
|
||||||
|
{file = "claude_agent_sdk-0.1.35-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:84344dcc535d179c1fc8a11c6f34c37c3b583447bdf09d869effb26514fd7a65"},
|
||||||
|
{file = "claude_agent_sdk-0.1.35-py3-none-win_amd64.whl", hash = "sha256:1b3d54b47448c93f6f372acd4d1757f047c3c1e8ef5804be7a1e3e53e2c79a5f"},
|
||||||
|
{file = "claude_agent_sdk-0.1.35.tar.gz", hash = "sha256:0f98e2b3c71ca85abfc042e7a35c648df88e87fda41c52e6779ef7b038dcbb52"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
anyio = ">=4.0.0"
|
||||||
|
mcp = ">=0.1.0"
|
||||||
|
typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""}
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
dev = ["anyio[trio] (>=4.0.0)", "mypy (>=1.0.0)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.20.0)", "pytest-cov (>=4.0.0)", "ruff (>=0.1.0)"]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cleo"
|
name = "cleo"
|
||||||
version = "2.1.0"
|
version = "2.1.0"
|
||||||
@@ -2593,6 +2616,18 @@ http2 = ["h2 (>=3,<5)"]
|
|||||||
socks = ["socksio (==1.*)"]
|
socks = ["socksio (==1.*)"]
|
||||||
zstd = ["zstandard (>=0.18.0)"]
|
zstd = ["zstandard (>=0.18.0)"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "httpx-sse"
|
||||||
|
version = "0.4.3"
|
||||||
|
description = "Consume Server-Sent Event (SSE) messages with HTTPX."
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.9"
|
||||||
|
groups = ["main"]
|
||||||
|
files = [
|
||||||
|
{file = "httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc"},
|
||||||
|
{file = "httpx_sse-0.4.3.tar.gz", hash = "sha256:9b1ed0127459a66014aec3c56bebd93da3c1bc8bb6618c8082039a44889a755d"},
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "huggingface-hub"
|
name = "huggingface-hub"
|
||||||
version = "1.4.1"
|
version = "1.4.1"
|
||||||
@@ -3310,6 +3345,39 @@ files = [
|
|||||||
{file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
|
{file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "mcp"
|
||||||
|
version = "1.26.0"
|
||||||
|
description = "Model Context Protocol SDK"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.10"
|
||||||
|
groups = ["main"]
|
||||||
|
files = [
|
||||||
|
{file = "mcp-1.26.0-py3-none-any.whl", hash = "sha256:904a21c33c25aa98ddbeb47273033c435e595bbacfdb177f4bd87f6dceebe1ca"},
|
||||||
|
{file = "mcp-1.26.0.tar.gz", hash = "sha256:db6e2ef491eecc1a0d93711a76f28dec2e05999f93afd48795da1c1137142c66"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
anyio = ">=4.5"
|
||||||
|
httpx = ">=0.27.1"
|
||||||
|
httpx-sse = ">=0.4"
|
||||||
|
jsonschema = ">=4.20.0"
|
||||||
|
pydantic = ">=2.11.0,<3.0.0"
|
||||||
|
pydantic-settings = ">=2.5.2"
|
||||||
|
pyjwt = {version = ">=2.10.1", extras = ["crypto"]}
|
||||||
|
python-multipart = ">=0.0.9"
|
||||||
|
pywin32 = {version = ">=310", markers = "sys_platform == \"win32\""}
|
||||||
|
sse-starlette = ">=1.6.1"
|
||||||
|
starlette = ">=0.27"
|
||||||
|
typing-extensions = ">=4.9.0"
|
||||||
|
typing-inspection = ">=0.4.1"
|
||||||
|
uvicorn = {version = ">=0.31.1", markers = "sys_platform != \"emscripten\""}
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
cli = ["python-dotenv (>=1.0.0)", "typer (>=0.16.0)"]
|
||||||
|
rich = ["rich (>=13.9.4)"]
|
||||||
|
ws = ["websockets (>=15.0.1)"]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "mdurl"
|
name = "mdurl"
|
||||||
version = "0.1.2"
|
version = "0.1.2"
|
||||||
@@ -5994,7 +6062,7 @@ description = "Python for Window Extensions"
|
|||||||
optional = false
|
optional = false
|
||||||
python-versions = "*"
|
python-versions = "*"
|
||||||
groups = ["main"]
|
groups = ["main"]
|
||||||
markers = "platform_system == \"Windows\""
|
markers = "sys_platform == \"win32\" or platform_system == \"Windows\""
|
||||||
files = [
|
files = [
|
||||||
{file = "pywin32-311-cp310-cp310-win32.whl", hash = "sha256:d03ff496d2a0cd4a5893504789d4a15399133fe82517455e78bad62efbb7f0a3"},
|
{file = "pywin32-311-cp310-cp310-win32.whl", hash = "sha256:d03ff496d2a0cd4a5893504789d4a15399133fe82517455e78bad62efbb7f0a3"},
|
||||||
{file = "pywin32-311-cp310-cp310-win_amd64.whl", hash = "sha256:797c2772017851984b97180b0bebe4b620bb86328e8a884bb626156295a63b3b"},
|
{file = "pywin32-311-cp310-cp310-win_amd64.whl", hash = "sha256:797c2772017851984b97180b0bebe4b620bb86328e8a884bb626156295a63b3b"},
|
||||||
@@ -6974,6 +7042,28 @@ postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"]
|
|||||||
pymysql = ["pymysql"]
|
pymysql = ["pymysql"]
|
||||||
sqlcipher = ["sqlcipher3_binary"]
|
sqlcipher = ["sqlcipher3_binary"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "sse-starlette"
|
||||||
|
version = "3.2.0"
|
||||||
|
description = "SSE plugin for Starlette"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.9"
|
||||||
|
groups = ["main"]
|
||||||
|
files = [
|
||||||
|
{file = "sse_starlette-3.2.0-py3-none-any.whl", hash = "sha256:5876954bd51920fc2cd51baee47a080eb88a37b5b784e615abb0b283f801cdbf"},
|
||||||
|
{file = "sse_starlette-3.2.0.tar.gz", hash = "sha256:8127594edfb51abe44eac9c49e59b0b01f1039d0c7461c6fd91d4e03b70da422"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
anyio = ">=4.7.0"
|
||||||
|
starlette = ">=0.49.1"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
daphne = ["daphne (>=4.2.0)"]
|
||||||
|
examples = ["aiosqlite (>=0.21.0)", "fastapi (>=0.115.12)", "sqlalchemy[asyncio] (>=2.0.41)", "uvicorn (>=0.34.0)"]
|
||||||
|
granian = ["granian (>=2.3.1)"]
|
||||||
|
uvicorn = ["uvicorn (>=0.34.0)"]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "stagehand"
|
name = "stagehand"
|
||||||
version = "0.5.9"
|
version = "0.5.9"
|
||||||
@@ -8440,4 +8530,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
|
|||||||
[metadata]
|
[metadata]
|
||||||
lock-version = "2.1"
|
lock-version = "2.1"
|
||||||
python-versions = ">=3.10,<3.14"
|
python-versions = ">=3.10,<3.14"
|
||||||
content-hash = "fc135114e01de39c8adf70f6132045e7d44a19473c1279aee0978de65aad1655"
|
content-hash = "942dea6daf671c3be65a22f3445feda26c1af9409d7173765e9a0742f0aa05dc"
|
||||||
|
|||||||
@@ -16,6 +16,7 @@ anthropic = "^0.79.0"
|
|||||||
apscheduler = "^3.11.1"
|
apscheduler = "^3.11.1"
|
||||||
autogpt-libs = { path = "../autogpt_libs", develop = true }
|
autogpt-libs = { path = "../autogpt_libs", develop = true }
|
||||||
bleach = { extras = ["css"], version = "^6.2.0" }
|
bleach = { extras = ["css"], version = "^6.2.0" }
|
||||||
|
claude-agent-sdk = "^0.1.0"
|
||||||
click = "^8.2.0"
|
click = "^8.2.0"
|
||||||
cryptography = "^46.0"
|
cryptography = "^46.0"
|
||||||
discord-py = "^2.5.2"
|
discord-py = "^2.5.2"
|
||||||
@@ -76,7 +77,7 @@ yt-dlp = "2025.12.08"
|
|||||||
zerobouncesdk = "^1.1.2"
|
zerobouncesdk = "^1.1.2"
|
||||||
# NOTE: please insert new dependencies in their alphabetical location
|
# NOTE: please insert new dependencies in their alphabetical location
|
||||||
pytest-snapshot = "^0.9.0"
|
pytest-snapshot = "^0.9.0"
|
||||||
aiofiles = "^24.1.0"
|
aiofiles = "^25.1.0"
|
||||||
tiktoken = "^0.12.0"
|
tiktoken = "^0.12.0"
|
||||||
aioclamd = "^1.0.0"
|
aioclamd = "^1.0.0"
|
||||||
setuptools = "^80.9.0"
|
setuptools = "^80.9.0"
|
||||||
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
"use client";
|
"use client";
|
||||||
|
|
||||||
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
|
|
||||||
import { SidebarProvider } from "@/components/ui/sidebar";
|
import { SidebarProvider } from "@/components/ui/sidebar";
|
||||||
import { ChatContainer } from "./components/ChatContainer/ChatContainer";
|
import { ChatContainer } from "./components/ChatContainer/ChatContainer";
|
||||||
import { ChatSidebar } from "./components/ChatSidebar/ChatSidebar";
|
import { ChatSidebar } from "./components/ChatSidebar/ChatSidebar";
|
||||||
import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer";
|
import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer";
|
||||||
import { MobileHeader } from "./components/MobileHeader/MobileHeader";
|
import { MobileHeader } from "./components/MobileHeader/MobileHeader";
|
||||||
|
import { ScaleLoader } from "./components/ScaleLoader/ScaleLoader";
|
||||||
import { useCopilotPage } from "./useCopilotPage";
|
import { useCopilotPage } from "./useCopilotPage";
|
||||||
|
|
||||||
export function CopilotPage() {
|
export function CopilotPage() {
|
||||||
@@ -34,7 +34,11 @@ export function CopilotPage() {
|
|||||||
} = useCopilotPage();
|
} = useCopilotPage();
|
||||||
|
|
||||||
if (isUserLoading || !isLoggedIn) {
|
if (isUserLoading || !isLoggedIn) {
|
||||||
return <LoadingSpinner size="large" cover />;
|
return (
|
||||||
|
<div className="fixed inset-0 z-50 flex items-center justify-center bg-[#f8f8f9]">
|
||||||
|
<ScaleLoader className="text-neutral-400" />
|
||||||
|
</div>
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
|
|||||||
@@ -143,10 +143,10 @@ export const ChatMessagesContainer = ({
|
|||||||
|
|
||||||
return (
|
return (
|
||||||
<Conversation className="min-h-0 flex-1">
|
<Conversation className="min-h-0 flex-1">
|
||||||
<ConversationContent className="gap-6 px-3 py-6">
|
<ConversationContent className="flex min-h-screen flex-1 flex-col gap-6 px-3 py-6">
|
||||||
{isLoading && messages.length === 0 && (
|
{isLoading && messages.length === 0 && (
|
||||||
<div className="flex flex-1 items-center justify-center">
|
<div className="flex min-h-full flex-1 items-center justify-center">
|
||||||
<LoadingSpinner size="large" className="text-neutral-400" />
|
<LoadingSpinner className="text-neutral-600" />
|
||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
{messages.map((message, messageIndex) => {
|
{messages.map((message, messageIndex) => {
|
||||||
|
|||||||
@@ -121,8 +121,8 @@ export function ChatSidebar() {
|
|||||||
className="mt-4 flex flex-col gap-1"
|
className="mt-4 flex flex-col gap-1"
|
||||||
>
|
>
|
||||||
{isLoadingSessions ? (
|
{isLoadingSessions ? (
|
||||||
<div className="flex items-center justify-center py-4">
|
<div className="flex min-h-[30rem] items-center justify-center py-4">
|
||||||
<LoadingSpinner size="small" className="text-neutral-400" />
|
<LoadingSpinner size="small" className="text-neutral-600" />
|
||||||
</div>
|
</div>
|
||||||
) : sessions.length === 0 ? (
|
) : sessions.length === 0 ? (
|
||||||
<p className="py-4 text-center text-sm text-neutral-500">
|
<p className="py-4 text-center text-sm text-neutral-500">
|
||||||
|
|||||||
@@ -0,0 +1,35 @@
|
|||||||
|
.loader {
|
||||||
|
width: 48px;
|
||||||
|
height: 48px;
|
||||||
|
display: inline-block;
|
||||||
|
position: relative;
|
||||||
|
}
|
||||||
|
|
||||||
|
.loader::after,
|
||||||
|
.loader::before {
|
||||||
|
content: "";
|
||||||
|
box-sizing: border-box;
|
||||||
|
width: 100%;
|
||||||
|
height: 100%;
|
||||||
|
border-radius: 50%;
|
||||||
|
background: currentColor;
|
||||||
|
position: absolute;
|
||||||
|
left: 0;
|
||||||
|
top: 0;
|
||||||
|
animation: animloader 2s linear infinite;
|
||||||
|
}
|
||||||
|
|
||||||
|
.loader::after {
|
||||||
|
animation-delay: 1s;
|
||||||
|
}
|
||||||
|
|
||||||
|
@keyframes animloader {
|
||||||
|
0% {
|
||||||
|
transform: scale(0);
|
||||||
|
opacity: 1;
|
||||||
|
}
|
||||||
|
100% {
|
||||||
|
transform: scale(1);
|
||||||
|
opacity: 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,16 @@
|
|||||||
|
import { cn } from "@/lib/utils";
|
||||||
|
import styles from "./ScaleLoader.module.css";
|
||||||
|
|
||||||
|
interface Props {
|
||||||
|
size?: number;
|
||||||
|
className?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function ScaleLoader({ size = 48, className }: Props) {
|
||||||
|
return (
|
||||||
|
<div
|
||||||
|
className={cn(styles.loader, className)}
|
||||||
|
style={{ width: size, height: size }}
|
||||||
|
/>
|
||||||
|
);
|
||||||
|
}
|
||||||
@@ -49,12 +49,7 @@ interface Props {
|
|||||||
part: CreateAgentToolPart;
|
part: CreateAgentToolPart;
|
||||||
}
|
}
|
||||||
|
|
||||||
function getAccordionMeta(output: CreateAgentToolOutput): {
|
function getAccordionMeta(output: CreateAgentToolOutput) {
|
||||||
icon: React.ReactNode;
|
|
||||||
title: React.ReactNode;
|
|
||||||
titleClassName?: string;
|
|
||||||
description?: string;
|
|
||||||
} {
|
|
||||||
const icon = <AccordionIcon />;
|
const icon = <AccordionIcon />;
|
||||||
|
|
||||||
if (isAgentSavedOutput(output)) {
|
if (isAgentSavedOutput(output)) {
|
||||||
@@ -73,6 +68,7 @@ function getAccordionMeta(output: CreateAgentToolOutput): {
|
|||||||
icon,
|
icon,
|
||||||
title: "Needs clarification",
|
title: "Needs clarification",
|
||||||
description: `${questions.length} question${questions.length === 1 ? "" : "s"}`,
|
description: `${questions.length} question${questions.length === 1 ? "" : "s"}`,
|
||||||
|
expanded: true,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
if (
|
if (
|
||||||
@@ -97,18 +93,23 @@ function getAccordionMeta(output: CreateAgentToolOutput): {
|
|||||||
export function CreateAgentTool({ part }: Props) {
|
export function CreateAgentTool({ part }: Props) {
|
||||||
const text = getAnimationText(part);
|
const text = getAnimationText(part);
|
||||||
const { onSend } = useCopilotChatActions();
|
const { onSend } = useCopilotChatActions();
|
||||||
|
|
||||||
const isStreaming =
|
const isStreaming =
|
||||||
part.state === "input-streaming" || part.state === "input-available";
|
part.state === "input-streaming" || part.state === "input-available";
|
||||||
|
|
||||||
const output = getCreateAgentToolOutput(part);
|
const output = getCreateAgentToolOutput(part);
|
||||||
|
|
||||||
const isError =
|
const isError =
|
||||||
part.state === "output-error" || (!!output && isErrorOutput(output));
|
part.state === "output-error" || (!!output && isErrorOutput(output));
|
||||||
|
|
||||||
const isOperating =
|
const isOperating =
|
||||||
!!output &&
|
!!output &&
|
||||||
(isOperationStartedOutput(output) ||
|
(isOperationStartedOutput(output) ||
|
||||||
isOperationPendingOutput(output) ||
|
isOperationPendingOutput(output) ||
|
||||||
isOperationInProgressOutput(output));
|
isOperationInProgressOutput(output));
|
||||||
|
|
||||||
const progress = useAsymptoticProgress(isOperating);
|
const progress = useAsymptoticProgress(isOperating);
|
||||||
|
|
||||||
const hasExpandableContent =
|
const hasExpandableContent =
|
||||||
part.state === "output-available" &&
|
part.state === "output-available" &&
|
||||||
!!output &&
|
!!output &&
|
||||||
@@ -149,10 +150,7 @@ export function CreateAgentTool({ part }: Props) {
|
|||||||
</div>
|
</div>
|
||||||
|
|
||||||
{hasExpandableContent && output && (
|
{hasExpandableContent && output && (
|
||||||
<ToolAccordion
|
<ToolAccordion {...getAccordionMeta(output)}>
|
||||||
{...getAccordionMeta(output)}
|
|
||||||
defaultExpanded={isOperating || isClarificationNeededOutput(output)}
|
|
||||||
>
|
|
||||||
{isOperating && (
|
{isOperating && (
|
||||||
<ContentGrid>
|
<ContentGrid>
|
||||||
<ProgressBar value={progress} className="max-w-[280px]" />
|
<ProgressBar value={progress} className="max-w-[280px]" />
|
||||||
|
|||||||
@@ -146,10 +146,7 @@ export function EditAgentTool({ part }: Props) {
|
|||||||
</div>
|
</div>
|
||||||
|
|
||||||
{hasExpandableContent && output && (
|
{hasExpandableContent && output && (
|
||||||
<ToolAccordion
|
<ToolAccordion {...getAccordionMeta(output)}>
|
||||||
{...getAccordionMeta(output)}
|
|
||||||
defaultExpanded={isOperating || isClarificationNeededOutput(output)}
|
|
||||||
>
|
|
||||||
{isOperating && (
|
{isOperating && (
|
||||||
<ContentGrid>
|
<ContentGrid>
|
||||||
<ProgressBar value={progress} className="max-w-[280px]" />
|
<ProgressBar value={progress} className="max-w-[280px]" />
|
||||||
|
|||||||
@@ -61,14 +61,7 @@ export function RunAgentTool({ part }: Props) {
|
|||||||
</div>
|
</div>
|
||||||
|
|
||||||
{hasExpandableContent && output && (
|
{hasExpandableContent && output && (
|
||||||
<ToolAccordion
|
<ToolAccordion {...getAccordionMeta(output)}>
|
||||||
{...getAccordionMeta(output)}
|
|
||||||
defaultExpanded={
|
|
||||||
isRunAgentExecutionStartedOutput(output) ||
|
|
||||||
isRunAgentSetupRequirementsOutput(output) ||
|
|
||||||
isRunAgentAgentDetailsOutput(output)
|
|
||||||
}
|
|
||||||
>
|
|
||||||
{isRunAgentExecutionStartedOutput(output) && (
|
{isRunAgentExecutionStartedOutput(output) && (
|
||||||
<ExecutionStartedCard output={output} />
|
<ExecutionStartedCard output={output} />
|
||||||
)}
|
)}
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ import {
|
|||||||
WarningDiamondIcon,
|
WarningDiamondIcon,
|
||||||
} from "@phosphor-icons/react";
|
} from "@phosphor-icons/react";
|
||||||
import type { ToolUIPart } from "ai";
|
import type { ToolUIPart } from "ai";
|
||||||
import { SpinnerLoader } from "../../components/SpinnerLoader/SpinnerLoader";
|
import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
|
||||||
|
|
||||||
export interface RunAgentInput {
|
export interface RunAgentInput {
|
||||||
username_agent_slug?: string;
|
username_agent_slug?: string;
|
||||||
@@ -171,7 +171,7 @@ export function ToolIcon({
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
if (isStreaming) {
|
if (isStreaming) {
|
||||||
return <SpinnerLoader size={40} className="text-neutral-700" />;
|
return <OrbitLoader size={24} />;
|
||||||
}
|
}
|
||||||
return <PlayIcon size={14} weight="regular" className="text-neutral-400" />;
|
return <PlayIcon size={14} weight="regular" className="text-neutral-400" />;
|
||||||
}
|
}
|
||||||
@@ -203,7 +203,7 @@ export function getAccordionMeta(output: RunAgentToolOutput): {
|
|||||||
? output.status.trim()
|
? output.status.trim()
|
||||||
: "started";
|
: "started";
|
||||||
return {
|
return {
|
||||||
icon: <SpinnerLoader size={28} className="text-neutral-700" />,
|
icon: <OrbitLoader size={28} className="text-neutral-700" />,
|
||||||
title: output.graph_name,
|
title: output.graph_name,
|
||||||
description: `Status: ${statusText}`,
|
description: `Status: ${statusText}`,
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -55,13 +55,7 @@ export function RunBlockTool({ part }: Props) {
|
|||||||
</div>
|
</div>
|
||||||
|
|
||||||
{hasExpandableContent && output && (
|
{hasExpandableContent && output && (
|
||||||
<ToolAccordion
|
<ToolAccordion {...getAccordionMeta(output)}>
|
||||||
{...getAccordionMeta(output)}
|
|
||||||
defaultExpanded={
|
|
||||||
isRunBlockBlockOutput(output) ||
|
|
||||||
isRunBlockSetupRequirementsOutput(output)
|
|
||||||
}
|
|
||||||
>
|
|
||||||
{isRunBlockBlockOutput(output) && <BlockOutputCard output={output} />}
|
{isRunBlockBlockOutput(output) && <BlockOutputCard output={output} />}
|
||||||
|
|
||||||
{isRunBlockSetupRequirementsOutput(output) && (
|
{isRunBlockSetupRequirementsOutput(output) && (
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ import {
|
|||||||
WarningDiamondIcon,
|
WarningDiamondIcon,
|
||||||
} from "@phosphor-icons/react";
|
} from "@phosphor-icons/react";
|
||||||
import type { ToolUIPart } from "ai";
|
import type { ToolUIPart } from "ai";
|
||||||
import { SpinnerLoader } from "../../components/SpinnerLoader/SpinnerLoader";
|
import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
|
||||||
|
|
||||||
export interface RunBlockInput {
|
export interface RunBlockInput {
|
||||||
block_id?: string;
|
block_id?: string;
|
||||||
@@ -120,7 +120,7 @@ export function ToolIcon({
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
if (isStreaming) {
|
if (isStreaming) {
|
||||||
return <SpinnerLoader size={40} className="text-neutral-700" />;
|
return <OrbitLoader size={24} />;
|
||||||
}
|
}
|
||||||
return <PlayIcon size={14} weight="regular" className="text-neutral-400" />;
|
return <PlayIcon size={14} weight="regular" className="text-neutral-400" />;
|
||||||
}
|
}
|
||||||
@@ -149,7 +149,7 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
|
|||||||
if (isRunBlockBlockOutput(output)) {
|
if (isRunBlockBlockOutput(output)) {
|
||||||
const keys = Object.keys(output.outputs ?? {});
|
const keys = Object.keys(output.outputs ?? {});
|
||||||
return {
|
return {
|
||||||
icon: <SpinnerLoader size={32} className="text-neutral-700" />,
|
icon: <OrbitLoader size={24} className="text-neutral-700" />,
|
||||||
title: output.block_name,
|
title: output.block_name,
|
||||||
description:
|
description:
|
||||||
keys.length > 0
|
keys.length > 0
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ import { useBreakpoint } from "@/lib/hooks/useBreakpoint";
|
|||||||
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
|
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
|
||||||
import { useChat } from "@ai-sdk/react";
|
import { useChat } from "@ai-sdk/react";
|
||||||
import { DefaultChatTransport } from "ai";
|
import { DefaultChatTransport } from "ai";
|
||||||
import { useRouter } from "next/navigation";
|
|
||||||
import { useEffect, useMemo, useState } from "react";
|
import { useEffect, useMemo, useState } from "react";
|
||||||
import { useChatSession } from "./useChatSession";
|
import { useChatSession } from "./useChatSession";
|
||||||
|
|
||||||
@@ -11,7 +10,6 @@ export function useCopilotPage() {
|
|||||||
const { isUserLoading, isLoggedIn } = useSupabase();
|
const { isUserLoading, isLoggedIn } = useSupabase();
|
||||||
const [isDrawerOpen, setIsDrawerOpen] = useState(false);
|
const [isDrawerOpen, setIsDrawerOpen] = useState(false);
|
||||||
const [pendingMessage, setPendingMessage] = useState<string | null>(null);
|
const [pendingMessage, setPendingMessage] = useState<string | null>(null);
|
||||||
const router = useRouter();
|
|
||||||
|
|
||||||
const {
|
const {
|
||||||
sessionId,
|
sessionId,
|
||||||
@@ -54,10 +52,6 @@ export function useCopilotPage() {
|
|||||||
transport: transport ?? undefined,
|
transport: transport ?? undefined,
|
||||||
});
|
});
|
||||||
|
|
||||||
useEffect(() => {
|
|
||||||
if (!isUserLoading && !isLoggedIn) router.replace("/login");
|
|
||||||
}, [isUserLoading, isLoggedIn]);
|
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (!hydratedMessages || hydratedMessages.length === 0) return;
|
if (!hydratedMessages || hydratedMessages.length === 0) return;
|
||||||
setMessages((prev) => {
|
setMessages((prev) => {
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import { SupabaseClient } from "@supabase/supabase-js";
|
|||||||
export const PROTECTED_PAGES = [
|
export const PROTECTED_PAGES = [
|
||||||
"/auth/authorize",
|
"/auth/authorize",
|
||||||
"/auth/integrations",
|
"/auth/integrations",
|
||||||
|
"/copilot",
|
||||||
"/monitor",
|
"/monitor",
|
||||||
"/build",
|
"/build",
|
||||||
"/onboarding",
|
"/onboarding",
|
||||||
|
|||||||
Reference in New Issue
Block a user