Mirror of https://github.com/Significant-Gravitas/AutoGPT.git
Merge branch 'dev' into add-llm-manager-ui
.gitignore (vendored) · 4 changes

@@ -180,4 +180,6 @@ autogpt_platform/backend/settings.py
.claude/settings.local.json
CLAUDE.local.md
/autogpt_platform/backend/logs
.next
+# Implementation plans (generated by AI agents)
+plans/
@@ -1,3 +1,10 @@
+default_install_hook_types:
+  - pre-commit
+  - pre-push
+  - post-checkout
+
+default_stages: [pre-commit]
+
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0

@@ -17,6 +24,7 @@ repos:
        name: Detect secrets
        description: Detects high entropy strings that are likely to be passwords.
        files: ^autogpt_platform/
        exclude: pnpm-lock\.yaml$
        stages: [pre-push]

  - repo: local

@@ -26,49 +34,106 @@ repos:
      - id: poetry-install
        name: Check & Install dependencies - AutoGPT Platform - Backend
        alias: poetry-install-platform-backend
-       entry: poetry -C autogpt_platform/backend install
        # include autogpt_libs source (since it's a path dependency)
        files: ^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$
        types: [file]
+       entry: >
+         bash -c '
+           if [ -n "$PRE_COMMIT_FROM_REF" ]; then
+             git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
+           else
+             git diff --cached --name-only
+           fi | grep -qE "^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$" || exit 0;
+           poetry -C autogpt_platform/backend install
+         '
+       always_run: true
        language: system
        pass_filenames: false
        stages: [pre-commit, post-checkout]

      - id: poetry-install
        name: Check & Install dependencies - AutoGPT Platform - Libs
        alias: poetry-install-platform-libs
-       entry: poetry -C autogpt_platform/autogpt_libs install
        files: ^autogpt_platform/autogpt_libs/poetry\.lock$
        types: [file]
+       entry: >
+         bash -c '
+           if [ -n "$PRE_COMMIT_FROM_REF" ]; then
+             git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
+           else
+             git diff --cached --name-only
+           fi | grep -qE "^autogpt_platform/autogpt_libs/poetry\.lock$" || exit 0;
+           poetry -C autogpt_platform/autogpt_libs install
+         '
+       always_run: true
        language: system
        pass_filenames: false
        stages: [pre-commit, post-checkout]

      - id: pnpm-install
        name: Check & Install dependencies - AutoGPT Platform - Frontend
        alias: pnpm-install-platform-frontend
+       entry: >
+         bash -c '
+           if [ -n "$PRE_COMMIT_FROM_REF" ]; then
+             git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
+           else
+             git diff --cached --name-only
+           fi | grep -qE "^autogpt_platform/frontend/pnpm-lock\.yaml$" || exit 0;
+           pnpm --prefix autogpt_platform/frontend install
+         '
+       always_run: true
        language: system
        pass_filenames: false
        stages: [pre-commit, post-checkout]

      - id: poetry-install
        name: Check & Install dependencies - Classic - AutoGPT
        alias: poetry-install-classic-autogpt
-       entry: poetry -C classic/original_autogpt install
+       entry: >
+         bash -c '
+           if [ -n "$PRE_COMMIT_FROM_REF" ]; then
+             git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
+           else
+             git diff --cached --name-only
+           fi | grep -qE "^classic/(original_autogpt|forge)/poetry\.lock$" || exit 0;
+           poetry -C classic/original_autogpt install
+         '
        # include forge source (since it's a path dependency)
        files: ^classic/(original_autogpt|forge)/poetry\.lock$
        types: [file]
+       always_run: true
        language: system
        pass_filenames: false
        stages: [pre-commit, post-checkout]

      - id: poetry-install
        name: Check & Install dependencies - Classic - Forge
        alias: poetry-install-classic-forge
-       entry: poetry -C classic/forge install
        files: ^classic/forge/poetry\.lock$
        types: [file]
+       entry: >
+         bash -c '
+           if [ -n "$PRE_COMMIT_FROM_REF" ]; then
+             git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
+           else
+             git diff --cached --name-only
+           fi | grep -qE "^classic/forge/poetry\.lock$" || exit 0;
+           poetry -C classic/forge install
+         '
+       always_run: true
        language: system
        pass_filenames: false
        stages: [pre-commit, post-checkout]

      - id: poetry-install
        name: Check & Install dependencies - Classic - Benchmark
        alias: poetry-install-classic-benchmark
-       entry: poetry -C classic/benchmark install
        files: ^classic/benchmark/poetry\.lock$
        types: [file]
+       entry: >
+         bash -c '
+           if [ -n "$PRE_COMMIT_FROM_REF" ]; then
+             git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
+           else
+             git diff --cached --name-only
+           fi | grep -qE "^classic/benchmark/poetry\.lock$" || exit 0;
+           poetry -C classic/benchmark install
+         '
+       always_run: true
        language: system
        pass_filenames: false
        stages: [pre-commit, post-checkout]

  - repo: local
    # For proper type checking, Prisma client must be up-to-date.

@@ -76,12 +141,54 @@ repos:
      - id: prisma-generate
        name: Prisma Generate - AutoGPT Platform - Backend
        alias: prisma-generate-platform-backend
-       entry: bash -c 'cd autogpt_platform/backend && poetry run prisma generate'
+       entry: >
+         bash -c '
+           if [ -n "$PRE_COMMIT_FROM_REF" ]; then
+             git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
+           else
+             git diff --cached --name-only
+           fi | grep -qE "^autogpt_platform/((backend|autogpt_libs)/poetry\.lock|backend/schema\.prisma)$" || exit 0;
+           cd autogpt_platform/backend
+           && poetry run prisma generate
+           && poetry run gen-prisma-stub
+         '
        # include everything that triggers poetry install + the prisma schema
        files: ^autogpt_platform/((backend|autogpt_libs)/poetry\.lock|backend/schema.prisma)$
        types: [file]
+       always_run: true
        language: system
        pass_filenames: false
        stages: [pre-commit, post-checkout]

      - id: export-api-schema
        name: Export API schema - AutoGPT Platform - Backend -> Frontend
        alias: export-api-schema-platform
        entry: >
          bash -c '
            cd autogpt_platform/backend
            && poetry run export-api-schema --output ../frontend/src/app/api/openapi.json
            && cd ../frontend
            && pnpm prettier --write ./src/app/api/openapi.json
          '
        files: ^autogpt_platform/backend/
        language: system
        pass_filenames: false

+     - id: generate-api-client
+       name: Generate API client - AutoGPT Platform - Frontend
+       alias: generate-api-client-platform-frontend
+       entry: >
+         bash -c '
+           SCHEMA=autogpt_platform/frontend/src/app/api/openapi.json;
+           if [ -n "$PRE_COMMIT_FROM_REF" ]; then
+             git diff --quiet "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF" -- "$SCHEMA" && exit 0
+           else
+             git diff --quiet HEAD -- "$SCHEMA" && exit 0
+           fi;
+           cd autogpt_platform/frontend && pnpm generate:api
+         '
+       always_run: true
+       language: system
+       pass_filenames: false
+       stages: [pre-commit, post-checkout]

  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.7.2
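Every install hook above repeats the same bash guard: diff the pushed ref range (when pre-commit exports PRE_COMMIT_FROM_REF/PRE_COMMIT_TO_REF) or the staged index, and bail out unless a matching lock file changed. A minimal Python sketch of that guard; the helper name lockfile_changed is hypothetical, not part of the repo:

import os
import re
import subprocess

def lockfile_changed(pattern: str) -> bool:
    """Return True if any file matching `pattern` changed in this hook run.

    Mirrors the bash snippet above: on pre-push, pre-commit sets
    PRE_COMMIT_FROM_REF/PRE_COMMIT_TO_REF; on pre-commit we diff the index.
    (Illustrative sketch only.)
    """
    from_ref = os.environ.get("PRE_COMMIT_FROM_REF")
    if from_ref:
        to_ref = os.environ.get("PRE_COMMIT_TO_REF", "HEAD")
        cmd = ["git", "diff", "--name-only", from_ref, to_ref]
    else:
        cmd = ["git", "diff", "--cached", "--name-only"]
    changed = subprocess.run(cmd, capture_output=True, text=True, check=True).stdout
    return any(re.match(pattern, line) for line in changed.splitlines())

# Usage, matching the backend hook's filter:
# if lockfile_changed(r"^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$"):
#     subprocess.run(["poetry", "-C", "autogpt_platform/backend", "install"], check=True)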
@@ -88,20 +88,23 @@ async def require_auth(
    )


-def require_permission(permission: APIKeyPermission):
+def require_permission(*permissions: APIKeyPermission):
    """
-   Dependency function for checking specific permissions
+   Dependency function for checking required permissions.
+   All listed permissions must be present.
    (works with API keys and OAuth tokens)
    """

-   async def check_permission(
+   async def check_permissions(
        auth: APIAuthorizationInfo = Security(require_auth),
    ) -> APIAuthorizationInfo:
-       if permission not in auth.scopes:
+       missing = [p for p in permissions if p not in auth.scopes]
+       if missing:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
-               detail=f"Missing required permission: {permission.value}",
+               detail=f"Missing required permission(s): "
+               f"{', '.join(p.value for p in missing)}",
            )
        return auth

-   return check_permission
+   return check_permissions
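The shape above is a dependency factory: require_permission(*permissions) closes over the required scopes and returns a checker that FastAPI resolves per request. A standalone sketch of the same pattern, using placeholder names (MyScope, fake_granted_scopes) rather than the project's types:

from enum import Enum

from fastapi import Depends, FastAPI, HTTPException, status

app = FastAPI()

class MyScope(str, Enum):
    READ = "read"
    WRITE = "write"

def fake_granted_scopes() -> list[str]:
    # Placeholder for real token/API-key introspection.
    return ["read"]

def require_all(*scopes: MyScope):
    # The closure captures the required scopes; FastAPI calls the checker per request.
    async def checker(granted: list[str] = Depends(fake_granted_scopes)) -> list[str]:
        missing = [s for s in scopes if s.value not in granted]
        if missing:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail="Missing: " + ", ".join(s.value for s in missing),
            )
        return granted
    return checker

@app.get("/items", dependencies=[Depends(require_all(MyScope.READ, MyScope.WRITE))])
async def items() -> list[str]:
    return []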
@@ -18,6 +18,7 @@ from backend.data import user as user_db
from backend.data.auth.base import APIAuthorizationInfo
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.executor.utils import add_graph_execution
+from backend.integrations.webhooks.graph_lifecycle_hooks import on_graph_activate
from backend.util.settings import Settings

from .integrations import integrations_router

@@ -95,6 +96,43 @@ async def execute_graph_block(
    return output


+@v1_router.post(
+    path="/graphs",
+    tags=["graphs"],
+    status_code=201,
+    dependencies=[
+        Security(
+            require_permission(
+                APIKeyPermission.WRITE_GRAPH, APIKeyPermission.WRITE_LIBRARY
+            )
+        )
+    ],
+)
+async def create_graph(
+    graph: graph_db.Graph,
+    auth: APIAuthorizationInfo = Security(
+        require_permission(APIKeyPermission.WRITE_GRAPH, APIKeyPermission.WRITE_LIBRARY)
+    ),
+) -> graph_db.GraphModel:
+    """
+    Create a new agent graph.
+
+    The graph will be validated and assigned a new ID.
+    It is automatically added to the user's library.
+    """
+    from backend.api.features.library import db as library_db
+
+    graph_model = graph_db.make_graph_model(graph, auth.user_id)
+    graph_model.reassign_ids(user_id=auth.user_id, reassign_graph_id=True)
+    graph_model.validate_graph(for_run=False)
+
+    await graph_db.create_graph(graph_model, user_id=auth.user_id)
+    await library_db.create_library_agent(graph_model, auth.user_id)
+    activated_graph = await on_graph_activate(graph_model, user_id=auth.user_id)
+
+    return activated_graph
+
+
@v1_router.post(
    path="/graphs/{graph_id}/execute/{graph_version}",
    tags=["graphs"],
@@ -621,11 +621,10 @@ async def resume_session_stream(
    if not active_session:
        return Response(status_code=204)

-   # Subscribe from the beginning ("0-0") to replay all chunks for this turn.
-   # This is necessary because hydrated messages filter out incomplete tool calls
-   # to avoid "No tool invocation found" errors. The resume stream delivers
-   # those tool calls fresh with proper SDK state.
-   # The AI SDK's deduplication will handle any duplicate chunks.
+   # Always replay from the beginning ("0-0") on resume.
+   # We can't use last_message_id because it's the latest ID in the backend
+   # stream, not the latest the frontend received — the gap causes lost
+   # messages. The frontend deduplicates replayed content.
    subscriber_queue = await stream_registry.subscribe_to_session(
        session_id=session_id,
        user_id=user_id,
@@ -34,10 +34,12 @@ def main(output: Path, pretty: bool):
    """Generate and output the OpenAPI JSON specification."""
    openapi_schema = get_openapi_schema()

-   json_output = json.dumps(openapi_schema, indent=2 if pretty else None)
+   json_output = json.dumps(
+       openapi_schema, indent=2 if pretty else None, ensure_ascii=False
+   )

    if output:
-       output.write_text(json_output)
+       output.write_text(json_output, encoding="utf-8")
        click.echo(f"✅ OpenAPI specification written to {output}\n\nPreview:")
        click.echo(f"\n{json_output[:500]} ...")
    else:
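For context on the two changes above, both stdlib behavior: json.dumps escapes non-ASCII characters by default, and Path.write_text uses the locale's default encoding unless told otherwise. A quick demonstration:

import json

data = {"status": "✅ done"}
print(json.dumps(data))                      # {"status": "\u2705 done"}
print(json.dumps(data, ensure_ascii=False))  # {"status": "✅ done"}

# Path.write_text defaults to the locale encoding, so passing encoding="utf-8"
# explicitly makes the emitted file byte-for-byte stable across platforms.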
@@ -85,7 +85,7 @@ class ChatConfig(BaseSettings):
    )
    claude_agent_max_subtasks: int = Field(
        default=10,
-       description="Max number of sub-agent Tasks the SDK can spawn per session.",
+       description="Max number of concurrent sub-agent Tasks the SDK can run per session.",
    )
    claude_agent_use_resume: bool = Field(
        default=True,
@@ -4,6 +4,7 @@ This module contains the CoPilotExecutor class that consumes chat tasks from
RabbitMQ and processes them using a thread pool, following the graph executor pattern.
"""

+import asyncio
import logging
import os
import threading

@@ -409,14 +410,19 @@ class CoPilotExecutor(AppProcess):

        def on_run_done(f: Future):
            logger.info(f"Run completed for {session_id}")
+           error_msg = None
            try:
                if exec_error := f.exception():
-                   logger.error(f"Execution for {session_id} failed: {exec_error}")
+                   error_msg = str(exec_error) or type(exec_error).__name__
+                   logger.error(f"Execution for {session_id} failed: {error_msg}")
                    ack_message(reject=True, requeue=False)
                else:
                    ack_message(reject=False, requeue=False)
+           except asyncio.CancelledError:
+               logger.info(f"Run completion callback cancelled for {session_id}")
            except BaseException as e:
-               logger.exception(f"Error in run completion callback: {e}")
+               error_msg = str(e) or type(e).__name__
+               logger.exception(f"Error in run completion callback: {error_msg}")
            finally:
                # Release the cluster lock
                if session_id in self._task_locks:
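The `str(e) or type(e).__name__` pattern introduced here deserves a note: many exceptions are raised with no message, so str(e) is an empty string and log lines end up saying "failed: " with no detail. A small sketch of the fallback:

try:
    raise TimeoutError()  # raised with no message
except Exception as e:
    msg = str(e) or type(e).__name__
    print(msg)  # -> "TimeoutError" instead of an empty string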
@@ -125,7 +125,10 @@ class CoPilotProcessor:
                )
                future.result(timeout=5)
            except Exception as e:
-               logger.warning(f"[CoPilotExecutor] Worker {self.tid} cleanup error: {e}")
+               error_msg = str(e) or type(e).__name__
+               logger.warning(
+                   f"[CoPilotExecutor] Worker {self.tid} cleanup error: {error_msg}"
+               )

        # Stop the event loop
        self.execution_loop.call_soon_threadsafe(self.execution_loop.stop)
@@ -157,47 +160,30 @@ class CoPilotProcessor:

        start_time = time.monotonic()

-       try:
-           # Run the async execution in our event loop
-           future = asyncio.run_coroutine_threadsafe(
-               self._execute_async(entry, cancel, cluster_lock, log),
-               self.execution_loop,
-           )
+       # Run the async execution in our event loop
+       future = asyncio.run_coroutine_threadsafe(
+           self._execute_async(entry, cancel, cluster_lock, log),
+           self.execution_loop,
+       )

-           # Wait for completion, checking cancel periodically
-           while not future.done():
-               try:
-                   future.result(timeout=1.0)
-               except asyncio.TimeoutError:
-                   if cancel.is_set():
-                       log.info("Cancellation requested")
-                       future.cancel()
-                       break
-                   # Refresh cluster lock to maintain ownership
-                   cluster_lock.refresh()
-
-           if not future.cancelled():
-               # Get result to propagate any exceptions
-               future.result()
-
-           elapsed = time.monotonic() - start_time
-           log.info(f"Execution completed in {elapsed:.2f}s")
-
-       except BaseException as e:
-           elapsed = time.monotonic() - start_time
-           log.error(f"Execution failed after {elapsed:.2f}s: {e}")
-           # Safety net: if _execute_async's error handler failed to mark
-           # the session (e.g. RuntimeError from SDK cleanup), do it here.
-           try:
-               asyncio.run_coroutine_threadsafe(
-                   stream_registry.mark_session_completed(
-                       entry.session_id, error_message=str(e) or "Unknown error"
-                   ),
-                   self.execution_loop,
-               ).result(timeout=5.0)
-           except Exception as cleanup_err:
-               log.error(f"Safety net mark_session_completed failed: {cleanup_err}")
-           raise
+       # Wait for completion, checking cancel periodically
+       while not future.done():
+           try:
+               future.result(timeout=1.0)
+           except asyncio.TimeoutError:
+               if cancel.is_set():
+                   log.info("Cancellation requested")
+                   future.cancel()
+                   break
+               # Refresh cluster lock to maintain ownership
+               cluster_lock.refresh()
+
+       if not future.cancelled():
+           # Get result to propagate any exceptions
+           future.result()
+
+       elapsed = time.monotonic() - start_time
+       log.info(f"Execution completed in {elapsed:.2f}s")

    async def _execute_async(
        self,
@@ -219,6 +205,7 @@ class CoPilotProcessor:
        """
        last_refresh = time.monotonic()
        refresh_interval = 30.0  # Refresh lock every 30 seconds
+       error_msg = None

        try:
            # Choose service based on LaunchDarkly flag

@@ -264,17 +251,26 @@ class CoPilotProcessor:
                    exc_info=True,
                )

-           error_message = "Operation cancelled" if cancel.is_set() else None
-           await stream_registry.mark_session_completed(
-               entry.session_id, error_message=error_message
-           )
+           # Stream loop completed
+           if cancel.is_set():
+               log.info("Stream cancelled by user")

        except BaseException as e:
-           log.error(f"Turn failed: {e}")
+           # Handle all exceptions (including CancelledError) with appropriate logging
+           if isinstance(e, asyncio.CancelledError):
+               log.info("Turn cancelled")
+               error_msg = "Operation cancelled"
+           else:
+               error_msg = str(e) or type(e).__name__
+               log.error(f"Turn failed: {error_msg}")
            raise
        finally:
+           # If no exception but user cancelled, still mark as cancelled
+           if not error_msg and cancel.is_set():
+               error_msg = "Operation cancelled"
            try:
                await stream_registry.mark_session_completed(
-                   entry.session_id, error_message=str(e) or "Unknown error"
+                   entry.session_id, error_message=error_msg
                )
            except Exception as mark_err:
-               log.error(f"mark_session_completed also failed: {mark_err}")
-               raise
+               log.error(f"Failed to mark session completed: {mark_err}")
autogpt_platform/backend/backend/copilot/sdk/otel_setup_test.py (new file, 172 lines)

@@ -0,0 +1,172 @@
"""Tests for OTEL tracing setup in the SDK copilot path."""
|
||||
|
||||
import os
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
|
||||
class TestSetupLangfuseOtel:
|
||||
"""Tests for _setup_langfuse_otel()."""
|
||||
|
||||
def test_noop_when_langfuse_not_configured(self):
|
||||
"""No env vars should be set when Langfuse credentials are missing."""
|
||||
with patch(
|
||||
"backend.copilot.sdk.service._is_langfuse_configured", return_value=False
|
||||
):
|
||||
from backend.copilot.sdk.service import _setup_langfuse_otel
|
||||
|
||||
# Clear any previously set env vars
|
||||
env_keys = [
|
||||
"LANGSMITH_OTEL_ENABLED",
|
||||
"LANGSMITH_OTEL_ONLY",
|
||||
"LANGSMITH_TRACING",
|
||||
"OTEL_EXPORTER_OTLP_ENDPOINT",
|
||||
"OTEL_EXPORTER_OTLP_HEADERS",
|
||||
]
|
||||
saved = {k: os.environ.pop(k, None) for k in env_keys}
|
||||
try:
|
||||
_setup_langfuse_otel()
|
||||
for key in env_keys:
|
||||
assert key not in os.environ, f"{key} should not be set"
|
||||
finally:
|
||||
for k, v in saved.items():
|
||||
if v is not None:
|
||||
os.environ[k] = v
|
||||
|
||||
def test_sets_env_vars_when_langfuse_configured(self):
|
||||
"""OTEL env vars should be set when Langfuse credentials exist."""
|
||||
mock_settings = MagicMock()
|
||||
mock_settings.secrets.langfuse_public_key = "pk-test-123"
|
||||
mock_settings.secrets.langfuse_secret_key = "sk-test-456"
|
||||
mock_settings.secrets.langfuse_host = "https://langfuse.example.com"
|
||||
mock_settings.secrets.langfuse_tracing_environment = "test"
|
||||
|
||||
with (
|
||||
patch(
|
||||
"backend.copilot.sdk.service._is_langfuse_configured",
|
||||
return_value=True,
|
||||
),
|
||||
patch("backend.copilot.sdk.service.Settings", return_value=mock_settings),
|
||||
patch(
|
||||
"backend.copilot.sdk.service.configure_claude_agent_sdk",
|
||||
return_value=True,
|
||||
) as mock_configure,
|
||||
):
|
||||
from backend.copilot.sdk.service import _setup_langfuse_otel
|
||||
|
||||
# Clear env vars so setdefault works
|
||||
env_keys = [
|
||||
"LANGSMITH_OTEL_ENABLED",
|
||||
"LANGSMITH_OTEL_ONLY",
|
||||
"LANGSMITH_TRACING",
|
||||
"OTEL_EXPORTER_OTLP_ENDPOINT",
|
||||
"OTEL_EXPORTER_OTLP_HEADERS",
|
||||
"OTEL_RESOURCE_ATTRIBUTES",
|
||||
]
|
||||
saved = {k: os.environ.pop(k, None) for k in env_keys}
|
||||
try:
|
||||
_setup_langfuse_otel()
|
||||
|
||||
assert os.environ["LANGSMITH_OTEL_ENABLED"] == "true"
|
||||
assert os.environ["LANGSMITH_OTEL_ONLY"] == "true"
|
||||
assert os.environ["LANGSMITH_TRACING"] == "true"
|
||||
assert (
|
||||
os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"]
|
||||
== "https://langfuse.example.com/api/public/otel"
|
||||
)
|
||||
assert "Authorization=Basic" in os.environ["OTEL_EXPORTER_OTLP_HEADERS"]
|
||||
assert (
|
||||
os.environ["OTEL_RESOURCE_ATTRIBUTES"]
|
||||
== "langfuse.environment=test"
|
||||
)
|
||||
|
||||
mock_configure.assert_called_once_with(tags=["sdk"])
|
||||
finally:
|
||||
for k, v in saved.items():
|
||||
if v is not None:
|
||||
os.environ[k] = v
|
||||
elif k in os.environ:
|
||||
del os.environ[k]
|
||||
|
||||
def test_existing_env_vars_not_overwritten(self):
|
||||
"""Explicit env-var overrides should not be clobbered."""
|
||||
mock_settings = MagicMock()
|
||||
mock_settings.secrets.langfuse_public_key = "pk-test"
|
||||
mock_settings.secrets.langfuse_secret_key = "sk-test"
|
||||
mock_settings.secrets.langfuse_host = "https://langfuse.example.com"
|
||||
|
||||
with (
|
||||
patch(
|
||||
"backend.copilot.sdk.service._is_langfuse_configured",
|
||||
return_value=True,
|
||||
),
|
||||
patch("backend.copilot.sdk.service.Settings", return_value=mock_settings),
|
||||
patch(
|
||||
"backend.copilot.sdk.service.configure_claude_agent_sdk",
|
||||
return_value=True,
|
||||
),
|
||||
):
|
||||
from backend.copilot.sdk.service import _setup_langfuse_otel
|
||||
|
||||
saved = os.environ.get("OTEL_EXPORTER_OTLP_ENDPOINT")
|
||||
try:
|
||||
os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://custom.endpoint/v1"
|
||||
_setup_langfuse_otel()
|
||||
assert (
|
||||
os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"]
|
||||
== "https://custom.endpoint/v1"
|
||||
)
|
||||
finally:
|
||||
if saved is not None:
|
||||
os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = saved
|
||||
elif "OTEL_EXPORTER_OTLP_ENDPOINT" in os.environ:
|
||||
del os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"]
|
||||
|
||||
def test_graceful_failure_on_exception(self):
|
||||
"""Setup should not raise even if internal code fails."""
|
||||
with (
|
||||
patch(
|
||||
"backend.copilot.sdk.service._is_langfuse_configured",
|
||||
return_value=True,
|
||||
),
|
||||
patch(
|
||||
"backend.copilot.sdk.service.Settings",
|
||||
side_effect=RuntimeError("settings unavailable"),
|
||||
),
|
||||
):
|
||||
from backend.copilot.sdk.service import _setup_langfuse_otel
|
||||
|
||||
# Should not raise — just logs and returns
|
||||
_setup_langfuse_otel()
|
||||
|
||||
|
||||
class TestPropagateAttributesImport:
|
||||
"""Verify langfuse.propagate_attributes is available."""
|
||||
|
||||
def test_propagate_attributes_is_importable(self):
|
||||
from langfuse import propagate_attributes
|
||||
|
||||
assert callable(propagate_attributes)
|
||||
|
||||
def test_propagate_attributes_returns_context_manager(self):
|
||||
from langfuse import propagate_attributes
|
||||
|
||||
ctx = propagate_attributes(user_id="u1", session_id="s1", tags=["test"])
|
||||
assert hasattr(ctx, "__enter__")
|
||||
assert hasattr(ctx, "__exit__")
|
||||
|
||||
|
||||
class TestReceiveResponseCompat:
|
||||
"""Verify ClaudeSDKClient.receive_response() exists (langsmith patches it)."""
|
||||
|
||||
def test_receive_response_exists(self):
|
||||
from claude_agent_sdk import ClaudeSDKClient
|
||||
|
||||
assert hasattr(ClaudeSDKClient, "receive_response")
|
||||
|
||||
def test_receive_response_is_async_generator(self):
|
||||
import inspect
|
||||
|
||||
from claude_agent_sdk import ClaudeSDKClient
|
||||
|
||||
method = getattr(ClaudeSDKClient, "receive_response")
|
||||
assert inspect.isfunction(method) or inspect.ismethod(method)
|
||||
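These tests save, pop, and restore os.environ by hand in try/finally blocks. pytest's built-in monkeypatch fixture does the same bookkeeping automatically; a sketch of the first test in that style (not part of the diff):

import os

def test_noop_when_not_configured(monkeypatch):
    # monkeypatch undoes every env change at test teardown automatically.
    for key in ("LANGSMITH_OTEL_ENABLED", "OTEL_EXPORTER_OTLP_ENDPOINT"):
        monkeypatch.delenv(key, raising=False)
    # ... call _setup_langfuse_otel() here and assert on os.environ ...
    assert "LANGSMITH_OTEL_ENABLED" not in os.environ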
@@ -160,7 +160,7 @@ def create_security_hooks(
    Args:
        user_id: Current user ID for isolation validation
        sdk_cwd: SDK working directory for workspace-scoped tool validation
-       max_subtasks: Maximum Task (sub-agent) spawns allowed per session
+       max_subtasks: Maximum concurrent Task (sub-agent) spawns allowed per session
        on_stop: Callback ``(transcript_path, sdk_session_id)`` invoked when
            the SDK finishes processing — used to read the JSONL transcript
            before the CLI process exits.

@@ -172,8 +172,9 @@ def create_security_hooks(
    from claude_agent_sdk import HookMatcher
    from claude_agent_sdk.types import HookContext, HookInput, SyncHookJSONOutput

-   # Per-session counter for Task sub-agent spawns
-   task_spawn_count = 0
+   # Per-session tracking for Task sub-agent concurrency.
+   # Set of tool_use_ids that consumed a slot — len() is the active count.
+   task_tool_use_ids: set[str] = set()

    async def pre_tool_use_hook(
        input_data: HookInput,

@@ -181,7 +182,6 @@ def create_security_hooks(
        context: HookContext,
    ) -> SyncHookJSONOutput:
        """Combined pre-tool-use validation hook."""
-       nonlocal task_spawn_count
        _ = context  # unused but required by signature
        tool_name = cast(str, input_data.get("tool_name", ""))
        tool_input = cast(dict[str, Any], input_data.get("tool_input", {}))

@@ -200,18 +200,18 @@ def create_security_hooks(
                        "(remove the run_in_background parameter)."
                    ),
                )
-           if task_spawn_count >= max_subtasks:
+           if len(task_tool_use_ids) >= max_subtasks:
                logger.warning(
                    f"[SDK] Task limit reached ({max_subtasks}), user={user_id}"
                )
                return cast(
                    SyncHookJSONOutput,
                    _deny(
-                       f"Maximum {max_subtasks} sub-tasks per session. "
-                       "Please continue in the main conversation."
+                       f"Maximum {max_subtasks} concurrent sub-tasks. "
+                       "Wait for running sub-tasks to finish, "
+                       "or continue in the main conversation."
                    ),
                )
-           task_spawn_count += 1

        # Strip MCP prefix for consistent validation
        is_copilot_tool = tool_name.startswith(MCP_TOOL_PREFIX)

@@ -229,9 +229,24 @@ def create_security_hooks(
        if result:
            return cast(SyncHookJSONOutput, result)

+       # Reserve the Task slot only after all validations pass
+       if tool_name == "Task" and tool_use_id is not None:
+           task_tool_use_ids.add(tool_use_id)
+
        logger.debug(f"[SDK] Tool start: {tool_name}, user={user_id}")
        return cast(SyncHookJSONOutput, {})

+   def _release_task_slot(tool_name: str, tool_use_id: str | None) -> None:
+       """Release a Task concurrency slot if one was reserved."""
+       if tool_name == "Task" and tool_use_id in task_tool_use_ids:
+           task_tool_use_ids.discard(tool_use_id)
+           logger.info(
+               "[SDK] Task slot released, active=%d/%d, user=%s",
+               len(task_tool_use_ids),
+               max_subtasks,
+               user_id,
+           )
+
    async def post_tool_use_hook(
        input_data: HookInput,
        tool_use_id: str | None,

@@ -246,6 +261,8 @@ def create_security_hooks(
        """
        _ = context
        tool_name = cast(str, input_data.get("tool_name", ""))

+       _release_task_slot(tool_name, tool_use_id)
+
        is_builtin = not tool_name.startswith(MCP_TOOL_PREFIX)
        logger.info(
            "[SDK] PostToolUse: %s (builtin=%s, tool_use_id=%s)",

@@ -289,6 +306,9 @@ def create_security_hooks(
                f"[SDK] Tool failed: {tool_name}, error={error}, "
                f"user={user_id}, tool_use_id={tool_use_id}"
            )

+       _release_task_slot(tool_name, tool_use_id)
+
        return cast(SyncHookJSONOutput, {})

    async def pre_compact_hook(
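The substance of this change: the old task_spawn_count capped total spawns for the session's lifetime, while the new task_tool_use_ids set caps how many Tasks run at once, because completed or failed IDs are removed again. The mechanism in isolation, with hypothetical names:

active: set[str] = set()
MAX_ACTIVE = 2

def try_reserve(use_id: str) -> bool:
    if len(active) >= MAX_ACTIVE:
        return False  # deny: at capacity right now
    active.add(use_id)
    return True

def release(use_id: str) -> None:
    active.discard(use_id)  # idempotent: safe on both completion and failure paths

assert try_reserve("a") and try_reserve("b")
assert not try_reserve("c")   # third concurrent task denied
release("a")
assert try_reserve("c")       # slot freed, new task allowed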
@@ -208,19 +208,22 @@ def test_bash_builtin_blocked_message_clarity():

@pytest.fixture()
def _hooks():
-   """Create security hooks and return the PreToolUse handler."""
+   """Create security hooks and return (pre, post, post_failure) handlers."""
    from .security_hooks import create_security_hooks

    hooks = create_security_hooks(user_id="u1", sdk_cwd=SDK_CWD, max_subtasks=2)
    pre = hooks["PreToolUse"][0].hooks[0]
-   return pre
+   post = hooks["PostToolUse"][0].hooks[0]
+   post_failure = hooks["PostToolUseFailure"][0].hooks[0]
+   return pre, post, post_failure


@pytest.mark.skipif(not _sdk_available(), reason="claude_agent_sdk not installed")
@pytest.mark.asyncio
async def test_task_background_blocked(_hooks):
    """Task with run_in_background=true must be denied."""
-   result = await _hooks(
+   pre, _, _ = _hooks
+   result = await pre(
        {"tool_name": "Task", "tool_input": {"run_in_background": True, "prompt": "x"}},
        tool_use_id=None,
        context={},

@@ -233,9 +236,10 @@ async def test_task_background_blocked(_hooks):
@pytest.mark.asyncio
async def test_task_foreground_allowed(_hooks):
    """Task without run_in_background should be allowed."""
-   result = await _hooks(
+   pre, _, _ = _hooks
+   result = await pre(
        {"tool_name": "Task", "tool_input": {"prompt": "do stuff"}},
-       tool_use_id=None,
+       tool_use_id="tu-1",
        context={},
    )
    assert not _is_denied(result)

@@ -245,25 +249,102 @@ async def test_task_foreground_allowed(_hooks):
@pytest.mark.asyncio
async def test_task_limit_enforced(_hooks):
    """Task spawns beyond max_subtasks should be denied."""
+   pre, _, _ = _hooks
    # First two should pass
-   for _ in range(2):
-       result = await _hooks(
+   for i in range(2):
+       result = await pre(
            {"tool_name": "Task", "tool_input": {"prompt": "ok"}},
-           tool_use_id=None,
+           tool_use_id=f"tu-limit-{i}",
            context={},
        )
        assert not _is_denied(result)

    # Third should be denied (limit=2)
-   result = await _hooks(
+   result = await pre(
        {"tool_name": "Task", "tool_input": {"prompt": "over limit"}},
-       tool_use_id=None,
+       tool_use_id="tu-limit-2",
        context={},
    )
    assert _is_denied(result)
    assert "Maximum" in _reason(result)


+@pytest.mark.skipif(not _sdk_available(), reason="claude_agent_sdk not installed")
+@pytest.mark.asyncio
+async def test_task_slot_released_on_completion(_hooks):
+   """Completing a Task should free a slot so new Tasks can be spawned."""
+   pre, post, _ = _hooks
+   # Fill both slots
+   for i in range(2):
+       result = await pre(
+           {"tool_name": "Task", "tool_input": {"prompt": "ok"}},
+           tool_use_id=f"tu-comp-{i}",
+           context={},
+       )
+       assert not _is_denied(result)
+
+   # Third should be denied — at capacity
+   result = await pre(
+       {"tool_name": "Task", "tool_input": {"prompt": "over"}},
+       tool_use_id="tu-comp-2",
+       context={},
+   )
+   assert _is_denied(result)
+
+   # Complete first task — frees a slot
+   await post(
+       {"tool_name": "Task", "tool_input": {}},
+       tool_use_id="tu-comp-0",
+       context={},
+   )
+
+   # Now a new Task should be allowed
+   result = await pre(
+       {"tool_name": "Task", "tool_input": {"prompt": "after release"}},
+       tool_use_id="tu-comp-3",
+       context={},
+   )
+   assert not _is_denied(result)
+
+
+@pytest.mark.skipif(not _sdk_available(), reason="claude_agent_sdk not installed")
+@pytest.mark.asyncio
+async def test_task_slot_released_on_failure(_hooks):
+   """A failed Task should also free its concurrency slot."""
+   pre, _, post_failure = _hooks
+   # Fill both slots
+   for i in range(2):
+       result = await pre(
+           {"tool_name": "Task", "tool_input": {"prompt": "ok"}},
+           tool_use_id=f"tu-fail-{i}",
+           context={},
+       )
+       assert not _is_denied(result)
+
+   # At capacity
+   result = await pre(
+       {"tool_name": "Task", "tool_input": {"prompt": "over"}},
+       tool_use_id="tu-fail-2",
+       context={},
+   )
+   assert _is_denied(result)
+
+   # Fail first task — should free a slot
+   await post_failure(
+       {"tool_name": "Task", "tool_input": {}, "error": "something broke"},
+       tool_use_id="tu-fail-0",
+       context={},
+   )
+
+   # New Task should be allowed
+   result = await pre(
+       {"tool_name": "Task", "tool_input": {"prompt": "after failure"}},
+       tool_use_id="tu-fail-3",
+       context={},
+   )
+   assert not _is_denied(result)


# -- _is_tool_error_or_denial ------------------------------------------------


@@ -298,7 +379,9 @@ class TestIsToolErrorOrDenial:
    def test_subtask_limit_denial(self):
        assert (
            _is_tool_error_or_denial(
-               "Maximum 2 sub-tasks per session. Please continue in the main conversation."
+               "Maximum 2 concurrent sub-tasks. "
+               "Wait for running sub-tasks to finish, "
+               "or continue in the main conversation."
            )
            is True
        )
@@ -1,17 +1,23 @@
"""Claude Agent SDK service layer for CoPilot chat completions."""

import asyncio
+import base64
import json
import logging
import os
+import sys
import uuid
from collections.abc import AsyncGenerator
from dataclasses import dataclass
from typing import Any, cast

+from langfuse import propagate_attributes
+from langsmith.integrations.claude_agent_sdk import configure_claude_agent_sdk
+
from backend.data.redis_client import get_redis_async
from backend.executor.cluster_lock import AsyncClusterLock
from backend.util.exceptions import NotFoundError
+from backend.util.settings import Settings

from ..config import ChatConfig
from ..model import (

@@ -31,7 +37,11 @@ from ..response_model import (
    StreamToolInputAvailable,
    StreamToolOutputAvailable,
)
-from ..service import _build_system_prompt, _generate_session_title
+from ..service import (
+    _build_system_prompt,
+    _generate_session_title,
+    _is_langfuse_configured,
+)
from ..tools.sandbox import WORKSPACE_PREFIX, make_session_path
from ..tracking import track_user_message
from .response_adapter import SDKResponseAdapter

@@ -56,6 +66,55 @@ logger = logging.getLogger(__name__)
config = ChatConfig()


+def _setup_langfuse_otel() -> None:
+    """Configure OTEL tracing for the Claude Agent SDK → Langfuse.
+
+    This uses LangSmith's built-in Claude Agent SDK integration to monkey-patch
+    ``ClaudeSDKClient``, capturing every tool call and model turn as OTEL spans.
+    Spans are exported via OTLP to Langfuse (or any OTEL-compatible backend).
+
+    To route traces elsewhere, override ``OTEL_EXPORTER_OTLP_ENDPOINT`` and
+    ``OTEL_EXPORTER_OTLP_HEADERS`` environment variables — no code changes needed.
+    """
+    if not _is_langfuse_configured():
+        return
+
+    try:
+        settings = Settings()
+        pk = settings.secrets.langfuse_public_key
+        sk = settings.secrets.langfuse_secret_key
+        host = settings.secrets.langfuse_host
+
+        # OTEL exporter config — these are only set if not already present,
+        # so explicit env-var overrides always win.
+        creds = base64.b64encode(f"{pk}:{sk}".encode()).decode()
+        os.environ.setdefault("LANGSMITH_OTEL_ENABLED", "true")
+        os.environ.setdefault("LANGSMITH_OTEL_ONLY", "true")
+        os.environ.setdefault("LANGSMITH_TRACING", "true")
+        os.environ.setdefault("OTEL_EXPORTER_OTLP_ENDPOINT", f"{host}/api/public/otel")
+        os.environ.setdefault(
+            "OTEL_EXPORTER_OTLP_HEADERS", f"Authorization=Basic {creds}"
+        )
+
+        # Set the Langfuse environment via OTEL resource attributes so the
+        # Langfuse server maps it to the first-class environment field.
+        tracing_env = settings.secrets.langfuse_tracing_environment
+        os.environ.setdefault(
+            "OTEL_RESOURCE_ATTRIBUTES",
+            f"langfuse.environment={tracing_env}",
+        )
+
+        configure_claude_agent_sdk(tags=["sdk"])
+        logger.info(
+            "OTEL tracing configured for Claude Agent SDK → %s [%s]", host, tracing_env
+        )
+    except Exception:
+        logger.warning("OTEL setup skipped — failed to configure", exc_info=True)
+
+
+_setup_langfuse_otel()


# Set to hold background tasks to prevent garbage collection
_background_tasks: set[asyncio.Task[Any]] = set()
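Two stdlib details the function above relies on: os.environ.setdefault never overwrites an existing variable, and the OTLP Authorization header is ordinary HTTP Basic auth built from "public_key:secret_key". A quick demonstration:

import base64
import os

# setdefault only writes when the key is absent, which is what lets an
# operator's explicit OTEL_* overrides win over the computed values:
os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://custom.endpoint/v1"
os.environ.setdefault("OTEL_EXPORTER_OTLP_ENDPOINT", "https://host/api/public/otel")
assert os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] == "https://custom.endpoint/v1"

# Basic auth credentials are just base64 of "pk:sk":
creds = base64.b64encode("pk-test:sk-test".encode()).decode()
print(f"Authorization=Basic {creds}")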
@@ -75,14 +134,21 @@ class CapturedTranscript:

_SDK_CWD_PREFIX = WORKSPACE_PREFIX

+# Special message prefixes for text-based markers (parsed by frontend)
+COPILOT_ERROR_PREFIX = "[COPILOT_ERROR]"  # Renders as ErrorCard
+COPILOT_SYSTEM_PREFIX = "[COPILOT_SYSTEM]"  # Renders as system info message
+
# Heartbeat interval — keep SSE alive through proxies/LBs during tool execution.
# IMPORTANT: Must be less than frontend timeout (12s in useCopilotPage.ts)
_HEARTBEAT_INTERVAL = 10.0  # seconds


# Appended to the system prompt to inform the agent about available tools.
# The SDK built-in Bash is NOT available — use mcp__copilot__bash_exec instead,
# which has kernel-level network isolation (unshare --net).
-_SDK_TOOL_SUPPLEMENT = """
+def _build_sdk_tool_supplement(cwd: str) -> str:
+    """Build the SDK tool supplement with the actual working directory injected."""
+    return f"""

## Tool notes

@@ -90,9 +156,16 @@ _SDK_TOOL_SUPPLEMENT = """
- The SDK built-in Bash tool is NOT available. Use the `bash_exec` MCP tool
  for shell commands — it runs in a network-isolated sandbox.

+### Working directory
+- Your working directory is: `{cwd}`
+- All SDK Read/Write/Edit/Glob/Grep tools AND `bash_exec` operate inside this
+  directory. This is the ONLY writable path — do not attempt to read or write
+  anywhere else on the filesystem.
+- Use relative paths or absolute paths under `{cwd}` for all file operations.
+
### Two storage systems — CRITICAL to understand

-1. **Ephemeral working directory** (`/tmp/copilot-<session>/`):
+1. **Ephemeral working directory** (`{cwd}`):
   - Shared by SDK Read/Write/Edit/Glob/Grep tools AND `bash_exec`
   - Files here are **lost between turns** — do NOT rely on them persisting
   - Use for temporary work: running scripts, processing data, etc.

@@ -118,6 +191,21 @@ When you create or modify important files (code, configs, outputs), you MUST:
2. At the start of a new turn, call `list_workspace_files` to see what files
   are available from previous turns

+### Sharing files with the user
+After saving a file to the persistent workspace with `write_workspace_file`,
+share it with the user by embedding the `download_url` from the response in
+your message as a Markdown link or image:
+
+- **Any file** — shows as a clickable download link:
+  `[report.csv](workspace://file_id#text/csv)`
+- **Image** — renders inline in chat:
+  ``
+- **Video** — renders inline in chat with player controls:
+  ``
+
+The `download_url` field in the `write_workspace_file` response is already
+in the correct format — paste it directly after the `(` in the Markdown.
+
### Long-running tools
Long-running tools (create_agent, edit_agent, etc.) are handled
asynchronously. You will receive an immediate response; the actual result

@@ -128,6 +216,7 @@ is delivered to the user via a background stream.
All tasks must run in the foreground.
"""
+

STREAM_LOCK_PREFIX = "copilot:stream:lock:"
@@ -413,6 +502,20 @@ async def stream_chat_completion_sdk(
    # Type narrowing: session is guaranteed ChatSession after the check above
    session = cast(ChatSession, session)

+   # Clean up stale error markers from previous turn before starting new turn
+   # If the last message contains an error marker, remove it (user is retrying)
+   if (
+       len(session.messages) > 0
+       and session.messages[-1].role == "assistant"
+       and session.messages[-1].content
+       and COPILOT_ERROR_PREFIX in session.messages[-1].content
+   ):
+       logger.info(
+           "[SDK] [%s] Removing stale error marker from previous turn",
+           session_id[:12],
+       )
+       session.messages.pop()
+
    # Append the new message to the session if it's not already there
    new_message_role = "user" if is_user_message else "assistant"
    if message and (
@@ -442,14 +545,13 @@ async def stream_chat_completion_sdk(
    _background_tasks.add(task)
    task.add_done_callback(_background_tasks.discard)

-   # Build system prompt (reuses non-SDK path with Langfuse support)
-   has_history = len(session.messages) > 1
-   system_prompt, _ = await _build_system_prompt(
-       user_id, has_conversation_history=has_history
-   )
-   system_prompt += _SDK_TOOL_SUPPLEMENT
    message_id = str(uuid.uuid4())
    stream_id = str(uuid.uuid4())
+   stream_completed = False
-   use_resume = False
-   resume_file: str | None = None
-   captured_transcript = CapturedTranscript()
-   sdk_cwd = ""

    # Acquire stream lock to prevent concurrent streams to the same session
    lock = AsyncClusterLock(
@@ -472,21 +574,33 @@ async def stream_chat_completion_sdk(
        )
        return

-   yield StreamStart(messageId=message_id, sessionId=session_id)
-
-   stream_completed = False
+   # Initialise variables before the try so the finally block can
+   # always attempt transcript upload regardless of errors.
+   sdk_cwd = ""
+   use_resume = False
+   resume_file: str | None = None
+   captured_transcript = CapturedTranscript()
+   # OTEL context manager — initialized inside the try and cleaned up in finally.
+   _otel_ctx: Any = None

    # Make sure there is no more code between the lock acquisition and try-block.
    try:
-       # Use a session-specific temp dir to avoid cleanup race conditions
-       # between concurrent sessions.
-       sdk_cwd = _make_sdk_cwd(session_id)
-       os.makedirs(sdk_cwd, exist_ok=True)
+       # Build system prompt (reuses non-SDK path with Langfuse support).
+       # Pre-compute the cwd here so the exact working directory path can be
+       # injected into the supplement instead of the generic placeholder.
+       # Catch ValueError early so the failure yields a clean StreamError rather
+       # than propagating outside the stream error-handling path.
+       has_history = len(session.messages) > 1
+       try:
+           sdk_cwd = _make_sdk_cwd(session_id)
+           os.makedirs(sdk_cwd, exist_ok=True)
+       except (ValueError, OSError) as e:
+           logger.error("[SDK] [%s] Invalid SDK cwd: %s", session_id[:12], e)
+           yield StreamError(
+               errorText="Unable to initialize working directory.",
+               code="sdk_cwd_error",
+           )
+           return
+       system_prompt, _ = await _build_system_prompt(
+           user_id, has_conversation_history=has_history
+       )
+       system_prompt += _build_sdk_tool_supplement(sdk_cwd)
+
+       yield StreamStart(messageId=message_id, sessionId=session_id)

        set_execution_context(user_id, session)
        try:
@@ -581,6 +695,19 @@ async def stream_chat_completion_sdk(

        adapter = SDKResponseAdapter(message_id=message_id, session_id=session_id)

+       # Propagate user_id/session_id as OTEL context attributes so the
+       # langsmith tracing integration attaches them to every span. This
+       # is what Langfuse (or any OTEL backend) maps to its native
+       # user/session fields.
+       _otel_ctx = propagate_attributes(
+           user_id=user_id,
+           session_id=session_id,
+           trace_name="copilot-sdk",
+           tags=["sdk"],
+           metadata={"resume": str(use_resume)},
+       )
+       _otel_ctx.__enter__()
+
        async with ClaudeSDKClient(options=options) as client:
            current_message = message or ""
            if not current_message and session.messages:
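Calling __enter__() by hand, as above, lets a context manager's lifetime span a try/finally that also yields; the matching __exit__(*sys.exc_info()) in the finally block reproduces what a with statement would pass. A stdlib-only sketch of the pattern:

import sys
from contextlib import contextmanager

@contextmanager
def span():
    print("enter")
    try:
        yield
    finally:
        print("exit")

ctx = span()
ctx.__enter__()
try:
    pass  # ... streaming work that may raise ...
finally:
    # Passing sys.exc_info() lets the context manager observe any in-flight
    # exception, mirroring what a `with` block would do.
    ctx.__exit__(*sys.exc_info())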
@@ -624,7 +751,7 @@ async def stream_chat_completion_sdk(
            # Instead, wrap __anext__() in a Task and use asyncio.wait()
            # with a timeout. On timeout we emit a heartbeat but keep the
            # Task alive so it can deliver the next message.
-           msg_iter = client.receive_messages().__aiter__()
+           msg_iter = client.receive_response().__aiter__()
            pending_task: asyncio.Task[Any] | None = None
            try:
                while not stream_completed:

@@ -658,7 +785,7 @@ async def stream_chat_completion_sdk(
                        break
                    except Exception as stream_err:
                        # SDK sends {"type": "error"} which raises
-                       # Exception in receive_messages() — capture it
+                       # Exception in receive_response() — capture it
                        # so the session can still be saved and the
                        # frontend gets a clean finish.
                        logger.error(
@@ -725,6 +852,25 @@ async def stream_chat_completion_sdk(
                        - len(adapter.resolved_tool_calls),
                    )

+                   # Log ResultMessage details for debugging
+                   if isinstance(sdk_msg, ResultMessage):
+                       logger.info(
+                           "[SDK] [%s] Received: ResultMessage %s "
+                           "(unresolved=%d, current=%d, resolved=%d)",
+                           session_id[:12],
+                           sdk_msg.subtype,
+                           len(adapter.current_tool_calls)
+                           - len(adapter.resolved_tool_calls),
+                           len(adapter.current_tool_calls),
+                           len(adapter.resolved_tool_calls),
+                       )
+                       if sdk_msg.subtype in ("error", "error_during_execution"):
+                           logger.error(
+                               "[SDK] [%s] SDK execution failed with error: %s",
+                               session_id[:12],
+                               sdk_msg.result or "(no error message provided)",
+                           )
+
                    for response in adapter.convert_message(sdk_msg):
                        if isinstance(response, StreamStart):
                            continue

@@ -749,6 +895,15 @@ async def stream_chat_completion_sdk(
                                extra,
                            )

+                       # Log errors being sent to frontend
+                       if isinstance(response, StreamError):
+                           logger.error(
+                               "[SDK] [%s] Sending error to frontend: %s (code=%s)",
+                               session_id[:12],
+                               response.errorText,
+                               response.code,
+                           )
+
                        yield response

                        if isinstance(response, StreamTextDelta):
@@ -855,13 +1010,13 @@ async def stream_chat_completion_sdk(
                            yield response

                # If the stream ended without a ResultMessage, the SDK
-               # CLI exited unexpectedly. Close any open text/step so
-               # the chunks are well-formed. StreamFinish is published
-               # by mark_session_completed in the processor.
+               # CLI exited unexpectedly or the user stopped execution.
+               # Close any open text/step so chunks are well-formed, and
+               # append a cancellation message so users see feedback.
+               # StreamFinish is published by mark_session_completed in the processor.
                if not stream_completed:
-                   logger.warning(
-                       "[SDK] [%s] Stream ended without ResultMessage "
-                       "(StopAsyncIteration)",
+                   logger.info(
+                       "[SDK] [%s] Stream ended without ResultMessage (stopped by user)",
                        session_id[:12],
                    )
                    closing_responses: list[StreamBaseResponse] = []

@@ -869,6 +1024,15 @@ async def stream_chat_completion_sdk(
                    for r in closing_responses:
                        yield r

+                   # Add "Stopped by user" message so it persists after refresh
+                   # Use COPILOT_SYSTEM_PREFIX so frontend renders it as system message, not assistant
+                   session.messages.append(
+                       ChatMessage(
+                           role="assistant",
+                           content=f"{COPILOT_SYSTEM_PREFIX} Execution stopped by user",
+                       )
+                   )
+
                if (
                    assistant_response.content or assistant_response.tool_calls
                ) and not has_appended_assistant:
@@ -922,43 +1086,83 @@ async def stream_chat_completion_sdk(
                "to use the OpenAI-compatible fallback."
            )

        session = cast(ChatSession, await asyncio.shield(upsert_chat_session(session)))
        logger.info(
-           "[SDK] [%s] Session saved with %d messages",
+           "[SDK] [%s] Stream completed successfully with %d messages",
            session_id[:12],
            len(session.messages),
        )
-   except asyncio.CancelledError:
-       # Client disconnect / server shutdown — save session before re-raising
-       # so accumulated messages aren't lost.
-       logger.warning("[SDK] [%s] Session cancelled (CancelledError)", session_id[:12])
+   except BaseException as e:
+       # Catch BaseException to handle both Exception and CancelledError
+       # (CancelledError inherits from BaseException in Python 3.8+)
+       if isinstance(e, asyncio.CancelledError):
+           logger.warning("[SDK] [%s] Session cancelled", session_id[:12])
+           error_msg = "Operation cancelled"
+       else:
+           error_msg = str(e) or type(e).__name__
+           # SDK cleanup RuntimeError is expected during cancellation, log as warning
+           if isinstance(e, RuntimeError) and "cancel scope" in str(e):
+               logger.warning(
+                   "[SDK] [%s] SDK cleanup error: %s", session_id[:12], error_msg
+               )
+           else:
+               logger.error(
+                   f"[SDK] [%s] Error: {error_msg}", session_id[:12], exc_info=True
+               )
+
+       # Append error marker to session (non-invasive text parsing approach)
+       # The finally block will persist the session with this error marker
+       if session:
+           session.messages.append(
+               ChatMessage(
+                   role="assistant", content=f"{COPILOT_ERROR_PREFIX} {error_msg}"
+               )
+           )
+           logger.debug(
+               "[SDK] [%s] Appended error marker, will be persisted in finally",
+               session_id[:12],
+           )
+
+       # Yield StreamError for immediate feedback (only for non-cancellation errors)
+       # Skip for CancelledError and RuntimeError cleanup issues (both are cancellations)
+       is_cancellation = isinstance(e, asyncio.CancelledError) or (
+           isinstance(e, RuntimeError) and "cancel scope" in str(e)
+       )
+       if not is_cancellation:
+           yield StreamError(
+               errorText=error_msg,
+               code="sdk_error",
+           )
+
+       raise
    finally:
+       # --- Close OTEL context ---
+       if _otel_ctx is not None:
+           try:
+               _otel_ctx.__exit__(*sys.exc_info())
+           except Exception:
+               logger.warning("OTEL context teardown failed", exc_info=True)
+
+       # --- Persist session messages ---
+       # This MUST run in finally to persist messages even when the generator
+       # is stopped early (e.g., user clicks stop, processor breaks stream loop).
+       # Without this, messages disappear after refresh because they were never
+       # saved to the database.
        if session is not None:
            try:
                await asyncio.shield(upsert_chat_session(session))
                logger.info(
-                   "[SDK] [%s] Session saved on cancel (%d messages)",
+                   "[SDK] [%s] Session persisted in finally with %d messages",
                    session_id[:12],
                    len(session.messages),
                )
-           except Exception as save_err:
+           except Exception as persist_err:
                logger.error(
-                   "[SDK] [%s] Failed to save session on cancel: %s",
+                   "[SDK] [%s] Failed to persist session in finally: %s",
                    session_id[:12],
-                   save_err,
+                   persist_err,
                    exc_info=True,
                )
-       raise
-   except Exception as e:
-       logger.error(f"[SDK] Error: {e}", exc_info=True)
-       if session:
-           try:
-               await asyncio.shield(upsert_chat_session(session))
-           except Exception as save_err:
-               logger.error(f"[SDK] Failed to save session on error: {save_err}")
-       yield StreamError(
-           errorText="An error occurred. Please try again.",
-           code="sdk_error",
-       )
-   finally:

        # --- Upload transcript for next-turn --resume ---
        # This MUST run in finally so the transcript is uploaded even when
        # the streaming loop raises an exception. The CLI uses
@@ -707,7 +707,6 @@ async def mark_session_completed(
        True if session was newly marked completed, False if already completed/failed
    """
    status: Literal["completed", "failed"] = "failed" if error_message else "completed"
-
    redis = await get_redis_async()
    meta_key = _get_session_meta_key(session_id)
@@ -5,7 +5,7 @@ import re
from datetime import datetime, timedelta, timezone
from typing import Any

-from pydantic import BaseModel, field_validator
+from pydantic import BaseModel, Field, field_validator

from backend.api.features.library.model import LibraryAgent
from backend.copilot.model import ChatSession

@@ -13,6 +13,7 @@ from backend.data.db_accessors import execution_db, library_db
from backend.data.execution import ExecutionStatus, GraphExecution, GraphExecutionMeta

from .base import BaseTool
+from .execution_utils import TERMINAL_STATUSES, wait_for_execution
from .models import (
    AgentOutputResponse,
    ErrorResponse,

@@ -33,6 +34,7 @@ class AgentOutputInput(BaseModel):
    store_slug: str = ""
    execution_id: str = ""
    run_time: str = "latest"
+   wait_if_running: int = Field(default=0, ge=0, le=300)

    @field_validator(
        "agent_name",

@@ -116,6 +118,11 @@ class AgentOutputTool(BaseTool):
    Select which run to retrieve using:
    - execution_id: Specific execution ID
    - run_time: 'latest' (default), 'yesterday', 'last week', or ISO date 'YYYY-MM-DD'

+   Wait for completion (optional):
+   - wait_if_running: Max seconds to wait if execution is still running (0-300).
+     If the execution is running/queued, waits up to this many seconds for completion.
+     Returns current status on timeout. If already finished, returns immediately.
    """

    @property

@@ -145,6 +152,13 @@ class AgentOutputTool(BaseTool):
                        "Time filter: 'latest', 'yesterday', 'last week', or 'YYYY-MM-DD'"
                    ),
                },
+               "wait_if_running": {
+                   "type": "integer",
+                   "description": (
+                       "Max seconds to wait if execution is still running (0-300). "
+                       "If running, waits for completion. Returns current state on timeout."
+                   ),
+               },
            },
            "required": [],
        }

@@ -224,10 +238,14 @@ class AgentOutputTool(BaseTool):
        execution_id: str | None,
        time_start: datetime | None,
        time_end: datetime | None,
+       include_running: bool = False,
    ) -> tuple[GraphExecution | None, list[GraphExecutionMeta], str | None]:
        """
        Fetch execution(s) based on filters.
        Returns (single_execution, available_executions_meta, error_message).

+       Args:
+           include_running: If True, also look for running/queued executions (for waiting)
        """
        exec_db = execution_db()

@@ -242,11 +260,25 @@ class AgentOutputTool(BaseTool):
                return None, [], f"Execution '{execution_id}' not found"
            return execution, [], None

-       # Get completed executions with time filters
+       # Determine which statuses to query
+       statuses = [ExecutionStatus.COMPLETED]
+       if include_running:
+           statuses.extend(
+               [
+                   ExecutionStatus.RUNNING,
+                   ExecutionStatus.QUEUED,
+                   ExecutionStatus.INCOMPLETE,
+                   ExecutionStatus.REVIEW,
+                   ExecutionStatus.FAILED,
+                   ExecutionStatus.TERMINATED,
+               ]
+           )
+
+       # Get executions with time filters
        executions = await exec_db.get_graph_executions(
            graph_id=graph_id,
            user_id=user_id,
-           statuses=[ExecutionStatus.COMPLETED],
+           statuses=statuses,
            created_time_gte=time_start,
            created_time_lte=time_end,
            limit=10,

@@ -313,10 +345,33 @@ class AgentOutputTool(BaseTool):
            for e in available_executions[:5]
        ]

-       message = f"Found execution outputs for agent '{agent.name}'"
+       # Build appropriate message based on execution status
+       if execution.status == ExecutionStatus.COMPLETED:
+           message = f"Found execution outputs for agent '{agent.name}'"
+       elif execution.status == ExecutionStatus.FAILED:
+           message = f"Execution for agent '{agent.name}' failed"
+       elif execution.status == ExecutionStatus.TERMINATED:
+           message = f"Execution for agent '{agent.name}' was terminated"
+       elif execution.status == ExecutionStatus.REVIEW:
+           message = (
+               f"Execution for agent '{agent.name}' is awaiting human review. "
+               "The user needs to approve it before it can continue."
+           )
+       elif execution.status in (
+           ExecutionStatus.RUNNING,
+           ExecutionStatus.QUEUED,
+           ExecutionStatus.INCOMPLETE,
+       ):
+           message = (
+               f"Execution for agent '{agent.name}' is still {execution.status.value}. "
+               "Results may be incomplete. Use wait_if_running to wait for completion."
+           )
+       else:
+           message = f"Found execution for agent '{agent.name}' (status: {execution.status.value})"

        if len(available_executions) > 1:
            message += (
-               f". Showing latest of {len(available_executions)} matching executions."
+               f" Showing latest of {len(available_executions)} matching executions."
            )

        return AgentOutputResponse(

@@ -431,13 +486,17 @@ class AgentOutputTool(BaseTool):
        # Parse time expression
        time_start, time_end = parse_time_expression(input_data.run_time)

-       # Fetch execution(s)
+       # Check if we should wait for running executions
+       wait_timeout = input_data.wait_if_running
+
+       # Fetch execution(s) - include running if we're going to wait
        execution, available_executions, exec_error = await self._get_execution(
            user_id=user_id,
            graph_id=agent.graph_id,
            execution_id=input_data.execution_id or None,
            time_start=time_start,
            time_end=time_end,
+           include_running=wait_timeout > 0,
        )

        if exec_error:

@@ -446,4 +505,17 @@ class AgentOutputTool(BaseTool):
                session_id=session_id,
            )

+       # If we have an execution that's still running and we should wait
+       if execution and wait_timeout > 0 and execution.status not in TERMINAL_STATUSES:
+           logger.info(
+               f"Execution {execution.id} is {execution.status}, "
+               f"waiting up to {wait_timeout}s for completion"
+           )
+           execution = await wait_for_execution(
+               user_id=user_id,
+               graph_id=agent.graph_id,
+               execution_id=execution.id,
+               timeout_seconds=wait_timeout,
+           )
+
        return self._build_response(agent, execution, available_executions, session_id)
@@ -1,8 +1,13 @@
"""Shared agent search functionality for find_agent and find_library_agent tools."""

from __future__ import annotations

import logging
import re
from typing import Literal
from typing import TYPE_CHECKING, Literal

if TYPE_CHECKING:
    from backend.api.features.library.model import LibraryAgent

from backend.data.db_accessors import library_db, store_db
from backend.util.exceptions import DatabaseError, NotFoundError
@@ -24,94 +29,24 @@
    re.IGNORECASE,
)


def _is_uuid(text: str) -> bool:
    """Check if text is a valid UUID v4."""
    return bool(_UUID_PATTERN.match(text.strip()))


async def _get_library_agent_by_id(user_id: str, agent_id: str) -> AgentInfo | None:
    """Fetch a library agent by ID (library agent ID or graph_id).

    Tries multiple lookup strategies:
    1. First by graph_id (AgentGraph primary key)
    2. Then by library agent ID (LibraryAgent primary key)

    Args:
        user_id: The user ID
        agent_id: The ID to look up (can be graph_id or library agent ID)

    Returns:
        AgentInfo if found, None otherwise
    """
    lib_db = library_db()

    try:
        agent = await lib_db.get_library_agent_by_graph_id(user_id, agent_id)
        if agent:
            logger.debug(f"Found library agent by graph_id: {agent.name}")
            return AgentInfo(
                id=agent.id,
                name=agent.name,
                description=agent.description or "",
                source="library",
                in_library=True,
                creator=agent.creator_name,
                status=agent.status.value,
                can_access_graph=agent.can_access_graph,
                has_external_trigger=agent.has_external_trigger,
                new_output=agent.new_output,
                graph_id=agent.graph_id,
            )
    except DatabaseError:
        raise
    except Exception as e:
        logger.warning(
            f"Could not fetch library agent by graph_id {agent_id}: {e}",
            exc_info=True,
        )

    try:
        agent = await lib_db.get_library_agent(agent_id, user_id)
        if agent:
            logger.debug(f"Found library agent by library_id: {agent.name}")
            return AgentInfo(
                id=agent.id,
                name=agent.name,
                description=agent.description or "",
                source="library",
                in_library=True,
                creator=agent.creator_name,
                status=agent.status.value,
                can_access_graph=agent.can_access_graph,
                has_external_trigger=agent.has_external_trigger,
                new_output=agent.new_output,
                graph_id=agent.graph_id,
            )
    except NotFoundError:
        logger.debug(f"Library agent not found by library_id: {agent_id}")
    except DatabaseError:
        raise
    except Exception as e:
        logger.warning(
            f"Could not fetch library agent by library_id {agent_id}: {e}",
            exc_info=True,
        )

    return None
# Keywords that should be treated as "list all" rather than a literal search
_LIST_ALL_KEYWORDS = frozenset({"all", "*", "everything", "any", ""})


async def search_agents(
    query: str,
    source: SearchSource,
    session_id: str | None,
    session_id: str | None = None,
    user_id: str | None = None,
) -> ToolResponseBase:
    """
    Search for agents in marketplace or user library.

    For library searches, keywords like "all", "*", "everything", or an empty
    query will list all agents without filtering.

    Args:
        query: Search query string
        query: Search query string. Special keywords list all library agents.
        source: "marketplace" or "library"
        session_id: Chat session ID
        user_id: User ID (required for library search)
@@ -119,7 +54,11 @@ async def search_agents(
    Returns:
        AgentsFoundResponse, NoResultsResponse, or ErrorResponse
    """
    if not query:
    # Normalize list-all keywords to empty string for library searches
    if source == "library" and query.lower().strip() in _LIST_ALL_KEYWORDS:
        query = ""

    if source == "marketplace" and not query:
        return ErrorResponse(
            message="Please provide a search query", session_id=session_id
        )
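# Illustrative only, not part of the diff: how the normalization above treats
# a few library queries (the set literal mirrors _LIST_ALL_KEYWORDS).
for q in ("ALL", " * ", "everything", "", "newsletter"):
    is_list_all = q.lower().strip() in {"all", "*", "everything", "any", ""}
    print(repr(q), "->", "list all agents" if is_list_all else "literal search")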
@@ -159,28 +98,18 @@ async def search_agents(
                logger.info(f"Found agent by direct ID lookup: {agent.name}")

        if not agents:
            logger.info(f"Searching user library for: {query}")
            search_term = query or None
            logger.info(
                f"{'Listing all agents in' if not query else 'Searching'} "
                f"user library{'' if not query else f' for: {query}'}"
            )
            results = await library_db().list_library_agents(
                user_id=user_id,  # type: ignore[arg-type]
                search_term=query,
                page_size=10,
                search_term=search_term,
                page_size=50 if not query else 10,
            )
            for agent in results.agents:
                agents.append(
                    AgentInfo(
                        id=agent.id,
                        name=agent.name,
                        description=agent.description or "",
                        source="library",
                        in_library=True,
                        creator=agent.creator_name,
                        status=agent.status.value,
                        can_access_graph=agent.can_access_graph,
                        has_external_trigger=agent.has_external_trigger,
                        new_output=agent.new_output,
                        graph_id=agent.graph_id,
                    )
                )
                agents.append(_library_agent_to_info(agent))
        logger.info(f"Found {len(agents)} agents in {source}")
    except NotFoundError:
        pass
@@ -193,42 +122,62 @@ async def search_agents(
        )

    if not agents:
        suggestions = (
            [
        if source == "marketplace":
            suggestions = [
                "Try more general terms",
                "Browse categories in the marketplace",
                "Check spelling",
            ]
            if source == "marketplace"
            else [
            no_results_msg = (
                f"No agents found matching '{query}'. Let the user know they can "
                "try different keywords or browse the marketplace. Also let them "
                "know you can create a custom agent for them based on their needs."
            )
        elif not query:
            # User asked to list all but library is empty
            suggestions = [
                "Browse the marketplace to find and add agents",
                "Use find_agent to search the marketplace",
            ]
            no_results_msg = (
                "Your library is empty. Let the user know they can browse the "
                "marketplace to find agents, or you can create a custom agent "
                "for them based on their needs."
            )
        else:
            suggestions = [
                "Try different keywords",
                "Use find_agent to search the marketplace",
                "Check your library at /library",
            ]
        )
        no_results_msg = (
            f"No agents found matching '{query}'. Let the user know they can try different keywords or browse the marketplace. Also let them know you can create a custom agent for them based on their needs."
            if source == "marketplace"
            else f"No agents matching '{query}' found in your library. Let the user know you can create a custom agent for them based on their needs."
        )
            no_results_msg = (
                f"No agents matching '{query}' found in your library. Let the "
                "user know you can create a custom agent for them based on "
                "their needs."
            )
        return NoResultsResponse(
            message=no_results_msg, session_id=session_id, suggestions=suggestions
        )

    title = f"Found {len(agents)} agent{'s' if len(agents) != 1 else ''} "
    title += (
        f"for '{query}'"
        if source == "marketplace"
        else f"in your library for '{query}'"
    )
    if source == "marketplace":
        title = (
            f"Found {len(agents)} agent{'s' if len(agents) != 1 else ''} for '{query}'"
        )
    elif not query:
        title = f"Found {len(agents)} agent{'s' if len(agents) != 1 else ''} in your library"
    else:
        title = f"Found {len(agents)} agent{'s' if len(agents) != 1 else ''} in your library for '{query}'"

    message = (
        "Now you have found some options for the user to choose from. "
        "You can add a link to a recommended agent at: /marketplace/agent/agent_id "
        "Please ask the user if they would like to use any of these agents. Let the user know we can create a custom agent for them based on their needs."
        "Please ask the user if they would like to use any of these agents. "
        "Let the user know we can create a custom agent for them based on their needs."
        if source == "marketplace"
        else "Found agents in the user's library. You can provide a link to view an agent at: "
        "/library/agents/{agent_id}. Use agent_output to get execution results, or run_agent to execute. Let the user know we can create a custom agent for them based on their needs."
        else "Found agents in the user's library. You can provide a link to view "
        "an agent at: /library/agents/{agent_id}. Use agent_output to get "
        "execution results, or run_agent to execute. Let the user know we can "
        "create a custom agent for them based on their needs."
    )

    return AgentsFoundResponse(
@@ -238,3 +187,67 @@ async def search_agents(
        count=len(agents),
        session_id=session_id,
    )


def _is_uuid(text: str) -> bool:
    """Check if text is a valid UUID v4."""
    return bool(_UUID_PATTERN.match(text.strip()))


def _library_agent_to_info(agent: LibraryAgent) -> AgentInfo:
    """Convert a library agent model to an AgentInfo."""
    return AgentInfo(
        id=agent.id,
        name=agent.name,
        description=agent.description or "",
        source="library",
        in_library=True,
        creator=agent.creator_name,
        status=agent.status.value,
        can_access_graph=agent.can_access_graph,
        has_external_trigger=agent.has_external_trigger,
        new_output=agent.new_output,
        graph_id=agent.graph_id,
    )


async def _get_library_agent_by_id(user_id: str, agent_id: str) -> AgentInfo | None:
    """Fetch a library agent by ID (library agent ID or graph_id).

    Tries multiple lookup strategies:
    1. First by graph_id (AgentGraph primary key)
    2. Then by library agent ID (LibraryAgent primary key)
    """
    lib_db = library_db()

    try:
        agent = await lib_db.get_library_agent_by_graph_id(user_id, agent_id)
        if agent:
            logger.debug(f"Found library agent by graph_id: {agent.name}")
            return _library_agent_to_info(agent)
    except NotFoundError:
        logger.debug(f"Library agent not found by graph_id: {agent_id}")
    except DatabaseError:
        raise
    except Exception as e:
        logger.warning(
            f"Could not fetch library agent by graph_id {agent_id}: {e}",
            exc_info=True,
        )

    try:
        agent = await lib_db.get_library_agent(agent_id, user_id)
        if agent:
            logger.debug(f"Found library agent by library_id: {agent.name}")
            return _library_agent_to_info(agent)
    except NotFoundError:
        logger.debug(f"Library agent not found by library_id: {agent_id}")
    except DatabaseError:
        raise
    except Exception as e:
        logger.warning(
            f"Could not fetch library agent by library_id {agent_id}: {e}",
            exc_info=True,
        )

    return None

@@ -0,0 +1,186 @@
"""Shared utilities for execution waiting and status handling."""

import asyncio
import logging
from typing import Any

from backend.data.db_accessors import execution_db
from backend.data.execution import (
    AsyncRedisExecutionEventBus,
    ExecutionStatus,
    GraphExecution,
    GraphExecutionEvent,
)

logger = logging.getLogger(__name__)

# Terminal statuses that indicate execution is complete
TERMINAL_STATUSES = frozenset(
    {
        ExecutionStatus.COMPLETED,
        ExecutionStatus.FAILED,
        ExecutionStatus.TERMINATED,
    }
)

# Statuses where execution is paused but not finished (e.g. human-in-the-loop)
PAUSED_STATUSES = frozenset(
    {
        ExecutionStatus.REVIEW,
    }
)

# Statuses that mean "stop waiting" (terminal or paused)
STOP_WAITING_STATUSES = TERMINAL_STATUSES | PAUSED_STATUSES

_POST_SUBSCRIBE_RECHECK_DELAY = 0.1  # seconds to wait for subscription to establish


async def wait_for_execution(
    user_id: str,
    graph_id: str,
    execution_id: str,
    timeout_seconds: int,
) -> GraphExecution | None:
    """
    Wait for an execution to reach a terminal or paused status using Redis pubsub.

    Handles the race condition between checking status and subscribing by
    re-checking the DB after the subscription is established.

    Args:
        user_id: User ID
        graph_id: Graph ID
        execution_id: Execution ID to wait for
        timeout_seconds: Max seconds to wait

    Returns:
        The execution with current status, or None if not found
    """
    exec_db = execution_db()

    # Quick check — maybe it's already done
    execution = await exec_db.get_graph_execution(
        user_id=user_id,
        execution_id=execution_id,
        include_node_executions=False,
    )
    if not execution:
        return None

    if execution.status in STOP_WAITING_STATUSES:
        logger.debug(
            f"Execution {execution_id} already in stop-waiting state: "
            f"{execution.status}"
        )
        return execution

    logger.info(
        f"Waiting up to {timeout_seconds}s for execution {execution_id} "
        f"(current status: {execution.status})"
    )

    event_bus = AsyncRedisExecutionEventBus()
    channel_key = f"{user_id}/{graph_id}/{execution_id}"

    # Mutable container so _subscribe_and_wait can surface the task even if
    # asyncio.wait_for cancels the coroutine before it returns.
    task_holder: list[asyncio.Task] = []

    try:
        result = await asyncio.wait_for(
            _subscribe_and_wait(
                event_bus, channel_key, user_id, execution_id, exec_db, task_holder
            ),
            timeout=timeout_seconds,
        )
        return result
    except asyncio.TimeoutError:
        logger.info(f"Timeout waiting for execution {execution_id}")
    except Exception as e:
        logger.error(f"Error waiting for execution: {e}", exc_info=True)
    finally:
        for task in task_holder:
            if not task.done():
                task.cancel()
                try:
                    await task
                except asyncio.CancelledError:
                    pass
        await event_bus.close()

    # Return current state on timeout/error
    return await exec_db.get_graph_execution(
        user_id=user_id,
        execution_id=execution_id,
        include_node_executions=False,
    )


async def _subscribe_and_wait(
    event_bus: AsyncRedisExecutionEventBus,
    channel_key: str,
    user_id: str,
    execution_id: str,
    exec_db: Any,
    task_holder: list[asyncio.Task],
) -> GraphExecution | None:
    """
    Subscribe to execution events and wait for a terminal/paused status.

    Appends the consumer task to ``task_holder`` so the caller can clean it up
    even if this coroutine is cancelled by ``asyncio.wait_for``.

    To avoid the race condition where the execution completes between the
    initial DB check and the Redis subscription, we:
    1. Start listening (which subscribes internally)
    2. Re-check the DB after subscription is active
    3. If still running, wait for pubsub events
    """
    listen_iter = event_bus.listen_events(channel_key).__aiter__()

    done = asyncio.Event()
    result_execution: GraphExecution | None = None

    async def _consume() -> None:
        nonlocal result_execution
        try:
            async for event in listen_iter:
                if isinstance(event, GraphExecutionEvent):
                    logger.debug(f"Received execution update: {event.status}")
                    if event.status in STOP_WAITING_STATUSES:
                        result_execution = await exec_db.get_graph_execution(
                            user_id=user_id,
                            execution_id=execution_id,
                            include_node_executions=False,
                        )
                        done.set()
                        return
        except Exception as e:
            logger.error(f"Error in execution consumer: {e}", exc_info=True)
            done.set()

    consume_task = asyncio.create_task(_consume())
    task_holder.append(consume_task)

    # Give the subscription a moment to establish, then re-check DB
    await asyncio.sleep(_POST_SUBSCRIBE_RECHECK_DELAY)

    execution = await exec_db.get_graph_execution(
        user_id=user_id,
        execution_id=execution_id,
        include_node_executions=False,
    )
    if execution and execution.status in STOP_WAITING_STATUSES:
        return execution

    # Wait for the pubsub consumer to find a terminal event
    await done.wait()
    return result_execution


def get_execution_outputs(execution: GraphExecution | None) -> dict[str, Any] | None:
    """Extract outputs from an execution, or return None."""
    if execution is None:
        return None
    return execution.outputs
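# Illustrative only, not part of the diff: a minimal caller of the helpers
# above. The ids are placeholders; wait_for_execution returns the freshest
# execution it could fetch, so the caller still branches on the final status.
async def report_result(user_id: str, graph_id: str, execution_id: str) -> str:
    execution = await wait_for_execution(
        user_id=user_id,
        graph_id=graph_id,
        execution_id=execution_id,
        timeout_seconds=30,
    )
    if execution is None:
        return "execution not found"
    if execution.status in TERMINAL_STATUSES:
        outputs = get_execution_outputs(execution)
        return f"finished with status {execution.status.value}: {outputs}"
    # Timed out or paused (e.g. awaiting human review)
    return f"still {execution.status.value}; check back later"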
@@ -19,9 +19,10 @@ class FindLibraryAgentTool(BaseTool):
    @property
    def description(self) -> str:
        return (
            "Search for agents in the user's library. Use this to find agents "
            "the user has already added to their library, including agents they "
            "created or added from the marketplace."
            "Search for or list agents in the user's library. Use this to find "
            "agents the user has already added to their library, including agents "
            "they created or added from the marketplace. "
            "Omit the query to list all agents."
        )

    @property
@@ -31,10 +32,13 @@ class FindLibraryAgentTool(BaseTool):
            "properties": {
                "query": {
                    "type": "string",
                    "description": "Search query to find agents by name or description.",
                    "description": (
                        "Search query to find agents by name or description. "
                        "Omit to list all agents in the library."
                    ),
                },
            },
            "required": ["query"],
            "required": [],
        }

    @property
@@ -45,7 +49,7 @@ class FindLibraryAgentTool(BaseTool):
        self, user_id: str | None, session: ChatSession, **kwargs
    ) -> ToolResponseBase:
        return await search_agents(
            query=kwargs.get("query", "").strip(),
            query=(kwargs.get("query") or "").strip(),
            source="library",
            session_id=session.session_id,
            user_id=user_id,

@@ -9,6 +9,7 @@ from backend.copilot.config import ChatConfig
from backend.copilot.model import ChatSession
from backend.copilot.tracking import track_agent_run_success, track_agent_scheduled
from backend.data.db_accessors import graph_db, library_db, user_db
from backend.data.execution import ExecutionStatus
from backend.data.graph import GraphModel
from backend.data.model import CredentialsMetaInput
from backend.executor import utils as execution_utils
@@ -20,12 +21,15 @@ from backend.util.timezone_utils import (
)

from .base import BaseTool
from .execution_utils import get_execution_outputs, wait_for_execution
from .helpers import get_inputs_from_schema
from .models import (
    AgentDetails,
    AgentDetailsResponse,
    AgentOutputResponse,
    ErrorResponse,
    ExecutionOptions,
    ExecutionOutputInfo,
    ExecutionStartedResponse,
    InputValidationErrorResponse,
    SetupInfo,
@@ -66,6 +70,7 @@ class RunAgentInput(BaseModel):
    schedule_name: str = ""
    cron: str = ""
    timezone: str = "UTC"
    wait_for_result: int = Field(default=0, ge=0, le=300)

    @field_validator(
        "username_agent_slug",
@@ -147,6 +152,14 @@ class RunAgentTool(BaseTool):
                    "type": "string",
                    "description": "IANA timezone for schedule (default: UTC)",
                },
                "wait_for_result": {
                    "type": "integer",
                    "description": (
                        "Max seconds to wait for execution to complete (0-300). "
                        "If >0, blocks until the execution finishes or times out. "
                        "Returns execution outputs when complete."
                    ),
                },
            },
            "required": [],
        }
@@ -341,6 +354,7 @@ class RunAgentTool(BaseTool):
                graph=graph,
                graph_credentials=graph_credentials,
                inputs=params.inputs,
                wait_for_result=params.wait_for_result,
            )

        except NotFoundError as e:
@@ -424,8 +438,9 @@ class RunAgentTool(BaseTool):
        graph: GraphModel,
        graph_credentials: dict[str, CredentialsMetaInput],
        inputs: dict[str, Any],
        wait_for_result: int = 0,
    ) -> ToolResponseBase:
        """Execute an agent immediately."""
        """Execute an agent immediately, optionally waiting for completion."""
        session_id = session.session_id

        # Check rate limits
@@ -462,6 +477,91 @@ class RunAgentTool(BaseTool):
            )

        library_agent_link = f"/library/agents/{library_agent.id}"

        # If wait_for_result is requested, wait for execution to complete
        if wait_for_result > 0:
            logger.info(
                f"Waiting up to {wait_for_result}s for execution {execution.id}"
            )
            completed = await wait_for_execution(
                user_id=user_id,
                graph_id=library_agent.graph_id,
                execution_id=execution.id,
                timeout_seconds=wait_for_result,
            )

            if completed and completed.status == ExecutionStatus.COMPLETED:
                outputs = get_execution_outputs(completed)
                return AgentOutputResponse(
                    message=(
                        f"Agent '{library_agent.name}' completed successfully. "
                        f"View at {library_agent_link}."
                    ),
                    session_id=session_id,
                    agent_name=library_agent.name,
                    agent_id=library_agent.graph_id,
                    library_agent_id=library_agent.id,
                    library_agent_link=library_agent_link,
                    execution=ExecutionOutputInfo(
                        execution_id=execution.id,
                        status=completed.status.value,
                        started_at=completed.started_at,
                        ended_at=completed.ended_at,
                        outputs=outputs or {},
                    ),
                )
            elif completed and completed.status == ExecutionStatus.FAILED:
                error_detail = completed.stats.error if completed.stats else None
                return ErrorResponse(
                    message=(
                        f"Agent '{library_agent.name}' execution failed. "
                        f"View details at {library_agent_link}."
                    ),
                    session_id=session_id,
                    error=error_detail,
                )
            elif completed and completed.status == ExecutionStatus.TERMINATED:
                error_detail = completed.stats.error if completed.stats else None
                return ErrorResponse(
                    message=(
                        f"Agent '{library_agent.name}' execution was terminated. "
                        f"View details at {library_agent_link}."
                    ),
                    session_id=session_id,
                    error=error_detail,
                )
            elif completed and completed.status == ExecutionStatus.REVIEW:
                return ExecutionStartedResponse(
                    message=(
                        f"Agent '{library_agent.name}' is awaiting human review. "
                        f"Check at {library_agent_link}."
                    ),
                    session_id=session_id,
                    execution_id=execution.id,
                    graph_id=library_agent.graph_id,
                    graph_name=library_agent.name,
                    library_agent_id=library_agent.id,
                    library_agent_link=library_agent_link,
                    status=ExecutionStatus.REVIEW.value,
                )
            else:
                status = completed.status.value if completed else "unknown"
                return ExecutionStartedResponse(
                    message=(
                        f"Agent '{library_agent.name}' is still {status} after "
                        f"{wait_for_result}s. Check results later at "
                        f"{library_agent_link}. "
                        f"Use view_agent_output with wait_if_running to check again."
                    ),
                    session_id=session_id,
                    execution_id=execution.id,
                    graph_id=library_agent.graph_id,
                    graph_name=library_agent.name,
                    library_agent_id=library_agent.id,
                    library_agent_link=library_agent_link,
                    status=status,
                )

        return ExecutionStartedResponse(
            message=(
                f"Agent '{library_agent.name}' execution started successfully. "
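# Illustrative only, not part of the diff: arguments that would start an agent
# and block for up to two minutes, returning outputs inline on completion.
# The slug and inputs are placeholders; the schema fields are defined above.
example_run_agent_arguments = {
    "username_agent_slug": "creator/my-agent",
    "inputs": {"topic": "weekly summary"},
    "wait_for_result": 120,  # 0 keeps the old fire-and-forget behavior
}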
@@ -214,7 +214,11 @@ class WorkspaceWriteResponse(ToolResponseBase):
    file_id: str
    name: str
    path: str
    mime_type: str
    size_bytes: int
    # workspace:// URL the agent can embed directly in chat to give the user a link.
    # Format: workspace://<file_id>#<mime_type> (frontend resolves to download URL)
    download_url: str
    source: str | None = None  # "content", "base64", or "copied from <path>"
    content_preview: str | None = None  # First 200 chars for text files

@@ -680,11 +684,21 @@ class WriteWorkspaceFileTool(BaseTool):
        except Exception:
            pass

        # Strip MIME parameters (e.g. "text/html; charset=utf-8" → "text/html")
        # and normalise to lowercase so the fragment is URL-safe.
        normalized_mime = (rec.mime_type or "").split(";", 1)[0].strip().lower()
        download_url = (
            f"workspace://{rec.id}#{normalized_mime}"
            if normalized_mime
            else f"workspace://{rec.id}"
        )
        return WorkspaceWriteResponse(
            file_id=rec.id,
            name=rec.name,
            path=rec.path,
            mime_type=normalized_mime,
            size_bytes=rec.size_bytes,
            download_url=download_url,
            source=source,
            content_preview=preview,
            message=msg,
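# Illustrative only, not part of the diff: the MIME normalization above, in
# isolation.
for raw in ("text/html; charset=utf-8", "IMAGE/PNG", "", None):
    normalized = (raw or "").split(";", 1)[0].strip().lower()
    print(repr(raw), "->", repr(normalized))  # -> 'text/html', 'image/png', '', ''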
@@ -178,9 +178,13 @@ async def test_block_credit_reset(server: SpinTestServer):
    assert month2_balance == 1100  # Balance persists, no reset

    # Now test the refill behavior when balance is low
    # Set balance below refill threshold
    # Set balance below refill threshold and backdate updatedAt to month2 so
    # the month3 refill check sees a different (month2 → month3) transition.
    # Without the explicit updatedAt, Prisma sets it to real-world NOW which
    # may share the same calendar month as the mocked month3, suppressing refill.
    await UserBalance.prisma().update(
        where={"userId": DEFAULT_USER_ID}, data={"balance": 400}
        where={"userId": DEFAULT_USER_ID},
        data={"balance": 400, "updatedAt": month2},
    )

    # Create a month 2 transaction to update the last transaction time
@@ -723,6 +723,9 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
    langfuse_host: str = Field(
        default="https://cloud.langfuse.com", description="Langfuse host URL"
    )
    langfuse_tracing_environment: str = Field(
        default="local", description="Tracing environment tag (local/dev/production)"
    )

    # PostHog analytics
    posthog_api_key: str = Field(default="", description="PostHog API key")
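# Illustrative only, not part of the diff: assuming standard pydantic
# BaseSettings env mapping, the new tracing tag could be set per deployment via
# an environment variable (the exact name depends on this Settings class's
# prefix/casing configuration, so treat it as hypothetical):
import os
os.environ.setdefault("LANGFUSE_TRACING_ENVIRONMENT", "production")  # hypothetical env name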
@@ -0,0 +1,7 @@
-- This migration adds more than one value to an enum.
-- With PostgreSQL versions 11 and earlier, this is not possible
-- in a single migration. This can be worked around by creating
-- multiple migrations, each migration adding only one value to
-- the enum.
ALTER TYPE "APIKeyPermission" ADD VALUE 'WRITE_GRAPH';
ALTER TYPE "APIKeyPermission" ADD VALUE 'WRITE_LIBRARY';
312
autogpt_platform/backend/poetry.lock
generated
@@ -1610,6 +1610,101 @@ mccabe = ">=0.7.0,<0.8.0"
|
||||
pycodestyle = ">=2.14.0,<2.15.0"
|
||||
pyflakes = ">=3.4.0,<3.5.0"
|
||||
|
||||
[[package]]
|
||||
name = "fonttools"
|
||||
version = "4.61.1"
|
||||
description = "Tools to manipulate font files"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "fonttools-4.61.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c7db70d57e5e1089a274cbb2b1fd635c9a24de809a231b154965d415d6c6d24"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5fe9fd43882620017add5eabb781ebfbc6998ee49b35bd7f8f79af1f9f99a958"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d8db08051fc9e7d8bc622f2112511b8107d8f27cd89e2f64ec45e9825e8288da"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a76d4cb80f41ba94a6691264be76435e5f72f2cb3cab0b092a6212855f71c2f6"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a13fc8aeb24bad755eea8f7f9d409438eb94e82cf86b08fe77a03fbc8f6a96b1"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b846a1fcf8beadeb9ea4f44ec5bdde393e2f1569e17d700bfc49cd69bde75881"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-win32.whl", hash = "sha256:78a7d3ab09dc47ac1a363a493e6112d8cabed7ba7caad5f54dbe2f08676d1b47"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-win_amd64.whl", hash = "sha256:eff1ac3cc66c2ac7cda1e64b4e2f3ffef474b7335f92fc3833fc632d595fcee6"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c6604b735bb12fef8e0efd5578c9fb5d3d8532d5001ea13a19cddf295673ee09"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5ce02f38a754f207f2f06557523cd39a06438ba3aafc0639c477ac409fc64e37"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77efb033d8d7ff233385f30c62c7c79271c8885d5c9657d967ede124671bbdfb"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:75c1a6dfac6abd407634420c93864a1e274ebc1c7531346d9254c0d8f6ca00f9"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0de30bfe7745c0d1ffa2b0b7048fb7123ad0d71107e10ee090fa0b16b9452e87"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:58b0ee0ab5b1fc9921eccfe11d1435added19d6494dde14e323f25ad2bc30c56"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-win32.whl", hash = "sha256:f79b168428351d11e10c5aeb61a74e1851ec221081299f4cf56036a95431c43a"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-win_amd64.whl", hash = "sha256:fe2efccb324948a11dd09d22136fe2ac8a97d6c1347cf0b58a911dcd529f66b7"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f3cb4a569029b9f291f88aafc927dd53683757e640081ca8c412781ea144565e"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:41a7170d042e8c0024703ed13b71893519a1a6d6e18e933e3ec7507a2c26a4b2"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:10d88e55330e092940584774ee5e8a6971b01fc2f4d3466a1d6c158230880796"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:15acc09befd16a0fb8a8f62bc147e1a82817542d72184acca9ce6e0aeda9fa6d"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e6bcdf33aec38d16508ce61fd81838f24c83c90a1d1b8c68982857038673d6b8"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5fade934607a523614726119164ff621e8c30e8fa1ffffbbd358662056ba69f0"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-win32.whl", hash = "sha256:75da8f28eff26defba42c52986de97b22106cb8f26515b7c22443ebc9c2d3261"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-win_amd64.whl", hash = "sha256:497c31ce314219888c0e2fce5ad9178ca83fe5230b01a5006726cdf3ac9f24d9"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c56c488ab471628ff3bfa80964372fc13504ece601e0d97a78ee74126b2045c"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dc492779501fa723b04d0ab1f5be046797fee17d27700476edc7ee9ae535a61e"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:64102ca87e84261419c3747a0d20f396eb024bdbeb04c2bfb37e2891f5fadcb5"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c1b526c8d3f615a7b1867f38a9410849c8f4aef078535742198e942fba0e9bd"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:41ed4b5ec103bd306bb68f81dc166e77409e5209443e5773cb4ed837bcc9b0d3"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b501c862d4901792adaec7c25b1ecc749e2662543f68bb194c42ba18d6eec98d"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-win32.whl", hash = "sha256:4d7092bb38c53bbc78e9255a59158b150bcdc115a1e3b3ce0b5f267dc35dd63c"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-win_amd64.whl", hash = "sha256:21e7c8d76f62ab13c9472ccf74515ca5b9a761d1bde3265152a6dc58700d895b"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:fff4f534200a04b4a36e7ae3cb74493afe807b517a09e99cb4faa89a34ed6ecd"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:d9203500f7c63545b4ce3799319fe4d9feb1a1b89b28d3cb5abd11b9dd64147e"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fa646ecec9528bef693415c79a86e733c70a4965dd938e9a226b0fc64c9d2e6c"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:11f35ad7805edba3aac1a3710d104592df59f4b957e30108ae0ba6c10b11dd75"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b931ae8f62db78861b0ff1ac017851764602288575d65b8e8ff1963fed419063"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b148b56f5de675ee16d45e769e69f87623a4944f7443850bf9a9376e628a89d2"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-win32.whl", hash = "sha256:9b666a475a65f4e839d3d10473fad6d47e0a9db14a2f4a224029c5bfde58ad2c"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-win_amd64.whl", hash = "sha256:4f5686e1fe5fce75d82d93c47a438a25bf0d1319d2843a926f741140b2b16e0c"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:e76ce097e3c57c4bcb67c5aa24a0ecdbd9f74ea9219997a707a4061fbe2707aa"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:9cfef3ab326780c04d6646f68d4b4742aae222e8b8ea1d627c74e38afcbc9d91"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a75c301f96db737e1c5ed5fd7d77d9c34466de16095a266509e13da09751bd19"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:91669ccac46bbc1d09e9273546181919064e8df73488ea087dcac3e2968df9ba"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c33ab3ca9d3ccd581d58e989d67554e42d8d4ded94ab3ade3508455fe70e65f7"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:664c5a68ec406f6b1547946683008576ef8b38275608e1cee6c061828171c118"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-win32.whl", hash = "sha256:aed04cabe26f30c1647ef0e8fbb207516fd40fe9472e9439695f5c6998e60ac5"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-win_amd64.whl", hash = "sha256:2180f14c141d2f0f3da43f3a81bc8aa4684860f6b0e6f9e165a4831f24e6a23b"},
|
||||
{file = "fonttools-4.61.1-py3-none-any.whl", hash = "sha256:17d2bf5d541add43822bcf0c43d7d847b160c9bb01d15d5007d84e2217aaa371"},
|
||||
{file = "fonttools-4.61.1.tar.gz", hash = "sha256:6675329885c44657f826ef01d9e4fb33b9158e9d93c537d84ad8399539bc6f69"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
all = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\"", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.45.0)", "unicodedata2 (>=17.0.0) ; python_version <= \"3.14\"", "xattr ; sys_platform == \"darwin\"", "zopfli (>=0.1.4)"]
|
||||
graphite = ["lz4 (>=1.7.4.2)"]
|
||||
interpolatable = ["munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\""]
|
||||
lxml = ["lxml (>=4.0)"]
|
||||
pathops = ["skia-pathops (>=0.5.0)"]
|
||||
plot = ["matplotlib"]
|
||||
repacker = ["uharfbuzz (>=0.45.0)"]
|
||||
symfont = ["sympy"]
|
||||
type1 = ["xattr ; sys_platform == \"darwin\""]
|
||||
unicode = ["unicodedata2 (>=17.0.0) ; python_version <= \"3.14\""]
|
||||
woff = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "zopfli (>=0.1.4)"]
|
||||
|
||||
[[package]]
|
||||
name = "fpdf2"
|
||||
version = "2.8.6"
|
||||
description = "Simple & fast PDF generation for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "fpdf2-2.8.6-py3-none-any.whl", hash = "sha256:464658b896c6b0fcbf883abb316b8f0a52d582eb959d71822ba254d6c790bfdd"},
|
||||
{file = "fpdf2-2.8.6.tar.gz", hash = "sha256:5132f26bbeee69a7ca6a292e4da1eb3241147b5aea9348b35e780ecd02bf5fc2"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
defusedxml = "*"
|
||||
fonttools = ">=4.34.0"
|
||||
Pillow = ">=8.3.2,<9.2.dev0 || >=9.3.dev0"
|
||||
|
||||
[package.extras]
|
||||
dev = ["bandit", "black", "mypy", "pre-commit", "pylint", "pyright", "semgrep", "zizmor"]
|
||||
docs = ["lxml", "mkdocs", "mkdocs-git-revision-date-localized-plugin", "mkdocs-include-markdown-plugin", "mkdocs-macros-plugin", "mkdocs-material", "mkdocs-minify-plugin", "mkdocs-redirects", "mkdocs-with-pdf", "mknotebooks", "pdoc3"]
|
||||
test = ["brotli", "camelot-py[base]", "endesive[full]", "pytest", "pytest-cov", "qrcode", "tabula-py", "typing-extensions (>=4.0) ; python_version < \"3.11\"", "uharfbuzz"]
|
||||
|
||||
[[package]]
|
||||
name = "frozenlist"
|
||||
version = "1.8.0"
|
||||
@@ -3135,6 +3230,39 @@ pydantic = ">=1.10.7,<3.0"
|
||||
requests = ">=2,<3"
|
||||
wrapt = ">=1.14,<2.0"
|
||||
|
||||
[[package]]
|
||||
name = "langsmith"
|
||||
version = "0.7.7"
|
||||
description = "Client library to connect to the LangSmith Observability and Evaluation Platform."
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "langsmith-0.7.7-py3-none-any.whl", hash = "sha256:ef3d0aff77917bf3776368e90f387df5ffd7cb7cff11ece0ec4fd227e433b5de"},
|
||||
{file = "langsmith-0.7.7.tar.gz", hash = "sha256:2294d3c4a5a8205ef38880c1c412d85322e6055858ae999ef6641c815995d437"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
httpx = ">=0.23.0,<1"
|
||||
orjson = {version = ">=3.9.14", markers = "platform_python_implementation != \"PyPy\""}
|
||||
packaging = ">=23.2"
|
||||
pydantic = ">=2,<3"
|
||||
requests = ">=2.0.0"
|
||||
requests-toolbelt = ">=1.0.0"
|
||||
uuid-utils = ">=0.12.0,<1.0"
|
||||
xxhash = ">=3.0.0"
|
||||
zstandard = ">=0.23.0"
|
||||
|
||||
[package.extras]
|
||||
claude-agent-sdk = ["claude-agent-sdk (>=0.1.0) ; python_version >= \"3.10\""]
|
||||
google-adk = ["google-adk (>=1.0.0)", "wrapt (>=1.16.0)"]
|
||||
langsmith-pyo3 = ["langsmith-pyo3 (>=0.1.0rc2)"]
|
||||
openai-agents = ["openai-agents (>=0.0.3)"]
|
||||
otel = ["opentelemetry-api (>=1.30.0)", "opentelemetry-exporter-otlp-proto-http (>=1.30.0)", "opentelemetry-sdk (>=1.30.0)"]
|
||||
pytest = ["pytest (>=7.0.0)", "rich (>=13.9.4)", "vcrpy (>=7.0.0)"]
|
||||
sandbox = ["websockets (>=15.0)"]
|
||||
vcr = ["vcrpy (>=7.0.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "launchdarkly-eventsource"
|
||||
version = "1.5.1"
|
||||
@@ -7652,6 +7780,38 @@ h2 = ["h2 (>=4,<5)"]
|
||||
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
|
||||
zstd = ["backports-zstd (>=1.0.0) ; python_version < \"3.14\""]
|
||||
|
||||
[[package]]
|
||||
name = "uuid-utils"
|
||||
version = "0.14.1"
|
||||
description = "Fast, drop-in replacement for Python's uuid module, powered by Rust."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:93a3b5dc798a54a1feb693f2d1cb4cf08258c32ff05ae4929b5f0a2ca624a4f0"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:ccd65a4b8e83af23eae5e56d88034b2fe7264f465d3e830845f10d1591b81741"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b56b0cacd81583834820588378e432b0696186683b813058b707aedc1e16c4b1"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb3cf14de789097320a3c56bfdfdd51b1225d11d67298afbedee7e84e3837c96"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60e0854a90d67f4b0cc6e54773deb8be618f4c9bad98d3326f081423b5d14fae"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce6743ba194de3910b5feb1a62590cd2587e33a73ab6af8a01b642ceb5055862"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:043fb58fde6cf1620a6c066382f04f87a8e74feb0f95a585e4ed46f5d44af57b"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:c915d53f22945e55fe0d3d3b0b87fd965a57f5fd15666fd92d6593a73b1dd297"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:0972488e3f9b449e83f006ead5a0e0a33ad4a13e4462e865b7c286ab7d7566a3"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:1c238812ae0c8ffe77d8d447a32c6dfd058ea4631246b08b5a71df586ff08531"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:bec8f8ef627af86abf8298e7ec50926627e29b34fa907fcfbedb45aaa72bca43"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-win32.whl", hash = "sha256:b54d6aa6252d96bac1fdbc80d26ba71bad9f220b2724d692ad2f2310c22ef523"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-win_amd64.whl", hash = "sha256:fc27638c2ce267a0ce3e06828aff786f91367f093c80625ee21dad0208e0f5ba"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-win_arm64.whl", hash = "sha256:b04cb49b42afbc4ff8dbc60cf054930afc479d6f4dd7f1ec3bbe5dbfdde06b7a"},
|
||||
{file = "uuid_utils-0.14.1-pp311-pypy311_pp73-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:b197cd5424cf89fb019ca7f53641d05bfe34b1879614bed111c9c313b5574cd8"},
|
||||
{file = "uuid_utils-0.14.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:12c65020ba6cb6abe1d57fcbfc2d0ea0506c67049ee031714057f5caf0f9bc9c"},
|
||||
{file = "uuid_utils-0.14.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b5d2ad28063d422ccc2c28d46471d47b61a58de885d35113a8f18cb547e25bf"},
|
||||
{file = "uuid_utils-0.14.1-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:da2234387b45fde40b0fedfee64a0ba591caeea9c48c7698ab6e2d85c7991533"},
|
||||
{file = "uuid_utils-0.14.1-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50fffc2827348c1e48972eed3d1c698959e63f9d030aa5dd82ba451113158a62"},
|
||||
{file = "uuid_utils-0.14.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1dbe718765f70f5b7f9b7f66b6a937802941b1cc56bcf642ce0274169741e01"},
|
||||
{file = "uuid_utils-0.14.1-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:258186964039a8e36db10810c1ece879d229b01331e09e9030bc5dcabe231bd2"},
|
||||
{file = "uuid_utils-0.14.1.tar.gz", hash = "sha256:9bfc95f64af80ccf129c604fb6b8ca66c6f256451e32bc4570f760e4309c9b69"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "uvicorn"
|
||||
version = "0.40.0"
|
||||
@@ -8197,6 +8357,156 @@ cffi = ">=1.16.0"
|
||||
[package.extras]
|
||||
test = ["pytest"]
|
||||
|
||||
[[package]]
|
||||
name = "xxhash"
|
||||
version = "3.6.0"
|
||||
description = "Python binding for xxHash"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "xxhash-3.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:87ff03d7e35c61435976554477a7f4cd1704c3596a89a8300d5ce7fc83874a71"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f572dfd3d0e2eb1a57511831cf6341242f5a9f8298a45862d085f5b93394a27d"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:89952ea539566b9fed2bbd94e589672794b4286f342254fad28b149f9615fef8"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48e6f2ffb07a50b52465a1032c3cf1f4a5683f944acaca8a134a2f23674c2058"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b5b848ad6c16d308c3ac7ad4ba6bede80ed5df2ba8ed382f8932df63158dd4b2"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a034590a727b44dd8ac5914236a7b8504144447a9682586c3327e935f33ec8cc"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8a8f1972e75ebdd161d7896743122834fe87378160c20e97f8b09166213bf8cc"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ee34327b187f002a596d7b167ebc59a1b729e963ce645964bbc050d2f1b73d07"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:339f518c3c7a850dd033ab416ea25a692759dc7478a71131fe8869010d2b75e4"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:bf48889c9630542d4709192578aebbd836177c9f7a4a2778a7d6340107c65f06"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:5576b002a56207f640636056b4160a378fe36a58db73ae5c27a7ec8db35f71d4"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af1f3278bd02814d6dedc5dec397993b549d6f16c19379721e5a1d31e132c49b"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-win32.whl", hash = "sha256:aed058764db109dc9052720da65fafe84873b05eb8b07e5e653597951af57c3b"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:e82da5670f2d0d98950317f82a0e4a0197150ff19a6df2ba40399c2a3b9ae5fb"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-win_arm64.whl", hash = "sha256:4a082ffff8c6ac07707fb6b671caf7c6e020c75226c561830b73d862060f281d"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b47bbd8cf2d72797f3c2772eaaac0ded3d3af26481a26d7d7d41dc2d3c46b04a"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2b6821e94346f96db75abaa6e255706fb06ebd530899ed76d32cd99f20dc52fa"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d0a9751f71a1a65ce3584e9cae4467651c7e70c9d31017fa57574583a4540248"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b29ee68625ab37b04c0b40c3fafdf24d2f75ccd778333cfb698f65f6c463f62"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6812c25fe0d6c36a46ccb002f40f27ac903bf18af9f6dd8f9669cb4d176ab18f"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4ccbff013972390b51a18ef1255ef5ac125c92dc9143b2d1909f59abc765540e"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:297b7fbf86c82c550e12e8fb71968b3f033d27b874276ba3624ea868c11165a8"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dea26ae1eb293db089798d3973a5fc928a18fdd97cc8801226fae705b02b14b0"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7a0b169aafb98f4284f73635a8e93f0735f9cbde17bd5ec332480484241aaa77"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:08d45aef063a4531b785cd72de4887766d01dc8f362a515693df349fdb825e0c"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:929142361a48ee07f09121fe9e96a84950e8d4df3bb298ca5d88061969f34d7b"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:51312c768403d8540487dbbfb557454cfc55589bbde6424456951f7fcd4facb3"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-win32.whl", hash = "sha256:d1927a69feddc24c987b337ce81ac15c4720955b667fe9b588e02254b80446fd"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:26734cdc2d4ffe449b41d186bbeac416f704a482ed835d375a5c0cb02bc63fef"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-win_arm64.whl", hash = "sha256:d72f67ef8bf36e05f5b6c65e8524f265bd61071471cd4cf1d36743ebeeeb06b7"},
    {file = "xxhash-3.6.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:01362c4331775398e7bb34e3ab403bc9ee9f7c497bc7dee6272114055277dd3c"},
    {file = "xxhash-3.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b7b2df81a23f8cb99656378e72501b2cb41b1827c0f5a86f87d6b06b69f9f204"},
    {file = "xxhash-3.6.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:dc94790144e66b14f67b10ac8ed75b39ca47536bf8800eb7c24b50271ea0c490"},
    {file = "xxhash-3.6.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93f107c673bccf0d592cdba077dedaf52fe7f42dcd7676eba1f6d6f0c3efffd2"},
    {file = "xxhash-3.6.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2aa5ee3444c25b69813663c9f8067dcfaa2e126dc55e8dddf40f4d1c25d7effa"},
    {file = "xxhash-3.6.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f7f99123f0e1194fa59cc69ad46dbae2e07becec5df50a0509a808f90a0f03f0"},
    {file = "xxhash-3.6.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:49e03e6fe2cac4a1bc64952dd250cf0dbc5ef4ebb7b8d96bce82e2de163c82a2"},
    {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bd17fede52a17a4f9a7bc4472a5867cb0b160deeb431795c0e4abe158bc784e9"},
    {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:6fb5f5476bef678f69db04f2bd1efbed3030d2aba305b0fc1773645f187d6a4e"},
    {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:843b52f6d88071f87eba1631b684fcb4b2068cd2180a0224122fe4ef011a9374"},
    {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7d14a6cfaf03b1b6f5f9790f76880601ccc7896aff7ab9cd8978a939c1eb7e0d"},
    {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:418daf3db71e1413cfe211c2f9a528456936645c17f46b5204705581a45390ae"},
    {file = "xxhash-3.6.0-cp312-cp312-win32.whl", hash = "sha256:50fc255f39428a27299c20e280d6193d8b63b8ef8028995323bf834a026b4fbb"},
    {file = "xxhash-3.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:c0f2ab8c715630565ab8991b536ecded9416d615538be8ecddce43ccf26cbc7c"},
    {file = "xxhash-3.6.0-cp312-cp312-win_arm64.whl", hash = "sha256:eae5c13f3bc455a3bbb68bdc513912dc7356de7e2280363ea235f71f54064829"},
    {file = "xxhash-3.6.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:599e64ba7f67472481ceb6ee80fa3bd828fd61ba59fb11475572cc5ee52b89ec"},
    {file = "xxhash-3.6.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7d8b8aaa30fca4f16f0c84a5c8d7ddee0e25250ec2796c973775373257dde8f1"},
    {file = "xxhash-3.6.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d597acf8506d6e7101a4a44a5e428977a51c0fadbbfd3c39650cca9253f6e5a6"},
    {file = "xxhash-3.6.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:858dc935963a33bc33490128edc1c12b0c14d9c7ebaa4e387a7869ecc4f3e263"},
    {file = "xxhash-3.6.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ba284920194615cb8edf73bf52236ce2e1664ccd4a38fdb543506413529cc546"},
    {file = "xxhash-3.6.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4b54219177f6c6674d5378bd862c6aedf64725f70dd29c472eaae154df1a2e89"},
    {file = "xxhash-3.6.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:42c36dd7dbad2f5238950c377fcbf6811b1cdb1c444fab447960030cea60504d"},
    {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f22927652cba98c44639ffdc7aaf35828dccf679b10b31c4ad72a5b530a18eb7"},
    {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b45fad44d9c5c119e9c6fbf2e1c656a46dc68e280275007bbfd3d572b21426db"},
    {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:6f2580ffab1a8b68ef2b901cde7e55fa8da5e4be0977c68f78fc80f3c143de42"},
    {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:40c391dd3cd041ebc3ffe6f2c862f402e306eb571422e0aa918d8070ba31da11"},
    {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f205badabde7aafd1a31e8ca2a3e5a763107a71c397c4481d6a804eb5063d8bd"},
    {file = "xxhash-3.6.0-cp313-cp313-win32.whl", hash = "sha256:2577b276e060b73b73a53042ea5bd5203d3e6347ce0d09f98500f418a9fcf799"},
    {file = "xxhash-3.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:757320d45d2fbcce8f30c42a6b2f47862967aea7bf458b9625b4bbe7ee390392"},
    {file = "xxhash-3.6.0-cp313-cp313-win_arm64.whl", hash = "sha256:457b8f85dec5825eed7b69c11ae86834a018b8e3df5e77783c999663da2f96d6"},
    {file = "xxhash-3.6.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a42e633d75cdad6d625434e3468126c73f13f7584545a9cf34e883aa1710e702"},
    {file = "xxhash-3.6.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:568a6d743219e717b07b4e03b0a828ce593833e498c3b64752e0f5df6bfe84db"},
    {file = "xxhash-3.6.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:bec91b562d8012dae276af8025a55811b875baace6af510412a5e58e3121bc54"},
    {file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78e7f2f4c521c30ad5e786fdd6bae89d47a32672a80195467b5de0480aa97b1f"},
    {file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3ed0df1b11a79856df5ffcab572cbd6b9627034c1c748c5566fa79df9048a7c5"},
    {file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0e4edbfc7d420925b0dd5e792478ed393d6e75ff8fc219a6546fb446b6a417b1"},
    {file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fba27a198363a7ef87f8c0f6b171ec36b674fe9053742c58dd7e3201c1ab30ee"},
    {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:794fe9145fe60191c6532fa95063765529770edcdd67b3d537793e8004cabbfd"},
    {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:6105ef7e62b5ac73a837778efc331a591d8442f8ef5c7e102376506cb4ae2729"},
    {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:f01375c0e55395b814a679b3eea205db7919ac2af213f4a6682e01220e5fe292"},
    {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:d706dca2d24d834a4661619dcacf51a75c16d65985718d6a7d73c1eeeb903ddf"},
    {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5f059d9faeacd49c0215d66f4056e1326c80503f51a1532ca336a385edadd033"},
    {file = "xxhash-3.6.0-cp313-cp313t-win32.whl", hash = "sha256:1244460adc3a9be84731d72b8e80625788e5815b68da3da8b83f78115a40a7ec"},
    {file = "xxhash-3.6.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b1e420ef35c503869c4064f4a2f2b08ad6431ab7b229a05cce39d74268bca6b8"},
    {file = "xxhash-3.6.0-cp313-cp313t-win_arm64.whl", hash = "sha256:ec44b73a4220623235f67a996c862049f375df3b1052d9899f40a6382c32d746"},
    {file = "xxhash-3.6.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a40a3d35b204b7cc7643cbcf8c9976d818cb47befcfac8bbefec8038ac363f3e"},
    {file = "xxhash-3.6.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a54844be970d3fc22630b32d515e79a90d0a3ddb2644d8d7402e3c4c8da61405"},
    {file = "xxhash-3.6.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:016e9190af8f0a4e3741343777710e3d5717427f175adfdc3e72508f59e2a7f3"},
    {file = "xxhash-3.6.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4f6f72232f849eb9d0141e2ebe2677ece15adfd0fa599bc058aad83c714bb2c6"},
    {file = "xxhash-3.6.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:63275a8aba7865e44b1813d2177e0f5ea7eadad3dd063a21f7cf9afdc7054063"},
    {file = "xxhash-3.6.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cd01fa2aa00d8b017c97eb46b9a794fbdca53fc14f845f5a328c71254b0abb7"},
    {file = "xxhash-3.6.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0226aa89035b62b6a86d3c68df4d7c1f47a342b8683da2b60cedcddb46c4d95b"},
    {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c6e193e9f56e4ca4923c61238cdaced324f0feac782544eb4c6d55ad5cc99ddd"},
    {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:9176dcaddf4ca963d4deb93866d739a343c01c969231dbe21680e13a5d1a5bf0"},
    {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:c1ce4009c97a752e682b897aa99aef84191077a9433eb237774689f14f8ec152"},
    {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:8cb2f4f679b01513b7adbb9b1b2f0f9cdc31b70007eaf9d59d0878809f385b11"},
    {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:653a91d7c2ab54a92c19ccf43508b6a555440b9be1bc8be553376778be7f20b5"},
    {file = "xxhash-3.6.0-cp314-cp314-win32.whl", hash = "sha256:a756fe893389483ee8c394d06b5ab765d96e68fbbfe6fde7aa17e11f5720559f"},
    {file = "xxhash-3.6.0-cp314-cp314-win_amd64.whl", hash = "sha256:39be8e4e142550ef69629c9cd71b88c90e9a5db703fecbcf265546d9536ca4ad"},
    {file = "xxhash-3.6.0-cp314-cp314-win_arm64.whl", hash = "sha256:25915e6000338999236f1eb68a02a32c3275ac338628a7eaa5a269c401995679"},
    {file = "xxhash-3.6.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c5294f596a9017ca5a3e3f8884c00b91ab2ad2933cf288f4923c3fd4346cf3d4"},
    {file = "xxhash-3.6.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1cf9dcc4ab9cff01dfbba78544297a3a01dafd60f3bde4e2bfd016cf7e4ddc67"},
    {file = "xxhash-3.6.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:01262da8798422d0685f7cef03b2bd3f4f46511b02830861df548d7def4402ad"},
    {file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51a73fb7cb3a3ead9f7a8b583ffd9b8038e277cdb8cb87cf890e88b3456afa0b"},
    {file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b9c6df83594f7df8f7f708ce5ebeacfc69f72c9fbaaababf6cf4758eaada0c9b"},
    {file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:627f0af069b0ea56f312fd5189001c24578868643203bca1abbc2c52d3a6f3ca"},
    {file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa912c62f842dfd013c5f21a642c9c10cd9f4c4e943e0af83618b4a404d9091a"},
    {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b465afd7909db30168ab62afe40b2fcf79eedc0b89a6c0ab3123515dc0df8b99"},
    {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:a881851cf38b0a70e7c4d3ce81fc7afd86fbc2a024f4cfb2a97cf49ce04b75d3"},
    {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9b3222c686a919a0f3253cfc12bb118b8b103506612253b5baeaac10d8027cf6"},
    {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:c5aa639bc113e9286137cec8fadc20e9cd732b2cc385c0b7fa673b84fc1f2a93"},
    {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5c1343d49ac102799905e115aee590183c3921d475356cb24b4de29a4bc56518"},
    {file = "xxhash-3.6.0-cp314-cp314t-win32.whl", hash = "sha256:5851f033c3030dd95c086b4a36a2683c2ff4a799b23af60977188b057e467119"},
    {file = "xxhash-3.6.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0444e7967dac37569052d2409b00a8860c2135cff05502df4da80267d384849f"},
    {file = "xxhash-3.6.0-cp314-cp314t-win_arm64.whl", hash = "sha256:bb79b1e63f6fd84ec778a4b1916dfe0a7c3fdb986c06addd5db3a0d413819d95"},
    {file = "xxhash-3.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7dac94fad14a3d1c92affb661021e1d5cbcf3876be5f5b4d90730775ccb7ac41"},
    {file = "xxhash-3.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6965e0e90f1f0e6cb78da568c13d4a348eeb7f40acfd6d43690a666a459458b8"},
    {file = "xxhash-3.6.0-cp38-cp38-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2ab89a6b80f22214b43d98693c30da66af910c04f9858dd39c8e570749593d7e"},
    {file = "xxhash-3.6.0-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4903530e866b7a9c1eadfd3fa2fbe1b97d3aed4739a80abf506eb9318561c850"},
    {file = "xxhash-3.6.0-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4da8168ae52c01ac64c511d6f4a709479da8b7a4a1d7621ed51652f93747dffa"},
    {file = "xxhash-3.6.0-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:97460eec202017f719e839a0d3551fbc0b2fcc9c6c6ffaa5af85bbd5de432788"},
    {file = "xxhash-3.6.0-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:45aae0c9df92e7fa46fbb738737324a563c727990755ec1965a6a339ea10a1df"},
    {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:0d50101e57aad86f4344ca9b32d091a2135a9d0a4396f19133426c88025b09f1"},
    {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9085e798c163ce310d91f8aa6b325dda3c2944c93c6ce1edb314030d4167cc65"},
    {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:a87f271a33fad0e5bf3be282be55d78df3a45ae457950deb5241998790326f87"},
    {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:9e040d3e762f84500961791fa3709ffa4784d4dcd7690afc655c095e02fff05f"},
    {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b0359391c3dad6de872fefb0cf5b69d55b0655c55ee78b1bb7a568979b2ce96b"},
    {file = "xxhash-3.6.0-cp38-cp38-win32.whl", hash = "sha256:e4ff728a2894e7f436b9e94c667b0f426b9c74b71f900cf37d5468c6b5da0536"},
    {file = "xxhash-3.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:01be0c5b500c5362871fc9cfdf58c69b3e5c4f531a82229ddb9eb1eb14138004"},
    {file = "xxhash-3.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cc604dc06027dbeb8281aeac5899c35fcfe7c77b25212833709f0bff4ce74d2a"},
    {file = "xxhash-3.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:277175a73900ad43a8caeb8b99b9604f21fe8d7c842f2f9061a364a7e220ddb7"},
    {file = "xxhash-3.6.0-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cfbc5b91397c8c2972fdac13fb3e4ed2f7f8ccac85cd2c644887557780a9b6e2"},
    {file = "xxhash-3.6.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2762bfff264c4e73c0e507274b40634ff465e025f0eaf050897e88ec8367575d"},
    {file = "xxhash-3.6.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2f171a900d59d51511209f7476933c34a0c2c711078d3c80e74e0fe4f38680ec"},
    {file = "xxhash-3.6.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:780b90c313348f030b811efc37b0fa1431163cb8db8064cf88a7936b6ce5f222"},
    {file = "xxhash-3.6.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b242455eccdfcd1fa4134c431a30737d2b4f045770f8fe84356b3469d4b919"},
    {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a75ffc1bd5def584129774c158e108e5d768e10b75813f2b32650bb041066ed6"},
    {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1fc1ed882d1e8df932a66e2999429ba6cc4d5172914c904ab193381fba825360"},
    {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:44e342e8cc11b4e79dae5c57f2fb6360c3c20cc57d32049af8f567f5b4bcb5f4"},
    {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c2f9ccd5c4be370939a2e17602fbc49995299203da72a3429db013d44d590e86"},
    {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:02ea4cb627c76f48cd9fb37cf7ab22bd51e57e1b519807234b473faebe526796"},
    {file = "xxhash-3.6.0-cp39-cp39-win32.whl", hash = "sha256:6551880383f0e6971dc23e512c9ccc986147ce7bfa1cd2e4b520b876c53e9f3d"},
    {file = "xxhash-3.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:7c35c4cdc65f2a29f34425c446f2f5cdcd0e3c34158931e1cc927ece925ab802"},
    {file = "xxhash-3.6.0-cp39-cp39-win_arm64.whl", hash = "sha256:ffc578717a347baf25be8397cb10d2528802d24f94cfc005c0e44fef44b5cdd6"},
    {file = "xxhash-3.6.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0f7b7e2ec26c1666ad5fc9dbfa426a6a3367ceaf79db5dd76264659d509d73b0"},
    {file = "xxhash-3.6.0-pp311-pypy311_pp73-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5dc1e14d14fa0f5789ec29a7062004b5933964bb9b02aae6622b8f530dc40296"},
    {file = "xxhash-3.6.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:881b47fc47e051b37d94d13e7455131054b56749b91b508b0907eb07900d1c13"},
    {file = "xxhash-3.6.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c6dc31591899f5e5666f04cc2e529e69b4072827085c1ef15294d91a004bc1bd"},
    {file = "xxhash-3.6.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:15e0dac10eb9309508bfc41f7f9deaa7755c69e35af835db9cb10751adebc35d"},
    {file = "xxhash-3.6.0.tar.gz", hash = "sha256:f0162a78b13a0d7617b2845b90c763339d1f1d82bb04a4b07f4ab535cc5e05d6"},
]

[[package]]
name = "yarl"
version = "1.22.0"
@@ -8530,4 +8840,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
content-hash = "3ef62836d8321b9a3b8e897dade8dc6ca9022fd9468c53f384b0871b521ab343"
content-hash = "e7863413fda5e0a8b236e39a4c37390b52ae8c2f572c77df732abbd4280312b6"

@@ -89,6 +89,8 @@ croniter = "^6.0.0"
stagehand = "^0.5.1"
gravitas-md2gdocs = "^0.1.0"
posthog = "^7.6.0"
fpdf2 = "^2.8.6"
langsmith = "^0.7.7"

[tool.poetry.group.dev.dependencies]
aiohappyeyeballs = "^2.6.1"

@@ -1130,9 +1130,11 @@ enum APIKeyPermission {
  IDENTITY // Info about the authenticated user
  EXECUTE_GRAPH // Can execute agent graphs
  READ_GRAPH // Can get graph versions and details
  WRITE_GRAPH // Can create and update agent graphs
  EXECUTE_BLOCK // Can execute individual blocks
  READ_BLOCK // Can get block information
  READ_STORE // Can read store agents and creators
  WRITE_LIBRARY // Can add agents to library
  USE_TOOLS // Can use chat tools via external API
  MANAGE_INTEGRATIONS // Can initiate OAuth flows and complete them
  READ_INTEGRATIONS // Can list credentials and providers

@@ -6,6 +6,7 @@ const config: StorybookConfig = {
    "../src/components/tokens/**/*.stories.@(js|jsx|mjs|ts|tsx)",
    "../src/components/atoms/**/*.stories.@(js|jsx|mjs|ts|tsx)",
    "../src/components/molecules/**/*.stories.@(js|jsx|mjs|ts|tsx)",
    "../src/components/ai-elements/**/*.stories.@(js|jsx|mjs|ts|tsx)",
  ],
  addons: [
    "@storybook/addon-a11y",

@@ -19,6 +19,8 @@ const SCOPE_DESCRIPTIONS: { [key in APIKeyPermission]: string } = {
  IDENTITY: "View your user ID, e-mail, and timezone",
  EXECUTE_GRAPH: "Run your agents",
  READ_GRAPH: "View your agents and their configurations",
  WRITE_GRAPH: "Create agent graphs",
  WRITE_LIBRARY: "Add agents to your library",
  EXECUTE_BLOCK: "Execute individual blocks",
  READ_BLOCK: "View available blocks",
  READ_STORE: "Access the Marketplace",

@@ -46,7 +46,7 @@ export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => {
    <div className="space-y-2">
      <Text variant="small-medium">Input</Text>

      <ContentRenderer value={latestInputData} shortContent={false} />
      <ContentRenderer value={latestInputData} shortContent={true} />

      <div className="mt-1 flex justify-end gap-1">
        <NodeDataViewer
@@ -98,7 +98,7 @@ export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => {
          Data:
        </Text>
        <div className="relative space-y-2">
          {value.map((item, index) => (
          {value.slice(0, 3).map((item, index) => (
            <div key={index}>
              <ContentRenderer
                value={item}

@@ -37,15 +37,15 @@ export const ContentRenderer: React.FC<{
    !shortContent
  ) {
    return (
      <div className="[&>*]:rounded-xlarge [&>*]:!text-xs">
      <div className="overflow-hidden [&>*]:rounded-xlarge [&>*]:!text-xs [&_pre]:whitespace-pre-wrap [&_pre]:break-words">
        {renderer?.render(value, metadata)}
      </div>
    );
  }

  return (
    <div className="[&>*]:rounded-xlarge [&>*]:!text-xs">
      <TextRenderer value={value} truncateLengthLimit={100} />
    <div className="overflow-hidden [&>*]:rounded-xlarge [&>*]:!text-xs">
      <TextRenderer value={value} truncateLengthLimit={200} />
    </div>
  );
};

@@ -1,4 +1,3 @@
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import {
@@ -164,129 +163,119 @@ export const NodeDataViewer: FC<NodeDataViewerProps> = ({
          </div>
        </div>

        <div className="flex-1 overflow-hidden">
          <ScrollArea className="h-full">
            <div className="my-4">
              {shouldGroupExecutions ? (
                <div className="space-y-4">
                  {groupedExecutions.map((execution) => (
                    <div
                      key={execution.execId}
                      className="rounded-3xl border border-slate-200 bg-white p-4 shadow-sm"
                    >
                      <div className="flex items-center gap-2">
                        <Text variant="body" className="text-slate-600">
                          Execution ID:
                        </Text>
                        <Text
                          variant="body-medium"
                          className="rounded-full border border-gray-300 bg-gray-50 px-2 py-1 font-mono text-xs"
                        >
                          {execution.execId}
                        </Text>
                      </div>
                      <div className="mt-2 space-y-4">
                        {execution.outputItems.length > 0 ? (
                          execution.outputItems.map((item, index) => (
                            <div
                              key={item.key}
                              className="group flex items-start gap-4"
                            >
                              <div className="w-full flex-1">
                                <OutputItem
                                  value={item.value}
                                  metadata={item.metadata}
                                  renderer={item.renderer}
                                />
                              </div>

                              <div className="flex w-fit gap-3">
                                <Button
                                  variant="secondary"
                                  className="min-w-0 p-1"
                                  size="icon"
                                  onClick={() =>
                                    handleCopyGroupedItem(
                                      execution.execId,
                                      index,
                                      item,
                                    )
                                  }
                                  aria-label="Copy item"
                                >
                                  {copiedKey ===
                                  `${execution.execId}-${index}` ? (
                                    <CheckIcon className="size-4 text-green-600" />
                                  ) : (
                                    <CopyIcon className="size-4 text-black" />
                                  )}
                                </Button>
                                <Button
                                  variant="secondary"
                                  size="icon"
                                  className="min-w-0 p-1"
                                  onClick={() =>
                                    handleDownloadGroupedItem(item)
                                  }
                                  aria-label="Download item"
                                >
                                  <DownloadIcon className="size-4 text-black" />
                                </Button>
                              </div>
        <div className="flex-1">
          <div className="my-4">
            {shouldGroupExecutions ? (
              <div className="space-y-4">
                {groupedExecutions.map((execution) => (
                  <div
                    key={execution.execId}
                    className="rounded-3xl border border-slate-200 bg-white p-4 shadow-sm"
                  >
                    <div className="flex items-center gap-2">
                      <Text variant="body" className="text-slate-600">
                        Execution ID:
                      </Text>
                      <Text
                        variant="body-medium"
                        className="rounded-full border border-gray-300 bg-gray-50 px-2 py-1 font-mono text-xs"
                      >
                        {execution.execId}
                      </Text>
                    </div>
                    <div className="mt-2 space-y-4">
                      {execution.outputItems.length > 0 ? (
                        execution.outputItems.map((item, index) => (
                          <div key={item.key} className="group">
                            <OutputItem
                              value={item.value}
                              metadata={item.metadata}
                              renderer={item.renderer}
                            />
                            <div className="mt-2 flex gap-3">
                              <Button
                                variant="secondary"
                                className="min-w-0 p-1"
                                size="icon"
                                onClick={() =>
                                  handleCopyGroupedItem(
                                    execution.execId,
                                    index,
                                    item,
                                  )
                                }
                                aria-label="Copy item"
                              >
                                {copiedKey ===
                                `${execution.execId}-${index}` ? (
                                  <CheckIcon className="size-4 text-green-600" />
                                ) : (
                                  <CopyIcon className="size-4 text-black" />
                                )}
                              </Button>
                              <Button
                                variant="secondary"
                                size="icon"
                                className="min-w-0 p-1"
                                onClick={() => handleDownloadGroupedItem(item)}
                                aria-label="Download item"
                              >
                                <DownloadIcon className="size-4 text-black" />
                              </Button>
                            </div>
                          ))
                        ) : (
                          <div className="py-4 text-center text-gray-500">
                            No data available
                          </div>
                          ))
                        ) : (
                          <div className="py-4 text-center text-gray-500">
                            No data available
                          </div>
                        )}
                      </div>
                    </div>
                  ))}
                </div>
              ) : dataArray.length > 0 ? (
                <div className="space-y-4">
                  {outputItems.map((item, index) => (
                    <div key={item.key} className="group">
                      <OutputItem
                        value={item.value}
                        metadata={item.metadata}
                        renderer={item.renderer}
                      />
                      <div className="mt-2 flex gap-3">
                        <Button
                          variant="secondary"
                          className="min-w-0 p-1"
                          size="icon"
                          onClick={() => handleCopyItem(index)}
                          aria-label="Copy item"
                        >
                          {copiedIndex === index ? (
                            <CheckIcon className="size-4 text-green-600" />
                          ) : (
                            <CopyIcon className="size-4 text-black" />
                          )}
                        </div>
                        </Button>
                        <Button
                          variant="secondary"
                          size="icon"
                          className="min-w-0 p-1"
                          onClick={() => handleDownloadItem(index)}
                          aria-label="Download item"
                        >
                          <DownloadIcon className="size-4 text-black" />
                        </Button>
                      </div>
                    ))}
                  </div>
                ) : dataArray.length > 0 ? (
                  <div className="space-y-4">
                    {outputItems.map((item, index) => (
                      <div key={item.key} className="group relative">
                        <OutputItem
                          value={item.value}
                          metadata={item.metadata}
                          renderer={item.renderer}
                        />
                        <div className="absolute right-3 top-3 flex gap-3">
                          <Button
                            variant="secondary"
                            className="min-w-0 p-1"
                            size="icon"
                            onClick={() => handleCopyItem(index)}
                            aria-label="Copy item"
                          >
                            {copiedIndex === index ? (
                              <CheckIcon className="size-4 text-green-600" />
                            ) : (
                              <CopyIcon className="size-4 text-black" />
                            )}
                          </Button>
                          <Button
                            variant="secondary"
                            size="icon"
                            className="min-w-0 p-1"
                            onClick={() => handleDownloadItem(index)}
                            aria-label="Download item"
                          >
                            <DownloadIcon className="size-4 text-black" />
                          </Button>
                        </div>
                      </div>
                    ))}
                  </div>
                ) : (
                  <div className="py-8 text-center text-gray-500">
                    No data available
                  </div>
                )}
              </div>
            </ScrollArea>
          </div>
          ))}
        </div>
        ) : (
          <div className="py-8 text-center text-gray-500">
            No data available
          </div>
        )}
        </div>
      </div>

      <div className="flex justify-end pt-4">

@@ -1,12 +1,15 @@
import { Button } from "@/components/atoms/Button/Button";
import { cn } from "@/lib/utils";
import {
  ArrowUpIcon,
  CircleNotchIcon,
  MicrophoneIcon,
  StopIcon,
} from "@phosphor-icons/react";
import { ChangeEvent, useCallback } from "react";
  PromptInputBody,
  PromptInputButton,
  PromptInputFooter,
  PromptInputSubmit,
  PromptInputTextarea,
  PromptInputTools,
} from "@/components/ai-elements/prompt-input";
import { InputGroup } from "@/components/ui/input-group";
import { cn } from "@/lib/utils";
import { CircleNotchIcon, MicrophoneIcon } from "@phosphor-icons/react";
import { ChangeEvent } from "react";
import { RecordingIndicator } from "./components/RecordingIndicator";
import { useChatInput } from "./useChatInput";
import { useVoiceRecording } from "./useVoiceRecording";
@@ -33,14 +36,11 @@ export function ChatInput({
  const {
    value,
    setValue,
    handleKeyDown: baseHandleKeyDown,
    handleSubmit,
    handleChange: baseHandleChange,
    hasMultipleLines,
  } = useChatInput({
    onSend,
    disabled: disabled || isStreaming,
    maxRows: 4,
    inputId,
  });

@@ -58,60 +58,35 @@
    disabled: disabled || isStreaming,
    isStreaming,
    value,
    baseHandleKeyDown,
    inputId,
  });

  // Block text changes when recording
  const handleChange = useCallback(
    (e: ChangeEvent<HTMLTextAreaElement>) => {
      if (isRecording) return;
      baseHandleChange(e);
    },
    [isRecording, baseHandleChange],
  );
  function handleChange(e: ChangeEvent<HTMLTextAreaElement>) {
    if (isRecording) return;
    baseHandleChange(e);
  }

  const canSend =
    !disabled && !!value.trim() && !isRecording && !isTranscribing;

  return (
    <form onSubmit={handleSubmit} className={cn("relative flex-1", className)}>
      <div className="relative">
        <div
          id={`${inputId}-wrapper`}
          className={cn(
            "relative overflow-hidden border bg-white shadow-sm",
            "focus-within:ring-1",
            isRecording
              ? "border-red-400 focus-within:border-red-400 focus-within:ring-red-400"
              : "border-neutral-200 focus-within:border-zinc-400 focus-within:ring-zinc-400",
            hasMultipleLines ? "rounded-xlarge" : "rounded-full",
          )}
        >
          {!value && !isRecording && (
            <div
              className="pointer-events-none absolute inset-0 top-0.5 flex items-center justify-start pl-14 text-[1rem] text-zinc-400"
              aria-hidden="true"
            >
              {isTranscribing ? "Transcribing..." : placeholder}
            </div>
          )}
          <textarea
      <InputGroup
        className={cn(
          "overflow-hidden has-[[data-slot=input-group-control]:focus-visible]:border-neutral-200 has-[[data-slot=input-group-control]:focus-visible]:ring-0",
          isRecording &&
            "border-red-400 ring-1 ring-red-400 has-[[data-slot=input-group-control]:focus-visible]:border-red-400 has-[[data-slot=input-group-control]:focus-visible]:ring-red-400",
        )}
      >
        <PromptInputBody className="relative block w-full">
          <PromptInputTextarea
            id={inputId}
            aria-label="Chat message input"
            value={value}
            onChange={handleChange}
            onKeyDown={handleKeyDown}
            disabled={isInputDisabled}
            rows={1}
            className={cn(
              "w-full resize-none overflow-y-auto border-0 bg-transparent text-[1rem] leading-6 text-black",
              "placeholder:text-zinc-400",
              "focus:outline-none focus:ring-0",
              "disabled:text-zinc-500",
              hasMultipleLines
                ? "pb-6 pl-4 pr-4 pt-2"
                : showMicButton
                  ? "pb-4 pl-14 pr-14 pt-4"
                  : "pb-4 pl-4 pr-14 pt-4",
            )}
            placeholder={isTranscribing ? "Transcribing..." : placeholder}
          />
          {isRecording && !value && (
            <div className="pointer-events-none absolute inset-0 flex items-center justify-center">
@@ -121,67 +96,43 @@
              />
            </div>
          )}
        </div>
        <span id="chat-input-hint" className="sr-only">
        </PromptInputBody>

        <span id={`${inputId}-hint`} className="sr-only">
          Press Enter to send, Shift+Enter for new line, Space to record voice
        </span>

        {showMicButton && (
          <div className="absolute bottom-[7px] left-2 flex items-center gap-1">
            <Button
              type="button"
              variant="icon"
              size="icon"
              aria-label={isRecording ? "Stop recording" : "Start recording"}
              onClick={toggleRecording}
              disabled={disabled || isTranscribing || isStreaming}
              className={cn(
                isRecording
                  ? "animate-pulse border-red-500 bg-red-500 text-white hover:border-red-600 hover:bg-red-600"
                  : isTranscribing
                    ? "border-zinc-300 bg-zinc-100 text-zinc-400"
                    : "border-zinc-300 bg-white text-zinc-500 hover:border-zinc-400 hover:bg-zinc-50 hover:text-zinc-700",
                isStreaming && "opacity-40",
              )}
            >
              {isTranscribing ? (
                <CircleNotchIcon className="h-4 w-4 animate-spin" />
              ) : (
                <MicrophoneIcon className="h-4 w-4" weight="bold" />
              )}
            </Button>
          </div>
        )}
        <PromptInputFooter>
          <PromptInputTools>
            {showMicButton && (
              <PromptInputButton
                aria-label={isRecording ? "Stop recording" : "Start recording"}
                onClick={toggleRecording}
                disabled={disabled || isTranscribing || isStreaming}
                className={cn(
                  "size-[2.625rem] rounded-[96px] border border-zinc-300 bg-transparent text-black hover:border-zinc-600 hover:bg-zinc-100",
                  isRecording &&
                    "animate-pulse border-red-500 bg-red-500 text-white hover:border-red-600 hover:bg-red-600",
                  isTranscribing && "bg-zinc-100 text-zinc-400",
                  isStreaming && "opacity-40",
                )}
              >
                {isTranscribing ? (
                  <CircleNotchIcon className="h-4 w-4 animate-spin" />
                ) : (
                  <MicrophoneIcon className="h-4 w-4" weight="bold" />
                )}
              </PromptInputButton>
            )}
          </PromptInputTools>

          <div className="absolute bottom-[7px] right-2 flex items-center gap-1">
            {isStreaming ? (
              <Button
                type="button"
                variant="icon"
                size="icon"
                aria-label="Stop generating"
                onClick={onStop}
                className="border-red-600 bg-red-600 text-white hover:border-red-800 hover:bg-red-800"
              >
                <StopIcon className="h-4 w-4" weight="bold" />
              </Button>
              <PromptInputSubmit status="streaming" onStop={onStop} />
            ) : (
              <Button
                type="submit"
                variant="icon"
                size="icon"
                aria-label="Send message"
                className={cn(
                  "border-zinc-800 bg-zinc-800 text-white hover:border-zinc-900 hover:bg-zinc-900",
                  (disabled || !value.trim() || isRecording) && "opacity-20",
                )}
                disabled={disabled || !value.trim() || isRecording}
              >
                <ArrowUpIcon className="h-4 w-4" weight="bold" />
              </Button>
              <PromptInputSubmit disabled={!canSend} />
            )}
          </div>
        </div>
        </PromptInputFooter>
      </InputGroup>
    </form>
  );
}

@@ -1,26 +1,17 @@
import {
  ChangeEvent,
  FormEvent,
  KeyboardEvent,
  useEffect,
  useState,
} from "react";
import { ChangeEvent, FormEvent, useEffect, useState } from "react";

interface Args {
  onSend: (message: string) => void;
  disabled?: boolean;
  maxRows?: number;
  inputId?: string;
}

export function useChatInput({
  onSend,
  disabled = false,
  maxRows = 5,
  inputId = "chat-input",
}: Args) {
  const [value, setValue] = useState("");
  const [hasMultipleLines, setHasMultipleLines] = useState(false);
  const [isSending, setIsSending] = useState(false);

  useEffect(
@@ -40,67 +31,6 @@ export function useChatInput({
    [disabled, inputId],
  );

  useEffect(() => {
    const textarea = document.getElementById(inputId) as HTMLTextAreaElement;
    const wrapper = document.getElementById(
      `${inputId}-wrapper`,
    ) as HTMLDivElement;
    if (!textarea || !wrapper) return;

    const isEmpty = !value.trim();
    const lines = value.split("\n").length;
    const hasExplicitNewlines = lines > 1;

    const computedStyle = window.getComputedStyle(textarea);
    const lineHeight = parseInt(computedStyle.lineHeight, 10);
    const paddingTop = parseInt(computedStyle.paddingTop, 10);
    const paddingBottom = parseInt(computedStyle.paddingBottom, 10);

    const singleLinePadding = paddingTop + paddingBottom;

    textarea.style.height = "auto";
    const scrollHeight = textarea.scrollHeight;

    const singleLineHeight = lineHeight + singleLinePadding;
    const isMultiLine =
      hasExplicitNewlines || scrollHeight > singleLineHeight + 2;
    setHasMultipleLines(isMultiLine);

    if (isEmpty) {
      wrapper.style.height = `${singleLineHeight}px`;
      wrapper.style.maxHeight = "";
      textarea.style.height = `${singleLineHeight}px`;
      textarea.style.maxHeight = "";
      textarea.style.overflowY = "hidden";
      return;
    }

    if (isMultiLine) {
      const wrapperMaxHeight = 196;
      const currentMultilinePadding = paddingTop + paddingBottom;
      const contentMaxHeight = wrapperMaxHeight - currentMultilinePadding;
      const minMultiLineHeight = lineHeight * 2 + currentMultilinePadding;
      const contentHeight = scrollHeight;
      const targetWrapperHeight = Math.min(
        Math.max(contentHeight + currentMultilinePadding, minMultiLineHeight),
        wrapperMaxHeight,
      );

      wrapper.style.height = `${targetWrapperHeight}px`;
      wrapper.style.maxHeight = `${wrapperMaxHeight}px`;
      textarea.style.height = `${contentHeight}px`;
      textarea.style.maxHeight = `${contentMaxHeight}px`;
      textarea.style.overflowY =
        contentHeight > contentMaxHeight ? "auto" : "hidden";
    } else {
      wrapper.style.height = `${singleLineHeight}px`;
      wrapper.style.maxHeight = "";
      textarea.style.height = `${singleLineHeight}px`;
      textarea.style.maxHeight = "";
      textarea.style.overflowY = "hidden";
    }
  }, [value, maxRows, inputId]);

  async function handleSend() {
    if (disabled || isSending || !value.trim()) return;

@@ -108,30 +38,11 @@ export function useChatInput({
    try {
      await onSend(value.trim());
      setValue("");
      setHasMultipleLines(false);
      const textarea = document.getElementById(inputId) as HTMLTextAreaElement;
      const wrapper = document.getElementById(
        `${inputId}-wrapper`,
      ) as HTMLDivElement;
      if (textarea) {
        textarea.style.height = "auto";
      }
      if (wrapper) {
        wrapper.style.height = "";
        wrapper.style.maxHeight = "";
      }
    } finally {
      setIsSending(false);
    }
  }

  function handleKeyDown(event: KeyboardEvent<HTMLTextAreaElement>) {
    if (event.key === "Enter" && !event.shiftKey) {
      event.preventDefault();
      void handleSend();
    }
  }

  function handleSubmit(e: FormEvent<HTMLFormElement>) {
    e.preventDefault();
    void handleSend();
@@ -144,11 +55,9 @@ export function useChatInput({
  return {
    value,
    setValue,
    handleKeyDown,
    handleSend,
    handleSubmit,
    handleChange,
    hasMultipleLines,
    isSending,
  };
}

@@ -14,7 +14,6 @@ interface Args {
  disabled?: boolean;
  isStreaming?: boolean;
  value: string;
  baseHandleKeyDown: (event: KeyboardEvent<HTMLTextAreaElement>) => void;
  inputId?: string;
}

@@ -23,7 +22,6 @@ export function useVoiceRecording({
  disabled = false,
  isStreaming = false,
  value,
  baseHandleKeyDown,
  inputId,
}: Args) {
  const [isRecording, setIsRecording] = useState(false);
@@ -237,9 +235,9 @@
        event.preventDefault();
        return;
      }
      baseHandleKeyDown(event);
      // Let PromptInputTextarea handle remaining keys (Enter → submit, etc.)
    },
    [value, isTranscribing, stopRecording, startRecording, baseHandleKeyDown],
    [value, isTranscribing, stopRecording, startRecording],
  );

  const showMicButton = isSupported;

@@ -10,6 +10,7 @@ import {
  MessageResponse,
} from "@/components/ai-elements/message";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai";
import { useEffect, useState } from "react";
import { CreateAgentTool } from "../../tools/CreateAgent/CreateAgent";
@@ -27,9 +28,56 @@ import { GenericTool } from "../../tools/GenericTool/GenericTool";
import { ViewAgentOutputTool } from "../../tools/ViewAgentOutput/ViewAgentOutput";

// ---------------------------------------------------------------------------
// Workspace media support
// Special text parsing (error markers, workspace URLs, etc.)
// ---------------------------------------------------------------------------

// Special message prefixes for text-based markers (set by backend)
const COPILOT_ERROR_PREFIX = "[COPILOT_ERROR]";
const COPILOT_SYSTEM_PREFIX = "[COPILOT_SYSTEM]";

type MarkerType = "error" | "system" | null;

/**
 * Parse special markers from message content (error, system).
 *
 * Detects markers added by the backend for special rendering:
 * - `[COPILOT_ERROR] message` → ErrorCard
 * - `[COPILOT_SYSTEM] message` → System info message
 *
 * Returns marker type, marker text, and cleaned text.
 */
function parseSpecialMarkers(text: string): {
  markerType: MarkerType;
  markerText: string;
  cleanText: string;
} {
  // Check for error marker
  const errorMatch = text.match(
    new RegExp(`\\${COPILOT_ERROR_PREFIX}\\s*(.+?)$`, "s"),
  );
  if (errorMatch) {
    return {
      markerType: "error",
      markerText: errorMatch[1].trim(),
      cleanText: text.replace(errorMatch[0], "").trim(),
    };
  }

  // Check for system marker
  const systemMatch = text.match(
    new RegExp(`\\${COPILOT_SYSTEM_PREFIX}\\s*(.+?)$`, "s"),
  );
  if (systemMatch) {
    return {
      markerType: "system",
      markerText: systemMatch[1].trim(),
      cleanText: text.replace(systemMatch[0], "").trim(),
    };
  }

  return { markerType: null, markerText: "", cleanText: text };
}
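
// Illustrative usage (derived from the regexes above, not part of the diff):
//   parseSpecialMarkers("[COPILOT_ERROR] Graph run failed")
//     → { markerType: "error", markerText: "Graph run failed", cleanText: "" }
//   parseSpecialMarkers("Done. [COPILOT_SYSTEM] Session restored")
//     → { markerType: "system", markerText: "Session restored", cleanText: "Done." }
//   parseSpecialMarkers("plain text")
//     → { markerType: null, markerText: "", cleanText: "plain text" }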

/**
 * Resolve workspace:// URLs in markdown text to proxy download URLs.
 *
@@ -56,11 +104,15 @@ function resolveWorkspaceUrls(text: string): string {
  // These are blocked by Streamdown's rehype-harden sanitizer because
  // "workspace://" is not in the allowed URL-scheme whitelist, which causes
  // "[blocked]" to appear next to the link text.
  // Use an absolute URL so Streamdown's "Copy link" button copies the full
  // URL (including host) rather than just the path.
  resolved = resolved.replace(
    /(?<!!)\[([^\]]*)\]\(workspace:\/\/([^)#\s]+)(?:#[^)#\s]*)?\)/g,
    (_match, linkText: string, fileId: string) => {
      const apiPath = getGetWorkspaceDownloadFileByIdUrl(fileId);
      const url = `/api/proxy${apiPath}`;
      const origin =
        typeof window !== "undefined" ? window.location.origin : "";
      const url = `${origin}/api/proxy${apiPath}`;
      return `[${linkText || "Download file"}](${url})`;
    },
  );
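
// Illustrative sketch (not part of the diff; the exact path segment is an
// assumption — it comes from the generated getGetWorkspaceDownloadFileByIdUrl
// helper): a markdown link such as
//   [report.pdf](workspace://file-123)
// is rewritten into an absolute, sanitizer-safe proxy link along the lines of
//   [report.pdf](https://app.example.com/api/proxy/<workspace-download-path>/file-123)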
@@ -147,24 +199,42 @@ export const ChatMessagesContainer = ({
}: ChatMessagesContainerProps) => {
  const [thinkingPhrase, setThinkingPhrase] = useState(getRandomPhrase);

  useEffect(() => {
    if (status === "submitted") {
      setThinkingPhrase(getRandomPhrase());
    }
  }, [status]);

  const lastMessage = messages[messages.length - 1];
  const lastAssistantHasVisibleContent =
    lastMessage?.role === "assistant" &&
    lastMessage.parts.some(
      (p) =>
        (p.type === "text" && p.text.trim().length > 0) ||
        p.type.startsWith("tool-"),
    );

  // Determine if something is visibly "in-flight" in the last assistant message:
  // - Text is actively streaming (last part is non-empty text)
  // - A tool call is pending (state is input-streaming or input-available)
  const hasInflight = (() => {
    if (lastMessage?.role !== "assistant") return false;
    const parts = lastMessage.parts;
    if (parts.length === 0) return false;

    const lastPart = parts[parts.length - 1];

    // Text is actively being written
    if (lastPart.type === "text" && lastPart.text.trim().length > 0)
      return true;

    // A tool call is still pending (no output yet)
    if (
      lastPart.type.startsWith("tool-") &&
      "state" in lastPart &&
      (lastPart.state === "input-streaming" ||
        lastPart.state === "input-available")
    )
      return true;

    return false;
  })();
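
// Illustrative example (derived from the logic above, not part of the diff):
// for parts ending in a pending tool call, e.g.
//   [{ type: "text", text: "On it." }, { type: "tool-find_block", state: "input-available" }]
// hasInflight is true and the shimmer placeholder stays hidden; once the tool
// produces output and no text is actively streaming, hasInflight becomes false
// and the thinking phrase shows again while the stream is still open.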

  const showThinking =
    status === "submitted" ||
    (status === "streaming" && !lastAssistantHasVisibleContent);
    status === "submitted" || (status === "streaming" && !hasInflight);

  useEffect(() => {
    if (showThinking) {
      setThinkingPhrase(getRandomPhrase());
    }
  }, [showThinking]);

  return (
    <Conversation className="min-h-0 flex-1">
@@ -182,11 +252,6 @@
          const isLastAssistant =
            messageIndex === messages.length - 1 &&
            message.role === "assistant";
          const messageHasVisibleContent = message.parts.some(
            (p) =>
              (p.type === "text" && p.text.trim().length > 0) ||
              p.type.startsWith("tool-"),
          );

          return (
            <Message from={message.role} key={message.id}>
@@ -199,15 +264,41 @@
              >
                {message.parts.map((part, i) => {
                  switch (part.type) {
                    case "text":
                    case "text": {
                      // Check for special markers (error, system)
                      const { markerType, markerText, cleanText } =
                        parseSpecialMarkers(part.text);

                      if (markerType === "error") {
                        return (
                          <ErrorCard
                            key={`${message.id}-${i}`}
                            responseError={{ message: markerText }}
                            context="execution"
                          />
                        );
                      }

                      if (markerType === "system") {
                        return (
                          <div
                            key={`${message.id}-${i}`}
                            className="my-2 rounded-lg bg-neutral-100 px-3 py-2 text-sm italic text-neutral-600"
                          >
                            {markerText}
                          </div>
                        );
                      }

                      return (
                        <MessageResponse
                          key={`${message.id}-${i}`}
                          components={STREAMDOWN_COMPONENTS}
                        >
                          {resolveWorkspaceUrls(part.text)}
                          {resolveWorkspaceUrls(cleanText)}
                        </MessageResponse>
                      );
                    }
                    case "tool-find_block":
                      return (
                        <FindBlocksTool
@@ -295,13 +386,11 @@
                      return null;
                  }
                })}
                {isLastAssistant &&
                  !messageHasVisibleContent &&
                  showThinking && (
                    <span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
                      {thinkingPhrase}
                    </span>
                  )}
                {isLastAssistant && showThinking && (
                  <span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
                    {thinkingPhrase}
                  </span>
                )}
              </MessageContent>
            </Message>
          );

@@ -16,6 +16,10 @@ import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
import { ClarificationQuestionsCard } from "./components/ClarificationQuestionsCard";
import { MiniGame } from "../../components/MiniGame/MiniGame";
import { SuggestedGoalCard } from "./components/SuggestedGoalCard";
import {
  buildClarificationAnswersMessage,
  normalizeClarifyingQuestions,
} from "../clarifying-questions";
import {
  AccordionIcon,
  formatMaybeJson,
@@ -28,7 +32,6 @@
  isSuggestedGoalOutput,
  ToolIcon,
  truncateText,
  normalizeClarifyingQuestions,
  type CreateAgentToolOutput,
} from "./helpers";

@@ -110,16 +113,7 @@ export function CreateAgentTool({ part }: Props) {
      ? (output.questions ?? [])
      : [];

    const contextMessage = questions
      .map((q) => {
        const answer = answers[q.keyword] || "";
        return `> ${q.question}\n\n${answer}`;
      })
      .join("\n\n");

    onSend(
      `**Here are my answers:**\n\n${contextMessage}\n\nPlease proceed with creating the agent.`,
    );
    onSend(buildClarificationAnswersMessage(answers, questions, "create"));
  }

  return (

@@ -7,7 +7,7 @@ import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";
import { ChatTeardropDotsIcon, CheckCircleIcon } from "@phosphor-icons/react";
import { useEffect, useRef, useState } from "react";
import type { ClarifyingQuestion } from "../helpers";
import type { ClarifyingQuestion } from "../../clarifying-questions";

interface Props {
  questions: ClarifyingQuestion[];
@@ -149,20 +149,20 @@ export function ClarificationQuestionsCard({

      <div className="space-y-6">
        {questions.map((q, index) => {
          const isAnswered = !!answers[q.keyword]?.trim();
          const hasAnswer = !!answers[q.keyword]?.trim();

          return (
            <div
              key={`${q.keyword}-${index}`}
              className={cn(
                "relative rounded-lg border border-dotted p-3",
                isAnswered
                hasAnswer
                  ? "border-green-500 bg-green-50/50"
                  : "border-slate-100 bg-slate-50/50",
              )}
            >
              <div className="mb-2 flex items-start gap-2">
                {isAnswered ? (
                {hasAnswer ? (
                  <CheckCircleIcon
                    size={20}
                    className="mt-0.5 text-green-500"

@@ -157,41 +157,3 @@ export function truncateText(text: string, maxChars: number): string {
  if (trimmed.length <= maxChars) return trimmed;
  return `${trimmed.slice(0, maxChars).trimEnd()}…`;
}

export interface ClarifyingQuestion {
  question: string;
  keyword: string;
  example?: string;
}

export function normalizeClarifyingQuestions(
  questions: Array<{ question: string; keyword: string; example?: unknown }>,
): ClarifyingQuestion[] {
  const seen = new Set<string>();

  return questions.map((q, index) => {
    let keyword = q.keyword?.trim().toLowerCase() || "";
    if (!keyword) {
      keyword = `question-${index}`;
    }

    let unique = keyword;
    let suffix = 1;
    while (seen.has(unique)) {
      unique = `${keyword}-${suffix}`;
      suffix++;
    }
    seen.add(unique);

    const item: ClarifyingQuestion = {
      question: q.question,
      keyword: unique,
    };
    const example =
      typeof q.example === "string" && q.example.trim()
        ? q.example.trim()
        : null;
    if (example) item.example = example;
    return item;
  });
}

@@ -14,8 +14,11 @@ import {
  ContentMessage,
} from "../../components/ToolAccordion/AccordionContent";
import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
import {
  buildClarificationAnswersMessage,
  normalizeClarifyingQuestions,
} from "../clarifying-questions";
import { ClarificationQuestionsCard } from "../CreateAgent/components/ClarificationQuestionsCard";
import { normalizeClarifyingQuestions } from "../CreateAgent/helpers";
import {
  AccordionIcon,
  formatMaybeJson,
@@ -99,16 +102,7 @@ export function EditAgentTool({ part }: Props) {
      ? (output.questions ?? [])
      : [];

    const contextMessage = questions
      .map((q) => {
        const answer = answers[q.keyword] || "";
        return `> ${q.question}\n\n${answer}`;
      })
      .join("\n\n");

    onSend(
      `**Here are my answers:**\n\n${contextMessage}\n\nPlease proceed with editing the agent.`,
    );
    onSend(buildClarificationAnswersMessage(answers, questions, "edit"));
  }

  return (

@@ -3,7 +3,7 @@
import type { AgentDetailsResponse } from "@/app/api/__generated__/models/agentDetailsResponse";
import { Button } from "@/components/atoms/Button/Button";
import { FormRenderer } from "@/components/renderers/InputRenderer/FormRenderer";
import { useState } from "react";
import { useEffect, useState } from "react";
import { useCopilotChatActions } from "../../../../components/CopilotChatActionsProvider/useCopilotChatActions";
import { ContentMessage } from "../../../../components/ToolAccordion/AccordionContent";
import { buildInputSchema, extractDefaults, isFormValid } from "./helpers";
@@ -24,6 +24,14 @@ export function AgentDetailsCard({ output }: Props) {
    schema ? isFormValid(schema, defaults) : false,
  );

  // Reset form state when the agent changes (e.g. during mid-conversation switches)
  useEffect(() => {
    const newSchema = buildInputSchema(output.agent.inputs);
    const newDefaults = newSchema ? extractDefaults(newSchema) : {};
    setInputValues(newDefaults);
    setValid(newSchema ? isFormValid(newSchema, newDefaults) : false);
  }, [output.agent.id]);

  function handleChange(v: { formData?: Record<string, unknown> }) {
    const data = v.formData ?? {};
    setInputValues(data);

@@ -0,0 +1,106 @@
import type { RJSFSchema } from "@rjsf/utils";
import { describe, expect, it } from "vitest";
import { buildInputSchema, extractDefaults, isFormValid } from "./helpers";

describe("buildInputSchema", () => {
  it("returns null for falsy input", () => {
    expect(buildInputSchema(null)).toBeNull();
    expect(buildInputSchema(undefined)).toBeNull();
    expect(buildInputSchema("")).toBeNull();
  });

  it("returns null for empty properties object", () => {
    expect(buildInputSchema({})).toBeNull();
  });

  it("returns the schema when properties exist", () => {
    const schema = { name: { type: "string" as const } };
    expect(buildInputSchema(schema)).toBe(schema);
  });
});

describe("extractDefaults", () => {
  it("returns an empty object when no properties exist", () => {
    expect(extractDefaults({})).toEqual({});
    expect(extractDefaults({ properties: null as never })).toEqual({});
  });

  it("extracts default values from property definitions", () => {
    const schema: RJSFSchema = {
      properties: {
        name: { type: "string", default: "Alice" },
        age: { type: "number", default: 30 },
      },
    };
    expect(extractDefaults(schema)).toEqual({ name: "Alice", age: 30 });
  });

  it("falls back to the first example when no default is defined", () => {
    const schema: RJSFSchema = {
      properties: {
        query: { type: "string", examples: ["hello", "world"] },
      },
    };
    expect(extractDefaults(schema)).toEqual({ query: "hello" });
  });

  it("prefers default over examples", () => {
    const schema: RJSFSchema = {
      properties: {
        value: { type: "string", default: "def", examples: ["ex"] },
      },
    };
    expect(extractDefaults(schema)).toEqual({ value: "def" });
  });

  it("skips properties without default or examples", () => {
    const schema: RJSFSchema = {
      properties: {
        name: { type: "string" },
        title: { type: "string", default: "Mr." },
      },
    };
    expect(extractDefaults(schema)).toEqual({ title: "Mr." });
  });

  it("skips properties that are not objects", () => {
    const schema: RJSFSchema = {
      properties: {
        bad: true,
        alsobad: false,
      },
    };
    expect(extractDefaults(schema)).toEqual({});
  });
});

describe("isFormValid", () => {
  it("returns true for a valid form", () => {
    const schema: RJSFSchema = {
      type: "object",
      properties: {
        name: { type: "string" },
      },
    };
    expect(isFormValid(schema, { name: "Alice" })).toBe(true);
  });

  it("returns false when required fields are missing", () => {
    const schema: RJSFSchema = {
      type: "object",
      required: ["name"],
      properties: {
        name: { type: "string" },
      },
    };
    expect(isFormValid(schema, {})).toBe(false);
  });

  it("returns true for empty schema with empty data", () => {
    const schema: RJSFSchema = {
      type: "object",
      properties: {},
    };
    expect(isFormValid(schema, {})).toBe(true);
  });
});
@@ -0,0 +1,93 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import {
|
||||
buildClarificationAnswersMessage,
|
||||
normalizeClarifyingQuestions,
|
||||
} from "./clarifying-questions";
|
||||
|
||||
describe("normalizeClarifyingQuestions", () => {
|
||||
it("returns normalized questions with trimmed lowercase keywords", () => {
|
||||
const result = normalizeClarifyingQuestions([
|
||||
{ question: "What is your goal?", keyword: " Goal ", example: "test" },
|
||||
]);
|
||||
expect(result).toEqual([
|
||||
{ question: "What is your goal?", keyword: "goal", example: "test" },
|
||||
]);
|
||||
});
|
||||
|
||||
it("deduplicates keywords by appending a numeric suffix", () => {
|
||||
const result = normalizeClarifyingQuestions([
|
||||
{ question: "Q1", keyword: "topic" },
|
||||
{ question: "Q2", keyword: "topic" },
|
||||
{ question: "Q3", keyword: "topic" },
|
||||
]);
|
||||
expect(result.map((q) => q.keyword)).toEqual([
|
||||
"topic",
|
||||
"topic-1",
|
||||
"topic-2",
|
||||
]);
|
||||
});
|
||||
|
||||
it("falls back to question-{index} when keyword is empty", () => {
|
||||
const result = normalizeClarifyingQuestions([
|
||||
{ question: "First?", keyword: "" },
|
||||
{ question: "Second?", keyword: " " },
|
||||
]);
|
||||
expect(result[0].keyword).toBe("question-0");
|
||||
expect(result[1].keyword).toBe("question-1");
|
||||
});
|
||||
|
||||
it("coerces non-string examples to undefined", () => {
|
||||
const result = normalizeClarifyingQuestions([
|
||||
{ question: "Q1", keyword: "k1", example: 42 },
|
||||
{ question: "Q2", keyword: "k2", example: null },
|
||||
{ question: "Q3", keyword: "k3", example: { nested: true } },
|
||||
]);
|
||||
expect(result[0].example).toBeUndefined();
|
||||
expect(result[1].example).toBeUndefined();
|
||||
expect(result[2].example).toBeUndefined();
|
||||
});
|
||||
|
||||
it("trims string examples and omits empty ones", () => {
|
||||
const result = normalizeClarifyingQuestions([
|
||||
{ question: "Q1", keyword: "k1", example: " valid " },
|
||||
{ question: "Q2", keyword: "k2", example: " " },
|
||||
]);
|
||||
expect(result[0].example).toBe("valid");
|
||||
expect(result[1].example).toBeUndefined();
|
||||
});
|
||||
|
||||
it("returns an empty array for empty input", () => {
|
||||
expect(normalizeClarifyingQuestions([])).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("buildClarificationAnswersMessage", () => {
|
||||
it("formats answers with create mode", () => {
|
||||
const result = buildClarificationAnswersMessage(
|
||||
{ goal: "automate tasks" },
|
||||
[{ question: "What is your goal?", keyword: "goal" }],
|
||||
"create",
|
||||
);
|
||||
expect(result).toContain("> What is your goal?");
|
||||
expect(result).toContain("automate tasks");
|
||||
expect(result).toContain("Please proceed with creating the agent.");
|
||||
});
|
||||
|
||||
it("formats answers with edit mode", () => {
|
||||
const result = buildClarificationAnswersMessage(
|
||||
{ goal: "fix bugs" },
|
||||
[{ question: "What should change?", keyword: "goal" }],
|
||||
"edit",
|
||||
);
|
||||
expect(result).toContain("Please proceed with editing the agent.");
|
||||
});
|
||||
|
||||
it("uses empty string for missing answers", () => {
|
||||
const result = buildClarificationAnswersMessage(
|
||||
{},
|
||||
[{ question: "Q?", keyword: "missing" }],
|
||||
"create",
|
||||
);
|
||||
expect(result).toContain("> Q?\n\n");
|
||||
});
|
||||
});
|
@@ -0,0 +1,56 @@
export interface ClarifyingQuestion {
  question: string;
  keyword: string;
  example?: string;
}

export function normalizeClarifyingQuestions(
  questions: Array<{ question: string; keyword: string; example?: unknown }>,
): ClarifyingQuestion[] {
  const seen = new Set<string>();

  return questions.map((q, index) => {
    let keyword = q.keyword?.trim().toLowerCase() || "";
    if (!keyword) {
      keyword = `question-${index}`;
    }

    let unique = keyword;
    let suffix = 1;
    while (seen.has(unique)) {
      unique = `${keyword}-${suffix}`;
      suffix++;
    }
    seen.add(unique);

    const item: ClarifyingQuestion = {
      question: q.question,
      keyword: unique,
    };
    const example =
      typeof q.example === "string" && q.example.trim()
        ? q.example.trim()
        : null;
    if (example) item.example = example;
    return item;
  });
}

/**
 * Formats clarification answers as a quoted context message for the chat.
 */
export function buildClarificationAnswersMessage(
  answers: Record<string, string>,
  rawQuestions: Array<{ question: string; keyword: string }>,
  mode: "create" | "edit",
): string {
  const contextMessage = rawQuestions
    .map((q) => {
      const answer = answers[q.keyword] || "";
      return `> ${q.question}\n\n${answer}`;
    })
    .join("\n\n");

  const action = mode === "create" ? "creating" : "editing";
  return `**Here are my answers:**\n\n${contextMessage}\n\nPlease proceed with ${action} the agent.`;
}
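A minimal usage sketch showing how the two helpers compose (the answer values are made up for illustration):

```ts
import {
  buildClarificationAnswersMessage,
  normalizeClarifyingQuestions,
} from "./clarifying-questions";

const questions = normalizeClarifyingQuestions([
  { question: "What is your goal?", keyword: " Goal " }, // -> "goal"
  { question: "Any constraints?", keyword: "goal" }, // collides -> "goal-1"
]);

const message = buildClarificationAnswersMessage(
  { goal: "automate tasks", "goal-1": "budget under $10" },
  questions,
  "create",
);
// message quotes each question ("> What is your goal?") followed by its
// answer, and ends with "Please proceed with creating the agent."
```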
@@ -18,7 +18,7 @@ export function useChatSession() {
  const sessionQuery = useGetV2GetSession(sessionId ?? "", {
    query: {
      enabled: !!sessionId,
      staleTime: Infinity,
      staleTime: Infinity, // Manual invalidation on session switch
      refetchOnWindowFocus: false,
      refetchOnReconnect: true,
      refetchOnMount: true,
@@ -47,7 +47,7 @@ export function useChatSession() {
  const hasActiveStream = useMemo(() => {
    if (sessionQuery.data?.status !== 200) return false;
    return !!sessionQuery.data.data.active_stream;
  }, [sessionQuery.data]);
  }, [sessionQuery.data, sessionId]);

  // Memoize so the effect in useCopilotPage doesn't infinite-loop on a new
  // array reference every render. Re-derives only when query data changes.
@@ -119,5 +119,6 @@ export function useChatSession() {
    isSessionError: sessionQuery.isError,
    createSession,
    isCreatingSession,
    refetchSession: sessionQuery.refetch,
  };
}
@@ -15,7 +15,9 @@ import type { UIMessage } from "ai";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { useChatSession } from "./useChatSession";

const STREAM_START_TIMEOUT_MS = 12_000;
const RECONNECT_BASE_DELAY_MS = 1_000;
const RECONNECT_MAX_DELAY_MS = 30_000;
const RECONNECT_MAX_ATTEMPTS = 5;

/** Mark any in-progress tool parts as completed/errored so spinners stop. */
function resolveInProgressTools(
@@ -35,42 +37,12 @@ function resolveInProgressTools(
  }));
}

/** Build a fingerprint from a message's role + text/tool content for cross-boundary dedup. */
function messageFingerprint(msg: UIMessage): string {
  const fragments = msg.parts.map((p) => {
    if ("text" in p && typeof p.text === "string") return p.text;
    if ("toolCallId" in p && typeof p.toolCallId === "string")
      return `tool:${p.toolCallId}`;
    return "";
  });
  return `${msg.role}::${fragments.join("\n")}`;
}

/**
 * Deduplicate messages by ID *and* by content fingerprint.
 * ID-based dedup catches duplicates within the same source (e.g. two
 * identical stream events). Fingerprint-based dedup catches duplicates
 * across the hydration/stream boundary where IDs differ (synthetic
 * `${sessionId}-${index}` vs AI SDK nanoid).
 *
 * NOTE: Fingerprint dedup only applies to assistant messages, not user messages.
 * Users should be able to send the same message multiple times.
 */
/** Simple ID-based deduplication - trust backend for correctness */
function deduplicateMessages(messages: UIMessage[]): UIMessage[] {
  const seenIds = new Set<string>();
  const seenFingerprints = new Set<string>();
  return messages.filter((msg) => {
    if (seenIds.has(msg.id)) return false;
    seenIds.add(msg.id);

    // Only apply fingerprint deduplication to assistant messages
    // User messages should allow duplicates (same text sent multiple times)
    if (msg.role === "assistant") {
      const fp = messageFingerprint(msg);
      if (fp !== "::" && seenFingerprints.has(fp)) return false;
      seenFingerprints.add(fp);
    }

    return true;
  });
}
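To make the dedup rules concrete, here is a sketch of what survives `deduplicateMessages` (IDs and texts are made up; the cast papers over the abbreviated message shape):

```ts
import type { UIMessage } from "ai";

// Two assistant messages carry the same text under different IDs (synthetic
// hydration ID vs AI SDK nanoid), so the fingerprint pass drops the second.
// The repeated user message survives because fingerprint dedup is skipped
// for user messages.
const input = [
  { id: "sess-0", role: "assistant", parts: [{ type: "text", text: "Hi!" }] },
  { id: "nanoid1", role: "assistant", parts: [{ type: "text", text: "Hi!" }] },
  { id: "u1", role: "user", parts: [{ type: "text", text: "retry" }] },
  { id: "u2", role: "user", parts: [{ type: "text", text: "retry" }] },
] as unknown as UIMessage[];

const kept = deduplicateMessages(input).map((m) => m.id);
// kept === ["sess-0", "u1", "u2"]
```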
@@ -94,6 +66,7 @@ export function useCopilotPage() {
    isSessionError,
    createSession,
    isCreatingSession,
    refetchSession,
  } = useChatSession();

  const { mutate: deleteSessionMutation, isPending: isDeleting } =
@@ -153,6 +126,48 @@ export function useCopilotPage() {
    [sessionId],
  );

  // Reconnect state
  const [reconnectAttempts, setReconnectAttempts] = useState(0);
  const [isReconnectScheduled, setIsReconnectScheduled] = useState(false);
  const reconnectTimerRef = useRef<ReturnType<typeof setTimeout>>();
  const hasShownDisconnectToast = useRef(false);

  // Consolidated reconnect logic
  function handleReconnect(sid: string) {
    if (isReconnectScheduled || !sid) return;

    const nextAttempt = reconnectAttempts + 1;
    if (nextAttempt > RECONNECT_MAX_ATTEMPTS) {
      toast({
        title: "Connection lost",
        description: "Unable to reconnect. Please refresh the page.",
        variant: "destructive",
      });
      return;
    }

    setIsReconnectScheduled(true);
    setReconnectAttempts(nextAttempt);

    if (!hasShownDisconnectToast.current) {
      hasShownDisconnectToast.current = true;
      toast({
        title: "Connection lost",
        description: "Reconnecting...",
      });
    }

    const delay = Math.min(
      RECONNECT_BASE_DELAY_MS * 2 ** reconnectAttempts,
      RECONNECT_MAX_DELAY_MS,
    );

    reconnectTimerRef.current = setTimeout(() => {
      setIsReconnectScheduled(false);
      resumeStream();
    }, delay);
  }
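The delay formula is classic exponential backoff with a cap. Spelled out for the constants above (a standalone worked example, not code from the hook):

```ts
// delay = min(RECONNECT_BASE_DELAY_MS * 2 ** attemptsSoFar, RECONNECT_MAX_DELAY_MS)
const delays = Array.from({ length: 5 }, (_, attempt) =>
  Math.min(1_000 * 2 ** attempt, 30_000),
);
// -> [1000, 2000, 4000, 8000, 16000] ms; a 6th attempt would exceed
// RECONNECT_MAX_ATTEMPTS (5) and shows the "refresh the page" toast instead.
```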
|
||||
const {
|
||||
messages: rawMessages,
|
||||
sendMessage,
|
||||
@@ -164,9 +179,32 @@ export function useCopilotPage() {
|
||||
} = useChat({
|
||||
id: sessionId ?? undefined,
|
||||
transport: transport ?? undefined,
|
||||
// Don't use resume: true — it fires before hydration completes, causing
|
||||
// the hydrated messages to overwrite the resumed stream. Instead we
|
||||
// call resumeStream() manually after hydration + active_stream detection.
|
||||
onFinish: async ({ isDisconnect, isAbort }) => {
|
||||
if (isAbort || !sessionId) return;
|
||||
|
||||
if (isDisconnect) {
|
||||
handleReconnect(sessionId);
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if backend executor is still running after clean close
|
||||
const result = await refetchSession();
|
||||
const backendActive =
|
||||
result.data?.status === 200 && !!result.data.data.active_stream;
|
||||
|
||||
if (backendActive) {
|
||||
handleReconnect(sessionId);
|
||||
}
|
||||
},
|
||||
onError: (error) => {
|
||||
if (!sessionId) return;
|
||||
// Only reconnect on network errors (not HTTP errors)
|
||||
const isNetworkError =
|
||||
error.name === "TypeError" || error.name === "AbortError";
|
||||
if (isNetworkError) {
|
||||
handleReconnect(sessionId);
|
||||
}
|
||||
},
|
||||
});
|
||||
|
||||
// Deduplicate messages continuously to prevent duplicates when resuming streams
|
||||
@@ -205,44 +243,31 @@ export function useCopilotPage() {
|
||||
}
|
||||
}
|
||||
|
||||
// Abort the stream if the backend doesn't start sending data within 12s.
|
||||
const stopRef = useRef(stop);
|
||||
stopRef.current = stop;
|
||||
useEffect(() => {
|
||||
if (status !== "submitted") return;
|
||||
|
||||
const timer = setTimeout(() => {
|
||||
stopRef.current();
|
||||
toast({
|
||||
title: "Stream timed out",
|
||||
description: "The server took too long to respond. Please try again.",
|
||||
variant: "destructive",
|
||||
});
|
||||
}, STREAM_START_TIMEOUT_MS);
|
||||
|
||||
return () => clearTimeout(timer);
|
||||
}, [status]);
|
||||
|
||||
// Hydrate messages from the REST session endpoint.
|
||||
// Skip hydration while streaming to avoid overwriting the live stream.
|
||||
// Hydrate messages from REST API when not actively streaming
|
||||
useEffect(() => {
|
||||
if (!hydratedMessages || hydratedMessages.length === 0) return;
|
||||
if (status === "streaming" || status === "submitted") return;
|
||||
if (isReconnectScheduled) return;
|
||||
setMessages((prev) => {
|
||||
if (prev.length >= hydratedMessages.length) return prev;
|
||||
// Deduplicate to handle rare cases where duplicate streams might occur
|
||||
return deduplicateMessages(hydratedMessages);
|
||||
});
|
||||
}, [hydratedMessages, setMessages, status]);
|
||||
}, [hydratedMessages, setMessages, status, isReconnectScheduled]);
|
||||
|
||||
// Ref: tracks whether we've already resumed for a given session.
|
||||
// Format: Map<sessionId, hasResumed>
|
||||
// Track resume state per session
|
||||
const hasResumedRef = useRef<Map<string, boolean>>(new Map());
|
||||
|
||||
// When the stream ends (or drops), invalidate the session cache so the
|
||||
// next hydration fetches fresh messages from the backend. Without this,
|
||||
// staleTime: Infinity means the cache keeps the pre-stream data forever,
|
||||
// and any messages added during streaming are lost on remount/navigation.
|
||||
// Clean up reconnect state on session switch
|
||||
useEffect(() => {
|
||||
clearTimeout(reconnectTimerRef.current);
|
||||
reconnectTimerRef.current = undefined;
|
||||
setReconnectAttempts(0);
|
||||
setIsReconnectScheduled(false);
|
||||
hasShownDisconnectToast.current = false;
|
||||
prevStatusRef.current = status; // Reset to avoid cross-session state bleeding
|
||||
}, [sessionId, status]);
|
||||
|
||||
// Invalidate session cache when stream completes
|
||||
const prevStatusRef = useRef(status);
|
||||
useEffect(() => {
|
||||
const prev = prevStatusRef.current;
|
||||
@@ -250,12 +275,17 @@ export function useCopilotPage() {
|
||||
|
||||
const wasActive = prev === "streaming" || prev === "submitted";
|
||||
const isIdle = status === "ready" || status === "error";
|
||||
if (wasActive && isIdle && sessionId) {
|
||||
|
||||
if (wasActive && isIdle && sessionId && !isReconnectScheduled) {
|
||||
queryClient.invalidateQueries({
|
||||
queryKey: getGetV2GetSessionQueryKey(sessionId),
|
||||
});
|
||||
if (status === "ready") {
|
||||
setReconnectAttempts(0);
|
||||
hasShownDisconnectToast.current = false;
|
||||
}
|
||||
}
|
||||
}, [status, sessionId, queryClient]);
|
||||
}, [status, sessionId, queryClient, isReconnectScheduled]);
|
||||
|
||||
// Resume an active stream AFTER hydration completes.
|
||||
// IMPORTANT: Only runs when page loads with existing active stream (reconnection).
|
||||
@@ -352,16 +382,16 @@ export function useCopilotPage() {
|
||||
}
|
||||
}, [isDeleting]);
|
||||
|
||||
// True while we know the backend has an active stream but haven't
|
||||
// reconnected yet. Used to disable the send button and show stop UI.
|
||||
// True while reconnecting or backend has active stream but we haven't connected yet
|
||||
const isReconnecting =
|
||||
hasActiveStream && status !== "streaming" && status !== "submitted";
|
||||
isReconnectScheduled ||
|
||||
(hasActiveStream && status !== "streaming" && status !== "submitted");
|
||||
|
||||
return {
|
||||
sessionId,
|
||||
messages,
|
||||
status,
|
||||
error,
|
||||
error: isReconnecting ? undefined : error,
|
||||
stop,
|
||||
isReconnecting,
|
||||
isLoadingSession,
|
||||
|
||||
@@ -3,6 +3,20 @@ import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers";
import { NextRequest } from "next/server";
import { normalizeSSEStream, SSE_HEADERS } from "../../../sse-helpers";

export const maxDuration = 800;

const DEBUG_SSE_TIMEOUT_MS = process.env.NEXT_PUBLIC_SSE_TIMEOUT_MS
  ? Number(process.env.NEXT_PUBLIC_SSE_TIMEOUT_MS)
  : undefined;

function debugSignal(): AbortSignal | undefined {
  if (!DEBUG_SSE_TIMEOUT_MS) return undefined;
  console.warn(
    `[SSE_DEBUG] Simulating proxy timeout in ${DEBUG_SSE_TIMEOUT_MS}ms`,
  );
  return AbortSignal.timeout(DEBUG_SSE_TIMEOUT_MS);
}

export async function POST(
  request: NextRequest,
  { params }: { params: Promise<{ sessionId: string }> },
@@ -47,6 +61,7 @@ export async function POST(
      is_user_message: is_user_message ?? true,
      context: context || null,
    }),
    signal: debugSignal(),
  });

  if (!response.ok) {
@@ -110,6 +125,7 @@ export async function GET(
  const response = await fetch(streamUrl.toString(), {
    method: "GET",
    headers,
    signal: debugSignal(),
  });

  if (response.status === 204) {
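The debug signal uses the platform `AbortSignal.timeout()` helper to fake a proxy dropping the stream. A standalone sketch of that mechanism (the URL and function name are placeholders):

```ts
// After timeoutMs, fetch rejects with a "TimeoutError" DOMException; the
// client-side reconnect logic treats such aborts as a dropped stream.
async function fetchWithSimulatedTimeout(url: string, timeoutMs: number) {
  try {
    const res = await fetch(url, { signal: AbortSignal.timeout(timeoutMs) });
    return await res.text();
  } catch (err) {
    if (err instanceof DOMException && err.name === "TimeoutError") {
      console.warn(`[SSE_DEBUG] aborted after ${timeoutMs}ms`);
    }
    throw err;
  }
}
```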
@@ -7573,9 +7573,11 @@
        "IDENTITY",
        "EXECUTE_GRAPH",
        "READ_GRAPH",
        "WRITE_GRAPH",
        "EXECUTE_BLOCK",
        "READ_BLOCK",
        "READ_STORE",
        "WRITE_LIBRARY",
        "USE_TOOLS",
        "MANAGE_INTEGRATIONS",
        "READ_INTEGRATIONS",
@@ -181,6 +181,11 @@ body[data-google-picker-open="true"] [data-dialog-content] {
  pointer-events: none !important;
}

/* Streamdown external link dialog: "Open link" button */
[data-streamdown="link-safety-modal"] button:last-of-type {
  color: black;
}

/* CoPilot chat table styling — remove left/right borders, increase padding */
[data-streamdown="table-wrapper"] table {
  border-left: none;
@@ -61,15 +61,11 @@ export const ConversationEmptyState = ({
    >
      {children ?? (
        <>
          {icon && (
            <div className="text-neutral-500 dark:text-neutral-400">{icon}</div>
          )}
          {icon && <div className="text-neutral-500">{icon}</div>}
          <div className="space-y-1">
            <h3 className="text-sm font-medium">{title}</h3>
            {description && (
              <p className="text-sm text-neutral-500 dark:text-neutral-400">
                {description}
              </p>
              <p className="text-sm text-neutral-500">{description}</p>
            )}
          </div>
        </>
@@ -93,7 +89,7 @@ export const ConversationScrollButton = ({
    !isAtBottom && (
      <Button
        className={cn(
          "absolute bottom-4 left-[50%] translate-x-[-50%] rounded-full dark:bg-white dark:dark:bg-neutral-950 dark:dark:hover:bg-neutral-800 dark:hover:bg-neutral-100",
          "absolute bottom-4 left-[50%] translate-x-[-50%] rounded-full",
          className,
        )}
        onClick={handleScrollToBottom}
@@ -0,0 +1,110 @@
import type { Meta, StoryObj } from "@storybook/nextjs";

import { Message, MessageContent, MessageResponse } from "./message";

const meta: Meta<typeof Message> = {
  title: "AI Elements/Message",
  component: Message,
  parameters: {
    layout: "padded",
  },
};

export default meta;
type Story = StoryObj<typeof Message>;

type MessageStoryProps = { children: string };

function AssistantMessage({ children }: MessageStoryProps) {
  return (
    <Message from="assistant">
      <MessageContent>
        <MessageResponse>{children}</MessageResponse>
      </MessageContent>
    </Message>
  );
}

function UserMessage({ children }: MessageStoryProps) {
  return (
    <Message from="user">
      <MessageContent>
        <MessageResponse>{children}</MessageResponse>
      </MessageContent>
    </Message>
  );
}

export const Default: Story = {
  render: () => (
    <div className="flex flex-col gap-4">
      <AssistantMessage>
        {
          "Here is a response with **bold text**, *italic text*, and `inline code`."
        }
      </AssistantMessage>
    </div>
  ),
};

export const UserMessageStory: Story = {
  name: "User Message",
  render: () => (
    <div className="flex flex-col gap-4">
      <UserMessage>{"How do I download my workspace files?"}</UserMessage>
    </div>
  ),
};

export const WithLinks: Story = {
  name: "With Links (Internal & External)",
  render: () => (
    <div className="flex flex-col gap-4">
      <AssistantMessage>
        {[
          "Here are some links:\n\n",
          "- Internal link: [Download file](/api/proxy/api/v1/workspace/files/download/abc123)\n",
          "- External link: [GitHub](https://github.com/Significant-Gravitas/AutoGPT)\n",
          "- Another external: [Documentation](https://docs.agpt.co)\n\n",
          "Internal links should open directly. External links should show a safety modal.\n\n",
          "**Try clicking each link to verify behavior.**",
        ].join("")}
      </AssistantMessage>
    </div>
  ),
};

export const LinkSafetyModal: Story = {
  name: "LinkSafetyModal",
  render: () => (
    <div className="flex flex-col gap-4">
      <p className="text-sm text-muted-foreground">
        Click the external link below to trigger the link safety modal. Verify
        that both "Copy link" and "Open link" buttons are
        visible.
      </p>
      <AssistantMessage>
        {
          "Click this external link to see the safety modal: [Example Site](https://example.com)"
        }
      </AssistantMessage>
    </div>
  ),
};

export const Conversation: Story = {
  render: () => (
    <div className="flex flex-col gap-4">
      <UserMessage>{"Can you help me with my workspace files?"}</UserMessage>
      <AssistantMessage>
        {[
          "Sure! Here's how to manage your workspace files:\n\n",
          "1. Upload files using the attachment button\n",
          "2. Download files by clicking the link in chat\n",
          "3. View all files in the [workspace panel](/workspace)\n\n",
          "For more details, check the [documentation](https://docs.agpt.co/workspace).",
        ].join("")}
      </AssistantMessage>
    </div>
  ),
};
@@ -45,8 +45,8 @@ export const MessageContent = ({
    className={cn(
      "is-user:dark flex w-full min-w-0 max-w-full flex-col gap-2 overflow-hidden text-sm",
      "group-[.is-user]:w-fit",
      "group-[.is-user]:ml-auto group-[.is-user]:rounded-lg group-[.is-user]:bg-neutral-100 group-[.is-user]:px-4 group-[.is-user]:py-3 group-[.is-user]:text-neutral-950 dark:group-[.is-user]:bg-neutral-800 dark:group-[.is-user]:text-neutral-50",
      "group-[.is-assistant]:text-neutral-950 dark:group-[.is-assistant]:text-neutral-50",
      "group-[.is-user]:ml-auto group-[.is-user]:rounded-lg group-[.is-user]:bg-neutral-100 group-[.is-user]:px-4 group-[.is-user]:py-3 group-[.is-user]:text-neutral-950",
      "group-[.is-assistant]:text-neutral-950",
      className,
    )}
    {...props}
@@ -291,7 +291,7 @@ export const MessageBranchPage = ({
  return (
    <ButtonGroupText
      className={cn(
        "border-none bg-transparent text-neutral-500 shadow-none dark:text-neutral-400",
        "border-none bg-transparent text-neutral-500 shadow-none",
        className,
      )}
      {...props}
@@ -303,6 +303,15 @@ export const MessageBranchPage = ({

export type MessageResponseProps = ComponentProps<typeof Streamdown>;

function isSameOriginLink(url: string): boolean {
  try {
    const parsed = new URL(url, window.location.origin);
    return parsed.origin === window.location.origin;
  } catch {
    return false;
  }
}

export const MessageResponse = memo(
  ({ className, ...props }: MessageResponseProps) => (
    <Streamdown
@@ -311,6 +320,10 @@ export const MessageResponse = memo(
        className,
      )}
      plugins={{ code, mermaid, math, cjk }}
      linkSafety={{
        enabled: true,
        onLinkCheck: isSameOriginLink,
      }}
      {...props}
    />
  ),
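`isSameOriginLink` is the `onLinkCheck` predicate: returning `true` lets a link open directly, `false` routes it through the safety modal. Expected behavior, assuming the page is served from `https://platform.agpt.co` (an illustrative origin):

```ts
// Relative URLs resolve against window.location.origin, so they pass:
isSameOriginLink("/api/proxy/api/v1/workspace/files/download/abc123"); // true
// Absolute same-origin URLs pass:
isSameOriginLink("https://platform.agpt.co/workspace"); // true
// External origins get the safety modal:
isSameOriginLink("https://github.com/Significant-Gravitas/AutoGPT"); // false
```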
@@ -0,0 +1,345 @@
"use client";

/**
 * Adapted from AI SDK Elements `prompt-input` component.
 * @see https://elements.ai-sdk.dev/components/prompt-input
 *
 * Stripped down to only the sub-components used by the copilot ChatInput:
 * PromptInput, PromptInputBody, PromptInputTextarea, PromptInputFooter,
 * PromptInputTools, PromptInputButton, PromptInputSubmit.
 */

import type { ChatStatus } from "ai";
import type {
  ComponentProps,
  FormEvent,
  FormEventHandler,
  HTMLAttributes,
  KeyboardEventHandler,
  ReactNode,
} from "react";

import {
  InputGroup,
  InputGroupAddon,
  InputGroupButton,
  InputGroupTextarea,
} from "@/components/ui/input-group";
import { Spinner } from "@/components/ui/spinner";
import {
  Tooltip,
  TooltipContent,
  TooltipTrigger,
} from "@/components/ui/tooltip";
import { cn } from "@/lib/utils";
import {
  ArrowUp as ArrowUpIcon,
  Stop as StopIcon,
} from "@phosphor-icons/react";
import { Children, useCallback, useEffect, useRef, useState } from "react";

// ============================================================================
// PromptInput — form wrapper
// ============================================================================

export type PromptInputProps = Omit<
  HTMLAttributes<HTMLFormElement>,
  "onSubmit"
> & {
  onSubmit: (
    text: string,
    event: FormEvent<HTMLFormElement>,
  ) => void | Promise<void>;
};

export function PromptInput({
  className,
  onSubmit,
  children,
  ...props
}: PromptInputProps) {
  const formRef = useRef<HTMLFormElement | null>(null);

  const handleSubmit: FormEventHandler<HTMLFormElement> = useCallback(
    async (event) => {
      event.preventDefault();
      const form = event.currentTarget;
      const formData = new FormData(form);
      const text = (formData.get("message") as string) || "";

      const result = onSubmit(text, event);
      if (result instanceof Promise) {
        await result;
      }
    },
    [onSubmit],
  );

  return (
    <form
      className={cn("w-full", className)}
      onSubmit={handleSubmit}
      ref={formRef}
      {...props}
    >
      <InputGroup className="overflow-hidden">{children}</InputGroup>
    </form>
  );
}

// ============================================================================
// PromptInputBody — content wrapper
// ============================================================================

export type PromptInputBodyProps = HTMLAttributes<HTMLDivElement>;

export function PromptInputBody({ className, ...props }: PromptInputBodyProps) {
  return <div className={cn("contents", className)} {...props} />;
}

// ============================================================================
// PromptInputTextarea — auto-resize textarea with Enter-to-submit
// ============================================================================

export type PromptInputTextareaProps = ComponentProps<
  typeof InputGroupTextarea
>;

export function PromptInputTextarea({
  onKeyDown,
  onChange,
  className,
  placeholder = "Type your message...",
  value,
  ...props
}: PromptInputTextareaProps) {
  const [isComposing, setIsComposing] = useState(false);
  const textareaRef = useRef<HTMLTextAreaElement | null>(null);

  function autoResize(el: HTMLTextAreaElement) {
    el.style.height = "auto";
    el.style.height = `${el.scrollHeight}px`;
  }

  // Resize when value changes externally (e.g. cleared after send)
  useEffect(() => {
    if (textareaRef.current) autoResize(textareaRef.current);
  }, [value]);

  const handleChange = useCallback(
    (e: React.ChangeEvent<HTMLTextAreaElement>) => {
      autoResize(e.currentTarget);
      onChange?.(e);
    },
    [onChange],
  );

  const handleKeyDown: KeyboardEventHandler<HTMLTextAreaElement> = useCallback(
    (e) => {
      // Call external handler first
      onKeyDown?.(e);

      if (e.defaultPrevented) return;

      if (e.key === "Enter") {
        if (isComposing || e.nativeEvent.isComposing) return;
        if (e.shiftKey) return;
        e.preventDefault();

        const { form } = e.currentTarget;
        const submitButton = form?.querySelector(
          'button[type="submit"]',
        ) as HTMLButtonElement | null;
        if (submitButton?.disabled) return;

        form?.requestSubmit();
      }
    },
    [onKeyDown, isComposing],
  );

  const handleCompositionEnd = useCallback(() => setIsComposing(false), []);
  const handleCompositionStart = useCallback(() => setIsComposing(true), []);

  return (
    <InputGroupTextarea
      ref={textareaRef}
      rows={1}
      className={cn(
        "max-h-48 min-h-0 text-base leading-6 md:text-base",
        className,
      )}
      name="message"
      value={value}
      onChange={handleChange}
      onCompositionEnd={handleCompositionEnd}
      onCompositionStart={handleCompositionStart}
      onKeyDown={handleKeyDown}
      placeholder={placeholder}
      {...props}
    />
  );
}

// ============================================================================
// PromptInputFooter — bottom bar
// ============================================================================

export type PromptInputFooterProps = Omit<
  ComponentProps<typeof InputGroupAddon>,
  "align"
>;

export function PromptInputFooter({
  className,
  ...props
}: PromptInputFooterProps) {
  return (
    <InputGroupAddon
      align="block-end"
      className={cn("justify-between gap-1", className)}
      {...props}
    />
  );
}

// ============================================================================
// PromptInputTools — left-side button group
// ============================================================================

export type PromptInputToolsProps = HTMLAttributes<HTMLDivElement>;

export function PromptInputTools({
  className,
  ...props
}: PromptInputToolsProps) {
  return (
    <div
      className={cn("flex min-w-0 items-center gap-1", className)}
      {...props}
    />
  );
}

// ============================================================================
// PromptInputButton — tool button with optional tooltip
// ============================================================================

export type PromptInputButtonTooltip =
  | string
  | {
      content: ReactNode;
      shortcut?: string;
      side?: ComponentProps<typeof TooltipContent>["side"];
    };

export type PromptInputButtonProps = ComponentProps<typeof InputGroupButton> & {
  tooltip?: PromptInputButtonTooltip;
};

export function PromptInputButton({
  variant = "ghost",
  className,
  size,
  tooltip,
  ...props
}: PromptInputButtonProps) {
  const newSize =
    size ?? (Children.count(props.children) > 1 ? "sm" : "icon-sm");

  const button = (
    <InputGroupButton
      className={cn(className)}
      size={newSize}
      type="button"
      variant={variant}
      {...props}
    />
  );

  if (!tooltip) return button;

  const tooltipContent =
    typeof tooltip === "string" ? tooltip : tooltip.content;
  const shortcut = typeof tooltip === "string" ? undefined : tooltip.shortcut;
  const side = typeof tooltip === "string" ? "top" : (tooltip.side ?? "top");

  return (
    <Tooltip>
      <TooltipTrigger asChild>{button}</TooltipTrigger>
      <TooltipContent side={side}>
        {tooltipContent}
        {shortcut && (
          <span className="ml-2 text-muted-foreground">{shortcut}</span>
        )}
      </TooltipContent>
    </Tooltip>
  );
}

// ============================================================================
// PromptInputSubmit — send / stop button
// ============================================================================

export type PromptInputSubmitProps = ComponentProps<typeof InputGroupButton> & {
  status?: ChatStatus;
  onStop?: () => void;
};

export function PromptInputSubmit({
  className,
  variant = "default",
  size = "icon-sm",
  status,
  onStop,
  onClick,
  disabled,
  children,
  ...props
}: PromptInputSubmitProps) {
  const isGenerating = status === "submitted" || status === "streaming";
  const canStop = isGenerating && Boolean(onStop);
  const isDisabled = Boolean(disabled) || (isGenerating && !canStop);

  let Icon = <ArrowUpIcon className="size-4" weight="bold" />;

  if (status === "submitted") {
    Icon = <Spinner />;
  } else if (status === "streaming") {
    Icon = <StopIcon className="size-4" weight="bold" />;
  }

  const handleClick = useCallback(
    (e: React.MouseEvent<HTMLButtonElement>) => {
      if (canStop && onStop) {
        e.preventDefault();
        onStop();
        return;
      }
      if (isGenerating) {
        e.preventDefault();
        return;
      }
      onClick?.(e);
    },
    [canStop, isGenerating, onStop, onClick],
  );

  return (
    <InputGroupButton
      aria-label={canStop ? "Stop" : "Submit"}
      className={cn(
        "size-[2.625rem] rounded-full border-zinc-800 bg-zinc-800 text-white hover:border-zinc-900 hover:bg-zinc-900 disabled:border-zinc-200 disabled:bg-zinc-200 disabled:text-white disabled:opacity-100",
        className,
      )}
      disabled={isDisabled}
      onClick={handleClick}
      size={size}
      type={canStop ? "button" : "submit"}
      variant={variant}
      {...props}
    >
      {children ?? Icon}
    </InputGroupButton>
  );
}
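A minimal composition sketch of how these pieces are meant to fit together. The import path, handler bodies, and the ChatInput wiring are assumptions for illustration, not taken from this diff:

```tsx
import { useState } from "react";
import type { ChatStatus } from "ai";
import {
  PromptInput,
  PromptInputBody,
  PromptInputFooter,
  PromptInputSubmit,
  PromptInputTextarea,
  PromptInputTools,
} from "./prompt-input"; // path assumed

function ChatInputSketch() {
  const [text, setText] = useState("");
  const status: ChatStatus = "ready"; // would come from useChat in ChatInput

  return (
    <PromptInput onSubmit={(message) => console.log("send:", message)}>
      <PromptInputBody>
        <PromptInputTextarea
          value={text}
          onChange={(e) => setText(e.target.value)}
        />
      </PromptInputBody>
      <PromptInputFooter>
        <PromptInputTools>{/* attachment buttons etc. */}</PromptInputTools>
        <PromptInputSubmit status={status} onStop={() => console.log("stop")} />
      </PromptInputFooter>
    </PromptInput>
  );
}
```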
autogpt_platform/frontend/src/components/ui/input-group.tsx (new file, 129 lines)
@@ -0,0 +1,129 @@
"use client";

import * as React from "react";
import { cva, type VariantProps } from "class-variance-authority";

import { cn } from "@/lib/utils";
import { Button } from "@/components/ui/button";
import { Textarea } from "@/components/ui/textarea";

function InputGroup({ className, ...props }: React.ComponentProps<"div">) {
  return (
    <div
      data-slot="input-group"
      role="group"
      className={cn(
        "group/input-group relative flex w-full items-center rounded-xlarge border border-neutral-200 bg-white shadow-sm outline-none transition-[color,box-shadow]",
        "min-w-0 has-[>textarea]:h-auto",

        // Variants based on alignment.
        "has-[>[data-align=block-start]]:h-auto has-[>[data-align=block-start]]:flex-col",
        "has-[>[data-align=block-end]]:h-auto has-[>[data-align=block-end]]:flex-col",

        // Focus state.
        "has-[[data-slot=input-group-control]:focus-visible]:border-zinc-400 has-[[data-slot=input-group-control]:focus-visible]:ring-1 has-[[data-slot=input-group-control]:focus-visible]:ring-zinc-400",

        className,
      )}
      {...props}
    />
  );
}

const inputGroupAddonVariants = cva(
  "text-muted-foreground flex h-auto cursor-text items-center justify-center gap-2 py-1.5 text-sm font-medium select-none group-data-[disabled=true]/input-group:opacity-50",
  {
    variants: {
      align: {
        "inline-start": "order-first pl-3",
        "inline-end": "order-last pr-3",
        "block-start": "order-first w-full justify-start px-3 pt-3",
        "block-end": "order-last w-full justify-start px-3 pb-3",
      },
    },
    defaultVariants: {
      align: "inline-start",
    },
  },
);

function InputGroupAddon({
  className,
  align = "inline-start",
  onClick,
  ...props
}: React.ComponentProps<"div"> & VariantProps<typeof inputGroupAddonVariants>) {
  return (
    <div
      role="group"
      data-slot="input-group-addon"
      data-align={align}
      className={cn(inputGroupAddonVariants({ align }), className)}
      onClick={(e) => {
        onClick?.(e);
        if (e.defaultPrevented) return;
        if ((e.target as HTMLElement).closest("button")) {
          return;
        }
        e.currentTarget.parentElement?.querySelector("textarea")?.focus();
      }}
      {...props}
    />
  );
}

const inputGroupButtonVariants = cva(
  "text-sm shadow-none flex min-w-0 gap-2 items-center",
  {
    variants: {
      size: {
        xs: "h-6 gap-1 px-2 rounded-md has-[>svg]:px-2",
        sm: "h-8 px-2.5 gap-1.5 rounded-md has-[>svg]:px-2.5",
        "icon-xs": "size-6 rounded-md p-0 has-[>svg]:p-0",
        "icon-sm": "size-8 rounded-md p-0 has-[>svg]:p-0",
      },
    },
    defaultVariants: {
      size: "xs",
    },
  },
);

function InputGroupButton({
  className,
  type = "button",
  variant = "ghost",
  size = "xs",
  ...props
}: Omit<React.ComponentProps<typeof Button>, "size"> &
  VariantProps<typeof inputGroupButtonVariants>) {
  return (
    <Button
      type={type}
      data-size={size}
      variant={variant}
      className={cn(inputGroupButtonVariants({ size }), className)}
      {...props}
    />
  );
}

const InputGroupTextarea = React.forwardRef<
  HTMLTextAreaElement,
  React.ComponentProps<"textarea">
>(({ className, ...props }, ref) => {
  return (
    <Textarea
      ref={ref}
      data-slot="input-group-control"
      className={cn(
        "flex-1 resize-none rounded-none border-0 bg-transparent py-3 shadow-none focus-visible:ring-0",
        className,
      )}
      {...props}
    />
  );
});
InputGroupTextarea.displayName = "InputGroupTextarea";

export { InputGroup, InputGroupAddon, InputGroupButton, InputGroupTextarea };
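The `align` variant is what turns a horizontal input row into a stacked layout; a small sketch of the block mode (contents are placeholders):

```tsx
import {
  InputGroup,
  InputGroupAddon,
  InputGroupButton,
  InputGroupTextarea,
} from "@/components/ui/input-group";

// inline-* addons sit on the same row as the control; block-* addons flip
// the whole group to a column layout via the has-[>[data-align=...]] rules above.
export function InputGroupSketch() {
  return (
    <InputGroup>
      <InputGroupTextarea placeholder="Message..." />
      <InputGroupAddon align="block-end">
        <InputGroupButton size="icon-sm">↑</InputGroupButton>
      </InputGroupAddon>
    </InputGroup>
  );
}
```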
autogpt_platform/frontend/src/components/ui/spinner.tsx (new file, 16 lines)
@@ -0,0 +1,16 @@
import { CircleNotch as CircleNotchIcon } from "@phosphor-icons/react";

import { cn } from "@/lib/utils";

function Spinner({ className, ...props }: React.ComponentProps<"svg">) {
  return (
    <CircleNotchIcon
      role="status"
      aria-label="Loading"
      className={cn("size-4 animate-spin", className)}
      {...(props as Record<string, unknown>)}
    />
  );
}

export { Spinner };
autogpt_platform/frontend/src/components/ui/textarea.tsx (new file, 22 lines)
@@ -0,0 +1,22 @@
import * as React from "react";

import { cn } from "@/lib/utils";

const Textarea = React.forwardRef<
  HTMLTextAreaElement,
  React.ComponentProps<"textarea">
>(({ className, ...props }, ref) => {
  return (
    <textarea
      className={cn(
        "flex min-h-[60px] w-full rounded-md border border-neutral-200 bg-transparent px-3 py-2 text-base shadow-sm placeholder:text-neutral-500 focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-neutral-950 disabled:cursor-not-allowed disabled:opacity-50 dark:border-neutral-800 dark:placeholder:text-neutral-400 dark:focus-visible:ring-neutral-300 md:text-sm",
        className,
      )}
      ref={ref}
      {...props}
    />
  );
});
Textarea.displayName = "Textarea";

export { Textarea };
@@ -41,7 +41,7 @@ export default function useCredits({

  const fetchCredits = useCallback(async () => {
    const response = await api.getUserCredit();
    setCredits(response.credits);
    setCredits(response.credits ?? null);
  }, [api]);

  useEffect(() => {
@@ -72,6 +72,18 @@ import type {

const isClient = environment.isClientSide();

/**
 * Thrown when a request fails because the user is logging out.
 * Callers can catch this specifically to silently ignore logout-related failures,
 * rather than receiving null and crashing on property access.
 */
export class LogoutInterruptError extends Error {
  constructor(message: string) {
    super(message);
    this.name = "LogoutInterruptError";
  }
}

export default class BackendAPI {
  private baseUrl: string;
  private wsUrl: string;
@@ -128,11 +140,15 @@ export default class BackendAPI {
  /////////////// CREDITS ////////////////
  ////////////////////////////////////////

  getUserCredit(): Promise<{ credits: number }> {
  async getUserCredit(): Promise<{ credits: number }> {
    try {
      return this._get("/credits");
    } catch {
      return Promise.resolve({ credits: 0 });
      const response = await this._get("/credits");
      return response ?? { credits: 0 };
    } catch (error) {
      if (!(error instanceof LogoutInterruptError)) {
        Sentry.captureException(error);
      }
      return { credits: 0 };
    }
  }

@@ -433,13 +449,14 @@ export default class BackendAPI {
  ///////////// V2 STORE API /////////////
  ////////////////////////////////////////

  getStoreProfile(): Promise<ProfileDetails | null> {
  async getStoreProfile(): Promise<ProfileDetails | null> {
    try {
      const result = this._get("/store/profile");
      return result;
      return await this._get("/store/profile");
    } catch (error) {
      console.error("Error fetching store profile:", error);
      return Promise.resolve(null);
      if (!(error instanceof LogoutInterruptError)) {
        Sentry.captureException(error);
      }
      return null;
    }
  }

@@ -1040,7 +1057,7 @@ export default class BackendAPI {
        "Authentication request failed during logout, ignoring:",
        error.message,
      );
      return null;
      throw new LogoutInterruptError("Request cancelled: logout in progress");
    }
    throw error;
  }
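Per the class's doc comment, call sites that race with logout can branch on the error type instead of null-checking. A sketch of the pattern the two methods above both follow (the helper name and Sentry import path are assumptions):

```ts
import * as Sentry from "@sentry/nextjs"; // import path assumed

// Swallow logout races silently, report everything else, and degrade to a
// caller-supplied fallback either way.
async function safeGet<T>(call: () => Promise<T>, fallback: T): Promise<T> {
  try {
    return await call();
  } catch (error) {
    if (!(error instanceof LogoutInterruptError)) {
      Sentry.captureException(error); // only real failures reach Sentry
    }
    return fallback;
  }
}

// e.g. const credit = await safeGet(() => api.getUserCredit(), { credits: 0 });
```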
@@ -1,94 +0,0 @@
/**
 * Unit tests for helpers.ts
 *
 * These tests validate the error handling in handleFetchError, specifically
 * the fix for the issue where calling response.json() on non-JSON responses
 * would throw: "Failed to execute 'json' on 'Response': Unexpected token 'A',
 * "A server e"... is not valid JSON"
 *
 * To run these tests, you'll need to set up a unit test framework like Jest or Vitest.
 *
 * Test cases to cover:
 *
 * 1. JSON error responses should be parsed correctly
 *    - Given: Response with content-type: application/json
 *    - When: handleFetchError is called
 *    - Then: Should parse JSON and return ApiError with parsed response
 *
 * 2. Non-JSON error responses (e.g., HTML) should be handled gracefully
 *    - Given: Response with content-type: text/html
 *    - When: handleFetchError is called
 *    - Then: Should read as text and return ApiError with text response
 *
 * 3. Response without content-type header should be handled
 *    - Given: Response without content-type header
 *    - When: handleFetchError is called
 *    - Then: Should default to reading as text
 *
 * 4. JSON parsing errors should not throw
 *    - Given: Response with content-type: application/json but HTML body
 *    - When: handleFetchError is called and json() throws
 *    - Then: Should catch error, log warning, and return ApiError with null response
 *
 * 5. Specific validation for the fixed bug
 *    - Given: 502 Bad Gateway with content-type: application/json but HTML body
 *    - When: response.json() throws "Unexpected token 'A'" error
 *    - Then: Should NOT propagate the error, should return ApiError with null response
 */

import { handleFetchError } from "./helpers";

// Manual test function - can be run in browser console or Node
export async function testHandleFetchError() {
  console.log("Testing handleFetchError...");

  // Test 1: JSON response
  const jsonResponse = new Response(
    JSON.stringify({ error: "Internal server error" }),
    {
      status: 500,
      headers: { "content-type": "application/json" },
    },
  );
  const error1 = await handleFetchError(jsonResponse);
  console.assert(
    error1.status === 500 && error1.response?.error === "Internal server error",
    "Test 1 failed: JSON response",
  );

  // Test 2: HTML response
  const htmlResponse = new Response("<html><body>Server Error</body></html>", {
    status: 502,
    headers: { "content-type": "text/html" },
  });
  const error2 = await handleFetchError(htmlResponse);
  console.assert(
    error2.status === 502 &&
      typeof error2.response === "string" &&
      error2.response.includes("Server Error"),
    "Test 2 failed: HTML response",
  );

  // Test 3: Mismatched content-type (claims JSON but is HTML)
  // This simulates the bug that was fixed
  const mismatchedResponse = new Response(
    "<html><body>A server error occurred</body></html>",
    {
      status: 502,
      headers: { "content-type": "application/json" }, // Claims JSON but isn't
    },
  );
  try {
    const error3 = await handleFetchError(mismatchedResponse);
    console.assert(
      error3.status === 502 && error3.response === null,
      "Test 3 failed: Mismatched content-type should return null response",
    );
    console.log("✓ All tests passed!");
  } catch (e) {
    console.error("✗ Test 3 failed: Should not throw error", e);
  }
}

// Uncomment to run manual tests
// testHandleFetchError();
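With `src/**/*.test.ts` now included in the Vitest config (see the vitest.config change below), the cases documented in this deleted file can become real unit tests. A sketch of case 1, based on the manual assertions above:

```ts
import { describe, expect, it } from "vitest";
import { handleFetchError } from "./helpers";

describe("handleFetchError", () => {
  it("parses JSON error responses", async () => {
    const response = new Response(
      JSON.stringify({ error: "Internal server error" }),
      { status: 500, headers: { "content-type": "application/json" } },
    );
    const error = await handleFetchError(response);
    expect(error.status).toBe(500);
    expect(error.response?.error).toBe("Internal server error");
  });
});
```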
@@ -5,7 +5,7 @@ import { colors } from "./src/components/styles/colors";

const config = {
  darkMode: ["class", ".dark-mode"], // ignore dark: prefix classes for now until we fully support dark mode
  content: ["./src/**/*.{ts,tsx}"],
  content: ["./src/**/*.{ts,tsx}", "./node_modules/streamdown/dist/**/*.js"],
  prefix: "",
  theme: {
    container: {
@@ -6,7 +6,7 @@ export default defineConfig({
  plugins: [tsconfigPaths(), react()],
  test: {
    environment: "happy-dom",
    include: ["src/**/*.test.tsx"],
    include: ["src/**/*.test.tsx", "src/**/*.test.ts"],
    setupFiles: ["./src/tests/integrations/vitest.setup.tsx"],
  },
});
@@ -1,165 +0,0 @@
# Implementation Plan: SECRT-1950 - Apply E2E CI Optimizations to Claude Code Workflows

## Ticket
[SECRT-1950](https://linear.app/autogpt/issue/SECRT-1950)

## Summary
Apply Pwuts's CI performance optimizations from PR #12090 to Claude Code workflows.

## Reference PR
https://github.com/Significant-Gravitas/AutoGPT/pull/12090

---

## Analysis

### Current State (claude.yml)

**pnpm caching (lines 104-118):**
```yaml
- name: Set up Node.js
  uses: actions/setup-node@v6
  with:
    node-version: "22"

- name: Enable corepack
  run: corepack enable

- name: Set pnpm store directory
  run: |
    pnpm config set store-dir ~/.pnpm-store
    echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV

- name: Cache frontend dependencies
  uses: actions/cache@v5
  with:
    path: ~/.pnpm-store
    key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
    restore-keys: |
      ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
      ${{ runner.os }}-pnpm-
```

**Docker setup (lines 134-165):**
- Uses `docker-buildx-action@v3`
- Has manual Docker image caching via `actions/cache`
- Runs `docker compose up` without buildx bake optimization

### Pwuts's Optimizations (PR #12090)

1. **Simplified pnpm caching** - Use `setup-node` built-in cache:
```yaml
- name: Enable corepack
  run: corepack enable

- name: Set up Node
  uses: actions/setup-node@v6
  with:
    node-version: "22.18.0"
    cache: "pnpm"
    cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
```

2. **Docker build caching via buildx bake**:
```yaml
- name: Set up Docker Buildx
  uses: docker/setup-buildx-action@v3
  with:
    driver: docker-container
    driver-opts: network=host

- name: Expose GHA cache to docker buildx CLI
  uses: crazy-max/ghaction-github-runtime@v3

- name: Build Docker images (with cache)
  run: |
    pip install pyyaml
    docker compose -f docker-compose.yml config > docker-compose.resolved.yml
    python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
      --source docker-compose.resolved.yml \
      --cache-from "type=gha" \
      --cache-to "type=gha,mode=max" \
      ...
    docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
```

---

## Proposed Changes

### 1. Update pnpm caching in `claude.yml`

**Before:**
- Manual cache key generation
- Separate `actions/cache` step
- Manual pnpm store directory config

**After:**
- Use `setup-node` built-in `cache: "pnpm"` option
- Remove manual cache step
- Keep `corepack enable` before `setup-node`

### 2. Update Docker build in `claude.yml`

**Before:**
- Manual Docker layer caching via `actions/cache` with `/tmp/.buildx-cache`
- Simple `docker compose build`

**After:**
- Use `crazy-max/ghaction-github-runtime@v3` to expose GHA cache
- Use `docker-ci-fix-compose-build-cache.py` script
- Build with `docker buildx bake`

### 3. Apply same changes to other Claude workflows

- `claude-dependabot.yml` - Check if it has similar patterns
- `claude-ci-failure-auto-fix.yml` - Check if it has similar patterns
- `copilot-setup-steps.yml` - Reusable workflow, may be the source of truth

---

## Files to Modify

1. `.github/workflows/claude.yml`
2. `.github/workflows/claude-dependabot.yml` (if applicable)
3. `.github/workflows/claude-ci-failure-auto-fix.yml` (if applicable)

## Dependencies

- PR #12090 must be merged first (provides the `docker-ci-fix-compose-build-cache.py` script)
- Backend Dockerfile optimizations (already in PR #12090)

---

## Test Plan

1. Create PR with changes
2. Trigger Claude workflow manually or via `@claude` mention on a test issue
3. Compare CI runtime before/after
4. Verify Claude agent still works correctly (can checkout, build, run tests)

---

## Risk Assessment

**Low risk:**
- These are CI infrastructure changes, not code changes
- If caching fails, builds fall back to uncached (slower but works)
- Changes mirror proven patterns from PR #12090

---

## Questions for Reviewer

1. Should we wait for PR #12090 to merge before creating this PR?
2. Does `copilot-setup-steps.yml` need updating, or is it a separate concern?
3. Any concerns about cache key collisions between frontend E2E and Claude workflows?

---

## Verified

- ✅ **`claude-dependabot.yml`**: Has same pnpm caching pattern as `claude.yml` (manual `actions/cache`) — NEEDS UPDATE
- ✅ **`claude-ci-failure-auto-fix.yml`**: Simple workflow with no pnpm or Docker caching — NO CHANGES NEEDED
- ✅ **Script path**: `docker-ci-fix-compose-build-cache.py` will be at `.github/workflows/scripts/` after PR #12090 merges
- ✅ **Test seed caching**: NOT APPLICABLE — Claude workflows spin up a dev environment but don't run E2E tests with pre-seeded data. The seed caching in PR #12090 is specific to the frontend E2E test suite which needs consistent test data. Claude just needs the services running.