mirror of https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-02-14 00:35:02 -05:00

Compare commits
5 Commits: feat/copit ... feat/opena

| Author | SHA1 | Date |
|---|---|---|
|  | 889b4e4152 |  |
|  | e8c50b96d1 |  |
|  | 30e854569a |  |
|  | 301d7cbada |  |
|  | d95aef7665 |  |
@@ -0,0 +1,154 @@
"""Dummy Agent Generator for testing.

Returns mock responses matching the format expected from the external service.

Enable via AGENTGENERATOR_USE_DUMMY=true in settings.

WARNING: This is for testing only. Do not use in production.
"""

import asyncio
import logging
import uuid
from typing import Any

logger = logging.getLogger(__name__)

# Dummy decomposition result (instructions type)
DUMMY_DECOMPOSITION_RESULT: dict[str, Any] = {
    "type": "instructions",
    "steps": [
        {
            "description": "Get input from user",
            "action": "input",
            "block_name": "AgentInputBlock",
        },
        {
            "description": "Process the input",
            "action": "process",
            "block_name": "TextFormatterBlock",
        },
        {
            "description": "Return output to user",
            "action": "output",
            "block_name": "AgentOutputBlock",
        },
    ],
}

# Block IDs from backend/blocks/io.py
AGENT_INPUT_BLOCK_ID = "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b"
AGENT_OUTPUT_BLOCK_ID = "363ae599-353e-4804-937e-b2ee3cef3da4"


def _generate_dummy_agent_json() -> dict[str, Any]:
    """Generate a minimal valid agent JSON for testing."""
    input_node_id = str(uuid.uuid4())
    output_node_id = str(uuid.uuid4())

    return {
        "id": str(uuid.uuid4()),
        "version": 1,
        "is_active": True,
        "name": "Dummy Test Agent",
        "description": "A dummy agent generated for testing purposes",
        "nodes": [
            {
                "id": input_node_id,
                "block_id": AGENT_INPUT_BLOCK_ID,
                "input_default": {
                    "name": "input",
                    "title": "Input",
                    "description": "Enter your input",
                    "placeholder_values": [],
                },
                "metadata": {"position": {"x": 0, "y": 0}},
            },
            {
                "id": output_node_id,
                "block_id": AGENT_OUTPUT_BLOCK_ID,
                "input_default": {
                    "name": "output",
                    "title": "Output",
                    "description": "Agent output",
                    "format": "{output}",
                },
                "metadata": {"position": {"x": 400, "y": 0}},
            },
        ],
        "links": [
            {
                "id": str(uuid.uuid4()),
                "source_id": input_node_id,
                "sink_id": output_node_id,
                "source_name": "result",
                "sink_name": "value",
                "is_static": False,
            },
        ],
    }


async def decompose_goal_dummy(
    description: str,
    context: str = "",
    library_agents: list[dict[str, Any]] | None = None,
) -> dict[str, Any]:
    """Return dummy decomposition result."""
    logger.info("Using dummy agent generator for decompose_goal")
    return DUMMY_DECOMPOSITION_RESULT.copy()


async def generate_agent_dummy(
    instructions: dict[str, Any],
    library_agents: list[dict[str, Any]] | None = None,
    operation_id: str | None = None,
    task_id: str | None = None,
) -> dict[str, Any]:
    """Return dummy agent JSON after a simulated delay."""
    logger.info("Using dummy agent generator for generate_agent (30s delay)")
    await asyncio.sleep(30)
    return _generate_dummy_agent_json()


async def generate_agent_patch_dummy(
    update_request: str,
    current_agent: dict[str, Any],
    library_agents: list[dict[str, Any]] | None = None,
    operation_id: str | None = None,
    task_id: str | None = None,
) -> dict[str, Any]:
    """Return dummy patched agent (returns the current agent with updated description)."""
    logger.info("Using dummy agent generator for generate_agent_patch")
    patched = current_agent.copy()
    patched["description"] = (
        f"{current_agent.get('description', '')} (updated: {update_request})"
    )
    return patched


async def customize_template_dummy(
    template_agent: dict[str, Any],
    modification_request: str,
    context: str = "",
) -> dict[str, Any]:
    """Return dummy customized template (returns template with updated description)."""
    logger.info("Using dummy agent generator for customize_template")
    customized = template_agent.copy()
    customized["description"] = (
        f"{template_agent.get('description', '')} (customized: {modification_request})"
    )
    return customized


async def get_blocks_dummy() -> list[dict[str, Any]]:
    """Return dummy blocks list."""
    logger.info("Using dummy agent generator for get_blocks")
    return [
        {"id": AGENT_INPUT_BLOCK_ID, "name": "AgentInputBlock"},
        {"id": AGENT_OUTPUT_BLOCK_ID, "name": "AgentOutputBlock"},
    ]


async def health_check_dummy() -> bool:
    """Always returns healthy for dummy service."""
    return True
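As the docstring above notes, the dummy path is opt-in via AGENTGENERATOR_USE_DUMMY=true. For orientation, a minimal sketch of exercising the quick helpers directly; the import path below is hypothetical (point it at the module's real location), and generate_agent_dummy is skipped because it deliberately sleeps for 30 seconds:

import asyncio

# Hypothetical import path, for illustration only; use the module above's real location.
from dummy import decompose_goal_dummy, get_blocks_dummy, health_check_dummy


async def main() -> None:
    # The decomposition result is a fixed three-step "instructions" payload.
    steps = await decompose_goal_dummy("Summarize my inbox every morning")
    assert steps["type"] == "instructions" and len(steps["steps"]) == 3

    # Only the two I/O blocks are reported by the dummy block list.
    blocks = await get_blocks_dummy()
    assert {b["name"] for b in blocks} == {"AgentInputBlock", "AgentOutputBlock"}

    # Health check always reports healthy in dummy mode.
    assert await health_check_dummy() is True


asyncio.run(main())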
@@ -12,8 +12,19 @@ import httpx
 from backend.util.settings import Settings

+from .dummy import (
+    customize_template_dummy,
+    decompose_goal_dummy,
+    generate_agent_dummy,
+    generate_agent_patch_dummy,
+    get_blocks_dummy,
+    health_check_dummy,
+)
+
 logger = logging.getLogger(__name__)

+_dummy_mode_warned = False
+

 def _create_error_response(
     error_message: str,

@@ -90,10 +101,26 @@ def _get_settings() -> Settings:
     return _settings


-def is_external_service_configured() -> bool:
-    """Check if external Agent Generator service is configured."""
+def _is_dummy_mode() -> bool:
+    """Check if dummy mode is enabled for testing."""
+    global _dummy_mode_warned
     settings = _get_settings()
-    return bool(settings.config.agentgenerator_host)
+    is_dummy = bool(settings.config.agentgenerator_use_dummy)
+    if is_dummy and not _dummy_mode_warned:
+        logger.warning(
+            "Agent Generator running in DUMMY MODE - returning mock responses. "
+            "Do not use in production!"
+        )
+        _dummy_mode_warned = True
+    return is_dummy
+
+
+def is_external_service_configured() -> bool:
+    """Check if external Agent Generator service is configured (or dummy mode)."""
+    settings = _get_settings()
+    return bool(settings.config.agentgenerator_host) or bool(
+        settings.config.agentgenerator_use_dummy
+    )


 def _get_base_url() -> str:

@@ -137,6 +164,9 @@ async def decompose_goal_external(
         - {"type": "error", "error": "...", "error_type": "..."} on error
         Or None on unexpected error
     """
+    if _is_dummy_mode():
+        return await decompose_goal_dummy(description, context, library_agents)
+
     client = _get_client()

     if context:

@@ -226,6 +256,11 @@ async def generate_agent_external(
     Returns:
         Agent JSON dict, {"status": "accepted"} for async, or error dict {"type": "error", ...} on error
     """
+    if _is_dummy_mode():
+        return await generate_agent_dummy(
+            instructions, library_agents, operation_id, task_id
+        )
+
     client = _get_client()

     # Build request payload

@@ -297,6 +332,11 @@ async def generate_agent_patch_external(
     Returns:
         Updated agent JSON, clarifying questions dict, {"status": "accepted"} for async, or error dict on error
     """
+    if _is_dummy_mode():
+        return await generate_agent_patch_dummy(
+            update_request, current_agent, library_agents, operation_id, task_id
+        )
+
     client = _get_client()

     # Build request payload

@@ -383,6 +423,11 @@ async def customize_template_external(
     Returns:
         Customized agent JSON, clarifying questions dict, or error dict on error
     """
+    if _is_dummy_mode():
+        return await customize_template_dummy(
+            template_agent, modification_request, context
+        )
+
     client = _get_client()

     request = modification_request

@@ -445,6 +490,9 @@ async def get_blocks_external() -> list[dict[str, Any]] | None:
     Returns:
         List of block info dicts or None on error
     """
+    if _is_dummy_mode():
+        return await get_blocks_dummy()
+
     client = _get_client()

     try:

@@ -478,6 +526,9 @@ async def health_check() -> bool:
     if not is_external_service_configured():
         return False

+    if _is_dummy_mode():
+        return await health_check_dummy()
+
     client = _get_client()

     try:
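The module-level _dummy_mode_warned flag above is a warn-once guard, so the dummy-mode warning is logged a single time per process. A standalone sketch of the same pattern (the env var name comes from the dummy module's docstring; reading it directly like this is an illustration, not how the service resolves its settings):

import logging
import os

logger = logging.getLogger(__name__)
_warned = False


def dummy_mode_enabled() -> bool:
    """Illustrative warn-once check, analogous to _is_dummy_mode above."""
    global _warned
    enabled = os.environ.get("AGENTGENERATOR_USE_DUMMY", "").lower() == "true"
    if enabled and not _warned:
        logger.warning("Dummy mode is ON: returning mock responses, not for production")
        _warned = True
    return enabled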
@@ -32,6 +32,14 @@ from backend.data.model import (
 from backend.integrations.providers import ProviderName
 from backend.util import json
 from backend.util.logging import TruncatedLogger
+from backend.util.openai_responses import (
+    convert_tools_to_responses_format,
+    extract_responses_content,
+    extract_responses_reasoning,
+    extract_responses_tool_calls,
+    extract_usage,
+    requires_responses_api,
+)
 from backend.util.prompt import compress_context, estimate_token_count
 from backend.util.text import TextFormatter

@@ -659,38 +667,72 @@ async def llm_call(
     max_tokens = max(min(available_tokens, model_max_output, user_max), 1)

     if provider == "openai":
-        tools_param = tools if tools else openai.NOT_GIVEN
         oai_client = openai.AsyncOpenAI(api_key=credentials.api_key.get_secret_value())
-        response_format = None

-        parallel_tool_calls = get_parallel_tool_calls_param(
-            llm_model, parallel_tool_calls
-        )
-
-        if force_json_output:
-            response_format = {"type": "json_object"}
-
-        response = await oai_client.chat.completions.create(
-            model=llm_model.value,
-            messages=prompt,  # type: ignore
-            response_format=response_format,  # type: ignore
-            max_completion_tokens=max_tokens,
-            tools=tools_param,  # type: ignore
-            parallel_tool_calls=parallel_tool_calls,
-        )
-
-        tool_calls = extract_openai_tool_calls(response)
-        reasoning = extract_openai_reasoning(response)
-
-        return LLMResponse(
-            raw_response=response.choices[0].message,
-            prompt=prompt,
-            response=response.choices[0].message.content or "",
-            tool_calls=tool_calls,
-            prompt_tokens=response.usage.prompt_tokens if response.usage else 0,
-            completion_tokens=response.usage.completion_tokens if response.usage else 0,
-            reasoning=reasoning,
-        )
+        # Check if this model requires the Responses API (reasoning models: o1, o3, etc.)
+        if requires_responses_api(llm_model.value):
+            # Use responses.create for reasoning models
+            tools_converted = (
+                convert_tools_to_responses_format(tools) if tools else None
+            )
+
+            response = await oai_client.responses.create(
+                model=llm_model.value,
+                input=prompt,  # type: ignore
+                tools=tools_converted,  # type: ignore
+                max_output_tokens=max_tokens,
+                store=False,  # Don't persist conversations
+            )
+
+            tool_calls = extract_responses_tool_calls(response)
+            reasoning = extract_responses_reasoning(response)
+            content = extract_responses_content(response)
+            prompt_tokens, completion_tokens = extract_usage(response, True)
+
+            return LLMResponse(
+                raw_response=response,
+                prompt=prompt,
+                response=content,
+                tool_calls=tool_calls,
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                reasoning=reasoning,
+            )
+        else:
+            # Use chat.completions.create for standard models
+            tools_param = tools if tools else openai.NOT_GIVEN
+            response_format = None
+
+            parallel_tool_calls = get_parallel_tool_calls_param(
+                llm_model, parallel_tool_calls
+            )
+
+            if force_json_output:
+                response_format = {"type": "json_object"}
+
+            response = await oai_client.chat.completions.create(
+                model=llm_model.value,
+                messages=prompt,  # type: ignore
+                response_format=response_format,  # type: ignore
+                max_completion_tokens=max_tokens,
+                tools=tools_param,  # type: ignore
+                parallel_tool_calls=parallel_tool_calls,
+            )
+
+            tool_calls = extract_openai_tool_calls(response)
+            reasoning = extract_openai_reasoning(response)
+
+            return LLMResponse(
+                raw_response=response.choices[0].message,
+                prompt=prompt,
+                response=response.choices[0].message.content or "",
+                tool_calls=tool_calls,
+                prompt_tokens=response.usage.prompt_tokens if response.usage else 0,
+                completion_tokens=(
+                    response.usage.completion_tokens if response.usage else 0
+                ),
+                reasoning=reasoning,
+            )
     elif provider == "anthropic":

         an_tools = convert_openai_tool_fmt_to_anthropic(tools)
autogpt_platform/backend/backend/util/openai_responses.py (new file, 185 lines)
@@ -0,0 +1,185 @@
"""Helpers for OpenAI Responses API migration.

This module provides utilities for conditionally using OpenAI's Responses API
instead of Chat Completions for reasoning models (o1, o3, etc.) that require it.
"""

from typing import Any

# Exact model identifiers that require the Responses API.
# Use exact matching to avoid false positives on future models.
# NOTE: Update this set when OpenAI releases new reasoning models.
REASONING_MODELS = frozenset(
    {
        # O1 family
        "o1",
        "o1-mini",
        "o1-preview",
        "o1-2024-12-17",
        # O3 family
        "o3",
        "o3-mini",
        "o3-2025-04-16",
        "o3-mini-2025-01-31",
    }
)


def requires_responses_api(model: str) -> bool:
    """Check if model requires the Responses API (exact match).

    Args:
        model: The model identifier string (e.g., "o3-mini", "gpt-4o")

    Returns:
        True if the model requires responses.create, False otherwise
    """
    return model in REASONING_MODELS


def convert_tools_to_responses_format(tools: list[dict] | None) -> list[dict]:
    """Convert Chat Completions tool format to Responses API format.

    The Responses API uses internally-tagged polymorphism (flatter structure)
    and functions are strict by default.

    Chat Completions format:
        {"type": "function", "function": {"name": "...", "parameters": {...}}}

    Responses API format:
        {"type": "function", "name": "...", "parameters": {...}}

    Args:
        tools: List of tools in Chat Completions format

    Returns:
        List of tools in Responses API format
    """
    if not tools:
        return []

    converted = []
    for tool in tools:
        if tool.get("type") == "function":
            func = tool.get("function", {})
            converted.append(
                {
                    "type": "function",
                    "name": func.get("name"),
                    "description": func.get("description"),
                    "parameters": func.get("parameters"),
                    # Note: strict=True is default in Responses API
                }
            )
        else:
            # Pass through non-function tools as-is
            converted.append(tool)
    return converted


def extract_responses_tool_calls(response: Any) -> list[dict] | None:
    """Extract tool calls from Responses API response.

    The Responses API returns tool calls as separate items in the output array
    with type="function_call".

    Args:
        response: The Responses API response object

    Returns:
        List of tool calls in a normalized format, or None if no tool calls
    """
    tool_calls = []
    for item in response.output:
        if getattr(item, "type", None) == "function_call":
            tool_calls.append(
                {
                    "id": item.call_id,
                    "type": "function",
                    "function": {
                        "name": item.name,
                        "arguments": item.arguments,
                    },
                }
            )
    return tool_calls if tool_calls else None


def extract_usage(response: Any, is_responses_api: bool) -> tuple[int, int]:
    """Extract token usage from either API response.

    The Responses API uses different field names for token counts:
    - Chat Completions: prompt_tokens, completion_tokens
    - Responses API: input_tokens, output_tokens

    Args:
        response: The API response object
        is_responses_api: True if response is from Responses API

    Returns:
        Tuple of (prompt_tokens, completion_tokens)
    """
    if not response.usage:
        return 0, 0

    if is_responses_api:
        # Responses API uses different field names
        return (
            getattr(response.usage, "input_tokens", 0),
            getattr(response.usage, "output_tokens", 0),
        )
    else:
        # Chat Completions API
        return (
            getattr(response.usage, "prompt_tokens", 0),
            getattr(response.usage, "completion_tokens", 0),
        )


def extract_responses_content(response: Any) -> str:
    """Extract text content from Responses API response.

    Args:
        response: The Responses API response object

    Returns:
        The text content from the response, or empty string if none
    """
    # The SDK provides a helper property
    if hasattr(response, "output_text"):
        return response.output_text or ""

    # Fallback: manually extract from output items
    for item in response.output:
        if getattr(item, "type", None) == "message":
            for content in getattr(item, "content", []):
                if getattr(content, "type", None) == "output_text":
                    return getattr(content, "text", "")
    return ""


def extract_responses_reasoning(response: Any) -> str | None:
    """Extract reasoning content from Responses API response.

    Reasoning models return their reasoning process in the response,
    which can be useful for debugging or display.

    Args:
        response: The Responses API response object

    Returns:
        The reasoning text, or None if not present
    """
    for item in response.output:
        if getattr(item, "type", None) == "reasoning":
            # Reasoning items may have summary or content
            summary = getattr(item, "summary", [])
            if summary:
                # Join summary items if present
                texts = []
                for s in summary:
                    if hasattr(s, "text"):
                        texts.append(s.text)
                if texts:
                    return "\n".join(texts)
    return None
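A quick usage sketch of the helpers above; only this driver snippet is new, and the asserted behavior mirrors the module and its tests:

from backend.util.openai_responses import (
    convert_tools_to_responses_format,
    requires_responses_api,
)

# Reasoning models are matched exactly; everything else stays on Chat Completions.
assert requires_responses_api("o3-mini") is True
assert requires_responses_api("gpt-4o") is False

chat_tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the weather in a location",
            "parameters": {
                "type": "object",
                "properties": {"location": {"type": "string"}},
            },
        },
    }
]

# The nested {"function": {...}} wrapper is flattened for responses.create.
flat = convert_tools_to_responses_format(chat_tools)
assert flat[0]["name"] == "get_weather"
assert "function" not in flat[0]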
autogpt_platform/backend/backend/util/openai_responses_test.py (new file, 155 lines)
@@ -0,0 +1,155 @@
"""Tests for OpenAI Responses API helpers."""

import pytest

from backend.util.openai_responses import (
    REASONING_MODELS,
    convert_tools_to_responses_format,
    requires_responses_api,
)


class TestRequiresResponsesApi:
    """Tests for the requires_responses_api function."""

    def test_o1_models_require_responses_api(self):
        """O1 family models should require the Responses API."""
        assert requires_responses_api("o1") is True
        assert requires_responses_api("o1-mini") is True
        assert requires_responses_api("o1-preview") is True
        assert requires_responses_api("o1-2024-12-17") is True

    def test_o3_models_require_responses_api(self):
        """O3 family models should require the Responses API."""
        assert requires_responses_api("o3") is True
        assert requires_responses_api("o3-mini") is True
        assert requires_responses_api("o3-2025-04-16") is True
        assert requires_responses_api("o3-mini-2025-01-31") is True

    def test_gpt_models_do_not_require_responses_api(self):
        """GPT models should NOT require the Responses API."""
        assert requires_responses_api("gpt-4o") is False
        assert requires_responses_api("gpt-4o-mini") is False
        assert requires_responses_api("gpt-4-turbo") is False
        assert requires_responses_api("gpt-3.5-turbo") is False
        assert requires_responses_api("gpt-5") is False
        assert requires_responses_api("gpt-5-mini") is False

    def test_other_models_do_not_require_responses_api(self):
        """Other provider models should NOT require the Responses API."""
        assert requires_responses_api("claude-3-opus") is False
        assert requires_responses_api("llama-3.3-70b") is False
        assert requires_responses_api("gemini-pro") is False

    def test_empty_string_does_not_require_responses_api(self):
        """Empty string should not require the Responses API."""
        assert requires_responses_api("") is False

    def test_exact_matching_no_false_positives(self):
        """Should not match models that just start with 'o1' or 'o3'."""
        # These are hypothetical models that start with o1/o3 but aren't
        # actually reasoning models
        assert requires_responses_api("o1-turbo-hypothetical") is False
        assert requires_responses_api("o3-fast-hypothetical") is False
        assert requires_responses_api("o100") is False


class TestConvertToolsToResponsesFormat:
    """Tests for the convert_tools_to_responses_format function."""

    def test_empty_tools_returns_empty_list(self):
        """Empty or None tools should return empty list."""
        assert convert_tools_to_responses_format(None) == []
        assert convert_tools_to_responses_format([]) == []

    def test_converts_function_tool_format(self):
        """Should convert Chat Completions function format to Responses format."""
        chat_completions_tools = [
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Get the weather in a location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {"type": "string"},
                        },
                        "required": ["location"],
                    },
                },
            }
        ]

        result = convert_tools_to_responses_format(chat_completions_tools)

        assert len(result) == 1
        assert result[0]["type"] == "function"
        assert result[0]["name"] == "get_weather"
        assert result[0]["description"] == "Get the weather in a location"
        assert result[0]["parameters"] == {
            "type": "object",
            "properties": {
                "location": {"type": "string"},
            },
            "required": ["location"],
        }
        # Should not have nested "function" key
        assert "function" not in result[0]

    def test_handles_multiple_tools(self):
        """Should handle multiple tools."""
        chat_completions_tools = [
            {
                "type": "function",
                "function": {
                    "name": "tool_1",
                    "description": "First tool",
                    "parameters": {"type": "object", "properties": {}},
                },
            },
            {
                "type": "function",
                "function": {
                    "name": "tool_2",
                    "description": "Second tool",
                    "parameters": {"type": "object", "properties": {}},
                },
            },
        ]

        result = convert_tools_to_responses_format(chat_completions_tools)

        assert len(result) == 2
        assert result[0]["name"] == "tool_1"
        assert result[1]["name"] == "tool_2"

    def test_passes_through_non_function_tools(self):
        """Non-function tools should be passed through as-is."""
        tools = [{"type": "web_search", "config": {"enabled": True}}]

        result = convert_tools_to_responses_format(tools)

        assert result == tools


class TestReasoningModelsSet:
    """Tests for the REASONING_MODELS constant."""

    def test_reasoning_models_is_frozenset(self):
        """REASONING_MODELS should be a frozenset (immutable)."""
        assert isinstance(REASONING_MODELS, frozenset)

    def test_contains_expected_models(self):
        """Should contain all expected reasoning models."""
        expected = {
            "o1",
            "o1-mini",
            "o1-preview",
            "o1-2024-12-17",
            "o3",
            "o3-mini",
            "o3-2025-04-16",
            "o3-mini-2025-01-31",
        }
        assert expected.issubset(REASONING_MODELS)
@@ -368,6 +368,10 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
         default=600,
         description="The timeout in seconds for Agent Generator service requests (includes retries for rate limits)",
     )
+    agentgenerator_use_dummy: bool = Field(
+        default=False,
+        description="Use dummy agent generator responses for testing (bypasses external service)",
+    )

     enable_example_blocks: bool = Field(
         default=False,
@@ -25,6 +25,7 @@ class TestServiceConfiguration:
         """Test that external service is not configured when host is empty."""
         mock_settings = MagicMock()
         mock_settings.config.agentgenerator_host = ""
+        mock_settings.config.agentgenerator_use_dummy = False

         with patch.object(service, "_get_settings", return_value=mock_settings):
             assert service.is_external_service_configured() is False
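By the same pattern, a companion test could cover the dummy-only case; this is a sketch, not part of the changeset, and `service` is assumed to be the module under test imported at the top of the real test file:

from unittest.mock import MagicMock, patch


def test_configured_when_only_dummy_mode_is_enabled():
    """Dummy mode alone should count the Agent Generator as configured."""
    mock_settings = MagicMock()
    mock_settings.config.agentgenerator_host = ""
    mock_settings.config.agentgenerator_use_dummy = True

    # `service` is assumed to be imported as in the surrounding test module.
    with patch.object(service, "_get_settings", return_value=mock_settings):
        assert service.is_external_service_configured() is True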
@@ -22,6 +22,11 @@ Sentry.init({

   enabled: shouldEnable,

+  // Suppress cross-origin stylesheet errors from Sentry Replay (rrweb)
+  // serializing DOM snapshots with cross-origin stylesheets
+  // (e.g., from browser extensions or CDN-loaded CSS)
+  ignoreErrors: [/Not allowed to access cross-origin stylesheet/],
+
   // Add optional integrations for additional features
   integrations: [
     Sentry.captureConsoleIntegration(),
@@ -1,10 +0,0 @@
-import { parseAsString, useQueryState } from "nuqs";
-
-export function useCopilotSessionId() {
-  const [urlSessionId, setUrlSessionId] = useQueryState(
-    "sessionId",
-    parseAsString,
-  );
-
-  return { urlSessionId, setUrlSessionId };
-}
@@ -0,0 +1,126 @@
import { getGetV2GetSessionQueryKey } from "@/app/api/__generated__/endpoints/chat/chat";
import { useQueryClient } from "@tanstack/react-query";
import type { UIDataTypes, UIMessage, UITools } from "ai";
import { useCallback, useEffect, useRef } from "react";
import { convertChatSessionMessagesToUiMessages } from "../helpers/convertChatSessionToUiMessages";

const OPERATING_TYPES = new Set([
  "operation_started",
  "operation_pending",
  "operation_in_progress",
]);

const POLL_INTERVAL_MS = 1_500;

/**
 * Detects whether any message contains a tool part whose output indicates
 * a long-running operation is still in progress.
 */
function hasOperatingTool(
  messages: UIMessage<unknown, UIDataTypes, UITools>[],
) {
  for (const msg of messages) {
    for (const part of msg.parts) {
      if (!part.type.startsWith("tool-")) continue;
      const toolPart = part as { output?: unknown };
      if (!toolPart.output) continue;
      const output =
        typeof toolPart.output === "string"
          ? safeParse(toolPart.output)
          : toolPart.output;
      if (
        output &&
        typeof output === "object" &&
        "type" in output &&
        OPERATING_TYPES.has((output as { type: string }).type)
      ) {
        return true;
      }
    }
  }
  return false;
}

function safeParse(value: string): unknown {
  try {
    return JSON.parse(value);
  } catch {
    return null;
  }
}

/**
 * Polls the session endpoint while any tool is in an "operating" state
 * (operation_started / operation_pending / operation_in_progress).
 *
 * When the session data shows the tool output has changed (e.g. to
 * agent_saved), it calls `setMessages` with the updated messages.
 */
export function useLongRunningToolPolling(
  sessionId: string | null,
  messages: UIMessage<unknown, UIDataTypes, UITools>[],
  setMessages: (
    updater: (
      prev: UIMessage<unknown, UIDataTypes, UITools>[],
    ) => UIMessage<unknown, UIDataTypes, UITools>[],
  ) => void,
) {
  const queryClient = useQueryClient();
  const intervalRef = useRef<ReturnType<typeof setInterval> | null>(null);

  const stopPolling = useCallback(() => {
    if (intervalRef.current) {
      clearInterval(intervalRef.current);
      intervalRef.current = null;
    }
  }, []);

  const poll = useCallback(async () => {
    if (!sessionId) return;

    // Invalidate the query cache so the next fetch gets fresh data
    await queryClient.invalidateQueries({
      queryKey: getGetV2GetSessionQueryKey(sessionId),
    });

    // Fetch fresh session data
    const data = queryClient.getQueryData<{
      status: number;
      data: { messages?: unknown[] };
    }>(getGetV2GetSessionQueryKey(sessionId));

    if (data?.status !== 200 || !data.data.messages) return;

    const freshMessages = convertChatSessionMessagesToUiMessages(
      sessionId,
      data.data.messages,
    );

    if (!freshMessages || freshMessages.length === 0) return;

    // Update when the long-running tool completed
    if (!hasOperatingTool(freshMessages)) {
      setMessages(() => freshMessages);
      stopPolling();
    }
  }, [sessionId, queryClient, setMessages, stopPolling]);

  useEffect(() => {
    const shouldPoll = hasOperatingTool(messages);

    // Always clear any previous interval first so we never leak timers
    // when the effect re-runs due to dependency changes (e.g. messages
    // updating as the LLM streams text after the tool call).
    stopPolling();

    if (shouldPoll && sessionId) {
      intervalRef.current = setInterval(() => {
        poll();
      }, POLL_INTERVAL_MS);
    }

    return () => {
      stopPolling();
    };
  }, [messages, sessionId, poll, stopPolling]);
}
@@ -1,24 +1,30 @@
 "use client";

-import { WarningDiamondIcon } from "@phosphor-icons/react";
+import { Button } from "@/components/atoms/Button/Button";
+import { Text } from "@/components/atoms/Text/Text";
+import {
+  BookOpenIcon,
+  CheckFatIcon,
+  PencilSimpleIcon,
+  WarningDiamondIcon,
+} from "@phosphor-icons/react";
 import type { ToolUIPart } from "ai";
+import NextLink from "next/link";
 import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions";
 import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
-import { ProgressBar } from "../../components/ProgressBar/ProgressBar";
 import {
   ContentCardDescription,
   ContentCodeBlock,
   ContentGrid,
   ContentHint,
-  ContentLink,
   ContentMessage,
 } from "../../components/ToolAccordion/AccordionContent";
 import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
-import { useAsymptoticProgress } from "../../hooks/useAsymptoticProgress";
 import {
   ClarificationQuestionsCard,
   ClarifyingQuestion,
 } from "./components/ClarificationQuestionsCard";
+import { MiniGame } from "./components/MiniGame/MiniGame";
 import {
   AccordionIcon,
   formatMaybeJson,

@@ -52,7 +58,7 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
   const icon = <AccordionIcon />;

   if (isAgentSavedOutput(output)) {
-    return { icon, title: output.agent_name };
+    return { icon, title: output.agent_name, expanded: true };
   }
   if (isAgentPreviewOutput(output)) {
     return {

@@ -78,6 +84,7 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
     return {
       icon,
       title: "Creating agent, this may take a few minutes. Sit back and relax.",
+      expanded: true,
     };
   }
   return {

@@ -107,8 +114,6 @@ export function CreateAgentTool({ part }: Props) {
     isOperationPendingOutput(output) ||
     isOperationInProgressOutput(output));

-  const progress = useAsymptoticProgress(isOperating);
-
   const hasExpandableContent =
     part.state === "output-available" &&
     !!output &&

@@ -152,31 +157,53 @@ export function CreateAgentTool({ part }: Props) {
     <ToolAccordion {...getAccordionMeta(output)}>
       {isOperating && (
         <ContentGrid>
-          <ProgressBar value={progress} className="max-w-[280px]" />
+          <MiniGame />
           <ContentHint>
-            This could take a few minutes, grab a coffee ☕
+            This could take a few minutes — play while you wait!
           </ContentHint>
         </ContentGrid>
       )}

       {isAgentSavedOutput(output) && (
-        <ContentGrid>
-          <ContentMessage>{output.message}</ContentMessage>
-          <div className="flex flex-wrap gap-2">
-            <ContentLink href={output.library_agent_link}>
-              Open in library
-            </ContentLink>
-            <ContentLink href={output.agent_page_link}>
-              Open in builder
-            </ContentLink>
-          </div>
-          <ContentCodeBlock>
-            {truncateText(
-              formatMaybeJson({ agent_id: output.agent_id }),
-              800,
-            )}
-          </ContentCodeBlock>
-        </ContentGrid>
+        <div className="rounded-xl border border-border/60 bg-card p-4 shadow-sm">
+          <div className="flex items-baseline gap-2">
+            <CheckFatIcon
+              size={18}
+              weight="regular"
+              className="relative top-1 text-green-500"
+            />
+            <Text
+              variant="body-medium"
+              className="text-blacks mb-2 text-[16px]"
+            >
+              {output.message}
+            </Text>
+          </div>
+          <div className="mt-3 flex flex-wrap gap-4">
+            <Button variant="outline" size="small">
+              <NextLink
+                href={output.library_agent_link}
+                className="inline-flex items-center gap-1.5"
+                target="_blank"
+                rel="noopener noreferrer"
+              >
+                <BookOpenIcon size={14} weight="regular" />
+                Open in library
+              </NextLink>
+            </Button>
+            <Button variant="outline" size="small">
+              <NextLink
+                href={output.agent_page_link}
+                target="_blank"
+                rel="noopener noreferrer"
+                className="inline-flex items-center gap-1.5"
+              >
+                <PencilSimpleIcon size={14} weight="regular" />
+                Open in builder
+              </NextLink>
+            </Button>
+          </div>
+        </div>
       )}

       {isAgentPreviewOutput(output) && (
@@ -0,0 +1,21 @@
"use client";

import { useMiniGame } from "./useMiniGame";

export function MiniGame() {
  const { canvasRef } = useMiniGame();

  return (
    <div
      className="w-full overflow-hidden rounded-md bg-background text-foreground"
      style={{ border: "1px solid #d17fff" }}
    >
      <canvas
        ref={canvasRef}
        tabIndex={0}
        className="block w-full outline-none"
        style={{ imageRendering: "pixelated" }}
      />
    </div>
  );
}
@@ -0,0 +1,579 @@
|
|||||||
|
import { useEffect, useRef } from "react";
|
||||||
|
|
||||||
|
/* ------------------------------------------------------------------ */
|
||||||
|
/* Constants */
|
||||||
|
/* ------------------------------------------------------------------ */
|
||||||
|
|
||||||
|
const CANVAS_HEIGHT = 150;
|
||||||
|
const GRAVITY = 0.55;
|
||||||
|
const JUMP_FORCE = -9.5;
|
||||||
|
const BASE_SPEED = 3;
|
||||||
|
const SPEED_INCREMENT = 0.0008;
|
||||||
|
const SPAWN_MIN = 70;
|
||||||
|
const SPAWN_MAX = 130;
|
||||||
|
const CHAR_SIZE = 18;
|
||||||
|
const CHAR_X = 50;
|
||||||
|
const GROUND_PAD = 20;
|
||||||
|
const STORAGE_KEY = "copilot-minigame-highscore";
|
||||||
|
|
||||||
|
// Colors
|
||||||
|
const COLOR_BG = "#E8EAF6";
|
||||||
|
const COLOR_CHAR = "#263238";
|
||||||
|
const COLOR_BOSS = "#F50057";
|
||||||
|
|
||||||
|
// Boss
|
||||||
|
const BOSS_SIZE = 36;
|
||||||
|
const BOSS_ENTER_SPEED = 2;
|
||||||
|
const BOSS_LEAVE_SPEED = 3;
|
||||||
|
const BOSS_SHOOT_COOLDOWN = 90;
|
||||||
|
const BOSS_SHOTS_TO_EVADE = 5;
|
||||||
|
const BOSS_INTERVAL = 20; // every N score
|
||||||
|
const PROJ_SPEED = 4.5;
|
||||||
|
const PROJ_SIZE = 12;
|
||||||
|
|
||||||
|
/* ------------------------------------------------------------------ */
|
||||||
|
/* Types */
|
||||||
|
/* ------------------------------------------------------------------ */
|
||||||
|
|
||||||
|
interface Obstacle {
|
||||||
|
x: number;
|
||||||
|
width: number;
|
||||||
|
height: number;
|
||||||
|
scored: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface Projectile {
|
||||||
|
x: number;
|
||||||
|
y: number;
|
||||||
|
speed: number;
|
||||||
|
evaded: boolean;
|
||||||
|
type: "low" | "high";
|
||||||
|
}
|
||||||
|
|
||||||
|
interface BossState {
|
||||||
|
phase: "inactive" | "entering" | "fighting" | "leaving";
|
||||||
|
x: number;
|
||||||
|
targetX: number;
|
||||||
|
shotsEvaded: number;
|
||||||
|
cooldown: number;
|
||||||
|
projectiles: Projectile[];
|
||||||
|
bob: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface GameState {
|
||||||
|
charY: number;
|
||||||
|
vy: number;
|
||||||
|
obstacles: Obstacle[];
|
||||||
|
score: number;
|
||||||
|
highScore: number;
|
||||||
|
speed: number;
|
||||||
|
frame: number;
|
||||||
|
nextSpawn: number;
|
||||||
|
running: boolean;
|
||||||
|
over: boolean;
|
||||||
|
groundY: number;
|
||||||
|
boss: BossState;
|
||||||
|
bossThreshold: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* ------------------------------------------------------------------ */
|
||||||
|
/* Helpers */
|
||||||
|
/* ------------------------------------------------------------------ */
|
||||||
|
|
||||||
|
function randInt(min: number, max: number) {
|
||||||
|
return Math.floor(Math.random() * (max - min + 1)) + min;
|
||||||
|
}
|
||||||
|
|
||||||
|
function readHighScore(): number {
|
||||||
|
try {
|
||||||
|
return parseInt(localStorage.getItem(STORAGE_KEY) || "0", 10) || 0;
|
||||||
|
} catch {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function writeHighScore(score: number) {
|
||||||
|
try {
|
||||||
|
localStorage.setItem(STORAGE_KEY, String(score));
|
||||||
|
} catch {
|
||||||
|
/* noop */
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function makeBoss(): BossState {
|
||||||
|
return {
|
||||||
|
phase: "inactive",
|
||||||
|
x: 0,
|
||||||
|
targetX: 0,
|
||||||
|
shotsEvaded: 0,
|
||||||
|
cooldown: 0,
|
||||||
|
projectiles: [],
|
||||||
|
bob: 0,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
function makeState(groundY: number): GameState {
|
||||||
|
return {
|
||||||
|
charY: groundY - CHAR_SIZE,
|
||||||
|
vy: 0,
|
||||||
|
obstacles: [],
|
||||||
|
score: 0,
|
||||||
|
highScore: readHighScore(),
|
||||||
|
speed: BASE_SPEED,
|
||||||
|
frame: 0,
|
||||||
|
nextSpawn: randInt(SPAWN_MIN, SPAWN_MAX),
|
||||||
|
running: false,
|
||||||
|
over: false,
|
||||||
|
groundY,
|
||||||
|
boss: makeBoss(),
|
||||||
|
bossThreshold: BOSS_INTERVAL,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
function gameOver(s: GameState) {
|
||||||
|
s.running = false;
|
||||||
|
s.over = true;
|
||||||
|
if (s.score > s.highScore) {
|
||||||
|
s.highScore = s.score;
|
||||||
|
writeHighScore(s.score);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* ------------------------------------------------------------------ */
|
||||||
|
/* Projectile collision — shared between fighting & leaving phases */
|
||||||
|
/* ------------------------------------------------------------------ */
|
||||||
|
|
||||||
|
/** Returns true if the player died. */
|
||||||
|
function tickProjectiles(s: GameState): boolean {
|
||||||
|
const boss = s.boss;
|
||||||
|
|
||||||
|
for (const p of boss.projectiles) {
|
||||||
|
p.x -= p.speed;
|
||||||
|
|
||||||
|
if (!p.evaded && p.x + PROJ_SIZE < CHAR_X) {
|
||||||
|
p.evaded = true;
|
||||||
|
boss.shotsEvaded++;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collision
|
||||||
|
if (
|
||||||
|
!p.evaded &&
|
||||||
|
CHAR_X + CHAR_SIZE > p.x &&
|
||||||
|
CHAR_X < p.x + PROJ_SIZE &&
|
||||||
|
s.charY + CHAR_SIZE > p.y &&
|
||||||
|
s.charY < p.y + PROJ_SIZE
|
||||||
|
) {
|
||||||
|
gameOver(s);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
boss.projectiles = boss.projectiles.filter((p) => p.x + PROJ_SIZE > -20);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* ------------------------------------------------------------------ */
|
||||||
|
/* Update */
|
||||||
|
/* ------------------------------------------------------------------ */
|
||||||
|
|
||||||
|
function update(s: GameState, canvasWidth: number) {
|
||||||
|
if (!s.running) return;
|
||||||
|
|
||||||
|
s.frame++;
|
||||||
|
|
||||||
|
// Speed only ramps during regular play
|
||||||
|
if (s.boss.phase === "inactive") {
|
||||||
|
s.speed = BASE_SPEED + s.frame * SPEED_INCREMENT;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---- Character physics (always active) ---- //
|
||||||
|
s.vy += GRAVITY;
|
||||||
|
s.charY += s.vy;
|
||||||
|
if (s.charY + CHAR_SIZE >= s.groundY) {
|
||||||
|
s.charY = s.groundY - CHAR_SIZE;
|
||||||
|
s.vy = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---- Trigger boss ---- //
|
||||||
|
if (s.boss.phase === "inactive" && s.score >= s.bossThreshold) {
|
||||||
|
s.boss.phase = "entering";
|
||||||
|
s.boss.x = canvasWidth + 10;
|
||||||
|
s.boss.targetX = canvasWidth - BOSS_SIZE - 40;
|
||||||
|
s.boss.shotsEvaded = 0;
|
||||||
|
s.boss.cooldown = BOSS_SHOOT_COOLDOWN;
|
||||||
|
s.boss.projectiles = [];
|
||||||
|
s.obstacles = [];
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---- Boss: entering ---- //
|
||||||
|
if (s.boss.phase === "entering") {
|
||||||
|
s.boss.bob = Math.sin(s.frame * 0.05) * 3;
|
||||||
|
s.boss.x -= BOSS_ENTER_SPEED;
|
||||||
|
if (s.boss.x <= s.boss.targetX) {
|
||||||
|
s.boss.x = s.boss.targetX;
|
||||||
|
s.boss.phase = "fighting";
|
||||||
|
}
|
||||||
|
return; // no obstacles while entering
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---- Boss: fighting ---- //
|
||||||
|
if (s.boss.phase === "fighting") {
|
||||||
|
s.boss.bob = Math.sin(s.frame * 0.05) * 3;
|
||||||
|
|
||||||
|
// Shoot
|
||||||
|
s.boss.cooldown--;
|
||||||
|
if (s.boss.cooldown <= 0) {
|
||||||
|
const isLow = Math.random() < 0.5;
|
||||||
|
s.boss.projectiles.push({
|
||||||
|
x: s.boss.x - PROJ_SIZE,
|
||||||
|
y: isLow ? s.groundY - 14 : s.groundY - 70,
|
||||||
|
speed: PROJ_SPEED,
|
||||||
|
evaded: false,
|
||||||
|
type: isLow ? "low" : "high",
|
||||||
|
});
|
||||||
|
s.boss.cooldown = BOSS_SHOOT_COOLDOWN;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (tickProjectiles(s)) return;
|
||||||
|
|
||||||
|
// Boss defeated?
|
||||||
|
if (s.boss.shotsEvaded >= BOSS_SHOTS_TO_EVADE) {
|
||||||
|
s.boss.phase = "leaving";
|
||||||
|
s.score += 5; // bonus
|
||||||
|
s.bossThreshold = s.score + BOSS_INTERVAL;
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---- Boss: leaving ---- //
|
||||||
|
if (s.boss.phase === "leaving") {
|
||||||
|
s.boss.bob = Math.sin(s.frame * 0.05) * 3;
|
||||||
|
s.boss.x += BOSS_LEAVE_SPEED;
|
||||||
|
|
||||||
|
// Still check in-flight projectiles
|
||||||
|
if (tickProjectiles(s)) return;
|
||||||
|
|
||||||
|
if (s.boss.x > canvasWidth + 50) {
|
||||||
|
s.boss = makeBoss();
|
||||||
|
s.nextSpawn = s.frame + randInt(SPAWN_MIN / 2, SPAWN_MAX / 2);
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---- Regular obstacle play ---- //
|
||||||
|
if (s.frame >= s.nextSpawn) {
|
||||||
|
s.obstacles.push({
|
||||||
|
x: canvasWidth + 10,
|
||||||
|
width: randInt(10, 16),
|
||||||
|
height: randInt(20, 48),
|
||||||
|
scored: false,
|
||||||
|
});
|
||||||
|
s.nextSpawn = s.frame + randInt(SPAWN_MIN, SPAWN_MAX);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const o of s.obstacles) {
|
||||||
|
o.x -= s.speed;
|
||||||
|
if (!o.scored && o.x + o.width < CHAR_X) {
|
||||||
|
o.scored = true;
|
||||||
|
s.score++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
s.obstacles = s.obstacles.filter((o) => o.x + o.width > -20);
|
||||||
|
|
||||||
|
for (const o of s.obstacles) {
|
||||||
|
const oY = s.groundY - o.height;
|
||||||
|
if (
|
||||||
|
CHAR_X + CHAR_SIZE > o.x &&
|
||||||
|
CHAR_X < o.x + o.width &&
|
||||||
|
s.charY + CHAR_SIZE > oY
|
||||||
|
) {
|
||||||
|
gameOver(s);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* ------------------------------------------------------------------ */
|
||||||
|
/* Drawing */
|
||||||
|
/* ------------------------------------------------------------------ */
|
||||||
|
|
||||||
|
function drawBoss(ctx: CanvasRenderingContext2D, s: GameState, bg: string) {
|
||||||
|
const bx = s.boss.x;
|
||||||
|
const by = s.groundY - BOSS_SIZE + s.boss.bob;
|
||||||
|
|
||||||
|
// Body
|
||||||
|
ctx.save();
|
||||||
|
ctx.fillStyle = COLOR_BOSS;
|
||||||
|
ctx.globalAlpha = 0.9;
|
||||||
|
ctx.beginPath();
|
||||||
|
ctx.roundRect(bx, by, BOSS_SIZE, BOSS_SIZE, 4);
|
||||||
|
ctx.fill();
|
||||||
|
ctx.restore();
|
||||||
|
|
||||||
|
// Eyes
|
||||||
|
ctx.save();
|
||||||
|
ctx.fillStyle = bg;
|
||||||
|
const eyeY = by + 13;
|
||||||
|
ctx.beginPath();
|
||||||
|
ctx.arc(bx + 10, eyeY, 4, 0, Math.PI * 2);
|
||||||
|
ctx.fill();
|
||||||
|
ctx.beginPath();
|
||||||
|
ctx.arc(bx + 26, eyeY, 4, 0, Math.PI * 2);
|
||||||
|
ctx.fill();
|
||||||
|
ctx.restore();
|
||||||
|
|
||||||
|
// Angry eyebrows
|
||||||
|
ctx.save();
|
||||||
|
ctx.strokeStyle = bg;
|
||||||
|
ctx.lineWidth = 2;
|
||||||
|
ctx.beginPath();
|
||||||
|
ctx.moveTo(bx + 5, eyeY - 7);
|
||||||
|
ctx.lineTo(bx + 14, eyeY - 4);
|
||||||
|
ctx.stroke();
|
||||||
|
ctx.beginPath();
|
||||||
|
ctx.moveTo(bx + 31, eyeY - 7);
|
||||||
|
ctx.lineTo(bx + 22, eyeY - 4);
|
||||||
|
ctx.stroke();
|
||||||
|
ctx.restore();
|
||||||
|
|
||||||
|
// Zigzag mouth
|
||||||
|
ctx.save();
|
||||||
|
ctx.strokeStyle = bg;
|
||||||
|
ctx.lineWidth = 1.5;
|
||||||
|
ctx.beginPath();
|
||||||
|
ctx.moveTo(bx + 10, by + 27);
|
||||||
|
ctx.lineTo(bx + 14, by + 24);
|
||||||
|
ctx.lineTo(bx + 18, by + 27);
|
||||||
|
ctx.lineTo(bx + 22, by + 24);
|
||||||
|
ctx.lineTo(bx + 26, by + 27);
|
||||||
|
ctx.stroke();
|
||||||
|
ctx.restore();
|
||||||
|
}
|
||||||
|
|
||||||
|
function drawProjectiles(ctx: CanvasRenderingContext2D, boss: BossState) {
|
||||||
|
ctx.save();
|
||||||
|
ctx.fillStyle = COLOR_BOSS;
|
||||||
|
ctx.globalAlpha = 0.8;
|
||||||
|
for (const p of boss.projectiles) {
|
||||||
|
if (p.evaded) continue;
|
||||||
|
ctx.beginPath();
|
||||||
|
ctx.arc(
|
||||||
|
p.x + PROJ_SIZE / 2,
|
||||||
|
p.y + PROJ_SIZE / 2,
|
||||||
|
PROJ_SIZE / 2,
|
||||||
|
0,
|
||||||
|
Math.PI * 2,
|
||||||
|
);
|
||||||
|
ctx.fill();
|
||||||
|
}
|
||||||
|
ctx.restore();
|
||||||
|
}
|
||||||
|
|
||||||
|
function draw(
  ctx: CanvasRenderingContext2D,
  s: GameState,
  w: number,
  h: number,
  fg: string,
  started: boolean,
) {
  ctx.fillStyle = COLOR_BG;
  ctx.fillRect(0, 0, w, h);

  // Ground
  ctx.save();
  ctx.strokeStyle = fg;
  ctx.globalAlpha = 0.15;
  ctx.setLineDash([4, 4]);
  ctx.beginPath();
  ctx.moveTo(0, s.groundY);
  ctx.lineTo(w, s.groundY);
  ctx.stroke();
  ctx.restore();

  // Character
  ctx.save();
  ctx.fillStyle = COLOR_CHAR;
  ctx.globalAlpha = 0.85;
  ctx.beginPath();
  ctx.roundRect(CHAR_X, s.charY, CHAR_SIZE, CHAR_SIZE, 3);
  ctx.fill();
  ctx.restore();

  // Eyes
  ctx.save();
  ctx.fillStyle = COLOR_BG;
  ctx.beginPath();
  ctx.arc(CHAR_X + 6, s.charY + 7, 2.5, 0, Math.PI * 2);
  ctx.fill();
  ctx.beginPath();
  ctx.arc(CHAR_X + 12, s.charY + 7, 2.5, 0, Math.PI * 2);
  ctx.fill();
  ctx.restore();

  // Obstacles
  ctx.save();
  ctx.fillStyle = fg;
  ctx.globalAlpha = 0.55;
  for (const o of s.obstacles) {
    ctx.fillRect(o.x, s.groundY - o.height, o.width, o.height);
  }
  ctx.restore();

  // Boss + projectiles
  if (s.boss.phase !== "inactive") {
    drawBoss(ctx, s, COLOR_BG);
    drawProjectiles(ctx, s.boss);
  }

  // Score HUD
  ctx.save();
  ctx.fillStyle = fg;
  ctx.globalAlpha = 0.5;
  ctx.font = "bold 11px monospace";
  ctx.textAlign = "right";
  ctx.fillText(`Score: ${s.score}`, w - 12, 20);
  ctx.fillText(`Best: ${s.highScore}`, w - 12, 34);
  if (s.boss.phase === "fighting") {
    ctx.fillText(
      `Evade: ${s.boss.shotsEvaded}/${BOSS_SHOTS_TO_EVADE}`,
      w - 12,
      48,
    );
  }
  ctx.restore();

  // Prompts
  if (!started && !s.running && !s.over) {
    ctx.save();
    ctx.fillStyle = fg;
    ctx.globalAlpha = 0.5;
    ctx.font = "12px sans-serif";
    ctx.textAlign = "center";
    ctx.fillText("Click or press Space to play while you wait", w / 2, h / 2);
    ctx.restore();
  }

  if (s.over) {
    ctx.save();
    ctx.fillStyle = fg;
    ctx.globalAlpha = 0.7;
    ctx.font = "bold 13px sans-serif";
    ctx.textAlign = "center";
    ctx.fillText("Game Over", w / 2, h / 2 - 8);
    ctx.font = "11px sans-serif";
    ctx.fillText("Click or Space to restart", w / 2, h / 2 + 10);
    ctx.restore();
  }
}

/* ------------------------------------------------------------------ */
/* Hook                                                               */
/* ------------------------------------------------------------------ */

export function useMiniGame() {
  const canvasRef = useRef<HTMLCanvasElement>(null);
  const stateRef = useRef<GameState | null>(null);
  const rafRef = useRef(0);
  const startedRef = useRef(false);

  useEffect(() => {
    const canvas = canvasRef.current;
    if (!canvas) return;

    const container = canvas.parentElement;
    if (container) {
      canvas.width = container.clientWidth;
      canvas.height = CANVAS_HEIGHT;
    }

    const groundY = canvas.height - GROUND_PAD;
    stateRef.current = makeState(groundY);

    const style = getComputedStyle(canvas);
    let fg = style.color || "#71717a";

    // -------------------------------------------------------------- //
    // Jump                                                           //
    // -------------------------------------------------------------- //
    function jump() {
      const s = stateRef.current;
      if (!s) return;

      if (s.over) {
        const hs = s.highScore;
        const gy = s.groundY;
        stateRef.current = makeState(gy);
        stateRef.current.highScore = hs;
        stateRef.current.running = true;
        startedRef.current = true;
        return;
      }

      if (!s.running) {
        s.running = true;
        startedRef.current = true;
        return;
      }

      // Only jump when on the ground
      if (s.charY + CHAR_SIZE >= s.groundY) {
        s.vy = JUMP_FORCE;
      }
    }

    function onKey(e: KeyboardEvent) {
      if (e.code === "Space" || e.key === " ") {
        e.preventDefault();
        jump();
      }
    }

    function onClick() {
      canvas?.focus();
      jump();
    }

    // -------------------------------------------------------------- //
    // Loop                                                           //
    // -------------------------------------------------------------- //
    function loop() {
      const s = stateRef.current;
      if (!canvas || !s) return;
      const ctx = canvas.getContext("2d");
      if (!ctx) return;

      update(s, canvas.width);
      draw(ctx, s, canvas.width, canvas.height, fg, startedRef.current);
      rafRef.current = requestAnimationFrame(loop);
    }

    rafRef.current = requestAnimationFrame(loop);

    canvas.addEventListener("click", onClick);
    canvas.addEventListener("keydown", onKey);

    const observer = new ResizeObserver((entries) => {
      for (const entry of entries) {
        canvas.width = entry.contentRect.width;
        canvas.height = CANVAS_HEIGHT;
        if (stateRef.current) {
          stateRef.current.groundY = canvas.height - GROUND_PAD;
        }
        const cs = getComputedStyle(canvas);
        fg = cs.color || fg;
      }
    });
    if (container) observer.observe(container);

    return () => {
      cancelAnimationFrame(rafRef.current);
      canvas.removeEventListener("click", onClick);
      canvas.removeEventListener("keydown", onKey);
      observer.disconnect();
    };
  }, []);

  return { canvasRef };
}
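
A usage note, not part of this diff: the hook only attaches its click and keydown listeners to the canvas it hands back a ref for, and it sizes the canvas from the parent element, so the consuming component just renders that canvas and makes it focusable so the Space handler can fire. A minimal sketch, with the component name, import path, and styling as assumptions:

import React from "react";
import { useMiniGame } from "./useMiniGame"; // hypothetical path

export function MiniGameCanvas() {
  const { canvasRef } = useMiniGame();
  return (
    <canvas
      ref={canvasRef}
      // tabIndex makes the canvas focusable so its "keydown" listener receives Space.
      tabIndex={0}
      className="w-full"
      aria-label="Mini game: click or press Space to jump"
    />
  );
}
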
@@ -1,10 +1,14 @@
 import { useGetV2ListSessions } from "@/app/api/__generated__/endpoints/chat/chat";
+import { toast } from "@/components/molecules/Toast/use-toast";
 import { useBreakpoint } from "@/lib/hooks/useBreakpoint";
 import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
 import { useChat } from "@ai-sdk/react";
 import { DefaultChatTransport } from "ai";
-import { useEffect, useMemo, useState } from "react";
+import { useEffect, useMemo, useRef, useState } from "react";
 import { useChatSession } from "./useChatSession";
+import { useLongRunningToolPolling } from "./hooks/useLongRunningToolPolling";
 
+const STREAM_START_TIMEOUT_MS = 12_000;
+
 export function useCopilotPage() {
   const { isUserLoading, isLoggedIn } = useSupabase();
@@ -52,6 +56,24 @@ export function useCopilotPage() {
     transport: transport ?? undefined,
   });
 
+  // Abort the stream if the backend doesn't start sending data within 12s.
+  const stopRef = useRef(stop);
+  stopRef.current = stop;
+  useEffect(() => {
+    if (status !== "submitted") return;
+
+    const timer = setTimeout(() => {
+      stopRef.current();
+      toast({
+        title: "Stream timed out",
+        description: "The server took too long to respond. Please try again.",
+        variant: "destructive",
+      });
+    }, STREAM_START_TIMEOUT_MS);
+
+    return () => clearTimeout(timer);
+  }, [status]);
+
   useEffect(() => {
     if (!hydratedMessages || hydratedMessages.length === 0) return;
     setMessages((prev) => {
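
A design note on the hunk above: `stop` is kept in a ref and reassigned on every render, so the timeout callback always calls the latest `stop` returned by `useChat` without `stop` appearing in the effect's dependency array; the watchdog is armed only while `status === "submitted"` and cleared as soon as the status changes or the component unmounts. Factored into a reusable hook, the same pattern might look roughly like this (the hook name and signature are illustrative, not part of this changeset):

import { useEffect, useRef } from "react";

// Sketch: run `onTimeout` once if `armed` stays true for `ms` milliseconds.
export function useStartTimeout(armed: boolean, ms: number, onTimeout: () => void) {
  const callbackRef = useRef(onTimeout);
  callbackRef.current = onTimeout; // always point at the latest callback

  useEffect(() => {
    if (!armed) return;
    const timer = setTimeout(() => callbackRef.current(), ms);
    return () => clearTimeout(timer); // disarm when `armed` flips or on unmount
  }, [armed, ms]);
}
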
@@ -60,6 +82,11 @@
     });
   }, [hydratedMessages, setMessages]);
 
+  // Poll session endpoint when a long-running tool (create_agent, edit_agent)
+  // is in progress. When the backend completes, the session data will contain
+  // the final tool output — this hook detects the change and updates messages.
+  useLongRunningToolPolling(sessionId, messages, setMessages);
+
   // Clear messages when session is null
   useEffect(() => {
     if (!sessionId) setMessages([]);
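
`useLongRunningToolPolling` itself is not included in this diff. Going by the comment above, its job is to keep re-fetching the session while a `create_agent` / `edit_agent` tool call is still pending and to swap in the completed tool output once the backend finishes. A rough sketch of that shape, purely illustrative (the endpoint, pending-tool check, and five-second interval below are assumptions, not the real implementation):

import { useEffect } from "react";
import type { Dispatch, SetStateAction } from "react";
import type { UIMessage } from "ai";

function useLongRunningToolPollingSketch(
  sessionId: string | null,
  messages: UIMessage[],
  setMessages: Dispatch<SetStateAction<UIMessage[]>>,
) {
  // A tool part with no output yet means a long-running tool is still working.
  const hasPendingTool = messages.some((m) =>
    m.parts.some(
      (p) => p.type.startsWith("tool-") && !("output" in p && p.output != null),
    ),
  );

  useEffect(() => {
    if (!sessionId || !hasPendingTool) return;
    const timer = setInterval(async () => {
      // Hypothetical endpoint; the real hook presumably uses the generated API client.
      const res = await fetch(`/api/chat/sessions/${sessionId}`);
      if (!res.ok) return;
      const session = await res.json();
      // When the refreshed session carries the finished tool output, replace local state.
      if (Array.isArray(session?.messages)) setMessages(session.messages);
    }, 5_000);
    return () => clearInterval(timer);
  }, [sessionId, hasPendingTool, setMessages]);
}
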
@@ -29,6 +29,7 @@ export function ScheduleListItem({
       description={formatDistanceToNow(schedule.next_run_time, {
         addSuffix: true,
       })}
+      descriptionTitle={new Date(schedule.next_run_time).toString()}
       onClick={onClick}
       selected={selected}
       icon={
@@ -7,6 +7,7 @@ import React from "react";
 interface Props {
   title: string;
   description?: string;
+  descriptionTitle?: string;
   icon?: React.ReactNode;
   selected?: boolean;
   onClick?: () => void;
@@ -16,6 +17,7 @@ interface Props {
 export function SidebarItemCard({
   title,
   description,
+  descriptionTitle,
   icon,
   selected,
   onClick,
@@ -38,7 +40,11 @@ export function SidebarItemCard({
         >
           {title}
         </Text>
-        <Text variant="body" className="leading-tight !text-zinc-500">
+        <Text
+          variant="body"
+          className="leading-tight !text-zinc-500"
+          title={descriptionTitle}
+        >
           {description}
         </Text>
       </div>
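
The change above relies on the shared `Text` component forwarding the `title` prop to the element it renders, so the relative description gets a native browser tooltip showing the absolute timestamp on hover. An illustrative usage, with made-up values and an assumed import path:

import React from "react";
import { SidebarItemCard } from "./SidebarItemCard";

// Illustrative only: hovering the relative description reveals the absolute timestamp.
function ExampleScheduleRow() {
  const nextRun = new Date("2025-06-01T09:00:00Z"); // made-up value
  return (
    <SidebarItemCard
      title="Daily digest"
      description="in about 3 hours"
      descriptionTitle={nextRun.toString()}
    />
  );
}
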
@@ -81,6 +81,9 @@ export function TaskListItem({
         ? formatDistanceToNow(run.started_at, { addSuffix: true })
         : "—"
       }
+      descriptionTitle={
+        run.started_at ? new Date(run.started_at).toString() : undefined
+      }
       onClick={onClick}
       selected={selected}
       actions={
@@ -180,3 +180,14 @@ body[data-google-picker-open="true"] [data-dialog-content] {
   z-index: 1 !important;
   pointer-events: none !important;
 }
+
+/* CoPilot chat table styling — remove left/right borders, increase padding */
+[data-streamdown="table-wrapper"] table {
+  border-left: none;
+  border-right: none;
+}
+
+[data-streamdown="table-wrapper"] th,
+[data-streamdown="table-wrapper"] td {
+  padding: 0.875rem 1rem; /* py-3.5 px-4 */
+}
@@ -226,7 +226,7 @@ function renderMarkdown(
     table: ({ children, ...props }) => (
       <div className="my-4 overflow-x-auto">
         <table
-          className="min-w-full divide-y divide-gray-200 rounded-lg border border-gray-200 dark:divide-gray-700 dark:border-gray-700"
+          className="min-w-full divide-y divide-gray-200 border-y border-gray-200 dark:divide-gray-700 dark:border-gray-700"
           {...props}
         >
           {children}
@@ -235,7 +235,7 @@ function renderMarkdown(
     ),
     th: ({ children, ...props }) => (
       <th
-        className="bg-gray-50 px-4 py-3 text-left text-xs font-semibold uppercase tracking-wider text-gray-700 dark:bg-gray-800 dark:text-gray-300"
+        className="bg-gray-50 px-4 py-3.5 text-left text-xs font-semibold uppercase tracking-wider text-gray-700 dark:bg-gray-800 dark:text-gray-300"
         {...props}
       >
         {children}
@@ -243,7 +243,7 @@ function renderMarkdown(
     ),
     td: ({ children, ...props }) => (
       <td
-        className="border-t border-gray-200 px-4 py-3 text-sm text-gray-600 dark:border-gray-700 dark:text-gray-400"
+        className="border-t border-gray-200 px-4 py-3.5 text-sm text-gray-600 dark:border-gray-700 dark:text-gray-400"
         {...props}
       >
         {children}