Compare commits

Comparing add-llm-ma...lluis/impr (6 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 4fa9c6a797 |  |
|  | 256d59303a |  |
|  | 5035b69c79 |  |
|  | 86af8fc856 |  |
|  | e0aa565192 |  |
|  | dfa517300b |  |
`.github/workflows/claude-ci-failure-auto-fix.yml` (vendored): 42 changed lines

@@ -40,6 +40,48 @@ jobs:

```yaml
          git checkout -b "$BRANCH_NAME"
          echo "branch_name=$BRANCH_NAME" >> $GITHUB_OUTPUT

      # Backend Python/Poetry setup (so Claude can run linting/tests)
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"

      - name: Set up Python dependency cache
        uses: actions/cache@v5
        with:
          path: ~/.cache/pypoetry
          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}

      - name: Install Poetry
        run: |
          cd autogpt_platform/backend
          HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
          curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
          echo "$HOME/.local/bin" >> $GITHUB_PATH

      - name: Install Python dependencies
        working-directory: autogpt_platform/backend
        run: poetry install

      - name: Generate Prisma Client
        working-directory: autogpt_platform/backend
        run: poetry run prisma generate && poetry run gen-prisma-stub

      # Frontend Node.js/pnpm setup (so Claude can run linting/tests)
      - name: Enable corepack
        run: corepack enable

      - name: Set up Node.js
        uses: actions/setup-node@v6
        with:
          node-version: "22"
          cache: "pnpm"
          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

      - name: Install JavaScript dependencies
        working-directory: autogpt_platform/frontend
        run: pnpm install --frozen-lockfile

      - name: Get CI failure details
        id: failure_details
        uses: actions/github-script@v8
```
`.github/workflows/claude-dependabot.yml` (vendored): 22 changed lines

@@ -77,27 +77,15 @@ jobs:

```diff
       run: poetry run prisma generate && poetry run gen-prisma-stub
 
       # Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
+      - name: Enable corepack
+        run: corepack enable
+
       - name: Set up Node.js
         uses: actions/setup-node@v6
         with:
           node-version: "22"
-
-      - name: Enable corepack
-        run: corepack enable
-
-      - name: Set pnpm store directory
-        run: |
-          pnpm config set store-dir ~/.pnpm-store
-          echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
-
-      - name: Cache frontend dependencies
-        uses: actions/cache@v5
-        with:
-          path: ~/.pnpm-store
-          key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-            ${{ runner.os }}-pnpm-
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
 
       - name: Install JavaScript dependencies
         working-directory: autogpt_platform/frontend
```
`.github/workflows/claude.yml` (vendored): 22 changed lines

@@ -93,27 +93,15 @@ jobs:

```diff
       run: poetry run prisma generate && poetry run gen-prisma-stub
 
       # Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
+      - name: Enable corepack
+        run: corepack enable
+
       - name: Set up Node.js
         uses: actions/setup-node@v6
         with:
           node-version: "22"
-
-      - name: Enable corepack
-        run: corepack enable
-
-      - name: Set pnpm store directory
-        run: |
-          pnpm config set store-dir ~/.pnpm-store
-          echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
-
-      - name: Cache frontend dependencies
-        uses: actions/cache@v5
-        with:
-          path: ~/.pnpm-store
-          key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-            ${{ runner.os }}-pnpm-
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
 
       - name: Install JavaScript dependencies
         working-directory: autogpt_platform/frontend
```
@@ -104,6 +104,12 @@ TWITTER_CLIENT_SECRET=

```
# Make a new workspace for your OAuth APP -- trust me
# https://linear.app/settings/api/applications/new
# Callback URL: http://localhost:3000/auth/integrations/oauth_callback
LINEAR_API_KEY=
# Linear project and team IDs for the feature request tracker.
# Find these in your Linear workspace URL: linear.app/<workspace>/project/<project-id>
# and in team settings. Used by the chat copilot to file and search feature requests.
LINEAR_FEATURE_REQUEST_PROJECT_ID=
LINEAR_FEATURE_REQUEST_TEAM_ID=
LINEAR_CLIENT_ID=
LINEAR_CLIENT_SECRET=
```
@@ -1245,6 +1245,7 @@ async def _stream_chat_chunks(

```python
                return
            except Exception as e:
                last_error = e

                if _is_retryable_error(e) and retry_count < MAX_RETRIES:
                    retry_count += 1
                    # Calculate delay with exponential backoff
```

@@ -1260,12 +1261,27 @@ async def _stream_chat_chunks(

```diff
                     continue  # Retry the stream
                 else:
                     # Non-retryable error or max retries exceeded
-                    logger.error(
-                        f"Error in stream (not retrying): {e!s}",
-                        exc_info=True,
+                    _log_api_error(
+                        error=e,
+                        context="stream (not retrying)",
+                        session_id=session.session_id if session else None,
+                        message_count=len(messages) if messages else None,
+                        model=model,
+                        retry_count=retry_count,
                     )
                     error_code = None
                     error_text = str(e)
+
+                    error_details = _extract_api_error_details(e)
+                    if error_details.get("response_body"):
+                        body = error_details["response_body"]
+                        if isinstance(body, dict):
+                            err = body.get("error")
+                            if isinstance(err, dict) and err.get("message"):
+                                error_text = err["message"]
+                            elif body.get("message"):
+                                error_text = body["message"]
+
                     if _is_region_blocked_error(e):
                         error_code = "MODEL_NOT_AVAILABLE_REGION"
                         error_text = (
```

@@ -1282,9 +1298,13 @@ async def _stream_chat_chunks(

```diff
 
     # If we exit the retry loop without returning, it means we exhausted retries
     if last_error:
-        logger.error(
-            f"Max retries ({MAX_RETRIES}) exceeded. Last error: {last_error!s}",
-            exc_info=True,
+        _log_api_error(
+            error=last_error,
+            context=f"stream (max retries {MAX_RETRIES} exceeded)",
+            session_id=session.session_id if session else None,
+            message_count=len(messages) if messages else None,
+            model=model,
+            retry_count=MAX_RETRIES,
         )
         yield StreamError(errorText=f"Max retries exceeded: {last_error!s}")
         yield StreamFinish()
```

@@ -1857,6 +1877,7 @@ async def _generate_llm_continuation(

```python
            break  # Success, exit retry loop
        except Exception as e:
            last_error = e

            if _is_retryable_error(e) and retry_count < MAX_RETRIES:
                retry_count += 1
                delay = min(
```

@@ -1870,17 +1891,25 @@ async def _generate_llm_continuation(

```diff
                 await asyncio.sleep(delay)
                 continue
             else:
-                # Non-retryable error - log and exit gracefully
-                logger.error(
-                    f"Non-retryable error in LLM continuation: {e!s}",
-                    exc_info=True,
+                # Non-retryable error - log details and exit gracefully
+                _log_api_error(
+                    error=e,
+                    context="LLM continuation (not retrying)",
+                    session_id=session_id,
+                    message_count=len(messages) if messages else None,
+                    model=config.model,
+                    retry_count=retry_count,
                 )
                 return
 
     if last_error:
-        logger.error(
-            f"Max retries ({MAX_RETRIES}) exceeded for LLM continuation. "
-            f"Last error: {last_error!s}"
+        _log_api_error(
+            error=last_error,
+            context=f"LLM continuation (max retries {MAX_RETRIES} exceeded)",
+            session_id=session_id,
+            message_count=len(messages) if messages else None,
+            model=config.model,
+            retry_count=MAX_RETRIES,
         )
         return
```

@@ -1920,6 +1949,91 @@ async def _generate_llm_continuation(

```python
        logger.error(f"Failed to generate LLM continuation: {e}", exc_info=True)


def _log_api_error(
    error: Exception,
    context: str,
    session_id: str | None = None,
    message_count: int | None = None,
    model: str | None = None,
    retry_count: int = 0,
) -> None:
    """Log detailed API error information for debugging."""
    details = _extract_api_error_details(error)
    details["context"] = context
    details["session_id"] = session_id
    details["message_count"] = message_count
    details["model"] = model
    details["retry_count"] = retry_count

    if isinstance(error, RateLimitError):
        logger.warning(f"Rate limit error in {context}: {details}", exc_info=error)
    elif isinstance(error, APIConnectionError):
        logger.warning(f"API connection error in {context}: {details}", exc_info=error)
    elif isinstance(error, APIStatusError) and error.status_code >= 500:
        logger.error(f"API server error (5xx) in {context}: {details}", exc_info=error)
    else:
        logger.error(f"API error in {context}: {details}", exc_info=error)


def _extract_api_error_details(error: Exception) -> dict[str, Any]:
    """Extract detailed information from OpenAI/OpenRouter API errors."""
    error_msg = str(error)
    details: dict[str, Any] = {
        "error_type": type(error).__name__,
        "error_message": error_msg[:500] + "..." if len(error_msg) > 500 else error_msg,
    }

    if hasattr(error, "code"):
        details["code"] = getattr(error, "code", None)
    if hasattr(error, "param"):
        details["param"] = getattr(error, "param", None)

    if isinstance(error, APIStatusError):
        details["status_code"] = error.status_code
        details["request_id"] = getattr(error, "request_id", None)

        if hasattr(error, "body") and error.body:
            details["response_body"] = _sanitize_error_body(error.body)

        if hasattr(error, "response") and error.response:
            headers = error.response.headers
            details["openrouter_provider"] = headers.get("x-openrouter-provider")
            details["openrouter_model"] = headers.get("x-openrouter-model")
            details["retry_after"] = headers.get("retry-after")
            details["rate_limit_remaining"] = headers.get("x-ratelimit-remaining")

    return details


def _sanitize_error_body(
    body: Any, max_length: int = 2000
) -> dict[str, Any] | str | None:
    """Extract only safe fields from error response body to avoid logging sensitive data."""
    if not isinstance(body, dict):
        # Non-dict bodies (e.g., HTML error pages) - return truncated string
        if body is not None:
            body_str = str(body)
            if len(body_str) > max_length:
                return body_str[:max_length] + "...[truncated]"
            return body_str
        return None

    safe_fields = ("message", "type", "code", "param", "error")
    sanitized: dict[str, Any] = {}

    for field in safe_fields:
        if field in body:
            value = body[field]
            if field == "error" and isinstance(value, dict):
                sanitized[field] = _sanitize_error_body(value, max_length)
            elif isinstance(value, str) and len(value) > max_length:
                sanitized[field] = value[:max_length] + "...[truncated]"
            else:
                sanitized[field] = value

    return sanitized if sanitized else None


async def _generate_llm_continuation_with_streaming(
    session_id: str,
    user_id: str | None,
```
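As a quick illustration of what the sanitizer keeps, here is a minimal usage sketch. The import path is an assumption (the chat service module's filename is not shown in this diff), and the error body is made up; only the fields listed in `safe_fields` survive, and anything else, such as auth headers, is dropped.

```python
# Hypothetical usage sketch; the module path below is an assumption, not shown in this diff.
from backend.api.features.chat.service import _sanitize_error_body  # assumed path

body = {
    "message": "Provider returned 429",
    "error": {"message": "Rate limit exceeded", "code": "rate_limited"},
    "headers": {"authorization": "Bearer sk-..."},  # not in safe_fields, so it is dropped
}
print(_sanitize_error_body(body))
# {'message': 'Provider returned 429', 'error': {'message': 'Rate limit exceeded', 'code': 'rate_limited'}}
```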
@@ -12,6 +12,7 @@ from .base import BaseTool

```diff
 from .create_agent import CreateAgentTool
 from .customize_agent import CustomizeAgentTool
 from .edit_agent import EditAgentTool
+from .feature_requests import CreateFeatureRequestTool, SearchFeatureRequestsTool
 from .find_agent import FindAgentTool
 from .find_block import FindBlockTool
 from .find_library_agent import FindLibraryAgentTool
```

@@ -45,6 +46,9 @@ TOOL_REGISTRY: dict[str, BaseTool] = {

```diff
     "view_agent_output": AgentOutputTool(),
     "search_docs": SearchDocsTool(),
     "get_doc_page": GetDocPageTool(),
+    # Feature request tools
+    "search_feature_requests": SearchFeatureRequestsTool(),
+    "create_feature_request": CreateFeatureRequestTool(),
     # Workspace tools for CoPilot file operations
     "list_workspace_files": ListWorkspaceFilesTool(),
     "read_workspace_file": ReadWorkspaceFileTool(),
```
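For orientation, a rough sketch of how one of the newly registered tools could be invoked through the registry. The call shape mirrors the tests later in this diff, which call the internal `_execute` directly; the real chat service presumably goes through a public wrapper that handles auth, and that dispatch code is not part of this diff.

```python
# Rough sketch only; mirrors how the tests in this PR call the tools.
from backend.api.features.chat.tools import TOOL_REGISTRY


async def file_feature_request(session, user_id: str, title: str, description: str):
    # Look up the tool added to TOOL_REGISTRY above and run it with keyword args.
    tool = TOOL_REGISTRY["create_feature_request"]
    return await tool._execute(
        user_id=user_id,
        session=session,
        title=title,
        description=description,
    )
```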
New file (+448 lines): `backend/api/features/chat/tools/feature_requests.py`

@@ -0,0 +1,448 @@

```python
"""Feature request tools - search and create feature requests via Linear."""

import logging
from typing import Any

from pydantic import SecretStr

from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools.base import BaseTool
from backend.api.features.chat.tools.models import (
    ErrorResponse,
    FeatureRequestCreatedResponse,
    FeatureRequestInfo,
    FeatureRequestSearchResponse,
    NoResultsResponse,
    ToolResponseBase,
)
from backend.blocks.linear._api import LinearClient
from backend.data.model import APIKeyCredentials
from backend.data.user import get_user_email_by_id
from backend.util.settings import Settings

logger = logging.getLogger(__name__)

MAX_SEARCH_RESULTS = 10

# GraphQL queries/mutations
SEARCH_ISSUES_QUERY = """
query SearchFeatureRequests($term: String!, $filter: IssueFilter, $first: Int) {
  searchIssues(term: $term, filter: $filter, first: $first) {
    nodes {
      id
      identifier
      title
      description
    }
  }
}
"""

CUSTOMER_UPSERT_MUTATION = """
mutation CustomerUpsert($input: CustomerUpsertInput!) {
  customerUpsert(input: $input) {
    success
    customer {
      id
      name
      externalIds
    }
  }
}
"""

ISSUE_CREATE_MUTATION = """
mutation IssueCreate($input: IssueCreateInput!) {
  issueCreate(input: $input) {
    success
    issue {
      id
      identifier
      title
      url
    }
  }
}
"""

CUSTOMER_NEED_CREATE_MUTATION = """
mutation CustomerNeedCreate($input: CustomerNeedCreateInput!) {
  customerNeedCreate(input: $input) {
    success
    need {
      id
      body
      customer {
        id
        name
      }
      issue {
        id
        identifier
        title
        url
      }
    }
  }
}
"""


_settings: Settings | None = None


def _get_settings() -> Settings:
    global _settings
    if _settings is None:
        _settings = Settings()
    return _settings


def _get_linear_config() -> tuple[LinearClient, str, str]:
    """Return a configured Linear client, project ID, and team ID.

    Raises RuntimeError if any required setting is missing.
    """
    secrets = _get_settings().secrets
    if not secrets.linear_api_key:
        raise RuntimeError("LINEAR_API_KEY is not configured")
    if not secrets.linear_feature_request_project_id:
        raise RuntimeError("LINEAR_FEATURE_REQUEST_PROJECT_ID is not configured")
    if not secrets.linear_feature_request_team_id:
        raise RuntimeError("LINEAR_FEATURE_REQUEST_TEAM_ID is not configured")

    credentials = APIKeyCredentials(
        id="system-linear",
        provider="linear",
        api_key=SecretStr(secrets.linear_api_key),
        title="System Linear API Key",
    )
    client = LinearClient(credentials=credentials)
    return (
        client,
        secrets.linear_feature_request_project_id,
        secrets.linear_feature_request_team_id,
    )


class SearchFeatureRequestsTool(BaseTool):
    """Tool for searching existing feature requests in Linear."""

    @property
    def name(self) -> str:
        return "search_feature_requests"

    @property
    def description(self) -> str:
        return (
            "Search existing feature requests to check if a similar request "
            "already exists before creating a new one. Returns matching feature "
            "requests with their ID, title, and description."
        )

    @property
    def parameters(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "Search term to find matching feature requests.",
                },
            },
            "required": ["query"],
        }

    @property
    def requires_auth(self) -> bool:
        return True

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        query = kwargs.get("query", "").strip()
        session_id = session.session_id if session else None

        if not query:
            return ErrorResponse(
                message="Please provide a search query.",
                error="Missing query parameter",
                session_id=session_id,
            )

        try:
            client, project_id, _team_id = _get_linear_config()
            data = await client.query(
                SEARCH_ISSUES_QUERY,
                {
                    "term": query,
                    "filter": {
                        "project": {"id": {"eq": project_id}},
                    },
                    "first": MAX_SEARCH_RESULTS,
                },
            )

            nodes = data.get("searchIssues", {}).get("nodes", [])

            if not nodes:
                return NoResultsResponse(
                    message=f"No feature requests found matching '{query}'.",
                    suggestions=[
                        "Try different keywords",
                        "Use broader search terms",
                        "You can create a new feature request if none exists",
                    ],
                    session_id=session_id,
                )

            results = [
                FeatureRequestInfo(
                    id=node["id"],
                    identifier=node["identifier"],
                    title=node["title"],
                    description=node.get("description"),
                )
                for node in nodes
            ]

            return FeatureRequestSearchResponse(
                message=f"Found {len(results)} feature request(s) matching '{query}'.",
                results=results,
                count=len(results),
                query=query,
                session_id=session_id,
            )
        except Exception as e:
            logger.exception("Failed to search feature requests")
            return ErrorResponse(
                message="Failed to search feature requests.",
                error=str(e),
                session_id=session_id,
            )


class CreateFeatureRequestTool(BaseTool):
    """Tool for creating feature requests (or adding needs to existing ones)."""

    @property
    def name(self) -> str:
        return "create_feature_request"

    @property
    def description(self) -> str:
        return (
            "Create a new feature request or add a customer need to an existing one. "
            "Always search first with search_feature_requests to avoid duplicates. "
            "If a matching request exists, pass its ID as existing_issue_id to add "
            "the user's need to it instead of creating a duplicate."
        )

    @property
    def parameters(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "title": {
                    "type": "string",
                    "description": "Title for the feature request.",
                },
                "description": {
                    "type": "string",
                    "description": "Detailed description of what the user wants and why.",
                },
                "existing_issue_id": {
                    "type": "string",
                    "description": (
                        "If adding a need to an existing feature request, "
                        "provide its Linear issue ID (from search results). "
                        "Omit to create a new feature request."
                    ),
                },
            },
            "required": ["title", "description"],
        }

    @property
    def requires_auth(self) -> bool:
        return True

    async def _find_or_create_customer(
        self, client: LinearClient, user_id: str, name: str
    ) -> dict:
        """Find existing customer by user_id or create a new one via upsert.

        Args:
            client: Linear API client.
            user_id: Stable external ID used to deduplicate customers.
            name: Human-readable display name (e.g. the user's email).
        """
        data = await client.mutate(
            CUSTOMER_UPSERT_MUTATION,
            {
                "input": {
                    "name": name,
                    "externalId": user_id,
                },
            },
        )
        result = data.get("customerUpsert", {})
        if not result.get("success"):
            raise RuntimeError(f"Failed to upsert customer: {data}")
        return result["customer"]

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        title = kwargs.get("title", "").strip()
        description = kwargs.get("description", "").strip()
        existing_issue_id = kwargs.get("existing_issue_id")
        session_id = session.session_id if session else None

        if not title or not description:
            return ErrorResponse(
                message="Both title and description are required.",
                error="Missing required parameters",
                session_id=session_id,
            )

        if not user_id:
            return ErrorResponse(
                message="Authentication required to create feature requests.",
                error="Missing user_id",
                session_id=session_id,
            )

        try:
            client, project_id, team_id = _get_linear_config()
        except Exception as e:
            logger.exception("Failed to initialize Linear client")
            return ErrorResponse(
                message="Failed to create feature request.",
                error=str(e),
                session_id=session_id,
            )

        # Resolve a human-readable name (email) for the Linear customer record.
        # Fall back to user_id if the lookup fails or returns None.
        try:
            customer_display_name = await get_user_email_by_id(user_id) or user_id
        except Exception:
            customer_display_name = user_id

        # Step 1: Find or create customer for this user
        try:
            customer = await self._find_or_create_customer(
                client, user_id, customer_display_name
            )
            customer_id = customer["id"]
            customer_name = customer["name"]
        except Exception as e:
            logger.exception("Failed to upsert customer in Linear")
            return ErrorResponse(
                message="Failed to create feature request.",
                error=str(e),
                session_id=session_id,
            )

        # Step 2: Create or reuse issue
        issue_id: str | None = None
        issue_identifier: str | None = None
        if existing_issue_id:
            # Add need to existing issue - we still need the issue details for response
            is_new_issue = False
            issue_id = existing_issue_id
        else:
            # Create new issue in the feature requests project
            try:
                data = await client.mutate(
                    ISSUE_CREATE_MUTATION,
                    {
                        "input": {
                            "title": title,
                            "description": description,
                            "teamId": team_id,
                            "projectId": project_id,
                        },
                    },
                )
                result = data.get("issueCreate", {})
                if not result.get("success"):
                    return ErrorResponse(
                        message="Failed to create feature request issue.",
                        error=str(data),
                        session_id=session_id,
                    )
                issue = result["issue"]
                issue_id = issue["id"]
                issue_identifier = issue.get("identifier")
            except Exception as e:
                logger.exception("Failed to create feature request issue")
                return ErrorResponse(
                    message="Failed to create feature request.",
                    error=str(e),
                    session_id=session_id,
                )
            is_new_issue = True

        # Step 3: Create customer need on the issue
        try:
            data = await client.mutate(
                CUSTOMER_NEED_CREATE_MUTATION,
                {
                    "input": {
                        "customerId": customer_id,
                        "issueId": issue_id,
                        "body": description,
                        "priority": 0,
                    },
                },
            )
            need_result = data.get("customerNeedCreate", {})
            if not need_result.get("success"):
                orphaned = (
                    {"issue_id": issue_id, "issue_identifier": issue_identifier}
                    if is_new_issue
                    else None
                )
                return ErrorResponse(
                    message="Failed to attach customer need to the feature request.",
                    error=str(data),
                    details=orphaned,
                    session_id=session_id,
                )
            need = need_result["need"]
            issue_info = need["issue"]
        except Exception as e:
            logger.exception("Failed to create customer need")
            orphaned = (
                {"issue_id": issue_id, "issue_identifier": issue_identifier}
                if is_new_issue
                else None
            )
            return ErrorResponse(
                message="Failed to attach customer need to the feature request.",
                error=str(e),
                details=orphaned,
                session_id=session_id,
            )

        return FeatureRequestCreatedResponse(
            message=(
                f"{'Created new feature request' if is_new_issue else 'Added your request to existing feature request'}: "
                f"{issue_info['title']}."
            ),
            issue_id=issue_info["id"],
            issue_identifier=issue_info["identifier"],
            issue_title=issue_info["title"],
            issue_url=issue_info.get("url", ""),
            is_new_issue=is_new_issue,
            customer_name=customer_name,
            session_id=session_id,
        )
```
New test file (+615 lines) for the feature request tools:

@@ -0,0 +1,615 @@

```python
"""Tests for SearchFeatureRequestsTool and CreateFeatureRequestTool."""

from unittest.mock import AsyncMock, patch

import pytest

from backend.api.features.chat.tools.feature_requests import (
    CreateFeatureRequestTool,
    SearchFeatureRequestsTool,
)
from backend.api.features.chat.tools.models import (
    ErrorResponse,
    FeatureRequestCreatedResponse,
    FeatureRequestSearchResponse,
    NoResultsResponse,
)

from ._test_data import make_session

_TEST_USER_ID = "test-user-feature-requests"
_TEST_USER_EMAIL = "testuser@example.com"


# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------


_FAKE_PROJECT_ID = "test-project-id"
_FAKE_TEAM_ID = "test-team-id"


def _mock_linear_config(*, query_return=None, mutate_return=None):
    """Return a patched _get_linear_config that yields a mock LinearClient."""
    client = AsyncMock()
    if query_return is not None:
        client.query.return_value = query_return
    if mutate_return is not None:
        client.mutate.return_value = mutate_return
    return (
        patch(
            "backend.api.features.chat.tools.feature_requests._get_linear_config",
            return_value=(client, _FAKE_PROJECT_ID, _FAKE_TEAM_ID),
        ),
        client,
    )


def _search_response(nodes: list[dict]) -> dict:
    return {"searchIssues": {"nodes": nodes}}


def _customer_upsert_response(
    customer_id: str = "cust-1", name: str = _TEST_USER_EMAIL, success: bool = True
) -> dict:
    return {
        "customerUpsert": {
            "success": success,
            "customer": {"id": customer_id, "name": name, "externalIds": [name]},
        }
    }


def _issue_create_response(
    issue_id: str = "issue-1",
    identifier: str = "FR-1",
    title: str = "New Feature",
    success: bool = True,
) -> dict:
    return {
        "issueCreate": {
            "success": success,
            "issue": {
                "id": issue_id,
                "identifier": identifier,
                "title": title,
                "url": f"https://linear.app/issue/{identifier}",
            },
        }
    }


def _need_create_response(
    need_id: str = "need-1",
    issue_id: str = "issue-1",
    identifier: str = "FR-1",
    title: str = "New Feature",
    success: bool = True,
) -> dict:
    return {
        "customerNeedCreate": {
            "success": success,
            "need": {
                "id": need_id,
                "body": "description",
                "customer": {"id": "cust-1", "name": _TEST_USER_EMAIL},
                "issue": {
                    "id": issue_id,
                    "identifier": identifier,
                    "title": title,
                    "url": f"https://linear.app/issue/{identifier}",
                },
            },
        }
    }


# ===========================================================================
# SearchFeatureRequestsTool
# ===========================================================================


class TestSearchFeatureRequestsTool:
    """Tests for SearchFeatureRequestsTool._execute."""

    @pytest.mark.asyncio(loop_scope="session")
    async def test_successful_search(self):
        session = make_session(user_id=_TEST_USER_ID)
        nodes = [
            {
                "id": "id-1",
                "identifier": "FR-1",
                "title": "Dark mode",
                "description": "Add dark mode support",
            },
            {
                "id": "id-2",
                "identifier": "FR-2",
                "title": "Dark theme",
                "description": None,
            },
        ]
        patcher, _ = _mock_linear_config(query_return=_search_response(nodes))
        with patcher:
            tool = SearchFeatureRequestsTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID, session=session, query="dark mode"
            )

        assert isinstance(resp, FeatureRequestSearchResponse)
        assert resp.count == 2
        assert resp.results[0].id == "id-1"
        assert resp.results[1].identifier == "FR-2"
        assert resp.query == "dark mode"

    @pytest.mark.asyncio(loop_scope="session")
    async def test_no_results(self):
        session = make_session(user_id=_TEST_USER_ID)
        patcher, _ = _mock_linear_config(query_return=_search_response([]))
        with patcher:
            tool = SearchFeatureRequestsTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID, session=session, query="nonexistent"
            )

        assert isinstance(resp, NoResultsResponse)
        assert "nonexistent" in resp.message

    @pytest.mark.asyncio(loop_scope="session")
    async def test_empty_query_returns_error(self):
        session = make_session(user_id=_TEST_USER_ID)
        tool = SearchFeatureRequestsTool()
        resp = await tool._execute(user_id=_TEST_USER_ID, session=session, query=" ")

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "query" in resp.error.lower()

    @pytest.mark.asyncio(loop_scope="session")
    async def test_missing_query_returns_error(self):
        session = make_session(user_id=_TEST_USER_ID)
        tool = SearchFeatureRequestsTool()
        resp = await tool._execute(user_id=_TEST_USER_ID, session=session)

        assert isinstance(resp, ErrorResponse)

    @pytest.mark.asyncio(loop_scope="session")
    async def test_api_failure(self):
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.query.side_effect = RuntimeError("Linear API down")
        with patcher:
            tool = SearchFeatureRequestsTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID, session=session, query="test"
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "Linear API down" in resp.error

    @pytest.mark.asyncio(loop_scope="session")
    async def test_malformed_node_returns_error(self):
        """A node missing required keys should be caught by the try/except."""
        session = make_session(user_id=_TEST_USER_ID)
        # Node missing 'identifier' key
        bad_nodes = [{"id": "id-1", "title": "Missing identifier"}]
        patcher, _ = _mock_linear_config(query_return=_search_response(bad_nodes))
        with patcher:
            tool = SearchFeatureRequestsTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID, session=session, query="test"
            )

        assert isinstance(resp, ErrorResponse)

    @pytest.mark.asyncio(loop_scope="session")
    async def test_linear_client_init_failure(self):
        session = make_session(user_id=_TEST_USER_ID)
        with patch(
            "backend.api.features.chat.tools.feature_requests._get_linear_config",
            side_effect=RuntimeError("No API key"),
        ):
            tool = SearchFeatureRequestsTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID, session=session, query="test"
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "No API key" in resp.error


# ===========================================================================
# CreateFeatureRequestTool
# ===========================================================================


class TestCreateFeatureRequestTool:
    """Tests for CreateFeatureRequestTool._execute."""

    @pytest.fixture(autouse=True)
    def _patch_email_lookup(self):
        with patch(
            "backend.api.features.chat.tools.feature_requests.get_user_email_by_id",
            new_callable=AsyncMock,
            return_value=_TEST_USER_EMAIL,
        ):
            yield

    # ---- Happy paths -------------------------------------------------------

    @pytest.mark.asyncio(loop_scope="session")
    async def test_create_new_issue(self):
        """Full happy path: upsert customer -> create issue -> attach need."""
        session = make_session(user_id=_TEST_USER_ID)

        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            _issue_create_response(),
            _need_create_response(),
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="New Feature",
                description="Please add this",
            )

        assert isinstance(resp, FeatureRequestCreatedResponse)
        assert resp.is_new_issue is True
        assert resp.issue_identifier == "FR-1"
        assert resp.customer_name == _TEST_USER_EMAIL
        assert client.mutate.call_count == 3

    @pytest.mark.asyncio(loop_scope="session")
    async def test_add_need_to_existing_issue(self):
        """When existing_issue_id is provided, skip issue creation."""
        session = make_session(user_id=_TEST_USER_ID)

        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            _need_create_response(issue_id="existing-1", identifier="FR-99"),
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Existing Feature",
                description="Me too",
                existing_issue_id="existing-1",
            )

        assert isinstance(resp, FeatureRequestCreatedResponse)
        assert resp.is_new_issue is False
        assert resp.issue_id == "existing-1"
        # Only 2 mutations: customer upsert + need create (no issue create)
        assert client.mutate.call_count == 2

    # ---- Validation errors -------------------------------------------------

    @pytest.mark.asyncio(loop_scope="session")
    async def test_missing_title(self):
        session = make_session(user_id=_TEST_USER_ID)
        tool = CreateFeatureRequestTool()
        resp = await tool._execute(
            user_id=_TEST_USER_ID,
            session=session,
            title="",
            description="some desc",
        )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "required" in resp.error.lower()

    @pytest.mark.asyncio(loop_scope="session")
    async def test_missing_description(self):
        session = make_session(user_id=_TEST_USER_ID)
        tool = CreateFeatureRequestTool()
        resp = await tool._execute(
            user_id=_TEST_USER_ID,
            session=session,
            title="Some title",
            description="",
        )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "required" in resp.error.lower()

    @pytest.mark.asyncio(loop_scope="session")
    async def test_missing_user_id(self):
        session = make_session(user_id=_TEST_USER_ID)
        tool = CreateFeatureRequestTool()
        resp = await tool._execute(
            user_id=None,
            session=session,
            title="Some title",
            description="Some desc",
        )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "user_id" in resp.error.lower()

    # ---- Linear client init failure ----------------------------------------

    @pytest.mark.asyncio(loop_scope="session")
    async def test_linear_client_init_failure(self):
        session = make_session(user_id=_TEST_USER_ID)
        with patch(
            "backend.api.features.chat.tools.feature_requests._get_linear_config",
            side_effect=RuntimeError("No API key"),
        ):
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "No API key" in resp.error

    # ---- Customer upsert failures ------------------------------------------

    @pytest.mark.asyncio(loop_scope="session")
    async def test_customer_upsert_api_error(self):
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = RuntimeError("Customer API error")

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "Customer API error" in resp.error

    @pytest.mark.asyncio(loop_scope="session")
    async def test_customer_upsert_not_success(self):
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.return_value = _customer_upsert_response(success=False)

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)

    @pytest.mark.asyncio(loop_scope="session")
    async def test_customer_malformed_response(self):
        """Customer dict missing 'id' key should be caught."""
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        # success=True but customer has no 'id'
        client.mutate.return_value = {
            "customerUpsert": {
                "success": True,
                "customer": {"name": _TEST_USER_ID},
            }
        }

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)

    # ---- Issue creation failures -------------------------------------------

    @pytest.mark.asyncio(loop_scope="session")
    async def test_issue_create_api_error(self):
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            RuntimeError("Issue create failed"),
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "Issue create failed" in resp.error

    @pytest.mark.asyncio(loop_scope="session")
    async def test_issue_create_not_success(self):
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            _issue_create_response(success=False),
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)
        assert "Failed to create feature request issue" in resp.message

    @pytest.mark.asyncio(loop_scope="session")
    async def test_issue_create_malformed_response(self):
        """issueCreate success=True but missing 'issue' key."""
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            {"issueCreate": {"success": True}},  # no 'issue' key
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)

    # ---- Customer need attachment failures ---------------------------------

    @pytest.mark.asyncio(loop_scope="session")
    async def test_need_create_api_error_new_issue(self):
        """Need creation fails after new issue was created -> orphaned issue info."""
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            _issue_create_response(issue_id="orphan-1", identifier="FR-10"),
            RuntimeError("Need attach failed"),
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "Need attach failed" in resp.error
        assert resp.details is not None
        assert resp.details["issue_id"] == "orphan-1"
        assert resp.details["issue_identifier"] == "FR-10"

    @pytest.mark.asyncio(loop_scope="session")
    async def test_need_create_api_error_existing_issue(self):
        """Need creation fails on existing issue -> no orphaned info."""
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            RuntimeError("Need attach failed"),
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
                existing_issue_id="existing-1",
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.details is None

    @pytest.mark.asyncio(loop_scope="session")
    async def test_need_create_not_success_includes_orphaned_info(self):
        """customerNeedCreate returns success=False -> includes orphaned issue."""
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            _issue_create_response(issue_id="orphan-2", identifier="FR-20"),
            _need_create_response(success=False),
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.details is not None
        assert resp.details["issue_id"] == "orphan-2"
        assert resp.details["issue_identifier"] == "FR-20"

    @pytest.mark.asyncio(loop_scope="session")
    async def test_need_create_not_success_existing_issue_no_details(self):
        """customerNeedCreate fails on existing issue -> no orphaned info."""
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            _need_create_response(success=False),
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
                existing_issue_id="existing-1",
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.details is None

    @pytest.mark.asyncio(loop_scope="session")
    async def test_need_create_malformed_response(self):
        """need_result missing 'need' key after success=True."""
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            _issue_create_response(),
            {"customerNeedCreate": {"success": True}},  # no 'need' key
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.details is not None
        assert resp.details["issue_id"] == "issue-1"
```
@@ -41,6 +41,9 @@ class ResponseType(str, Enum):

```python
    OPERATION_IN_PROGRESS = "operation_in_progress"
    # Input validation
    INPUT_VALIDATION_ERROR = "input_validation_error"
    # Feature request types
    FEATURE_REQUEST_SEARCH = "feature_request_search"
    FEATURE_REQUEST_CREATED = "feature_request_created"


# Base response model
```

@@ -430,3 +433,34 @@ class AsyncProcessingResponse(ToolResponseBase):

```python
    status: str = "accepted"  # Must be "accepted" for detection
    operation_id: str | None = None
    task_id: str | None = None


# Feature request models
class FeatureRequestInfo(BaseModel):
    """Information about a feature request issue."""

    id: str
    identifier: str
    title: str
    description: str | None = None


class FeatureRequestSearchResponse(ToolResponseBase):
    """Response for search_feature_requests tool."""

    type: ResponseType = ResponseType.FEATURE_REQUEST_SEARCH
    results: list[FeatureRequestInfo]
    count: int
    query: str


class FeatureRequestCreatedResponse(ToolResponseBase):
    """Response for create_feature_request tool."""

    type: ResponseType = ResponseType.FEATURE_REQUEST_CREATED
    issue_id: str
    issue_identifier: str
    issue_title: str
    issue_url: str
    is_new_issue: bool  # False if added to existing
    customer_name: str
```
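As a sketch of what the frontend tool components receive, here is one of the new models serialized. The field values are made up, `message` and `session_id` are inherited from `ToolResponseBase`, and pydantic v2's `model_dump` is assumed since that is what the backend models appear to use.

```python
# Illustrative values only; the field names come from the models above.
from backend.api.features.chat.tools.models import FeatureRequestCreatedResponse

payload = FeatureRequestCreatedResponse(
    message="Created new feature request: Add dark mode.",
    issue_id="issue-1",
    issue_identifier="FR-1",
    issue_title="Add dark mode",
    issue_url="https://linear.app/autogpt/issue/FR-1/add-dark-mode",
    is_new_issue=True,
    customer_name="testuser@example.com",
    session_id="session-123",
).model_dump()

assert payload["type"] == "feature_request_created"
assert payload["is_new_issue"] is True
```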
@@ -662,6 +662,17 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):

```python
    mem0_api_key: str = Field(default="", description="Mem0 API key")
    elevenlabs_api_key: str = Field(default="", description="ElevenLabs API key")

    linear_api_key: str = Field(
        default="", description="Linear API key for system-level operations"
    )
    linear_feature_request_project_id: str = Field(
        default="",
        description="Linear project ID where feature requests are tracked",
    )
    linear_feature_request_team_id: str = Field(
        default="",
        description="Linear team ID used when creating feature request issues",
    )
    linear_client_id: str = Field(default="", description="Linear client ID")
    linear_client_secret: str = Field(default="", description="Linear client secret")
```
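A small sketch of how the new `.env` keys surface in code. This assumes the usual pydantic `BaseSettings` behavior (each `Secrets` field is populated from the matching environment variable, case-insensitively), which the base class above suggests but this diff does not spell out.

```python
# Sketch only: the env vars added to .env earlier in this PR map onto the new Secrets fields.
import os

os.environ["LINEAR_API_KEY"] = "lin_api_example"          # illustrative value
os.environ["LINEAR_FEATURE_REQUEST_PROJECT_ID"] = "prj_123"
os.environ["LINEAR_FEATURE_REQUEST_TEAM_ID"] = "team_456"

from backend.util.settings import Settings

secrets = Settings().secrets
assert secrets.linear_api_key == "lin_api_example"
assert secrets.linear_feature_request_project_id == "prj_123"
```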
@@ -15,6 +15,10 @@ import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai";

```tsx
import { useEffect, useRef, useState } from "react";
import { CreateAgentTool } from "../../tools/CreateAgent/CreateAgent";
import { EditAgentTool } from "../../tools/EditAgent/EditAgent";
import {
  CreateFeatureRequestTool,
  SearchFeatureRequestsTool,
} from "../../tools/FeatureRequests/FeatureRequests";
import { FindAgentsTool } from "../../tools/FindAgents/FindAgents";
import { FindBlocksTool } from "../../tools/FindBlocks/FindBlocks";
import { RunAgentTool } from "../../tools/RunAgent/RunAgent";
```

@@ -254,6 +258,20 @@ export const ChatMessagesContainer = ({

```tsx
                  part={part as ToolUIPart}
                />
              );
            case "tool-search_feature_requests":
              return (
                <SearchFeatureRequestsTool
                  key={`${message.id}-${i}`}
                  part={part as ToolUIPart}
                />
              );
            case "tool-create_feature_request":
              return (
                <CreateFeatureRequestTool
                  key={`${message.id}-${i}`}
                  part={part as ToolUIPart}
                />
              );
            default:
              return null;
          }
```
@@ -14,6 +14,10 @@ import { Text } from "@/components/atoms/Text/Text";

```tsx
import { CopilotChatActionsProvider } from "../components/CopilotChatActionsProvider/CopilotChatActionsProvider";
import { CreateAgentTool } from "../tools/CreateAgent/CreateAgent";
import { EditAgentTool } from "../tools/EditAgent/EditAgent";
import {
  CreateFeatureRequestTool,
  SearchFeatureRequestsTool,
} from "../tools/FeatureRequests/FeatureRequests";
import { FindAgentsTool } from "../tools/FindAgents/FindAgents";
import { FindBlocksTool } from "../tools/FindBlocks/FindBlocks";
import { RunAgentTool } from "../tools/RunAgent/RunAgent";
```

@@ -45,6 +49,8 @@ const SECTIONS = [

```tsx
  "Tool: Create Agent",
  "Tool: Edit Agent",
  "Tool: View Agent Output",
  "Tool: Search Feature Requests",
  "Tool: Create Feature Request",
  "Full Conversation Example",
] as const;
```

@@ -1421,6 +1427,235 @@ export default function StyleguidePage() {

```tsx
        </SubSection>
      </Section>

      {/* ============================================================= */}
      {/* SEARCH FEATURE REQUESTS */}
      {/* ============================================================= */}

      <Section title="Tool: Search Feature Requests">
        <SubSection label="Input streaming">
          <SearchFeatureRequestsTool
            part={{
              type: "tool-search_feature_requests",
              toolCallId: uid(),
              state: "input-streaming",
              input: { query: "dark mode" },
            }}
          />
        </SubSection>

        <SubSection label="Input available">
          <SearchFeatureRequestsTool
            part={{
              type: "tool-search_feature_requests",
              toolCallId: uid(),
              state: "input-available",
              input: { query: "dark mode" },
            }}
          />
        </SubSection>

        <SubSection label="Output available (with results)">
          <SearchFeatureRequestsTool
            part={{
              type: "tool-search_feature_requests",
              toolCallId: uid(),
              state: "output-available",
              input: { query: "dark mode" },
              output: {
                type: "feature_request_search",
                message:
                  'Found 2 feature request(s) matching "dark mode".',
                query: "dark mode",
                count: 2,
                results: [
                  {
                    id: "fr-001",
                    identifier: "INT-42",
                    title: "Add dark mode to the platform",
                    description:
                      "Users have requested a dark mode option for the builder and copilot interfaces to reduce eye strain during long sessions.",
                  },
                  {
                    id: "fr-002",
                    identifier: "INT-87",
                    title: "Dark theme for agent output viewer",
                    description:
                      "Specifically requesting dark theme support for the agent output/execution viewer panel.",
                  },
                ],
              },
            }}
          />
        </SubSection>

        <SubSection label="Output available (no results)">
          <SearchFeatureRequestsTool
            part={{
              type: "tool-search_feature_requests",
              toolCallId: uid(),
              state: "output-available",
              input: { query: "teleportation" },
              output: {
                type: "no_results",
                message:
                  "No feature requests found matching 'teleportation'.",
                suggestions: [
                  "Try different keywords",
                  "Use broader search terms",
                  "You can create a new feature request if none exists",
                ],
              },
            }}
          />
        </SubSection>

        <SubSection label="Output available (error)">
          <SearchFeatureRequestsTool
            part={{
              type: "tool-search_feature_requests",
              toolCallId: uid(),
              state: "output-available",
              input: { query: "dark mode" },
              output: {
                type: "error",
                message: "Failed to search feature requests.",
                error: "LINEAR_API_KEY environment variable is not set",
              },
            }}
          />
        </SubSection>

        <SubSection label="Output error">
          <SearchFeatureRequestsTool
            part={{
              type: "tool-search_feature_requests",
              toolCallId: uid(),
              state: "output-error",
              input: { query: "dark mode" },
            }}
          />
        </SubSection>
      </Section>

      {/* ============================================================= */}
      {/* CREATE FEATURE REQUEST */}
      {/* ============================================================= */}

      <Section title="Tool: Create Feature Request">
        <SubSection label="Input streaming">
          <CreateFeatureRequestTool
            part={{
              type: "tool-create_feature_request",
              toolCallId: uid(),
              state: "input-streaming",
              input: {
                title: "Add dark mode",
                description: "I would love dark mode for the platform.",
              },
            }}
          />
        </SubSection>

        <SubSection label="Input available">
          <CreateFeatureRequestTool
            part={{
              type: "tool-create_feature_request",
              toolCallId: uid(),
              state: "input-available",
              input: {
                title: "Add dark mode",
                description: "I would love dark mode for the platform.",
              },
            }}
          />
        </SubSection>

        <SubSection label="Output available (new issue created)">
          <CreateFeatureRequestTool
            part={{
              type: "tool-create_feature_request",
              toolCallId: uid(),
              state: "output-available",
              input: {
                title: "Add dark mode",
                description: "I would love dark mode for the platform.",
              },
              output: {
                type: "feature_request_created",
                message:
                  "Created new feature request [INT-105] Add dark mode.",
                issue_id: "issue-new-123",
                issue_identifier: "INT-105",
                issue_title: "Add dark mode",
                issue_url:
                  "https://linear.app/autogpt/issue/INT-105/add-dark-mode",
                is_new_issue: true,
                customer_name: "user-abc-123",
              },
            }}
          />
        </SubSection>

        <SubSection label="Output available (added to existing issue)">
          <CreateFeatureRequestTool
            part={{
              type: "tool-create_feature_request",
              toolCallId: uid(),
              state: "output-available",
              input: {
                title: "Dark mode support",
                description:
                  "Please add dark mode, it would help with long sessions.",
                existing_issue_id: "fr-001",
              },
              output: {
                type: "feature_request_created",
                message:
                  "Added your request to existing feature request [INT-42] Add dark mode to the platform.",
                issue_id: "fr-001",
                issue_identifier: "INT-42",
                issue_title: "Add dark mode to the platform",
                issue_url:
                  "https://linear.app/autogpt/issue/INT-42/add-dark-mode-to-the-platform",
                is_new_issue: false,
                customer_name: "user-xyz-789",
              },
            }}
          />
        </SubSection>

        <SubSection label="Output available (error)">
          <CreateFeatureRequestTool
            part={{
              type: "tool-create_feature_request",
              toolCallId: uid(),
              state: "output-available",
              input: {
                title: "Add dark mode",
                description: "I would love dark mode.",
              },
              output: {
                type: "error",
                message:
                  "Failed to attach customer need to the feature request.",
                error: "Linear API request failed (500): Internal error",
              },
            }}
          />
        </SubSection>

        <SubSection label="Output error">
          <CreateFeatureRequestTool
            part={{
              type: "tool-create_feature_request",
              toolCallId: uid(),
              state: "output-error",
              input: { title: "Add dark mode" },
            }}
          />
        </SubSection>
      </Section>

      {/* ============================================================= */}
      {/* FULL CONVERSATION EXAMPLE */}
      {/* ============================================================= */}
```
@@ -4,7 +4,6 @@ import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import {
  BookOpenIcon,
  CheckFatIcon,
  PencilSimpleIcon,
  WarningDiamondIcon,
} from "@phosphor-icons/react";
@@ -24,6 +23,7 @@ import {
  ClarificationQuestionsCard,
  ClarifyingQuestion,
} from "./components/ClarificationQuestionsCard";
import sparklesImg from "./components/MiniGame/assets/sparkles.png";
import { MiniGame } from "./components/MiniGame/MiniGame";
import {
  AccordionIcon,
@@ -83,7 +83,8 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
  ) {
    return {
      icon,
      title: "Creating agent, this may take a few minutes. Sit back and relax.",
      title:
        "Creating agent, this may take a few minutes. Play while you wait.",
      expanded: true,
    };
  }
@@ -167,16 +168,22 @@ export function CreateAgentTool({ part }: Props) {
        {isAgentSavedOutput(output) && (
          <div className="rounded-xl border border-border/60 bg-card p-4 shadow-sm">
            <div className="flex items-baseline gap-2">
              <CheckFatIcon
                size={18}
                weight="regular"
                className="relative top-1 text-green-500"
              <img
                src={sparklesImg.src}
                alt="sparkles"
                width={24}
                height={24}
                className="relative top-1"
              />
              <Text
                variant="body-medium"
                className="text-blacks mb-2 text-[16px]"
              >
                {output.message}
                Agent{" "}
                <span className="text-[rgb(124,58,237)]">
                  {output.agent_name}
                </span>{" "}
                has been saved to your library!
              </Text>
            </div>
            <div className="mt-3 flex flex-wrap gap-4">

@@ -2,20 +2,78 @@

import { useMiniGame } from "./useMiniGame";

function Key({ children }: { children: React.ReactNode }) {
  return <strong>[{children}]</strong>;
}

export function MiniGame() {
  const { canvasRef } = useMiniGame();
  const {
    canvasRef,
    activeMode,
    showOverlay,
    score,
    highScore,
    onContinue,
  } = useMiniGame();

  const isRunActive =
    activeMode === "run" || activeMode === "idle" || activeMode === "over";
  const isBossActive =
    activeMode === "boss" ||
    activeMode === "boss-intro" ||
    activeMode === "boss-defeated";

  let overlayText: string | undefined;
  let buttonLabel = "Continue";
  if (activeMode === "idle") {
    buttonLabel = "Start";
  } else if (activeMode === "boss-intro") {
    overlayText = "Face the bandit!";
  } else if (activeMode === "boss-defeated") {
    overlayText = "Great job, keep on going";
  } else if (activeMode === "over") {
    overlayText = `Score: ${score} / Record: ${highScore}`;
    buttonLabel = "Retry";
  }

  return (
    <div
      className="w-full overflow-hidden rounded-md bg-background text-foreground"
      style={{ border: "1px solid #d17fff" }}
    >
      <canvas
        ref={canvasRef}
        tabIndex={0}
        className="block w-full outline-none"
        style={{ imageRendering: "pixelated" }}
      />
    <div className="flex flex-col gap-2">
      <p className="text-sm font-medium text-purple-500">
        {isBossActive ? (
          <>
            Duel mode: <Key>←→</Key> to move · <Key>Z</Key> to attack ·{" "}
            <Key>X</Key> to block · <Key>Space</Key> to jump
          </>
        ) : (
          <>
            Run mode: <Key>Space</Key> to jump
          </>
        )}
      </p>
      <div
        className="relative w-full overflow-hidden rounded-md bg-background text-foreground"
        style={{ border: "1px solid #d17fff" }}
      >
        <canvas
          ref={canvasRef}
          tabIndex={0}
          className="block w-full outline-none"
        />
        {showOverlay && (
          <div className="absolute inset-0 flex flex-col items-center justify-center gap-3 bg-black/40">
            {overlayText && (
              <p className="text-lg font-bold text-white">{overlayText}</p>
            )}
            <button
              type="button"
              onClick={onContinue}
              className="rounded-md bg-white px-4 py-2 text-sm font-semibold text-zinc-800 shadow-md transition-colors hover:bg-zinc-100"
            >
              {buttonLabel}
            </button>
          </div>
        )}
      </div>
    </div>
  );
}

[11 new binary image assets added, ranging from 4.9 KiB to 16 KiB; image dimensions are not captured in this view]
@@ -136,7 +136,7 @@ export function getAnimationText(part: {
  if (isOperationPendingOutput(output)) return "Agent creation in progress";
  if (isOperationInProgressOutput(output))
    return "Agent creation already in progress";
  if (isAgentSavedOutput(output)) return `Saved "${output.agent_name}"`;
  if (isAgentSavedOutput(output)) return `Saved ${output.agent_name}`;
  if (isAgentPreviewOutput(output)) return `Preview "${output.agent_name}"`;
  if (isClarificationNeededOutput(output)) return "Needs clarification";
  return "Error creating agent";

@@ -5,7 +5,6 @@ import type { ToolUIPart } from "ai";
import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions";
import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
import { ProgressBar } from "../../components/ProgressBar/ProgressBar";
import {
  ContentCardDescription,
  ContentCodeBlock,
@@ -15,7 +14,7 @@ import {
  ContentMessage,
} from "../../components/ToolAccordion/AccordionContent";
import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
import { useAsymptoticProgress } from "../../hooks/useAsymptoticProgress";
import { MiniGame } from "../CreateAgent/components/MiniGame/MiniGame";
import {
  ClarificationQuestionsCard,
  ClarifyingQuestion,
@@ -80,7 +79,12 @@ function getAccordionMeta(output: EditAgentToolOutput): {
    isOperationPendingOutput(output) ||
    isOperationInProgressOutput(output)
  ) {
    return { icon: <OrbitLoader size={32} />, title: "Editing agent" };
    return {
      icon: <OrbitLoader size={32} />,
      title:
        "Editing agent, this may take a few minutes. Play while you wait.",
      expanded: true,
    };
  }
  return {
    icon: (
@@ -105,7 +109,6 @@ export function EditAgentTool({ part }: Props) {
    (isOperationStartedOutput(output) ||
      isOperationPendingOutput(output) ||
      isOperationInProgressOutput(output));
  const progress = useAsymptoticProgress(isOperating);
  const hasExpandableContent =
    part.state === "output-available" &&
    !!output &&
@@ -149,9 +152,9 @@ export function EditAgentTool({ part }: Props) {
          <ToolAccordion {...getAccordionMeta(output)}>
            {isOperating && (
              <ContentGrid>
                <ProgressBar value={progress} className="max-w-[280px]" />
                <MiniGame />
                <ContentHint>
                  This could take a few minutes, grab a coffee ☕
                  This could take a few minutes — play while you wait!
                </ContentHint>
              </ContentGrid>
            )}

@@ -0,0 +1,227 @@
"use client";

import type { ToolUIPart } from "ai";
import { useMemo } from "react";

import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
import {
  ContentBadge,
  ContentCard,
  ContentCardDescription,
  ContentCardHeader,
  ContentCardTitle,
  ContentGrid,
  ContentMessage,
  ContentSuggestionsList,
} from "../../components/ToolAccordion/AccordionContent";
import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
import {
  AccordionIcon,
  getAccordionTitle,
  getAnimationText,
  getFeatureRequestOutput,
  isCreatedOutput,
  isErrorOutput,
  isNoResultsOutput,
  isSearchResultsOutput,
  ToolIcon,
  type FeatureRequestToolType,
} from "./helpers";

export interface FeatureRequestToolPart {
  type: FeatureRequestToolType;
  toolCallId: string;
  state: ToolUIPart["state"];
  input?: unknown;
  output?: unknown;
}

interface Props {
  part: FeatureRequestToolPart;
}

function truncate(text: string, maxChars: number): string {
  const trimmed = text.trim();
  if (trimmed.length <= maxChars) return trimmed;
  return `${trimmed.slice(0, maxChars).trimEnd()}…`;
}

export function SearchFeatureRequestsTool({ part }: Props) {
  const output = getFeatureRequestOutput(part);
  const text = getAnimationText(part);
  const isStreaming =
    part.state === "input-streaming" || part.state === "input-available";
  const isError =
    part.state === "output-error" || (!!output && isErrorOutput(output));

  const normalized = useMemo(() => {
    if (!output) return null;
    return { title: getAccordionTitle(part.type, output) };
  }, [output, part.type]);

  const isOutputAvailable = part.state === "output-available" && !!output;

  const searchOutput =
    isOutputAvailable && output && isSearchResultsOutput(output)
      ? output
      : null;
  const noResultsOutput =
    isOutputAvailable && output && isNoResultsOutput(output) ? output : null;
  const errorOutput =
    isOutputAvailable && output && isErrorOutput(output) ? output : null;

  const hasExpandableContent =
    isOutputAvailable &&
    ((!!searchOutput && searchOutput.count > 0) ||
      !!noResultsOutput ||
      !!errorOutput);

  const accordionDescription =
    hasExpandableContent && searchOutput
      ? `Found ${searchOutput.count} result${searchOutput.count === 1 ? "" : "s"} for "${searchOutput.query}"`
      : hasExpandableContent && (noResultsOutput || errorOutput)
        ? ((noResultsOutput ?? errorOutput)?.message ?? null)
        : null;

  return (
    <div className="py-2">
      <div className="flex items-center gap-2 text-sm text-muted-foreground">
        <ToolIcon
          toolType={part.type}
          isStreaming={isStreaming}
          isError={isError}
        />
        <MorphingTextAnimation
          text={text}
          className={isError ? "text-red-500" : undefined}
        />
      </div>

      {hasExpandableContent && normalized && (
        <ToolAccordion
          icon={<AccordionIcon toolType={part.type} />}
          title={normalized.title}
          description={accordionDescription}
        >
          {searchOutput && (
            <ContentGrid>
              {searchOutput.results.map((r) => (
                <ContentCard key={r.id}>
                  <ContentCardHeader>
                    <ContentCardTitle>{r.title}</ContentCardTitle>
                  </ContentCardHeader>
                  {r.description && (
                    <ContentCardDescription>
                      {truncate(r.description, 200)}
                    </ContentCardDescription>
                  )}
                </ContentCard>
              ))}
            </ContentGrid>
          )}

          {noResultsOutput && (
            <div>
              <ContentMessage>{noResultsOutput.message}</ContentMessage>
              {noResultsOutput.suggestions &&
                noResultsOutput.suggestions.length > 0 && (
                  <ContentSuggestionsList items={noResultsOutput.suggestions} />
                )}
            </div>
          )}

          {errorOutput && (
            <div>
              <ContentMessage>{errorOutput.message}</ContentMessage>
              {errorOutput.error && (
                <ContentCardDescription>
                  {errorOutput.error}
                </ContentCardDescription>
              )}
            </div>
          )}
        </ToolAccordion>
      )}
    </div>
  );
}

export function CreateFeatureRequestTool({ part }: Props) {
  const output = getFeatureRequestOutput(part);
  const text = getAnimationText(part);
  const isStreaming =
    part.state === "input-streaming" || part.state === "input-available";
  const isError =
    part.state === "output-error" || (!!output && isErrorOutput(output));

  const normalized = useMemo(() => {
    if (!output) return null;
    return { title: getAccordionTitle(part.type, output) };
  }, [output, part.type]);

  const isOutputAvailable = part.state === "output-available" && !!output;

  const createdOutput =
    isOutputAvailable && output && isCreatedOutput(output) ? output : null;
  const errorOutput =
    isOutputAvailable && output && isErrorOutput(output) ? output : null;

  const hasExpandableContent =
    isOutputAvailable && (!!createdOutput || !!errorOutput);

  const accordionDescription =
    hasExpandableContent && createdOutput
      ? createdOutput.issue_title
      : hasExpandableContent && errorOutput
        ? errorOutput.message
        : null;

  return (
    <div className="py-2">
      <div className="flex items-center gap-2 text-sm text-muted-foreground">
        <ToolIcon
          toolType={part.type}
          isStreaming={isStreaming}
          isError={isError}
        />
        <MorphingTextAnimation
          text={text}
          className={isError ? "text-red-500" : undefined}
        />
      </div>

      {hasExpandableContent && normalized && (
        <ToolAccordion
          icon={<AccordionIcon toolType={part.type} />}
          title={normalized.title}
          description={accordionDescription}
        >
          {createdOutput && (
            <ContentCard>
              <ContentCardHeader>
                <ContentCardTitle>{createdOutput.issue_title}</ContentCardTitle>
              </ContentCardHeader>
              <div className="mt-2 flex items-center gap-2">
                <ContentBadge>
                  {createdOutput.is_new_issue ? "New" : "Existing"}
                </ContentBadge>
              </div>
              <ContentMessage>{createdOutput.message}</ContentMessage>
            </ContentCard>
          )}

          {errorOutput && (
            <div>
              <ContentMessage>{errorOutput.message}</ContentMessage>
              {errorOutput.error && (
                <ContentCardDescription>
                  {errorOutput.error}
                </ContentCardDescription>
              )}
            </div>
          )}
        </ToolAccordion>
      )}
    </div>
  );
}
@@ -0,0 +1,271 @@
import {
  CheckCircleIcon,
  LightbulbIcon,
  MagnifyingGlassIcon,
  PlusCircleIcon,
} from "@phosphor-icons/react";
import type { ToolUIPart } from "ai";

/* ------------------------------------------------------------------ */
/* Types (local until API client is regenerated) */
/* ------------------------------------------------------------------ */

interface FeatureRequestInfo {
  id: string;
  identifier: string;
  title: string;
  description?: string | null;
}

export interface FeatureRequestSearchResponse {
  type: "feature_request_search";
  message: string;
  results: FeatureRequestInfo[];
  count: number;
  query: string;
}

export interface FeatureRequestCreatedResponse {
  type: "feature_request_created";
  message: string;
  issue_id: string;
  issue_identifier: string;
  issue_title: string;
  issue_url: string;
  is_new_issue: boolean;
  customer_name: string;
}

interface NoResultsResponse {
  type: "no_results";
  message: string;
  suggestions?: string[];
}

interface ErrorResponse {
  type: "error";
  message: string;
  error?: string;
}

export type FeatureRequestOutput =
  | FeatureRequestSearchResponse
  | FeatureRequestCreatedResponse
  | NoResultsResponse
  | ErrorResponse;

export type FeatureRequestToolType =
  | "tool-search_feature_requests"
  | "tool-create_feature_request"
  | string;

/* ------------------------------------------------------------------ */
/* Output parsing */
/* ------------------------------------------------------------------ */

function parseOutput(output: unknown): FeatureRequestOutput | null {
  if (!output) return null;
  if (typeof output === "string") {
    const trimmed = output.trim();
    if (!trimmed) return null;
    try {
      return parseOutput(JSON.parse(trimmed) as unknown);
    } catch {
      return null;
    }
  }
  if (typeof output === "object") {
    const type = (output as { type?: unknown }).type;
    if (
      type === "feature_request_search" ||
      type === "feature_request_created" ||
      type === "no_results" ||
      type === "error"
    ) {
      return output as FeatureRequestOutput;
    }
    // Fallback structural checks
    if ("results" in output && "query" in output)
      return output as FeatureRequestSearchResponse;
    if ("issue_identifier" in output)
      return output as FeatureRequestCreatedResponse;
    if ("suggestions" in output && !("error" in output))
      return output as NoResultsResponse;
    if ("error" in output || "details" in output)
      return output as ErrorResponse;
  }
  return null;
}

export function getFeatureRequestOutput(
  part: unknown,
): FeatureRequestOutput | null {
  if (!part || typeof part !== "object") return null;
  return parseOutput((part as { output?: unknown }).output);
}

/* ------------------------------------------------------------------ */
/* Type guards */
/* ------------------------------------------------------------------ */

export function isSearchResultsOutput(
  output: FeatureRequestOutput,
): output is FeatureRequestSearchResponse {
  return (
    output.type === "feature_request_search" ||
    ("results" in output && "query" in output)
  );
}

export function isCreatedOutput(
  output: FeatureRequestOutput,
): output is FeatureRequestCreatedResponse {
  return (
    output.type === "feature_request_created" || "issue_identifier" in output
  );
}

export function isNoResultsOutput(
  output: FeatureRequestOutput,
): output is NoResultsResponse {
  return (
    output.type === "no_results" ||
    ("suggestions" in output && !("error" in output))
  );
}

export function isErrorOutput(
  output: FeatureRequestOutput,
): output is ErrorResponse {
  return output.type === "error" || "error" in output;
}

/* ------------------------------------------------------------------ */
/* Accordion metadata */
/* ------------------------------------------------------------------ */

export function getAccordionTitle(
  toolType: FeatureRequestToolType,
  output: FeatureRequestOutput,
): string {
  if (toolType === "tool-search_feature_requests") {
    if (isSearchResultsOutput(output)) return "Feature requests";
    if (isNoResultsOutput(output)) return "No feature requests found";
    return "Feature request search error";
  }
  if (isCreatedOutput(output)) {
    return output.is_new_issue
      ? "Feature request created"
      : "Added to feature request";
  }
  if (isErrorOutput(output)) return "Feature request error";
  return "Feature request";
}

/* ------------------------------------------------------------------ */
/* Animation text */
/* ------------------------------------------------------------------ */

interface AnimationPart {
  type: FeatureRequestToolType;
  state: ToolUIPart["state"];
  input?: unknown;
  output?: unknown;
}

export function getAnimationText(part: AnimationPart): string {
  if (part.type === "tool-search_feature_requests") {
    const query = (part.input as { query?: string } | undefined)?.query?.trim();
    const queryText = query ? ` for "${query}"` : "";

    switch (part.state) {
      case "input-streaming":
      case "input-available":
        return `Searching feature requests${queryText}`;
      case "output-available": {
        const output = parseOutput(part.output);
        if (!output) return `Searching feature requests${queryText}`;
        if (isSearchResultsOutput(output)) {
          return `Found ${output.count} feature request${output.count === 1 ? "" : "s"}${queryText}`;
        }
        if (isNoResultsOutput(output))
          return `No feature requests found${queryText}`;
        return `Error searching feature requests${queryText}`;
      }
      case "output-error":
        return `Error searching feature requests${queryText}`;
      default:
        return "Searching feature requests";
    }
  }

  // create_feature_request
  const title = (part.input as { title?: string } | undefined)?.title?.trim();
  const titleText = title ? ` "${title}"` : "";

  switch (part.state) {
    case "input-streaming":
    case "input-available":
      return `Creating feature request${titleText}`;
    case "output-available": {
      const output = parseOutput(part.output);
      if (!output) return `Creating feature request${titleText}`;
      if (isCreatedOutput(output)) {
        return output.is_new_issue
          ? "Feature request created"
          : "Added to existing feature request";
      }
      if (isErrorOutput(output)) return "Error creating feature request";
      return `Created feature request${titleText}`;
    }
    case "output-error":
      return "Error creating feature request";
    default:
      return "Creating feature request";
  }
}

/* ------------------------------------------------------------------ */
/* Icons */
/* ------------------------------------------------------------------ */

export function ToolIcon({
  toolType,
  isStreaming,
  isError,
}: {
  toolType: FeatureRequestToolType;
  isStreaming?: boolean;
  isError?: boolean;
}) {
  const IconComponent =
    toolType === "tool-create_feature_request"
      ? PlusCircleIcon
      : MagnifyingGlassIcon;

  return (
    <IconComponent
      size={14}
      weight="regular"
      className={
        isError
          ? "text-red-500"
          : isStreaming
            ? "text-neutral-500"
            : "text-neutral-400"
      }
    />
  );
}

export function AccordionIcon({
  toolType,
}: {
  toolType: FeatureRequestToolType;
}) {
  const IconComponent =
    toolType === "tool-create_feature_request"
      ? CheckCircleIcon
      : LightbulbIcon;
  return <IconComponent size={32} weight="light" />;
}
@@ -2,8 +2,14 @@

import type { ToolUIPart } from "ai";
import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
import { ContentMessage } from "../../components/ToolAccordion/AccordionContent";
import {
  ContentGrid,
  ContentHint,
  ContentMessage,
} from "../../components/ToolAccordion/AccordionContent";
import { MiniGame } from "../CreateAgent/components/MiniGame/MiniGame";
import {
  getAccordionMeta,
  getAnimationText,
@@ -60,6 +66,21 @@ export function RunAgentTool({ part }: Props) {
        />
      </div>

      {isStreaming && !output && (
        <ToolAccordion
          icon={<OrbitLoader size={32} />}
          title="Running agent, this may take a few minutes. Play while you wait."
          expanded={true}
        >
          <ContentGrid>
            <MiniGame />
            <ContentHint>
              This could take a few minutes — play while you wait!
            </ContentHint>
          </ContentGrid>
        </ToolAccordion>
      )}

      {hasExpandableContent && output && (
        <ToolAccordion {...getAccordionMeta(output)}>
          {isRunAgentExecutionStartedOutput(output) && (

@@ -10495,7 +10495,9 @@
        "operation_started",
        "operation_pending",
        "operation_in_progress",
        "input_validation_error"
        "input_validation_error",
        "feature_request_search",
        "feature_request_created"
      ],
      "title": "ResponseType",
      "description": "Types of tool responses."

165
plans/SECRT-1950-claude-ci-optimizations.md
Normal file
@@ -0,0 +1,165 @@
# Implementation Plan: SECRT-1950 - Apply E2E CI Optimizations to Claude Code Workflows

## Ticket
[SECRT-1950](https://linear.app/autogpt/issue/SECRT-1950)

## Summary
Apply Pwuts's CI performance optimizations from PR #12090 to Claude Code workflows.

## Reference PR
https://github.com/Significant-Gravitas/AutoGPT/pull/12090

---

## Analysis

### Current State (claude.yml)

**pnpm caching (lines 104-118):**
```yaml
- name: Set up Node.js
  uses: actions/setup-node@v6
  with:
    node-version: "22"

- name: Enable corepack
  run: corepack enable

- name: Set pnpm store directory
  run: |
    pnpm config set store-dir ~/.pnpm-store
    echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV

- name: Cache frontend dependencies
  uses: actions/cache@v5
  with:
    path: ~/.pnpm-store
    key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
    restore-keys: |
      ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
      ${{ runner.os }}-pnpm-
```

**Docker setup (lines 134-165):**
- Uses `docker-buildx-action@v3`
- Has manual Docker image caching via `actions/cache`
- Runs `docker compose up` without buildx bake optimization (a rough sketch of this pattern follows below)
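For reference, this kind of manual layer caching typically looks roughly like the sketch below. Treat it as illustrative only: the step names and cache keys are assumptions, not copied from `claude.yml`; only the use of `actions/cache`, the `/tmp/.buildx-cache` path, and the plain `docker compose build`/`up` flow come from this plan.

```yaml
# Illustrative sketch of the pre-optimization pattern; details may differ in claude.yml.
- name: Set up Docker Buildx
  uses: docker/setup-buildx-action@v3

- name: Cache Docker layers # manual layer cache on the runner filesystem
  uses: actions/cache@v5
  with:
    path: /tmp/.buildx-cache
    key: ${{ runner.os }}-buildx-${{ github.sha }} # key shown here is an assumption
    restore-keys: |
      ${{ runner.os }}-buildx-

- name: Build and start services
  run: |
    docker compose build
    docker compose up -d
```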
### Pwuts's Optimizations (PR #12090)

1. **Simplified pnpm caching** - Use `setup-node` built-in cache:
   ```yaml
   - name: Enable corepack
     run: corepack enable

   - name: Set up Node
     uses: actions/setup-node@v6
     with:
       node-version: "22.18.0"
       cache: "pnpm"
       cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
   ```

2. **Docker build caching via buildx bake**:
   ```yaml
   - name: Set up Docker Buildx
     uses: docker/setup-buildx-action@v3
     with:
       driver: docker-container
       driver-opts: network=host

   - name: Expose GHA cache to docker buildx CLI
     uses: crazy-max/ghaction-github-runtime@v3

   - name: Build Docker images (with cache)
     run: |
       pip install pyyaml
       docker compose -f docker-compose.yml config > docker-compose.resolved.yml
       python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
         --source docker-compose.resolved.yml \
         --cache-from "type=gha" \
         --cache-to "type=gha,mode=max" \
         ...
       docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
   ```

---

## Proposed Changes

### 1. Update pnpm caching in `claude.yml`

**Before:**
- Manual cache key generation
- Separate `actions/cache` step
- Manual pnpm store directory config

**After:**
- Use `setup-node` built-in `cache: "pnpm"` option
- Remove manual cache step
- Keep `corepack enable` before `setup-node` (see the sketch below)
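A minimal sketch of what the replacement could look like in `claude.yml`, combining the existing Node 22 setup with the built-in pnpm cache shown above (exact step names and ordering are illustrative until the PR is written):

```yaml
- name: Enable corepack
  run: corepack enable

- name: Set up Node.js
  uses: actions/setup-node@v6
  with:
    node-version: "22"
    cache: "pnpm"
    cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
```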
### 2. Update Docker build in `claude.yml`

**Before:**
- Manual Docker layer caching via `actions/cache` with `/tmp/.buildx-cache`
- Simple `docker compose build`

**After:**
- Use `crazy-max/ghaction-github-runtime@v3` to expose GHA cache
- Use `docker-ci-fix-compose-build-cache.py` script
- Build with `docker buildx bake`

### 3. Apply same changes to other Claude workflows

- `claude-dependabot.yml` - Check if it has similar patterns
- `claude-ci-failure-auto-fix.yml` - Check if it has similar patterns
- `copilot-setup-steps.yml` - Reusable workflow, may be the source of truth

---

## Files to Modify

1. `.github/workflows/claude.yml`
2. `.github/workflows/claude-dependabot.yml` (if applicable)
3. `.github/workflows/claude-ci-failure-auto-fix.yml` (if applicable)

## Dependencies

- PR #12090 must be merged first (provides the `docker-ci-fix-compose-build-cache.py` script)
- Backend Dockerfile optimizations (already in PR #12090)

---

## Test Plan

1. Create PR with changes
2. Trigger Claude workflow manually or via `@claude` mention on a test issue
3. Compare CI runtime before/after
4. Verify Claude agent still works correctly (can checkout, build, run tests)

---

## Risk Assessment

**Low risk:**
- These are CI infrastructure changes, not code changes
- If caching fails, builds fall back to uncached (slower but works)
- Changes mirror proven patterns from PR #12090

---

## Questions for Reviewer

1. Should we wait for PR #12090 to merge before creating this PR?
2. Does `copilot-setup-steps.yml` need updating, or is it a separate concern?
3. Any concerns about cache key collisions between frontend E2E and Claude workflows?

---

## Verified

- ✅ **`claude-dependabot.yml`**: Has same pnpm caching pattern as `claude.yml` (manual `actions/cache`) — NEEDS UPDATE
- ✅ **`claude-ci-failure-auto-fix.yml`**: Simple workflow with no pnpm or Docker caching — NO CHANGES NEEDED
- ✅ **Script path**: `docker-ci-fix-compose-build-cache.py` will be at `.github/workflows/scripts/` after PR #12090 merges
- ✅ **Test seed caching**: NOT APPLICABLE — Claude workflows spin up a dev environment but don't run E2E tests with pre-seeded data. The seed caching in PR #12090 is specific to the frontend E2E test suite, which needs consistent test data. Claude just needs the services running.