From 695a185fa1118f2948f7388bd70d0114cdaebc30 Mon Sep 17 00:00:00 2001 From: Otto Date: Thu, 12 Feb 2026 12:46:29 +0000 Subject: [PATCH 1/6] fix(frontend): remove fixed min-height from CoPilot message container (#12091) ## Summary Removes the `min-h-screen` class from `ConversationContent` in ChatMessagesContainer, which was causing fixed-height layout issues in the CoPilot chat interface. ## Changes - Removed `min-h-screen` from ConversationContent className ## Linear Fixes [SECRT-1944](https://linear.app/autogpt/issue/SECRT-1944)

Greptile Overview

Greptile Summary

Removes the `min-h-screen` (100vh) class from `ConversationContent` that was causing the chat message container to enforce a minimum viewport height. The parent container already handles height constraints with `h-full min-h-0` and flexbox layout, so the fixed minimum height was creating layout conflicts. The component now properly grows within its flex container using `flex-1`.

Confidence Score: 5/5

- This PR is safe to merge with minimal risk - The change removes a single problematic CSS class that was causing fixed-height layout issues. The parent container already handles height constraints properly with flexbox, and removing min-h-screen allows the component to size correctly within its flex parent. This is a targeted, low-risk bug fix with no logic changes. - No files require special attention
--- .../components/ChatMessagesContainer/ChatMessagesContainer.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatMessagesContainer/ChatMessagesContainer.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatMessagesContainer/ChatMessagesContainer.tsx index fbe1c03d1d..71ade81a9f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatMessagesContainer/ChatMessagesContainer.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatMessagesContainer/ChatMessagesContainer.tsx @@ -159,7 +159,7 @@ export const ChatMessagesContainer = ({ return ( - + {isLoading && messages.length === 0 && (
From 4f6055f4942f30414206f527e6bc40de43239087 Mon Sep 17 00:00:00 2001 From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com> Date: Thu, 12 Feb 2026 18:27:06 +0530 Subject: [PATCH 2/6] refactor(frontend): remove default expiration date from API key credentials form (#12092) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes 🏗️ Removed the default expiration date for API keys in the credentials modal. Previously, API keys were set to expire the next day by default, but now the expiration date field starts empty, allowing users to explicitly choose whether they want to set an expiration date. ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Open the API key credentials modal and verify the expiration date field is empty by default - [x] Test creating an API key with and without an expiration date - [x] Verify both scenarios work correctly

Greptile Overview

Greptile Summary

Removed the default expiration date for API key credentials in the credentials modal. Previously, API keys were automatically set to expire the next day at midnight. Now the expiration date field starts empty, allowing users to explicitly choose whether to set an expiration. - Removed `getDefaultExpirationDate()` helper function that calculated tomorrow's date - Changed default `expiresAt` value from calculated date to empty string - Backend already supports optional expiration (`expires_at?: number`), so no backend changes needed - Form submission correctly handles empty expiration by passing `undefined` to the API

Confidence Score: 5/5

- This PR is safe to merge with minimal risk - The changes are straightforward and well-contained. The refactor removes a helper function and changes a default value. The backend API already supports optional expiration dates, and the form submission logic correctly handles empty values by passing undefined. The change improves UX by not forcing a default expiration date on users. - No files require special attention
--- .../APIKeyCredentialsModal.tsx | 8 ++- .../useAPIKeyCredentialsModal.ts | 51 +++++++++---------- 2 files changed, 31 insertions(+), 28 deletions(-) diff --git a/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/APIKeyCredentialsModal/APIKeyCredentialsModal.tsx b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/APIKeyCredentialsModal/APIKeyCredentialsModal.tsx index 90f6c0ff70..1c455863dd 100644 --- a/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/APIKeyCredentialsModal/APIKeyCredentialsModal.tsx +++ b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/APIKeyCredentialsModal/APIKeyCredentialsModal.tsx @@ -30,6 +30,7 @@ export function APIKeyCredentialsModal({ const { form, isLoading, + isSubmitting, supportsApiKey, providerName, schemaDescription, @@ -138,7 +139,12 @@ export function APIKeyCredentialsModal({ /> )} /> - diff --git a/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/APIKeyCredentialsModal/useAPIKeyCredentialsModal.ts b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/APIKeyCredentialsModal/useAPIKeyCredentialsModal.ts index 72599a2e79..1f3d4c9085 100644 --- a/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/APIKeyCredentialsModal/useAPIKeyCredentialsModal.ts +++ b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/APIKeyCredentialsModal/useAPIKeyCredentialsModal.ts @@ -4,6 +4,7 @@ import { CredentialsMetaInput, } from "@/lib/autogpt-server-api/types"; import { zodResolver } from "@hookform/resolvers/zod"; +import { useState } from "react"; import { useForm, type UseFormReturn } from "react-hook-form"; import { z } from "zod"; @@ -26,6 +27,7 @@ export function useAPIKeyCredentialsModal({ }: Args): { form: UseFormReturn; isLoading: boolean; + isSubmitting: boolean; supportsApiKey: boolean; provider?: string; 
providerName?: string; @@ -33,6 +35,7 @@ export function useAPIKeyCredentialsModal({ onSubmit: (values: APIKeyFormValues) => Promise; } { const credentials = useCredentials(schema, siblingInputs); + const [isSubmitting, setIsSubmitting] = useState(false); const formSchema = z.object({ apiKey: z.string().min(1, "API Key is required"), @@ -40,48 +43,42 @@ export function useAPIKeyCredentialsModal({ expiresAt: z.string().optional(), }); - function getDefaultExpirationDate(): string { - const tomorrow = new Date(); - tomorrow.setDate(tomorrow.getDate() + 1); - tomorrow.setHours(0, 0, 0, 0); - const year = tomorrow.getFullYear(); - const month = String(tomorrow.getMonth() + 1).padStart(2, "0"); - const day = String(tomorrow.getDate()).padStart(2, "0"); - const hours = String(tomorrow.getHours()).padStart(2, "0"); - const minutes = String(tomorrow.getMinutes()).padStart(2, "0"); - return `${year}-${month}-${day}T${hours}:${minutes}`; - } - const form = useForm({ resolver: zodResolver(formSchema), defaultValues: { apiKey: "", title: "", - expiresAt: getDefaultExpirationDate(), + expiresAt: "", }, }); async function onSubmit(values: APIKeyFormValues) { if (!credentials || credentials.isLoading) return; - const expiresAt = values.expiresAt - ? new Date(values.expiresAt).getTime() / 1000 - : undefined; - const newCredentials = await credentials.createAPIKeyCredentials({ - api_key: values.apiKey, - title: values.title, - expires_at: expiresAt, - }); - onCredentialsCreate({ - provider: credentials.provider, - id: newCredentials.id, - type: "api_key", - title: newCredentials.title, - }); + setIsSubmitting(true); + try { + const expiresAt = values.expiresAt + ? 
new Date(values.expiresAt).getTime() / 1000 + : undefined; + const newCredentials = await credentials.createAPIKeyCredentials({ + api_key: values.apiKey, + title: values.title, + expires_at: expiresAt, + }); + onCredentialsCreate({ + provider: credentials.provider, + id: newCredentials.id, + type: "api_key", + title: newCredentials.title, + }); + } finally { + setIsSubmitting(false); + } } return { form, isLoading: !credentials || credentials.isLoading, + isSubmitting, supportsApiKey: !!credentials?.supportsApiKey, provider: credentials?.provider, providerName: From b8b6c9de2322cf083e61670ee8625d4abb2d8e19 Mon Sep 17 00:00:00 2001 From: Swifty Date: Thu, 12 Feb 2026 16:38:17 +0100 Subject: [PATCH 3/6] added feature request tooling --- .../api/features/chat/tools/__init__.py | 4 + .../features/chat/tools/feature_requests.py | 369 ++++++++++++++ .../backend/api/features/chat/tools/models.py | 34 ++ .../backend/backend/util/settings.py | 3 + .../backend/test_linear_customers.py | 468 ++++++++++++++++++ .../ChatMessagesContainer.tsx | 18 + .../(platform)/copilot/styleguide/page.tsx | 235 +++++++++ .../tools/FeatureRequests/FeatureRequests.tsx | 240 +++++++++ .../copilot/tools/FeatureRequests/helpers.tsx | 271 ++++++++++ .../frontend/src/app/api/openapi.json | 4 +- 10 files changed, 1645 insertions(+), 1 deletion(-) create mode 100644 autogpt_platform/backend/backend/api/features/chat/tools/feature_requests.py create mode 100644 autogpt_platform/backend/test_linear_customers.py create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/FeatureRequests/FeatureRequests.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/FeatureRequests/helpers.tsx diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py b/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py index dcbc35ef37..350776081a 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py +++ 
b/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py @@ -12,6 +12,7 @@ from .base import BaseTool from .create_agent import CreateAgentTool from .customize_agent import CustomizeAgentTool from .edit_agent import EditAgentTool +from .feature_requests import CreateFeatureRequestTool, SearchFeatureRequestsTool from .find_agent import FindAgentTool from .find_block import FindBlockTool from .find_library_agent import FindLibraryAgentTool @@ -45,6 +46,9 @@ TOOL_REGISTRY: dict[str, BaseTool] = { "view_agent_output": AgentOutputTool(), "search_docs": SearchDocsTool(), "get_doc_page": GetDocPageTool(), + # Feature request tools + "search_feature_requests": SearchFeatureRequestsTool(), + "create_feature_request": CreateFeatureRequestTool(), # Workspace tools for CoPilot file operations "list_workspace_files": ListWorkspaceFilesTool(), "read_workspace_file": ReadWorkspaceFileTool(), diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/feature_requests.py b/autogpt_platform/backend/backend/api/features/chat/tools/feature_requests.py new file mode 100644 index 0000000000..5e06d8b4b2 --- /dev/null +++ b/autogpt_platform/backend/backend/api/features/chat/tools/feature_requests.py @@ -0,0 +1,369 @@ +"""Feature request tools - search and create feature requests via Linear.""" + +import logging +from typing import Any + +from pydantic import SecretStr + +from backend.api.features.chat.model import ChatSession +from backend.api.features.chat.tools.base import BaseTool +from backend.api.features.chat.tools.models import ( + ErrorResponse, + FeatureRequestCreatedResponse, + FeatureRequestInfo, + FeatureRequestSearchResponse, + NoResultsResponse, + ToolResponseBase, +) +from backend.blocks.linear._api import LinearClient +from backend.data.model import APIKeyCredentials +from backend.util.settings import Settings + +logger = logging.getLogger(__name__) + +# Target project and team IDs in our Linear workspace +FEATURE_REQUEST_PROJECT_ID = 
"13f066f3-f639-4a67-aaa3-31483ebdf8cd" +TEAM_ID = "557fd3d5-087e-43a9-83e3-476c8313ce49" + +MAX_SEARCH_RESULTS = 10 + +# GraphQL queries/mutations +SEARCH_ISSUES_QUERY = """ +query SearchFeatureRequests($term: String!, $filter: IssueFilter, $first: Int) { + searchIssues(term: $term, filter: $filter, first: $first) { + nodes { + id + identifier + title + description + } + } +} +""" + +CUSTOMER_UPSERT_MUTATION = """ +mutation CustomerUpsert($input: CustomerUpsertInput!) { + customerUpsert(input: $input) { + success + customer { + id + name + externalIds + } + } +} +""" + +ISSUE_CREATE_MUTATION = """ +mutation IssueCreate($input: IssueCreateInput!) { + issueCreate(input: $input) { + success + issue { + id + identifier + title + url + } + } +} +""" + +CUSTOMER_NEED_CREATE_MUTATION = """ +mutation CustomerNeedCreate($input: CustomerNeedCreateInput!) { + customerNeedCreate(input: $input) { + success + need { + id + body + customer { + id + name + } + issue { + id + identifier + title + url + } + } + } +} +""" + + +_settings: Settings | None = None + + +def _get_settings() -> Settings: + global _settings + if _settings is None: + _settings = Settings() + return _settings + + +def _get_linear_client() -> LinearClient: + """Create a Linear client using the system API key from settings.""" + api_key = _get_settings().secrets.linear_api_key + if not api_key: + raise RuntimeError("LINEAR_API_KEY secret is not configured") + credentials = APIKeyCredentials( + id="system-linear", + provider="linear", + api_key=SecretStr(api_key), + title="System Linear API Key", + ) + return LinearClient(credentials=credentials) + + +class SearchFeatureRequestsTool(BaseTool): + """Tool for searching existing feature requests in Linear.""" + + @property + def name(self) -> str: + return "search_feature_requests" + + @property + def description(self) -> str: + return ( + "Search existing feature requests to check if a similar request " + "already exists before creating a new one. 
Returns matching feature " + "requests with their ID, title, and description." + ) + + @property + def parameters(self) -> dict[str, Any]: + return { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Search term to find matching feature requests.", + }, + }, + "required": ["query"], + } + + @property + def requires_auth(self) -> bool: + return True + + async def _execute( + self, + user_id: str | None, + session: ChatSession, + **kwargs, + ) -> ToolResponseBase: + query = kwargs.get("query", "").strip() + session_id = session.session_id if session else None + + if not query: + return ErrorResponse( + message="Please provide a search query.", + error="Missing query parameter", + session_id=session_id, + ) + + client = _get_linear_client() + data = await client.query( + SEARCH_ISSUES_QUERY, + { + "term": query, + "filter": { + "project": {"id": {"eq": FEATURE_REQUEST_PROJECT_ID}}, + }, + "first": MAX_SEARCH_RESULTS, + }, + ) + + nodes = data.get("searchIssues", {}).get("nodes", []) + + if not nodes: + return NoResultsResponse( + message=f"No feature requests found matching '{query}'.", + suggestions=[ + "Try different keywords", + "Use broader search terms", + "You can create a new feature request if none exists", + ], + session_id=session_id, + ) + + results = [ + FeatureRequestInfo( + id=node["id"], + identifier=node["identifier"], + title=node["title"], + description=node.get("description"), + ) + for node in nodes + ] + + return FeatureRequestSearchResponse( + message=f"Found {len(results)} feature request(s) matching '{query}'.", + results=results, + count=len(results), + query=query, + session_id=session_id, + ) + + +class CreateFeatureRequestTool(BaseTool): + """Tool for creating feature requests (or adding needs to existing ones).""" + + @property + def name(self) -> str: + return "create_feature_request" + + @property + def description(self) -> str: + return ( + "Create a new feature request or add a customer need to an 
existing one. " + "Always search first with search_feature_requests to avoid duplicates. " + "If a matching request exists, pass its ID as existing_issue_id to add " + "the user's need to it instead of creating a duplicate." + ) + + @property + def parameters(self) -> dict[str, Any]: + return { + "type": "object", + "properties": { + "title": { + "type": "string", + "description": "Title for the feature request.", + }, + "description": { + "type": "string", + "description": "Detailed description of what the user wants and why.", + }, + "existing_issue_id": { + "type": "string", + "description": ( + "If adding a need to an existing feature request, " + "provide its Linear issue ID (from search results). " + "Omit to create a new feature request." + ), + }, + }, + "required": ["title", "description"], + } + + @property + def requires_auth(self) -> bool: + return True + + async def _find_or_create_customer( + self, client: LinearClient, user_id: str + ) -> dict: + """Find existing customer by user_id or create a new one via upsert.""" + data = await client.mutate( + CUSTOMER_UPSERT_MUTATION, + { + "input": { + "name": user_id, + "externalId": user_id, + }, + }, + ) + result = data.get("customerUpsert", {}) + if not result.get("success"): + raise RuntimeError(f"Failed to upsert customer: {data}") + return result["customer"] + + async def _execute( + self, + user_id: str | None, + session: ChatSession, + **kwargs, + ) -> ToolResponseBase: + title = kwargs.get("title", "").strip() + description = kwargs.get("description", "").strip() + existing_issue_id = kwargs.get("existing_issue_id") + session_id = session.session_id if session else None + + if not title or not description: + return ErrorResponse( + message="Both title and description are required.", + error="Missing required parameters", + session_id=session_id, + ) + + if not user_id: + return ErrorResponse( + message="Authentication required to create feature requests.", + error="Missing user_id", + 
session_id=session_id, + ) + + client = _get_linear_client() + + # Step 1: Find or create customer for this user + customer = await self._find_or_create_customer(client, user_id) + customer_id = customer["id"] + customer_name = customer["name"] + + # Step 2: Create or reuse issue + if existing_issue_id: + # Add need to existing issue - we still need the issue details for response + is_new_issue = False + issue_id = existing_issue_id + else: + # Create new issue in the feature requests project + data = await client.mutate( + ISSUE_CREATE_MUTATION, + { + "input": { + "title": title, + "description": description, + "teamId": TEAM_ID, + "projectId": FEATURE_REQUEST_PROJECT_ID, + }, + }, + ) + result = data.get("issueCreate", {}) + if not result.get("success"): + return ErrorResponse( + message="Failed to create feature request issue.", + error=str(data), + session_id=session_id, + ) + issue = result["issue"] + issue_id = issue["id"] + is_new_issue = True + + # Step 3: Create customer need on the issue + data = await client.mutate( + CUSTOMER_NEED_CREATE_MUTATION, + { + "input": { + "customerId": customer_id, + "issueId": issue_id, + "body": description, + "priority": 0, + }, + }, + ) + need_result = data.get("customerNeedCreate", {}) + if not need_result.get("success"): + return ErrorResponse( + message="Failed to attach customer need to the feature request.", + error=str(data), + session_id=session_id, + ) + + need = need_result["need"] + issue_info = need["issue"] + + return FeatureRequestCreatedResponse( + message=( + f"{'Created new feature request' if is_new_issue else 'Added your request to existing feature request'} " + f"[{issue_info['identifier']}] {issue_info['title']}." 
+ ), + issue_id=issue_info["id"], + issue_identifier=issue_info["identifier"], + issue_title=issue_info["title"], + issue_url=issue_info.get("url", ""), + is_new_issue=is_new_issue, + customer_name=customer_name, + session_id=session_id, + ) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/models.py b/autogpt_platform/backend/backend/api/features/chat/tools/models.py index 69c8c6c684..d420b289dc 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/models.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/models.py @@ -40,6 +40,9 @@ class ResponseType(str, Enum): OPERATION_IN_PROGRESS = "operation_in_progress" # Input validation INPUT_VALIDATION_ERROR = "input_validation_error" + # Feature request types + FEATURE_REQUEST_SEARCH = "feature_request_search" + FEATURE_REQUEST_CREATED = "feature_request_created" # Base response model @@ -421,3 +424,34 @@ class AsyncProcessingResponse(ToolResponseBase): status: str = "accepted" # Must be "accepted" for detection operation_id: str | None = None task_id: str | None = None + + +# Feature request models +class FeatureRequestInfo(BaseModel): + """Information about a feature request issue.""" + + id: str + identifier: str + title: str + description: str | None = None + + +class FeatureRequestSearchResponse(ToolResponseBase): + """Response for search_feature_requests tool.""" + + type: ResponseType = ResponseType.FEATURE_REQUEST_SEARCH + results: list[FeatureRequestInfo] + count: int + query: str + + +class FeatureRequestCreatedResponse(ToolResponseBase): + """Response for create_feature_request tool.""" + + type: ResponseType = ResponseType.FEATURE_REQUEST_CREATED + issue_id: str + issue_identifier: str + issue_title: str + issue_url: str + is_new_issue: bool # False if added to existing + customer_name: str diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index 50b7428160..d539832fb0 100644 --- 
a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -658,6 +658,9 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings): mem0_api_key: str = Field(default="", description="Mem0 API key") elevenlabs_api_key: str = Field(default="", description="ElevenLabs API key") + linear_api_key: str = Field( + default="", description="Linear API key for system-level operations" + ) linear_client_id: str = Field(default="", description="Linear client ID") linear_client_secret: str = Field(default="", description="Linear client secret") diff --git a/autogpt_platform/backend/test_linear_customers.py b/autogpt_platform/backend/test_linear_customers.py new file mode 100644 index 0000000000..6e6f3e48fc --- /dev/null +++ b/autogpt_platform/backend/test_linear_customers.py @@ -0,0 +1,468 @@ +""" +Test script for Linear GraphQL API - Customer Requests operations. + +Tests the exact GraphQL calls needed for: +1. search_feature_requests - search issues in the Customer Feature Requests project +2. add_feature_request - upsert customer + create customer need on issue + +Requires LINEAR_API_KEY in backend/.env +Generate one at: https://linear.app/settings/api +""" + +import json +import os +import sys + +import httpx +from dotenv import load_dotenv + +load_dotenv() + +LINEAR_API_URL = "https://api.linear.app/graphql" +API_KEY = os.getenv("LINEAR_API_KEY") + +# Target project for feature requests +FEATURE_REQUEST_PROJECT_ID = "13f066f3-f639-4a67-aaa3-31483ebdf8cd" +# Team: Internal +TEAM_ID = "557fd3d5-087e-43a9-83e3-476c8313ce49" + +if not API_KEY: + print("ERROR: LINEAR_API_KEY not found in .env") + print("Generate a personal API key at: https://linear.app/settings/api") + print("Then add LINEAR_API_KEY=lin_api_... 
to backend/.env") + sys.exit(1) + +HEADERS = { + "Authorization": API_KEY, + "Content-Type": "application/json", +} + + +def graphql(query: str, variables: dict | None = None) -> dict: + """Execute a GraphQL query against Linear API.""" + payload = {"query": query} + if variables: + payload["variables"] = variables + + resp = httpx.post(LINEAR_API_URL, json=payload, headers=HEADERS, timeout=30) + if resp.status_code != 200: + print(f"HTTP {resp.status_code}: {resp.text[:500]}") + resp.raise_for_status() + data = resp.json() + + if "errors" in data: + print(f"GraphQL Errors: {json.dumps(data['errors'], indent=2)}") + + return data + + +# --------------------------------------------------------------------------- +# QUERIES +# --------------------------------------------------------------------------- + +# Search issues within the feature requests project by title/description +SEARCH_ISSUES_IN_PROJECT = """ +query SearchFeatureRequests($filter: IssueFilter!, $first: Int) { + issues(filter: $filter, first: $first) { + nodes { + id + identifier + title + description + url + state { + name + type + } + project { + id + name + } + labels { + nodes { + name + } + } + } + } +} +""" + +# Get issue with its customer needs +GET_ISSUE_WITH_NEEDS = """ +query GetIssueWithNeeds($id: String!) { + issue(id: $id) { + id + identifier + title + url + needs { + nodes { + id + body + priority + customer { + id + name + domains + externalIds + } + } + } + } +} +""" + +# Search customers +SEARCH_CUSTOMERS = """ +query SearchCustomers($filter: CustomerFilter, $first: Int) { + customers(filter: $filter, first: $first) { + nodes { + id + name + domains + externalIds + revenue + size + status { + name + } + tier { + name + } + } + } +} +""" + +# --------------------------------------------------------------------------- +# MUTATIONS +# --------------------------------------------------------------------------- + +CUSTOMER_UPSERT = """ +mutation CustomerUpsert($input: CustomerUpsertInput!) 
{ + customerUpsert(input: $input) { + success + customer { + id + name + domains + externalIds + } + } +} +""" + +CUSTOMER_NEED_CREATE = """ +mutation CustomerNeedCreate($input: CustomerNeedCreateInput!) { + customerNeedCreate(input: $input) { + success + need { + id + body + priority + customer { + id + name + } + issue { + id + identifier + title + } + } + } +} +""" + +ISSUE_CREATE = """ +mutation IssueCreate($input: IssueCreateInput!) { + issueCreate(input: $input) { + success + issue { + id + identifier + title + url + } + } +} +""" + + +# --------------------------------------------------------------------------- +# TESTS +# --------------------------------------------------------------------------- + + +def test_1_search_feature_requests(): + """Search for feature requests in the target project by keyword.""" + print("\n" + "=" * 60) + print("TEST 1: Search feature requests in project by keyword") + print("=" * 60) + + search_term = "agent" + result = graphql( + SEARCH_ISSUES_IN_PROJECT, + { + "filter": { + "project": {"id": {"eq": FEATURE_REQUEST_PROJECT_ID}}, + "or": [ + {"title": {"containsIgnoreCase": search_term}}, + {"description": {"containsIgnoreCase": search_term}}, + ], + }, + "first": 5, + }, + ) + + issues = result.get("data", {}).get("issues", {}).get("nodes", []) + for issue in issues: + proj = issue.get("project") or {} + print(f"\n [{issue['identifier']}] {issue['title']}") + print(f" Project: {proj.get('name', 'N/A')}") + print(f" State: {issue['state']['name']}") + print(f" URL: {issue['url']}") + + print(f"\n Found {len(issues)} issues matching '{search_term}'") + return issues + + +def test_2_list_all_in_project(): + """List all issues in the feature requests project.""" + print("\n" + "=" * 60) + print("TEST 2: List all issues in Customer Feature Requests project") + print("=" * 60) + + result = graphql( + SEARCH_ISSUES_IN_PROJECT, + { + "filter": { + "project": {"id": {"eq": FEATURE_REQUEST_PROJECT_ID}}, + }, + "first": 10, + }, + ) + + 
issues = result.get("data", {}).get("issues", {}).get("nodes", []) + if not issues: + print(" No issues in project yet (empty project)") + for issue in issues: + print(f"\n [{issue['identifier']}] {issue['title']}") + print(f" State: {issue['state']['name']}") + + print(f"\n Total: {len(issues)} issues") + return issues + + +def test_3_search_customers(): + """List existing customers.""" + print("\n" + "=" * 60) + print("TEST 3: List customers") + print("=" * 60) + + result = graphql(SEARCH_CUSTOMERS, {"first": 10}) + customers = result.get("data", {}).get("customers", {}).get("nodes", []) + + if not customers: + print(" No customers exist yet") + for c in customers: + status = c.get("status") or {} + tier = c.get("tier") or {} + print(f"\n [{c['id'][:8]}...] {c['name']}") + print(f" Domains: {c.get('domains', [])}") + print(f" External IDs: {c.get('externalIds', [])}") + print( + f" Status: {status.get('name', 'N/A')}, Tier: {tier.get('name', 'N/A')}" + ) + + print(f"\n Total: {len(customers)} customers") + return customers + + +def test_4_customer_upsert(): + """Upsert a test customer.""" + print("\n" + "=" * 60) + print("TEST 4: Customer upsert (find-or-create)") + print("=" * 60) + + result = graphql( + CUSTOMER_UPSERT, + { + "input": { + "name": "Test Customer (API Test)", + "domains": ["test-api-customer.example.com"], + "externalId": "test-customer-001", + } + }, + ) + + upsert = result.get("data", {}).get("customerUpsert", {}) + if upsert.get("success"): + customer = upsert["customer"] + print(f" Success! 
Customer: {customer['name']}") + print(f" ID: {customer['id']}") + print(f" Domains: {customer['domains']}") + print(f" External IDs: {customer['externalIds']}") + return customer + else: + print(f" Failed: {json.dumps(result, indent=2)}") + return None + + +def test_5_create_issue_and_need(customer_id: str): + """Create a new feature request issue and attach a customer need.""" + print("\n" + "=" * 60) + print("TEST 5: Create issue + customer need") + print("=" * 60) + + # Step 1: Create issue in the project + result = graphql( + ISSUE_CREATE, + { + "input": { + "title": "Test Feature Request (API Test - safe to delete)", + "description": "This is a test feature request created via the GraphQL API.", + "teamId": TEAM_ID, + "projectId": FEATURE_REQUEST_PROJECT_ID, + } + }, + ) + + data = result.get("data") + if not data: + print(f" Issue creation failed: {json.dumps(result, indent=2)}") + return None + issue_data = data.get("issueCreate", {}) + if not issue_data.get("success"): + print(f" Issue creation failed: {json.dumps(result, indent=2)}") + return None + + issue = issue_data["issue"] + print(f" Created issue: [{issue['identifier']}] {issue['title']}") + print(f" URL: {issue['url']}") + + # Step 2: Attach customer need + result = graphql( + CUSTOMER_NEED_CREATE, + { + "input": { + "customerId": customer_id, + "issueId": issue["id"], + "body": "Our team really needs this feature for our workflow. 
High priority for us!", + "priority": 0, + } + }, + ) + + need_data = result.get("data", {}).get("customerNeedCreate", {}) + if need_data.get("success"): + need = need_data["need"] + print(f" Attached customer need: {need['id']}") + print(f" Customer: {need['customer']['name']}") + print(f" Body: {need['body'][:80]}") + else: + print(f" Customer need creation failed: {json.dumps(result, indent=2)}") + + # Step 3: Verify by fetching the issue with needs + print("\n Verifying...") + verify = graphql(GET_ISSUE_WITH_NEEDS, {"id": issue["id"]}) + issue_verify = verify.get("data", {}).get("issue", {}) + needs = issue_verify.get("needs", {}).get("nodes", []) + print(f" Issue now has {len(needs)} customer need(s)") + for n in needs: + cust = n.get("customer") or {} + print(f" - {cust.get('name', 'N/A')}: {n.get('body', '')[:60]}") + + return issue + + +def test_6_add_need_to_existing(customer_id: str, issue_id: str): + """Add a customer need to an existing issue (the common case).""" + print("\n" + "=" * 60) + print("TEST 6: Add customer need to existing issue") + print("=" * 60) + + result = graphql( + CUSTOMER_NEED_CREATE, + { + "input": { + "customerId": customer_id, + "issueId": issue_id, + "body": "We also want this! +1 from our organization.", + "priority": 0, + } + }, + ) + + need_data = result.get("data", {}).get("customerNeedCreate", {}) + if need_data.get("success"): + need = need_data["need"] + print(f" Success! 
Need: {need['id']}") + print(f" Customer: {need['customer']['name']}") + print(f" Issue: [{need['issue']['identifier']}] {need['issue']['title']}") + return need + else: + print(f" Failed: {json.dumps(result, indent=2)}") + return None + + +def main(): + print("Linear GraphQL API - Customer Requests Test Suite") + print("=" * 60) + print(f"API URL: {LINEAR_API_URL}") + print(f"API Key: {API_KEY[:10]}...") + print(f"Project: Customer Feature Requests ({FEATURE_REQUEST_PROJECT_ID[:8]}...)") + + # --- Read-only tests --- + test_1_search_feature_requests() + test_2_list_all_in_project() + test_3_search_customers() + + # --- Write tests --- + print("\n" + "=" * 60) + answer = ( + input("Run WRITE tests? (creates test customer + issue + need) [y/N]: ") + .strip() + .lower() + ) + if answer != "y": + print("Skipped write tests.") + print("\nDone!") + return + + customer = test_4_customer_upsert() + if not customer: + print("Customer upsert failed, stopping.") + return + + issue = test_5_create_issue_and_need(customer["id"]) + if not issue: + print("Issue creation failed, stopping.") + return + + # Test adding a second need to the same issue (simulates another customer requesting same feature) + # First upsert a second customer + result = graphql( + CUSTOMER_UPSERT, + { + "input": { + "name": "Second Test Customer", + "domains": ["second-test.example.com"], + "externalId": "test-customer-002", + } + }, + ) + customer2 = result.get("data", {}).get("customerUpsert", {}).get("customer") + if customer2: + test_6_add_need_to_existing(customer2["id"], issue["id"]) + + print("\n" + "=" * 60) + print("All tests complete!") + print( + "Check the project: https://linear.app/autogpt/project/customer-feature-requests-710dcbf8bf4e/issues" + ) + + +if __name__ == "__main__": + main() diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatMessagesContainer/ChatMessagesContainer.tsx 
b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatMessagesContainer/ChatMessagesContainer.tsx index 71ade81a9f..b62e96f58a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatMessagesContainer/ChatMessagesContainer.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatMessagesContainer/ChatMessagesContainer.tsx @@ -15,6 +15,10 @@ import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai"; import { useEffect, useRef, useState } from "react"; import { CreateAgentTool } from "../../tools/CreateAgent/CreateAgent"; import { EditAgentTool } from "../../tools/EditAgent/EditAgent"; +import { + CreateFeatureRequestTool, + SearchFeatureRequestsTool, +} from "../../tools/FeatureRequests/FeatureRequests"; import { FindAgentsTool } from "../../tools/FindAgents/FindAgents"; import { FindBlocksTool } from "../../tools/FindBlocks/FindBlocks"; import { RunAgentTool } from "../../tools/RunAgent/RunAgent"; @@ -254,6 +258,20 @@ export const ChatMessagesContainer = ({ part={part as ToolUIPart} /> ); + case "tool-search_feature_requests": + return ( + + ); + case "tool-create_feature_request": + return ( + + ); default: return null; } diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/styleguide/page.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/styleguide/page.tsx index 6030665f1c..8a35f939ca 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/styleguide/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/styleguide/page.tsx @@ -14,6 +14,10 @@ import { Text } from "@/components/atoms/Text/Text"; import { CopilotChatActionsProvider } from "../components/CopilotChatActionsProvider/CopilotChatActionsProvider"; import { CreateAgentTool } from "../tools/CreateAgent/CreateAgent"; import { EditAgentTool } from "../tools/EditAgent/EditAgent"; +import { + CreateFeatureRequestTool, + SearchFeatureRequestsTool, +} from "../tools/FeatureRequests/FeatureRequests"; 
import { FindAgentsTool } from "../tools/FindAgents/FindAgents"; import { FindBlocksTool } from "../tools/FindBlocks/FindBlocks"; import { RunAgentTool } from "../tools/RunAgent/RunAgent"; @@ -45,6 +49,8 @@ const SECTIONS = [ "Tool: Create Agent", "Tool: Edit Agent", "Tool: View Agent Output", + "Tool: Search Feature Requests", + "Tool: Create Feature Request", "Full Conversation Example", ] as const; @@ -1421,6 +1427,235 @@ export default function StyleguidePage() { + {/* ============================================================= */} + {/* SEARCH FEATURE REQUESTS */} + {/* ============================================================= */} + +
+ + + + + + + + + + + + + + + + + + + + + + + +
+ + {/* ============================================================= */} + {/* CREATE FEATURE REQUEST */} + {/* ============================================================= */} + +
+ + + + + + + + + + + + + + + + + + + + + + + +
+ {/* ============================================================= */} {/* FULL CONVERSATION EXAMPLE */} {/* ============================================================= */} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FeatureRequests/FeatureRequests.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FeatureRequests/FeatureRequests.tsx new file mode 100644 index 0000000000..e14ec69397 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FeatureRequests/FeatureRequests.tsx @@ -0,0 +1,240 @@ +"use client"; + +import type { ToolUIPart } from "ai"; +import { useMemo } from "react"; + +import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation"; +import { + ContentBadge, + ContentCard, + ContentCardDescription, + ContentCardHeader, + ContentCardTitle, + ContentGrid, + ContentLink, + ContentMessage, + ContentSuggestionsList, +} from "../../components/ToolAccordion/AccordionContent"; +import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion"; +import { + AccordionIcon, + getAccordionTitle, + getAnimationText, + getFeatureRequestOutput, + isCreatedOutput, + isErrorOutput, + isNoResultsOutput, + isSearchResultsOutput, + ToolIcon, + type FeatureRequestToolType, +} from "./helpers"; + +export interface FeatureRequestToolPart { + type: FeatureRequestToolType; + toolCallId: string; + state: ToolUIPart["state"]; + input?: unknown; + output?: unknown; +} + +interface Props { + part: FeatureRequestToolPart; +} + +function truncate(text: string, maxChars: number): string { + const trimmed = text.trim(); + if (trimmed.length <= maxChars) return trimmed; + return `${trimmed.slice(0, maxChars).trimEnd()}…`; +} + +export function SearchFeatureRequestsTool({ part }: Props) { + const output = getFeatureRequestOutput(part); + const text = getAnimationText(part); + const isStreaming = + part.state === "input-streaming" || part.state === "input-available"; + const 
isError = + part.state === "output-error" || (!!output && isErrorOutput(output)); + + const normalized = useMemo(() => { + if (!output) return null; + return { title: getAccordionTitle(part.type, output) }; + }, [output, part.type]); + + const isOutputAvailable = part.state === "output-available" && !!output; + + const searchOutput = + isOutputAvailable && output && isSearchResultsOutput(output) + ? output + : null; + const noResultsOutput = + isOutputAvailable && output && isNoResultsOutput(output) ? output : null; + const errorOutput = + isOutputAvailable && output && isErrorOutput(output) ? output : null; + + const hasExpandableContent = + isOutputAvailable && + ((!!searchOutput && searchOutput.count > 0) || + !!noResultsOutput || + !!errorOutput); + + const accordionDescription = + hasExpandableContent && searchOutput + ? `Found ${searchOutput.count} result${searchOutput.count === 1 ? "" : "s"} for "${searchOutput.query}"` + : hasExpandableContent && (noResultsOutput || errorOutput) + ? ((noResultsOutput ?? errorOutput)?.message ?? null) + : null; + + return ( +
+
+ + +
+ + {hasExpandableContent && normalized && ( + } + title={normalized.title} + description={accordionDescription} + > + {searchOutput && ( + + {searchOutput.results.map((r) => ( + + + + {r.identifier} — {r.title} + + + {r.description && ( + + {truncate(r.description, 200)} + + )} + + ))} + + )} + + {noResultsOutput && ( +
+ {noResultsOutput.message} + {noResultsOutput.suggestions && + noResultsOutput.suggestions.length > 0 && ( + + )} +
+ )} + + {errorOutput && ( +
+ {errorOutput.message} + {errorOutput.error && ( + + {errorOutput.error} + + )} +
+ )} +
+ )} +
+ ); +} + +export function CreateFeatureRequestTool({ part }: Props) { + const output = getFeatureRequestOutput(part); + const text = getAnimationText(part); + const isStreaming = + part.state === "input-streaming" || part.state === "input-available"; + const isError = + part.state === "output-error" || (!!output && isErrorOutput(output)); + + const normalized = useMemo(() => { + if (!output) return null; + return { title: getAccordionTitle(part.type, output) }; + }, [output, part.type]); + + const isOutputAvailable = part.state === "output-available" && !!output; + + const createdOutput = + isOutputAvailable && output && isCreatedOutput(output) ? output : null; + const errorOutput = + isOutputAvailable && output && isErrorOutput(output) ? output : null; + + const hasExpandableContent = + isOutputAvailable && (!!createdOutput || !!errorOutput); + + const accordionDescription = + hasExpandableContent && createdOutput + ? `${createdOutput.issue_identifier} — ${createdOutput.issue_title}` + : hasExpandableContent && errorOutput + ? errorOutput.message + : null; + + return ( +
+
+ + +
+ + {hasExpandableContent && normalized && ( + } + title={normalized.title} + description={accordionDescription} + > + {createdOutput && ( + + + View + + ) : undefined + } + > + + {createdOutput.issue_identifier} — {createdOutput.issue_title} + + +
+ + {createdOutput.is_new_issue ? "New" : "Existing"} + +
+ {createdOutput.message} +
+ )} + + {errorOutput && ( +
+ {errorOutput.message} + {errorOutput.error && ( + + {errorOutput.error} + + )} +
+ )} +
+ )} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FeatureRequests/helpers.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FeatureRequests/helpers.tsx new file mode 100644 index 0000000000..ed292faf2b --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FeatureRequests/helpers.tsx @@ -0,0 +1,271 @@ +import { + CheckCircleIcon, + LightbulbIcon, + MagnifyingGlassIcon, + PlusCircleIcon, +} from "@phosphor-icons/react"; +import type { ToolUIPart } from "ai"; + +/* ------------------------------------------------------------------ */ +/* Types (local until API client is regenerated) */ +/* ------------------------------------------------------------------ */ + +interface FeatureRequestInfo { + id: string; + identifier: string; + title: string; + description?: string | null; +} + +export interface FeatureRequestSearchResponse { + type: "feature_request_search"; + message: string; + results: FeatureRequestInfo[]; + count: number; + query: string; +} + +export interface FeatureRequestCreatedResponse { + type: "feature_request_created"; + message: string; + issue_id: string; + issue_identifier: string; + issue_title: string; + issue_url: string; + is_new_issue: boolean; + customer_name: string; +} + +interface NoResultsResponse { + type: "no_results"; + message: string; + suggestions?: string[]; +} + +interface ErrorResponse { + type: "error"; + message: string; + error?: string; +} + +export type FeatureRequestOutput = + | FeatureRequestSearchResponse + | FeatureRequestCreatedResponse + | NoResultsResponse + | ErrorResponse; + +export type FeatureRequestToolType = + | "tool-search_feature_requests" + | "tool-create_feature_request" + | string; + +/* ------------------------------------------------------------------ */ +/* Output parsing */ +/* ------------------------------------------------------------------ */ + +function parseOutput(output: unknown): FeatureRequestOutput | null { + if (!output) 
return null; + if (typeof output === "string") { + const trimmed = output.trim(); + if (!trimmed) return null; + try { + return parseOutput(JSON.parse(trimmed) as unknown); + } catch { + return null; + } + } + if (typeof output === "object") { + const type = (output as { type?: unknown }).type; + if ( + type === "feature_request_search" || + type === "feature_request_created" || + type === "no_results" || + type === "error" + ) { + return output as FeatureRequestOutput; + } + // Fallback structural checks + if ("results" in output && "query" in output) + return output as FeatureRequestSearchResponse; + if ("issue_identifier" in output) + return output as FeatureRequestCreatedResponse; + if ("suggestions" in output && !("error" in output)) + return output as NoResultsResponse; + if ("error" in output || "details" in output) + return output as ErrorResponse; + } + return null; +} + +export function getFeatureRequestOutput( + part: unknown, +): FeatureRequestOutput | null { + if (!part || typeof part !== "object") return null; + return parseOutput((part as { output?: unknown }).output); +} + +/* ------------------------------------------------------------------ */ +/* Type guards */ +/* ------------------------------------------------------------------ */ + +export function isSearchResultsOutput( + output: FeatureRequestOutput, +): output is FeatureRequestSearchResponse { + return ( + output.type === "feature_request_search" || + ("results" in output && "query" in output) + ); +} + +export function isCreatedOutput( + output: FeatureRequestOutput, +): output is FeatureRequestCreatedResponse { + return ( + output.type === "feature_request_created" || "issue_identifier" in output + ); +} + +export function isNoResultsOutput( + output: FeatureRequestOutput, +): output is NoResultsResponse { + return ( + output.type === "no_results" || + ("suggestions" in output && !("error" in output)) + ); +} + +export function isErrorOutput( + output: FeatureRequestOutput, +): output is 
ErrorResponse { + return output.type === "error" || "error" in output; +} + +/* ------------------------------------------------------------------ */ +/* Accordion metadata */ +/* ------------------------------------------------------------------ */ + +export function getAccordionTitle( + toolType: FeatureRequestToolType, + output: FeatureRequestOutput, +): string { + if (toolType === "tool-search_feature_requests") { + if (isSearchResultsOutput(output)) return "Feature requests"; + if (isNoResultsOutput(output)) return "No feature requests found"; + return "Feature request search error"; + } + if (isCreatedOutput(output)) { + return output.is_new_issue + ? "Feature request created" + : "Added to feature request"; + } + if (isErrorOutput(output)) return "Feature request error"; + return "Feature request"; +} + +/* ------------------------------------------------------------------ */ +/* Animation text */ +/* ------------------------------------------------------------------ */ + +interface AnimationPart { + type: FeatureRequestToolType; + state: ToolUIPart["state"]; + input?: unknown; + output?: unknown; +} + +export function getAnimationText(part: AnimationPart): string { + if (part.type === "tool-search_feature_requests") { + const query = (part.input as { query?: string } | undefined)?.query?.trim(); + const queryText = query ? ` for "${query}"` : ""; + + switch (part.state) { + case "input-streaming": + case "input-available": + return `Searching feature requests${queryText}`; + case "output-available": { + const output = parseOutput(part.output); + if (!output) return `Searching feature requests${queryText}`; + if (isSearchResultsOutput(output)) { + return `Found ${output.count} feature request${output.count === 1 ? 
"" : "s"}${queryText}`; + } + if (isNoResultsOutput(output)) + return `No feature requests found${queryText}`; + return `Error searching feature requests${queryText}`; + } + case "output-error": + return `Error searching feature requests${queryText}`; + default: + return "Searching feature requests"; + } + } + + // create_feature_request + const title = (part.input as { title?: string } | undefined)?.title?.trim(); + const titleText = title ? ` "${title}"` : ""; + + switch (part.state) { + case "input-streaming": + case "input-available": + return `Creating feature request${titleText}`; + case "output-available": { + const output = parseOutput(part.output); + if (!output) return `Creating feature request${titleText}`; + if (isCreatedOutput(output)) { + return output.is_new_issue + ? `Created ${output.issue_identifier}` + : `Added to ${output.issue_identifier}`; + } + if (isErrorOutput(output)) return "Error creating feature request"; + return `Created feature request${titleText}`; + } + case "output-error": + return "Error creating feature request"; + default: + return "Creating feature request"; + } +} + +/* ------------------------------------------------------------------ */ +/* Icons */ +/* ------------------------------------------------------------------ */ + +export function ToolIcon({ + toolType, + isStreaming, + isError, +}: { + toolType: FeatureRequestToolType; + isStreaming?: boolean; + isError?: boolean; +}) { + const IconComponent = + toolType === "tool-create_feature_request" + ? PlusCircleIcon + : MagnifyingGlassIcon; + + return ( + + ); +} + +export function AccordionIcon({ + toolType, +}: { + toolType: FeatureRequestToolType; +}) { + const IconComponent = + toolType === "tool-create_feature_request" + ? 
CheckCircleIcon + : LightbulbIcon; + return ; +} diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index 5d2cb83f7c..a0eb141aa9 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -10495,7 +10495,9 @@ "operation_started", "operation_pending", "operation_in_progress", - "input_validation_error" + "input_validation_error", + "feature_request_search", + "feature_request_created" ], "title": "ResponseType", "description": "Types of tool responses." From 3d31f62bf1376b7b3574977af86958e6aa000825 Mon Sep 17 00:00:00 2001 From: Swifty Date: Thu, 12 Feb 2026 16:39:24 +0100 Subject: [PATCH 4/6] Revert "added feature request tooling" This reverts commit b8b6c9de2322cf083e61670ee8625d4abb2d8e19. --- .../api/features/chat/tools/__init__.py | 4 - .../features/chat/tools/feature_requests.py | 369 -------------- .../backend/api/features/chat/tools/models.py | 34 -- .../backend/backend/util/settings.py | 3 - .../backend/test_linear_customers.py | 468 ------------------ .../ChatMessagesContainer.tsx | 18 - .../(platform)/copilot/styleguide/page.tsx | 235 --------- .../tools/FeatureRequests/FeatureRequests.tsx | 240 --------- .../copilot/tools/FeatureRequests/helpers.tsx | 271 ---------- .../frontend/src/app/api/openapi.json | 4 +- 10 files changed, 1 insertion(+), 1645 deletions(-) delete mode 100644 autogpt_platform/backend/backend/api/features/chat/tools/feature_requests.py delete mode 100644 autogpt_platform/backend/test_linear_customers.py delete mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/FeatureRequests/FeatureRequests.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/FeatureRequests/helpers.tsx diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py b/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py index 350776081a..dcbc35ef37 100644 
--- a/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py @@ -12,7 +12,6 @@ from .base import BaseTool from .create_agent import CreateAgentTool from .customize_agent import CustomizeAgentTool from .edit_agent import EditAgentTool -from .feature_requests import CreateFeatureRequestTool, SearchFeatureRequestsTool from .find_agent import FindAgentTool from .find_block import FindBlockTool from .find_library_agent import FindLibraryAgentTool @@ -46,9 +45,6 @@ TOOL_REGISTRY: dict[str, BaseTool] = { "view_agent_output": AgentOutputTool(), "search_docs": SearchDocsTool(), "get_doc_page": GetDocPageTool(), - # Feature request tools - "search_feature_requests": SearchFeatureRequestsTool(), - "create_feature_request": CreateFeatureRequestTool(), # Workspace tools for CoPilot file operations "list_workspace_files": ListWorkspaceFilesTool(), "read_workspace_file": ReadWorkspaceFileTool(), diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/feature_requests.py b/autogpt_platform/backend/backend/api/features/chat/tools/feature_requests.py deleted file mode 100644 index 5e06d8b4b2..0000000000 --- a/autogpt_platform/backend/backend/api/features/chat/tools/feature_requests.py +++ /dev/null @@ -1,369 +0,0 @@ -"""Feature request tools - search and create feature requests via Linear.""" - -import logging -from typing import Any - -from pydantic import SecretStr - -from backend.api.features.chat.model import ChatSession -from backend.api.features.chat.tools.base import BaseTool -from backend.api.features.chat.tools.models import ( - ErrorResponse, - FeatureRequestCreatedResponse, - FeatureRequestInfo, - FeatureRequestSearchResponse, - NoResultsResponse, - ToolResponseBase, -) -from backend.blocks.linear._api import LinearClient -from backend.data.model import APIKeyCredentials -from backend.util.settings import Settings - -logger = logging.getLogger(__name__) - -# Target 
project and team IDs in our Linear workspace -FEATURE_REQUEST_PROJECT_ID = "13f066f3-f639-4a67-aaa3-31483ebdf8cd" -TEAM_ID = "557fd3d5-087e-43a9-83e3-476c8313ce49" - -MAX_SEARCH_RESULTS = 10 - -# GraphQL queries/mutations -SEARCH_ISSUES_QUERY = """ -query SearchFeatureRequests($term: String!, $filter: IssueFilter, $first: Int) { - searchIssues(term: $term, filter: $filter, first: $first) { - nodes { - id - identifier - title - description - } - } -} -""" - -CUSTOMER_UPSERT_MUTATION = """ -mutation CustomerUpsert($input: CustomerUpsertInput!) { - customerUpsert(input: $input) { - success - customer { - id - name - externalIds - } - } -} -""" - -ISSUE_CREATE_MUTATION = """ -mutation IssueCreate($input: IssueCreateInput!) { - issueCreate(input: $input) { - success - issue { - id - identifier - title - url - } - } -} -""" - -CUSTOMER_NEED_CREATE_MUTATION = """ -mutation CustomerNeedCreate($input: CustomerNeedCreateInput!) { - customerNeedCreate(input: $input) { - success - need { - id - body - customer { - id - name - } - issue { - id - identifier - title - url - } - } - } -} -""" - - -_settings: Settings | None = None - - -def _get_settings() -> Settings: - global _settings - if _settings is None: - _settings = Settings() - return _settings - - -def _get_linear_client() -> LinearClient: - """Create a Linear client using the system API key from settings.""" - api_key = _get_settings().secrets.linear_api_key - if not api_key: - raise RuntimeError("LINEAR_API_KEY secret is not configured") - credentials = APIKeyCredentials( - id="system-linear", - provider="linear", - api_key=SecretStr(api_key), - title="System Linear API Key", - ) - return LinearClient(credentials=credentials) - - -class SearchFeatureRequestsTool(BaseTool): - """Tool for searching existing feature requests in Linear.""" - - @property - def name(self) -> str: - return "search_feature_requests" - - @property - def description(self) -> str: - return ( - "Search existing feature requests to check if a 
similar request " - "already exists before creating a new one. Returns matching feature " - "requests with their ID, title, and description." - ) - - @property - def parameters(self) -> dict[str, Any]: - return { - "type": "object", - "properties": { - "query": { - "type": "string", - "description": "Search term to find matching feature requests.", - }, - }, - "required": ["query"], - } - - @property - def requires_auth(self) -> bool: - return True - - async def _execute( - self, - user_id: str | None, - session: ChatSession, - **kwargs, - ) -> ToolResponseBase: - query = kwargs.get("query", "").strip() - session_id = session.session_id if session else None - - if not query: - return ErrorResponse( - message="Please provide a search query.", - error="Missing query parameter", - session_id=session_id, - ) - - client = _get_linear_client() - data = await client.query( - SEARCH_ISSUES_QUERY, - { - "term": query, - "filter": { - "project": {"id": {"eq": FEATURE_REQUEST_PROJECT_ID}}, - }, - "first": MAX_SEARCH_RESULTS, - }, - ) - - nodes = data.get("searchIssues", {}).get("nodes", []) - - if not nodes: - return NoResultsResponse( - message=f"No feature requests found matching '{query}'.", - suggestions=[ - "Try different keywords", - "Use broader search terms", - "You can create a new feature request if none exists", - ], - session_id=session_id, - ) - - results = [ - FeatureRequestInfo( - id=node["id"], - identifier=node["identifier"], - title=node["title"], - description=node.get("description"), - ) - for node in nodes - ] - - return FeatureRequestSearchResponse( - message=f"Found {len(results)} feature request(s) matching '{query}'.", - results=results, - count=len(results), - query=query, - session_id=session_id, - ) - - -class CreateFeatureRequestTool(BaseTool): - """Tool for creating feature requests (or adding needs to existing ones).""" - - @property - def name(self) -> str: - return "create_feature_request" - - @property - def description(self) -> str: - return 
( - "Create a new feature request or add a customer need to an existing one. " - "Always search first with search_feature_requests to avoid duplicates. " - "If a matching request exists, pass its ID as existing_issue_id to add " - "the user's need to it instead of creating a duplicate." - ) - - @property - def parameters(self) -> dict[str, Any]: - return { - "type": "object", - "properties": { - "title": { - "type": "string", - "description": "Title for the feature request.", - }, - "description": { - "type": "string", - "description": "Detailed description of what the user wants and why.", - }, - "existing_issue_id": { - "type": "string", - "description": ( - "If adding a need to an existing feature request, " - "provide its Linear issue ID (from search results). " - "Omit to create a new feature request." - ), - }, - }, - "required": ["title", "description"], - } - - @property - def requires_auth(self) -> bool: - return True - - async def _find_or_create_customer( - self, client: LinearClient, user_id: str - ) -> dict: - """Find existing customer by user_id or create a new one via upsert.""" - data = await client.mutate( - CUSTOMER_UPSERT_MUTATION, - { - "input": { - "name": user_id, - "externalId": user_id, - }, - }, - ) - result = data.get("customerUpsert", {}) - if not result.get("success"): - raise RuntimeError(f"Failed to upsert customer: {data}") - return result["customer"] - - async def _execute( - self, - user_id: str | None, - session: ChatSession, - **kwargs, - ) -> ToolResponseBase: - title = kwargs.get("title", "").strip() - description = kwargs.get("description", "").strip() - existing_issue_id = kwargs.get("existing_issue_id") - session_id = session.session_id if session else None - - if not title or not description: - return ErrorResponse( - message="Both title and description are required.", - error="Missing required parameters", - session_id=session_id, - ) - - if not user_id: - return ErrorResponse( - message="Authentication required to create 
feature requests.", - error="Missing user_id", - session_id=session_id, - ) - - client = _get_linear_client() - - # Step 1: Find or create customer for this user - customer = await self._find_or_create_customer(client, user_id) - customer_id = customer["id"] - customer_name = customer["name"] - - # Step 2: Create or reuse issue - if existing_issue_id: - # Add need to existing issue - we still need the issue details for response - is_new_issue = False - issue_id = existing_issue_id - else: - # Create new issue in the feature requests project - data = await client.mutate( - ISSUE_CREATE_MUTATION, - { - "input": { - "title": title, - "description": description, - "teamId": TEAM_ID, - "projectId": FEATURE_REQUEST_PROJECT_ID, - }, - }, - ) - result = data.get("issueCreate", {}) - if not result.get("success"): - return ErrorResponse( - message="Failed to create feature request issue.", - error=str(data), - session_id=session_id, - ) - issue = result["issue"] - issue_id = issue["id"] - is_new_issue = True - - # Step 3: Create customer need on the issue - data = await client.mutate( - CUSTOMER_NEED_CREATE_MUTATION, - { - "input": { - "customerId": customer_id, - "issueId": issue_id, - "body": description, - "priority": 0, - }, - }, - ) - need_result = data.get("customerNeedCreate", {}) - if not need_result.get("success"): - return ErrorResponse( - message="Failed to attach customer need to the feature request.", - error=str(data), - session_id=session_id, - ) - - need = need_result["need"] - issue_info = need["issue"] - - return FeatureRequestCreatedResponse( - message=( - f"{'Created new feature request' if is_new_issue else 'Added your request to existing feature request'} " - f"[{issue_info['identifier']}] {issue_info['title']}." 
- ), - issue_id=issue_info["id"], - issue_identifier=issue_info["identifier"], - issue_title=issue_info["title"], - issue_url=issue_info.get("url", ""), - is_new_issue=is_new_issue, - customer_name=customer_name, - session_id=session_id, - ) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/models.py b/autogpt_platform/backend/backend/api/features/chat/tools/models.py index d420b289dc..69c8c6c684 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/models.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/models.py @@ -40,9 +40,6 @@ class ResponseType(str, Enum): OPERATION_IN_PROGRESS = "operation_in_progress" # Input validation INPUT_VALIDATION_ERROR = "input_validation_error" - # Feature request types - FEATURE_REQUEST_SEARCH = "feature_request_search" - FEATURE_REQUEST_CREATED = "feature_request_created" # Base response model @@ -424,34 +421,3 @@ class AsyncProcessingResponse(ToolResponseBase): status: str = "accepted" # Must be "accepted" for detection operation_id: str | None = None task_id: str | None = None - - -# Feature request models -class FeatureRequestInfo(BaseModel): - """Information about a feature request issue.""" - - id: str - identifier: str - title: str - description: str | None = None - - -class FeatureRequestSearchResponse(ToolResponseBase): - """Response for search_feature_requests tool.""" - - type: ResponseType = ResponseType.FEATURE_REQUEST_SEARCH - results: list[FeatureRequestInfo] - count: int - query: str - - -class FeatureRequestCreatedResponse(ToolResponseBase): - """Response for create_feature_request tool.""" - - type: ResponseType = ResponseType.FEATURE_REQUEST_CREATED - issue_id: str - issue_identifier: str - issue_title: str - issue_url: str - is_new_issue: bool # False if added to existing - customer_name: str diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index d539832fb0..50b7428160 100644 --- 
a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -658,9 +658,6 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings): mem0_api_key: str = Field(default="", description="Mem0 API key") elevenlabs_api_key: str = Field(default="", description="ElevenLabs API key") - linear_api_key: str = Field( - default="", description="Linear API key for system-level operations" - ) linear_client_id: str = Field(default="", description="Linear client ID") linear_client_secret: str = Field(default="", description="Linear client secret") diff --git a/autogpt_platform/backend/test_linear_customers.py b/autogpt_platform/backend/test_linear_customers.py deleted file mode 100644 index 6e6f3e48fc..0000000000 --- a/autogpt_platform/backend/test_linear_customers.py +++ /dev/null @@ -1,468 +0,0 @@ -""" -Test script for Linear GraphQL API - Customer Requests operations. - -Tests the exact GraphQL calls needed for: -1. search_feature_requests - search issues in the Customer Feature Requests project -2. add_feature_request - upsert customer + create customer need on issue - -Requires LINEAR_API_KEY in backend/.env -Generate one at: https://linear.app/settings/api -""" - -import json -import os -import sys - -import httpx -from dotenv import load_dotenv - -load_dotenv() - -LINEAR_API_URL = "https://api.linear.app/graphql" -API_KEY = os.getenv("LINEAR_API_KEY") - -# Target project for feature requests -FEATURE_REQUEST_PROJECT_ID = "13f066f3-f639-4a67-aaa3-31483ebdf8cd" -# Team: Internal -TEAM_ID = "557fd3d5-087e-43a9-83e3-476c8313ce49" - -if not API_KEY: - print("ERROR: LINEAR_API_KEY not found in .env") - print("Generate a personal API key at: https://linear.app/settings/api") - print("Then add LINEAR_API_KEY=lin_api_... 
to backend/.env") - sys.exit(1) - -HEADERS = { - "Authorization": API_KEY, - "Content-Type": "application/json", -} - - -def graphql(query: str, variables: dict | None = None) -> dict: - """Execute a GraphQL query against Linear API.""" - payload = {"query": query} - if variables: - payload["variables"] = variables - - resp = httpx.post(LINEAR_API_URL, json=payload, headers=HEADERS, timeout=30) - if resp.status_code != 200: - print(f"HTTP {resp.status_code}: {resp.text[:500]}") - resp.raise_for_status() - data = resp.json() - - if "errors" in data: - print(f"GraphQL Errors: {json.dumps(data['errors'], indent=2)}") - - return data - - -# --------------------------------------------------------------------------- -# QUERIES -# --------------------------------------------------------------------------- - -# Search issues within the feature requests project by title/description -SEARCH_ISSUES_IN_PROJECT = """ -query SearchFeatureRequests($filter: IssueFilter!, $first: Int) { - issues(filter: $filter, first: $first) { - nodes { - id - identifier - title - description - url - state { - name - type - } - project { - id - name - } - labels { - nodes { - name - } - } - } - } -} -""" - -# Get issue with its customer needs -GET_ISSUE_WITH_NEEDS = """ -query GetIssueWithNeeds($id: String!) { - issue(id: $id) { - id - identifier - title - url - needs { - nodes { - id - body - priority - customer { - id - name - domains - externalIds - } - } - } - } -} -""" - -# Search customers -SEARCH_CUSTOMERS = """ -query SearchCustomers($filter: CustomerFilter, $first: Int) { - customers(filter: $filter, first: $first) { - nodes { - id - name - domains - externalIds - revenue - size - status { - name - } - tier { - name - } - } - } -} -""" - -# --------------------------------------------------------------------------- -# MUTATIONS -# --------------------------------------------------------------------------- - -CUSTOMER_UPSERT = """ -mutation CustomerUpsert($input: CustomerUpsertInput!) 
{ - customerUpsert(input: $input) { - success - customer { - id - name - domains - externalIds - } - } -} -""" - -CUSTOMER_NEED_CREATE = """ -mutation CustomerNeedCreate($input: CustomerNeedCreateInput!) { - customerNeedCreate(input: $input) { - success - need { - id - body - priority - customer { - id - name - } - issue { - id - identifier - title - } - } - } -} -""" - -ISSUE_CREATE = """ -mutation IssueCreate($input: IssueCreateInput!) { - issueCreate(input: $input) { - success - issue { - id - identifier - title - url - } - } -} -""" - - -# --------------------------------------------------------------------------- -# TESTS -# --------------------------------------------------------------------------- - - -def test_1_search_feature_requests(): - """Search for feature requests in the target project by keyword.""" - print("\n" + "=" * 60) - print("TEST 1: Search feature requests in project by keyword") - print("=" * 60) - - search_term = "agent" - result = graphql( - SEARCH_ISSUES_IN_PROJECT, - { - "filter": { - "project": {"id": {"eq": FEATURE_REQUEST_PROJECT_ID}}, - "or": [ - {"title": {"containsIgnoreCase": search_term}}, - {"description": {"containsIgnoreCase": search_term}}, - ], - }, - "first": 5, - }, - ) - - issues = result.get("data", {}).get("issues", {}).get("nodes", []) - for issue in issues: - proj = issue.get("project") or {} - print(f"\n [{issue['identifier']}] {issue['title']}") - print(f" Project: {proj.get('name', 'N/A')}") - print(f" State: {issue['state']['name']}") - print(f" URL: {issue['url']}") - - print(f"\n Found {len(issues)} issues matching '{search_term}'") - return issues - - -def test_2_list_all_in_project(): - """List all issues in the feature requests project.""" - print("\n" + "=" * 60) - print("TEST 2: List all issues in Customer Feature Requests project") - print("=" * 60) - - result = graphql( - SEARCH_ISSUES_IN_PROJECT, - { - "filter": { - "project": {"id": {"eq": FEATURE_REQUEST_PROJECT_ID}}, - }, - "first": 10, - }, - ) - - 
issues = result.get("data", {}).get("issues", {}).get("nodes", []) - if not issues: - print(" No issues in project yet (empty project)") - for issue in issues: - print(f"\n [{issue['identifier']}] {issue['title']}") - print(f" State: {issue['state']['name']}") - - print(f"\n Total: {len(issues)} issues") - return issues - - -def test_3_search_customers(): - """List existing customers.""" - print("\n" + "=" * 60) - print("TEST 3: List customers") - print("=" * 60) - - result = graphql(SEARCH_CUSTOMERS, {"first": 10}) - customers = result.get("data", {}).get("customers", {}).get("nodes", []) - - if not customers: - print(" No customers exist yet") - for c in customers: - status = c.get("status") or {} - tier = c.get("tier") or {} - print(f"\n [{c['id'][:8]}...] {c['name']}") - print(f" Domains: {c.get('domains', [])}") - print(f" External IDs: {c.get('externalIds', [])}") - print( - f" Status: {status.get('name', 'N/A')}, Tier: {tier.get('name', 'N/A')}" - ) - - print(f"\n Total: {len(customers)} customers") - return customers - - -def test_4_customer_upsert(): - """Upsert a test customer.""" - print("\n" + "=" * 60) - print("TEST 4: Customer upsert (find-or-create)") - print("=" * 60) - - result = graphql( - CUSTOMER_UPSERT, - { - "input": { - "name": "Test Customer (API Test)", - "domains": ["test-api-customer.example.com"], - "externalId": "test-customer-001", - } - }, - ) - - upsert = result.get("data", {}).get("customerUpsert", {}) - if upsert.get("success"): - customer = upsert["customer"] - print(f" Success! 
Customer: {customer['name']}") - print(f" ID: {customer['id']}") - print(f" Domains: {customer['domains']}") - print(f" External IDs: {customer['externalIds']}") - return customer - else: - print(f" Failed: {json.dumps(result, indent=2)}") - return None - - -def test_5_create_issue_and_need(customer_id: str): - """Create a new feature request issue and attach a customer need.""" - print("\n" + "=" * 60) - print("TEST 5: Create issue + customer need") - print("=" * 60) - - # Step 1: Create issue in the project - result = graphql( - ISSUE_CREATE, - { - "input": { - "title": "Test Feature Request (API Test - safe to delete)", - "description": "This is a test feature request created via the GraphQL API.", - "teamId": TEAM_ID, - "projectId": FEATURE_REQUEST_PROJECT_ID, - } - }, - ) - - data = result.get("data") - if not data: - print(f" Issue creation failed: {json.dumps(result, indent=2)}") - return None - issue_data = data.get("issueCreate", {}) - if not issue_data.get("success"): - print(f" Issue creation failed: {json.dumps(result, indent=2)}") - return None - - issue = issue_data["issue"] - print(f" Created issue: [{issue['identifier']}] {issue['title']}") - print(f" URL: {issue['url']}") - - # Step 2: Attach customer need - result = graphql( - CUSTOMER_NEED_CREATE, - { - "input": { - "customerId": customer_id, - "issueId": issue["id"], - "body": "Our team really needs this feature for our workflow. 
High priority for us!", - "priority": 0, - } - }, - ) - - need_data = result.get("data", {}).get("customerNeedCreate", {}) - if need_data.get("success"): - need = need_data["need"] - print(f" Attached customer need: {need['id']}") - print(f" Customer: {need['customer']['name']}") - print(f" Body: {need['body'][:80]}") - else: - print(f" Customer need creation failed: {json.dumps(result, indent=2)}") - - # Step 3: Verify by fetching the issue with needs - print("\n Verifying...") - verify = graphql(GET_ISSUE_WITH_NEEDS, {"id": issue["id"]}) - issue_verify = verify.get("data", {}).get("issue", {}) - needs = issue_verify.get("needs", {}).get("nodes", []) - print(f" Issue now has {len(needs)} customer need(s)") - for n in needs: - cust = n.get("customer") or {} - print(f" - {cust.get('name', 'N/A')}: {n.get('body', '')[:60]}") - - return issue - - -def test_6_add_need_to_existing(customer_id: str, issue_id: str): - """Add a customer need to an existing issue (the common case).""" - print("\n" + "=" * 60) - print("TEST 6: Add customer need to existing issue") - print("=" * 60) - - result = graphql( - CUSTOMER_NEED_CREATE, - { - "input": { - "customerId": customer_id, - "issueId": issue_id, - "body": "We also want this! +1 from our organization.", - "priority": 0, - } - }, - ) - - need_data = result.get("data", {}).get("customerNeedCreate", {}) - if need_data.get("success"): - need = need_data["need"] - print(f" Success! 
Need: {need['id']}") - print(f" Customer: {need['customer']['name']}") - print(f" Issue: [{need['issue']['identifier']}] {need['issue']['title']}") - return need - else: - print(f" Failed: {json.dumps(result, indent=2)}") - return None - - -def main(): - print("Linear GraphQL API - Customer Requests Test Suite") - print("=" * 60) - print(f"API URL: {LINEAR_API_URL}") - print(f"API Key: {API_KEY[:10]}...") - print(f"Project: Customer Feature Requests ({FEATURE_REQUEST_PROJECT_ID[:8]}...)") - - # --- Read-only tests --- - test_1_search_feature_requests() - test_2_list_all_in_project() - test_3_search_customers() - - # --- Write tests --- - print("\n" + "=" * 60) - answer = ( - input("Run WRITE tests? (creates test customer + issue + need) [y/N]: ") - .strip() - .lower() - ) - if answer != "y": - print("Skipped write tests.") - print("\nDone!") - return - - customer = test_4_customer_upsert() - if not customer: - print("Customer upsert failed, stopping.") - return - - issue = test_5_create_issue_and_need(customer["id"]) - if not issue: - print("Issue creation failed, stopping.") - return - - # Test adding a second need to the same issue (simulates another customer requesting same feature) - # First upsert a second customer - result = graphql( - CUSTOMER_UPSERT, - { - "input": { - "name": "Second Test Customer", - "domains": ["second-test.example.com"], - "externalId": "test-customer-002", - } - }, - ) - customer2 = result.get("data", {}).get("customerUpsert", {}).get("customer") - if customer2: - test_6_add_need_to_existing(customer2["id"], issue["id"]) - - print("\n" + "=" * 60) - print("All tests complete!") - print( - "Check the project: https://linear.app/autogpt/project/customer-feature-requests-710dcbf8bf4e/issues" - ) - - -if __name__ == "__main__": - main() diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatMessagesContainer/ChatMessagesContainer.tsx 
b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatMessagesContainer/ChatMessagesContainer.tsx index b62e96f58a..71ade81a9f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatMessagesContainer/ChatMessagesContainer.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatMessagesContainer/ChatMessagesContainer.tsx @@ -15,10 +15,6 @@ import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai"; import { useEffect, useRef, useState } from "react"; import { CreateAgentTool } from "../../tools/CreateAgent/CreateAgent"; import { EditAgentTool } from "../../tools/EditAgent/EditAgent"; -import { - CreateFeatureRequestTool, - SearchFeatureRequestsTool, -} from "../../tools/FeatureRequests/FeatureRequests"; import { FindAgentsTool } from "../../tools/FindAgents/FindAgents"; import { FindBlocksTool } from "../../tools/FindBlocks/FindBlocks"; import { RunAgentTool } from "../../tools/RunAgent/RunAgent"; @@ -258,20 +254,6 @@ export const ChatMessagesContainer = ({ part={part as ToolUIPart} /> ); - case "tool-search_feature_requests": - return ( - - ); - case "tool-create_feature_request": - return ( - - ); default: return null; } diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/styleguide/page.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/styleguide/page.tsx index 8a35f939ca..6030665f1c 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/styleguide/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/styleguide/page.tsx @@ -14,10 +14,6 @@ import { Text } from "@/components/atoms/Text/Text"; import { CopilotChatActionsProvider } from "../components/CopilotChatActionsProvider/CopilotChatActionsProvider"; import { CreateAgentTool } from "../tools/CreateAgent/CreateAgent"; import { EditAgentTool } from "../tools/EditAgent/EditAgent"; -import { - CreateFeatureRequestTool, - SearchFeatureRequestsTool, -} from "../tools/FeatureRequests/FeatureRequests"; 
import { FindAgentsTool } from "../tools/FindAgents/FindAgents"; import { FindBlocksTool } from "../tools/FindBlocks/FindBlocks"; import { RunAgentTool } from "../tools/RunAgent/RunAgent"; @@ -49,8 +45,6 @@ const SECTIONS = [ "Tool: Create Agent", "Tool: Edit Agent", "Tool: View Agent Output", - "Tool: Search Feature Requests", - "Tool: Create Feature Request", "Full Conversation Example", ] as const; @@ -1427,235 +1421,6 @@ export default function StyleguidePage() { - {/* ============================================================= */} - {/* SEARCH FEATURE REQUESTS */} - {/* ============================================================= */} - -
- - - - - - - - - - - - - - - - - - - - - - - -
- - {/* ============================================================= */} - {/* CREATE FEATURE REQUEST */} - {/* ============================================================= */} - -
- - - - - - - - - - - - - - - - - - - - - - - -
- {/* ============================================================= */} {/* FULL CONVERSATION EXAMPLE */} {/* ============================================================= */} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FeatureRequests/FeatureRequests.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FeatureRequests/FeatureRequests.tsx deleted file mode 100644 index e14ec69397..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FeatureRequests/FeatureRequests.tsx +++ /dev/null @@ -1,240 +0,0 @@ -"use client"; - -import type { ToolUIPart } from "ai"; -import { useMemo } from "react"; - -import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation"; -import { - ContentBadge, - ContentCard, - ContentCardDescription, - ContentCardHeader, - ContentCardTitle, - ContentGrid, - ContentLink, - ContentMessage, - ContentSuggestionsList, -} from "../../components/ToolAccordion/AccordionContent"; -import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion"; -import { - AccordionIcon, - getAccordionTitle, - getAnimationText, - getFeatureRequestOutput, - isCreatedOutput, - isErrorOutput, - isNoResultsOutput, - isSearchResultsOutput, - ToolIcon, - type FeatureRequestToolType, -} from "./helpers"; - -export interface FeatureRequestToolPart { - type: FeatureRequestToolType; - toolCallId: string; - state: ToolUIPart["state"]; - input?: unknown; - output?: unknown; -} - -interface Props { - part: FeatureRequestToolPart; -} - -function truncate(text: string, maxChars: number): string { - const trimmed = text.trim(); - if (trimmed.length <= maxChars) return trimmed; - return `${trimmed.slice(0, maxChars).trimEnd()}…`; -} - -export function SearchFeatureRequestsTool({ part }: Props) { - const output = getFeatureRequestOutput(part); - const text = getAnimationText(part); - const isStreaming = - part.state === "input-streaming" || part.state === "input-available"; - 
const isError = - part.state === "output-error" || (!!output && isErrorOutput(output)); - - const normalized = useMemo(() => { - if (!output) return null; - return { title: getAccordionTitle(part.type, output) }; - }, [output, part.type]); - - const isOutputAvailable = part.state === "output-available" && !!output; - - const searchOutput = - isOutputAvailable && output && isSearchResultsOutput(output) - ? output - : null; - const noResultsOutput = - isOutputAvailable && output && isNoResultsOutput(output) ? output : null; - const errorOutput = - isOutputAvailable && output && isErrorOutput(output) ? output : null; - - const hasExpandableContent = - isOutputAvailable && - ((!!searchOutput && searchOutput.count > 0) || - !!noResultsOutput || - !!errorOutput); - - const accordionDescription = - hasExpandableContent && searchOutput - ? `Found ${searchOutput.count} result${searchOutput.count === 1 ? "" : "s"} for "${searchOutput.query}"` - : hasExpandableContent && (noResultsOutput || errorOutput) - ? ((noResultsOutput ?? errorOutput)?.message ?? null) - : null; - - return ( -
-
- - -
- - {hasExpandableContent && normalized && ( - } - title={normalized.title} - description={accordionDescription} - > - {searchOutput && ( - - {searchOutput.results.map((r) => ( - - - - {r.identifier} — {r.title} - - - {r.description && ( - - {truncate(r.description, 200)} - - )} - - ))} - - )} - - {noResultsOutput && ( -
- {noResultsOutput.message} - {noResultsOutput.suggestions && - noResultsOutput.suggestions.length > 0 && ( - - )} -
- )} - - {errorOutput && ( -
- {errorOutput.message} - {errorOutput.error && ( - - {errorOutput.error} - - )} -
- )} -
- )} -
- ); -} - -export function CreateFeatureRequestTool({ part }: Props) { - const output = getFeatureRequestOutput(part); - const text = getAnimationText(part); - const isStreaming = - part.state === "input-streaming" || part.state === "input-available"; - const isError = - part.state === "output-error" || (!!output && isErrorOutput(output)); - - const normalized = useMemo(() => { - if (!output) return null; - return { title: getAccordionTitle(part.type, output) }; - }, [output, part.type]); - - const isOutputAvailable = part.state === "output-available" && !!output; - - const createdOutput = - isOutputAvailable && output && isCreatedOutput(output) ? output : null; - const errorOutput = - isOutputAvailable && output && isErrorOutput(output) ? output : null; - - const hasExpandableContent = - isOutputAvailable && (!!createdOutput || !!errorOutput); - - const accordionDescription = - hasExpandableContent && createdOutput - ? `${createdOutput.issue_identifier} — ${createdOutput.issue_title}` - : hasExpandableContent && errorOutput - ? errorOutput.message - : null; - - return ( -
-
- - -
- - {hasExpandableContent && normalized && ( - } - title={normalized.title} - description={accordionDescription} - > - {createdOutput && ( - - - View - - ) : undefined - } - > - - {createdOutput.issue_identifier} — {createdOutput.issue_title} - - -
- - {createdOutput.is_new_issue ? "New" : "Existing"} - -
- {createdOutput.message} -
- )} - - {errorOutput && ( -
- {errorOutput.message} - {errorOutput.error && ( - - {errorOutput.error} - - )} -
- )} -
- )} -
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FeatureRequests/helpers.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FeatureRequests/helpers.tsx deleted file mode 100644 index ed292faf2b..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FeatureRequests/helpers.tsx +++ /dev/null @@ -1,271 +0,0 @@ -import { - CheckCircleIcon, - LightbulbIcon, - MagnifyingGlassIcon, - PlusCircleIcon, -} from "@phosphor-icons/react"; -import type { ToolUIPart } from "ai"; - -/* ------------------------------------------------------------------ */ -/* Types (local until API client is regenerated) */ -/* ------------------------------------------------------------------ */ - -interface FeatureRequestInfo { - id: string; - identifier: string; - title: string; - description?: string | null; -} - -export interface FeatureRequestSearchResponse { - type: "feature_request_search"; - message: string; - results: FeatureRequestInfo[]; - count: number; - query: string; -} - -export interface FeatureRequestCreatedResponse { - type: "feature_request_created"; - message: string; - issue_id: string; - issue_identifier: string; - issue_title: string; - issue_url: string; - is_new_issue: boolean; - customer_name: string; -} - -interface NoResultsResponse { - type: "no_results"; - message: string; - suggestions?: string[]; -} - -interface ErrorResponse { - type: "error"; - message: string; - error?: string; -} - -export type FeatureRequestOutput = - | FeatureRequestSearchResponse - | FeatureRequestCreatedResponse - | NoResultsResponse - | ErrorResponse; - -export type FeatureRequestToolType = - | "tool-search_feature_requests" - | "tool-create_feature_request" - | string; - -/* ------------------------------------------------------------------ */ -/* Output parsing */ -/* ------------------------------------------------------------------ */ - -function parseOutput(output: unknown): FeatureRequestOutput | null { - if (!output) 
return null; - if (typeof output === "string") { - const trimmed = output.trim(); - if (!trimmed) return null; - try { - return parseOutput(JSON.parse(trimmed) as unknown); - } catch { - return null; - } - } - if (typeof output === "object") { - const type = (output as { type?: unknown }).type; - if ( - type === "feature_request_search" || - type === "feature_request_created" || - type === "no_results" || - type === "error" - ) { - return output as FeatureRequestOutput; - } - // Fallback structural checks - if ("results" in output && "query" in output) - return output as FeatureRequestSearchResponse; - if ("issue_identifier" in output) - return output as FeatureRequestCreatedResponse; - if ("suggestions" in output && !("error" in output)) - return output as NoResultsResponse; - if ("error" in output || "details" in output) - return output as ErrorResponse; - } - return null; -} - -export function getFeatureRequestOutput( - part: unknown, -): FeatureRequestOutput | null { - if (!part || typeof part !== "object") return null; - return parseOutput((part as { output?: unknown }).output); -} - -/* ------------------------------------------------------------------ */ -/* Type guards */ -/* ------------------------------------------------------------------ */ - -export function isSearchResultsOutput( - output: FeatureRequestOutput, -): output is FeatureRequestSearchResponse { - return ( - output.type === "feature_request_search" || - ("results" in output && "query" in output) - ); -} - -export function isCreatedOutput( - output: FeatureRequestOutput, -): output is FeatureRequestCreatedResponse { - return ( - output.type === "feature_request_created" || "issue_identifier" in output - ); -} - -export function isNoResultsOutput( - output: FeatureRequestOutput, -): output is NoResultsResponse { - return ( - output.type === "no_results" || - ("suggestions" in output && !("error" in output)) - ); -} - -export function isErrorOutput( - output: FeatureRequestOutput, -): output is 
ErrorResponse { - return output.type === "error" || "error" in output; -} - -/* ------------------------------------------------------------------ */ -/* Accordion metadata */ -/* ------------------------------------------------------------------ */ - -export function getAccordionTitle( - toolType: FeatureRequestToolType, - output: FeatureRequestOutput, -): string { - if (toolType === "tool-search_feature_requests") { - if (isSearchResultsOutput(output)) return "Feature requests"; - if (isNoResultsOutput(output)) return "No feature requests found"; - return "Feature request search error"; - } - if (isCreatedOutput(output)) { - return output.is_new_issue - ? "Feature request created" - : "Added to feature request"; - } - if (isErrorOutput(output)) return "Feature request error"; - return "Feature request"; -} - -/* ------------------------------------------------------------------ */ -/* Animation text */ -/* ------------------------------------------------------------------ */ - -interface AnimationPart { - type: FeatureRequestToolType; - state: ToolUIPart["state"]; - input?: unknown; - output?: unknown; -} - -export function getAnimationText(part: AnimationPart): string { - if (part.type === "tool-search_feature_requests") { - const query = (part.input as { query?: string } | undefined)?.query?.trim(); - const queryText = query ? ` for "${query}"` : ""; - - switch (part.state) { - case "input-streaming": - case "input-available": - return `Searching feature requests${queryText}`; - case "output-available": { - const output = parseOutput(part.output); - if (!output) return `Searching feature requests${queryText}`; - if (isSearchResultsOutput(output)) { - return `Found ${output.count} feature request${output.count === 1 ? 
"" : "s"}${queryText}`; - } - if (isNoResultsOutput(output)) - return `No feature requests found${queryText}`; - return `Error searching feature requests${queryText}`; - } - case "output-error": - return `Error searching feature requests${queryText}`; - default: - return "Searching feature requests"; - } - } - - // create_feature_request - const title = (part.input as { title?: string } | undefined)?.title?.trim(); - const titleText = title ? ` "${title}"` : ""; - - switch (part.state) { - case "input-streaming": - case "input-available": - return `Creating feature request${titleText}`; - case "output-available": { - const output = parseOutput(part.output); - if (!output) return `Creating feature request${titleText}`; - if (isCreatedOutput(output)) { - return output.is_new_issue - ? `Created ${output.issue_identifier}` - : `Added to ${output.issue_identifier}`; - } - if (isErrorOutput(output)) return "Error creating feature request"; - return `Created feature request${titleText}`; - } - case "output-error": - return "Error creating feature request"; - default: - return "Creating feature request"; - } -} - -/* ------------------------------------------------------------------ */ -/* Icons */ -/* ------------------------------------------------------------------ */ - -export function ToolIcon({ - toolType, - isStreaming, - isError, -}: { - toolType: FeatureRequestToolType; - isStreaming?: boolean; - isError?: boolean; -}) { - const IconComponent = - toolType === "tool-create_feature_request" - ? PlusCircleIcon - : MagnifyingGlassIcon; - - return ( - - ); -} - -export function AccordionIcon({ - toolType, -}: { - toolType: FeatureRequestToolType; -}) { - const IconComponent = - toolType === "tool-create_feature_request" - ? 
CheckCircleIcon - : LightbulbIcon; - return ; -} diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index a0eb141aa9..5d2cb83f7c 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -10495,9 +10495,7 @@ "operation_started", "operation_pending", "operation_in_progress", - "input_validation_error", - "feature_request_search", - "feature_request_created" + "input_validation_error" ], "title": "ResponseType", "description": "Types of tool responses." From cb166dd6fb80b42da54da6051b26fd25bebe4517 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 12 Feb 2026 09:56:59 -0600 Subject: [PATCH 5/6] feat(blocks): Store sandbox files to workspace (#12073) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Store files created by sandbox blocks (Claude Code, Code Executor) to the user's workspace for persistence across runs. 
### Changes 🏗️ - **New `sandbox_files.py` utility** (`backend/util/sandbox_files.py`) - Shared module for extracting files from E2B sandboxes - Stores files to workspace via `store_media_file()` (includes virus scanning, size limits) - Returns `SandboxFileOutput` with path, content, and `workspace_ref` - **Claude Code block** (`backend/blocks/claude_code.py`) - Added `workspace_ref` field to `FileOutput` schema - Replaced inline `_extract_files()` with shared utility - Files from working directory now stored to workspace automatically - **Code Executor block** (`backend/blocks/code_executor.py`) - Added `files` output field to `ExecuteCodeBlock.Output` - Creates `/output` directory in sandbox before execution - Extracts all files (text + binary) from `/output` after execution - Updated `execute_code()` to support file extraction with `extract_files` param ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Create agent with Claude Code block, have it create a file, verify `workspace_ref` in output - [x] Create agent with Code Executor block, write file to `/output`, verify `workspace_ref` in output - [x] Verify files persist in workspace after sandbox disposal - [x] Verify binary files (images, etc.) work correctly in Code Executor - [x] Verify existing graphs using `content` field still work (backward compat) #### For configuration changes: - [x] `.env.default` is updated or already compatible with my changes - [x] `docker-compose.yml` is updated or already compatible with my changes - [x] I have included a list of my configuration changes in the PR description (under **Changes**) No configuration changes required - this is purely additive backend code. 
--- **Related:** Closes SECRT-1931 --- > [!NOTE] > **Medium Risk** > Adds automatic extraction and workspace storage of sandbox-written files (including binaries for code execution), which can affect output payload size, performance, and file-handling edge cases. > > **Overview** > **Sandbox blocks now persist generated files to workspace.** A new shared utility (`backend/util/sandbox_files.py`) extracts files from an E2B sandbox (scoped by a start timestamp) and stores them via `store_media_file`, returning `SandboxFileOutput` with `workspace_ref`. > > `ClaudeCodeBlock` replaces its inline file-scraping logic with this utility and updates the `files` output schema to include `workspace_ref`. > > `ExecuteCodeBlock` adds a `files` output and extends the executor mixin to optionally extract/store files (text + binary) when an `execution_context` is provided; related mocks/tests and docs are updated accordingly. > > Written by [Cursor Bugbot](https://cursor.com/dashboard?tab=bugbot) for commit 343854c0cf971cffc975c466e79bbbc2f9fd7271. This will update automatically on new commits. Configure [here](https://cursor.com/dashboard?tab=bugbot). 
--------- Co-authored-by: Claude Opus 4.6 --- .../backend/backend/blocks/claude_code.py | 179 ++--------- .../backend/backend/blocks/code_executor.py | 73 ++++- .../backend/backend/util/sandbox_files.py | 288 ++++++++++++++++++ docs/integrations/block-integrations/llm.md | 2 +- docs/integrations/block-integrations/misc.md | 1 + 5 files changed, 383 insertions(+), 160 deletions(-) create mode 100644 autogpt_platform/backend/backend/util/sandbox_files.py diff --git a/autogpt_platform/backend/backend/blocks/claude_code.py b/autogpt_platform/backend/backend/blocks/claude_code.py index 1919406c6f..2e870f02b6 100644 --- a/autogpt_platform/backend/backend/blocks/claude_code.py +++ b/autogpt_platform/backend/backend/blocks/claude_code.py @@ -1,10 +1,10 @@ import json import shlex import uuid -from typing import Literal, Optional +from typing import TYPE_CHECKING, Literal, Optional from e2b import AsyncSandbox as BaseAsyncSandbox -from pydantic import BaseModel, SecretStr +from pydantic import SecretStr from backend.blocks._base import ( Block, @@ -20,6 +20,13 @@ from backend.data.model import ( SchemaField, ) from backend.integrations.providers import ProviderName +from backend.util.sandbox_files import ( + SandboxFileOutput, + extract_and_store_sandbox_files, +) + +if TYPE_CHECKING: + from backend.executor.utils import ExecutionContext class ClaudeCodeExecutionError(Exception): @@ -174,22 +181,15 @@ class ClaudeCodeBlock(Block): advanced=True, ) - class FileOutput(BaseModel): - """A file extracted from the sandbox.""" - - path: str - relative_path: str # Path relative to working directory (for GitHub, etc.) - name: str - content: str - class Output(BlockSchemaOutput): response: str = SchemaField( description="The output/response from Claude Code execution" ) - files: list["ClaudeCodeBlock.FileOutput"] = SchemaField( + files: list[SandboxFileOutput] = SchemaField( description=( "List of text files created/modified by Claude Code during this execution. 
" - "Each file has 'path', 'relative_path', 'name', and 'content' fields." + "Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. " + "workspace_ref contains a workspace:// URI if the file was stored to workspace." ) ) conversation_history: str = SchemaField( @@ -252,6 +252,7 @@ class ClaudeCodeBlock(Block): "relative_path": "index.html", "name": "index.html", "content": "Hello World", + "workspace_ref": None, } ], ), @@ -267,11 +268,12 @@ class ClaudeCodeBlock(Block): "execute_claude_code": lambda *args, **kwargs: ( "Created index.html with hello world content", # response [ - ClaudeCodeBlock.FileOutput( + SandboxFileOutput( path="/home/user/index.html", relative_path="index.html", name="index.html", content="Hello World", + workspace_ref=None, ) ], # files "User: Create a hello world HTML file\n" @@ -294,7 +296,8 @@ class ClaudeCodeBlock(Block): existing_sandbox_id: str, conversation_history: str, dispose_sandbox: bool, - ) -> tuple[str, list["ClaudeCodeBlock.FileOutput"], str, str, str]: + execution_context: "ExecutionContext", + ) -> tuple[str, list[SandboxFileOutput], str, str, str]: """ Execute Claude Code in an E2B sandbox. 
@@ -449,14 +452,18 @@ class ClaudeCodeBlock(Block): else: new_conversation_history = turn_entry - # Extract files created/modified during this run - files = await self._extract_files( - sandbox, working_directory, start_timestamp + # Extract files created/modified during this run and store to workspace + sandbox_files = await extract_and_store_sandbox_files( + sandbox=sandbox, + working_directory=working_directory, + execution_context=execution_context, + since_timestamp=start_timestamp, + text_only=True, ) return ( response, - files, + sandbox_files, # Already SandboxFileOutput objects new_conversation_history, current_session_id, sandbox_id, @@ -471,140 +478,6 @@ class ClaudeCodeBlock(Block): if dispose_sandbox and sandbox: await sandbox.kill() - async def _extract_files( - self, - sandbox: BaseAsyncSandbox, - working_directory: str, - since_timestamp: str | None = None, - ) -> list["ClaudeCodeBlock.FileOutput"]: - """ - Extract text files created/modified during this Claude Code execution. 
- - Args: - sandbox: The E2B sandbox instance - working_directory: Directory to search for files - since_timestamp: ISO timestamp - only return files modified after this time - - Returns: - List of FileOutput objects with path, relative_path, name, and content - """ - files: list[ClaudeCodeBlock.FileOutput] = [] - - # Text file extensions we can safely read as text - text_extensions = { - ".txt", - ".md", - ".html", - ".htm", - ".css", - ".js", - ".ts", - ".jsx", - ".tsx", - ".json", - ".xml", - ".yaml", - ".yml", - ".toml", - ".ini", - ".cfg", - ".conf", - ".py", - ".rb", - ".php", - ".java", - ".c", - ".cpp", - ".h", - ".hpp", - ".cs", - ".go", - ".rs", - ".swift", - ".kt", - ".scala", - ".sh", - ".bash", - ".zsh", - ".sql", - ".graphql", - ".env", - ".gitignore", - ".dockerfile", - "Dockerfile", - ".vue", - ".svelte", - ".astro", - ".mdx", - ".rst", - ".tex", - ".csv", - ".log", - } - - try: - # List files recursively using find command - # Exclude node_modules and .git directories, but allow hidden files - # like .env and .gitignore (they're filtered by text_extensions later) - # Filter by timestamp to only get files created/modified during this run - safe_working_dir = shlex.quote(working_directory) - timestamp_filter = "" - if since_timestamp: - timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} " - find_result = await sandbox.commands.run( - f"find {safe_working_dir} -type f " - f"{timestamp_filter}" - f"-not -path '*/node_modules/*' " - f"-not -path '*/.git/*' " - f"2>/dev/null" - ) - - if find_result.stdout: - for file_path in find_result.stdout.strip().split("\n"): - if not file_path: - continue - - # Check if it's a text file we can read - is_text = any( - file_path.endswith(ext) for ext in text_extensions - ) or file_path.endswith("Dockerfile") - - if is_text: - try: - content = await sandbox.files.read(file_path) - # Handle bytes or string - if isinstance(content, bytes): - content = content.decode("utf-8", errors="replace") - - # Extract 
filename from path - file_name = file_path.split("/")[-1] - - # Calculate relative path by stripping working directory - relative_path = file_path - if file_path.startswith(working_directory): - relative_path = file_path[len(working_directory) :] - # Remove leading slash if present - if relative_path.startswith("/"): - relative_path = relative_path[1:] - - files.append( - ClaudeCodeBlock.FileOutput( - path=file_path, - relative_path=relative_path, - name=file_name, - content=content, - ) - ) - except Exception: - # Skip files that can't be read - pass - - except Exception: - # If file extraction fails, return empty results - pass - - return files - def _escape_prompt(self, prompt: str) -> str: """Escape the prompt for safe shell execution.""" # Use single quotes and escape any single quotes in the prompt @@ -617,6 +490,7 @@ class ClaudeCodeBlock(Block): *, e2b_credentials: APIKeyCredentials, anthropic_credentials: APIKeyCredentials, + execution_context: "ExecutionContext", **kwargs, ) -> BlockOutput: try: @@ -637,6 +511,7 @@ class ClaudeCodeBlock(Block): existing_sandbox_id=input_data.sandbox_id, conversation_history=input_data.conversation_history, dispose_sandbox=input_data.dispose_sandbox, + execution_context=execution_context, ) yield "response", response diff --git a/autogpt_platform/backend/backend/blocks/code_executor.py b/autogpt_platform/backend/backend/blocks/code_executor.py index 766f44b7bb..26bf9acd4f 100644 --- a/autogpt_platform/backend/backend/blocks/code_executor.py +++ b/autogpt_platform/backend/backend/blocks/code_executor.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import Any, Literal, Optional +from typing import TYPE_CHECKING, Any, Literal, Optional from e2b_code_interpreter import AsyncSandbox from e2b_code_interpreter import Result as E2BExecutionResult @@ -20,6 +20,13 @@ from backend.data.model import ( SchemaField, ) from backend.integrations.providers import ProviderName +from backend.util.sandbox_files import ( + 
SandboxFileOutput, + extract_and_store_sandbox_files, +) + +if TYPE_CHECKING: + from backend.executor.utils import ExecutionContext TEST_CREDENTIALS = APIKeyCredentials( id="01234567-89ab-cdef-0123-456789abcdef", @@ -85,6 +92,9 @@ class CodeExecutionResult(MainCodeExecutionResult): class BaseE2BExecutorMixin: """Shared implementation methods for E2B executor blocks.""" + # Default working directory in E2B sandboxes + WORKING_DIR = "/home/user" + async def execute_code( self, api_key: str, @@ -95,14 +105,21 @@ class BaseE2BExecutorMixin: timeout: Optional[int] = None, sandbox_id: Optional[str] = None, dispose_sandbox: bool = False, + execution_context: Optional["ExecutionContext"] = None, + extract_files: bool = False, ): """ Unified code execution method that handles all three use cases: 1. Create new sandbox and execute (ExecuteCodeBlock) 2. Create new sandbox, execute, and return sandbox_id (InstantiateCodeSandboxBlock) 3. Connect to existing sandbox and execute (ExecuteCodeStepBlock) + + Args: + extract_files: If True and execution_context provided, extract files + created/modified during execution and store to workspace. 
""" # noqa sandbox = None + files: list[SandboxFileOutput] = [] try: if sandbox_id: # Connect to existing sandbox (ExecuteCodeStepBlock case) @@ -118,6 +135,12 @@ class BaseE2BExecutorMixin: for cmd in setup_commands: await sandbox.commands.run(cmd) + # Capture timestamp before execution to scope file extraction + start_timestamp = None + if extract_files: + ts_result = await sandbox.commands.run("date -u +%Y-%m-%dT%H:%M:%S") + start_timestamp = ts_result.stdout.strip() if ts_result.stdout else None + # Execute the code execution = await sandbox.run_code( code, @@ -133,7 +156,24 @@ class BaseE2BExecutorMixin: stdout_logs = "".join(execution.logs.stdout) stderr_logs = "".join(execution.logs.stderr) - return results, text_output, stdout_logs, stderr_logs, sandbox.sandbox_id + # Extract files created/modified during this execution + if extract_files and execution_context: + files = await extract_and_store_sandbox_files( + sandbox=sandbox, + working_directory=self.WORKING_DIR, + execution_context=execution_context, + since_timestamp=start_timestamp, + text_only=False, # Include binary files too + ) + + return ( + results, + text_output, + stdout_logs, + stderr_logs, + sandbox.sandbox_id, + files, + ) finally: # Dispose of sandbox if requested to reduce usage costs if dispose_sandbox and sandbox: @@ -238,6 +278,12 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin): description="Standard output logs from execution" ) stderr_logs: str = SchemaField(description="Standard error logs from execution") + files: list[SandboxFileOutput] = SchemaField( + description=( + "Files created or modified during execution. " + "Each file has path, name, content, and workspace_ref (if stored)." 
+ ), + ) def __init__(self): super().__init__( @@ -259,23 +305,30 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin): ("results", []), ("response", "Hello World"), ("stdout_logs", "Hello World\n"), + ("files", []), ], test_mock={ - "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox: ( # noqa + "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox, execution_context, extract_files: ( # noqa [], # results "Hello World", # text_output "Hello World\n", # stdout_logs "", # stderr_logs "sandbox_id", # sandbox_id + [], # files ), }, ) async def run( - self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + self, + input_data: Input, + *, + credentials: APIKeyCredentials, + execution_context: "ExecutionContext", + **kwargs, ) -> BlockOutput: try: - results, text_output, stdout, stderr, _ = await self.execute_code( + results, text_output, stdout, stderr, _, files = await self.execute_code( api_key=credentials.api_key.get_secret_value(), code=input_data.code, language=input_data.language, @@ -283,6 +336,8 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin): setup_commands=input_data.setup_commands, timeout=input_data.timeout, dispose_sandbox=input_data.dispose_sandbox, + execution_context=execution_context, + extract_files=True, ) # Determine result object shape & filter out empty formats @@ -296,6 +351,8 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin): yield "stdout_logs", stdout if stderr: yield "stderr_logs", stderr + # Always yield files (empty list if none) + yield "files", [f.model_dump() for f in files] except Exception as e: yield "error", str(e) @@ -393,6 +450,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin): "Hello World\n", # stdout_logs "", # stderr_logs "sandbox_id", # sandbox_id + [], # files ), }, ) @@ -401,7 +459,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin): self, input_data: Input, *, 
credentials: APIKeyCredentials, **kwargs ) -> BlockOutput: try: - _, text_output, stdout, stderr, sandbox_id = await self.execute_code( + _, text_output, stdout, stderr, sandbox_id, _ = await self.execute_code( api_key=credentials.api_key.get_secret_value(), code=input_data.setup_code, language=input_data.language, @@ -500,6 +558,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin): "Hello World\n", # stdout_logs "", # stderr_logs sandbox_id, # sandbox_id + [], # files ), }, ) @@ -508,7 +567,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin): self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs ) -> BlockOutput: try: - results, text_output, stdout, stderr, _ = await self.execute_code( + results, text_output, stdout, stderr, _, _ = await self.execute_code( api_key=credentials.api_key.get_secret_value(), code=input_data.step_code, language=input_data.language, diff --git a/autogpt_platform/backend/backend/util/sandbox_files.py b/autogpt_platform/backend/backend/util/sandbox_files.py new file mode 100644 index 0000000000..9db53ded14 --- /dev/null +++ b/autogpt_platform/backend/backend/util/sandbox_files.py @@ -0,0 +1,288 @@ +""" +Shared utilities for extracting and storing files from E2B sandboxes. + +This module provides common file extraction and workspace storage functionality +for blocks that run code in E2B sandboxes (Claude Code, Code Executor, etc.). 
+""" + +import base64 +import logging +import mimetypes +import shlex +from dataclasses import dataclass +from typing import TYPE_CHECKING + +from pydantic import BaseModel + +from backend.util.file import store_media_file +from backend.util.type import MediaFileType + +if TYPE_CHECKING: + from e2b import AsyncSandbox as BaseAsyncSandbox + + from backend.executor.utils import ExecutionContext + +logger = logging.getLogger(__name__) + +# Text file extensions that can be safely read and stored as text +TEXT_EXTENSIONS = { + ".txt", + ".md", + ".html", + ".htm", + ".css", + ".js", + ".ts", + ".jsx", + ".tsx", + ".json", + ".xml", + ".yaml", + ".yml", + ".toml", + ".ini", + ".cfg", + ".conf", + ".py", + ".rb", + ".php", + ".java", + ".c", + ".cpp", + ".h", + ".hpp", + ".cs", + ".go", + ".rs", + ".swift", + ".kt", + ".scala", + ".sh", + ".bash", + ".zsh", + ".sql", + ".graphql", + ".env", + ".gitignore", + ".dockerfile", + "Dockerfile", + ".vue", + ".svelte", + ".astro", + ".mdx", + ".rst", + ".tex", + ".csv", + ".log", +} + + +class SandboxFileOutput(BaseModel): + """A file extracted from a sandbox and optionally stored in workspace.""" + + path: str + """Full path in the sandbox.""" + + relative_path: str + """Path relative to the working directory.""" + + name: str + """Filename only.""" + + content: str + """File content as text (for backward compatibility).""" + + workspace_ref: str | None = None + """Workspace reference (workspace://{id}#mime) if stored, None otherwise.""" + + +@dataclass +class ExtractedFile: + """Internal representation of an extracted file before storage.""" + + path: str + relative_path: str + name: str + content: bytes + is_text: bool + + +async def extract_sandbox_files( + sandbox: "BaseAsyncSandbox", + working_directory: str, + since_timestamp: str | None = None, + text_only: bool = True, +) -> list[ExtractedFile]: + """ + Extract files from an E2B sandbox. 
+ + Args: + sandbox: The E2B sandbox instance + working_directory: Directory to search for files + since_timestamp: ISO timestamp - only return files modified after this time + text_only: If True, only extract text files (default). If False, extract all files. + + Returns: + List of ExtractedFile objects with path, content, and metadata + """ + files: list[ExtractedFile] = [] + + try: + # Build find command + safe_working_dir = shlex.quote(working_directory) + timestamp_filter = "" + if since_timestamp: + timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} " + + find_result = await sandbox.commands.run( + f"find {safe_working_dir} -type f " + f"{timestamp_filter}" + f"-not -path '*/node_modules/*' " + f"-not -path '*/.git/*' " + f"2>/dev/null" + ) + + if not find_result.stdout: + return files + + for file_path in find_result.stdout.strip().split("\n"): + if not file_path: + continue + + # Check if it's a text file + is_text = any(file_path.endswith(ext) for ext in TEXT_EXTENSIONS) + + # Skip non-text files if text_only mode + if text_only and not is_text: + continue + + try: + # Read file content as bytes + content = await sandbox.files.read(file_path, format="bytes") + if isinstance(content, str): + content = content.encode("utf-8") + elif isinstance(content, bytearray): + content = bytes(content) + + # Extract filename from path + file_name = file_path.split("/")[-1] + + # Calculate relative path + relative_path = file_path + if file_path.startswith(working_directory): + relative_path = file_path[len(working_directory) :] + if relative_path.startswith("/"): + relative_path = relative_path[1:] + + files.append( + ExtractedFile( + path=file_path, + relative_path=relative_path, + name=file_name, + content=content, + is_text=is_text, + ) + ) + except Exception as e: + logger.debug(f"Failed to read file {file_path}: {e}") + continue + + except Exception as e: + logger.warning(f"File extraction failed: {e}") + + return files + + +async def store_sandbox_files( 
+ extracted_files: list[ExtractedFile], + execution_context: "ExecutionContext", +) -> list[SandboxFileOutput]: + """ + Store extracted sandbox files to workspace and return output objects. + + Args: + extracted_files: List of files extracted from sandbox + execution_context: Execution context for workspace storage + + Returns: + List of SandboxFileOutput objects with workspace refs + """ + outputs: list[SandboxFileOutput] = [] + + for file in extracted_files: + # Decode content for text files (for backward compat content field) + if file.is_text: + try: + content_str = file.content.decode("utf-8", errors="replace") + except Exception: + content_str = "" + else: + content_str = f"[Binary file: {len(file.content)} bytes]" + + # Build data URI (needed for storage and as binary fallback) + mime_type = mimetypes.guess_type(file.name)[0] or "application/octet-stream" + data_uri = f"data:{mime_type};base64,{base64.b64encode(file.content).decode()}" + + # Try to store in workspace + workspace_ref: str | None = None + try: + result = await store_media_file( + file=MediaFileType(data_uri), + execution_context=execution_context, + return_format="for_block_output", + ) + if result.startswith("workspace://"): + workspace_ref = result + elif not file.is_text: + # Non-workspace context (graph execution): store_media_file + # returned a data URI — use it as content so binary data isn't lost. 
+ content_str = result + except Exception as e: + logger.warning(f"Failed to store file {file.name} to workspace: {e}") + # For binary files, fall back to data URI to prevent data loss + if not file.is_text: + content_str = data_uri + + outputs.append( + SandboxFileOutput( + path=file.path, + relative_path=file.relative_path, + name=file.name, + content=content_str, + workspace_ref=workspace_ref, + ) + ) + + return outputs + + +async def extract_and_store_sandbox_files( + sandbox: "BaseAsyncSandbox", + working_directory: str, + execution_context: "ExecutionContext", + since_timestamp: str | None = None, + text_only: bool = True, +) -> list[SandboxFileOutput]: + """ + Extract files from sandbox and store them in workspace. + + This is the main entry point combining extraction and storage. + + Args: + sandbox: The E2B sandbox instance + working_directory: Directory to search for files + execution_context: Execution context for workspace storage + since_timestamp: ISO timestamp - only return files modified after this time + text_only: If True, only extract text files + + Returns: + List of SandboxFileOutput objects with content and workspace refs + """ + extracted = await extract_sandbox_files( + sandbox=sandbox, + working_directory=working_directory, + since_timestamp=since_timestamp, + text_only=text_only, + ) + + return await store_sandbox_files(extracted, execution_context) diff --git a/docs/integrations/block-integrations/llm.md b/docs/integrations/block-integrations/llm.md index 20a5147fcd..9c96ef56c0 100644 --- a/docs/integrations/block-integrations/llm.md +++ b/docs/integrations/block-integrations/llm.md @@ -563,7 +563,7 @@ The block supports conversation continuation through three mechanisms: |--------|-------------|------| | error | Error message if execution failed | str | | response | The output/response from Claude Code execution | str | -| files | List of text files created/modified by Claude Code during this execution. 
Each file has 'path', 'relative_path', 'name', and 'content' fields. | List[FileOutput] | +| files | List of text files created/modified by Claude Code during this execution. Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. workspace_ref contains a workspace:// URI if the file was stored to workspace. | List[SandboxFileOutput] | | conversation_history | Full conversation history including this turn. Pass this to conversation_history input to continue on a fresh sandbox if the previous sandbox timed out. | str | | session_id | Session ID for this conversation. Pass this back along with sandbox_id to continue the conversation. | str | | sandbox_id | ID of the sandbox instance. Pass this back along with session_id to continue the conversation. This is None if dispose_sandbox was True (sandbox was disposed). | str | diff --git a/docs/integrations/block-integrations/misc.md b/docs/integrations/block-integrations/misc.md index 4c199bebb4..ad6300ae88 100644 --- a/docs/integrations/block-integrations/misc.md +++ b/docs/integrations/block-integrations/misc.md @@ -215,6 +215,7 @@ The sandbox includes pip and npm pre-installed. Set timeout to limit execution t | response | Text output (if any) of the main execution result | str | | stdout_logs | Standard output logs from execution | str | | stderr_logs | Standard error logs from execution | str | +| files | Files created or modified during execution. Each file has path, name, content, and workspace_ref (if stored). | List[SandboxFileOutput] | ### Possible use case From d95aef76653733b8403e9a6dbec82a6bca9ca016 Mon Sep 17 00:00:00 2001 From: Ubbe Date: Fri, 13 Feb 2026 04:06:40 +0800 Subject: [PATCH 6/6] fix(copilot): stream timeout, long-running tool polling, and CreateAgent UI refresh (#12070) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Agent generation completes on the backend but the UI does not update/refresh to show the result. 
### Changes 🏗️ ![Uploading Screenshot 2026-02-13 at 00.44.54.png…]() - **Stream start timeout (12s):** If the backend doesn't begin streaming within 12 seconds of submitting a message, the stream is aborted and a destructive toast is shown to the user. - **Long-running tool polling:** Added `useLongRunningToolPolling` hook that polls the session endpoint every 1.5s while a tool output is in an operating state (`operation_started` / `operation_pending` / `operation_in_progress`). When the backend completes, messages are refreshed so the UI reflects the final result. - **CreateAgent UI improvements:** Replaced the orbit loader / progress bar with a mini-game, added expanded accordion for saved agents, and improved the saved-agent card with image, icons, and links that open in new tabs. - **Backend tweaks:** Added `image_url` to `CreateAgentToolOutput`, minor model/service updates for the dummy agent generator. ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Send a message and verify the stream starts within 12s or a toast appears - [x] Trigger agent creation and verify the UI updates when the backend completes - [x] Verify the saved-agent card renders correctly with image, links, and icons --------- Co-authored-by: Otto Co-authored-by: Nicholas Tindle Co-authored-by: Claude Opus 4.6 --- .../chat/tools/agent_generator/dummy.py | 154 +++++ .../chat/tools/agent_generator/service.py | 57 +- .../backend/backend/util/settings.py | 4 + .../test/agent_generator/test_service.py | 1 + .../src/app/(platform)/copilot/hooks/Untitled | 10 - .../hooks/useLongRunningToolPolling.ts | 126 ++++ .../copilot/tools/CreateAgent/CreateAgent.tsx | 77 ++- .../components/MiniGame/MiniGame.tsx | 21 + .../components/MiniGame/useMiniGame.ts | 579 ++++++++++++++++++ .../app/(platform)/copilot/useCopilotPage.ts | 29 +- 10 files changed, 1019 
insertions(+), 39 deletions(-) create mode 100644 autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/dummy.py delete mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/hooks/Untitled create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/hooks/useLongRunningToolPolling.ts create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/MiniGame.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/useMiniGame.ts diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/dummy.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/dummy.py new file mode 100644 index 0000000000..cf0e76d3b3 --- /dev/null +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/dummy.py @@ -0,0 +1,154 @@ +"""Dummy Agent Generator for testing. + +Returns mock responses matching the format expected from the external service. +Enable via AGENTGENERATOR_USE_DUMMY=true in settings. + +WARNING: This is for testing only. Do not use in production. 
+""" + +import asyncio +import logging +import uuid +from typing import Any + +logger = logging.getLogger(__name__) + +# Dummy decomposition result (instructions type) +DUMMY_DECOMPOSITION_RESULT: dict[str, Any] = { + "type": "instructions", + "steps": [ + { + "description": "Get input from user", + "action": "input", + "block_name": "AgentInputBlock", + }, + { + "description": "Process the input", + "action": "process", + "block_name": "TextFormatterBlock", + }, + { + "description": "Return output to user", + "action": "output", + "block_name": "AgentOutputBlock", + }, + ], +} + +# Block IDs from backend/blocks/io.py +AGENT_INPUT_BLOCK_ID = "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b" +AGENT_OUTPUT_BLOCK_ID = "363ae599-353e-4804-937e-b2ee3cef3da4" + + +def _generate_dummy_agent_json() -> dict[str, Any]: + """Generate a minimal valid agent JSON for testing.""" + input_node_id = str(uuid.uuid4()) + output_node_id = str(uuid.uuid4()) + + return { + "id": str(uuid.uuid4()), + "version": 1, + "is_active": True, + "name": "Dummy Test Agent", + "description": "A dummy agent generated for testing purposes", + "nodes": [ + { + "id": input_node_id, + "block_id": AGENT_INPUT_BLOCK_ID, + "input_default": { + "name": "input", + "title": "Input", + "description": "Enter your input", + "placeholder_values": [], + }, + "metadata": {"position": {"x": 0, "y": 0}}, + }, + { + "id": output_node_id, + "block_id": AGENT_OUTPUT_BLOCK_ID, + "input_default": { + "name": "output", + "title": "Output", + "description": "Agent output", + "format": "{output}", + }, + "metadata": {"position": {"x": 400, "y": 0}}, + }, + ], + "links": [ + { + "id": str(uuid.uuid4()), + "source_id": input_node_id, + "sink_id": output_node_id, + "source_name": "result", + "sink_name": "value", + "is_static": False, + }, + ], + } + + +async def decompose_goal_dummy( + description: str, + context: str = "", + library_agents: list[dict[str, Any]] | None = None, +) -> dict[str, Any]: + """Return dummy decomposition 
result.""" + logger.info("Using dummy agent generator for decompose_goal") + return DUMMY_DECOMPOSITION_RESULT.copy() + + +async def generate_agent_dummy( + instructions: dict[str, Any], + library_agents: list[dict[str, Any]] | None = None, + operation_id: str | None = None, + task_id: str | None = None, +) -> dict[str, Any]: + """Return dummy agent JSON after a simulated delay.""" + logger.info("Using dummy agent generator for generate_agent (30s delay)") + await asyncio.sleep(30) + return _generate_dummy_agent_json() + + +async def generate_agent_patch_dummy( + update_request: str, + current_agent: dict[str, Any], + library_agents: list[dict[str, Any]] | None = None, + operation_id: str | None = None, + task_id: str | None = None, +) -> dict[str, Any]: + """Return dummy patched agent (returns the current agent with updated description).""" + logger.info("Using dummy agent generator for generate_agent_patch") + patched = current_agent.copy() + patched["description"] = ( + f"{current_agent.get('description', '')} (updated: {update_request})" + ) + return patched + + +async def customize_template_dummy( + template_agent: dict[str, Any], + modification_request: str, + context: str = "", +) -> dict[str, Any]: + """Return dummy customized template (returns template with updated description).""" + logger.info("Using dummy agent generator for customize_template") + customized = template_agent.copy() + customized["description"] = ( + f"{template_agent.get('description', '')} (customized: {modification_request})" + ) + return customized + + +async def get_blocks_dummy() -> list[dict[str, Any]]: + """Return dummy blocks list.""" + logger.info("Using dummy agent generator for get_blocks") + return [ + {"id": AGENT_INPUT_BLOCK_ID, "name": "AgentInputBlock"}, + {"id": AGENT_OUTPUT_BLOCK_ID, "name": "AgentOutputBlock"}, + ] + + +async def health_check_dummy() -> bool: + """Always returns healthy for dummy service.""" + return True diff --git 
a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py index 62411b4e1b..2b40c6d6f3 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py @@ -12,8 +12,19 @@ import httpx from backend.util.settings import Settings +from .dummy import ( + customize_template_dummy, + decompose_goal_dummy, + generate_agent_dummy, + generate_agent_patch_dummy, + get_blocks_dummy, + health_check_dummy, +) + logger = logging.getLogger(__name__) +_dummy_mode_warned = False + def _create_error_response( error_message: str, @@ -90,10 +101,26 @@ def _get_settings() -> Settings: return _settings -def is_external_service_configured() -> bool: - """Check if external Agent Generator service is configured.""" +def _is_dummy_mode() -> bool: + """Check if dummy mode is enabled for testing.""" + global _dummy_mode_warned settings = _get_settings() - return bool(settings.config.agentgenerator_host) + is_dummy = bool(settings.config.agentgenerator_use_dummy) + if is_dummy and not _dummy_mode_warned: + logger.warning( + "Agent Generator running in DUMMY MODE - returning mock responses. " + "Do not use in production!" 
+ ) + _dummy_mode_warned = True + return is_dummy + + +def is_external_service_configured() -> bool: + """Check if external Agent Generator service is configured (or dummy mode).""" + settings = _get_settings() + return bool(settings.config.agentgenerator_host) or bool( + settings.config.agentgenerator_use_dummy + ) def _get_base_url() -> str: @@ -137,6 +164,9 @@ async def decompose_goal_external( - {"type": "error", "error": "...", "error_type": "..."} on error Or None on unexpected error """ + if _is_dummy_mode(): + return await decompose_goal_dummy(description, context, library_agents) + client = _get_client() if context: @@ -226,6 +256,11 @@ async def generate_agent_external( Returns: Agent JSON dict, {"status": "accepted"} for async, or error dict {"type": "error", ...} on error """ + if _is_dummy_mode(): + return await generate_agent_dummy( + instructions, library_agents, operation_id, task_id + ) + client = _get_client() # Build request payload @@ -297,6 +332,11 @@ async def generate_agent_patch_external( Returns: Updated agent JSON, clarifying questions dict, {"status": "accepted"} for async, or error dict on error """ + if _is_dummy_mode(): + return await generate_agent_patch_dummy( + update_request, current_agent, library_agents, operation_id, task_id + ) + client = _get_client() # Build request payload @@ -383,6 +423,11 @@ async def customize_template_external( Returns: Customized agent JSON, clarifying questions dict, or error dict on error """ + if _is_dummy_mode(): + return await customize_template_dummy( + template_agent, modification_request, context + ) + client = _get_client() request = modification_request @@ -445,6 +490,9 @@ async def get_blocks_external() -> list[dict[str, Any]] | None: Returns: List of block info dicts or None on error """ + if _is_dummy_mode(): + return await get_blocks_dummy() + client = _get_client() try: @@ -478,6 +526,9 @@ async def health_check() -> bool: if not is_external_service_configured(): return False + if 
_is_dummy_mode(): + return await health_check_dummy() + client = _get_client() try: diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index 50b7428160..48dadb88f1 100644 --- a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -368,6 +368,10 @@ class Config(UpdateTrackingModel["Config"], BaseSettings): default=600, description="The timeout in seconds for Agent Generator service requests (includes retries for rate limits)", ) + agentgenerator_use_dummy: bool = Field( + default=False, + description="Use dummy agent generator responses for testing (bypasses external service)", + ) enable_example_blocks: bool = Field( default=False, diff --git a/autogpt_platform/backend/test/agent_generator/test_service.py b/autogpt_platform/backend/test/agent_generator/test_service.py index cc37c428c0..93c9b9dcc0 100644 --- a/autogpt_platform/backend/test/agent_generator/test_service.py +++ b/autogpt_platform/backend/test/agent_generator/test_service.py @@ -25,6 +25,7 @@ class TestServiceConfiguration: """Test that external service is not configured when host is empty.""" mock_settings = MagicMock() mock_settings.config.agentgenerator_host = "" + mock_settings.config.agentgenerator_use_dummy = False with patch.object(service, "_get_settings", return_value=mock_settings): assert service.is_external_service_configured() is False diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/hooks/Untitled b/autogpt_platform/frontend/src/app/(platform)/copilot/hooks/Untitled deleted file mode 100644 index 13769eb726..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/hooks/Untitled +++ /dev/null @@ -1,10 +0,0 @@ -import { parseAsString, useQueryState } from "nuqs"; - -export function useCopilotSessionId() { - const [urlSessionId, setUrlSessionId] = useQueryState( - "sessionId", - parseAsString, - ); - - return { urlSessionId, setUrlSessionId }; 
-} \ No newline at end of file diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/hooks/useLongRunningToolPolling.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/hooks/useLongRunningToolPolling.ts new file mode 100644 index 0000000000..85ef6b2962 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/hooks/useLongRunningToolPolling.ts @@ -0,0 +1,126 @@ +import { getGetV2GetSessionQueryKey } from "@/app/api/__generated__/endpoints/chat/chat"; +import { useQueryClient } from "@tanstack/react-query"; +import type { UIDataTypes, UIMessage, UITools } from "ai"; +import { useCallback, useEffect, useRef } from "react"; +import { convertChatSessionMessagesToUiMessages } from "../helpers/convertChatSessionToUiMessages"; + +const OPERATING_TYPES = new Set([ + "operation_started", + "operation_pending", + "operation_in_progress", +]); + +const POLL_INTERVAL_MS = 1_500; + +/** + * Detects whether any message contains a tool part whose output indicates + * a long-running operation is still in progress. + */ +function hasOperatingTool( + messages: UIMessage[], +) { + for (const msg of messages) { + for (const part of msg.parts) { + if (!part.type.startsWith("tool-")) continue; + const toolPart = part as { output?: unknown }; + if (!toolPart.output) continue; + const output = + typeof toolPart.output === "string" + ? safeParse(toolPart.output) + : toolPart.output; + if ( + output && + typeof output === "object" && + "type" in output && + OPERATING_TYPES.has((output as { type: string }).type) + ) { + return true; + } + } + } + return false; +} + +function safeParse(value: string): unknown { + try { + return JSON.parse(value); + } catch { + return null; + } +} + +/** + * Polls the session endpoint while any tool is in an "operating" state + * (operation_started / operation_pending / operation_in_progress). + * + * When the session data shows the tool output has changed (e.g. 
to + * agent_saved), it calls `setMessages` with the updated messages. + */ +export function useLongRunningToolPolling( + sessionId: string | null, + messages: UIMessage[], + setMessages: ( + updater: ( + prev: UIMessage[], + ) => UIMessage[], + ) => void, +) { + const queryClient = useQueryClient(); + const intervalRef = useRef | null>(null); + + const stopPolling = useCallback(() => { + if (intervalRef.current) { + clearInterval(intervalRef.current); + intervalRef.current = null; + } + }, []); + + const poll = useCallback(async () => { + if (!sessionId) return; + + // Invalidate the query cache so the next fetch gets fresh data + await queryClient.invalidateQueries({ + queryKey: getGetV2GetSessionQueryKey(sessionId), + }); + + // Fetch fresh session data + const data = queryClient.getQueryData<{ + status: number; + data: { messages?: unknown[] }; + }>(getGetV2GetSessionQueryKey(sessionId)); + + if (data?.status !== 200 || !data.data.messages) return; + + const freshMessages = convertChatSessionMessagesToUiMessages( + sessionId, + data.data.messages, + ); + + if (!freshMessages || freshMessages.length === 0) return; + + // Update when the long-running tool completed + if (!hasOperatingTool(freshMessages)) { + setMessages(() => freshMessages); + stopPolling(); + } + }, [sessionId, queryClient, setMessages, stopPolling]); + + useEffect(() => { + const shouldPoll = hasOperatingTool(messages); + + // Always clear any previous interval first so we never leak timers + // when the effect re-runs due to dependency changes (e.g. messages + // updating as the LLM streams text after the tool call). 
+ stopPolling(); + + if (shouldPoll && sessionId) { + intervalRef.current = setInterval(() => { + poll(); + }, POLL_INTERVAL_MS); + } + + return () => { + stopPolling(); + }; + }, [messages, sessionId, poll, stopPolling]); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx index 88b1c491d7..26977a207a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx @@ -1,24 +1,30 @@ "use client"; -import { WarningDiamondIcon } from "@phosphor-icons/react"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { + BookOpenIcon, + CheckFatIcon, + PencilSimpleIcon, + WarningDiamondIcon, +} from "@phosphor-icons/react"; import type { ToolUIPart } from "ai"; +import NextLink from "next/link"; import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions"; import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation"; -import { ProgressBar } from "../../components/ProgressBar/ProgressBar"; import { ContentCardDescription, ContentCodeBlock, ContentGrid, ContentHint, - ContentLink, ContentMessage, } from "../../components/ToolAccordion/AccordionContent"; import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion"; -import { useAsymptoticProgress } from "../../hooks/useAsymptoticProgress"; import { ClarificationQuestionsCard, ClarifyingQuestion, } from "./components/ClarificationQuestionsCard"; +import { MiniGame } from "./components/MiniGame/MiniGame"; import { AccordionIcon, formatMaybeJson, @@ -52,7 +58,7 @@ function getAccordionMeta(output: CreateAgentToolOutput) { const icon = ; if (isAgentSavedOutput(output)) { - return { icon, title: 
output.agent_name }; + return { icon, title: output.agent_name, expanded: true }; } if (isAgentPreviewOutput(output)) { return { @@ -78,6 +84,7 @@ function getAccordionMeta(output: CreateAgentToolOutput) { return { icon, title: "Creating agent, this may take a few minutes. Sit back and relax.", + expanded: true, }; } return { @@ -107,8 +114,6 @@ export function CreateAgentTool({ part }: Props) { isOperationPendingOutput(output) || isOperationInProgressOutput(output)); - const progress = useAsymptoticProgress(isOperating); - const hasExpandableContent = part.state === "output-available" && !!output && @@ -152,31 +157,53 @@ export function CreateAgentTool({ part }: Props) { {isOperating && ( - + - This could take a few minutes, grab a coffee ☕ + This could take a few minutes — play while you wait! )} {isAgentSavedOutput(output) && ( - - {output.message} -
- - Open in library - - - Open in builder - +
+
+ + + {output.message} +
- - {truncateText( - formatMaybeJson({ agent_id: output.agent_id }), - 800, - )} - - +
+ + +
+
)} {isAgentPreviewOutput(output) && ( diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/MiniGame.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/MiniGame.tsx new file mode 100644 index 0000000000..53cfcf2731 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/MiniGame.tsx @@ -0,0 +1,21 @@ +"use client"; + +import { useMiniGame } from "./useMiniGame"; + +export function MiniGame() { + const { canvasRef } = useMiniGame(); + + return ( +
+ +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/useMiniGame.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/useMiniGame.ts new file mode 100644 index 0000000000..e91f1766ca --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/useMiniGame.ts @@ -0,0 +1,579 @@ +import { useEffect, useRef } from "react"; + +/* ------------------------------------------------------------------ */ +/* Constants */ +/* ------------------------------------------------------------------ */ + +const CANVAS_HEIGHT = 150; +const GRAVITY = 0.55; +const JUMP_FORCE = -9.5; +const BASE_SPEED = 3; +const SPEED_INCREMENT = 0.0008; +const SPAWN_MIN = 70; +const SPAWN_MAX = 130; +const CHAR_SIZE = 18; +const CHAR_X = 50; +const GROUND_PAD = 20; +const STORAGE_KEY = "copilot-minigame-highscore"; + +// Colors +const COLOR_BG = "#E8EAF6"; +const COLOR_CHAR = "#263238"; +const COLOR_BOSS = "#F50057"; + +// Boss +const BOSS_SIZE = 36; +const BOSS_ENTER_SPEED = 2; +const BOSS_LEAVE_SPEED = 3; +const BOSS_SHOOT_COOLDOWN = 90; +const BOSS_SHOTS_TO_EVADE = 5; +const BOSS_INTERVAL = 20; // every N score +const PROJ_SPEED = 4.5; +const PROJ_SIZE = 12; + +/* ------------------------------------------------------------------ */ +/* Types */ +/* ------------------------------------------------------------------ */ + +interface Obstacle { + x: number; + width: number; + height: number; + scored: boolean; +} + +interface Projectile { + x: number; + y: number; + speed: number; + evaded: boolean; + type: "low" | "high"; +} + +interface BossState { + phase: "inactive" | "entering" | "fighting" | "leaving"; + x: number; + targetX: number; + shotsEvaded: number; + cooldown: number; + projectiles: Projectile[]; + bob: number; +} + +interface GameState { + charY: number; + vy: number; + obstacles: Obstacle[]; + score: number; + highScore: number; + speed: 
number; + frame: number; + nextSpawn: number; + running: boolean; + over: boolean; + groundY: number; + boss: BossState; + bossThreshold: number; +} + +/* ------------------------------------------------------------------ */ +/* Helpers */ +/* ------------------------------------------------------------------ */ + +function randInt(min: number, max: number) { + return Math.floor(Math.random() * (max - min + 1)) + min; +} + +function readHighScore(): number { + try { + return parseInt(localStorage.getItem(STORAGE_KEY) || "0", 10) || 0; + } catch { + return 0; + } +} + +function writeHighScore(score: number) { + try { + localStorage.setItem(STORAGE_KEY, String(score)); + } catch { + /* noop */ + } +} + +function makeBoss(): BossState { + return { + phase: "inactive", + x: 0, + targetX: 0, + shotsEvaded: 0, + cooldown: 0, + projectiles: [], + bob: 0, + }; +} + +function makeState(groundY: number): GameState { + return { + charY: groundY - CHAR_SIZE, + vy: 0, + obstacles: [], + score: 0, + highScore: readHighScore(), + speed: BASE_SPEED, + frame: 0, + nextSpawn: randInt(SPAWN_MIN, SPAWN_MAX), + running: false, + over: false, + groundY, + boss: makeBoss(), + bossThreshold: BOSS_INTERVAL, + }; +} + +function gameOver(s: GameState) { + s.running = false; + s.over = true; + if (s.score > s.highScore) { + s.highScore = s.score; + writeHighScore(s.score); + } +} + +/* ------------------------------------------------------------------ */ +/* Projectile collision — shared between fighting & leaving phases */ +/* ------------------------------------------------------------------ */ + +/** Returns true if the player died. 
*/ +function tickProjectiles(s: GameState): boolean { + const boss = s.boss; + + for (const p of boss.projectiles) { + p.x -= p.speed; + + if (!p.evaded && p.x + PROJ_SIZE < CHAR_X) { + p.evaded = true; + boss.shotsEvaded++; + } + + // Collision + if ( + !p.evaded && + CHAR_X + CHAR_SIZE > p.x && + CHAR_X < p.x + PROJ_SIZE && + s.charY + CHAR_SIZE > p.y && + s.charY < p.y + PROJ_SIZE + ) { + gameOver(s); + return true; + } + } + + boss.projectiles = boss.projectiles.filter((p) => p.x + PROJ_SIZE > -20); + return false; +} + +/* ------------------------------------------------------------------ */ +/* Update */ +/* ------------------------------------------------------------------ */ + +function update(s: GameState, canvasWidth: number) { + if (!s.running) return; + + s.frame++; + + // Speed only ramps during regular play + if (s.boss.phase === "inactive") { + s.speed = BASE_SPEED + s.frame * SPEED_INCREMENT; + } + + // ---- Character physics (always active) ---- // + s.vy += GRAVITY; + s.charY += s.vy; + if (s.charY + CHAR_SIZE >= s.groundY) { + s.charY = s.groundY - CHAR_SIZE; + s.vy = 0; + } + + // ---- Trigger boss ---- // + if (s.boss.phase === "inactive" && s.score >= s.bossThreshold) { + s.boss.phase = "entering"; + s.boss.x = canvasWidth + 10; + s.boss.targetX = canvasWidth - BOSS_SIZE - 40; + s.boss.shotsEvaded = 0; + s.boss.cooldown = BOSS_SHOOT_COOLDOWN; + s.boss.projectiles = []; + s.obstacles = []; + } + + // ---- Boss: entering ---- // + if (s.boss.phase === "entering") { + s.boss.bob = Math.sin(s.frame * 0.05) * 3; + s.boss.x -= BOSS_ENTER_SPEED; + if (s.boss.x <= s.boss.targetX) { + s.boss.x = s.boss.targetX; + s.boss.phase = "fighting"; + } + return; // no obstacles while entering + } + + // ---- Boss: fighting ---- // + if (s.boss.phase === "fighting") { + s.boss.bob = Math.sin(s.frame * 0.05) * 3; + + // Shoot + s.boss.cooldown--; + if (s.boss.cooldown <= 0) { + const isLow = Math.random() < 0.5; + s.boss.projectiles.push({ + x: s.boss.x - 
PROJ_SIZE, + y: isLow ? s.groundY - 14 : s.groundY - 70, + speed: PROJ_SPEED, + evaded: false, + type: isLow ? "low" : "high", + }); + s.boss.cooldown = BOSS_SHOOT_COOLDOWN; + } + + if (tickProjectiles(s)) return; + + // Boss defeated? + if (s.boss.shotsEvaded >= BOSS_SHOTS_TO_EVADE) { + s.boss.phase = "leaving"; + s.score += 5; // bonus + s.bossThreshold = s.score + BOSS_INTERVAL; + } + return; + } + + // ---- Boss: leaving ---- // + if (s.boss.phase === "leaving") { + s.boss.bob = Math.sin(s.frame * 0.05) * 3; + s.boss.x += BOSS_LEAVE_SPEED; + + // Still check in-flight projectiles + if (tickProjectiles(s)) return; + + if (s.boss.x > canvasWidth + 50) { + s.boss = makeBoss(); + s.nextSpawn = s.frame + randInt(SPAWN_MIN / 2, SPAWN_MAX / 2); + } + return; + } + + // ---- Regular obstacle play ---- // + if (s.frame >= s.nextSpawn) { + s.obstacles.push({ + x: canvasWidth + 10, + width: randInt(10, 16), + height: randInt(20, 48), + scored: false, + }); + s.nextSpawn = s.frame + randInt(SPAWN_MIN, SPAWN_MAX); + } + + for (const o of s.obstacles) { + o.x -= s.speed; + if (!o.scored && o.x + o.width < CHAR_X) { + o.scored = true; + s.score++; + } + } + + s.obstacles = s.obstacles.filter((o) => o.x + o.width > -20); + + for (const o of s.obstacles) { + const oY = s.groundY - o.height; + if ( + CHAR_X + CHAR_SIZE > o.x && + CHAR_X < o.x + o.width && + s.charY + CHAR_SIZE > oY + ) { + gameOver(s); + return; + } + } +} + +/* ------------------------------------------------------------------ */ +/* Drawing */ +/* ------------------------------------------------------------------ */ + +function drawBoss(ctx: CanvasRenderingContext2D, s: GameState, bg: string) { + const bx = s.boss.x; + const by = s.groundY - BOSS_SIZE + s.boss.bob; + + // Body + ctx.save(); + ctx.fillStyle = COLOR_BOSS; + ctx.globalAlpha = 0.9; + ctx.beginPath(); + ctx.roundRect(bx, by, BOSS_SIZE, BOSS_SIZE, 4); + ctx.fill(); + ctx.restore(); + + // Eyes + ctx.save(); + ctx.fillStyle = bg; + const eyeY = by + 
13; + ctx.beginPath(); + ctx.arc(bx + 10, eyeY, 4, 0, Math.PI * 2); + ctx.fill(); + ctx.beginPath(); + ctx.arc(bx + 26, eyeY, 4, 0, Math.PI * 2); + ctx.fill(); + ctx.restore(); + + // Angry eyebrows + ctx.save(); + ctx.strokeStyle = bg; + ctx.lineWidth = 2; + ctx.beginPath(); + ctx.moveTo(bx + 5, eyeY - 7); + ctx.lineTo(bx + 14, eyeY - 4); + ctx.stroke(); + ctx.beginPath(); + ctx.moveTo(bx + 31, eyeY - 7); + ctx.lineTo(bx + 22, eyeY - 4); + ctx.stroke(); + ctx.restore(); + + // Zigzag mouth + ctx.save(); + ctx.strokeStyle = bg; + ctx.lineWidth = 1.5; + ctx.beginPath(); + ctx.moveTo(bx + 10, by + 27); + ctx.lineTo(bx + 14, by + 24); + ctx.lineTo(bx + 18, by + 27); + ctx.lineTo(bx + 22, by + 24); + ctx.lineTo(bx + 26, by + 27); + ctx.stroke(); + ctx.restore(); +} + +function drawProjectiles(ctx: CanvasRenderingContext2D, boss: BossState) { + ctx.save(); + ctx.fillStyle = COLOR_BOSS; + ctx.globalAlpha = 0.8; + for (const p of boss.projectiles) { + if (p.evaded) continue; + ctx.beginPath(); + ctx.arc( + p.x + PROJ_SIZE / 2, + p.y + PROJ_SIZE / 2, + PROJ_SIZE / 2, + 0, + Math.PI * 2, + ); + ctx.fill(); + } + ctx.restore(); +} + +function draw( + ctx: CanvasRenderingContext2D, + s: GameState, + w: number, + h: number, + fg: string, + started: boolean, +) { + ctx.fillStyle = COLOR_BG; + ctx.fillRect(0, 0, w, h); + + // Ground + ctx.save(); + ctx.strokeStyle = fg; + ctx.globalAlpha = 0.15; + ctx.setLineDash([4, 4]); + ctx.beginPath(); + ctx.moveTo(0, s.groundY); + ctx.lineTo(w, s.groundY); + ctx.stroke(); + ctx.restore(); + + // Character + ctx.save(); + ctx.fillStyle = COLOR_CHAR; + ctx.globalAlpha = 0.85; + ctx.beginPath(); + ctx.roundRect(CHAR_X, s.charY, CHAR_SIZE, CHAR_SIZE, 3); + ctx.fill(); + ctx.restore(); + + // Eyes + ctx.save(); + ctx.fillStyle = COLOR_BG; + ctx.beginPath(); + ctx.arc(CHAR_X + 6, s.charY + 7, 2.5, 0, Math.PI * 2); + ctx.fill(); + ctx.beginPath(); + ctx.arc(CHAR_X + 12, s.charY + 7, 2.5, 0, Math.PI * 2); + ctx.fill(); + ctx.restore(); + + // 
Obstacles + ctx.save(); + ctx.fillStyle = fg; + ctx.globalAlpha = 0.55; + for (const o of s.obstacles) { + ctx.fillRect(o.x, s.groundY - o.height, o.width, o.height); + } + ctx.restore(); + + // Boss + projectiles + if (s.boss.phase !== "inactive") { + drawBoss(ctx, s, COLOR_BG); + drawProjectiles(ctx, s.boss); + } + + // Score HUD + ctx.save(); + ctx.fillStyle = fg; + ctx.globalAlpha = 0.5; + ctx.font = "bold 11px monospace"; + ctx.textAlign = "right"; + ctx.fillText(`Score: ${s.score}`, w - 12, 20); + ctx.fillText(`Best: ${s.highScore}`, w - 12, 34); + if (s.boss.phase === "fighting") { + ctx.fillText( + `Evade: ${s.boss.shotsEvaded}/${BOSS_SHOTS_TO_EVADE}`, + w - 12, + 48, + ); + } + ctx.restore(); + + // Prompts + if (!started && !s.running && !s.over) { + ctx.save(); + ctx.fillStyle = fg; + ctx.globalAlpha = 0.5; + ctx.font = "12px sans-serif"; + ctx.textAlign = "center"; + ctx.fillText("Click or press Space to play while you wait", w / 2, h / 2); + ctx.restore(); + } + + if (s.over) { + ctx.save(); + ctx.fillStyle = fg; + ctx.globalAlpha = 0.7; + ctx.font = "bold 13px sans-serif"; + ctx.textAlign = "center"; + ctx.fillText("Game Over", w / 2, h / 2 - 8); + ctx.font = "11px sans-serif"; + ctx.fillText("Click or Space to restart", w / 2, h / 2 + 10); + ctx.restore(); + } +} + +/* ------------------------------------------------------------------ */ +/* Hook */ +/* ------------------------------------------------------------------ */ + +export function useMiniGame() { + const canvasRef = useRef(null); + const stateRef = useRef(null); + const rafRef = useRef(0); + const startedRef = useRef(false); + + useEffect(() => { + const canvas = canvasRef.current; + if (!canvas) return; + + const container = canvas.parentElement; + if (container) { + canvas.width = container.clientWidth; + canvas.height = CANVAS_HEIGHT; + } + + const groundY = canvas.height - GROUND_PAD; + stateRef.current = makeState(groundY); + + const style = getComputedStyle(canvas); + let fg = 
style.color || "#71717a"; + + // -------------------------------------------------------------- // + // Jump // + // -------------------------------------------------------------- // + function jump() { + const s = stateRef.current; + if (!s) return; + + if (s.over) { + const hs = s.highScore; + const gy = s.groundY; + stateRef.current = makeState(gy); + stateRef.current.highScore = hs; + stateRef.current.running = true; + startedRef.current = true; + return; + } + + if (!s.running) { + s.running = true; + startedRef.current = true; + return; + } + + // Only jump when on the ground + if (s.charY + CHAR_SIZE >= s.groundY) { + s.vy = JUMP_FORCE; + } + } + + function onKey(e: KeyboardEvent) { + if (e.code === "Space" || e.key === " ") { + e.preventDefault(); + jump(); + } + } + + function onClick() { + canvas?.focus(); + jump(); + } + + // -------------------------------------------------------------- // + // Loop // + // -------------------------------------------------------------- // + function loop() { + const s = stateRef.current; + if (!canvas || !s) return; + const ctx = canvas.getContext("2d"); + if (!ctx) return; + + update(s, canvas.width); + draw(ctx, s, canvas.width, canvas.height, fg, startedRef.current); + rafRef.current = requestAnimationFrame(loop); + } + + rafRef.current = requestAnimationFrame(loop); + + canvas.addEventListener("click", onClick); + canvas.addEventListener("keydown", onKey); + + const observer = new ResizeObserver((entries) => { + for (const entry of entries) { + canvas.width = entry.contentRect.width; + canvas.height = CANVAS_HEIGHT; + if (stateRef.current) { + stateRef.current.groundY = canvas.height - GROUND_PAD; + } + const cs = getComputedStyle(canvas); + fg = cs.color || fg; + } + }); + if (container) observer.observe(container); + + return () => { + cancelAnimationFrame(rafRef.current); + canvas.removeEventListener("click", onClick); + canvas.removeEventListener("keydown", onKey); + observer.disconnect(); + }; + }, []); + + 
return { canvasRef }; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts index 3dbba6e790..28e9ba7cfb 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts @@ -1,10 +1,14 @@ import { useGetV2ListSessions } from "@/app/api/__generated__/endpoints/chat/chat"; +import { toast } from "@/components/molecules/Toast/use-toast"; import { useBreakpoint } from "@/lib/hooks/useBreakpoint"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { useChat } from "@ai-sdk/react"; import { DefaultChatTransport } from "ai"; -import { useEffect, useMemo, useState } from "react"; +import { useEffect, useMemo, useRef, useState } from "react"; import { useChatSession } from "./useChatSession"; +import { useLongRunningToolPolling } from "./hooks/useLongRunningToolPolling"; + +const STREAM_START_TIMEOUT_MS = 12_000; export function useCopilotPage() { const { isUserLoading, isLoggedIn } = useSupabase(); @@ -52,6 +56,24 @@ export function useCopilotPage() { transport: transport ?? undefined, }); + // Abort the stream if the backend doesn't start sending data within 12s. + const stopRef = useRef(stop); + stopRef.current = stop; + useEffect(() => { + if (status !== "submitted") return; + + const timer = setTimeout(() => { + stopRef.current(); + toast({ + title: "Stream timed out", + description: "The server took too long to respond. 
Please try again.", + variant: "destructive", + }); + }, STREAM_START_TIMEOUT_MS); + + return () => clearTimeout(timer); + }, [status]); + useEffect(() => { if (!hydratedMessages || hydratedMessages.length === 0) return; setMessages((prev) => { @@ -60,6 +82,11 @@ export function useCopilotPage() { }); }, [hydratedMessages, setMessages]); + // Poll session endpoint when a long-running tool (create_agent, edit_agent) + // is in progress. When the backend completes, the session data will contain + // the final tool output — this hook detects the change and updates messages. + useLongRunningToolPolling(sessionId, messages, setMessages); + // Clear messages when session is null useEffect(() => { if (!sessionId) setMessages([]);