From 350ad3591b1e7a9f5ebf5a2d3053339c0f8b57e2 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Fri, 30 Jan 2026 12:01:51 +0100 Subject: [PATCH 1/6] fix(backend/chat): Filter credentials for graph execution by scopes (#11881) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [SECRT-1842: run_agent tool does not correctly use credentials - agents fail with insufficient auth scopes](https://linear.app/autogpt/issue/SECRT-1842) ### Changes 🏗️ - Include scopes in credentials filter in `backend.api.features.chat.tools.utils.match_user_credentials_to_graph` ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - CI must pass - It's broken now and a simple change so we'll test in the dev deployment --- .../backend/api/features/chat/tools/utils.py | 40 ++++++++++++++++--- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py index a2ac91dc65..0046d0b249 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py @@ -8,7 +8,7 @@ from backend.api.features.library import model as library_model from backend.api.features.store import db as store_db from backend.data import graph as graph_db from backend.data.graph import GraphModel -from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput +from backend.data.model import Credentials, CredentialsFieldInfo, CredentialsMetaInput from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.util.exceptions import NotFoundError @@ -266,13 +266,14 @@ async def match_user_credentials_to_graph( credential_requirements, _node_fields, ) in aggregated_creds.items(): - # Find first matching credential by provider and type + # Find first matching credential by provider, type, and scopes matching_cred = next( ( cred for cred in available_creds if cred.provider in credential_requirements.provider and cred.type in credential_requirements.supported_types + and _credential_has_required_scopes(cred, credential_requirements) ), None, ) @@ -296,10 +297,17 @@ async def match_user_credentials_to_graph( f"{credential_field_name} (validation failed: {e})" ) else: + # Build a helpful error message including scope requirements + error_parts = [ + f"provider in {list(credential_requirements.provider)}", + f"type in {list(credential_requirements.supported_types)}", + ] + if credential_requirements.required_scopes: + error_parts.append( + f"scopes including {list(credential_requirements.required_scopes)}" + ) missing_creds.append( - f"{credential_field_name} " - f"(requires provider in {list(credential_requirements.provider)}, " - f"type in {list(credential_requirements.supported_types)})" + f"{credential_field_name} (requires {', '.join(error_parts)})" ) logger.info( @@ -309,6 +317,28 @@ async def match_user_credentials_to_graph( return graph_credentials_inputs, missing_creds +def _credential_has_required_scopes( + credential: Credentials, + requirements: CredentialsFieldInfo, +) -> bool: + """ + Check if a credential has all the scopes required by the block. + + For OAuth2 credentials, verifies that the credential's scopes are a superset + of the required scopes. For other credential types, returns True (no scope check). 
+ """ + # Only OAuth2 credentials have scopes to check + if credential.type != "oauth2": + return True + + # If no scopes are required, any credential matches + if not requirements.required_scopes: + return True + + # Check that credential scopes are a superset of required scopes + return set(credential.scopes).issuperset(requirements.required_scopes) + + async def check_user_has_required_credentials( user_id: str, required_credentials: list[CredentialsMetaInput], From b72521daa9bf9e84e33f03d312728a5175eb3228 Mon Sep 17 00:00:00 2001 From: Otto Date: Fri, 30 Jan 2026 22:59:45 +0000 Subject: [PATCH 2/6] fix(readme): update broken self-hosting docs link (#11911) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary The self-hosting guide link in README.md was broken. **Old link:** `https://docs.agpt.co/platform/getting-started/` - Redirects to `https://agpt.co/docs/platform/getting-started` - Returns HTTP 400 ❌ **New link:** `https://agpt.co/docs/platform/getting-started/getting-started` - Works correctly ✅ ## Changes - Updated the self-hosting guide URL in README.md Fixes #OPEN-2973 --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3572fe318b..349d8818ef 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,7 @@ Before proceeding with the installation, ensure your system meets the following ### Updated Setup Instructions: We've moved to a fully maintained and regularly updated documentation site. -👉 [Follow the official self-hosting guide here](https://docs.agpt.co/platform/getting-started/) +👉 [Follow the official self-hosting guide here](https://agpt.co/docs/platform/getting-started/getting-started) This tutorial assumes you have Docker, VSCode, git and npm installed. From 18a1661fa3ae88ee84465d30d30b76d26c93f08d Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Fri, 30 Jan 2026 18:18:21 -0600 Subject: [PATCH 3/6] feat: add library agent fetching with two-phase search for sub-agent support (#11889) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Context When users ask the chat to create agents, they may want to compose workflows that reuse their existing agents as sub-agents. For this to work, the Agent Generator service needs to know what agents the user has available. **Challenge:** Users can have large libraries with many agents. Fetching all of them would be slow and provide too much context to the LLM. ## Solution This PR implements **search-based library agent fetching** with a **two-phase search** strategy: 1. **Phase 1 (Initial Search):** When the user describes their goal, we search for relevant library agents using the goal as the search query 2. **Phase 2 (Step-Based Enrichment):** After the goal is decomposed into steps, we extract keywords from those steps and search for additional relevant agents This ensures we find agents that are relevant to both the high-level goal AND the specific steps identified. 
### Example Flow ``` User goal: "Create an agent that fetches weather and sends a summary email" Phase 1: Search for "weather email summary" → finds "Weather Fetcher" agent Phase 2: After decomposition identifies steps like "send email notification" → searches "send email notification" → finds "Gmail Sender" agent ``` ### Changes **Library Agent Fetching:** - `get_library_agents_for_generation()` - Search-based fetching from user's library - `search_marketplace_agents_for_generation()` - Search public marketplace - `get_all_relevant_agents_for_generation()` - Combines both with deduplication **Two-Phase Search:** - `extract_search_terms_from_steps()` - Extracts keywords from decomposed steps - `enrich_library_agents_from_steps()` - Searches for additional agents based on steps - Integrated into `create_agent.py` as "Step 1.5" after goal decomposition **Type Safety:** - Added `TypedDict` definitions: `LibraryAgentSummary`, `MarketplaceAgentSummary`, `DecompositionStep`, `DecompositionResult` ### Design Decisions - **Search-based, not fetch-all:** Scalable for large libraries - **Library agents prioritized:** They have full schemas; marketplace agents have basic info only - **Deduplication by name and graph_id:** Prevents duplicates across searches - **Graceful degradation:** Failures don't block agent generation - **Limited to 3 search terms:** Avoids excessive API calls during enrichment ## Related PR - Agent Generator: https://github.com/Significant-Gravitas/AutoGPT-Agent-Generator/pull/103 ## Test plan - [x] `test_library_agents.py` - 19 tests covering all new functions - [x] `test_service.py` - 4 tests for library_agents passthrough - [ ] Integration test: Create agent with library sub-agent composition --- .../backend/api/features/chat/service.py | 5 + .../chat/tools/agent_generator/__init__.py | 42 +- .../chat/tools/agent_generator/core.py | 615 ++++++++++++- .../chat/tools/agent_generator/errors.py | 66 +- .../chat/tools/agent_generator/service.py | 40 +- .../api/features/chat/tools/agent_search.py | 132 ++- .../api/features/chat/tools/create_agent.py | 53 +- .../api/features/chat/tools/edit_agent.py | 32 +- .../backend/api/features/library/db.py | 8 +- .../backend/api/features/library/model.py | 118 ++- .../backend/snapshots/lib_agts_search | 8 + .../agent_generator/test_core_integration.py | 12 +- .../agent_generator/test_library_agents.py | 841 ++++++++++++++++++ .../test/agent_generator/test_service.py | 134 +++ .../legacy-builder/CustomNode/CustomNode.tsx | 2 +- .../frontend/src/app/api/openapi.json | 36 + .../components/ChatMessage/ChatMessage.tsx | 8 + .../ClarificationQuestionsWidget.tsx | 65 +- .../components/ToolResponseMessage/helpers.ts | 6 +- 19 files changed, 2069 insertions(+), 154 deletions(-) create mode 100644 autogpt_platform/backend/test/agent_generator/test_library_agents.py diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index 20216162b5..bcd6856503 100644 --- a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -1834,6 +1834,11 @@ async def _execute_long_running_tool( tool_call_id=tool_call_id, result=error_response.model_dump_json(), ) + # Generate LLM continuation so user sees explanation even for errors + try: + await _generate_llm_continuation(session_id=session_id, user_id=user_id) + except Exception as llm_err: + logger.warning(f"Failed to generate LLM continuation for error: 
{llm_err}") finally: await _mark_operation_completed(tool_call_id) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/__init__.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/__init__.py index 499025b7dc..b7650b3cbd 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/__init__.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/__init__.py @@ -2,30 +2,54 @@ from .core import ( AgentGeneratorNotConfiguredError, + AgentJsonValidationError, + AgentSummary, + DecompositionResult, + DecompositionStep, + LibraryAgentSummary, + MarketplaceAgentSummary, decompose_goal, + enrich_library_agents_from_steps, + extract_search_terms_from_steps, + extract_uuids_from_text, generate_agent, generate_agent_patch, get_agent_as_json, + get_all_relevant_agents_for_generation, + get_library_agent_by_graph_id, + get_library_agent_by_id, + get_library_agents_for_generation, json_to_graph, save_agent_to_library, + search_marketplace_agents_for_generation, ) from .errors import get_user_message_for_error from .service import health_check as check_external_service_health from .service import is_external_service_configured __all__ = [ - # Core functions + "AgentGeneratorNotConfiguredError", + "AgentJsonValidationError", + "AgentSummary", + "DecompositionResult", + "DecompositionStep", + "LibraryAgentSummary", + "MarketplaceAgentSummary", + "check_external_service_health", "decompose_goal", + "enrich_library_agents_from_steps", + "extract_search_terms_from_steps", + "extract_uuids_from_text", "generate_agent", "generate_agent_patch", - "save_agent_to_library", "get_agent_as_json", - "json_to_graph", - # Exceptions - "AgentGeneratorNotConfiguredError", - # Service - "is_external_service_configured", - "check_external_service_health", - # Error handling + "get_all_relevant_agents_for_generation", + "get_library_agent_by_graph_id", + "get_library_agent_by_id", + "get_library_agents_for_generation", "get_user_message_for_error", + "is_external_service_configured", + "json_to_graph", + "save_agent_to_library", + "search_marketplace_agents_for_generation", ] diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py index d56e33cbb0..466f6438a3 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py @@ -1,11 +1,21 @@ """Core agent generation functions.""" import logging +import re import uuid -from typing import Any +from typing import Any, NotRequired, TypedDict from backend.api.features.library import db as library_db -from backend.data.graph import Graph, Link, Node, create_graph +from backend.api.features.store import db as store_db +from backend.data.graph import ( + Graph, + Link, + Node, + create_graph, + get_graph, + get_graph_all_versions, +) +from backend.util.exceptions import DatabaseError, NotFoundError from .service import ( decompose_goal_external, @@ -16,6 +26,74 @@ from .service import ( logger = logging.getLogger(__name__) +AGENT_EXECUTOR_BLOCK_ID = "e189baac-8c20-45a1-94a7-55177ea42565" + + +class ExecutionSummary(TypedDict): + """Summary of a single execution for quality assessment.""" + + status: str + correctness_score: NotRequired[float] + activity_summary: NotRequired[str] + + +class LibraryAgentSummary(TypedDict): + """Summary of a 
library agent for sub-agent composition. + + Includes recent executions to help the LLM decide whether to use this agent. + Each execution shows status, correctness_score (0-1), and activity_summary. + """ + + graph_id: str + graph_version: int + name: str + description: str + input_schema: dict[str, Any] + output_schema: dict[str, Any] + recent_executions: NotRequired[list[ExecutionSummary]] + + +class MarketplaceAgentSummary(TypedDict): + """Summary of a marketplace agent for sub-agent composition.""" + + name: str + description: str + sub_heading: str + creator: str + is_marketplace_agent: bool + + +class DecompositionStep(TypedDict, total=False): + """A single step in decomposed instructions.""" + + description: str + action: str + block_name: str + tool: str + name: str + + +class DecompositionResult(TypedDict, total=False): + """Result from decompose_goal - can be instructions, questions, or error.""" + + type: str + steps: list[DecompositionStep] + questions: list[dict[str, Any]] + error: str + error_type: str + + +AgentSummary = LibraryAgentSummary | MarketplaceAgentSummary | dict[str, Any] + + +def _to_dict_list( + agents: list[AgentSummary] | list[dict[str, Any]] | None, +) -> list[dict[str, Any]] | None: + """Convert typed agent summaries to plain dicts for external service calls.""" + if agents is None: + return None + return [dict(a) for a in agents] + class AgentGeneratorNotConfiguredError(Exception): """Raised when the external Agent Generator service is not configured.""" @@ -36,15 +114,414 @@ def _check_service_configured() -> None: ) -async def decompose_goal(description: str, context: str = "") -> dict[str, Any] | None: +_UUID_PATTERN = re.compile( + r"[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}", + re.IGNORECASE, +) + + +def extract_uuids_from_text(text: str) -> list[str]: + """Extract all UUID v4 strings from text. + + Args: + text: Text that may contain UUIDs (e.g., user's goal description) + + Returns: + List of unique UUIDs found in the text (lowercase) + """ + matches = _UUID_PATTERN.findall(text) + return list({m.lower() for m in matches}) + + +async def get_library_agent_by_id( + user_id: str, agent_id: str +) -> LibraryAgentSummary | None: + """Fetch a specific library agent by its ID (library agent ID or graph_id). + + This function tries multiple lookup strategies: + 1. First tries to find by graph_id (AgentGraph primary key) + 2. 
If not found, tries to find by library agent ID (LibraryAgent primary key) + + This handles both cases: + - User provides graph_id (e.g., from AgentExecutorBlock) + - User provides library agent ID (e.g., from library URL) + + Args: + user_id: The user ID + agent_id: The ID to look up (can be graph_id or library agent ID) + + Returns: + LibraryAgentSummary if found, None otherwise + """ + try: + agent = await library_db.get_library_agent_by_graph_id(user_id, agent_id) + if agent: + logger.debug(f"Found library agent by graph_id: {agent.name}") + return LibraryAgentSummary( + graph_id=agent.graph_id, + graph_version=agent.graph_version, + name=agent.name, + description=agent.description, + input_schema=agent.input_schema, + output_schema=agent.output_schema, + ) + except DatabaseError: + raise + except Exception as e: + logger.debug(f"Could not fetch library agent by graph_id {agent_id}: {e}") + + try: + agent = await library_db.get_library_agent(agent_id, user_id) + if agent: + logger.debug(f"Found library agent by library_id: {agent.name}") + return LibraryAgentSummary( + graph_id=agent.graph_id, + graph_version=agent.graph_version, + name=agent.name, + description=agent.description, + input_schema=agent.input_schema, + output_schema=agent.output_schema, + ) + except NotFoundError: + logger.debug(f"Library agent not found by library_id: {agent_id}") + except DatabaseError: + raise + except Exception as e: + logger.warning( + f"Could not fetch library agent by library_id {agent_id}: {e}", + exc_info=True, + ) + + return None + + +get_library_agent_by_graph_id = get_library_agent_by_id + + +async def get_library_agents_for_generation( + user_id: str, + search_query: str | None = None, + exclude_graph_id: str | None = None, + max_results: int = 15, +) -> list[LibraryAgentSummary]: + """Fetch user's library agents formatted for Agent Generator. + + Uses search-based fetching to return relevant agents instead of all agents. + This is more scalable for users with large libraries. 
+ + Includes recent_executions list to help the LLM assess agent quality: + - Each execution has status, correctness_score (0-1), and activity_summary + - This gives the LLM concrete examples of recent performance + + Args: + user_id: The user ID + search_query: Optional search term to find relevant agents (user's goal/description) + exclude_graph_id: Optional graph ID to exclude (prevents circular references) + max_results: Maximum number of agents to return (default 15) + + Returns: + List of LibraryAgentSummary with schemas and recent executions for sub-agent composition + """ + try: + response = await library_db.list_library_agents( + user_id=user_id, + search_term=search_query, + page=1, + page_size=max_results, + include_executions=True, + ) + + results: list[LibraryAgentSummary] = [] + for agent in response.agents: + if exclude_graph_id is not None and agent.graph_id == exclude_graph_id: + continue + + summary = LibraryAgentSummary( + graph_id=agent.graph_id, + graph_version=agent.graph_version, + name=agent.name, + description=agent.description, + input_schema=agent.input_schema, + output_schema=agent.output_schema, + ) + if agent.recent_executions: + exec_summaries: list[ExecutionSummary] = [] + for ex in agent.recent_executions: + exec_sum = ExecutionSummary(status=ex.status) + if ex.correctness_score is not None: + exec_sum["correctness_score"] = ex.correctness_score + if ex.activity_summary: + exec_sum["activity_summary"] = ex.activity_summary + exec_summaries.append(exec_sum) + summary["recent_executions"] = exec_summaries + results.append(summary) + return results + except DatabaseError: + raise + except Exception as e: + logger.warning(f"Failed to fetch library agents: {e}") + return [] + + +async def search_marketplace_agents_for_generation( + search_query: str, + max_results: int = 10, +) -> list[MarketplaceAgentSummary]: + """Search marketplace agents formatted for Agent Generator. + + Note: This returns basic agent info. Full input/output schemas would require + additional graph fetches and is a potential future enhancement. + + Args: + search_query: Search term to find relevant public agents + max_results: Maximum number of agents to return (default 10) + + Returns: + List of MarketplaceAgentSummary (without detailed schemas for now) + """ + try: + response = await store_db.get_store_agents( + search_query=search_query, + page=1, + page_size=max_results, + ) + + results: list[MarketplaceAgentSummary] = [] + for agent in response.agents: + results.append( + MarketplaceAgentSummary( + name=agent.agent_name, + description=agent.description, + sub_heading=agent.sub_heading, + creator=agent.creator, + is_marketplace_agent=True, + ) + ) + return results + except Exception as e: + logger.warning(f"Failed to search marketplace agents: {e}") + return [] + + +async def get_all_relevant_agents_for_generation( + user_id: str, + search_query: str | None = None, + exclude_graph_id: str | None = None, + include_library: bool = True, + include_marketplace: bool = True, + max_library_results: int = 15, + max_marketplace_results: int = 10, +) -> list[AgentSummary]: + """Fetch relevant agents from library and/or marketplace. + + Searches both user's library and marketplace by default. + Explicitly mentioned UUIDs in the search query are always looked up. 
+ + Args: + user_id: The user ID + search_query: Search term to find relevant agents (user's goal/description) + exclude_graph_id: Optional graph ID to exclude (prevents circular references) + include_library: Whether to search user's library (default True) + include_marketplace: Whether to also search marketplace (default True) + max_library_results: Max library agents to return (default 15) + max_marketplace_results: Max marketplace agents to return (default 10) + + Returns: + List of AgentSummary, library agents first (with full schemas), + then marketplace agents (basic info only) + """ + agents: list[AgentSummary] = [] + seen_graph_ids: set[str] = set() + + if search_query: + mentioned_uuids = extract_uuids_from_text(search_query) + for graph_id in mentioned_uuids: + if graph_id == exclude_graph_id: + continue + agent = await get_library_agent_by_graph_id(user_id, graph_id) + agent_graph_id = agent.get("graph_id") if agent else None + if agent and agent_graph_id and agent_graph_id not in seen_graph_ids: + agents.append(agent) + seen_graph_ids.add(agent_graph_id) + logger.debug( + f"Found explicitly mentioned agent: {agent.get('name') or 'Unknown'}" + ) + + if include_library: + library_agents = await get_library_agents_for_generation( + user_id=user_id, + search_query=search_query, + exclude_graph_id=exclude_graph_id, + max_results=max_library_results, + ) + for agent in library_agents: + graph_id = agent.get("graph_id") + if graph_id and graph_id not in seen_graph_ids: + agents.append(agent) + seen_graph_ids.add(graph_id) + + if include_marketplace and search_query: + marketplace_agents = await search_marketplace_agents_for_generation( + search_query=search_query, + max_results=max_marketplace_results, + ) + library_names: set[str] = set() + for a in agents: + name = a.get("name") + if name and isinstance(name, str): + library_names.add(name.lower()) + for agent in marketplace_agents: + agent_name = agent.get("name") + if agent_name and isinstance(agent_name, str): + if agent_name.lower() not in library_names: + agents.append(agent) + + return agents + + +def extract_search_terms_from_steps( + decomposition_result: DecompositionResult | dict[str, Any], +) -> list[str]: + """Extract search terms from decomposed instruction steps. + + Analyzes the decomposition result to extract relevant keywords + for additional library agent searches. 
+ + Args: + decomposition_result: Result from decompose_goal containing steps + + Returns: + List of unique search terms extracted from steps + """ + search_terms: list[str] = [] + + if decomposition_result.get("type") != "instructions": + return search_terms + + steps = decomposition_result.get("steps", []) + if not steps: + return search_terms + + step_keys: list[str] = ["description", "action", "block_name", "tool", "name"] + + for step in steps: + for key in step_keys: + value = step.get(key) # type: ignore[union-attr] + if isinstance(value, str) and len(value) > 3: + search_terms.append(value) + + seen: set[str] = set() + unique_terms: list[str] = [] + for term in search_terms: + term_lower = term.lower() + if term_lower not in seen: + seen.add(term_lower) + unique_terms.append(term) + + return unique_terms + + +async def enrich_library_agents_from_steps( + user_id: str, + decomposition_result: DecompositionResult | dict[str, Any], + existing_agents: list[AgentSummary] | list[dict[str, Any]], + exclude_graph_id: str | None = None, + include_marketplace: bool = True, + max_additional_results: int = 10, +) -> list[AgentSummary] | list[dict[str, Any]]: + """Enrich library agents list with additional searches based on decomposed steps. + + This implements two-phase search: after decomposition, we search for additional + relevant agents based on the specific steps identified. + + Args: + user_id: The user ID + decomposition_result: Result from decompose_goal containing steps + existing_agents: Already fetched library agents from initial search + exclude_graph_id: Optional graph ID to exclude + include_marketplace: Whether to also search marketplace + max_additional_results: Max additional agents per search term (default 10) + + Returns: + Combined list of library agents (existing + newly discovered) + """ + search_terms = extract_search_terms_from_steps(decomposition_result) + + if not search_terms: + return existing_agents + + existing_ids: set[str] = set() + existing_names: set[str] = set() + + for agent in existing_agents: + agent_name = agent.get("name") + if agent_name and isinstance(agent_name, str): + existing_names.add(agent_name.lower()) + graph_id = agent.get("graph_id") # type: ignore[call-overload] + if graph_id and isinstance(graph_id, str): + existing_ids.add(graph_id) + + all_agents: list[AgentSummary] | list[dict[str, Any]] = list(existing_agents) + + for term in search_terms[:3]: + try: + additional_agents = await get_all_relevant_agents_for_generation( + user_id=user_id, + search_query=term, + exclude_graph_id=exclude_graph_id, + include_marketplace=include_marketplace, + max_library_results=max_additional_results, + max_marketplace_results=5, + ) + + for agent in additional_agents: + agent_name = agent.get("name") + if not agent_name or not isinstance(agent_name, str): + continue + agent_name_lower = agent_name.lower() + + if agent_name_lower in existing_names: + continue + + graph_id = agent.get("graph_id") # type: ignore[call-overload] + if graph_id and graph_id in existing_ids: + continue + + all_agents.append(agent) + existing_names.add(agent_name_lower) + if graph_id and isinstance(graph_id, str): + existing_ids.add(graph_id) + + except DatabaseError: + logger.error(f"Database error searching for agents with term '{term}'") + raise + except Exception as e: + logger.warning( + f"Failed to search for additional agents with term '{term}': {e}" + ) + + logger.debug( + f"Enriched library agents: {len(existing_agents)} initial + " + f"{len(all_agents) - 
len(existing_agents)} additional = {len(all_agents)} total" + ) + + return all_agents + + +async def decompose_goal( + description: str, + context: str = "", + library_agents: list[AgentSummary] | None = None, +) -> DecompositionResult | None: """Break down a goal into steps or return clarifying questions. Args: description: Natural language goal description context: Additional context (e.g., answers to previous questions) + library_agents: User's library agents available for sub-agent composition Returns: - Dict with either: + DecompositionResult with either: - {"type": "clarifying_questions", "questions": [...]} - {"type": "instructions", "steps": [...]} Or None on error @@ -54,14 +531,21 @@ async def decompose_goal(description: str, context: str = "") -> dict[str, Any] """ _check_service_configured() logger.info("Calling external Agent Generator service for decompose_goal") - return await decompose_goal_external(description, context) + result = await decompose_goal_external( + description, context, _to_dict_list(library_agents) + ) + return result # type: ignore[return-value] -async def generate_agent(instructions: dict[str, Any]) -> dict[str, Any] | None: +async def generate_agent( + instructions: DecompositionResult | dict[str, Any], + library_agents: list[AgentSummary] | list[dict[str, Any]] | None = None, +) -> dict[str, Any] | None: """Generate agent JSON from instructions. Args: instructions: Structured instructions from decompose_goal + library_agents: User's library agents available for sub-agent composition Returns: Agent JSON dict, error dict {"type": "error", ...}, or None on error @@ -71,12 +555,12 @@ async def generate_agent(instructions: dict[str, Any]) -> dict[str, Any] | None: """ _check_service_configured() logger.info("Calling external Agent Generator service for generate_agent") - result = await generate_agent_external(instructions) + result = await generate_agent_external( + dict(instructions), _to_dict_list(library_agents) + ) if result: - # Check if it's an error response - pass through as-is if isinstance(result, dict) and result.get("type") == "error": return result - # Ensure required fields for successful agent generation if "id" not in result: result["id"] = str(uuid.uuid4()) if "version" not in result: @@ -86,6 +570,12 @@ async def generate_agent(instructions: dict[str, Any]) -> dict[str, Any] | None: return result +class AgentJsonValidationError(Exception): + """Raised when agent JSON is invalid or missing required fields.""" + + pass + + def json_to_graph(agent_json: dict[str, Any]) -> Graph: """Convert agent JSON dict to Graph model. 
@@ -94,25 +584,55 @@ def json_to_graph(agent_json: dict[str, Any]) -> Graph: Returns: Graph ready for saving + + Raises: + AgentJsonValidationError: If required fields are missing from nodes or links """ nodes = [] - for n in agent_json.get("nodes", []): + for idx, n in enumerate(agent_json.get("nodes", [])): + block_id = n.get("block_id") + if not block_id: + node_id = n.get("id", f"index_{idx}") + raise AgentJsonValidationError( + f"Node '{node_id}' is missing required field 'block_id'" + ) node = Node( id=n.get("id", str(uuid.uuid4())), - block_id=n["block_id"], + block_id=block_id, input_default=n.get("input_default", {}), metadata=n.get("metadata", {}), ) nodes.append(node) links = [] - for link_data in agent_json.get("links", []): + for idx, link_data in enumerate(agent_json.get("links", [])): + source_id = link_data.get("source_id") + sink_id = link_data.get("sink_id") + source_name = link_data.get("source_name") + sink_name = link_data.get("sink_name") + + missing_fields = [] + if not source_id: + missing_fields.append("source_id") + if not sink_id: + missing_fields.append("sink_id") + if not source_name: + missing_fields.append("source_name") + if not sink_name: + missing_fields.append("sink_name") + + if missing_fields: + link_id = link_data.get("id", f"index_{idx}") + raise AgentJsonValidationError( + f"Link '{link_id}' is missing required fields: {', '.join(missing_fields)}" + ) + link = Link( id=link_data.get("id", str(uuid.uuid4())), - source_id=link_data["source_id"], - sink_id=link_data["sink_id"], - source_name=link_data["source_name"], - sink_name=link_data["sink_name"], + source_id=source_id, + sink_id=sink_id, + source_name=source_name, + sink_name=sink_name, is_static=link_data.get("is_static", False), ) links.append(link) @@ -133,22 +653,40 @@ def _reassign_node_ids(graph: Graph) -> None: This is needed when creating a new version to avoid unique constraint violations. """ - # Create mapping from old node IDs to new UUIDs id_map = {node.id: str(uuid.uuid4()) for node in graph.nodes} - # Reassign node IDs for node in graph.nodes: node.id = id_map[node.id] - # Update link references to use new node IDs for link in graph.links: - link.id = str(uuid.uuid4()) # Also give links new IDs + link.id = str(uuid.uuid4()) if link.source_id in id_map: link.source_id = id_map[link.source_id] if link.sink_id in id_map: link.sink_id = id_map[link.sink_id] +def _populate_agent_executor_user_ids(agent_json: dict[str, Any], user_id: str) -> None: + """Populate user_id in AgentExecutorBlock nodes. + + The external agent generator creates AgentExecutorBlock nodes with empty user_id. + This function fills in the actual user_id so sub-agents run with correct permissions. 
+ + Args: + agent_json: Agent JSON dict (modified in place) + user_id: User ID to set + """ + for node in agent_json.get("nodes", []): + if node.get("block_id") == AGENT_EXECUTOR_BLOCK_ID: + input_default = node.get("input_default") or {} + if not input_default.get("user_id"): + input_default["user_id"] = user_id + node["input_default"] = input_default + logger.debug( + f"Set user_id for AgentExecutorBlock node {node.get('id')}" + ) + + async def save_agent_to_library( agent_json: dict[str, Any], user_id: str, is_update: bool = False ) -> tuple[Graph, Any]: @@ -162,33 +700,27 @@ async def save_agent_to_library( Returns: Tuple of (created Graph, LibraryAgent) """ - from backend.data.graph import get_graph_all_versions + # Populate user_id in AgentExecutorBlock nodes before conversion + _populate_agent_executor_user_ids(agent_json, user_id) graph = json_to_graph(agent_json) if is_update: - # For updates, keep the same graph ID but increment version - # and reassign node/link IDs to avoid conflicts if graph.id: existing_versions = await get_graph_all_versions(graph.id, user_id) if existing_versions: latest_version = max(v.version for v in existing_versions) graph.version = latest_version + 1 - # Reassign node IDs (but keep graph ID the same) _reassign_node_ids(graph) logger.info(f"Updating agent {graph.id} to version {graph.version}") else: - # For new agents, always generate a fresh UUID to avoid collisions graph.id = str(uuid.uuid4()) graph.version = 1 - # Reassign all node IDs as well _reassign_node_ids(graph) logger.info(f"Creating new agent with ID {graph.id}") - # Save to database created_graph = await create_graph(graph, user_id) - # Add to user's library (or update existing library agent) library_agents = await library_db.create_library_agent( graph=created_graph, user_id=user_id, @@ -200,25 +732,31 @@ async def save_agent_to_library( async def get_agent_as_json( - graph_id: str, user_id: str | None + agent_id: str, user_id: str | None ) -> dict[str, Any] | None: """Fetch an agent and convert to JSON format for editing. Args: - graph_id: Graph ID or library agent ID + agent_id: Graph ID or library agent ID user_id: User ID Returns: Agent as JSON dict or None if not found """ - from backend.data.graph import get_graph + graph = await get_graph(agent_id, version=None, user_id=user_id) + + if not graph and user_id: + try: + library_agent = await library_db.get_library_agent(agent_id, user_id) + graph = await get_graph( + library_agent.graph_id, version=None, user_id=user_id + ) + except NotFoundError: + pass - # Try to get the graph (version=None gets the active version) - graph = await get_graph(graph_id, version=None, user_id=user_id) if not graph: return None - # Convert to JSON format nodes = [] for node in graph.nodes: nodes.append( @@ -256,7 +794,9 @@ async def get_agent_as_json( async def generate_agent_patch( - update_request: str, current_agent: dict[str, Any] + update_request: str, + current_agent: dict[str, Any], + library_agents: list[AgentSummary] | None = None, ) -> dict[str, Any] | None: """Update an existing agent using natural language. 
@@ -268,6 +808,7 @@ async def generate_agent_patch( Args: update_request: Natural language description of changes current_agent: Current agent JSON + library_agents: User's library agents available for sub-agent composition Returns: Updated agent JSON, clarifying questions dict {"type": "clarifying_questions", ...}, @@ -278,4 +819,6 @@ async def generate_agent_patch( """ _check_service_configured() logger.info("Calling external Agent Generator service for generate_agent_patch") - return await generate_agent_patch_external(update_request, current_agent) + return await generate_agent_patch_external( + update_request, current_agent, _to_dict_list(library_agents) + ) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/errors.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/errors.py index bf71a95df9..282d8cf9aa 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/errors.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/errors.py @@ -1,11 +1,43 @@ """Error handling utilities for agent generator.""" +import re + + +def _sanitize_error_details(details: str) -> str: + """Sanitize error details to remove sensitive information. + + Strips common patterns that could expose internal system info: + - File paths (Unix and Windows) + - Database connection strings + - URLs with credentials + - Stack trace internals + + Args: + details: Raw error details string + + Returns: + Sanitized error details safe for user display + """ + sanitized = re.sub( + r"/[a-zA-Z0-9_./\-]+\.(py|js|ts|json|yaml|yml)", "[path]", details + ) + sanitized = re.sub(r"[A-Z]:\\[a-zA-Z0-9_\\.\\-]+", "[path]", sanitized) + sanitized = re.sub( + r"(postgres|mysql|mongodb|redis)://[^\s]+", "[database_url]", sanitized + ) + sanitized = re.sub(r"https?://[^:]+:[^@]+@[^\s]+", "[url]", sanitized) + sanitized = re.sub(r", line \d+", "", sanitized) + sanitized = re.sub(r'File "[^"]+",?', "", sanitized) + + return sanitized.strip() + def get_user_message_for_error( error_type: str, operation: str = "process the request", llm_parse_message: str | None = None, validation_message: str | None = None, + error_details: str | None = None, ) -> str: """Get a user-friendly error message based on error type. @@ -19,25 +51,45 @@ def get_user_message_for_error( message (e.g., "analyze the goal", "generate the agent") llm_parse_message: Custom message for llm_parse_error type validation_message: Custom message for validation_error type + error_details: Optional additional details about the error Returns: User-friendly error message suitable for display to the user """ + base_message = "" + if error_type == "llm_parse_error": - return ( + base_message = ( llm_parse_message or "The AI had trouble processing this request. Please try again." ) elif error_type == "validation_error": - return ( + base_message = ( validation_message - or "The request failed validation. Please try rephrasing." + or "The generated agent failed validation. " + "This usually happens when the agent structure doesn't match " + "what the platform expects. Please try simplifying your goal " + "or breaking it into smaller parts." ) elif error_type == "patch_error": - return "Failed to apply the changes. Please try a different approach." + base_message = ( + "Failed to apply the changes. The modification couldn't be " + "validated. Please try a different approach or simplify the change." 
+ ) elif error_type in ("timeout", "llm_timeout"): - return "The request took too long. Please try again." + base_message = ( + "The request took too long to process. This can happen with " + "complex agents. Please try again or simplify your goal." + ) elif error_type in ("rate_limit", "llm_rate_limit"): - return "The service is currently busy. Please try again in a moment." + base_message = "The service is currently busy. Please try again in a moment." else: - return f"Failed to {operation}. Please try again." + base_message = f"Failed to {operation}. Please try again." + + if error_details: + details = _sanitize_error_details(error_details) + if len(details) > 200: + details = details[:200] + "..." + base_message += f"\n\nTechnical details: {details}" + + return base_message diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py index 1df1faaaef..c6242b0ba9 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py @@ -117,13 +117,16 @@ def _get_client() -> httpx.AsyncClient: async def decompose_goal_external( - description: str, context: str = "" + description: str, + context: str = "", + library_agents: list[dict[str, Any]] | None = None, ) -> dict[str, Any] | None: """Call the external service to decompose a goal. Args: description: Natural language goal description context: Additional context (e.g., answers to previous questions) + library_agents: User's library agents available for sub-agent composition Returns: Dict with either: @@ -141,6 +144,8 @@ async def decompose_goal_external( if context: # The external service uses user_instruction for additional context payload["user_instruction"] = context + if library_agents: + payload["library_agents"] = library_agents try: response = await client.post("/api/decompose-description", json=payload) @@ -207,21 +212,25 @@ async def decompose_goal_external( async def generate_agent_external( instructions: dict[str, Any], + library_agents: list[dict[str, Any]] | None = None, ) -> dict[str, Any] | None: """Call the external service to generate an agent from instructions. 
Args: instructions: Structured instructions from decompose_goal + library_agents: User's library agents available for sub-agent composition Returns: Agent JSON dict on success, or error dict {"type": "error", ...} on error """ client = _get_client() + payload: dict[str, Any] = {"instructions": instructions} + if library_agents: + payload["library_agents"] = library_agents + try: - response = await client.post( - "/api/generate-agent", json={"instructions": instructions} - ) + response = await client.post("/api/generate-agent", json=payload) response.raise_for_status() data = response.json() @@ -229,8 +238,7 @@ async def generate_agent_external( error_msg = data.get("error", "Unknown error from Agent Generator") error_type = data.get("error_type", "unknown") logger.error( - f"Agent Generator generation failed: {error_msg} " - f"(type: {error_type})" + f"Agent Generator generation failed: {error_msg} (type: {error_type})" ) return _create_error_response(error_msg, error_type) @@ -251,27 +259,31 @@ async def generate_agent_external( async def generate_agent_patch_external( - update_request: str, current_agent: dict[str, Any] + update_request: str, + current_agent: dict[str, Any], + library_agents: list[dict[str, Any]] | None = None, ) -> dict[str, Any] | None: """Call the external service to generate a patch for an existing agent. Args: update_request: Natural language description of changes current_agent: Current agent JSON + library_agents: User's library agents available for sub-agent composition Returns: Updated agent JSON, clarifying questions dict, or error dict on error """ client = _get_client() + payload: dict[str, Any] = { + "update_request": update_request, + "current_agent_json": current_agent, + } + if library_agents: + payload["library_agents"] = library_agents + try: - response = await client.post( - "/api/update-agent", - json={ - "update_request": update_request, - "current_agent_json": current_agent, - }, - ) + response = await client.post("/api/update-agent", json=payload) response.raise_for_status() data = response.json() diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py index 5fa74ba04e..62d59c470e 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py @@ -1,6 +1,7 @@ """Shared agent search functionality for find_agent and find_library_agent tools.""" import logging +import re from typing import Literal from backend.api.features.library import db as library_db @@ -19,6 +20,85 @@ logger = logging.getLogger(__name__) SearchSource = Literal["marketplace", "library"] +_UUID_PATTERN = re.compile( + r"^[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}$", + re.IGNORECASE, +) + + +def _is_uuid(text: str) -> bool: + """Check if text is a valid UUID v4.""" + return bool(_UUID_PATTERN.match(text.strip())) + + +async def _get_library_agent_by_id(user_id: str, agent_id: str) -> AgentInfo | None: + """Fetch a library agent by ID (library agent ID or graph_id). + + Tries multiple lookup strategies: + 1. First by graph_id (AgentGraph primary key) + 2. 
Then by library agent ID (LibraryAgent primary key) + + Args: + user_id: The user ID + agent_id: The ID to look up (can be graph_id or library agent ID) + + Returns: + AgentInfo if found, None otherwise + """ + try: + agent = await library_db.get_library_agent_by_graph_id(user_id, agent_id) + if agent: + logger.debug(f"Found library agent by graph_id: {agent.name}") + return AgentInfo( + id=agent.id, + name=agent.name, + description=agent.description or "", + source="library", + in_library=True, + creator=agent.creator_name, + status=agent.status.value, + can_access_graph=agent.can_access_graph, + has_external_trigger=agent.has_external_trigger, + new_output=agent.new_output, + graph_id=agent.graph_id, + ) + except DatabaseError: + raise + except Exception as e: + logger.warning( + f"Could not fetch library agent by graph_id {agent_id}: {e}", + exc_info=True, + ) + + try: + agent = await library_db.get_library_agent(agent_id, user_id) + if agent: + logger.debug(f"Found library agent by library_id: {agent.name}") + return AgentInfo( + id=agent.id, + name=agent.name, + description=agent.description or "", + source="library", + in_library=True, + creator=agent.creator_name, + status=agent.status.value, + can_access_graph=agent.can_access_graph, + has_external_trigger=agent.has_external_trigger, + new_output=agent.new_output, + graph_id=agent.graph_id, + ) + except NotFoundError: + logger.debug(f"Library agent not found by library_id: {agent_id}") + except DatabaseError: + raise + except Exception as e: + logger.warning( + f"Could not fetch library agent by library_id {agent_id}: {e}", + exc_info=True, + ) + + return None + async def search_agents( query: str, @@ -69,29 +149,37 @@ async def search_agents( is_featured=False, ) ) - else: # library - logger.info(f"Searching user library for: {query}") - results = await library_db.list_library_agents( - user_id=user_id, # type: ignore[arg-type] - search_term=query, - page_size=10, - ) - for agent in results.agents: - agents.append( - AgentInfo( - id=agent.id, - name=agent.name, - description=agent.description or "", - source="library", - in_library=True, - creator=agent.creator_name, - status=agent.status.value, - can_access_graph=agent.can_access_graph, - has_external_trigger=agent.has_external_trigger, - new_output=agent.new_output, - graph_id=agent.graph_id, - ) + else: + if _is_uuid(query): + logger.info(f"Query looks like UUID, trying direct lookup: {query}") + agent = await _get_library_agent_by_id(user_id, query) # type: ignore[arg-type] + if agent: + agents.append(agent) + logger.info(f"Found agent by direct ID lookup: {agent.name}") + + if not agents: + logger.info(f"Searching user library for: {query}") + results = await library_db.list_library_agents( + user_id=user_id, # type: ignore[arg-type] + search_term=query, + page_size=10, ) + for agent in results.agents: + agents.append( + AgentInfo( + id=agent.id, + name=agent.name, + description=agent.description or "", + source="library", + in_library=True, + creator=agent.creator_name, + status=agent.status.value, + can_access_graph=agent.can_access_graph, + has_external_trigger=agent.has_external_trigger, + new_output=agent.new_output, + graph_id=agent.graph_id, + ) + ) logger.info(f"Found {len(agents)} agents in {source}") except NotFoundError: pass diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py index 74011c7e95..adb2c78fce 100644 --- 
a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py @@ -8,7 +8,9 @@ from backend.api.features.chat.model import ChatSession from .agent_generator import ( AgentGeneratorNotConfiguredError, decompose_goal, + enrich_library_agents_from_steps, generate_agent, + get_all_relevant_agents_for_generation, get_user_message_for_error, save_agent_to_library, ) @@ -103,9 +105,24 @@ class CreateAgentTool(BaseTool): session_id=session_id, ) - # Step 1: Decompose goal into steps + library_agents = None + if user_id: + try: + library_agents = await get_all_relevant_agents_for_generation( + user_id=user_id, + search_query=description, + include_marketplace=True, + ) + logger.debug( + f"Found {len(library_agents)} relevant agents for sub-agent composition" + ) + except Exception as e: + logger.warning(f"Failed to fetch library agents: {e}") + try: - decomposition_result = await decompose_goal(description, context) + decomposition_result = await decompose_goal( + description, context, library_agents + ) except AgentGeneratorNotConfiguredError: return ErrorResponse( message=( @@ -124,7 +141,6 @@ class CreateAgentTool(BaseTool): session_id=session_id, ) - # Check if the result is an error from the external service if decomposition_result.get("type") == "error": error_msg = decomposition_result.get("error", "Unknown error") error_type = decomposition_result.get("error_type", "unknown") @@ -144,7 +160,6 @@ class CreateAgentTool(BaseTool): session_id=session_id, ) - # Check if LLM returned clarifying questions if decomposition_result.get("type") == "clarifying_questions": questions = decomposition_result.get("questions", []) return ClarificationNeededResponse( @@ -163,7 +178,6 @@ class CreateAgentTool(BaseTool): session_id=session_id, ) - # Check for unachievable/vague goals if decomposition_result.get("type") == "unachievable_goal": suggested = decomposition_result.get("suggested_goal", "") reason = decomposition_result.get("reason", "") @@ -190,9 +204,22 @@ class CreateAgentTool(BaseTool): session_id=session_id, ) - # Step 2: Generate agent JSON (external service handles fixing and validation) + if user_id and library_agents is not None: + try: + library_agents = await enrich_library_agents_from_steps( + user_id=user_id, + decomposition_result=decomposition_result, + existing_agents=library_agents, + include_marketplace=True, + ) + logger.debug( + f"After enrichment: {len(library_agents)} total agents for sub-agent composition" + ) + except Exception as e: + logger.warning(f"Failed to enrich library agents from steps: {e}") + try: - agent_json = await generate_agent(decomposition_result) + agent_json = await generate_agent(decomposition_result, library_agents) except AgentGeneratorNotConfiguredError: return ErrorResponse( message=( @@ -211,7 +238,6 @@ class CreateAgentTool(BaseTool): session_id=session_id, ) - # Check if the result is an error from the external service if isinstance(agent_json, dict) and agent_json.get("type") == "error": error_msg = agent_json.get("error", "Unknown error") error_type = agent_json.get("error_type", "unknown") @@ -219,7 +245,12 @@ class CreateAgentTool(BaseTool): error_type, operation="generate the agent", llm_parse_message="The AI had trouble generating the agent. Please try again or simplify your goal.", - validation_message="The generated agent failed validation. 
Please try rephrasing your goal.", + validation_message=( + "I wasn't able to create a valid agent for this request. " + "The generated workflow had some structural issues. " + "Please try simplifying your goal or breaking it into smaller steps." + ), + error_details=error_msg, ) return ErrorResponse( message=user_message, @@ -237,7 +268,6 @@ class CreateAgentTool(BaseTool): node_count = len(agent_json.get("nodes", [])) link_count = len(agent_json.get("links", [])) - # Step 3: Preview or save if not save: return AgentPreviewResponse( message=( @@ -252,7 +282,6 @@ class CreateAgentTool(BaseTool): session_id=session_id, ) - # Save to library if not user_id: return ErrorResponse( message="You must be logged in to save agents.", @@ -270,7 +299,7 @@ class CreateAgentTool(BaseTool): agent_id=created_graph.id, agent_name=created_graph.name, library_agent_id=library_agent.id, - library_agent_link=f"/library/{library_agent.id}", + library_agent_link=f"/library/agents/{library_agent.id}", agent_page_link=f"/build?flowID={created_graph.id}", session_id=session_id, ) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py index ee8eee53ce..2c2c48226b 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py @@ -9,6 +9,7 @@ from .agent_generator import ( AgentGeneratorNotConfiguredError, generate_agent_patch, get_agent_as_json, + get_all_relevant_agents_for_generation, get_user_message_for_error, save_agent_to_library, ) @@ -117,7 +118,6 @@ class EditAgentTool(BaseTool): session_id=session_id, ) - # Step 1: Fetch current agent current_agent = await get_agent_as_json(agent_id, user_id) if current_agent is None: @@ -127,14 +127,30 @@ class EditAgentTool(BaseTool): session_id=session_id, ) - # Build the update request with context + library_agents = None + if user_id: + try: + graph_id = current_agent.get("id") + library_agents = await get_all_relevant_agents_for_generation( + user_id=user_id, + search_query=changes, + exclude_graph_id=graph_id, + include_marketplace=True, + ) + logger.debug( + f"Found {len(library_agents)} relevant agents for sub-agent composition" + ) + except Exception as e: + logger.warning(f"Failed to fetch library agents: {e}") + update_request = changes if context: update_request = f"{changes}\n\nAdditional context:\n{context}" - # Step 2: Generate updated agent (external service handles fixing and validation) try: - result = await generate_agent_patch(update_request, current_agent) + result = await generate_agent_patch( + update_request, current_agent, library_agents + ) except AgentGeneratorNotConfiguredError: return ErrorResponse( message=( @@ -153,7 +169,6 @@ class EditAgentTool(BaseTool): session_id=session_id, ) - # Check if the result is an error from the external service if isinstance(result, dict) and result.get("type") == "error": error_msg = result.get("error", "Unknown error") error_type = result.get("error_type", "unknown") @@ -162,6 +177,7 @@ class EditAgentTool(BaseTool): operation="generate the changes", llm_parse_message="The AI had trouble generating the changes. Please try again or simplify your request.", validation_message="The generated changes failed validation. 
Please try rephrasing your request.", + error_details=error_msg, ) return ErrorResponse( message=user_message, @@ -175,7 +191,6 @@ class EditAgentTool(BaseTool): session_id=session_id, ) - # Check if LLM returned clarifying questions if result.get("type") == "clarifying_questions": questions = result.get("questions", []) return ClarificationNeededResponse( @@ -194,7 +209,6 @@ class EditAgentTool(BaseTool): session_id=session_id, ) - # Result is the updated agent JSON updated_agent = result agent_name = updated_agent.get("name", "Updated Agent") @@ -202,7 +216,6 @@ class EditAgentTool(BaseTool): node_count = len(updated_agent.get("nodes", [])) link_count = len(updated_agent.get("links", [])) - # Step 3: Preview or save if not save: return AgentPreviewResponse( message=( @@ -218,7 +231,6 @@ class EditAgentTool(BaseTool): session_id=session_id, ) - # Save to library (creates a new version) if not user_id: return ErrorResponse( message="You must be logged in to save agents.", @@ -236,7 +248,7 @@ class EditAgentTool(BaseTool): agent_id=created_graph.id, agent_name=created_graph.name, library_agent_id=library_agent.id, - library_agent_link=f"/library/{library_agent.id}", + library_agent_link=f"/library/agents/{library_agent.id}", agent_page_link=f"/build?flowID={created_graph.id}", session_id=session_id, ) diff --git a/autogpt_platform/backend/backend/api/features/library/db.py b/autogpt_platform/backend/backend/api/features/library/db.py index 872fe66b28..394f959953 100644 --- a/autogpt_platform/backend/backend/api/features/library/db.py +++ b/autogpt_platform/backend/backend/api/features/library/db.py @@ -39,6 +39,7 @@ async def list_library_agents( sort_by: library_model.LibraryAgentSort = library_model.LibraryAgentSort.UPDATED_AT, page: int = 1, page_size: int = 50, + include_executions: bool = False, ) -> library_model.LibraryAgentResponse: """ Retrieves a paginated list of LibraryAgent records for a given user. @@ -49,6 +50,9 @@ async def list_library_agents( sort_by: Sorting field (createdAt, updatedAt, isFavorite, isCreatedByUser). page: Current page (1-indexed). page_size: Number of items per page. + include_executions: Whether to include execution data for status calculation. + Defaults to False for performance (UI fetches status separately). + Set to True when accurate status/metrics are needed (e.g., agent generator). Returns: A LibraryAgentResponse containing the list of agents and pagination details. 
@@ -76,7 +80,6 @@ async def list_library_agents( "isArchived": False, } - # Build search filter if applicable if search_term: where_clause["OR"] = [ { @@ -93,7 +96,6 @@ async def list_library_agents( }, ] - # Determine sorting order_by: prisma.types.LibraryAgentOrderByInput | None = None if sort_by == library_model.LibraryAgentSort.CREATED_AT: @@ -105,7 +107,7 @@ async def list_library_agents( library_agents = await prisma.models.LibraryAgent.prisma().find_many( where=where_clause, include=library_agent_include( - user_id, include_nodes=False, include_executions=False + user_id, include_nodes=False, include_executions=include_executions ), order=order_by, skip=(page - 1) * page_size, diff --git a/autogpt_platform/backend/backend/api/features/library/model.py b/autogpt_platform/backend/backend/api/features/library/model.py index 14d7c7be81..c6bc0e0427 100644 --- a/autogpt_platform/backend/backend/api/features/library/model.py +++ b/autogpt_platform/backend/backend/api/features/library/model.py @@ -9,6 +9,7 @@ import pydantic from backend.data.block import BlockInput from backend.data.graph import GraphModel, GraphSettings, GraphTriggerInfo from backend.data.model import CredentialsMetaInput, is_credentials_field_name +from backend.util.json import loads as json_loads from backend.util.models import Pagination if TYPE_CHECKING: @@ -16,10 +17,10 @@ if TYPE_CHECKING: class LibraryAgentStatus(str, Enum): - COMPLETED = "COMPLETED" # All runs completed - HEALTHY = "HEALTHY" # Agent is running (not all runs have completed) - WAITING = "WAITING" # Agent is queued or waiting to start - ERROR = "ERROR" # Agent is in an error state + COMPLETED = "COMPLETED" + HEALTHY = "HEALTHY" + WAITING = "WAITING" + ERROR = "ERROR" class MarketplaceListingCreator(pydantic.BaseModel): @@ -39,6 +40,30 @@ class MarketplaceListing(pydantic.BaseModel): creator: MarketplaceListingCreator +class RecentExecution(pydantic.BaseModel): + """Summary of a recent execution for quality assessment. + + Used by the LLM to understand the agent's recent performance with specific examples + rather than just aggregate statistics. 
+ """ + + status: str + correctness_score: float | None = None + activity_summary: str | None = None + + +def _parse_settings(settings: dict | str | None) -> GraphSettings: + """Parse settings from database, handling both dict and string formats.""" + if settings is None: + return GraphSettings() + try: + if isinstance(settings, str): + settings = json_loads(settings) + return GraphSettings.model_validate(settings) + except Exception: + return GraphSettings() + + class LibraryAgent(pydantic.BaseModel): """ Represents an agent in the library, including metadata for display and @@ -48,7 +73,7 @@ class LibraryAgent(pydantic.BaseModel): id: str graph_id: str graph_version: int - owner_user_id: str # ID of user who owns/created this agent graph + owner_user_id: str image_url: str | None @@ -64,7 +89,7 @@ class LibraryAgent(pydantic.BaseModel): description: str instructions: str | None = None - input_schema: dict[str, Any] # Should be BlockIOObjectSubSchema in frontend + input_schema: dict[str, Any] output_schema: dict[str, Any] credentials_input_schema: dict[str, Any] | None = pydantic.Field( description="Input schema for credentials required by the agent", @@ -81,25 +106,19 @@ class LibraryAgent(pydantic.BaseModel): ) trigger_setup_info: Optional[GraphTriggerInfo] = None - # Indicates whether there's a new output (based on recent runs) new_output: bool - - # Whether the user can access the underlying graph + execution_count: int = 0 + success_rate: float | None = None + avg_correctness_score: float | None = None + recent_executions: list[RecentExecution] = pydantic.Field( + default_factory=list, + description="List of recent executions with status, score, and summary", + ) can_access_graph: bool - - # Indicates if this agent is the latest version is_latest_version: bool - - # Whether the agent is marked as favorite by the user is_favorite: bool - - # Recommended schedule cron (from marketplace agents) recommended_schedule_cron: str | None = None - - # User-specific settings for this library agent settings: GraphSettings = pydantic.Field(default_factory=GraphSettings) - - # Marketplace listing information if the agent has been published marketplace_listing: Optional["MarketplaceListing"] = None @staticmethod @@ -123,7 +142,6 @@ class LibraryAgent(pydantic.BaseModel): agent_updated_at = agent.AgentGraph.updatedAt lib_agent_updated_at = agent.updatedAt - # Compute updated_at as the latest between library agent and graph updated_at = ( max(agent_updated_at, lib_agent_updated_at) if agent_updated_at @@ -136,7 +154,6 @@ class LibraryAgent(pydantic.BaseModel): creator_name = agent.Creator.name or "Unknown" creator_image_url = agent.Creator.avatarUrl or "" - # Logic to calculate status and new_output week_ago = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta( days=7 ) @@ -145,13 +162,55 @@ class LibraryAgent(pydantic.BaseModel): status = status_result.status new_output = status_result.new_output - # Check if user can access the graph - can_access_graph = agent.AgentGraph.userId == agent.userId + execution_count = len(executions) + success_rate: float | None = None + avg_correctness_score: float | None = None + if execution_count > 0: + success_count = sum( + 1 + for e in executions + if e.executionStatus == prisma.enums.AgentExecutionStatus.COMPLETED + ) + success_rate = (success_count / execution_count) * 100 - # Hard-coded to True until a method to check is implemented + correctness_scores = [] + for e in executions: + if e.stats and isinstance(e.stats, dict): + score = 
e.stats.get("correctness_score") + if score is not None and isinstance(score, (int, float)): + correctness_scores.append(float(score)) + if correctness_scores: + avg_correctness_score = sum(correctness_scores) / len( + correctness_scores + ) + + recent_executions: list[RecentExecution] = [] + for e in executions: + exec_score: float | None = None + exec_summary: str | None = None + if e.stats and isinstance(e.stats, dict): + score = e.stats.get("correctness_score") + if score is not None and isinstance(score, (int, float)): + exec_score = float(score) + summary = e.stats.get("activity_status") + if summary is not None and isinstance(summary, str): + exec_summary = summary + exec_status = ( + e.executionStatus.value + if hasattr(e.executionStatus, "value") + else str(e.executionStatus) + ) + recent_executions.append( + RecentExecution( + status=exec_status, + correctness_score=exec_score, + activity_summary=exec_summary, + ) + ) + + can_access_graph = agent.AgentGraph.userId == agent.userId is_latest_version = True - # Build marketplace_listing if available marketplace_listing_data = None if store_listing and store_listing.ActiveVersion and profile: creator_data = MarketplaceListingCreator( @@ -190,11 +249,15 @@ class LibraryAgent(pydantic.BaseModel): has_sensitive_action=graph.has_sensitive_action, trigger_setup_info=graph.trigger_setup_info, new_output=new_output, + execution_count=execution_count, + success_rate=success_rate, + avg_correctness_score=avg_correctness_score, + recent_executions=recent_executions, can_access_graph=can_access_graph, is_latest_version=is_latest_version, is_favorite=agent.isFavorite, recommended_schedule_cron=agent.AgentGraph.recommendedScheduleCron, - settings=GraphSettings.model_validate(agent.settings), + settings=_parse_settings(agent.settings), marketplace_listing=marketplace_listing_data, ) @@ -220,18 +283,15 @@ def _calculate_agent_status( if not executions: return AgentStatusResult(status=LibraryAgentStatus.COMPLETED, new_output=False) - # Track how many times each execution status appears status_counts = {status: 0 for status in prisma.enums.AgentExecutionStatus} new_output = False for execution in executions: - # Check if there's a completed run more recent than `recent_threshold` if execution.createdAt >= recent_threshold: if execution.executionStatus == prisma.enums.AgentExecutionStatus.COMPLETED: new_output = True status_counts[execution.executionStatus] += 1 - # Determine the final status based on counts if status_counts[prisma.enums.AgentExecutionStatus.FAILED] > 0: return AgentStatusResult(status=LibraryAgentStatus.ERROR, new_output=new_output) elif status_counts[prisma.enums.AgentExecutionStatus.QUEUED] > 0: diff --git a/autogpt_platform/backend/snapshots/lib_agts_search b/autogpt_platform/backend/snapshots/lib_agts_search index 67c307b09e..3ce8402b63 100644 --- a/autogpt_platform/backend/snapshots/lib_agts_search +++ b/autogpt_platform/backend/snapshots/lib_agts_search @@ -31,6 +31,10 @@ "has_sensitive_action": false, "trigger_setup_info": null, "new_output": false, + "execution_count": 0, + "success_rate": null, + "avg_correctness_score": null, + "recent_executions": [], "can_access_graph": true, "is_latest_version": true, "is_favorite": false, @@ -72,6 +76,10 @@ "has_sensitive_action": false, "trigger_setup_info": null, "new_output": false, + "execution_count": 0, + "success_rate": null, + "avg_correctness_score": null, + "recent_executions": [], "can_access_graph": false, "is_latest_version": true, "is_favorite": false, diff --git 
a/autogpt_platform/backend/test/agent_generator/test_core_integration.py b/autogpt_platform/backend/test/agent_generator/test_core_integration.py index bdcc24ba79..05ce4a3aff 100644 --- a/autogpt_platform/backend/test/agent_generator/test_core_integration.py +++ b/autogpt_platform/backend/test/agent_generator/test_core_integration.py @@ -57,7 +57,8 @@ class TestDecomposeGoal: result = await core.decompose_goal("Build a chatbot") - mock_external.assert_called_once_with("Build a chatbot", "") + # library_agents defaults to None + mock_external.assert_called_once_with("Build a chatbot", "", None) assert result == expected_result @pytest.mark.asyncio @@ -74,7 +75,8 @@ class TestDecomposeGoal: await core.decompose_goal("Build a chatbot", "Use Python") - mock_external.assert_called_once_with("Build a chatbot", "Use Python") + # library_agents defaults to None + mock_external.assert_called_once_with("Build a chatbot", "Use Python", None) @pytest.mark.asyncio async def test_returns_none_on_service_failure(self): @@ -109,7 +111,8 @@ class TestGenerateAgent: instructions = {"type": "instructions", "steps": ["Step 1"]} result = await core.generate_agent(instructions) - mock_external.assert_called_once_with(instructions) + # library_agents defaults to None + mock_external.assert_called_once_with(instructions, None) # Result should have id, version, is_active added if not present assert result is not None assert result["name"] == "Test Agent" @@ -174,7 +177,8 @@ class TestGenerateAgentPatch: current_agent = {"nodes": [], "links": []} result = await core.generate_agent_patch("Add a node", current_agent) - mock_external.assert_called_once_with("Add a node", current_agent) + # library_agents defaults to None + mock_external.assert_called_once_with("Add a node", current_agent, None) assert result == expected_result @pytest.mark.asyncio diff --git a/autogpt_platform/backend/test/agent_generator/test_library_agents.py b/autogpt_platform/backend/test/agent_generator/test_library_agents.py new file mode 100644 index 0000000000..e62b0746e7 --- /dev/null +++ b/autogpt_platform/backend/test/agent_generator/test_library_agents.py @@ -0,0 +1,841 @@ +""" +Tests for library agent fetching functionality in agent generator. + +This test suite verifies the search-based library agent fetching, +including the combination of library and marketplace agents. 
+""" + +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +from backend.api.features.chat.tools.agent_generator import core + + +class TestGetLibraryAgentsForGeneration: + """Test get_library_agents_for_generation function.""" + + @pytest.mark.asyncio + async def test_fetches_agents_with_search_term(self): + """Test that search_term is passed to the library db.""" + # Create a mock agent with proper attribute values + mock_agent = MagicMock() + mock_agent.graph_id = "agent-123" + mock_agent.graph_version = 1 + mock_agent.name = "Email Agent" + mock_agent.description = "Sends emails" + mock_agent.input_schema = {"properties": {}} + mock_agent.output_schema = {"properties": {}} + mock_agent.recent_executions = [] + + mock_response = MagicMock() + mock_response.agents = [mock_agent] + + with patch.object( + core.library_db, + "list_library_agents", + new_callable=AsyncMock, + return_value=mock_response, + ) as mock_list: + result = await core.get_library_agents_for_generation( + user_id="user-123", + search_query="send email", + ) + + mock_list.assert_called_once_with( + user_id="user-123", + search_term="send email", + page=1, + page_size=15, + include_executions=True, + ) + + # Verify result format + assert len(result) == 1 + assert result[0]["graph_id"] == "agent-123" + assert result[0]["name"] == "Email Agent" + + @pytest.mark.asyncio + async def test_excludes_specified_graph_id(self): + """Test that agents with excluded graph_id are filtered out.""" + mock_response = MagicMock() + mock_response.agents = [ + MagicMock( + graph_id="agent-123", + graph_version=1, + name="Agent 1", + description="First agent", + input_schema={}, + output_schema={}, + recent_executions=[], + ), + MagicMock( + graph_id="agent-456", + graph_version=1, + name="Agent 2", + description="Second agent", + input_schema={}, + output_schema={}, + recent_executions=[], + ), + ] + + with patch.object( + core.library_db, + "list_library_agents", + new_callable=AsyncMock, + return_value=mock_response, + ): + result = await core.get_library_agents_for_generation( + user_id="user-123", + exclude_graph_id="agent-123", + ) + + # Verify the excluded agent is not in results + assert len(result) == 1 + assert result[0]["graph_id"] == "agent-456" + + @pytest.mark.asyncio + async def test_respects_max_results(self): + """Test that max_results parameter limits the page_size.""" + mock_response = MagicMock() + mock_response.agents = [] + + with patch.object( + core.library_db, + "list_library_agents", + new_callable=AsyncMock, + return_value=mock_response, + ) as mock_list: + await core.get_library_agents_for_generation( + user_id="user-123", + max_results=5, + ) + + mock_list.assert_called_once_with( + user_id="user-123", + search_term=None, + page=1, + page_size=5, + include_executions=True, + ) + + +class TestSearchMarketplaceAgentsForGeneration: + """Test search_marketplace_agents_for_generation function.""" + + @pytest.mark.asyncio + async def test_searches_marketplace_with_query(self): + """Test that marketplace is searched with the query.""" + mock_response = MagicMock() + mock_response.agents = [ + MagicMock( + agent_name="Public Agent", + description="A public agent", + sub_heading="Does something useful", + creator="creator-1", + ) + ] + + # The store_db is dynamically imported, so patch the import path + with patch( + "backend.api.features.store.db.get_store_agents", + new_callable=AsyncMock, + return_value=mock_response, + ) as mock_search: + result = await 
core.search_marketplace_agents_for_generation( + search_query="automation", + max_results=10, + ) + + mock_search.assert_called_once_with( + search_query="automation", + page=1, + page_size=10, + ) + + assert len(result) == 1 + assert result[0]["name"] == "Public Agent" + assert result[0]["is_marketplace_agent"] is True + + @pytest.mark.asyncio + async def test_handles_marketplace_error_gracefully(self): + """Test that marketplace errors don't crash the function.""" + with patch( + "backend.api.features.store.db.get_store_agents", + new_callable=AsyncMock, + side_effect=Exception("Marketplace unavailable"), + ): + result = await core.search_marketplace_agents_for_generation( + search_query="test" + ) + + # Should return empty list, not raise exception + assert result == [] + + +class TestGetAllRelevantAgentsForGeneration: + """Test get_all_relevant_agents_for_generation function.""" + + @pytest.mark.asyncio + async def test_combines_library_and_marketplace_agents(self): + """Test that agents from both sources are combined.""" + library_agents = [ + { + "graph_id": "lib-123", + "graph_version": 1, + "name": "Library Agent", + "description": "From library", + "input_schema": {}, + "output_schema": {}, + } + ] + + marketplace_agents = [ + { + "name": "Market Agent", + "description": "From marketplace", + "sub_heading": "Sub heading", + "creator": "creator-1", + "is_marketplace_agent": True, + } + ] + + with patch.object( + core, + "get_library_agents_for_generation", + new_callable=AsyncMock, + return_value=library_agents, + ): + with patch.object( + core, + "search_marketplace_agents_for_generation", + new_callable=AsyncMock, + return_value=marketplace_agents, + ): + result = await core.get_all_relevant_agents_for_generation( + user_id="user-123", + search_query="test query", + include_marketplace=True, + ) + + # Library agents should come first + assert len(result) == 2 + assert result[0]["name"] == "Library Agent" + assert result[1]["name"] == "Market Agent" + + @pytest.mark.asyncio + async def test_deduplicates_by_name(self): + """Test that marketplace agents with same name as library are excluded.""" + library_agents = [ + { + "graph_id": "lib-123", + "graph_version": 1, + "name": "Shared Agent", + "description": "From library", + "input_schema": {}, + "output_schema": {}, + } + ] + + marketplace_agents = [ + { + "name": "Shared Agent", # Same name, should be deduplicated + "description": "From marketplace", + "sub_heading": "Sub heading", + "creator": "creator-1", + "is_marketplace_agent": True, + }, + { + "name": "Unique Agent", + "description": "Only in marketplace", + "sub_heading": "Sub heading", + "creator": "creator-2", + "is_marketplace_agent": True, + }, + ] + + with patch.object( + core, + "get_library_agents_for_generation", + new_callable=AsyncMock, + return_value=library_agents, + ): + with patch.object( + core, + "search_marketplace_agents_for_generation", + new_callable=AsyncMock, + return_value=marketplace_agents, + ): + result = await core.get_all_relevant_agents_for_generation( + user_id="user-123", + search_query="test", + include_marketplace=True, + ) + + # Shared Agent from marketplace should be excluded + assert len(result) == 2 + names = [a["name"] for a in result] + assert "Shared Agent" in names + assert "Unique Agent" in names + + @pytest.mark.asyncio + async def test_skips_marketplace_when_disabled(self): + """Test that marketplace is not searched when include_marketplace=False.""" + library_agents = [ + { + "graph_id": "lib-123", + "graph_version": 1, + 
"name": "Library Agent", + "description": "From library", + "input_schema": {}, + "output_schema": {}, + } + ] + + with patch.object( + core, + "get_library_agents_for_generation", + new_callable=AsyncMock, + return_value=library_agents, + ): + with patch.object( + core, + "search_marketplace_agents_for_generation", + new_callable=AsyncMock, + ) as mock_marketplace: + result = await core.get_all_relevant_agents_for_generation( + user_id="user-123", + search_query="test", + include_marketplace=False, + ) + + # Marketplace should not be called + mock_marketplace.assert_not_called() + assert len(result) == 1 + + @pytest.mark.asyncio + async def test_skips_marketplace_when_no_search_query(self): + """Test that marketplace is not searched without a search query.""" + library_agents = [ + { + "graph_id": "lib-123", + "graph_version": 1, + "name": "Library Agent", + "description": "From library", + "input_schema": {}, + "output_schema": {}, + } + ] + + with patch.object( + core, + "get_library_agents_for_generation", + new_callable=AsyncMock, + return_value=library_agents, + ): + with patch.object( + core, + "search_marketplace_agents_for_generation", + new_callable=AsyncMock, + ) as mock_marketplace: + result = await core.get_all_relevant_agents_for_generation( + user_id="user-123", + search_query=None, # No search query + include_marketplace=True, + ) + + # Marketplace should not be called without search query + mock_marketplace.assert_not_called() + assert len(result) == 1 + + +class TestExtractSearchTermsFromSteps: + """Test extract_search_terms_from_steps function.""" + + def test_extracts_terms_from_instructions_type(self): + """Test extraction from valid instructions decomposition result.""" + decomposition_result = { + "type": "instructions", + "steps": [ + { + "description": "Send an email notification", + "block_name": "GmailSendBlock", + }, + {"description": "Fetch weather data", "action": "Get weather API"}, + ], + } + + result = core.extract_search_terms_from_steps(decomposition_result) + + assert "Send an email notification" in result + assert "GmailSendBlock" in result + assert "Fetch weather data" in result + assert "Get weather API" in result + + def test_returns_empty_for_non_instructions_type(self): + """Test that non-instructions types return empty list.""" + decomposition_result = { + "type": "clarifying_questions", + "questions": [{"question": "What email?"}], + } + + result = core.extract_search_terms_from_steps(decomposition_result) + + assert result == [] + + def test_deduplicates_terms_case_insensitively(self): + """Test that duplicate terms are removed (case-insensitive).""" + decomposition_result = { + "type": "instructions", + "steps": [ + {"description": "Send Email", "name": "send email"}, + {"description": "Other task"}, + ], + } + + result = core.extract_search_terms_from_steps(decomposition_result) + + # Should only have one "send email" variant + email_terms = [t for t in result if "email" in t.lower()] + assert len(email_terms) == 1 + + def test_filters_short_terms(self): + """Test that terms with 3 or fewer characters are filtered out.""" + decomposition_result = { + "type": "instructions", + "steps": [ + {"description": "ab", "action": "xyz"}, # Both too short + {"description": "Valid term here"}, + ], + } + + result = core.extract_search_terms_from_steps(decomposition_result) + + assert "ab" not in result + assert "xyz" not in result + assert "Valid term here" in result + + def test_handles_empty_steps(self): + """Test handling of empty steps list.""" + 
decomposition_result = { + "type": "instructions", + "steps": [], + } + + result = core.extract_search_terms_from_steps(decomposition_result) + + assert result == [] + + +class TestEnrichLibraryAgentsFromSteps: + """Test enrich_library_agents_from_steps function.""" + + @pytest.mark.asyncio + async def test_enriches_with_additional_agents(self): + """Test that additional agents are found based on steps.""" + existing_agents = [ + { + "graph_id": "existing-123", + "graph_version": 1, + "name": "Existing Agent", + "description": "Already fetched", + "input_schema": {}, + "output_schema": {}, + } + ] + + additional_agents = [ + { + "graph_id": "new-456", + "graph_version": 1, + "name": "Email Agent", + "description": "For sending emails", + "input_schema": {}, + "output_schema": {}, + } + ] + + decomposition_result = { + "type": "instructions", + "steps": [ + {"description": "Send email notification"}, + ], + } + + with patch.object( + core, + "get_all_relevant_agents_for_generation", + new_callable=AsyncMock, + return_value=additional_agents, + ): + result = await core.enrich_library_agents_from_steps( + user_id="user-123", + decomposition_result=decomposition_result, + existing_agents=existing_agents, + ) + + # Should have both existing and new agents + assert len(result) == 2 + names = [a["name"] for a in result] + assert "Existing Agent" in names + assert "Email Agent" in names + + @pytest.mark.asyncio + async def test_deduplicates_by_graph_id(self): + """Test that agents with same graph_id are not duplicated.""" + existing_agents = [ + { + "graph_id": "agent-123", + "graph_version": 1, + "name": "Existing Agent", + "description": "Already fetched", + "input_schema": {}, + "output_schema": {}, + } + ] + + # Additional search returns same agent + additional_agents = [ + { + "graph_id": "agent-123", # Same ID + "graph_version": 1, + "name": "Existing Agent Copy", + "description": "Same agent different name", + "input_schema": {}, + "output_schema": {}, + } + ] + + decomposition_result = { + "type": "instructions", + "steps": [{"description": "Some action"}], + } + + with patch.object( + core, + "get_all_relevant_agents_for_generation", + new_callable=AsyncMock, + return_value=additional_agents, + ): + result = await core.enrich_library_agents_from_steps( + user_id="user-123", + decomposition_result=decomposition_result, + existing_agents=existing_agents, + ) + + # Should not duplicate + assert len(result) == 1 + + @pytest.mark.asyncio + async def test_deduplicates_by_name(self): + """Test that agents with same name are not duplicated.""" + existing_agents = [ + { + "graph_id": "agent-123", + "graph_version": 1, + "name": "Email Agent", + "description": "Already fetched", + "input_schema": {}, + "output_schema": {}, + } + ] + + # Additional search returns agent with same name but different ID + additional_agents = [ + { + "graph_id": "agent-456", # Different ID + "graph_version": 1, + "name": "Email Agent", # Same name + "description": "Different agent same name", + "input_schema": {}, + "output_schema": {}, + } + ] + + decomposition_result = { + "type": "instructions", + "steps": [{"description": "Send email"}], + } + + with patch.object( + core, + "get_all_relevant_agents_for_generation", + new_callable=AsyncMock, + return_value=additional_agents, + ): + result = await core.enrich_library_agents_from_steps( + user_id="user-123", + decomposition_result=decomposition_result, + existing_agents=existing_agents, + ) + + # Should not duplicate by name + assert len(result) == 1 + assert 
result[0].get("graph_id") == "agent-123" # Original kept + + @pytest.mark.asyncio + async def test_returns_existing_when_no_steps(self): + """Test that existing agents are returned when no search terms extracted.""" + existing_agents = [ + { + "graph_id": "existing-123", + "graph_version": 1, + "name": "Existing Agent", + "description": "Already fetched", + "input_schema": {}, + "output_schema": {}, + } + ] + + decomposition_result = { + "type": "clarifying_questions", # Not instructions type + "questions": [], + } + + result = await core.enrich_library_agents_from_steps( + user_id="user-123", + decomposition_result=decomposition_result, + existing_agents=existing_agents, + ) + + # Should return existing unchanged + assert result == existing_agents + + @pytest.mark.asyncio + async def test_limits_search_terms_to_three(self): + """Test that only first 3 search terms are used.""" + existing_agents = [] + + decomposition_result = { + "type": "instructions", + "steps": [ + {"description": "First action"}, + {"description": "Second action"}, + {"description": "Third action"}, + {"description": "Fourth action"}, + {"description": "Fifth action"}, + ], + } + + call_count = 0 + + async def mock_get_agents(*args, **kwargs): + nonlocal call_count + call_count += 1 + return [] + + with patch.object( + core, + "get_all_relevant_agents_for_generation", + side_effect=mock_get_agents, + ): + await core.enrich_library_agents_from_steps( + user_id="user-123", + decomposition_result=decomposition_result, + existing_agents=existing_agents, + ) + + # Should only make 3 calls (limited to first 3 terms) + assert call_count == 3 + + +class TestExtractUuidsFromText: + """Test extract_uuids_from_text function.""" + + def test_extracts_single_uuid(self): + """Test extraction of a single UUID from text.""" + text = "Use my agent 46631191-e8a8-486f-ad90-84f89738321d for this task" + result = core.extract_uuids_from_text(text) + assert len(result) == 1 + assert "46631191-e8a8-486f-ad90-84f89738321d" in result + + def test_extracts_multiple_uuids(self): + """Test extraction of multiple UUIDs from text.""" + text = ( + "Combine agents 11111111-1111-4111-8111-111111111111 " + "and 22222222-2222-4222-9222-222222222222" + ) + result = core.extract_uuids_from_text(text) + assert len(result) == 2 + assert "11111111-1111-4111-8111-111111111111" in result + assert "22222222-2222-4222-9222-222222222222" in result + + def test_deduplicates_uuids(self): + """Test that duplicate UUIDs are deduplicated.""" + text = ( + "Use 46631191-e8a8-486f-ad90-84f89738321d twice: " + "46631191-e8a8-486f-ad90-84f89738321d" + ) + result = core.extract_uuids_from_text(text) + assert len(result) == 1 + + def test_normalizes_to_lowercase(self): + """Test that UUIDs are normalized to lowercase.""" + text = "Use 46631191-E8A8-486F-AD90-84F89738321D" + result = core.extract_uuids_from_text(text) + assert result[0] == "46631191-e8a8-486f-ad90-84f89738321d" + + def test_returns_empty_for_no_uuids(self): + """Test that empty list is returned when no UUIDs found.""" + text = "Create an email agent that sends notifications" + result = core.extract_uuids_from_text(text) + assert result == [] + + def test_ignores_invalid_uuids(self): + """Test that invalid UUID-like strings are ignored.""" + text = "Not a valid UUID: 12345678-1234-1234-1234-123456789abc" + result = core.extract_uuids_from_text(text) + # UUID v4 requires specific patterns (4 in third group, 8/9/a/b in fourth) + assert len(result) == 0 + + +class TestGetLibraryAgentById: + """Test 
get_library_agent_by_id function (and its alias get_library_agent_by_graph_id).""" + + @pytest.mark.asyncio + async def test_returns_agent_when_found_by_graph_id(self): + """Test that agent is returned when found by graph_id.""" + mock_agent = MagicMock() + mock_agent.graph_id = "agent-123" + mock_agent.graph_version = 1 + mock_agent.name = "Test Agent" + mock_agent.description = "Test description" + mock_agent.input_schema = {"properties": {}} + mock_agent.output_schema = {"properties": {}} + + with patch.object( + core.library_db, + "get_library_agent_by_graph_id", + new_callable=AsyncMock, + return_value=mock_agent, + ): + result = await core.get_library_agent_by_id("user-123", "agent-123") + + assert result is not None + assert result["graph_id"] == "agent-123" + assert result["name"] == "Test Agent" + + @pytest.mark.asyncio + async def test_falls_back_to_library_agent_id(self): + """Test that lookup falls back to library agent ID when graph_id not found.""" + mock_agent = MagicMock() + mock_agent.graph_id = "graph-456" # Different from the lookup ID + mock_agent.graph_version = 1 + mock_agent.name = "Library Agent" + mock_agent.description = "Found by library ID" + mock_agent.input_schema = {"properties": {}} + mock_agent.output_schema = {"properties": {}} + + with ( + patch.object( + core.library_db, + "get_library_agent_by_graph_id", + new_callable=AsyncMock, + return_value=None, # Not found by graph_id + ), + patch.object( + core.library_db, + "get_library_agent", + new_callable=AsyncMock, + return_value=mock_agent, # Found by library ID + ), + ): + result = await core.get_library_agent_by_id("user-123", "library-id-123") + + assert result is not None + assert result["graph_id"] == "graph-456" + assert result["name"] == "Library Agent" + + @pytest.mark.asyncio + async def test_returns_none_when_not_found_by_either_method(self): + """Test that None is returned when agent not found by either method.""" + with ( + patch.object( + core.library_db, + "get_library_agent_by_graph_id", + new_callable=AsyncMock, + return_value=None, + ), + patch.object( + core.library_db, + "get_library_agent", + new_callable=AsyncMock, + side_effect=core.NotFoundError("Not found"), + ), + ): + result = await core.get_library_agent_by_id("user-123", "nonexistent") + + assert result is None + + @pytest.mark.asyncio + async def test_returns_none_on_exception(self): + """Test that None is returned when exception occurs in both lookups.""" + with ( + patch.object( + core.library_db, + "get_library_agent_by_graph_id", + new_callable=AsyncMock, + side_effect=Exception("Database error"), + ), + patch.object( + core.library_db, + "get_library_agent", + new_callable=AsyncMock, + side_effect=Exception("Database error"), + ), + ): + result = await core.get_library_agent_by_id("user-123", "agent-123") + + assert result is None + + @pytest.mark.asyncio + async def test_alias_works(self): + """Test that get_library_agent_by_graph_id is an alias for get_library_agent_by_id.""" + assert core.get_library_agent_by_graph_id is core.get_library_agent_by_id + + +class TestGetAllRelevantAgentsWithUuids: + """Test UUID extraction in get_all_relevant_agents_for_generation.""" + + @pytest.mark.asyncio + async def test_fetches_explicitly_mentioned_agents(self): + """Test that agents mentioned by UUID are fetched directly.""" + mock_agent = MagicMock() + mock_agent.graph_id = "46631191-e8a8-486f-ad90-84f89738321d" + mock_agent.graph_version = 1 + mock_agent.name = "Mentioned Agent" + mock_agent.description = "Explicitly mentioned" + 
mock_agent.input_schema = {} + mock_agent.output_schema = {} + + mock_response = MagicMock() + mock_response.agents = [] + + with ( + patch.object( + core.library_db, + "get_library_agent_by_graph_id", + new_callable=AsyncMock, + return_value=mock_agent, + ), + patch.object( + core.library_db, + "list_library_agents", + new_callable=AsyncMock, + return_value=mock_response, + ), + ): + result = await core.get_all_relevant_agents_for_generation( + user_id="user-123", + search_query="Use agent 46631191-e8a8-486f-ad90-84f89738321d", + include_marketplace=False, + ) + + assert len(result) == 1 + assert result[0].get("graph_id") == "46631191-e8a8-486f-ad90-84f89738321d" + assert result[0].get("name") == "Mentioned Agent" + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/autogpt_platform/backend/test/agent_generator/test_service.py b/autogpt_platform/backend/test/agent_generator/test_service.py index fe7a1a7fdd..d62dca1729 100644 --- a/autogpt_platform/backend/test/agent_generator/test_service.py +++ b/autogpt_platform/backend/test/agent_generator/test_service.py @@ -433,5 +433,139 @@ class TestGetBlocksExternal: assert result is None +class TestLibraryAgentsPassthrough: + """Test that library_agents are passed correctly in all requests.""" + + def setup_method(self): + """Reset client singleton before each test.""" + service._settings = None + service._client = None + + @pytest.mark.asyncio + async def test_decompose_goal_passes_library_agents(self): + """Test that library_agents are included in decompose goal payload.""" + library_agents = [ + { + "graph_id": "agent-123", + "graph_version": 1, + "name": "Email Sender", + "description": "Sends emails", + "input_schema": {"properties": {"to": {"type": "string"}}}, + "output_schema": {"properties": {"sent": {"type": "boolean"}}}, + }, + ] + + mock_response = MagicMock() + mock_response.json.return_value = { + "success": True, + "type": "instructions", + "steps": ["Step 1"], + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.post.return_value = mock_response + + with patch.object(service, "_get_client", return_value=mock_client): + await service.decompose_goal_external( + "Send an email", + library_agents=library_agents, + ) + + # Verify library_agents was passed in the payload + call_args = mock_client.post.call_args + assert call_args[1]["json"]["library_agents"] == library_agents + + @pytest.mark.asyncio + async def test_generate_agent_passes_library_agents(self): + """Test that library_agents are included in generate agent payload.""" + library_agents = [ + { + "graph_id": "agent-456", + "graph_version": 2, + "name": "Data Fetcher", + "description": "Fetches data from API", + "input_schema": {"properties": {"url": {"type": "string"}}}, + "output_schema": {"properties": {"data": {"type": "object"}}}, + }, + ] + + mock_response = MagicMock() + mock_response.json.return_value = { + "success": True, + "agent_json": {"name": "Test Agent", "nodes": []}, + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.post.return_value = mock_response + + with patch.object(service, "_get_client", return_value=mock_client): + await service.generate_agent_external( + {"steps": ["Step 1"]}, + library_agents=library_agents, + ) + + # Verify library_agents was passed in the payload + call_args = mock_client.post.call_args + assert call_args[1]["json"]["library_agents"] == library_agents + + @pytest.mark.asyncio + async def 
test_generate_agent_patch_passes_library_agents(self): + """Test that library_agents are included in patch generation payload.""" + library_agents = [ + { + "graph_id": "agent-789", + "graph_version": 1, + "name": "Slack Notifier", + "description": "Sends Slack messages", + "input_schema": {"properties": {"message": {"type": "string"}}}, + "output_schema": {"properties": {"success": {"type": "boolean"}}}, + }, + ] + + mock_response = MagicMock() + mock_response.json.return_value = { + "success": True, + "agent_json": {"name": "Updated Agent", "nodes": []}, + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.post.return_value = mock_response + + with patch.object(service, "_get_client", return_value=mock_client): + await service.generate_agent_patch_external( + "Add error handling", + {"name": "Original Agent", "nodes": []}, + library_agents=library_agents, + ) + + # Verify library_agents was passed in the payload + call_args = mock_client.post.call_args + assert call_args[1]["json"]["library_agents"] == library_agents + + @pytest.mark.asyncio + async def test_decompose_goal_without_library_agents(self): + """Test that decompose goal works without library_agents.""" + mock_response = MagicMock() + mock_response.json.return_value = { + "success": True, + "type": "instructions", + "steps": ["Step 1"], + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.post.return_value = mock_response + + with patch.object(service, "_get_client", return_value=mock_client): + await service.decompose_goal_external("Build a workflow") + + # Verify library_agents was NOT passed when not provided + call_args = mock_client.post.call_args + assert "library_agents" not in call_args[1]["json"] + + if __name__ == "__main__": pytest.main([__file__, "-v"]) diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode.tsx index 94e917a4ac..834603cc4a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode.tsx @@ -857,7 +857,7 @@ export const CustomNode = React.memo( })(); const hasAdvancedFields = - data.inputSchema && + data.inputSchema?.properties && Object.entries(data.inputSchema.properties).some(([key, value]) => { return ( value.advanced === true && !data.inputSchema.required?.includes(key) diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index 6692c30e72..a44ceb8388 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -7981,6 +7981,25 @@ ] }, "new_output": { "type": "boolean", "title": "New Output" }, + "execution_count": { + "type": "integer", + "title": "Execution Count", + "default": 0 + }, + "success_rate": { + "anyOf": [{ "type": "number" }, { "type": "null" }], + "title": "Success Rate" + }, + "avg_correctness_score": { + "anyOf": [{ "type": "number" }, { "type": "null" }], + "title": "Avg Correctness Score" + }, + "recent_executions": { + "items": { "$ref": "#/components/schemas/RecentExecution" }, + "type": "array", + "title": "Recent Executions", + "description": "List of recent executions with status, score, and summary" + }, "can_access_graph": { "type": "boolean", 
"title": "Can Access Graph" @@ -9374,6 +9393,23 @@ "required": ["providers", "pagination"], "title": "ProviderResponse" }, + "RecentExecution": { + "properties": { + "status": { "type": "string", "title": "Status" }, + "correctness_score": { + "anyOf": [{ "type": "number" }, { "type": "null" }], + "title": "Correctness Score" + }, + "activity_summary": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Activity Summary" + } + }, + "type": "object", + "required": ["status"], + "title": "RecentExecution", + "description": "Summary of a recent execution for quality assessment.\n\nUsed by the LLM to understand the agent's recent performance with specific examples\nrather than just aggregate statistics." + }, "RefundRequest": { "properties": { "id": { "type": "string", "title": "Id" }, diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx index c922d0da76..2ac433a272 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx @@ -156,11 +156,19 @@ export function ChatMessage({ } if (isClarificationNeeded && message.type === "clarification_needed") { + const hasUserReplyAfter = + index >= 0 && + messages + .slice(index + 1) + .some((m) => m.type === "message" && m.role === "user"); + return ( ); diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ClarificationQuestionsWidget/ClarificationQuestionsWidget.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ClarificationQuestionsWidget/ClarificationQuestionsWidget.tsx index a3bd17dd3f..3b225d1ef1 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ClarificationQuestionsWidget/ClarificationQuestionsWidget.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ClarificationQuestionsWidget/ClarificationQuestionsWidget.tsx @@ -6,7 +6,7 @@ import { Input } from "@/components/atoms/Input/Input"; import { Text } from "@/components/atoms/Text/Text"; import { cn } from "@/lib/utils"; import { CheckCircleIcon, QuestionIcon } from "@phosphor-icons/react"; -import { useState } from "react"; +import { useState, useEffect, useRef } from "react"; export interface ClarifyingQuestion { question: string; @@ -17,39 +17,96 @@ export interface ClarifyingQuestion { interface Props { questions: ClarifyingQuestion[]; message: string; + sessionId?: string; onSubmitAnswers: (answers: Record) => void; onCancel?: () => void; + isAnswered?: boolean; className?: string; } +function getStorageKey(sessionId?: string): string | null { + if (!sessionId) return null; + return `clarification_answers_${sessionId}`; +} + export function ClarificationQuestionsWidget({ questions, message, + sessionId, onSubmitAnswers, onCancel, + isAnswered = false, className, }: Props) { const [answers, setAnswers] = useState>({}); const [isSubmitted, setIsSubmitted] = useState(false); + const lastSessionIdRef = useRef(undefined); + + useEffect(() => { + const storageKey = getStorageKey(sessionId); + if (!storageKey) { + setAnswers({}); + setIsSubmitted(false); + lastSessionIdRef.current = sessionId; + return; + } + + try { + const saved = localStorage.getItem(storageKey); + if (saved) { + const parsed = JSON.parse(saved) as Record; + setAnswers(parsed); + } else { + setAnswers({}); + } 
+ setIsSubmitted(false); + } catch { + setAnswers({}); + setIsSubmitted(false); + } + lastSessionIdRef.current = sessionId; + }, [sessionId]); + + useEffect(() => { + if (lastSessionIdRef.current !== sessionId) { + return; + } + const storageKey = getStorageKey(sessionId); + if (!storageKey) return; + + const hasAnswers = Object.values(answers).some((v) => v.trim()); + try { + if (hasAnswers) { + localStorage.setItem(storageKey, JSON.stringify(answers)); + } else { + localStorage.removeItem(storageKey); + } + } catch {} + }, [answers, sessionId]); function handleAnswerChange(keyword: string, value: string) { setAnswers((prev) => ({ ...prev, [keyword]: value })); } function handleSubmit() { - // Check if all questions are answered const allAnswered = questions.every((q) => answers[q.keyword]?.trim()); if (!allAnswered) { return; } setIsSubmitted(true); onSubmitAnswers(answers); + + const storageKey = getStorageKey(sessionId); + try { + if (storageKey) { + localStorage.removeItem(storageKey); + } + } catch {} } const allAnswered = questions.every((q) => answers[q.keyword]?.trim()); - // Show submitted state after answers are submitted - if (isSubmitted) { + if (isAnswered || isSubmitted) { return (
; - if (response.error) return stripInternalReasoning(String(response.error)); if (response.message) return stripInternalReasoning(String(response.message)); + if (response.error) return stripInternalReasoning(String(response.error)); } return "An error occurred"; } @@ -363,8 +363,8 @@ export function formatToolResponse(result: unknown, toolName: string): string { case "error": const errorMsg = - (response.error as string) || response.message || "An error occurred"; - return `Error: ${errorMsg}`; + (response.message as string) || response.error || "An error occurred"; + return stripInternalReasoning(String(errorMsg)); case "no_results": const suggestions = (response.suggestions as string[]) || []; From 7ee94d986cd39ba1c39ab0cd4577039cdeb83b92 Mon Sep 17 00:00:00 2001 From: Otto Date: Sat, 31 Jan 2026 03:05:31 +0000 Subject: [PATCH 4/6] docs: add credentials prerequisites to create-basic-agent guide (#11913) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary Addresses #11785 - users were encountering `openai_api_key_credentials` errors when following the create-basic-agent guide because it didn't mention the need to configure API credentials before using AI blocks. ## Changes Added a **Prerequisites** section to `docs/platform/create-basic-agent.md` explaining: - **Cloud users:** Go to Profile → Integrations to add API keys - **Self-hosted (Docker):** Add keys to `autogpt_platform/backend/.env` and restart services Also added a note that the Calculator example doesn't need credentials, making it a good first test. ## Related - Issue: #11785 --- docs/platform/create-basic-agent.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/docs/platform/create-basic-agent.md b/docs/platform/create-basic-agent.md index 7721fb9b9c..ffe654ba99 100644 --- a/docs/platform/create-basic-agent.md +++ b/docs/platform/create-basic-agent.md @@ -4,6 +4,28 @@ This guide walks through creating a simple question-answer AI agent using AutoGPT's visual builder. This is a basic example that can be expanded into more complex agents. +## **Prerequisites** + +### **Cloud-Hosted AutoGPT** +If you're using the cloud-hosted version at [agpt.co](https://agpt.co), you're ready to go! AI blocks come with **built-in credits** — no API keys required to get started. If you'd prefer to use your own API keys, you can add them via **Profile → Integrations**. + +### **Self-Hosted (Docker)** +If you're running AutoGPT locally with Docker, you'll need to add your own API keys to `autogpt_platform/backend/.env`: + +```bash +# Create or edit backend/.env +OPENAI_API_KEY=sk-your-key-here +ANTHROPIC_API_KEY=sk-ant-your-key-here +# Add other provider keys as needed +``` + +After adding keys, restart the services: +```bash +docker compose down && docker compose up -d +``` + +**Note:** The Calculator example below doesn't require any API credentials — it's a good way to test your setup before adding AI blocks. + ## **Example Agent: Q&A (with AI)** A step-by-step guide to creating a simple Q&A agent using input and output blocks. From 7e37de8e30529bd9b8c2ee3e3e74bf689ff13783 Mon Sep 17 00:00:00 2001 From: Otto Date: Sat, 31 Jan 2026 19:17:36 +0000 Subject: [PATCH 5/6] fix: Include graph schemas for marketplace agents in Agent Generator (#11920) ## Problem When marketplace agents are included in the `library_agents` payload sent to the Agent Generator service, they were missing required fields (`graph_id`, `graph_version`, `input_schema`, `output_schema`). 
This caused Pydantic validation to fail with HTTP 422 Unprocessable Entity. **Root cause:** The `MarketplaceAgentSummary` TypedDict had a different shape than `LibraryAgentInfo` expected by the Agent Generator: - Agent Generator expects: `graph_id`, `graph_version`, `name`, `description`, `input_schema`, `output_schema` - MarketplaceAgentSummary had: `name`, `description`, `sub_heading`, `creator`, `is_marketplace_agent` ## Solution 1. **Add `agent_graph_id` to `StoreAgent` model** - The field was already in the database view but not exposed 2. **Include `agentGraphId` in hybrid search SQL query** - Carry the field through the search CTEs 3. **Update `search_marketplace_agents_for_generation()`** - Now fetches full graph schemas using `get_graph()` and returns `LibraryAgentSummary` (same type as library agents) 4. **Update deduplication logic** - Use `graph_id` instead of name for more accurate deduplication ## Changes - `backend/api/features/store/model.py`: Add optional `agent_graph_id` field to `StoreAgent` - `backend/api/features/store/hybrid_search.py`: Include `agentGraphId` in SQL query columns - `backend/api/features/store/db.py`: Map `agentGraphId` when creating `StoreAgent` objects - `backend/api/features/chat/tools/agent_generator/core.py`: Update `search_marketplace_agents_for_generation()` to fetch and include full graph schemas ## Testing - [ ] Agent creation on dev with marketplace agents in context - [ ] Verify no 422 errors from Agent Generator - [ ] Verify marketplace agents can be used as sub-agents Fixes: SECRT-1817 --------- Co-authored-by: majdyz Co-authored-by: Zamil Majdy --- .../chat/tools/agent_generator/core.py | 59 +++++++++++-------- .../backend/backend/api/features/store/db.py | 2 + .../api/features/store/hybrid_search.py | 2 + .../backend/api/features/store/model.py | 1 + .../backend/api/features/store/model_test.py | 3 + .../backend/api/features/store/routes_test.py | 6 ++ .../api/features/store/test_cache_delete.py | 1 + .../backend/backend/data/graph.py | 33 +++++++++++ .../backend/snapshots/agts_by_creator | 3 +- .../backend/snapshots/agts_category | 3 +- .../backend/snapshots/agts_pagination | 15 +++-- .../backend/snapshots/agts_search | 3 +- .../backend/snapshots/agts_sorted | 3 +- autogpt_platform/backend/snapshots/feat_agts | 3 +- .../agent_generator/test_library_agents.py | 58 +++++++++++------- .../frontend/src/app/api/openapi.json | 6 +- 16 files changed, 143 insertions(+), 58 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py index 466f6438a3..0ddd2aa86b 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py @@ -14,6 +14,7 @@ from backend.data.graph import ( create_graph, get_graph, get_graph_all_versions, + get_store_listed_graphs, ) from backend.util.exceptions import DatabaseError, NotFoundError @@ -266,18 +267,18 @@ async def get_library_agents_for_generation( async def search_marketplace_agents_for_generation( search_query: str, max_results: int = 10, -) -> list[MarketplaceAgentSummary]: +) -> list[LibraryAgentSummary]: """Search marketplace agents formatted for Agent Generator. - Note: This returns basic agent info. Full input/output schemas would require - additional graph fetches and is a potential future enhancement. 
+ Fetches marketplace agents and their full schemas so they can be used + as sub-agents in generated workflows. Args: search_query: Search term to find relevant public agents max_results: Maximum number of agents to return (default 10) Returns: - List of MarketplaceAgentSummary (without detailed schemas for now) + List of LibraryAgentSummary with full input/output schemas """ try: response = await store_db.get_store_agents( @@ -286,17 +287,31 @@ async def search_marketplace_agents_for_generation( page_size=max_results, ) - results: list[MarketplaceAgentSummary] = [] - for agent in response.agents: - results.append( - MarketplaceAgentSummary( - name=agent.agent_name, - description=agent.description, - sub_heading=agent.sub_heading, - creator=agent.creator, - is_marketplace_agent=True, + agents_with_graphs = [ + agent for agent in response.agents if agent.agent_graph_id + ] + + if not agents_with_graphs: + return [] + + graph_ids = [agent.agent_graph_id for agent in agents_with_graphs] + graphs = await get_store_listed_graphs(*graph_ids) + + results: list[LibraryAgentSummary] = [] + for agent in agents_with_graphs: + graph_id = agent.agent_graph_id + if graph_id and graph_id in graphs: + graph = graphs[graph_id] + results.append( + LibraryAgentSummary( + graph_id=graph.id, + graph_version=graph.version, + name=agent.agent_name, + description=agent.description, + input_schema=graph.input_schema, + output_schema=graph.output_schema, + ) ) - ) return results except Exception as e: logger.warning(f"Failed to search marketplace agents: {e}") @@ -327,8 +342,7 @@ async def get_all_relevant_agents_for_generation( max_marketplace_results: Max marketplace agents to return (default 10) Returns: - List of AgentSummary, library agents first (with full schemas), - then marketplace agents (basic info only) + List of AgentSummary with full schemas (both library and marketplace agents) """ agents: list[AgentSummary] = [] seen_graph_ids: set[str] = set() @@ -365,16 +379,11 @@ async def get_all_relevant_agents_for_generation( search_query=search_query, max_results=max_marketplace_results, ) - library_names: set[str] = set() - for a in agents: - name = a.get("name") - if name and isinstance(name, str): - library_names.add(name.lower()) for agent in marketplace_agents: - agent_name = agent.get("name") - if agent_name and isinstance(agent_name, str): - if agent_name.lower() not in library_names: - agents.append(agent) + graph_id = agent.get("graph_id") + if graph_id and graph_id not in seen_graph_ids: + agents.append(agent) + seen_graph_ids.add(graph_id) return agents diff --git a/autogpt_platform/backend/backend/api/features/store/db.py b/autogpt_platform/backend/backend/api/features/store/db.py index 956fdfa7da..850a2bc3e9 100644 --- a/autogpt_platform/backend/backend/api/features/store/db.py +++ b/autogpt_platform/backend/backend/api/features/store/db.py @@ -112,6 +112,7 @@ async def get_store_agents( description=agent["description"], runs=agent["runs"], rating=agent["rating"], + agent_graph_id=agent.get("agentGraphId", ""), ) store_agents.append(store_agent) except Exception as e: @@ -170,6 +171,7 @@ async def get_store_agents( description=agent.description, runs=agent.runs, rating=agent.rating, + agent_graph_id=agent.agentGraphId, ) # Add to the list only if creation was successful store_agents.append(store_agent) diff --git a/autogpt_platform/backend/backend/api/features/store/hybrid_search.py b/autogpt_platform/backend/backend/api/features/store/hybrid_search.py index 8b0884bb24..e1b8f402c8 100644 --- 
a/autogpt_platform/backend/backend/api/features/store/hybrid_search.py +++ b/autogpt_platform/backend/backend/api/features/store/hybrid_search.py @@ -600,6 +600,7 @@ async def hybrid_search( sa.featured, sa.is_available, sa.updated_at, + sa."agentGraphId", -- Searchable text for BM25 reranking COALESCE(sa.agent_name, '') || ' ' || COALESCE(sa.sub_heading, '') || ' ' || COALESCE(sa.description, '') as searchable_text, -- Semantic score @@ -659,6 +660,7 @@ async def hybrid_search( featured, is_available, updated_at, + "agentGraphId", searchable_text, semantic_score, lexical_score, diff --git a/autogpt_platform/backend/backend/api/features/store/model.py b/autogpt_platform/backend/backend/api/features/store/model.py index a3310b96fc..d66b91807d 100644 --- a/autogpt_platform/backend/backend/api/features/store/model.py +++ b/autogpt_platform/backend/backend/api/features/store/model.py @@ -38,6 +38,7 @@ class StoreAgent(pydantic.BaseModel): description: str runs: int rating: float + agent_graph_id: str class StoreAgentsResponse(pydantic.BaseModel): diff --git a/autogpt_platform/backend/backend/api/features/store/model_test.py b/autogpt_platform/backend/backend/api/features/store/model_test.py index fd09a0cf77..c4109f4603 100644 --- a/autogpt_platform/backend/backend/api/features/store/model_test.py +++ b/autogpt_platform/backend/backend/api/features/store/model_test.py @@ -26,11 +26,13 @@ def test_store_agent(): description="Test description", runs=50, rating=4.5, + agent_graph_id="test-graph-id", ) assert agent.slug == "test-agent" assert agent.agent_name == "Test Agent" assert agent.runs == 50 assert agent.rating == 4.5 + assert agent.agent_graph_id == "test-graph-id" def test_store_agents_response(): @@ -46,6 +48,7 @@ def test_store_agents_response(): description="Test description", runs=50, rating=4.5, + agent_graph_id="test-graph-id", ) ], pagination=store_model.Pagination( diff --git a/autogpt_platform/backend/backend/api/features/store/routes_test.py b/autogpt_platform/backend/backend/api/features/store/routes_test.py index 36431c20ec..fcef3f845a 100644 --- a/autogpt_platform/backend/backend/api/features/store/routes_test.py +++ b/autogpt_platform/backend/backend/api/features/store/routes_test.py @@ -82,6 +82,7 @@ def test_get_agents_featured( description="Featured agent description", runs=100, rating=4.5, + agent_graph_id="test-graph-1", ) ], pagination=store_model.Pagination( @@ -127,6 +128,7 @@ def test_get_agents_by_creator( description="Creator agent description", runs=50, rating=4.0, + agent_graph_id="test-graph-2", ) ], pagination=store_model.Pagination( @@ -172,6 +174,7 @@ def test_get_agents_sorted( description="Top agent description", runs=1000, rating=5.0, + agent_graph_id="test-graph-3", ) ], pagination=store_model.Pagination( @@ -217,6 +220,7 @@ def test_get_agents_search( description="Specific search term description", runs=75, rating=4.2, + agent_graph_id="test-graph-search", ) ], pagination=store_model.Pagination( @@ -262,6 +266,7 @@ def test_get_agents_category( description="Category agent description", runs=60, rating=4.1, + agent_graph_id="test-graph-category", ) ], pagination=store_model.Pagination( @@ -306,6 +311,7 @@ def test_get_agents_pagination( description=f"Agent {i} description", runs=i * 10, rating=4.0, + agent_graph_id="test-graph-2", ) for i in range(5) ], diff --git a/autogpt_platform/backend/backend/api/features/store/test_cache_delete.py b/autogpt_platform/backend/backend/api/features/store/test_cache_delete.py index dd9be1f4ab..298c51d47c 100644 --- 
a/autogpt_platform/backend/backend/api/features/store/test_cache_delete.py +++ b/autogpt_platform/backend/backend/api/features/store/test_cache_delete.py @@ -33,6 +33,7 @@ class TestCacheDeletion: description="Test description", runs=100, rating=4.5, + agent_graph_id="test-graph-id", ) ], pagination=Pagination( diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index c1f38f81d5..ee6cd2e4b0 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -1028,6 +1028,39 @@ async def get_graph( return GraphModel.from_db(graph, for_export) +async def get_store_listed_graphs(*graph_ids: str) -> dict[str, GraphModel]: + """Batch-fetch multiple store-listed graphs by their IDs. + + Only returns graphs that have approved store listings (publicly available). + Does not require permission checks since store-listed graphs are public. + + Args: + *graph_ids: Variable number of graph IDs to fetch + + Returns: + Dict mapping graph_id to GraphModel for graphs with approved store listings + """ + if not graph_ids: + return {} + + store_listings = await StoreListingVersion.prisma().find_many( + where={ + "agentGraphId": {"in": list(graph_ids)}, + "submissionStatus": SubmissionStatus.APPROVED, + "isDeleted": False, + }, + include={"AgentGraph": {"include": AGENT_GRAPH_INCLUDE}}, + distinct=["agentGraphId"], + order={"agentGraphVersion": "desc"}, + ) + + return { + listing.agentGraphId: GraphModel.from_db(listing.AgentGraph) + for listing in store_listings + if listing.AgentGraph + } + + async def get_graph_as_admin( graph_id: str, version: int | None = None, diff --git a/autogpt_platform/backend/snapshots/agts_by_creator b/autogpt_platform/backend/snapshots/agts_by_creator index 4d6dd12920..3f2e128a0d 100644 --- a/autogpt_platform/backend/snapshots/agts_by_creator +++ b/autogpt_platform/backend/snapshots/agts_by_creator @@ -9,7 +9,8 @@ "sub_heading": "Creator agent subheading", "description": "Creator agent description", "runs": 50, - "rating": 4.0 + "rating": 4.0, + "agent_graph_id": "test-graph-2" } ], "pagination": { diff --git a/autogpt_platform/backend/snapshots/agts_category b/autogpt_platform/backend/snapshots/agts_category index f65925ead3..4d0531763c 100644 --- a/autogpt_platform/backend/snapshots/agts_category +++ b/autogpt_platform/backend/snapshots/agts_category @@ -9,7 +9,8 @@ "sub_heading": "Category agent subheading", "description": "Category agent description", "runs": 60, - "rating": 4.1 + "rating": 4.1, + "agent_graph_id": "test-graph-category" } ], "pagination": { diff --git a/autogpt_platform/backend/snapshots/agts_pagination b/autogpt_platform/backend/snapshots/agts_pagination index 82e7f5f9bf..7b946157fb 100644 --- a/autogpt_platform/backend/snapshots/agts_pagination +++ b/autogpt_platform/backend/snapshots/agts_pagination @@ -9,7 +9,8 @@ "sub_heading": "Agent 0 subheading", "description": "Agent 0 description", "runs": 0, - "rating": 4.0 + "rating": 4.0, + "agent_graph_id": "test-graph-2" }, { "slug": "agent-1", @@ -20,7 +21,8 @@ "sub_heading": "Agent 1 subheading", "description": "Agent 1 description", "runs": 10, - "rating": 4.0 + "rating": 4.0, + "agent_graph_id": "test-graph-2" }, { "slug": "agent-2", @@ -31,7 +33,8 @@ "sub_heading": "Agent 2 subheading", "description": "Agent 2 description", "runs": 20, - "rating": 4.0 + "rating": 4.0, + "agent_graph_id": "test-graph-2" }, { "slug": "agent-3", @@ -42,7 +45,8 @@ "sub_heading": "Agent 3 subheading", "description": "Agent 3 
description", "runs": 30, - "rating": 4.0 + "rating": 4.0, + "agent_graph_id": "test-graph-2" }, { "slug": "agent-4", @@ -53,7 +57,8 @@ "sub_heading": "Agent 4 subheading", "description": "Agent 4 description", "runs": 40, - "rating": 4.0 + "rating": 4.0, + "agent_graph_id": "test-graph-2" } ], "pagination": { diff --git a/autogpt_platform/backend/snapshots/agts_search b/autogpt_platform/backend/snapshots/agts_search index ca3f504584..ae9cc116bc 100644 --- a/autogpt_platform/backend/snapshots/agts_search +++ b/autogpt_platform/backend/snapshots/agts_search @@ -9,7 +9,8 @@ "sub_heading": "Search agent subheading", "description": "Specific search term description", "runs": 75, - "rating": 4.2 + "rating": 4.2, + "agent_graph_id": "test-graph-search" } ], "pagination": { diff --git a/autogpt_platform/backend/snapshots/agts_sorted b/autogpt_platform/backend/snapshots/agts_sorted index cddead76a5..b182256b2c 100644 --- a/autogpt_platform/backend/snapshots/agts_sorted +++ b/autogpt_platform/backend/snapshots/agts_sorted @@ -9,7 +9,8 @@ "sub_heading": "Top agent subheading", "description": "Top agent description", "runs": 1000, - "rating": 5.0 + "rating": 5.0, + "agent_graph_id": "test-graph-3" } ], "pagination": { diff --git a/autogpt_platform/backend/snapshots/feat_agts b/autogpt_platform/backend/snapshots/feat_agts index d57996a768..4f85786434 100644 --- a/autogpt_platform/backend/snapshots/feat_agts +++ b/autogpt_platform/backend/snapshots/feat_agts @@ -9,7 +9,8 @@ "sub_heading": "Featured agent subheading", "description": "Featured agent description", "runs": 100, - "rating": 4.5 + "rating": 4.5, + "agent_graph_id": "test-graph-1" } ], "pagination": { diff --git a/autogpt_platform/backend/test/agent_generator/test_library_agents.py b/autogpt_platform/backend/test/agent_generator/test_library_agents.py index e62b0746e7..8387339582 100644 --- a/autogpt_platform/backend/test/agent_generator/test_library_agents.py +++ b/autogpt_platform/backend/test/agent_generator/test_library_agents.py @@ -134,15 +134,28 @@ class TestSearchMarketplaceAgentsForGeneration: description="A public agent", sub_heading="Does something useful", creator="creator-1", + agent_graph_id="graph-123", ) ] - # The store_db is dynamically imported, so patch the import path - with patch( - "backend.api.features.store.db.get_store_agents", - new_callable=AsyncMock, - return_value=mock_response, - ) as mock_search: + mock_graph = MagicMock() + mock_graph.id = "graph-123" + mock_graph.version = 1 + mock_graph.input_schema = {"type": "object"} + mock_graph.output_schema = {"type": "object"} + + with ( + patch( + "backend.api.features.store.db.get_store_agents", + new_callable=AsyncMock, + return_value=mock_response, + ) as mock_search, + patch( + "backend.api.features.chat.tools.agent_generator.core.get_store_listed_graphs", + new_callable=AsyncMock, + return_value={"graph-123": mock_graph}, + ), + ): result = await core.search_marketplace_agents_for_generation( search_query="automation", max_results=10, @@ -156,7 +169,7 @@ class TestSearchMarketplaceAgentsForGeneration: assert len(result) == 1 assert result[0]["name"] == "Public Agent" - assert result[0]["is_marketplace_agent"] is True + assert result[0]["graph_id"] == "graph-123" @pytest.mark.asyncio async def test_handles_marketplace_error_gracefully(self): @@ -193,11 +206,12 @@ class TestGetAllRelevantAgentsForGeneration: marketplace_agents = [ { + "graph_id": "market-456", + "graph_version": 1, "name": "Market Agent", "description": "From marketplace", - "sub_heading": "Sub 
heading", - "creator": "creator-1", - "is_marketplace_agent": True, + "input_schema": {}, + "output_schema": {}, } ] @@ -225,11 +239,11 @@ class TestGetAllRelevantAgentsForGeneration: assert result[1]["name"] == "Market Agent" @pytest.mark.asyncio - async def test_deduplicates_by_name(self): - """Test that marketplace agents with same name as library are excluded.""" + async def test_deduplicates_by_graph_id(self): + """Test that marketplace agents with same graph_id as library are excluded.""" library_agents = [ { - "graph_id": "lib-123", + "graph_id": "shared-123", "graph_version": 1, "name": "Shared Agent", "description": "From library", @@ -240,18 +254,20 @@ class TestGetAllRelevantAgentsForGeneration: marketplace_agents = [ { - "name": "Shared Agent", # Same name, should be deduplicated + "graph_id": "shared-123", # Same graph_id, should be deduplicated + "graph_version": 1, + "name": "Shared Agent", "description": "From marketplace", - "sub_heading": "Sub heading", - "creator": "creator-1", - "is_marketplace_agent": True, + "input_schema": {}, + "output_schema": {}, }, { + "graph_id": "unique-456", + "graph_version": 1, "name": "Unique Agent", "description": "Only in marketplace", - "sub_heading": "Sub heading", - "creator": "creator-2", - "is_marketplace_agent": True, + "input_schema": {}, + "output_schema": {}, }, ] @@ -273,7 +289,7 @@ class TestGetAllRelevantAgentsForGeneration: include_marketplace=True, ) - # Shared Agent from marketplace should be excluded + # Shared Agent from marketplace should be excluded by graph_id assert len(result) == 2 names = [a["name"] for a in result] assert "Shared Agent" in names diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index a44ceb8388..aa4c49b1a2 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -9833,7 +9833,8 @@ "sub_heading": { "type": "string", "title": "Sub Heading" }, "description": { "type": "string", "title": "Description" }, "runs": { "type": "integer", "title": "Runs" }, - "rating": { "type": "number", "title": "Rating" } + "rating": { "type": "number", "title": "Rating" }, + "agent_graph_id": { "type": "string", "title": "Agent Graph Id" } }, "type": "object", "required": [ @@ -9845,7 +9846,8 @@ "sub_heading", "description", "runs", - "rating" + "rating", + "agent_graph_id" ], "title": "StoreAgent" }, From 1081590384c5355cc7af95f7f3936b25f16aa4aa Mon Sep 17 00:00:00 2001 From: "Guofang.Tang" Date: Mon, 2 Feb 2026 04:29:15 +0800 Subject: [PATCH 6/6] feat(backend): cover webhook ingress URL route (#11747) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes 🏗️ - Add a unit test to verify webhook ingress URL generation matches the FastAPI route. 
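For illustration, a minimal sketch of what the helper under test is expected to produce (not the actual implementation; `https://example.com` and `webhook_123` are the example values the test uses, and the final URL in the comment additionally assumes the GitHub provider's string value is `github`):

```python
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks import utils as webhooks_utils

# webhook_ingress_url() builds the public ingress URL for a provider webhook
# from the configured platform_base_url and the route parameters.
url = webhooks_utils.webhook_ingress_url(ProviderName.GITHUB, "webhook_123")

# With platform_base_url = "https://example.com" (and assuming
# ProviderName.GITHUB.value == "github"), this should yield:
#   https://example.com/api/integrations/github/webhooks/webhook_123/ingress
print(url)
```

The test below resolves the expected path from the FastAPI router itself rather than hard-coding it, so if the ingress route is ever moved or renamed, the test fails instead of letting the generated URLs drift out of sync with the route.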
### Checklist 📋

#### For code changes:

- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
  - [x] poetry run pytest backend/integrations/webhooks/utils_test.py --confcutdir=backend/integrations/webhooks

#### For configuration changes:

- [x] .env.default is updated or already compatible with my changes
- [x] docker-compose.yml is updated or already compatible with my changes
- [x] I have included a list of my configuration changes in the PR description (under Changes)

## Summary by CodeRabbit

* **Tests**
  * Added a unit test that validates that webhook ingress URL generation matches the application's resolved route (scheme, host, and path) for provider-specific webhook endpoints, guarding against routing regressions.

---------

Co-authored-by: Reinier van der Leer
---
 .../integrations/webhooks/utils_test.py | 39 +++++++++++++++++++
 1 file changed, 39 insertions(+)
 create mode 100644 autogpt_platform/backend/backend/integrations/webhooks/utils_test.py

diff --git a/autogpt_platform/backend/backend/integrations/webhooks/utils_test.py b/autogpt_platform/backend/backend/integrations/webhooks/utils_test.py
new file mode 100644
index 0000000000..bc502a8e44
--- /dev/null
+++ b/autogpt_platform/backend/backend/integrations/webhooks/utils_test.py
@@ -0,0 +1,39 @@
+from urllib.parse import urlparse
+
+import fastapi
+from fastapi.routing import APIRoute
+
+from backend.api.features.integrations.router import router as integrations_router
+from backend.integrations.providers import ProviderName
+from backend.integrations.webhooks import utils as webhooks_utils
+
+
+def test_webhook_ingress_url_matches_route(monkeypatch) -> None:
+    app = fastapi.FastAPI()
+    app.include_router(integrations_router, prefix="/api/integrations")
+
+    provider = ProviderName.GITHUB
+    webhook_id = "webhook_123"
+    base_url = "https://example.com"
+
+    monkeypatch.setattr(webhooks_utils.app_config, "platform_base_url", base_url)
+
+    route = next(
+        route
+        for route in integrations_router.routes
+        if isinstance(route, APIRoute)
+        and route.path == "/{provider}/webhooks/{webhook_id}/ingress"
+        and "POST" in route.methods
+    )
+    expected_path = f"/api/integrations{route.path}".format(
+        provider=provider.value,
+        webhook_id=webhook_id,
+    )
+    actual_url = urlparse(webhooks_utils.webhook_ingress_url(provider, webhook_id))
+    expected_base = urlparse(base_url)
+
+    assert (actual_url.scheme, actual_url.netloc) == (
+        expected_base.scheme,
+        expected_base.netloc,
+    )
+    assert actual_url.path == expected_path