diff --git a/autogpt_platform/backend/.gitignore b/autogpt_platform/backend/.gitignore index 9224c07d9e..6e688311a6 100644 --- a/autogpt_platform/backend/.gitignore +++ b/autogpt_platform/backend/.gitignore @@ -19,3 +19,6 @@ load-tests/*.json load-tests/*.log load-tests/node_modules/* migrations/*/rollback*.sql + +# Workspace files +workspaces/ diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index 218575085b..06da6bdf2b 100644 --- a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -33,7 +33,7 @@ from backend.data.understanding import ( get_business_understanding, ) from backend.util.exceptions import NotFoundError -from backend.util.settings import Settings +from backend.util.settings import AppEnvironment, Settings from . import db as chat_db from . import stream_registry @@ -222,8 +222,18 @@ async def _get_system_prompt_template(context: str) -> str: try: # cache_ttl_seconds=0 disables SDK caching to always get the latest prompt # Use asyncio.to_thread to avoid blocking the event loop + # In non-production environments, fetch the latest prompt version + # instead of the production-labeled version for easier testing + label = ( + None + if settings.config.app_env == AppEnvironment.PRODUCTION + else "latest" + ) prompt = await asyncio.to_thread( - langfuse.get_prompt, config.langfuse_prompt_name, cache_ttl_seconds=0 + langfuse.get_prompt, + config.langfuse_prompt_name, + label=label, + cache_ttl_seconds=0, ) return prompt.compile(users_information=context) except Exception as e: @@ -618,6 +628,9 @@ async def stream_chat_completion( total_tokens=chunk.totalTokens, ) ) + elif isinstance(chunk, StreamHeartbeat): + # Pass through heartbeat to keep SSE connection alive + yield chunk else: logger.error(f"Unknown chunk type: {type(chunk)}", exc_info=True) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py index b88b9b2924..f83ca30b5c 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py @@ -7,15 +7,7 @@ from typing import Any, NotRequired, TypedDict from backend.api.features.library import db as library_db from backend.api.features.store import db as store_db -from backend.data.graph import ( - Graph, - Link, - Node, - create_graph, - get_graph, - get_graph_all_versions, - get_store_listed_graphs, -) +from backend.data.graph import Graph, Link, Node, get_graph, get_store_listed_graphs from backend.util.exceptions import DatabaseError, NotFoundError from .service import ( @@ -28,8 +20,6 @@ from .service import ( logger = logging.getLogger(__name__) -AGENT_EXECUTOR_BLOCK_ID = "e189baac-8c20-45a1-94a7-55177ea42565" - class ExecutionSummary(TypedDict): """Summary of a single execution for quality assessment.""" @@ -669,45 +659,6 @@ def json_to_graph(agent_json: dict[str, Any]) -> Graph: ) -def _reassign_node_ids(graph: Graph) -> None: - """Reassign all node and link IDs to new UUIDs. - - This is needed when creating a new version to avoid unique constraint violations. 
- """ - id_map = {node.id: str(uuid.uuid4()) for node in graph.nodes} - - for node in graph.nodes: - node.id = id_map[node.id] - - for link in graph.links: - link.id = str(uuid.uuid4()) - if link.source_id in id_map: - link.source_id = id_map[link.source_id] - if link.sink_id in id_map: - link.sink_id = id_map[link.sink_id] - - -def _populate_agent_executor_user_ids(agent_json: dict[str, Any], user_id: str) -> None: - """Populate user_id in AgentExecutorBlock nodes. - - The external agent generator creates AgentExecutorBlock nodes with empty user_id. - This function fills in the actual user_id so sub-agents run with correct permissions. - - Args: - agent_json: Agent JSON dict (modified in place) - user_id: User ID to set - """ - for node in agent_json.get("nodes", []): - if node.get("block_id") == AGENT_EXECUTOR_BLOCK_ID: - input_default = node.get("input_default") or {} - if not input_default.get("user_id"): - input_default["user_id"] = user_id - node["input_default"] = input_default - logger.debug( - f"Set user_id for AgentExecutorBlock node {node.get('id')}" - ) - - async def save_agent_to_library( agent_json: dict[str, Any], user_id: str, is_update: bool = False ) -> tuple[Graph, Any]: @@ -721,35 +672,10 @@ async def save_agent_to_library( Returns: Tuple of (created Graph, LibraryAgent) """ - # Populate user_id in AgentExecutorBlock nodes before conversion - _populate_agent_executor_user_ids(agent_json, user_id) - graph = json_to_graph(agent_json) - if is_update: - if graph.id: - existing_versions = await get_graph_all_versions(graph.id, user_id) - if existing_versions: - latest_version = max(v.version for v in existing_versions) - graph.version = latest_version + 1 - _reassign_node_ids(graph) - logger.info(f"Updating agent {graph.id} to version {graph.version}") - else: - graph.id = str(uuid.uuid4()) - graph.version = 1 - _reassign_node_ids(graph) - logger.info(f"Creating new agent with ID {graph.id}") - - created_graph = await create_graph(graph, user_id) - - library_agents = await library_db.create_library_agent( - graph=created_graph, - user_id=user_id, - sensitive_action_safe_mode=True, - create_library_agents_for_sub_graphs=False, - ) - - return created_graph, library_agents[0] + return await library_db.update_graph_in_library(graph, user_id) + return await library_db.create_graph_in_library(graph, user_id) def graph_to_json(graph: Graph) -> dict[str, Any]: diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py index 62d59c470e..61cdba1ef9 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py @@ -206,9 +206,9 @@ async def search_agents( ] ) no_results_msg = ( - f"No agents found matching '{query}'. Try different keywords or browse the marketplace." + f"No agents found matching '{query}'. Let the user know they can try different keywords or browse the marketplace. Also let them know you can create a custom agent for them based on their needs." if source == "marketplace" - else f"No agents matching '{query}' found in your library." + else f"No agents matching '{query}' found in your library. Let the user know you can create a custom agent for them based on their needs." 
) return NoResultsResponse( message=no_results_msg, session_id=session_id, suggestions=suggestions @@ -224,10 +224,10 @@ async def search_agents( message = ( "Now you have found some options for the user to choose from. " "You can add a link to a recommended agent at: /marketplace/agent/agent_id " - "Please ask the user if they would like to use any of these agents." + "Please ask the user if they would like to use any of these agents. Let the user know we can create a custom agent for them based on their needs." if source == "marketplace" else "Found agents in the user's library. You can provide a link to view an agent at: " - "/library/agents/{agent_id}. Use agent_output to get execution results, or run_agent to execute." + "/library/agents/{agent_id}. Use agent_output to get execution results, or run_agent to execute. Let the user know we can create a custom agent for them based on their needs." ) return AgentsFoundResponse( diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py index 0046d0b249..bd25594b8a 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py @@ -8,7 +8,12 @@ from backend.api.features.library import model as library_model from backend.api.features.store import db as store_db from backend.data import graph as graph_db from backend.data.graph import GraphModel -from backend.data.model import Credentials, CredentialsFieldInfo, CredentialsMetaInput +from backend.data.model import ( + CredentialsFieldInfo, + CredentialsMetaInput, + HostScopedCredentials, + OAuth2Credentials, +) from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.util.exceptions import NotFoundError @@ -273,7 +278,14 @@ async def match_user_credentials_to_graph( for cred in available_creds if cred.provider in credential_requirements.provider and cred.type in credential_requirements.supported_types - and _credential_has_required_scopes(cred, credential_requirements) + and ( + cred.type != "oauth2" + or _credential_has_required_scopes(cred, credential_requirements) + ) + and ( + cred.type != "host_scoped" + or _credential_is_for_host(cred, credential_requirements) + ) ), None, ) @@ -318,19 +330,10 @@ async def match_user_credentials_to_graph( def _credential_has_required_scopes( - credential: Credentials, + credential: OAuth2Credentials, requirements: CredentialsFieldInfo, ) -> bool: - """ - Check if a credential has all the scopes required by the block. - - For OAuth2 credentials, verifies that the credential's scopes are a superset - of the required scopes. For other credential types, returns True (no scope check). - """ - # Only OAuth2 credentials have scopes to check - if credential.type != "oauth2": - return True - + """Check if an OAuth2 credential has all the scopes required by the input.""" # If no scopes are required, any credential matches if not requirements.required_scopes: return True @@ -339,6 +342,22 @@ def _credential_has_required_scopes( return set(credential.scopes).issuperset(requirements.required_scopes) +def _credential_is_for_host( + credential: HostScopedCredentials, + requirements: CredentialsFieldInfo, +) -> bool: + """Check if a host-scoped credential matches the host required by the input.""" + # We need to know the host to match host-scoped credentials to. 
+ # Graph.aggregate_credentials_inputs() adds the node's set URL value (if any) + # to discriminator_values. No discriminator_values -> no host to match against. + if not requirements.discriminator_values: + return True + + # Check that credential host matches required host. + # Host-scoped credential inputs are grouped by host, so any item from the set works. + return credential.matches_url(list(requirements.discriminator_values)[0]) + + async def check_user_has_required_credentials( user_id: str, required_credentials: list[CredentialsMetaInput], diff --git a/autogpt_platform/backend/backend/api/features/library/db.py b/autogpt_platform/backend/backend/api/features/library/db.py index 394f959953..6bebfb573c 100644 --- a/autogpt_platform/backend/backend/api/features/library/db.py +++ b/autogpt_platform/backend/backend/api/features/library/db.py @@ -19,7 +19,10 @@ from backend.data.graph import GraphSettings from backend.data.includes import AGENT_PRESET_INCLUDE, library_agent_include from backend.data.model import CredentialsMetaInput from backend.integrations.creds_manager import IntegrationCredentialsManager -from backend.integrations.webhooks.graph_lifecycle_hooks import on_graph_activate +from backend.integrations.webhooks.graph_lifecycle_hooks import ( + on_graph_activate, + on_graph_deactivate, +) from backend.util.clients import get_scheduler_client from backend.util.exceptions import DatabaseError, InvalidInputError, NotFoundError from backend.util.json import SafeJson @@ -537,6 +540,92 @@ async def update_agent_version_in_library( return library_model.LibraryAgent.from_db(lib) +async def create_graph_in_library( + graph: graph_db.Graph, + user_id: str, +) -> tuple[graph_db.GraphModel, library_model.LibraryAgent]: + """Create a new graph and add it to the user's library.""" + graph.version = 1 + graph_model = graph_db.make_graph_model(graph, user_id) + graph_model.reassign_ids(user_id=user_id, reassign_graph_id=True) + + created_graph = await graph_db.create_graph(graph_model, user_id) + + library_agents = await create_library_agent( + graph=created_graph, + user_id=user_id, + sensitive_action_safe_mode=True, + create_library_agents_for_sub_graphs=False, + ) + + if created_graph.is_active: + created_graph = await on_graph_activate(created_graph, user_id=user_id) + + return created_graph, library_agents[0] + + +async def update_graph_in_library( + graph: graph_db.Graph, + user_id: str, +) -> tuple[graph_db.GraphModel, library_model.LibraryAgent]: + """Create a new version of an existing graph and update the library entry.""" + existing_versions = await graph_db.get_graph_all_versions(graph.id, user_id) + current_active_version = ( + next((v for v in existing_versions if v.is_active), None) + if existing_versions + else None + ) + graph.version = ( + max(v.version for v in existing_versions) + 1 if existing_versions else 1 + ) + + graph_model = graph_db.make_graph_model(graph, user_id) + graph_model.reassign_ids(user_id=user_id, reassign_graph_id=False) + + created_graph = await graph_db.create_graph(graph_model, user_id) + + library_agent = await get_library_agent_by_graph_id(user_id, created_graph.id) + if not library_agent: + raise NotFoundError(f"Library agent not found for graph {created_graph.id}") + + library_agent = await update_library_agent_version_and_settings( + user_id, created_graph + ) + + if created_graph.is_active: + created_graph = await on_graph_activate(created_graph, user_id=user_id) + await graph_db.set_graph_active_version( + graph_id=created_graph.id, + 
version=created_graph.version, + user_id=user_id, + ) + if current_active_version: + await on_graph_deactivate(current_active_version, user_id=user_id) + + return created_graph, library_agent + + +async def update_library_agent_version_and_settings( + user_id: str, agent_graph: graph_db.GraphModel +) -> library_model.LibraryAgent: + """Update library agent to point to new graph version and sync settings.""" + library = await update_agent_version_in_library( + user_id, agent_graph.id, agent_graph.version + ) + updated_settings = GraphSettings.from_graph( + graph=agent_graph, + hitl_safe_mode=library.settings.human_in_the_loop_safe_mode, + sensitive_action_safe_mode=library.settings.sensitive_action_safe_mode, + ) + if updated_settings != library.settings: + library = await update_library_agent( + library_agent_id=library.id, + user_id=user_id, + settings=updated_settings, + ) + return library + + async def update_library_agent( library_agent_id: str, user_id: str, diff --git a/autogpt_platform/backend/backend/api/features/v1.py b/autogpt_platform/backend/backend/api/features/v1.py index 09d3759a65..a8610702cc 100644 --- a/autogpt_platform/backend/backend/api/features/v1.py +++ b/autogpt_platform/backend/backend/api/features/v1.py @@ -101,7 +101,6 @@ from backend.util.timezone_utils import ( from backend.util.virus_scanner import scan_content_safe from .library import db as library_db -from .library import model as library_model from .store.model import StoreAgentDetails @@ -823,18 +822,16 @@ async def update_graph( graph: graph_db.Graph, user_id: Annotated[str, Security(get_user_id)], ) -> graph_db.GraphModel: - # Sanity check if graph.id and graph.id != graph_id: raise HTTPException(400, detail="Graph ID does not match ID in URI") - # Determine new version existing_versions = await graph_db.get_graph_all_versions(graph_id, user_id=user_id) if not existing_versions: raise HTTPException(404, detail=f"Graph #{graph_id} not found") - latest_version_number = max(g.version for g in existing_versions) - graph.version = latest_version_number + 1 + graph.version = max(g.version for g in existing_versions) + 1 current_active_version = next((v for v in existing_versions if v.is_active), None) + graph = graph_db.make_graph_model(graph, user_id) graph.reassign_ids(user_id=user_id, reassign_graph_id=False) graph.validate_graph(for_run=False) @@ -842,27 +839,23 @@ async def update_graph( new_graph_version = await graph_db.create_graph(graph, user_id=user_id) if new_graph_version.is_active: - # Keep the library agent up to date with the new active version - await _update_library_agent_version_and_settings(user_id, new_graph_version) - - # Handle activation of the new graph first to ensure continuity + await library_db.update_library_agent_version_and_settings( + user_id, new_graph_version + ) new_graph_version = await on_graph_activate(new_graph_version, user_id=user_id) - # Ensure new version is the only active version await graph_db.set_graph_active_version( graph_id=graph_id, version=new_graph_version.version, user_id=user_id ) if current_active_version: - # Handle deactivation of the previously active version await on_graph_deactivate(current_active_version, user_id=user_id) - # Fetch new graph version *with sub-graphs* (needed for credentials input schema) new_graph_version_with_subgraphs = await graph_db.get_graph( graph_id, new_graph_version.version, user_id=user_id, include_subgraphs=True, ) - assert new_graph_version_with_subgraphs # make type checker happy + assert 
new_graph_version_with_subgraphs return new_graph_version_with_subgraphs @@ -900,33 +893,15 @@ async def set_graph_active_version( ) # Keep the library agent up to date with the new active version - await _update_library_agent_version_and_settings(user_id, new_active_graph) + await library_db.update_library_agent_version_and_settings( + user_id, new_active_graph + ) if current_active_graph and current_active_graph.version != new_active_version: # Handle deactivation of the previously active version await on_graph_deactivate(current_active_graph, user_id=user_id) -async def _update_library_agent_version_and_settings( - user_id: str, agent_graph: graph_db.GraphModel -) -> library_model.LibraryAgent: - library = await library_db.update_agent_version_in_library( - user_id, agent_graph.id, agent_graph.version - ) - updated_settings = GraphSettings.from_graph( - graph=agent_graph, - hitl_safe_mode=library.settings.human_in_the_loop_safe_mode, - sensitive_action_safe_mode=library.settings.sensitive_action_safe_mode, - ) - if updated_settings != library.settings: - library = await library_db.update_library_agent( - library_agent_id=library.id, - user_id=user_id, - settings=updated_settings, - ) - return library - - @v1_router.patch( path="/graphs/{graph_id}/settings", summary="Update graph settings", diff --git a/autogpt_platform/backend/backend/blocks/linear/_api.py b/autogpt_platform/backend/backend/blocks/linear/_api.py index 477b8a209c..ea609d515a 100644 --- a/autogpt_platform/backend/backend/blocks/linear/_api.py +++ b/autogpt_platform/backend/backend/blocks/linear/_api.py @@ -162,8 +162,16 @@ class LinearClient: "searchTerm": team_name, } - team_id = await self.query(query, variables) - return team_id["teams"]["nodes"][0]["id"] + result = await self.query(query, variables) + nodes = result["teams"]["nodes"] + + if not nodes: + raise LinearAPIException( + f"Team '{team_name}' not found. Check the team name or key and try again.", + status_code=404, + ) + + return nodes[0]["id"] except LinearAPIException as e: raise e @@ -240,17 +248,44 @@ class LinearClient: except LinearAPIException as e: raise e - async def try_search_issues(self, term: str) -> list[Issue]: + async def try_search_issues( + self, + term: str, + max_results: int = 10, + team_id: str | None = None, + ) -> list[Issue]: try: query = """ - query SearchIssues($term: String!, $includeComments: Boolean!) 
{ - searchIssues(term: $term, includeComments: $includeComments) { + query SearchIssues( + $term: String!, + $first: Int, + $teamId: String + ) { + searchIssues( + term: $term, + first: $first, + teamId: $teamId + ) { nodes { id identifier title description priority + createdAt + state { + id + name + type + } + project { + id + name + } + assignee { + id + name + } } } } @@ -258,7 +293,8 @@ class LinearClient: variables: dict[str, Any] = { "term": term, - "includeComments": True, + "first": max_results, + "teamId": team_id, } issues = await self.query(query, variables) diff --git a/autogpt_platform/backend/backend/blocks/linear/issues.py b/autogpt_platform/backend/backend/blocks/linear/issues.py index baac01214c..165178f8ee 100644 --- a/autogpt_platform/backend/backend/blocks/linear/issues.py +++ b/autogpt_platform/backend/backend/blocks/linear/issues.py @@ -17,7 +17,7 @@ from ._config import ( LinearScope, linear, ) -from .models import CreateIssueResponse, Issue +from .models import CreateIssueResponse, Issue, State class LinearCreateIssueBlock(Block): @@ -135,9 +135,20 @@ class LinearSearchIssuesBlock(Block): description="Linear credentials with read permissions", required_scopes={LinearScope.READ}, ) + max_results: int = SchemaField( + description="Maximum number of results to return", + default=10, + ge=1, + le=100, + ) + team_name: str | None = SchemaField( + description="Optional team name to filter results (e.g., 'Internal', 'Open Source')", + default=None, + ) class Output(BlockSchemaOutput): issues: list[Issue] = SchemaField(description="List of issues") + error: str = SchemaField(description="Error message if the search failed") def __init__(self): super().__init__( @@ -145,8 +156,11 @@ class LinearSearchIssuesBlock(Block): description="Searches for issues on Linear", input_schema=self.Input, output_schema=self.Output, + categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING}, test_input={ "term": "Test issue", + "max_results": 10, + "team_name": None, "credentials": TEST_CREDENTIALS_INPUT_OAUTH, }, test_credentials=TEST_CREDENTIALS_OAUTH, @@ -156,10 +170,14 @@ class LinearSearchIssuesBlock(Block): [ Issue( id="abc123", - identifier="abc123", + identifier="TST-123", title="Test issue", description="Test description", priority=1, + state=State( + id="state1", name="In Progress", type="started" + ), + createdAt="2026-01-15T10:00:00.000Z", ) ], ) @@ -168,10 +186,12 @@ class LinearSearchIssuesBlock(Block): "search_issues": lambda *args, **kwargs: [ Issue( id="abc123", - identifier="abc123", + identifier="TST-123", title="Test issue", description="Test description", priority=1, + state=State(id="state1", name="In Progress", type="started"), + createdAt="2026-01-15T10:00:00.000Z", ) ] }, @@ -181,10 +201,22 @@ class LinearSearchIssuesBlock(Block): async def search_issues( credentials: OAuth2Credentials | APIKeyCredentials, term: str, + max_results: int = 10, + team_name: str | None = None, ) -> list[Issue]: client = LinearClient(credentials=credentials) - response: list[Issue] = await client.try_search_issues(term=term) - return response + + # Resolve team name to ID if provided + # Raises LinearAPIException with descriptive message if team not found + team_id: str | None = None + if team_name: + team_id = await client.try_get_team_by_name(team_name=team_name) + + return await client.try_search_issues( + term=term, + max_results=max_results, + team_id=team_id, + ) async def run( self, @@ -196,7 +228,10 @@ class LinearSearchIssuesBlock(Block): """Execute the issue 
search""" try: issues = await self.search_issues( - credentials=credentials, term=input_data.term + credentials=credentials, + term=input_data.term, + max_results=input_data.max_results, + team_name=input_data.team_name, ) yield "issues", issues except LinearAPIException as e: diff --git a/autogpt_platform/backend/backend/blocks/linear/models.py b/autogpt_platform/backend/backend/blocks/linear/models.py index bfeaa13656..dd1f603459 100644 --- a/autogpt_platform/backend/backend/blocks/linear/models.py +++ b/autogpt_platform/backend/backend/blocks/linear/models.py @@ -36,12 +36,21 @@ class Project(BaseModel): content: str | None = None +class State(BaseModel): + id: str + name: str + type: str | None = ( + None # Workflow state type (e.g., "triage", "backlog", "started", "completed", "canceled") + ) + + class Issue(BaseModel): id: str identifier: str title: str description: str | None priority: int + state: State | None = None project: Project | None = None createdAt: str | None = None comments: list[Comment] | None = None diff --git a/autogpt_platform/backend/backend/blocks/youtube.py b/autogpt_platform/backend/backend/blocks/youtube.py index e79be3e99b..6d81a86b4c 100644 --- a/autogpt_platform/backend/backend/blocks/youtube.py +++ b/autogpt_platform/backend/backend/blocks/youtube.py @@ -165,10 +165,13 @@ class TranscribeYoutubeVideoBlock(Block): credentials: WebshareProxyCredentials, **kwargs, ) -> BlockOutput: - video_id = self.extract_video_id(input_data.youtube_url) - yield "video_id", video_id + try: + video_id = self.extract_video_id(input_data.youtube_url) + transcript = self.get_transcript(video_id, credentials) + transcript_text = self.format_transcript(transcript=transcript) - transcript = self.get_transcript(video_id, credentials) - transcript_text = self.format_transcript(transcript=transcript) - - yield "transcript", transcript_text + # Only yield after all operations succeed + yield "video_id", video_id + yield "transcript", transcript_text + except Exception as e: + yield "error", str(e) diff --git a/autogpt_platform/backend/backend/data/credit_test.py b/autogpt_platform/backend/backend/data/credit_test.py index 391a373b86..2b10c62882 100644 --- a/autogpt_platform/backend/backend/data/credit_test.py +++ b/autogpt_platform/backend/backend/data/credit_test.py @@ -134,6 +134,16 @@ async def test_block_credit_reset(server: SpinTestServer): month1 = datetime.now(timezone.utc).replace(month=1, day=1) user_credit.time_now = lambda: month1 + # IMPORTANT: Set updatedAt to December of previous year to ensure it's + # in a different month than month1 (January). This fixes a timing bug + # where if the test runs in early February, 35 days ago would be January, + # matching the mocked month1 and preventing the refill from triggering. 
+ dec_previous_year = month1.replace(year=month1.year - 1, month=12, day=15) + await UserBalance.prisma().update( + where={"userId": DEFAULT_USER_ID}, + data={"updatedAt": dec_previous_year}, + ) + # First call in month 1 should trigger refill balance = await user_credit.get_credits(DEFAULT_USER_ID) assert balance == REFILL_VALUE # Should get 1000 credits diff --git a/autogpt_platform/backend/backend/data/model.py b/autogpt_platform/backend/backend/data/model.py index 331126fbd6..5a09c591c9 100644 --- a/autogpt_platform/backend/backend/data/model.py +++ b/autogpt_platform/backend/backend/data/model.py @@ -19,7 +19,6 @@ from typing import ( cast, get_args, ) -from urllib.parse import urlparse from uuid import uuid4 from prisma.enums import CreditTransactionType, OnboardingStep @@ -42,6 +41,7 @@ from typing_extensions import TypedDict from backend.integrations.providers import ProviderName from backend.util.json import loads as json_loads +from backend.util.request import parse_url from backend.util.settings import Secrets # Type alias for any provider name (including custom ones) @@ -397,19 +397,25 @@ class HostScopedCredentials(_BaseCredentials): def matches_url(self, url: str) -> bool: """Check if this credential should be applied to the given URL.""" - parsed_url = urlparse(url) - # Extract hostname without port - request_host = parsed_url.hostname + request_host, request_port = _extract_host_from_url(url) + cred_scope_host, cred_scope_port = _extract_host_from_url(self.host) if not request_host: return False - # Simple host matching - exact match or wildcard subdomain match - if self.host == request_host: + # If a port is specified in credential host, the request host port must match + if cred_scope_port is not None and request_port != cred_scope_port: + return False + # Non-standard ports are only allowed if explicitly specified in credential host + elif cred_scope_port is None and request_port not in (80, 443, None): + return False + + # Simple host matching + if cred_scope_host == request_host: return True # Support wildcard matching (e.g., "*.example.com" matches "api.example.com") - if self.host.startswith("*."): - domain = self.host[2:] # Remove "*." + if cred_scope_host.startswith("*."): + domain = cred_scope_host[2:] # Remove "*." 
return request_host.endswith(f".{domain}") or request_host == domain return False @@ -551,13 +557,13 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]): ) -def _extract_host_from_url(url: str) -> str: - """Extract host from URL for grouping host-scoped credentials.""" +def _extract_host_from_url(url: str) -> tuple[str, int | None]: + """Extract host and port from URL for grouping host-scoped credentials.""" try: - parsed = urlparse(url) - return parsed.hostname or url + parsed = parse_url(url) + return parsed.hostname or url, parsed.port except Exception: - return "" + return "", None class CredentialsFieldInfo(BaseModel, Generic[CP, CT]): @@ -606,7 +612,7 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]): providers = frozenset( [cast(CP, "http")] + [ - cast(CP, _extract_host_from_url(str(value))) + cast(CP, parse_url(str(value)).netloc) for value in field.discriminator_values ] ) diff --git a/autogpt_platform/backend/backend/data/model_test.py b/autogpt_platform/backend/backend/data/model_test.py index 37ec6be82f..e8e2ddfa35 100644 --- a/autogpt_platform/backend/backend/data/model_test.py +++ b/autogpt_platform/backend/backend/data/model_test.py @@ -79,10 +79,23 @@ class TestHostScopedCredentials: headers={"Authorization": SecretStr("Bearer token")}, ) - assert creds.matches_url("http://localhost:8080/api/v1") + # Non-standard ports require explicit port in credential host + assert not creds.matches_url("http://localhost:8080/api/v1") assert creds.matches_url("https://localhost:443/secure/endpoint") assert creds.matches_url("http://localhost/simple") + def test_matches_url_with_explicit_port(self): + """Test URL matching with explicit port in credential host.""" + creds = HostScopedCredentials( + provider="custom", + host="localhost:8080", + headers={"Authorization": SecretStr("Bearer token")}, + ) + + assert creds.matches_url("http://localhost:8080/api/v1") + assert not creds.matches_url("http://localhost:3000/api/v1") + assert not creds.matches_url("http://localhost/simple") + def test_empty_headers_dict(self): """Test HostScopedCredentials with empty headers.""" creds = HostScopedCredentials( @@ -128,8 +141,20 @@ class TestHostScopedCredentials: ("*.example.com", "https://sub.api.example.com/test", True), ("*.example.com", "https://example.com/test", True), ("*.example.com", "https://example.org/test", False), - ("localhost", "http://localhost:3000/test", True), + # Non-standard ports require explicit port in credential host + ("localhost", "http://localhost:3000/test", False), + ("localhost:3000", "http://localhost:3000/test", True), ("localhost", "http://127.0.0.1:3000/test", False), + # IPv6 addresses (frontend stores with brackets via URL.hostname) + ("[::1]", "http://[::1]/test", True), + ("[::1]", "http://[::1]:80/test", True), + ("[::1]", "https://[::1]:443/test", True), + ("[::1]", "http://[::1]:8080/test", False), # Non-standard port + ("[::1]:8080", "http://[::1]:8080/test", True), + ("[::1]:8080", "http://[::1]:9090/test", False), + ("[2001:db8::1]", "http://[2001:db8::1]/path", True), + ("[2001:db8::1]", "https://[2001:db8::1]:443/path", True), + ("[2001:db8::1]", "http://[2001:db8::ff]/path", False), ], ) def test_url_matching_parametrized(self, host: str, test_url: str, expected: bool): diff --git a/autogpt_platform/backend/backend/util/request.py b/autogpt_platform/backend/backend/util/request.py index 9744372b15..95e5ee32f7 100644 --- a/autogpt_platform/backend/backend/util/request.py +++ b/autogpt_platform/backend/backend/util/request.py @@ -157,12 
+157,7 @@ async def validate_url( is_trusted: Boolean indicating if the hostname is in trusted_origins ip_addresses: List of IP addresses for the host; empty if the host is trusted """ - # Canonicalize URL - url = url.strip("/ ").replace("\\", "/") - parsed = urlparse(url) - if not parsed.scheme: - url = f"http://{url}" - parsed = urlparse(url) + parsed = parse_url(url) # Check scheme if parsed.scheme not in ALLOWED_SCHEMES: @@ -220,6 +215,17 @@ async def validate_url( ) +def parse_url(url: str) -> URL: + """Canonicalizes and parses a URL string.""" + url = url.strip("/ ").replace("\\", "/") + + # Ensure scheme is present for proper parsing + if not re.match(r"[a-z0-9+.\-]+://", url): + url = f"http://{url}" + + return urlparse(url) + + def pin_url(url: URL, ip_addresses: Optional[list[str]] = None) -> URL: """ Pins a URL to a specific IP address to prevent DNS rebinding attacks. diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts index 692a5741f4..c6e479f896 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts @@ -26,8 +26,20 @@ export function buildCopilotChatUrl(prompt: string): string { export function getQuickActions(): string[] { return [ - "Show me what I can automate", - "Design a custom workflow", - "Help me with content creation", + "I don't know where to start, just ask me stuff", + "I do the same thing every week and it's killing me", + "Help me find where I'm wasting my time", ]; } + +export function getInputPlaceholder(width?: number) { + if (!width) return "What's your role and what eats up most of your day?"; + + if (width < 500) { + return "I'm a chef and I hate..."; + } + if (width <= 1080) { + return "What's your role and what eats up most of your day?"; + } + return "What's your role and what eats up most of your day? e.g. 
'I'm a recruiter and I hate...'"; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx index e9bc018c1b..542173a99c 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx @@ -6,7 +6,9 @@ import { Text } from "@/components/atoms/Text/Text"; import { Chat } from "@/components/contextual/Chat/Chat"; import { ChatInput } from "@/components/contextual/Chat/components/ChatInput/ChatInput"; import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { useEffect, useState } from "react"; import { useCopilotStore } from "./copilot-page-store"; +import { getInputPlaceholder } from "./helpers"; import { useCopilotPage } from "./useCopilotPage"; export default function CopilotPage() { @@ -14,8 +16,25 @@ export default function CopilotPage() { const isInterruptModalOpen = useCopilotStore((s) => s.isInterruptModalOpen); const confirmInterrupt = useCopilotStore((s) => s.confirmInterrupt); const cancelInterrupt = useCopilotStore((s) => s.cancelInterrupt); + + const [inputPlaceholder, setInputPlaceholder] = useState( + getInputPlaceholder(), + ); + + useEffect(() => { + const handleResize = () => { + setInputPlaceholder(getInputPlaceholder(window.innerWidth)); + }; + + handleResize(); + + window.addEventListener("resize", handleResize); + return () => window.removeEventListener("resize", handleResize); + }, []); + const { greetingName, quickActions, isLoading, hasSession, initialPrompt } = state; + const { handleQuickAction, startChatWithPrompt, @@ -73,7 +92,7 @@ export default function CopilotPage() { } return ( -
+
{isLoading ? (
@@ -90,25 +109,25 @@ export default function CopilotPage() {
) : ( <> -
+
Hey, {greetingName}
- What do you want to automate?
+ Tell me about your work — I'll find what to automate.
-
+
{quickActions.map((action) => ( diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx index 5df9944f47..fbf2d5d143 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx @@ -2,7 +2,6 @@ import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessi import { Button } from "@/components/atoms/Button/Button"; import { Text } from "@/components/atoms/Text/Text"; import { Dialog } from "@/components/molecules/Dialog/Dialog"; -import { useBreakpoint } from "@/lib/hooks/useBreakpoint"; import { cn } from "@/lib/utils"; import { GlobeHemisphereEastIcon } from "@phosphor-icons/react"; import { useEffect } from "react"; @@ -56,10 +55,6 @@ export function ChatContainer({ onStreamingChange?.(isStreaming); }, [isStreaming, onStreamingChange]); - const breakpoint = useBreakpoint(); - const isMobile = - breakpoint === "base" || breakpoint === "sm" || breakpoint === "md"; - return (
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx index beb4678e73..bac004f6ed 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx @@ -74,19 +74,20 @@ export function ChatInput({ hasMultipleLines ? "rounded-xlarge" : "rounded-full", )} > + {!value && !isRecording && ( + + )}
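
A quick sketch of the prompt-label rule introduced in backend/api/features/chat/service.py: outside production the chat service now fetches the "latest" Langfuse prompt version instead of the production-labeled one, so prompt edits can be tested without promoting them. A minimal sketch of the selection rule, with the AppEnvironment enum reduced to a plain string for illustration:

```python
def langfuse_prompt_label(app_env: str) -> str | None:
    """Pick which Langfuse prompt label to fetch.

    None selects the default production-labeled version; "latest" selects
    the newest version for easier testing. `app_env` stands in for the
    real AppEnvironment enum compared in service.py.
    """
    return None if app_env == "production" else "latest"


assert langfuse_prompt_label("production") is None
assert langfuse_prompt_label("dev") == "latest"
assert langfuse_prompt_label("local") == "latest"
```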
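The new create_graph_in_library / update_graph_in_library helpers in library/db.py centralize the version-bump rule that v1.py previously implemented inline (the real helpers also reassign node IDs, update the library agent, and run activation hooks). The rule itself, as a sketch:

```python
def next_graph_version(existing_versions: list[int]) -> int:
    """New graphs start at version 1; updates get max(existing) + 1."""
    return max(existing_versions) + 1 if existing_versions else 1


assert next_graph_version([]) == 1          # create_graph_in_library path
assert next_graph_version([1, 2, 3]) == 4   # update_graph_in_library path
```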
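The subtlest change is HostScopedCredentials.matches_url in data/model.py: a non-standard port now only matches when the credential host names it explicitly, and IPv6 hosts round-trip through the same canonicalizing parse as request URLs. Below is a self-contained sketch folding together parse_url (util/request.py), _extract_host_from_url, and the matching rules; error handling and the credential model itself are omitted:

```python
import re
from urllib.parse import urlparse


def _host_and_port(url: str) -> tuple[str | None, int | None]:
    # Canonicalize like backend.util.request.parse_url: default the
    # scheme to http:// so bare hosts such as "localhost:8080" parse.
    url = url.strip("/ ").replace("\\", "/")
    if not re.match(r"[a-z0-9+.\-]+://", url):
        url = f"http://{url}"
    parsed = urlparse(url)
    return parsed.hostname, parsed.port


def matches_url(credential_host: str, url: str) -> bool:
    request_host, request_port = _host_and_port(url)
    cred_host, cred_port = _host_and_port(credential_host)
    if not request_host:
        return False
    # A port in the credential host must match exactly; without one,
    # only standard ports (or no port at all) are accepted.
    if cred_port is not None and request_port != cred_port:
        return False
    if cred_port is None and request_port not in (80, 443, None):
        return False
    if cred_host == request_host:
        return True
    # Wildcard: "*.example.com" matches the domain and its subdomains.
    if cred_host and cred_host.startswith("*."):
        domain = cred_host[2:]
        return request_host == domain or request_host.endswith(f".{domain}")
    return False


assert not matches_url("localhost", "http://localhost:3000/test")
assert matches_url("localhost:3000", "http://localhost:3000/test")
assert matches_url("localhost", "https://localhost:443/secure")
assert matches_url("*.example.com", "https://sub.api.example.com/test")
assert matches_url("[::1]", "http://[::1]:80/test")
assert not matches_url("[::1]", "http://[::1]:8080/test")
```

The assertions mirror cases from the model_test.py parametrization in this diff.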
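_credential_is_for_host in chat/tools/utils.py builds on the same matching: a host-scoped credential is checked against the first discriminator value collected by Graph.aggregate_credentials_inputs(), and with no discriminator values there is no host to filter on, so the credential passes. A sketch reusing the matches_url helper above:

```python
def credential_is_for_host(discriminator_values: set[str], credential_host: str) -> bool:
    if not discriminator_values:
        return True  # no known host -> nothing to filter on
    # Inputs are grouped by host, so any one value from the set works.
    return matches_url(credential_host, next(iter(discriminator_values)))


assert credential_is_for_host(set(), "api.example.com")
assert credential_is_for_host({"https://api.example.com/v1"}, "*.example.com")
assert not credential_is_for_host({"https://other.org/v1"}, "api.example.com")
```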
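Finally, the TranscribeYoutubeVideoBlock change illustrates a general pattern for generator-style blocks: compute every fallible step before yielding, so a failure emits a single error output rather than a partial video_id with no transcript. A minimal sketch with hypothetical stand-ins for the block's helpers:

```python
from typing import Iterator


def transcribe(url: str) -> Iterator[tuple[str, str]]:
    def extract_video_id(u: str) -> str:  # stand-in for the real helper
        if "watch?v=" not in u:
            raise ValueError(f"not a YouTube watch URL: {u}")
        return u.split("watch?v=", 1)[1]

    try:
        video_id = extract_video_id(url)
        transcript = f"(transcript of {video_id})"  # stand-in for the proxy fetch
        # Only yield after every fallible step has succeeded.
        yield "video_id", video_id
        yield "transcript", transcript
    except Exception as e:
        yield "error", str(e)


assert dict(transcribe("https://youtube.com/watch?v=abc123"))["video_id"] == "abc123"
assert "error" in dict(transcribe("https://example.com/nope"))
```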