From caf9ff34e653406dd4fb90cac6f26cc90b38f358 Mon Sep 17 00:00:00 2001 From: Bently Date: Mon, 9 Feb 2026 10:24:08 +0000 Subject: [PATCH 01/11] fix(backend): Handle stale RabbitMQ channels on connection drop (#11929) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes πŸ—οΈ Fixes [**AUTOGPT-SERVER-1TN**](https://autoagpt.sentry.io/issues/?query=AUTOGPT-SERVER-1TN) (~39K events since Feb 2025) and related connection issues **6JC/6JD/6JE/6JF** (~6K combined). #### Problem When the RabbitMQ TCP connection drops (network blip, server restart, etc.): 1. `connect_robust` (aio_pika) automatically reconnects the underlying AMQP connection 2. But `AsyncRabbitMQ._channel` still references the **old dead channel** 3. `is_ready` checks `not self._channel.is_closed` β€” but the channel object doesn't know the transport is gone 4. `publish_message` tries to use the stale channel β†’ `ChannelInvalidStateError: No active transport in channel` 5. `@func_retry` retries 5 times, but each retry hits the same stale channel (it passes `is_ready`) This means every connection drop generates errors until the process is restarted. #### Fix **New `_ensure_channel()` helper** that resets stale channels before reconnecting, so `connect()` creates a fresh one instead of short-circuiting on `is_connected`. **Explicit `ChannelInvalidStateError` handling in `publish_message`:** 1. First attempt uses `_ensure_channel()` (handles normal staleness) 2. If publish throws `ChannelInvalidStateError`, does a full reconnect (resets both `_channel` and `_connection`) and retries once 3. `@func_retry` provides additional retry resilience on top **Simplified `get_channel()`** to use the same resilient helper. 
**1 file changed, 62 insertions, 24 deletions.** #### Impact - Eliminates ~39K `ChannelInvalidStateError` Sentry events - RabbitMQ operations self-heal after connection drops without process restart - Related transport EOF errors (6JC/6JD/6JE/6JF) should also reduce --- .../backend/backend/data/rabbitmq.py | 81 +++++++++++++++---- 1 file changed, 66 insertions(+), 15 deletions(-) diff --git a/autogpt_platform/backend/backend/data/rabbitmq.py b/autogpt_platform/backend/backend/data/rabbitmq.py index bdf2090083..524e21748a 100644 --- a/autogpt_platform/backend/backend/data/rabbitmq.py +++ b/autogpt_platform/backend/backend/data/rabbitmq.py @@ -1,3 +1,4 @@ +import asyncio import logging from abc import ABC, abstractmethod from enum import Enum @@ -225,6 +226,10 @@ class SyncRabbitMQ(RabbitMQBase): class AsyncRabbitMQ(RabbitMQBase): """Asynchronous RabbitMQ client""" + def __init__(self, config: RabbitMQConfig): + super().__init__(config) + self._reconnect_lock: asyncio.Lock | None = None + @property def is_connected(self) -> bool: return bool(self._connection and not self._connection.is_closed) @@ -235,7 +240,17 @@ class AsyncRabbitMQ(RabbitMQBase): @conn_retry("AsyncRabbitMQ", "Acquiring async connection") async def connect(self): - if self.is_connected: + if self.is_connected and self._channel and not self._channel.is_closed: + return + + if ( + self.is_connected + and self._connection + and (self._channel is None or self._channel.is_closed) + ): + self._channel = await self._connection.channel() + await self._channel.set_qos(prefetch_count=1) + await self.declare_infrastructure() return self._connection = await aio_pika.connect_robust( @@ -291,24 +306,46 @@ class AsyncRabbitMQ(RabbitMQBase): exchange, routing_key=queue.routing_key or queue.name ) - @func_retry - async def publish_message( + @property + def _lock(self) -> asyncio.Lock: + if self._reconnect_lock is None: + self._reconnect_lock = asyncio.Lock() + return self._reconnect_lock + + async def 
_ensure_channel(self) -> aio_pika.abc.AbstractChannel: + """Get a valid channel, reconnecting if the current one is stale. + + Uses a lock to prevent concurrent reconnection attempts from racing. + """ + if self.is_ready: + return self._channel # type: ignore # is_ready guarantees non-None + + async with self._lock: + # Double-check after acquiring lock + if self.is_ready: + return self._channel # type: ignore + + self._channel = None + await self.connect() + + if self._channel is None: + raise RuntimeError("Channel should be established after connect") + + return self._channel + + async def _publish_once( self, routing_key: str, message: str, exchange: Optional[Exchange] = None, persistent: bool = True, ) -> None: - if not self.is_ready: - await self.connect() - - if self._channel is None: - raise RuntimeError("Channel should be established after connect") + channel = await self._ensure_channel() if exchange: - exchange_obj = await self._channel.get_exchange(exchange.name) + exchange_obj = await channel.get_exchange(exchange.name) else: - exchange_obj = self._channel.default_exchange + exchange_obj = channel.default_exchange await exchange_obj.publish( aio_pika.Message( @@ -322,9 +359,23 @@ class AsyncRabbitMQ(RabbitMQBase): routing_key=routing_key, ) + @func_retry + async def publish_message( + self, + routing_key: str, + message: str, + exchange: Optional[Exchange] = None, + persistent: bool = True, + ) -> None: + try: + await self._publish_once(routing_key, message, exchange, persistent) + except aio_pika.exceptions.ChannelInvalidStateError: + logger.warning( + "RabbitMQ channel invalid, forcing reconnect and retrying publish" + ) + async with self._lock: + self._channel = None + await self._publish_once(routing_key, message, exchange, persistent) + async def get_channel(self) -> aio_pika.abc.AbstractChannel: - if not self.is_ready: - await self.connect() - if self._channel is None: - raise RuntimeError("Channel should be established after connect") - return 
self._channel + return await self._ensure_channel() From 1f4105e8f9554d7bff016ce8216041bf4c378bf3 Mon Sep 17 00:00:00 2001 From: Bently Date: Mon, 9 Feb 2026 10:25:08 +0000 Subject: [PATCH 02/11] fix(frontend): Handle object values in FileInput component (#11948) Fixes [#11800](https://github.com/Significant-Gravitas/AutoGPT/issues/11800) ## Problem The FileInput component crashed with `TypeError: e.startsWith is not a function` when the value was an object (from external API) instead of a string. ## Example Input Object When using the external API (`/external-api/v1/graphs/{id}/execute/{version}`), file inputs can be passed as objects: ```json { "node_input": { "input_image": { "name": "image.jpeg", "type": "image/jpeg", "size": 131147, "data": "/9j/4QAW..." } } } ``` ## Changes - Updated `getFileLabelFromValue()` to handle object format: `{ name, type, size, data }` - Added type guards for string vs object values - Graceful fallback for edge cases (null, undefined, empty object) ## Test cases verified - Object with name: returns filename - Object with type only: extracts and formats MIME type - String data URI: parses correctly - String file path: extracts extension - Edge cases: returns "File" fallback --- .../components/atoms/FileInput/FileInput.tsx | 26 ++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx b/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx index d43063b411..2677a7483b 100644 --- a/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx +++ b/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx @@ -104,7 +104,31 @@ export function FileInput(props: Props) { return false; } - const getFileLabelFromValue = (val: string) => { + const getFileLabelFromValue = (val: unknown): string => { + // Handle object format from external API: { name, type, size, data } + if (val && typeof val === "object") { + 
const obj = val as Record; + if (typeof obj.name === "string") { + return getFileLabel( + obj.name, + typeof obj.type === "string" ? obj.type : "", + ); + } + if (typeof obj.type === "string") { + const mimeParts = obj.type.split("/"); + if (mimeParts.length > 1) { + return `${mimeParts[1].toUpperCase()} file`; + } + return `${obj.type} file`; + } + return "File"; + } + + // Handle string values (data URIs or file paths) + if (typeof val !== "string") { + return "File"; + } + if (val.startsWith("data:")) { const matches = val.match(/^data:([^;]+);/); if (matches?.[1]) { From 5a30d114164d3e680e56e2ec5d7c1f6d772b5586 Mon Sep 17 00:00:00 2001 From: Otto Date: Mon, 9 Feb 2026 13:43:55 +0000 Subject: [PATCH 03/11] refactor(copilot): Code cleanup and deduplication (#11950) ## Summary Code cleanup of the AI Copilot codebase - rebased onto latest dev. ## Changes ### New Files - `backend/util/validation.py` - UUID validation helpers - `backend/api/features/chat/tools/helpers.py` - Shared tool utilities ### Credential Matching Consolidation - Added shared utilities to `utils.py` - Refactored `run_block._check_block_credentials()` with discriminator support - Extracted `_resolve_discriminated_credentials()` for multi-provider handling ### Routes Cleanup - Extracted `_create_stream_generator()` and `SSE_RESPONSE_HEADERS` ### Tool Files Cleanup - Updated `run_agent.py` and `run_block.py` to use shared helpers **WIP** - This PR will be updated incrementally. 
--- .../api/features/chat/tools/helpers.py | 29 +++ .../api/features/chat/tools/run_agent.py | 21 +- .../api/features/chat/tools/run_block.py | 185 +++++++----------- .../backend/api/features/chat/tools/utils.py | 96 ++++++++- 4 files changed, 201 insertions(+), 130 deletions(-) create mode 100644 autogpt_platform/backend/backend/api/features/chat/tools/helpers.py diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/helpers.py b/autogpt_platform/backend/backend/api/features/chat/tools/helpers.py new file mode 100644 index 0000000000..cf53605ac0 --- /dev/null +++ b/autogpt_platform/backend/backend/api/features/chat/tools/helpers.py @@ -0,0 +1,29 @@ +"""Shared helpers for chat tools.""" + +from typing import Any + + +def get_inputs_from_schema( + input_schema: dict[str, Any], + exclude_fields: set[str] | None = None, +) -> list[dict[str, Any]]: + """Extract input field info from JSON schema.""" + if not isinstance(input_schema, dict): + return [] + + exclude = exclude_fields or set() + properties = input_schema.get("properties", {}) + required = set(input_schema.get("required", [])) + + return [ + { + "name": name, + "title": schema.get("title", name), + "type": schema.get("type", "string"), + "description": schema.get("description", ""), + "required": name in required, + "default": schema.get("default"), + } + for name, schema in properties.items() + if name not in exclude + ] diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py index 73d4cf81f2..a9f19bcf62 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py @@ -24,6 +24,7 @@ from backend.util.timezone_utils import ( ) from .base import BaseTool +from .helpers import get_inputs_from_schema from .models import ( AgentDetails, AgentDetailsResponse, @@ -261,7 +262,7 @@ class 
RunAgentTool(BaseTool): ), requirements={ "credentials": requirements_creds_list, - "inputs": self._get_inputs_list(graph.input_schema), + "inputs": get_inputs_from_schema(graph.input_schema), "execution_modes": self._get_execution_modes(graph), }, ), @@ -369,22 +370,6 @@ class RunAgentTool(BaseTool): session_id=session_id, ) - def _get_inputs_list(self, input_schema: dict[str, Any]) -> list[dict[str, Any]]: - """Extract inputs list from schema.""" - inputs_list = [] - if isinstance(input_schema, dict) and "properties" in input_schema: - for field_name, field_schema in input_schema["properties"].items(): - inputs_list.append( - { - "name": field_name, - "title": field_schema.get("title", field_name), - "type": field_schema.get("type", "string"), - "description": field_schema.get("description", ""), - "required": field_name in input_schema.get("required", []), - } - ) - return inputs_list - def _get_execution_modes(self, graph: GraphModel) -> list[str]: """Get available execution modes for the graph.""" trigger_info = graph.trigger_setup_info @@ -398,7 +383,7 @@ class RunAgentTool(BaseTool): suffix: str, ) -> str: """Build a message describing available inputs for an agent.""" - inputs_list = self._get_inputs_list(graph.input_schema) + inputs_list = get_inputs_from_schema(graph.input_schema) required_names = [i["name"] for i in inputs_list if i["required"]] optional_names = [i["name"] for i in inputs_list if not i["required"]] diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py index 590f81ff23..fc4a470fdd 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py @@ -12,14 +12,15 @@ from backend.api.features.chat.tools.find_block import ( COPILOT_EXCLUDED_BLOCK_IDS, COPILOT_EXCLUDED_BLOCK_TYPES, ) -from backend.data.block import get_block +from backend.data.block 
import AnyBlockSchema, get_block from backend.data.execution import ExecutionContext -from backend.data.model import CredentialsMetaInput +from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput from backend.data.workspace import get_or_create_workspace from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.util.exceptions import BlockError from .base import BaseTool +from .helpers import get_inputs_from_schema from .models import ( BlockOutputResponse, ErrorResponse, @@ -28,7 +29,10 @@ from .models import ( ToolResponseBase, UserReadiness, ) -from .utils import build_missing_credentials_from_field_info +from .utils import ( + build_missing_credentials_from_field_info, + match_credentials_to_requirements, +) logger = logging.getLogger(__name__) @@ -77,91 +81,6 @@ class RunBlockTool(BaseTool): def requires_auth(self) -> bool: return True - async def _check_block_credentials( - self, - user_id: str, - block: Any, - input_data: dict[str, Any] | None = None, - ) -> tuple[dict[str, CredentialsMetaInput], list[CredentialsMetaInput]]: - """ - Check if user has required credentials for a block. 
- - Args: - user_id: User ID - block: Block to check credentials for - input_data: Input data for the block (used to determine provider via discriminator) - - Returns: - tuple[matched_credentials, missing_credentials] - """ - matched_credentials: dict[str, CredentialsMetaInput] = {} - missing_credentials: list[CredentialsMetaInput] = [] - input_data = input_data or {} - - # Get credential field info from block's input schema - credentials_fields_info = block.input_schema.get_credentials_fields_info() - - if not credentials_fields_info: - return matched_credentials, missing_credentials - - # Get user's available credentials - creds_manager = IntegrationCredentialsManager() - available_creds = await creds_manager.store.get_all_creds(user_id) - - for field_name, field_info in credentials_fields_info.items(): - effective_field_info = field_info - if field_info.discriminator and field_info.discriminator_mapping: - # Get discriminator from input, falling back to schema default - discriminator_value = input_data.get(field_info.discriminator) - if discriminator_value is None: - field = block.input_schema.model_fields.get( - field_info.discriminator - ) - if field and field.default is not PydanticUndefined: - discriminator_value = field.default - - if ( - discriminator_value - and discriminator_value in field_info.discriminator_mapping - ): - effective_field_info = field_info.discriminate(discriminator_value) - logger.debug( - f"Discriminated provider for {field_name}: " - f"{discriminator_value} -> {effective_field_info.provider}" - ) - - matching_cred = next( - ( - cred - for cred in available_creds - if cred.provider in effective_field_info.provider - and cred.type in effective_field_info.supported_types - ), - None, - ) - - if matching_cred: - matched_credentials[field_name] = CredentialsMetaInput( - id=matching_cred.id, - provider=matching_cred.provider, # type: ignore - type=matching_cred.type, - title=matching_cred.title, - ) - else: - # Create a placeholder for the 
missing credential - provider = next(iter(effective_field_info.provider), "unknown") - cred_type = next(iter(effective_field_info.supported_types), "api_key") - missing_credentials.append( - CredentialsMetaInput( - id=field_name, - provider=provider, # type: ignore - type=cred_type, # type: ignore - title=field_name.replace("_", " ").title(), - ) - ) - - return matched_credentials, missing_credentials - async def _execute( self, user_id: str | None, @@ -232,8 +151,8 @@ class RunBlockTool(BaseTool): logger.info(f"Executing block {block.name} ({block_id}) for user {user_id}") creds_manager = IntegrationCredentialsManager() - matched_credentials, missing_credentials = await self._check_block_credentials( - user_id, block, input_data + matched_credentials, missing_credentials = ( + await self._resolve_block_credentials(user_id, block, input_data) ) if missing_credentials: @@ -362,29 +281,75 @@ class RunBlockTool(BaseTool): session_id=session_id, ) - def _get_inputs_list(self, block: Any) -> list[dict[str, Any]]: + async def _resolve_block_credentials( + self, + user_id: str, + block: AnyBlockSchema, + input_data: dict[str, Any] | None = None, + ) -> tuple[dict[str, CredentialsMetaInput], list[CredentialsMetaInput]]: + """ + Resolve credentials for a block by matching user's available credentials. + + Args: + user_id: User ID + block: Block to resolve credentials for + input_data: Input data for the block (used to determine provider via discriminator) + + Returns: + tuple of (matched_credentials, missing_credentials) - matched credentials + are used for block execution, missing ones indicate setup requirements. 
+ """ + input_data = input_data or {} + requirements = self._resolve_discriminated_credentials(block, input_data) + + if not requirements: + return {}, [] + + return await match_credentials_to_requirements(user_id, requirements) + + def _get_inputs_list(self, block: AnyBlockSchema) -> list[dict[str, Any]]: """Extract non-credential inputs from block schema.""" - inputs_list = [] schema = block.input_schema.jsonschema() - properties = schema.get("properties", {}) - required_fields = set(schema.get("required", [])) - - # Get credential field names to exclude credentials_fields = set(block.input_schema.get_credentials_fields().keys()) + return get_inputs_from_schema(schema, exclude_fields=credentials_fields) - for field_name, field_schema in properties.items(): - # Skip credential fields - if field_name in credentials_fields: - continue + def _resolve_discriminated_credentials( + self, + block: AnyBlockSchema, + input_data: dict[str, Any], + ) -> dict[str, CredentialsFieldInfo]: + """Resolve credential requirements, applying discriminator logic where needed.""" + credentials_fields_info = block.input_schema.get_credentials_fields_info() + if not credentials_fields_info: + return {} - inputs_list.append( - { - "name": field_name, - "title": field_schema.get("title", field_name), - "type": field_schema.get("type", "string"), - "description": field_schema.get("description", ""), - "required": field_name in required_fields, - } - ) + resolved: dict[str, CredentialsFieldInfo] = {} - return inputs_list + for field_name, field_info in credentials_fields_info.items(): + effective_field_info = field_info + + if field_info.discriminator and field_info.discriminator_mapping: + discriminator_value = input_data.get(field_info.discriminator) + if discriminator_value is None: + field = block.input_schema.model_fields.get( + field_info.discriminator + ) + if field and field.default is not PydanticUndefined: + discriminator_value = field.default + + if ( + discriminator_value + and 
discriminator_value in field_info.discriminator_mapping + ): + effective_field_info = field_info.discriminate(discriminator_value) + # For host-scoped credentials, add the discriminator value + # (e.g., URL) so _credential_is_for_host can match it + effective_field_info.discriminator_values.add(discriminator_value) + logger.debug( + f"Discriminated provider for {field_name}: " + f"{discriminator_value} -> {effective_field_info.provider}" + ) + + resolved[field_name] = effective_field_info + + return resolved diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py index cda0914809..80a842bf36 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py @@ -8,6 +8,7 @@ from backend.api.features.library import model as library_model from backend.api.features.store import db as store_db from backend.data.graph import GraphModel from backend.data.model import ( + Credentials, CredentialsFieldInfo, CredentialsMetaInput, HostScopedCredentials, @@ -223,6 +224,99 @@ async def get_or_create_library_agent( return library_agents[0] +async def match_credentials_to_requirements( + user_id: str, + requirements: dict[str, CredentialsFieldInfo], +) -> tuple[dict[str, CredentialsMetaInput], list[CredentialsMetaInput]]: + """ + Match user's credentials against a dictionary of credential requirements. + + This is the core matching logic shared by both graph and block credential matching. 
+ """ + matched: dict[str, CredentialsMetaInput] = {} + missing: list[CredentialsMetaInput] = [] + + if not requirements: + return matched, missing + + available_creds = await get_user_credentials(user_id) + + for field_name, field_info in requirements.items(): + matching_cred = find_matching_credential(available_creds, field_info) + + if matching_cred: + try: + matched[field_name] = create_credential_meta_from_match(matching_cred) + except Exception as e: + logger.error( + f"Failed to create CredentialsMetaInput for field '{field_name}': " + f"provider={matching_cred.provider}, type={matching_cred.type}, " + f"credential_id={matching_cred.id}", + exc_info=True, + ) + provider = next(iter(field_info.provider), "unknown") + cred_type = next(iter(field_info.supported_types), "api_key") + missing.append( + CredentialsMetaInput( + id=field_name, + provider=provider, # type: ignore + type=cred_type, # type: ignore + title=f"{field_name} (validation failed: {e})", + ) + ) + else: + provider = next(iter(field_info.provider), "unknown") + cred_type = next(iter(field_info.supported_types), "api_key") + missing.append( + CredentialsMetaInput( + id=field_name, + provider=provider, # type: ignore + type=cred_type, # type: ignore + title=field_name.replace("_", " ").title(), + ) + ) + + return matched, missing + + +async def get_user_credentials(user_id: str) -> list[Credentials]: + """Get all available credentials for a user.""" + creds_manager = IntegrationCredentialsManager() + return await creds_manager.store.get_all_creds(user_id) + + +def find_matching_credential( + available_creds: list[Credentials], + field_info: CredentialsFieldInfo, +) -> Credentials | None: + """Find a credential that matches the required provider, type, scopes, and host.""" + for cred in available_creds: + if cred.provider not in field_info.provider: + continue + if cred.type not in field_info.supported_types: + continue + if cred.type == "oauth2" and not _credential_has_required_scopes( + cred, 
field_info + ): + continue + if cred.type == "host_scoped" and not _credential_is_for_host(cred, field_info): + continue + return cred + return None + + +def create_credential_meta_from_match( + matching_cred: Credentials, +) -> CredentialsMetaInput: + """Create a CredentialsMetaInput from a matched credential.""" + return CredentialsMetaInput( + id=matching_cred.id, + provider=matching_cred.provider, # type: ignore + type=matching_cred.type, + title=matching_cred.title, + ) + + async def match_user_credentials_to_graph( user_id: str, graph: GraphModel, @@ -331,8 +425,6 @@ def _credential_has_required_scopes( # If no scopes are required, any credential matches if not requirements.required_scopes: return True - - # Check that credential scopes are a superset of required scopes return set(credential.scopes).issuperset(requirements.required_scopes) From 6467f6734f145af70356d61ae5f461e8dc65346f Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Mon, 9 Feb 2026 15:05:29 +0100 Subject: [PATCH 04/11] debug(backend/chat): Add timing logging to chat stream generation mechanism (#12019) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [SECRT-1912: Investigate & eliminate chat session start latency](https://linear.app/autogpt/issue/SECRT-1912) ### Changes πŸ—οΈ - Add timing logs to `backend.api.features.chat` in `routes.py`, `service.py`, and `stream_registry.py` - Remove unneeded DB join in `create_chat_session` ### Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - CI checks --- .../backend/backend/api/features/chat/db.py | 5 +- .../backend/api/features/chat/routes.py | 226 +++++++++++++- .../backend/api/features/chat/service.py | 147 ++++++++- .../api/features/chat/stream_registry.py | 285 +++++++++++++++++- 4 files changed, 629 insertions(+), 34 deletions(-) diff --git 
a/autogpt_platform/backend/backend/api/features/chat/db.py b/autogpt_platform/backend/backend/api/features/chat/db.py index d34b4e5b07..303ea0a698 100644 --- a/autogpt_platform/backend/backend/api/features/chat/db.py +++ b/autogpt_platform/backend/backend/api/features/chat/db.py @@ -45,10 +45,7 @@ async def create_chat_session( successfulAgentRuns=SafeJson({}), successfulAgentSchedules=SafeJson({}), ) - return await PrismaChatSession.prisma().create( - data=data, - include={"Messages": True}, - ) + return await PrismaChatSession.prisma().create(data=data) async def update_chat_session( diff --git a/autogpt_platform/backend/backend/api/features/chat/routes.py b/autogpt_platform/backend/backend/api/features/chat/routes.py index 3e731d86ac..74e6e8ba1e 100644 --- a/autogpt_platform/backend/backend/api/features/chat/routes.py +++ b/autogpt_platform/backend/backend/api/features/chat/routes.py @@ -266,12 +266,38 @@ async def stream_chat_post( """ import asyncio + import time + + stream_start_time = time.perf_counter() + + # Base log metadata (task_id added after creation) + log_meta = {"component": "ChatStream", "session_id": session_id} + if user_id: + log_meta["user_id"] = user_id + + logger.info( + f"[TIMING] stream_chat_post STARTED, session={session_id}, " + f"user={user_id}, message_len={len(request.message)}", + extra={"json_fields": log_meta}, + ) session = await _validate_and_get_session(session_id, user_id) + logger.info( + f"[TIMING] session validated in {(time.perf_counter() - stream_start_time)*1000:.1f}ms", + extra={ + "json_fields": { + **log_meta, + "duration_ms": (time.perf_counter() - stream_start_time) * 1000, + } + }, + ) # Create a task in the stream registry for reconnection support task_id = str(uuid_module.uuid4()) operation_id = str(uuid_module.uuid4()) + log_meta["task_id"] = task_id + + task_create_start = time.perf_counter() await stream_registry.create_task( task_id=task_id, session_id=session_id, @@ -280,14 +306,46 @@ async def 
stream_chat_post( tool_name="chat", operation_id=operation_id, ) + logger.info( + f"[TIMING] create_task completed in {(time.perf_counter() - task_create_start)*1000:.1f}ms", + extra={ + "json_fields": { + **log_meta, + "duration_ms": (time.perf_counter() - task_create_start) * 1000, + } + }, + ) # Background task that runs the AI generation independently of SSE connection async def run_ai_generation(): + import time as time_module + + gen_start_time = time_module.perf_counter() + logger.info( + f"[TIMING] run_ai_generation STARTED, task={task_id}, session={session_id}, user={user_id}", + extra={"json_fields": log_meta}, + ) + first_chunk_time, ttfc = None, None + chunk_count = 0 try: # Emit a start event with task_id for reconnection start_chunk = StreamStart(messageId=task_id, taskId=task_id) await stream_registry.publish_chunk(task_id, start_chunk) + logger.info( + f"[TIMING] StreamStart published at {(time_module.perf_counter() - gen_start_time)*1000:.1f}ms", + extra={ + "json_fields": { + **log_meta, + "elapsed_ms": (time_module.perf_counter() - gen_start_time) + * 1000, + } + }, + ) + logger.info( + "[TIMING] Calling stream_chat_completion", + extra={"json_fields": log_meta}, + ) async for chunk in chat_service.stream_chat_completion( session_id, request.message, @@ -296,54 +354,202 @@ async def stream_chat_post( session=session, # Pass pre-fetched session to avoid double-fetch context=request.context, ): + chunk_count += 1 + if first_chunk_time is None: + first_chunk_time = time_module.perf_counter() + ttfc = first_chunk_time - gen_start_time + logger.info( + f"[TIMING] FIRST AI CHUNK at {ttfc:.2f}s, type={type(chunk).__name__}", + extra={ + "json_fields": { + **log_meta, + "chunk_type": type(chunk).__name__, + "time_to_first_chunk_ms": ttfc * 1000, + } + }, + ) # Write to Redis (subscribers will receive via XREAD) await stream_registry.publish_chunk(task_id, chunk) - # Mark task as completed + gen_end_time = time_module.perf_counter() + total_time = 
(gen_end_time - gen_start_time) * 1000 + logger.info( + f"[TIMING] run_ai_generation FINISHED in {total_time/1000:.1f}s; " + f"task={task_id}, session={session_id}, " + f"ttfc={ttfc or -1:.2f}s, n_chunks={chunk_count}", + extra={ + "json_fields": { + **log_meta, + "total_time_ms": total_time, + "time_to_first_chunk_ms": ( + ttfc * 1000 if ttfc is not None else None + ), + "n_chunks": chunk_count, + } + }, + ) + await stream_registry.mark_task_completed(task_id, "completed") except Exception as e: + elapsed = time_module.perf_counter() - gen_start_time logger.error( - f"Error in background AI generation for session {session_id}: {e}" + f"[TIMING] run_ai_generation ERROR after {elapsed:.2f}s: {e}", + extra={ + "json_fields": { + **log_meta, + "elapsed_ms": elapsed * 1000, + "error": str(e), + } + }, ) await stream_registry.mark_task_completed(task_id, "failed") # Start the AI generation in a background task bg_task = asyncio.create_task(run_ai_generation()) await stream_registry.set_task_asyncio_task(task_id, bg_task) + setup_time = (time.perf_counter() - stream_start_time) * 1000 + logger.info( + f"[TIMING] Background task started, setup={setup_time:.1f}ms", + extra={"json_fields": {**log_meta, "setup_time_ms": setup_time}}, + ) # SSE endpoint that subscribes to the task's stream async def event_generator() -> AsyncGenerator[str, None]: + import time as time_module + + event_gen_start = time_module.perf_counter() + logger.info( + f"[TIMING] event_generator STARTED, task={task_id}, session={session_id}, " + f"user={user_id}", + extra={"json_fields": log_meta}, + ) subscriber_queue = None + first_chunk_yielded = False + chunks_yielded = 0 try: # Subscribe to the task stream (this replays existing messages + live updates) + subscribe_start = time_module.perf_counter() + logger.info( + "[TIMING] Calling subscribe_to_task", + extra={"json_fields": log_meta}, + ) subscriber_queue = await stream_registry.subscribe_to_task( task_id=task_id, user_id=user_id, 
last_message_id="0-0", # Get all messages from the beginning ) + subscribe_time = (time_module.perf_counter() - subscribe_start) * 1000 + logger.info( + f"[TIMING] subscribe_to_task completed in {subscribe_time:.1f}ms, " + f"queue_ok={subscriber_queue is not None}", + extra={ + "json_fields": { + **log_meta, + "duration_ms": subscribe_time, + "queue_obtained": subscriber_queue is not None, + } + }, + ) if subscriber_queue is None: + logger.info( + "[TIMING] subscriber_queue is None, yielding finish", + extra={"json_fields": log_meta}, + ) yield StreamFinish().to_sse() yield "data: [DONE]\n\n" return # Read from the subscriber queue and yield to SSE + logger.info( + "[TIMING] Starting to read from subscriber_queue", + extra={"json_fields": log_meta}, + ) while True: try: + queue_wait_start = time_module.perf_counter() chunk = await asyncio.wait_for(subscriber_queue.get(), timeout=30.0) + queue_wait_time = ( + time_module.perf_counter() - queue_wait_start + ) * 1000 + chunks_yielded += 1 + + if not first_chunk_yielded: + first_chunk_yielded = True + elapsed = time_module.perf_counter() - event_gen_start + logger.info( + f"[TIMING] FIRST CHUNK from queue at {elapsed:.2f}s, " + f"type={type(chunk).__name__}, " + f"wait={queue_wait_time:.1f}ms", + extra={ + "json_fields": { + **log_meta, + "chunk_type": type(chunk).__name__, + "elapsed_ms": elapsed * 1000, + "queue_wait_ms": queue_wait_time, + } + }, + ) + elif chunks_yielded % 50 == 0: + logger.info( + f"[TIMING] Chunk #{chunks_yielded}, " + f"type={type(chunk).__name__}", + extra={ + "json_fields": { + **log_meta, + "chunk_number": chunks_yielded, + "chunk_type": type(chunk).__name__, + } + }, + ) + yield chunk.to_sse() # Check for finish signal if isinstance(chunk, StreamFinish): + total_time = time_module.perf_counter() - event_gen_start + logger.info( + f"[TIMING] StreamFinish received in {total_time:.2f}s; " + f"n_chunks={chunks_yielded}", + extra={ + "json_fields": { + **log_meta, + "chunks_yielded": 
chunks_yielded, + "total_time_ms": total_time * 1000, + } + }, + ) break except asyncio.TimeoutError: # Send heartbeat to keep connection alive + logger.info( + f"[TIMING] Heartbeat timeout, chunks_so_far={chunks_yielded}", + extra={ + "json_fields": {**log_meta, "chunks_so_far": chunks_yielded} + }, + ) yield StreamHeartbeat().to_sse() except GeneratorExit: + logger.info( + f"[TIMING] GeneratorExit (client disconnected), chunks={chunks_yielded}", + extra={ + "json_fields": { + **log_meta, + "chunks_yielded": chunks_yielded, + "reason": "client_disconnect", + } + }, + ) pass # Client disconnected - background task continues except Exception as e: - logger.error(f"Error in SSE stream for task {task_id}: {e}") + elapsed = (time_module.perf_counter() - event_gen_start) * 1000 + logger.error( + f"[TIMING] event_generator ERROR after {elapsed:.1f}ms: {e}", + extra={ + "json_fields": {**log_meta, "elapsed_ms": elapsed, "error": str(e)} + }, + ) finally: # Unsubscribe when client disconnects or stream ends to prevent resource leak if subscriber_queue is not None: @@ -357,6 +563,18 @@ async def stream_chat_post( exc_info=True, ) # AI SDK protocol termination - always yield even if unsubscribe fails + total_time = time_module.perf_counter() - event_gen_start + logger.info( + f"[TIMING] event_generator FINISHED in {total_time:.2f}s; " + f"task={task_id}, session={session_id}, n_chunks={chunks_yielded}", + extra={ + "json_fields": { + **log_meta, + "total_time_ms": total_time * 1000, + "chunks_yielded": chunks_yielded, + } + }, + ) yield "data: [DONE]\n\n" return StreamingResponse( @@ -425,7 +643,7 @@ async def stream_chat_get( "Chat stream completed", extra={ "session_id": session_id, - "chunk_count": chunk_count, + "n_chunks": chunk_count, "first_chunk_type": first_chunk_type, }, ) diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index 06da6bdf2b..da18421b98 100644 --- 
a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -371,21 +371,45 @@ async def stream_chat_completion( ValueError: If max_context_messages is exceeded """ + completion_start = time.monotonic() + + # Build log metadata for structured logging + log_meta = {"component": "ChatService", "session_id": session_id} + if user_id: + log_meta["user_id"] = user_id + logger.info( - f"Streaming chat completion for session {session_id} for message {message} and user id {user_id}. Message is user message: {is_user_message}" + f"[TIMING] stream_chat_completion STARTED, session={session_id}, user={user_id}, " + f"message_len={len(message) if message else 0}, is_user={is_user_message}", + extra={ + "json_fields": { + **log_meta, + "message_len": len(message) if message else 0, + "is_user_message": is_user_message, + } + }, ) # Only fetch from Redis if session not provided (initial call) if session is None: + fetch_start = time.monotonic() session = await get_chat_session(session_id, user_id) + fetch_time = (time.monotonic() - fetch_start) * 1000 logger.info( - f"Fetched session from Redis: {session.session_id if session else 'None'}, " - f"message_count={len(session.messages) if session else 0}" + f"[TIMING] get_chat_session took {fetch_time:.1f}ms, " + f"n_messages={len(session.messages) if session else 0}", + extra={ + "json_fields": { + **log_meta, + "duration_ms": fetch_time, + "n_messages": len(session.messages) if session else 0, + } + }, ) else: logger.info( - f"Using provided session object: {session.session_id}, " - f"message_count={len(session.messages)}" + f"[TIMING] Using provided session, messages={len(session.messages)}", + extra={"json_fields": {**log_meta, "n_messages": len(session.messages)}}, ) if not session: @@ -406,17 +430,25 @@ async def stream_chat_completion( # Track user message in PostHog if is_user_message: + posthog_start = time.monotonic() track_user_message( 
user_id=user_id, session_id=session_id, message_length=len(message), ) + posthog_time = (time.monotonic() - posthog_start) * 1000 + logger.info( + f"[TIMING] track_user_message took {posthog_time:.1f}ms", + extra={"json_fields": {**log_meta, "duration_ms": posthog_time}}, + ) - logger.info( - f"Upserting session: {session.session_id} with user id {session.user_id}, " - f"message_count={len(session.messages)}" - ) + upsert_start = time.monotonic() session = await upsert_chat_session(session) + upsert_time = (time.monotonic() - upsert_start) * 1000 + logger.info( + f"[TIMING] upsert_chat_session took {upsert_time:.1f}ms", + extra={"json_fields": {**log_meta, "duration_ms": upsert_time}}, + ) assert session, "Session not found" # Generate title for new sessions on first user message (non-blocking) @@ -454,7 +486,13 @@ async def stream_chat_completion( asyncio.create_task(_update_title()) # Build system prompt with business understanding + prompt_start = time.monotonic() system_prompt, understanding = await _build_system_prompt(user_id) + prompt_time = (time.monotonic() - prompt_start) * 1000 + logger.info( + f"[TIMING] _build_system_prompt took {prompt_time:.1f}ms", + extra={"json_fields": {**log_meta, "duration_ms": prompt_time}}, + ) # Initialize variables for streaming assistant_response = ChatMessage( @@ -483,9 +521,18 @@ async def stream_chat_completion( text_block_id = str(uuid_module.uuid4()) # Yield message start + setup_time = (time.monotonic() - completion_start) * 1000 + logger.info( + f"[TIMING] Setup complete, yielding StreamStart at {setup_time:.1f}ms", + extra={"json_fields": {**log_meta, "setup_time_ms": setup_time}}, + ) yield StreamStart(messageId=message_id) try: + logger.info( + "[TIMING] Calling _stream_chat_chunks", + extra={"json_fields": log_meta}, + ) async for chunk in _stream_chat_chunks( session=session, tools=tools, @@ -893,9 +940,21 @@ async def _stream_chat_chunks( SSE formatted JSON response objects """ + import time as time_module + + 
stream_chunks_start = time_module.perf_counter() model = config.model - logger.info("Starting pure chat stream") + # Build log metadata for structured logging + log_meta = {"component": "ChatService", "session_id": session.session_id} + if session.user_id: + log_meta["user_id"] = session.user_id + + logger.info( + f"[TIMING] _stream_chat_chunks STARTED, session={session.session_id}, " + f"user={session.user_id}, n_messages={len(session.messages)}", + extra={"json_fields": {**log_meta, "n_messages": len(session.messages)}}, + ) messages = session.to_openai_messages() if system_prompt: @@ -906,12 +965,18 @@ async def _stream_chat_chunks( messages = [system_message] + messages # Apply context window management + context_start = time_module.perf_counter() context_result = await _manage_context_window( messages=messages, model=model, api_key=config.api_key, base_url=config.base_url, ) + context_time = (time_module.perf_counter() - context_start) * 1000 + logger.info( + f"[TIMING] _manage_context_window took {context_time:.1f}ms", + extra={"json_fields": {**log_meta, "duration_ms": context_time}}, + ) if context_result.error: if "System prompt dropped" in context_result.error: @@ -946,9 +1011,19 @@ async def _stream_chat_chunks( while retry_count <= MAX_RETRIES: try: + elapsed = (time_module.perf_counter() - stream_chunks_start) * 1000 + retry_info = ( + f" (retry {retry_count}/{MAX_RETRIES})" if retry_count > 0 else "" + ) logger.info( - f"Creating OpenAI chat completion stream..." 
- f"{f' (retry {retry_count}/{MAX_RETRIES})' if retry_count > 0 else ''}" + f"[TIMING] Creating OpenAI stream at {elapsed:.1f}ms{retry_info}", + extra={ + "json_fields": { + **log_meta, + "elapsed_ms": elapsed, + "retry_count": retry_count, + } + }, ) # Build extra_body for OpenRouter tracing and PostHog analytics @@ -965,6 +1040,7 @@ async def _stream_chat_chunks( :128 ] # OpenRouter limit + api_call_start = time_module.perf_counter() stream = await client.chat.completions.create( model=model, messages=cast(list[ChatCompletionMessageParam], messages), @@ -974,6 +1050,11 @@ async def _stream_chat_chunks( stream_options=ChatCompletionStreamOptionsParam(include_usage=True), extra_body=extra_body, ) + api_init_time = (time_module.perf_counter() - api_call_start) * 1000 + logger.info( + f"[TIMING] OpenAI stream object returned in {api_init_time:.1f}ms", + extra={"json_fields": {**log_meta, "duration_ms": api_init_time}}, + ) # Variables to accumulate tool calls tool_calls: list[dict[str, Any]] = [] @@ -984,10 +1065,13 @@ async def _stream_chat_chunks( # Track if we've started the text block text_started = False + first_content_chunk = True + chunk_count = 0 # Process the stream chunk: ChatCompletionChunk async for chunk in stream: + chunk_count += 1 if chunk.usage: yield StreamUsage( promptTokens=chunk.usage.prompt_tokens, @@ -1010,6 +1094,23 @@ async def _stream_chat_chunks( if not text_started and text_block_id: yield StreamTextStart(id=text_block_id) text_started = True + # Log timing for first content chunk + if first_content_chunk: + first_content_chunk = False + ttfc = ( + time_module.perf_counter() - api_call_start + ) * 1000 + logger.info( + f"[TIMING] FIRST CONTENT CHUNK at {ttfc:.1f}ms " + f"(since API call), n_chunks={chunk_count}", + extra={ + "json_fields": { + **log_meta, + "time_to_first_chunk_ms": ttfc, + "n_chunks": chunk_count, + } + }, + ) # Stream the text delta text_response = StreamTextDelta( id=text_block_id or "", @@ -1066,7 +1167,21 @@ async 
def _stream_chat_chunks( toolName=tool_calls[idx]["function"]["name"], ) emitted_start_for_idx.add(idx) - logger.info(f"Stream complete. Finish reason: {finish_reason}") + stream_duration = time_module.perf_counter() - api_call_start + logger.info( + f"[TIMING] OpenAI stream COMPLETE, finish_reason={finish_reason}, " + f"duration={stream_duration:.2f}s, " + f"n_chunks={chunk_count}, n_tool_calls={len(tool_calls)}", + extra={ + "json_fields": { + **log_meta, + "stream_duration_ms": stream_duration * 1000, + "finish_reason": finish_reason, + "n_chunks": chunk_count, + "n_tool_calls": len(tool_calls), + } + }, + ) # Yield all accumulated tool calls after the stream is complete # This ensures all tool call arguments have been fully received @@ -1086,6 +1201,12 @@ async def _stream_chat_chunks( # Re-raise to trigger retry logic in the parent function raise + total_time = (time_module.perf_counter() - stream_chunks_start) * 1000 + logger.info( + f"[TIMING] _stream_chat_chunks COMPLETED in {total_time/1000:.1f}s; " + f"session={session.session_id}, user={session.user_id}", + extra={"json_fields": {**log_meta, "total_time_ms": total_time}}, + ) yield StreamFinish() return except Exception as e: diff --git a/autogpt_platform/backend/backend/api/features/chat/stream_registry.py b/autogpt_platform/backend/backend/api/features/chat/stream_registry.py index 88a5023e2b..509d20d9f4 100644 --- a/autogpt_platform/backend/backend/api/features/chat/stream_registry.py +++ b/autogpt_platform/backend/backend/api/features/chat/stream_registry.py @@ -104,6 +104,24 @@ async def create_task( Returns: The created ActiveTask instance (metadata only) """ + import time + + start_time = time.perf_counter() + + # Build log metadata for structured logging + log_meta = { + "component": "StreamRegistry", + "task_id": task_id, + "session_id": session_id, + } + if user_id: + log_meta["user_id"] = user_id + + logger.info( + f"[TIMING] create_task STARTED, task={task_id}, session={session_id}, 
user={user_id}", + extra={"json_fields": log_meta}, + ) + task = ActiveTask( task_id=task_id, session_id=session_id, @@ -114,10 +132,18 @@ async def create_task( ) # Store metadata in Redis + redis_start = time.perf_counter() redis = await get_redis_async() + redis_time = (time.perf_counter() - redis_start) * 1000 + logger.info( + f"[TIMING] get_redis_async took {redis_time:.1f}ms", + extra={"json_fields": {**log_meta, "duration_ms": redis_time}}, + ) + meta_key = _get_task_meta_key(task_id) op_key = _get_operation_mapping_key(operation_id) + hset_start = time.perf_counter() await redis.hset( # type: ignore[misc] meta_key, mapping={ @@ -131,12 +157,22 @@ async def create_task( "created_at": task.created_at.isoformat(), }, ) + hset_time = (time.perf_counter() - hset_start) * 1000 + logger.info( + f"[TIMING] redis.hset took {hset_time:.1f}ms", + extra={"json_fields": {**log_meta, "duration_ms": hset_time}}, + ) + await redis.expire(meta_key, config.stream_ttl) # Create operation_id -> task_id mapping for webhook lookups await redis.set(op_key, task_id, ex=config.stream_ttl) - logger.debug(f"Created task {task_id} for session {session_id}") + total_time = (time.perf_counter() - start_time) * 1000 + logger.info( + f"[TIMING] create_task COMPLETED in {total_time:.1f}ms; task={task_id}, session={session_id}", + extra={"json_fields": {**log_meta, "total_time_ms": total_time}}, + ) return task @@ -156,26 +192,60 @@ async def publish_chunk( Returns: The Redis Stream message ID """ + import time + + start_time = time.perf_counter() + chunk_type = type(chunk).__name__ chunk_json = chunk.model_dump_json() message_id = "0-0" + # Build log metadata + log_meta = { + "component": "StreamRegistry", + "task_id": task_id, + "chunk_type": chunk_type, + } + try: redis = await get_redis_async() stream_key = _get_task_stream_key(task_id) # Write to Redis Stream for persistence and real-time delivery + xadd_start = time.perf_counter() raw_id = await redis.xadd( stream_key, {"data": 
chunk_json}, maxlen=config.stream_max_length, ) + xadd_time = (time.perf_counter() - xadd_start) * 1000 message_id = raw_id if isinstance(raw_id, str) else raw_id.decode() # Set TTL on stream to match task metadata TTL await redis.expire(stream_key, config.stream_ttl) + + total_time = (time.perf_counter() - start_time) * 1000 + # Only log timing for significant chunks or slow operations + if ( + chunk_type + in ("StreamStart", "StreamFinish", "StreamTextStart", "StreamTextEnd") + or total_time > 50 + ): + logger.info( + f"[TIMING] publish_chunk {chunk_type} in {total_time:.1f}ms (xadd={xadd_time:.1f}ms)", + extra={ + "json_fields": { + **log_meta, + "total_time_ms": total_time, + "xadd_time_ms": xadd_time, + "message_id": message_id, + } + }, + ) except Exception as e: + elapsed = (time.perf_counter() - start_time) * 1000 logger.error( - f"Failed to publish chunk for task {task_id}: {e}", + f"[TIMING] Failed to publish chunk {chunk_type} after {elapsed:.1f}ms: {e}", + extra={"json_fields": {**log_meta, "elapsed_ms": elapsed, "error": str(e)}}, exc_info=True, ) @@ -200,24 +270,61 @@ async def subscribe_to_task( An asyncio Queue that will receive stream chunks, or None if task not found or user doesn't have access """ + import time + + start_time = time.perf_counter() + + # Build log metadata + log_meta = {"component": "StreamRegistry", "task_id": task_id} + if user_id: + log_meta["user_id"] = user_id + + logger.info( + f"[TIMING] subscribe_to_task STARTED, task={task_id}, user={user_id}, last_msg={last_message_id}", + extra={"json_fields": {**log_meta, "last_message_id": last_message_id}}, + ) + + redis_start = time.perf_counter() redis = await get_redis_async() meta_key = _get_task_meta_key(task_id) meta: dict[Any, Any] = await redis.hgetall(meta_key) # type: ignore[misc] + hgetall_time = (time.perf_counter() - redis_start) * 1000 + logger.info( + f"[TIMING] Redis hgetall took {hgetall_time:.1f}ms", + extra={"json_fields": {**log_meta, "duration_ms": 
hgetall_time}}, + ) if not meta: - logger.debug(f"Task {task_id} not found in Redis") + elapsed = (time.perf_counter() - start_time) * 1000 + logger.info( + f"[TIMING] Task not found in Redis after {elapsed:.1f}ms", + extra={ + "json_fields": { + **log_meta, + "elapsed_ms": elapsed, + "reason": "task_not_found", + } + }, + ) return None # Note: Redis client uses decode_responses=True, so keys are strings task_status = meta.get("status", "") task_user_id = meta.get("user_id", "") or None + log_meta["session_id"] = meta.get("session_id", "") # Validate ownership - if task has an owner, requester must match if task_user_id: if user_id != task_user_id: logger.warning( - f"User {user_id} denied access to task {task_id} " - f"owned by {task_user_id}" + f"[TIMING] Access denied: user {user_id} tried to access task owned by {task_user_id}", + extra={ + "json_fields": { + **log_meta, + "task_owner": task_user_id, + "reason": "access_denied", + } + }, ) return None @@ -225,7 +332,19 @@ async def subscribe_to_task( stream_key = _get_task_stream_key(task_id) # Step 1: Replay messages from Redis Stream + xread_start = time.perf_counter() messages = await redis.xread({stream_key: last_message_id}, block=0, count=1000) + xread_time = (time.perf_counter() - xread_start) * 1000 + logger.info( + f"[TIMING] Redis xread (replay) took {xread_time:.1f}ms, status={task_status}", + extra={ + "json_fields": { + **log_meta, + "duration_ms": xread_time, + "task_status": task_status, + } + }, + ) replayed_count = 0 replay_last_id = last_message_id @@ -244,19 +363,48 @@ async def subscribe_to_task( except Exception as e: logger.warning(f"Failed to replay message: {e}") - logger.debug(f"Task {task_id}: replayed {replayed_count} messages") + logger.info( + f"[TIMING] Replayed {replayed_count} messages, last_id={replay_last_id}", + extra={ + "json_fields": { + **log_meta, + "n_messages_replayed": replayed_count, + "replay_last_id": replay_last_id, + } + }, + ) # Step 2: If task is still running, 
start stream listener for live updates if task_status == "running": + logger.info( + "[TIMING] Task still running, starting _stream_listener", + extra={"json_fields": {**log_meta, "task_status": task_status}}, + ) listener_task = asyncio.create_task( - _stream_listener(task_id, subscriber_queue, replay_last_id) + _stream_listener(task_id, subscriber_queue, replay_last_id, log_meta) ) # Track listener task for cleanup on unsubscribe _listener_tasks[id(subscriber_queue)] = (task_id, listener_task) else: # Task is completed/failed - add finish marker + logger.info( + f"[TIMING] Task already {task_status}, adding StreamFinish", + extra={"json_fields": {**log_meta, "task_status": task_status}}, + ) await subscriber_queue.put(StreamFinish()) + total_time = (time.perf_counter() - start_time) * 1000 + logger.info( + f"[TIMING] subscribe_to_task COMPLETED in {total_time:.1f}ms; task={task_id}, " + f"n_messages_replayed={replayed_count}", + extra={ + "json_fields": { + **log_meta, + "total_time_ms": total_time, + "n_messages_replayed": replayed_count, + } + }, + ) return subscriber_queue @@ -264,6 +412,7 @@ async def _stream_listener( task_id: str, subscriber_queue: asyncio.Queue[StreamBaseResponse], last_replayed_id: str, + log_meta: dict | None = None, ) -> None: """Listen to Redis Stream for new messages using blocking XREAD. 
@@ -274,10 +423,27 @@ async def _stream_listener( task_id: Task ID to listen for subscriber_queue: Queue to deliver messages to last_replayed_id: Last message ID from replay (continue from here) + log_meta: Structured logging metadata """ + import time + + start_time = time.perf_counter() + + # Use provided log_meta or build minimal one + if log_meta is None: + log_meta = {"component": "StreamRegistry", "task_id": task_id} + + logger.info( + f"[TIMING] _stream_listener STARTED, task={task_id}, last_id={last_replayed_id}", + extra={"json_fields": {**log_meta, "last_replayed_id": last_replayed_id}}, + ) + queue_id = id(subscriber_queue) # Track the last successfully delivered message ID for recovery hints last_delivered_id = last_replayed_id + messages_delivered = 0 + first_message_time = None + xread_count = 0 try: redis = await get_redis_async() @@ -287,9 +453,39 @@ async def _stream_listener( while True: # Block for up to 30 seconds waiting for new messages # This allows periodic checking if task is still running + xread_start = time.perf_counter() + xread_count += 1 messages = await redis.xread( {stream_key: current_id}, block=30000, count=100 ) + xread_time = (time.perf_counter() - xread_start) * 1000 + + if messages: + msg_count = sum(len(msgs) for _, msgs in messages) + logger.info( + f"[TIMING] xread #{xread_count} returned {msg_count} messages in {xread_time:.1f}ms", + extra={ + "json_fields": { + **log_meta, + "xread_count": xread_count, + "n_messages": msg_count, + "duration_ms": xread_time, + } + }, + ) + elif xread_time > 1000: + # Only log timeouts (30s blocking) + logger.info( + f"[TIMING] xread #{xread_count} timeout after {xread_time:.1f}ms", + extra={ + "json_fields": { + **log_meta, + "xread_count": xread_count, + "duration_ms": xread_time, + "reason": "timeout", + } + }, + ) if not messages: # Timeout - check if task is still running @@ -326,10 +522,30 @@ async def _stream_listener( ) # Update last delivered ID on successful delivery 
last_delivered_id = current_id + messages_delivered += 1 + if first_message_time is None: + first_message_time = time.perf_counter() + elapsed = (first_message_time - start_time) * 1000 + logger.info( + f"[TIMING] FIRST live message at {elapsed:.1f}ms, type={type(chunk).__name__}", + extra={ + "json_fields": { + **log_meta, + "elapsed_ms": elapsed, + "chunk_type": type(chunk).__name__, + } + }, + ) except asyncio.TimeoutError: logger.warning( - f"Subscriber queue full for task {task_id}, " - f"message delivery timed out after {QUEUE_PUT_TIMEOUT}s" + f"[TIMING] Subscriber queue full, delivery timed out after {QUEUE_PUT_TIMEOUT}s", + extra={ + "json_fields": { + **log_meta, + "timeout_s": QUEUE_PUT_TIMEOUT, + "reason": "queue_full", + } + }, ) # Send overflow error with recovery info try: @@ -351,15 +567,44 @@ async def _stream_listener( # Stop listening on finish if isinstance(chunk, StreamFinish): + total_time = (time.perf_counter() - start_time) * 1000 + logger.info( + f"[TIMING] StreamFinish received in {total_time/1000:.1f}s; delivered={messages_delivered}", + extra={ + "json_fields": { + **log_meta, + "total_time_ms": total_time, + "messages_delivered": messages_delivered, + } + }, + ) return except Exception as e: - logger.warning(f"Error processing stream message: {e}") + logger.warning( + f"Error processing stream message: {e}", + extra={"json_fields": {**log_meta, "error": str(e)}}, + ) except asyncio.CancelledError: - logger.debug(f"Stream listener cancelled for task {task_id}") + elapsed = (time.perf_counter() - start_time) * 1000 + logger.info( + f"[TIMING] _stream_listener CANCELLED after {elapsed:.1f}ms, delivered={messages_delivered}", + extra={ + "json_fields": { + **log_meta, + "elapsed_ms": elapsed, + "messages_delivered": messages_delivered, + "reason": "cancelled", + } + }, + ) raise # Re-raise to propagate cancellation except Exception as e: - logger.error(f"Stream listener error for task {task_id}: {e}") + elapsed = (time.perf_counter() - 
start_time) * 1000 + logger.error( + f"[TIMING] _stream_listener ERROR after {elapsed:.1f}ms: {e}", + extra={"json_fields": {**log_meta, "elapsed_ms": elapsed, "error": str(e)}}, + ) # On error, send finish to unblock subscriber try: await asyncio.wait_for( @@ -368,10 +613,24 @@ async def _stream_listener( ) except (asyncio.TimeoutError, asyncio.QueueFull): logger.warning( - f"Could not deliver finish event for task {task_id} after error" + "Could not deliver finish event after error", + extra={"json_fields": log_meta}, ) finally: # Clean up listener task mapping on exit + total_time = (time.perf_counter() - start_time) * 1000 + logger.info( + f"[TIMING] _stream_listener FINISHED in {total_time/1000:.1f}s; task={task_id}, " + f"delivered={messages_delivered}, xread_count={xread_count}", + extra={ + "json_fields": { + **log_meta, + "total_time_ms": total_time, + "messages_delivered": messages_delivered, + "xread_count": xread_count, + } + }, + ) _listener_tasks.pop(queue_id, None) From 81f8290f0176eea29be3428c5e6ed21b82dc5da2 Mon Sep 17 00:00:00 2001 From: Otto Date: Tue, 10 Feb 2026 07:35:13 +0000 Subject: [PATCH 05/11] debug(backend/db): Add diagnostic logging for vector type errors (#12024) Adds diagnostic logging when the `type vector does not exist` error occurs in raw SQL queries. ## Problem We're seeing intermittent "type vector does not exist" errors on dev-behave ([Sentry issue](https://significant-gravitas.sentry.io/issues/7205929979/)). The pgvector extension should be in the search_path, but occasionally queries fail to resolve the vector type. ## Solution When a query fails with this specific error, we now log: - `SHOW search_path` - what schemas are being searched - `SELECT current_schema()` - the active schema - `SELECT current_user, session_user, current_database()` - connection context This diagnostic info will help identify why the vector extension isn't visible in certain cases. 
## Changes - Added `_log_vector_error_diagnostics()` helper function in `backend/data/db.py` - Wrapped SQL execution in try/except to catch and diagnose vector type errors - Original exception is re-raised after logging (no behavior change) ## Testing This is observational/diagnostic code. It will be validated by waiting for the error to occur naturally on dev and checking the logs. ## Rollout Once we've captured diagnostic logs and identified the root cause, this logging can be removed or reduced in verbosity. --- .../api/features/store/hybrid_search.py | 94 ++++++++++++++++++- 1 file changed, 92 insertions(+), 2 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/store/hybrid_search.py b/autogpt_platform/backend/backend/api/features/store/hybrid_search.py index e1b8f402c8..b10bfbcc06 100644 --- a/autogpt_platform/backend/backend/api/features/store/hybrid_search.py +++ b/autogpt_platform/backend/backend/api/features/store/hybrid_search.py @@ -8,6 +8,7 @@ Includes BM25 reranking for improved lexical relevance. 
import logging import re +import time from dataclasses import dataclass from typing import Any, Literal @@ -362,7 +363,11 @@ async def unified_hybrid_search( LIMIT {limit_param} OFFSET {offset_param} """ - results = await query_raw_with_schema(sql_query, *params) + try: + results = await query_raw_with_schema(sql_query, *params) + except Exception as e: + await _log_vector_error_diagnostics(e) + raise total = results[0]["total_count"] if results else 0 # Apply BM25 reranking @@ -686,7 +691,11 @@ async def hybrid_search( LIMIT {limit_param} OFFSET {offset_param} """ - results = await query_raw_with_schema(sql_query, *params) + try: + results = await query_raw_with_schema(sql_query, *params) + except Exception as e: + await _log_vector_error_diagnostics(e) + raise total = results[0]["total_count"] if results else 0 @@ -718,6 +727,87 @@ async def hybrid_search_simple( return await hybrid_search(query=query, page=page, page_size=page_size) +# ============================================================================ +# Diagnostics +# ============================================================================ + +# Rate limit: only log vector error diagnostics once per this interval +_VECTOR_DIAG_INTERVAL_SECONDS = 60 +_last_vector_diag_time: float = 0 + + +async def _log_vector_error_diagnostics(error: Exception) -> None: + """Log diagnostic info when 'type vector does not exist' error occurs. + + Note: Diagnostic queries use query_raw_with_schema which may run on a different + pooled connection than the one that failed. Session-level search_path can differ, + so these diagnostics show cluster-wide state, not necessarily the failed session. + + Includes rate limiting to avoid log spam - only logs once per minute. + Caller should re-raise the error after calling this function. 
+ """ + global _last_vector_diag_time + + # Check if this is the vector type error + error_str = str(error).lower() + if not ( + "type" in error_str and "vector" in error_str and "does not exist" in error_str + ): + return + + # Rate limit: only log once per interval + now = time.time() + if now - _last_vector_diag_time < _VECTOR_DIAG_INTERVAL_SECONDS: + return + _last_vector_diag_time = now + + try: + diagnostics: dict[str, object] = {} + + try: + search_path_result = await query_raw_with_schema("SHOW search_path") + diagnostics["search_path"] = search_path_result + except Exception as e: + diagnostics["search_path"] = f"Error: {e}" + + try: + schema_result = await query_raw_with_schema("SELECT current_schema()") + diagnostics["current_schema"] = schema_result + except Exception as e: + diagnostics["current_schema"] = f"Error: {e}" + + try: + user_result = await query_raw_with_schema( + "SELECT current_user, session_user, current_database()" + ) + diagnostics["user_info"] = user_result + except Exception as e: + diagnostics["user_info"] = f"Error: {e}" + + try: + # Check pgvector extension installation (cluster-wide, stable info) + ext_result = await query_raw_with_schema( + "SELECT extname, extversion, nspname as schema " + "FROM pg_extension e " + "JOIN pg_namespace n ON e.extnamespace = n.oid " + "WHERE extname = 'vector'" + ) + diagnostics["pgvector_extension"] = ext_result + except Exception as e: + diagnostics["pgvector_extension"] = f"Error: {e}" + + logger.error( + f"Vector type error diagnostics:\n" + f" Error: {error}\n" + f" search_path: {diagnostics.get('search_path')}\n" + f" current_schema: {diagnostics.get('current_schema')}\n" + f" user_info: {diagnostics.get('user_info')}\n" + f" pgvector_extension: {diagnostics.get('pgvector_extension')}" + ) + except Exception as diag_error: + logger.error(f"Failed to collect vector error diagnostics: {diag_error}") + + # Backward compatibility alias - HybridSearchWeights maps to StoreAgentSearchWeights # for 
existing code that expects the popularity parameter HybridSearchWeights = StoreAgentSearchWeights From e596ea87cbae55eedba3bc58f144702e9e310504 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Feb 2026 18:22:25 +0700 Subject: [PATCH 06/11] chore(libs/deps-dev): bump pytest-cov from 6.2.1 to 7.0.0 in /autogpt_platform/autogpt_libs (#12030) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [pytest-cov](https://github.com/pytest-dev/pytest-cov) from 6.2.1 to 7.0.0.
Changelog

Sourced from pytest-cov's changelog.

7.0.0 (2025-09-09)

  • Dropped support for subprocesses measurement.

    It was a feature added a long time ago when coverage lacked a nice way to measure subprocesses created in tests. It relied on a .pth file, there was no way to opt-out and it created bad interactions with coverage's new patch system <https://coverage.readthedocs.io/en/latest/config.html#run-patch>_ added in 7.10 <https://coverage.readthedocs.io/en/7.10.6/changes.html#version-7-10-0-2025-07-24>_.

    To migrate to this release you might need to enable the subprocess patch, example for .coveragerc:

    .. code-block:: ini

    [run] patch = subprocess

    This release also requires at least coverage 7.10.6.

  • Switched packaging to have metadata completely in pyproject.toml and use hatchling <https://pypi.org/project/hatchling/>_ for building. Contributed by Ofek Lev in [#551](https://github.com/pytest-dev/pytest-cov/issues/551) <https://github.com/pytest-dev/pytest-cov/pull/551>_ with some extras in [#716](https://github.com/pytest-dev/pytest-cov/issues/716) <https://github.com/pytest-dev/pytest-cov/pull/716>_.

  • Removed some not really necessary testing deps like six.

6.3.0 (2025-09-06)

  • Added support for markdown reports. Contributed by Marcos Boger in [#712](https://github.com/pytest-dev/pytest-cov/issues/712) <https://github.com/pytest-dev/pytest-cov/pull/712>_ and [#714](https://github.com/pytest-dev/pytest-cov/issues/714) <https://github.com/pytest-dev/pytest-cov/pull/714>_.
  • Fixed some formatting issues in docs. Anonymous contribution in [#706](https://github.com/pytest-dev/pytest-cov/issues/706) <https://github.com/pytest-dev/pytest-cov/pull/706>_.
Commits
  • 224d896 Bump version: 6.3.0 β†’ 7.0.0
  • 73424e3 Cleanup the docs a bit.
  • 36f1cc2 Bump pins in template.
  • f299c59 Bump the github-actions group with 2 updates
  • 25f0b2e Update docs/config.rst
  • bb23eac Improve configuration docs
  • a19531e Switch from build/pre-commit to uv/prek - this should make this faster.
  • 82f9993 Update changelog.
  • 211b5cd Fix links.
  • 97aadd7 Update some ci config, reformat and apply some lint fixes.
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pytest-cov&package-manager=pip&previous-version=6.2.1&new-version=7.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Zamil Majdy Co-authored-by: Otto --- autogpt_platform/autogpt_libs/poetry.lock | 220 ++++++++++--------- autogpt_platform/autogpt_libs/pyproject.toml | 2 +- 2 files changed, 120 insertions(+), 102 deletions(-) diff --git a/autogpt_platform/autogpt_libs/poetry.lock b/autogpt_platform/autogpt_libs/poetry.lock index f1d1e932fe..0a421dda31 100644 --- a/autogpt_platform/autogpt_libs/poetry.lock +++ b/autogpt_platform/autogpt_libs/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. [[package]] name = "annotated-doc" @@ -67,7 +67,7 @@ description = "Backport of asyncio.Runner, a context manager that controls event optional = false python-versions = "<3.11,>=3.8" groups = ["dev"] -markers = "python_version == \"3.10\"" +markers = "python_version < \"3.11\"" files = [ {file = "backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5"}, {file = "backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162"}, @@ -326,100 +326,118 @@ files = [ [[package]] name = "coverage" -version = "7.10.5" +version = "7.13.4" description = "Code coverage measurement for Python" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["dev"] files = [ - {file = "coverage-7.10.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c6a5c3414bfc7451b879141ce772c546985163cf553f08e0f135f0699a911801"}, - {file = "coverage-7.10.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bc8e4d99ce82f1710cc3c125adc30fd1487d3cf6c2cd4994d78d68a47b16989a"}, - {file = "coverage-7.10.5-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash 
= "sha256:02252dc1216e512a9311f596b3169fad54abcb13827a8d76d5630c798a50a754"}, - {file = "coverage-7.10.5-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:73269df37883e02d460bee0cc16be90509faea1e3bd105d77360b512d5bb9c33"}, - {file = "coverage-7.10.5-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f8a81b0614642f91c9effd53eec284f965577591f51f547a1cbeb32035b4c2f"}, - {file = "coverage-7.10.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6a29f8e0adb7f8c2b95fa2d4566a1d6e6722e0a637634c6563cb1ab844427dd9"}, - {file = "coverage-7.10.5-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fcf6ab569436b4a647d4e91accba12509ad9f2554bc93d3aee23cc596e7f99c3"}, - {file = "coverage-7.10.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:90dc3d6fb222b194a5de60af8d190bedeeddcbc7add317e4a3cd333ee6b7c879"}, - {file = "coverage-7.10.5-cp310-cp310-win32.whl", hash = "sha256:414a568cd545f9dc75f0686a0049393de8098414b58ea071e03395505b73d7a8"}, - {file = "coverage-7.10.5-cp310-cp310-win_amd64.whl", hash = "sha256:e551f9d03347196271935fd3c0c165f0e8c049220280c1120de0084d65e9c7ff"}, - {file = "coverage-7.10.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c177e6ffe2ebc7c410785307758ee21258aa8e8092b44d09a2da767834f075f2"}, - {file = "coverage-7.10.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:14d6071c51ad0f703d6440827eaa46386169b5fdced42631d5a5ac419616046f"}, - {file = "coverage-7.10.5-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:61f78c7c3bc272a410c5ae3fde7792b4ffb4acc03d35a7df73ca8978826bb7ab"}, - {file = "coverage-7.10.5-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f39071caa126f69d63f99b324fb08c7b1da2ec28cbb1fe7b5b1799926492f65c"}, - {file = "coverage-7.10.5-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:343a023193f04d46edc46b2616cdbee68c94dd10208ecd3adc56fcc54ef2baa1"}, - {file = "coverage-7.10.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:585ffe93ae5894d1ebdee69fc0b0d4b7c75d8007983692fb300ac98eed146f78"}, - {file = "coverage-7.10.5-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b0ef4e66f006ed181df29b59921bd8fc7ed7cd6a9289295cd8b2824b49b570df"}, - {file = "coverage-7.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:eb7b0bbf7cc1d0453b843eca7b5fa017874735bef9bfdfa4121373d2cc885ed6"}, - {file = "coverage-7.10.5-cp311-cp311-win32.whl", hash = "sha256:1d043a8a06987cc0c98516e57c4d3fc2c1591364831e9deb59c9e1b4937e8caf"}, - {file = "coverage-7.10.5-cp311-cp311-win_amd64.whl", hash = "sha256:fefafcca09c3ac56372ef64a40f5fe17c5592fab906e0fdffd09543f3012ba50"}, - {file = "coverage-7.10.5-cp311-cp311-win_arm64.whl", hash = "sha256:7e78b767da8b5fc5b2faa69bb001edafcd6f3995b42a331c53ef9572c55ceb82"}, - {file = "coverage-7.10.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c2d05c7e73c60a4cecc7d9b60dbfd603b4ebc0adafaef371445b47d0f805c8a9"}, - {file = "coverage-7.10.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:32ddaa3b2c509778ed5373b177eb2bf5662405493baeff52278a0b4f9415188b"}, - {file = "coverage-7.10.5-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:dd382410039fe062097aa0292ab6335a3f1e7af7bba2ef8d27dcda484918f20c"}, - {file = "coverage-7.10.5-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7fa22800f3908df31cea6fb230f20ac49e343515d968cc3a42b30d5c3ebf9b5a"}, - {file = "coverage-7.10.5-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f366a57ac81f5e12797136552f5b7502fa053c861a009b91b80ed51f2ce651c6"}, - {file = "coverage-7.10.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5f1dc8f1980a272ad4a6c84cba7981792344dad33bf5869361576b7aef42733a"}, - {file = 
"coverage-7.10.5-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2285c04ee8676f7938b02b4936d9b9b672064daab3187c20f73a55f3d70e6b4a"}, - {file = "coverage-7.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c2492e4dd9daab63f5f56286f8a04c51323d237631eb98505d87e4c4ff19ec34"}, - {file = "coverage-7.10.5-cp312-cp312-win32.whl", hash = "sha256:38a9109c4ee8135d5df5505384fc2f20287a47ccbe0b3f04c53c9a1989c2bbaf"}, - {file = "coverage-7.10.5-cp312-cp312-win_amd64.whl", hash = "sha256:6b87f1ad60b30bc3c43c66afa7db6b22a3109902e28c5094957626a0143a001f"}, - {file = "coverage-7.10.5-cp312-cp312-win_arm64.whl", hash = "sha256:672a6c1da5aea6c629819a0e1461e89d244f78d7b60c424ecf4f1f2556c041d8"}, - {file = "coverage-7.10.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ef3b83594d933020f54cf65ea1f4405d1f4e41a009c46df629dd964fcb6e907c"}, - {file = "coverage-7.10.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2b96bfdf7c0ea9faebce088a3ecb2382819da4fbc05c7b80040dbc428df6af44"}, - {file = "coverage-7.10.5-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:63df1fdaffa42d914d5c4d293e838937638bf75c794cf20bee12978fc8c4e3bc"}, - {file = "coverage-7.10.5-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8002dc6a049aac0e81ecec97abfb08c01ef0c1fbf962d0c98da3950ace89b869"}, - {file = "coverage-7.10.5-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:63d4bb2966d6f5f705a6b0c6784c8969c468dbc4bcf9d9ded8bff1c7e092451f"}, - {file = "coverage-7.10.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1f672efc0731a6846b157389b6e6d5d5e9e59d1d1a23a5c66a99fd58339914d5"}, - {file = "coverage-7.10.5-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:3f39cef43d08049e8afc1fde4a5da8510fc6be843f8dea350ee46e2a26b2f54c"}, - {file = "coverage-7.10.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:2968647e3ed5a6c019a419264386b013979ff1fb67dd11f5c9886c43d6a31fc2"}, - {file = "coverage-7.10.5-cp313-cp313-win32.whl", hash = "sha256:0d511dda38595b2b6934c2b730a1fd57a3635c6aa2a04cb74714cdfdd53846f4"}, - {file = "coverage-7.10.5-cp313-cp313-win_amd64.whl", hash = "sha256:9a86281794a393513cf117177fd39c796b3f8e3759bb2764259a2abba5cce54b"}, - {file = "coverage-7.10.5-cp313-cp313-win_arm64.whl", hash = "sha256:cebd8e906eb98bb09c10d1feed16096700b1198d482267f8bf0474e63a7b8d84"}, - {file = "coverage-7.10.5-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0520dff502da5e09d0d20781df74d8189ab334a1e40d5bafe2efaa4158e2d9e7"}, - {file = "coverage-7.10.5-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d9cd64aca68f503ed3f1f18c7c9174cbb797baba02ca8ab5112f9d1c0328cd4b"}, - {file = "coverage-7.10.5-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0913dd1613a33b13c4f84aa6e3f4198c1a21ee28ccb4f674985c1f22109f0aae"}, - {file = "coverage-7.10.5-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1b7181c0feeb06ed8a02da02792f42f829a7b29990fef52eff257fef0885d760"}, - {file = "coverage-7.10.5-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36d42b7396b605f774d4372dd9c49bed71cbabce4ae1ccd074d155709dd8f235"}, - {file = "coverage-7.10.5-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b4fdc777e05c4940b297bf47bf7eedd56a39a61dc23ba798e4b830d585486ca5"}, - {file = "coverage-7.10.5-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:42144e8e346de44a6f1dbd0a56575dd8ab8dfa7e9007da02ea5b1c30ab33a7db"}, - {file = "coverage-7.10.5-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:66c644cbd7aed8fe266d5917e2c9f65458a51cfe5eeff9c05f15b335f697066e"}, - {file = "coverage-7.10.5-cp313-cp313t-win32.whl", hash = "sha256:2d1b73023854068c44b0c554578a4e1ef1b050ed07cf8b431549e624a29a66ee"}, - {file = "coverage-7.10.5-cp313-cp313t-win_amd64.whl", hash = 
"sha256:54a1532c8a642d8cc0bd5a9a51f5a9dcc440294fd06e9dda55e743c5ec1a8f14"}, - {file = "coverage-7.10.5-cp313-cp313t-win_arm64.whl", hash = "sha256:74d5b63fe3f5f5d372253a4ef92492c11a4305f3550631beaa432fc9df16fcff"}, - {file = "coverage-7.10.5-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:68c5e0bc5f44f68053369fa0d94459c84548a77660a5f2561c5e5f1e3bed7031"}, - {file = "coverage-7.10.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:cf33134ffae93865e32e1e37df043bef15a5e857d8caebc0099d225c579b0fa3"}, - {file = "coverage-7.10.5-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ad8fa9d5193bafcf668231294241302b5e683a0518bf1e33a9a0dfb142ec3031"}, - {file = "coverage-7.10.5-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:146fa1531973d38ab4b689bc764592fe6c2f913e7e80a39e7eeafd11f0ef6db2"}, - {file = "coverage-7.10.5-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6013a37b8a4854c478d3219ee8bc2392dea51602dd0803a12d6f6182a0061762"}, - {file = "coverage-7.10.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:eb90fe20db9c3d930fa2ad7a308207ab5b86bf6a76f54ab6a40be4012d88fcae"}, - {file = "coverage-7.10.5-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:384b34482272e960c438703cafe63316dfbea124ac62006a455c8410bf2a2262"}, - {file = "coverage-7.10.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:467dc74bd0a1a7de2bedf8deaf6811f43602cb532bd34d81ffd6038d6d8abe99"}, - {file = "coverage-7.10.5-cp314-cp314-win32.whl", hash = "sha256:556d23d4e6393ca898b2e63a5bca91e9ac2d5fb13299ec286cd69a09a7187fde"}, - {file = "coverage-7.10.5-cp314-cp314-win_amd64.whl", hash = "sha256:f4446a9547681533c8fa3e3c6cf62121eeee616e6a92bd9201c6edd91beffe13"}, - {file = "coverage-7.10.5-cp314-cp314-win_arm64.whl", hash = "sha256:5e78bd9cf65da4c303bf663de0d73bf69f81e878bf72a94e9af67137c69b9fe9"}, - {file = "coverage-7.10.5-cp314-cp314t-macosx_10_13_x86_64.whl", 
hash = "sha256:5661bf987d91ec756a47c7e5df4fbcb949f39e32f9334ccd3f43233bbb65e508"}, - {file = "coverage-7.10.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a46473129244db42a720439a26984f8c6f834762fc4573616c1f37f13994b357"}, - {file = "coverage-7.10.5-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1f64b8d3415d60f24b058b58d859e9512624bdfa57a2d1f8aff93c1ec45c429b"}, - {file = "coverage-7.10.5-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:44d43de99a9d90b20e0163f9770542357f58860a26e24dc1d924643bd6aa7cb4"}, - {file = "coverage-7.10.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a931a87e5ddb6b6404e65443b742cb1c14959622777f2a4efd81fba84f5d91ba"}, - {file = "coverage-7.10.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f9559b906a100029274448f4c8b8b0a127daa4dade5661dfd821b8c188058842"}, - {file = "coverage-7.10.5-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b08801e25e3b4526ef9ced1aa29344131a8f5213c60c03c18fe4c6170ffa2874"}, - {file = "coverage-7.10.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ed9749bb8eda35f8b636fb7632f1c62f735a236a5d4edadd8bbcc5ea0542e732"}, - {file = "coverage-7.10.5-cp314-cp314t-win32.whl", hash = "sha256:609b60d123fc2cc63ccee6d17e4676699075db72d14ac3c107cc4976d516f2df"}, - {file = "coverage-7.10.5-cp314-cp314t-win_amd64.whl", hash = "sha256:0666cf3d2c1626b5a3463fd5b05f5e21f99e6aec40a3192eee4d07a15970b07f"}, - {file = "coverage-7.10.5-cp314-cp314t-win_arm64.whl", hash = "sha256:bc85eb2d35e760120540afddd3044a5bf69118a91a296a8b3940dfc4fdcfe1e2"}, - {file = "coverage-7.10.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:62835c1b00c4a4ace24c1a88561a5a59b612fbb83a525d1c70ff5720c97c0610"}, - {file = "coverage-7.10.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5255b3bbcc1d32a4069d6403820ac8e6dbcc1d68cb28a60a1ebf17e47028e898"}, - {file = 
"coverage-7.10.5-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3876385722e335d6e991c430302c24251ef9c2a9701b2b390f5473199b1b8ebf"}, - {file = "coverage-7.10.5-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8048ce4b149c93447a55d279078c8ae98b08a6951a3c4d2d7e87f4efc7bfe100"}, - {file = "coverage-7.10.5-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4028e7558e268dd8bcf4d9484aad393cafa654c24b4885f6f9474bf53183a82a"}, - {file = "coverage-7.10.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03f47dc870eec0367fcdd603ca6a01517d2504e83dc18dbfafae37faec66129a"}, - {file = "coverage-7.10.5-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2d488d7d42b6ded7ea0704884f89dcabd2619505457de8fc9a6011c62106f6e5"}, - {file = "coverage-7.10.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b3dcf2ead47fa8be14224ee817dfc1df98043af568fe120a22f81c0eb3c34ad2"}, - {file = "coverage-7.10.5-cp39-cp39-win32.whl", hash = "sha256:02650a11324b80057b8c9c29487020073d5e98a498f1857f37e3f9b6ea1b2426"}, - {file = "coverage-7.10.5-cp39-cp39-win_amd64.whl", hash = "sha256:b45264dd450a10f9e03237b41a9a24e85cbb1e278e5a32adb1a303f58f0017f3"}, - {file = "coverage-7.10.5-py3-none-any.whl", hash = "sha256:0be24d35e4db1d23d0db5c0f6a74a962e2ec83c426b5cac09f4234aadef38e4a"}, - {file = "coverage-7.10.5.tar.gz", hash = "sha256:f2e57716a78bc3ae80b2207be0709a3b2b63b9f2dcf9740ee6ac03588a2015b6"}, + {file = "coverage-7.13.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0fc31c787a84f8cd6027eba44010517020e0d18487064cd3d8968941856d1415"}, + {file = "coverage-7.13.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a32ebc02a1805adf637fc8dec324b5cdacd2e493515424f70ee33799573d661b"}, + {file = "coverage-7.13.4-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:e24f9156097ff9dc286f2f913df3a7f63c0e333dcafa3c196f2c18b4175ca09a"}, + {file = 
"coverage-7.13.4-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8041b6c5bfdc03257666e9881d33b1abc88daccaf73f7b6340fb7946655cd10f"}, + {file = "coverage-7.13.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2a09cfa6a5862bc2fc6ca7c3def5b2926194a56b8ab78ffcf617d28911123012"}, + {file = "coverage-7.13.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:296f8b0af861d3970c2a4d8c91d48eb4dd4771bcef9baedec6a9b515d7de3def"}, + {file = "coverage-7.13.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e101609bcbbfb04605ea1027b10dc3735c094d12d40826a60f897b98b1c30256"}, + {file = "coverage-7.13.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:aa3feb8db2e87ff5e6d00d7e1480ae241876286691265657b500886c98f38bda"}, + {file = "coverage-7.13.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4fc7fa81bbaf5a02801b65346c8b3e657f1d93763e58c0abdf7c992addd81a92"}, + {file = "coverage-7.13.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:33901f604424145c6e9c2398684b92e176c0b12df77d52db81c20abd48c3794c"}, + {file = "coverage-7.13.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:bb28c0f2cf2782508a40cec377935829d5fcc3ad9a3681375af4e84eb34b6b58"}, + {file = "coverage-7.13.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9d107aff57a83222ddbd8d9ee705ede2af2cc926608b57abed8ef96b50b7e8f9"}, + {file = "coverage-7.13.4-cp310-cp310-win32.whl", hash = "sha256:a6f94a7d00eb18f1b6d403c91a88fd58cfc92d4b16080dfdb774afc8294469bf"}, + {file = "coverage-7.13.4-cp310-cp310-win_amd64.whl", hash = "sha256:2cb0f1e000ebc419632bbe04366a8990b6e32c4e0b51543a6484ffe15eaeda95"}, + {file = "coverage-7.13.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d490ba50c3f35dd7c17953c68f3270e7ccd1c6642e2d2afe2d8e720b98f5a053"}, + {file = "coverage-7.13.4-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:19bc3c88078789f8ef36acb014d7241961dbf883fd2533d18cb1e7a5b4e28b11"}, + {file = "coverage-7.13.4-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3998e5a32e62fdf410c0dbd3115df86297995d6e3429af80b8798aad894ca7aa"}, + {file = "coverage-7.13.4-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8e264226ec98e01a8e1054314af91ee6cde0eacac4f465cc93b03dbe0bce2fd7"}, + {file = "coverage-7.13.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a3aa4e7b9e416774b21797365b358a6e827ffadaaca81b69ee02946852449f00"}, + {file = "coverage-7.13.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:71ca20079dd8f27fcf808817e281e90220475cd75115162218d0e27549f95fef"}, + {file = "coverage-7.13.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e2f25215f1a359ab17320b47bcdaca3e6e6356652e8256f2441e4ef972052903"}, + {file = "coverage-7.13.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d65b2d373032411e86960604dc4edac91fdfb5dca539461cf2cbe78327d1e64f"}, + {file = "coverage-7.13.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94eb63f9b363180aff17de3e7c8760c3ba94664ea2695c52f10111244d16a299"}, + {file = "coverage-7.13.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e856bf6616714c3a9fbc270ab54103f4e685ba236fa98c054e8f87f266c93505"}, + {file = "coverage-7.13.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:65dfcbe305c3dfe658492df2d85259e0d79ead4177f9ae724b6fb245198f55d6"}, + {file = "coverage-7.13.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b507778ae8a4c915436ed5c2e05b4a6cecfa70f734e19c22a005152a11c7b6a9"}, + {file = "coverage-7.13.4-cp311-cp311-win32.whl", hash = "sha256:784fc3cf8be001197b652d51d3fd259b1e2262888693a4636e18879f613a62a9"}, + {file = "coverage-7.13.4-cp311-cp311-win_amd64.whl", hash = 
"sha256:2421d591f8ca05b308cf0092807308b2facbefe54af7c02ac22548b88b95c98f"}, + {file = "coverage-7.13.4-cp311-cp311-win_arm64.whl", hash = "sha256:79e73a76b854d9c6088fe5d8b2ebe745f8681c55f7397c3c0a016192d681045f"}, + {file = "coverage-7.13.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:02231499b08dabbe2b96612993e5fc34217cdae907a51b906ac7fca8027a4459"}, + {file = "coverage-7.13.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40aa8808140e55dc022b15d8aa7f651b6b3d68b365ea0398f1441e0b04d859c3"}, + {file = "coverage-7.13.4-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5b856a8ccf749480024ff3bd7310adaef57bf31fd17e1bfc404b7940b6986634"}, + {file = "coverage-7.13.4-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2c048ea43875fbf8b45d476ad79f179809c590ec7b79e2035c662e7afa3192e3"}, + {file = "coverage-7.13.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b7b38448866e83176e28086674fe7368ab8590e4610fb662b44e345b86d63ffa"}, + {file = "coverage-7.13.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:de6defc1c9badbf8b9e67ae90fd00519186d6ab64e5cc5f3d21359c2a9b2c1d3"}, + {file = "coverage-7.13.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7eda778067ad7ffccd23ecffce537dface96212576a07924cbf0d8799d2ded5a"}, + {file = "coverage-7.13.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e87f6c587c3f34356c3759f0420693e35e7eb0e2e41e4c011cb6ec6ecbbf1db7"}, + {file = "coverage-7.13.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:8248977c2e33aecb2ced42fef99f2d319e9904a36e55a8a68b69207fb7e43edc"}, + {file = "coverage-7.13.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:25381386e80ae727608e662474db537d4df1ecd42379b5ba33c84633a2b36d47"}, + {file = "coverage-7.13.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = 
"sha256:ee756f00726693e5ba94d6df2bdfd64d4852d23b09bb0bc700e3b30e6f333985"}, + {file = "coverage-7.13.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fdfc1e28e7c7cdce44985b3043bc13bbd9c747520f94a4d7164af8260b3d91f0"}, + {file = "coverage-7.13.4-cp312-cp312-win32.whl", hash = "sha256:01d4cbc3c283a17fc1e42d614a119f7f438eabb593391283adca8dc86eff1246"}, + {file = "coverage-7.13.4-cp312-cp312-win_amd64.whl", hash = "sha256:9401ebc7ef522f01d01d45532c68c5ac40fb27113019b6b7d8b208f6e9baa126"}, + {file = "coverage-7.13.4-cp312-cp312-win_arm64.whl", hash = "sha256:b1ec7b6b6e93255f952e27ab58fbc68dcc468844b16ecbee881aeb29b6ab4d8d"}, + {file = "coverage-7.13.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b66a2da594b6068b48b2692f043f35d4d3693fb639d5ea8b39533c2ad9ac3ab9"}, + {file = "coverage-7.13.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3599eb3992d814d23b35c536c28df1a882caa950f8f507cef23d1cbf334995ac"}, + {file = "coverage-7.13.4-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:93550784d9281e374fb5a12bf1324cc8a963fd63b2d2f223503ef0fd4aa339ea"}, + {file = "coverage-7.13.4-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b720ce6a88a2755f7c697c23268ddc47a571b88052e6b155224347389fdf6a3b"}, + {file = "coverage-7.13.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7b322db1284a2ed3aa28ffd8ebe3db91c929b7a333c0820abec3d838ef5b3525"}, + {file = "coverage-7.13.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f4594c67d8a7c89cf922d9df0438c7c7bb022ad506eddb0fdb2863359ff78242"}, + {file = "coverage-7.13.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:53d133df809c743eb8bce33b24bcababb371f4441340578cd406e084d94a6148"}, + {file = "coverage-7.13.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:76451d1978b95ba6507a039090ba076105c87cc76fc3efd5d35d72093964d49a"}, + {file = "coverage-7.13.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7f57b33491e281e962021de110b451ab8a24182589be17e12a22c79047935e23"}, + {file = "coverage-7.13.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:1731dc33dc276dafc410a885cbf5992f1ff171393e48a21453b78727d090de80"}, + {file = "coverage-7.13.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:bd60d4fe2f6fa7dff9223ca1bbc9f05d2b6697bc5961072e5d3b952d46e1b1ea"}, + {file = "coverage-7.13.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9181a3ccead280b828fae232df12b16652702b49d41e99d657f46cc7b1f6ec7a"}, + {file = "coverage-7.13.4-cp313-cp313-win32.whl", hash = "sha256:f53d492307962561ac7de4cd1de3e363589b000ab69617c6156a16ba7237998d"}, + {file = "coverage-7.13.4-cp313-cp313-win_amd64.whl", hash = "sha256:e6f70dec1cc557e52df5306d051ef56003f74d56e9c4dd7ddb07e07ef32a84dd"}, + {file = "coverage-7.13.4-cp313-cp313-win_arm64.whl", hash = "sha256:fb07dc5da7e849e2ad31a5d74e9bece81f30ecf5a42909d0a695f8bd1874d6af"}, + {file = "coverage-7.13.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:40d74da8e6c4b9ac18b15331c4b5ebc35a17069410cad462ad4f40dcd2d50c0d"}, + {file = "coverage-7.13.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4223b4230a376138939a9173f1bdd6521994f2aff8047fae100d6d94d50c5a12"}, + {file = "coverage-7.13.4-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1d4be36a5114c499f9f1f9195e95ebf979460dbe2d88e6816ea202010ba1c34b"}, + {file = "coverage-7.13.4-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:200dea7d1e8095cc6e98cdabe3fd1d21ab17d3cee6dab00cadbb2fe35d9c15b9"}, + {file = "coverage-7.13.4-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b8eb931ee8e6d8243e253e5ed7336deea6904369d2fd8ae6e43f68abbf167092"}, + {file = 
"coverage-7.13.4-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:75eab1ebe4f2f64d9509b984f9314d4aa788540368218b858dad56dc8f3e5eb9"}, + {file = "coverage-7.13.4-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c35eb28c1d085eb7d8c9b3296567a1bebe03ce72962e932431b9a61f28facf26"}, + {file = "coverage-7.13.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:eb88b316ec33760714a4720feb2816a3a59180fd58c1985012054fa7aebee4c2"}, + {file = "coverage-7.13.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7d41eead3cc673cbd38a4417deb7fd0b4ca26954ff7dc6078e33f6ff97bed940"}, + {file = "coverage-7.13.4-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:fb26a934946a6afe0e326aebe0730cdff393a8bc0bbb65a2f41e30feddca399c"}, + {file = "coverage-7.13.4-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:dae88bc0fc77edaa65c14be099bd57ee140cf507e6bfdeea7938457ab387efb0"}, + {file = "coverage-7.13.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:845f352911777a8e722bfce168958214951e07e47e5d5d9744109fa5fe77f79b"}, + {file = "coverage-7.13.4-cp313-cp313t-win32.whl", hash = "sha256:2fa8d5f8de70688a28240de9e139fa16b153cc3cbb01c5f16d88d6505ebdadf9"}, + {file = "coverage-7.13.4-cp313-cp313t-win_amd64.whl", hash = "sha256:9351229c8c8407645840edcc277f4a2d44814d1bc34a2128c11c2a031d45a5dd"}, + {file = "coverage-7.13.4-cp313-cp313t-win_arm64.whl", hash = "sha256:30b8d0512f2dc8c8747557e8fb459d6176a2c9e5731e2b74d311c03b78451997"}, + {file = "coverage-7.13.4-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:300deaee342f90696ed186e3a00c71b5b3d27bffe9e827677954f4ee56969601"}, + {file = "coverage-7.13.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:29e3220258d682b6226a9b0925bc563ed9a1ebcff3cad30f043eceea7eaf2689"}, + {file = "coverage-7.13.4-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = 
"sha256:391ee8f19bef69210978363ca930f7328081c6a0152f1166c91f0b5fdd2a773c"}, + {file = "coverage-7.13.4-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:0dd7ab8278f0d58a0128ba2fca25824321f05d059c1441800e934ff2efa52129"}, + {file = "coverage-7.13.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78cdf0d578b15148b009ccf18c686aa4f719d887e76e6b40c38ffb61d264a552"}, + {file = "coverage-7.13.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:48685fee12c2eb3b27c62f2658e7ea21e9c3239cba5a8a242801a0a3f6a8c62a"}, + {file = "coverage-7.13.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:4e83efc079eb39480e6346a15a1bcb3e9b04759c5202d157e1dd4303cd619356"}, + {file = "coverage-7.13.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ecae9737b72408d6a950f7e525f30aca12d4bd8dd95e37342e5beb3a2a8c4f71"}, + {file = "coverage-7.13.4-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ae4578f8528569d3cf303fef2ea569c7f4c4059a38c8667ccef15c6e1f118aa5"}, + {file = "coverage-7.13.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:6fdef321fdfbb30a197efa02d48fcd9981f0d8ad2ae8903ac318adc653f5df98"}, + {file = "coverage-7.13.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b0f6ccf3dbe577170bebfce1318707d0e8c3650003cb4b3a9dd744575daa8b5"}, + {file = "coverage-7.13.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75fcd519f2a5765db3f0e391eb3b7d150cce1a771bf4c9f861aeab86c767a3c0"}, + {file = "coverage-7.13.4-cp314-cp314-win32.whl", hash = "sha256:8e798c266c378da2bd819b0677df41ab46d78065fb2a399558f3f6cae78b2fbb"}, + {file = "coverage-7.13.4-cp314-cp314-win_amd64.whl", hash = "sha256:245e37f664d89861cf2329c9afa2c1fe9e6d4e1a09d872c947e70718aeeac505"}, + {file = "coverage-7.13.4-cp314-cp314-win_arm64.whl", hash = "sha256:ad27098a189e5838900ce4c2a99f2fe42a0bf0c2093c17c69b45a71579e8d4a2"}, + {file = 
"coverage-7.13.4-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:85480adfb35ffc32d40918aad81b89c69c9cc5661a9b8a81476d3e645321a056"}, + {file = "coverage-7.13.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:79be69cf7f3bf9b0deeeb062eab7ac7f36cd4cc4c4dd694bd28921ba4d8596cc"}, + {file = "coverage-7.13.4-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:caa421e2684e382c5d8973ac55e4f36bed6821a9bad5c953494de960c74595c9"}, + {file = "coverage-7.13.4-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:14375934243ee05f56c45393fe2ce81fe5cc503c07cee2bdf1725fb8bef3ffaf"}, + {file = "coverage-7.13.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:25a41c3104d08edb094d9db0d905ca54d0cd41c928bb6be3c4c799a54753af55"}, + {file = "coverage-7.13.4-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6f01afcff62bf9a08fb32b2c1d6e924236c0383c02c790732b6537269e466a72"}, + {file = "coverage-7.13.4-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eb9078108fbf0bcdde37c3f4779303673c2fa1fe8f7956e68d447d0dd426d38a"}, + {file = "coverage-7.13.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0e086334e8537ddd17e5f16a344777c1ab8194986ec533711cbe6c41cde841b6"}, + {file = "coverage-7.13.4-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:725d985c5ab621268b2edb8e50dfe57633dc69bda071abc470fed55a14935fd3"}, + {file = "coverage-7.13.4-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:3c06f0f1337c667b971ca2f975523347e63ec5e500b9aa5882d91931cd3ef750"}, + {file = "coverage-7.13.4-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:590c0ed4bf8e85f745e6b805b2e1c457b2e33d5255dd9729743165253bc9ad39"}, + {file = "coverage-7.13.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:eb30bf180de3f632cd043322dad5751390e5385108b2807368997d1a92a509d0"}, + {file = 
"coverage-7.13.4-cp314-cp314t-win32.whl", hash = "sha256:c4240e7eded42d131a2d2c4dec70374b781b043ddc79a9de4d55ca71f8e98aea"}, + {file = "coverage-7.13.4-cp314-cp314t-win_amd64.whl", hash = "sha256:4c7d3cc01e7350f2f0f6f7036caaf5673fb56b6998889ccfe9e1c1fe75a9c932"}, + {file = "coverage-7.13.4-cp314-cp314t-win_arm64.whl", hash = "sha256:23e3f687cf945070d1c90f85db66d11e3025665d8dafa831301a0e0038f3db9b"}, + {file = "coverage-7.13.4-py3-none-any.whl", hash = "sha256:1af1641e57cf7ba1bd67d677c9abdbcd6cc2ab7da3bca7fa1e2b7e50e65f2ad0"}, + {file = "coverage-7.13.4.tar.gz", hash = "sha256:e5c8f6ed1e61a8b2dcdf31eb0b9bbf0130750ca79c1c49eb898e2ad86f5ccc91"}, ] [package.dependencies] @@ -523,7 +541,7 @@ description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" groups = ["main", "dev"] -markers = "python_version == \"3.10\"" +markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, @@ -2162,23 +2180,23 @@ testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] [[package]] name = "pytest-cov" -version = "6.2.1" +version = "7.0.0" description = "Pytest plugin for measuring coverage." 
optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "pytest_cov-6.2.1-py3-none-any.whl", hash = "sha256:f5bc4c23f42f1cdd23c70b1dab1bbaef4fc505ba950d53e0081d0730dd7e86d5"}, - {file = "pytest_cov-6.2.1.tar.gz", hash = "sha256:25cc6cc0a5358204b8108ecedc51a9b57b34cc6b8c967cc2c01a4e00d8a67da2"}, + {file = "pytest_cov-7.0.0-py3-none-any.whl", hash = "sha256:3b8e9558b16cc1479da72058bdecf8073661c7f57f7d3c5f22a1c23507f2d861"}, + {file = "pytest_cov-7.0.0.tar.gz", hash = "sha256:33c97eda2e049a0c5298e91f519302a1334c26ac65c1a483d6206fd458361af1"}, ] [package.dependencies] -coverage = {version = ">=7.5", extras = ["toml"]} +coverage = {version = ">=7.10.6", extras = ["toml"]} pluggy = ">=1.2" -pytest = ">=6.2.5" +pytest = ">=7" [package.extras] -testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] +testing = ["process-tests", "pytest-xdist", "virtualenv"] [[package]] name = "pytest-mock" @@ -2545,7 +2563,7 @@ description = "A lil' TOML parser" optional = false python-versions = ">=3.8" groups = ["dev"] -markers = "python_version == \"3.10\"" +markers = "python_version < \"3.11\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, @@ -2893,4 +2911,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = ">=3.10,<4.0" -content-hash = "b7ac335a86aa44c3d7d2802298818b389a6f1286e3e9b7b0edb2ff06377cecaf" +content-hash = "40eae94995dc0a388fa832ed4af9b6137f28d5b5ced3aaea70d5f91d4d9a179d" diff --git a/autogpt_platform/autogpt_libs/pyproject.toml b/autogpt_platform/autogpt_libs/pyproject.toml index 9b2bcb8fbe..8deb4d2169 100644 --- a/autogpt_platform/autogpt_libs/pyproject.toml +++ b/autogpt_platform/autogpt_libs/pyproject.toml @@ -26,7 +26,7 @@ pyright = "^1.1.408" pytest = 
"^8.4.1" pytest-asyncio = "^1.3.0" pytest-mock = "^3.15.1" -pytest-cov = "^6.2.1" +pytest-cov = "^7.0.0" ruff = "^0.15.0" [build-system] From 7d4c020a9bd6b25dc4e25c2934e4ae6656af6257 Mon Sep 17 00:00:00 2001 From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com> Date: Tue, 10 Feb 2026 18:42:21 +0530 Subject: [PATCH 07/11] feat(chat): implement AI SDK integration with custom streaming response handling (#11901) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes πŸ—οΈ - Added AI SDK integration for chat streaming with proper message handling - Implemented custom to_sse method in StreamToolOutputAvailable to exclude non-spec fields - Modified stream_chat_completion to reuse message IDs for tool call continuations - Created new Copilot 2.0 UI with AI SDK React components - Added streamdown and related packages for markdown rendering - Built reusable conversation and message components for the chat interface - Added support for tool output display in the chat UI ### Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Start a new chat session and verify streaming works correctly - [x] Test tool calls and verify they display properly in the UI - [x] Verify message continuations don't create duplicate messages - [x] Test markdown rendering with code blocks and other formatting - [x] Verify the UI is responsive and scrolls correctly #### For configuration changes: - [x] `.env.default` is updated or already compatible with my changes - [x] `docker-compose.yml` is updated or already compatible with my changes - [x] I have included a list of my configuration changes in the PR description (under **Changes**) --------- Co-authored-by: Lluis Agusti Co-authored-by: Ubbe --- .github/workflows/platform-fullstack-ci.yml | 4 +- .../api/features/chat/response_model.py | 47 +- 
.../backend/api/features/chat/routes.py | 250 +-- .../backend/api/features/chat/service.py | 36 +- .../api/features/chat/stream_registry.py | 4 + autogpt_platform/frontend/Dockerfile | 6 +- autogpt_platform/frontend/next.config.mjs | 51 +- autogpt_platform/frontend/package.json | 14 +- autogpt_platform/frontend/pnpm-lock.yaml | 1180 ++++++++++++- .../useBlockMenuSearchBar.ts | 2 +- .../NewBlockMenu/HorizontalScroll.tsx | 4 +- .../app/(platform)/copilot/CopilotPage.tsx | 76 + .../ChatContainer/ChatContainer.tsx | 74 + .../components/ChatInput/ChatInput.tsx | 17 +- .../ChatInput/components/AudioWaveform.tsx | 0 .../components/RecordingIndicator.tsx | 0 .../copilot}/components/ChatInput/helpers.ts | 0 .../components/ChatInput/useChatInput.ts | 44 +- .../components/ChatInput/useVoiceRecording.ts | 32 +- .../ChatMessagesContainer.tsx | 274 +++ .../components/ChatSidebar/ChatSidebar.tsx | 188 ++ .../CopilotChatActionsProvider.tsx | 16 + .../useCopilotChatActions.ts | 23 + .../components/CopilotShell/CopilotShell.tsx | 99 -- .../DesktopSidebar/DesktopSidebar.tsx | 70 - .../components/MobileDrawer/MobileDrawer.tsx | 91 - .../MobileDrawer/useMobileDrawer.ts | 24 - .../components/SessionsList/SessionsList.tsx | 80 - .../SessionsList/useSessionsPagination.ts | 91 - .../components/CopilotShell/helpers.ts | 106 -- .../CopilotShell/useCopilotShell.ts | 124 -- .../CopilotShell/useShellSessionList.ts | 113 -- .../components/EmptySession/EmptySession.tsx | 111 ++ .../{ => components/EmptySession}/helpers.ts | 51 +- .../components/MobileDrawer/MobileDrawer.tsx | 140 ++ .../MobileHeader/MobileHeader.tsx | 0 .../MorphingTextAnimation.tsx | 54 + .../OrbitLoader/OrbitLoader.module.css | 69 + .../components/OrbitLoader/OrbitLoader.tsx | 28 + .../components/ProgressBar/ProgressBar.tsx | 26 + .../PulseLoader/PulseLoader.module.css | 34 + .../components/PulseLoader/PulseLoader.tsx | 16 + .../SpinnerLoader/SpinnerLoader.module.css | 57 + .../SpinnerLoader/SpinnerLoader.tsx | 16 + 
.../ToolAccordion/AccordionContent.tsx | 235 +++ .../ToolAccordion/ToolAccordion.tsx | 102 ++ .../ToolAccordion/useToolAccordion.ts | 32 + .../(platform)/copilot/copilot-page-store.ts | 56 - .../helpers/convertChatSessionToUiMessages.ts | 128 ++ .../Untitled} | 2 +- .../copilot/hooks}/useAsymptoticProgress.ts | 7 +- .../src/app/(platform)/copilot/layout.tsx | 13 - .../src/app/(platform)/copilot/page.tsx | 150 +- .../(platform)/copilot/styleguide/page.tsx | 1533 +++++++++++++++++ .../copilot/tools/CreateAgent/CreateAgent.tsx | 237 +++ .../ClarificationQuestionsCard.tsx} | 14 +- .../copilot/tools/CreateAgent/helpers.tsx | 186 ++ .../copilot/tools/EditAgent/EditAgent.tsx | 234 +++ .../copilot/tools/EditAgent/helpers.tsx | 188 ++ .../copilot/tools/FindAgents/FindAgents.tsx | 127 ++ .../copilot/tools/FindAgents/helpers.tsx | 187 ++ .../copilot/tools/FindBlocks/FindBlocks.tsx | 92 + .../copilot/tools/FindBlocks/helpers.tsx | 75 + .../copilot/tools/RunAgent/RunAgent.tsx | 93 + .../AgentDetailsCard/AgentDetailsCard.tsx | 116 ++ .../components/AgentDetailsCard/helpers.ts | 8 + .../components/ErrorCard/ErrorCard.tsx | 27 + .../ExecutionStartedCard.tsx | 39 + .../SetupRequirementsCard.tsx | 105 ++ .../SetupRequirementsCard/helpers.ts | 116 ++ .../copilot/tools/RunAgent/helpers.tsx | 248 +++ .../copilot/tools/RunBlock/RunBlock.tsx | 76 + .../BlockOutputCard/BlockOutputCard.tsx | 133 ++ .../components/ErrorCard/ErrorCard.tsx | 27 + .../SetupRequirementsCard.tsx | 197 +++ .../SetupRequirementsCard/helpers.ts | 156 ++ .../copilot/tools/RunBlock/helpers.tsx | 185 ++ .../copilot/tools/SearchDocs/SearchDocs.tsx | 186 ++ .../copilot/tools/SearchDocs/helpers.tsx | 215 +++ .../tools/ViewAgentOutput/ViewAgentOutput.tsx | 261 +++ .../copilot/tools/ViewAgentOutput/helpers.tsx | 158 ++ .../app/(platform)/copilot/useChatSession.ts | 109 ++ .../app/(platform)/copilot/useCopilotPage.ts | 221 +-- .../RunDetailHeader/RunDetailHeader.tsx | 9 +- .../components/ScheduleListItem.tsx | 6 +- 
.../components/TaskListItem.tsx | 8 +- .../components/TemplateListItem.tsx | 6 +- .../components/TriggerListItem.tsx | 4 +- .../components/agent-run-details-view.tsx | 8 +- .../components/agent-run-summary-card.tsx | 8 +- .../LibrarySearchBar/useLibrarySearchbar.tsx | 2 +- .../monitoring/components/AgentFlowList.tsx | 10 +- .../monitoring/components/FlowRunInfo.tsx | 10 +- .../monitoring/components/FlowRunsList.tsx | 6 +- .../components/FlowRunsTimeline.tsx | 16 +- .../chat/sessions/[sessionId]/stream/route.ts | 33 +- .../frontend/src/app/api/openapi.json | 957 +++++++++- autogpt_platform/frontend/src/app/globals.css | 17 + .../components/ai-elements/conversation.tsx | 109 ++ .../src/components/ai-elements/message.tsx | 338 ++++ .../atoms/OverflowText/OverflowText.tsx | 2 +- .../src/components/atoms/Text/Text.tsx | 3 +- .../src/components/contextual/Chat/Chat.tsx | 114 -- .../contextual/Chat/SSE_RECONNECTION.md | 159 -- .../contextual/Chat/chat-constants.ts | 16 - .../components/contextual/Chat/chat-store.ts | 501 ------ .../components/contextual/Chat/chat-types.ts | 163 -- .../components/AIChatBubble/AIChatBubble.tsx | 15 - .../AgentCarouselMessage.tsx | 119 -- .../AgentInputsSetup/AgentInputsSetup.tsx | 246 --- .../AgentInputsSetup/useAgentInputsSetup.ts | 38 - .../AuthPromptWidget/AuthPromptWidget.tsx | 120 -- .../ChatContainer/ChatContainer.tsx | 130 -- .../createStreamEventDispatcher.ts | 89 - .../Chat/components/ChatContainer/handlers.ts | 362 ---- .../Chat/components/ChatContainer/helpers.ts | 607 ------- .../ChatContainer/useChatContainer.ts | 517 ------ .../ChatCredentialsSetup.tsx | 151 -- .../useChatCredentialsSetup.ts | 36 - .../ChatErrorState/ChatErrorState.tsx | 30 - .../Chat/components/ChatLoader/ChatLoader.tsx | 7 - .../ChatLoadingState/ChatLoadingState.tsx | 19 - .../components/ChatMessage/ChatMessage.tsx | 448 ----- .../components/ChatMessage/useChatMessage.ts | 157 -- .../ExecutionStartedMessage.tsx | 90 - .../MarkdownContent/MarkdownContent.tsx 
| 349 ---- .../MessageBubble/MessageBubble.tsx | 53 - .../components/MessageList/MessageList.tsx | 115 -- .../LastToolResponse/LastToolResponse.tsx | 30 - .../components/MessageItem/MessageItem.tsx | 40 - .../components/MessageItem/useMessageItem.ts | 62 - .../Chat/components/MessageList/helpers.ts | 68 - .../components/MessageList/useMessageList.ts | 28 - .../NoResultsMessage/NoResultsMessage.tsx | 64 - .../PendingOperationWidget.tsx | 109 -- .../QuickActionsWelcome.tsx | 94 - .../SessionsDrawer/SessionsDrawer.tsx | 136 -- .../StreamingMessage/StreamingMessage.tsx | 35 - .../StreamingMessage/useStreamingMessage.ts | 25 - .../ThinkingMessage/ThinkingMessage.tsx | 82 - .../ToolCallMessage/ToolCallMessage.tsx | 55 - .../components/ToolCallMessage/helpers.ts | 184 -- .../AgentCreatedPrompt.tsx | 128 -- .../ToolResponseMessage.tsx | 69 - .../components/ToolResponseMessage/helpers.ts | 461 ----- .../UserChatBubble/UserChatBubble.tsx | 25 - .../contextual/Chat/stream-executor.ts | 255 --- .../contextual/Chat/stream-utils.ts | 89 - .../src/components/contextual/Chat/useChat.ts | 100 -- .../contextual/Chat/useChatSession.ts | 385 ----- .../contextual/Chat/useChatStream.ts | 115 -- .../contextual/Chat/usePageContext.ts | 98 -- .../src/components/layout/Navbar/Navbar.tsx | 2 +- .../renderers/InputRenderer/FormRenderer.tsx | 2 - .../InputRenderer/base/anyof/AnyOfField.tsx | 2 +- .../src/components/ui/button-group.tsx | 83 + .../frontend/src/components/ui/button.tsx | 59 + .../frontend/src/components/ui/input.tsx | 22 + .../frontend/src/components/ui/separator.tsx | 31 + .../frontend/src/components/ui/sheet.tsx | 143 ++ .../frontend/src/components/ui/sidebar.tsx | 778 +++++++++ .../frontend/src/components/ui/skeleton.tsx | 18 + .../frontend/src/components/ui/tooltip.tsx | 32 + .../frontend/src/hooks/use-mobile.tsx | 21 + autogpt_platform/frontend/src/lib/utils.ts | 2 +- .../frontend/src/tests/signin.spec.ts | 2 +- autogpt_platform/frontend/tailwind.config.ts | 10 + 167 files 
changed, 11359 insertions(+), 8804 deletions(-) create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/CopilotPage.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatContainer/ChatContainer.tsx rename autogpt_platform/frontend/src/{components/contextual/Chat => app/(platform)/copilot}/components/ChatInput/ChatInput.tsx (93%) rename autogpt_platform/frontend/src/{components/contextual/Chat => app/(platform)/copilot}/components/ChatInput/components/AudioWaveform.tsx (100%) rename autogpt_platform/frontend/src/{components/contextual/Chat => app/(platform)/copilot}/components/ChatInput/components/RecordingIndicator.tsx (100%) rename autogpt_platform/frontend/src/{components/contextual/Chat => app/(platform)/copilot}/components/ChatInput/helpers.ts (100%) rename autogpt_platform/frontend/src/{components/contextual/Chat => app/(platform)/copilot}/components/ChatInput/useChatInput.ts (83%) rename autogpt_platform/frontend/src/{components/contextual/Chat => app/(platform)/copilot}/components/ChatInput/useVoiceRecording.ts (86%) create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatMessagesContainer/ChatMessagesContainer.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatSidebar/ChatSidebar.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotChatActionsProvider/CopilotChatActionsProvider.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotChatActionsProvider/useCopilotChatActions.ts delete mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/DesktopSidebar/DesktopSidebar.tsx delete mode 100644 
autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/MobileDrawer.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/useMobileDrawer.ts delete mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/SessionsList.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts delete mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts delete mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts delete mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useShellSessionList.ts create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/EmptySession/EmptySession.tsx rename autogpt_platform/frontend/src/app/(platform)/copilot/{ => components/EmptySession}/helpers.ts (72%) create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/MobileDrawer/MobileDrawer.tsx rename autogpt_platform/frontend/src/app/(platform)/copilot/components/{CopilotShell/components => }/MobileHeader/MobileHeader.tsx (100%) create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/MorphingTextAnimation/MorphingTextAnimation.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/OrbitLoader/OrbitLoader.module.css create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/OrbitLoader/OrbitLoader.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/ProgressBar/ProgressBar.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/PulseLoader/PulseLoader.module.css create mode 100644 
autogpt_platform/frontend/src/app/(platform)/copilot/components/PulseLoader/PulseLoader.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/SpinnerLoader/SpinnerLoader.module.css create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/SpinnerLoader/SpinnerLoader.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/ToolAccordion/AccordionContent.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/ToolAccordion/ToolAccordion.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/ToolAccordion/useToolAccordion.ts delete mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/helpers/convertChatSessionToUiMessages.ts rename autogpt_platform/frontend/src/app/(platform)/copilot/{useCopilotSessionId.ts => hooks/Untitled} (99%) rename autogpt_platform/frontend/src/{components/contextual/Chat/components/ToolCallMessage => app/(platform)/copilot/hooks}/useAsymptoticProgress.ts (83%) delete mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/styleguide/page.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx rename autogpt_platform/frontend/src/{components/contextual/Chat/components/ClarificationQuestionsWidget/ClarificationQuestionsWidget.tsx => app/(platform)/copilot/tools/CreateAgent/components/ClarificationQuestionsCard.tsx} (98%) create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/helpers.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/EditAgent/EditAgent.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/EditAgent/helpers.tsx create mode 100644 
autogpt_platform/frontend/src/app/(platform)/copilot/tools/FindAgents/FindAgents.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/FindAgents/helpers.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/FindBlocks/FindBlocks.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/FindBlocks/helpers.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/RunAgent.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/AgentDetailsCard/AgentDetailsCard.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/AgentDetailsCard/helpers.ts create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/ErrorCard/ErrorCard.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/ExecutionStartedCard/ExecutionStartedCard.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/SetupRequirementsCard/SetupRequirementsCard.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/SetupRequirementsCard/helpers.ts create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/helpers.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/RunBlock.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/components/BlockOutputCard/BlockOutputCard.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/components/ErrorCard/ErrorCard.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/components/SetupRequirementsCard/SetupRequirementsCard.tsx create mode 100644 
autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/components/SetupRequirementsCard/helpers.ts create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/helpers.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/SearchDocs/SearchDocs.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/SearchDocs/helpers.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/ViewAgentOutput/ViewAgentOutput.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/tools/ViewAgentOutput/helpers.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/useChatSession.ts create mode 100644 autogpt_platform/frontend/src/components/ai-elements/conversation.tsx create mode 100644 autogpt_platform/frontend/src/components/ai-elements/message.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/SSE_RECONNECTION.md delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/chat-constants.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/chat-store.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/chat-types.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/AIChatBubble/AIChatBubble.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/AgentCarouselMessage/AgentCarouselMessage.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/AgentInputsSetup.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/useAgentInputsSetup.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx delete mode 100644 
autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatCredentialsSetup/useChatCredentialsSetup.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatErrorState/ChatErrorState.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatLoader/ChatLoader.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatLoadingState/ChatLoadingState.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/useChatMessage.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ExecutionStartedMessage/ExecutionStartedMessage.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/MarkdownContent/MarkdownContent.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/MessageBubble/MessageBubble.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/MessageList.tsx delete mode 100644 
autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/MessageItem/MessageItem.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/MessageItem/useMessageItem.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/helpers.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/useMessageList.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/NoResultsMessage/NoResultsMessage.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/PendingOperationWidget/PendingOperationWidget.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/QuickActionsWelcome/QuickActionsWelcome.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/SessionsDrawer/SessionsDrawer.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/StreamingMessage/StreamingMessage.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/StreamingMessage/useStreamingMessage.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ThinkingMessage/ThinkingMessage.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ToolCallMessage/ToolCallMessage.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ToolCallMessage/helpers.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/AgentCreatedPrompt.tsx delete mode 100644 
autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/UserChatBubble/UserChatBubble.tsx delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/stream-executor.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/stream-utils.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/useChatStream.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/usePageContext.ts create mode 100644 autogpt_platform/frontend/src/components/ui/button-group.tsx create mode 100644 autogpt_platform/frontend/src/components/ui/button.tsx create mode 100644 autogpt_platform/frontend/src/components/ui/input.tsx create mode 100644 autogpt_platform/frontend/src/components/ui/separator.tsx create mode 100644 autogpt_platform/frontend/src/components/ui/sheet.tsx create mode 100644 autogpt_platform/frontend/src/components/ui/sidebar.tsx create mode 100644 autogpt_platform/frontend/src/components/ui/skeleton.tsx create mode 100644 autogpt_platform/frontend/src/components/ui/tooltip.tsx create mode 100644 autogpt_platform/frontend/src/hooks/use-mobile.tsx diff --git a/.github/workflows/platform-fullstack-ci.yml b/.github/workflows/platform-fullstack-ci.yml index 67be0ae939..ab483b98af 100644 --- a/.github/workflows/platform-fullstack-ci.yml +++ b/.github/workflows/platform-fullstack-ci.yml @@ -56,7 +56,7 @@ jobs: run: pnpm install --frozen-lockfile types: - runs-on: ubuntu-latest + runs-on: big-boi needs: setup strategy: fail-fast: false @@ -85,7 +85,7 @@ jobs: - 
name: Run docker compose run: | - docker compose -f ../docker-compose.yml --profile local --profile deps_backend up -d + docker compose -f ../docker-compose.yml --profile local up -d deps_backend - name: Restore dependencies cache uses: actions/cache@v5 diff --git a/autogpt_platform/backend/backend/api/features/chat/response_model.py b/autogpt_platform/backend/backend/api/features/chat/response_model.py index f627a42fcc..1ae836f7d1 100644 --- a/autogpt_platform/backend/backend/api/features/chat/response_model.py +++ b/autogpt_platform/backend/backend/api/features/chat/response_model.py @@ -18,6 +18,10 @@ class ResponseType(str, Enum): START = "start" FINISH = "finish" + # Step lifecycle (one LLM API call within a message) + START_STEP = "start-step" + FINISH_STEP = "finish-step" + # Text streaming TEXT_START = "text-start" TEXT_DELTA = "text-delta" @@ -57,6 +61,16 @@ class StreamStart(StreamBaseResponse): description="Task ID for SSE reconnection. Clients can reconnect using GET /tasks/{taskId}/stream", ) + def to_sse(self) -> str: + """Convert to SSE format, excluding non-protocol fields like taskId.""" + import json + + data: dict[str, Any] = { + "type": self.type.value, + "messageId": self.messageId, + } + return f"data: {json.dumps(data)}\n\n" + class StreamFinish(StreamBaseResponse): """End of message/stream.""" @@ -64,6 +78,26 @@ class StreamFinish(StreamBaseResponse): type: ResponseType = ResponseType.FINISH +class StreamStartStep(StreamBaseResponse): + """Start of a step (one LLM API call within a message). + + The AI SDK uses this to add a step-start boundary to message.parts, + enabling visual separation between multiple LLM calls in a single message. + """ + + type: ResponseType = ResponseType.START_STEP + + +class StreamFinishStep(StreamBaseResponse): + """End of a step (one LLM API call within a message). 
+ + The AI SDK uses this to reset activeTextParts and activeReasoningParts, + so the next LLM call in a tool-call continuation starts with clean state. + """ + + type: ResponseType = ResponseType.FINISH_STEP + + # ========== Text Streaming ========== @@ -117,7 +151,7 @@ class StreamToolOutputAvailable(StreamBaseResponse): type: ResponseType = ResponseType.TOOL_OUTPUT_AVAILABLE toolCallId: str = Field(..., description="Tool call ID this responds to") output: str | dict[str, Any] = Field(..., description="Tool execution output") - # Additional fields for internal use (not part of AI SDK spec but useful) + # Keep these for internal backend use toolName: str | None = Field( default=None, description="Name of the tool that was executed" ) @@ -125,6 +159,17 @@ class StreamToolOutputAvailable(StreamBaseResponse): default=True, description="Whether the tool execution succeeded" ) + def to_sse(self) -> str: + """Convert to SSE format, excluding non-spec fields.""" + import json + + data = { + "type": self.type.value, + "toolCallId": self.toolCallId, + "output": self.output, + } + return f"data: {json.dumps(data)}\n\n" + # ========== Other ========== diff --git a/autogpt_platform/backend/backend/api/features/chat/routes.py b/autogpt_platform/backend/backend/api/features/chat/routes.py index 74e6e8ba1e..c6f37569b7 100644 --- a/autogpt_platform/backend/backend/api/features/chat/routes.py +++ b/autogpt_platform/backend/backend/api/features/chat/routes.py @@ -6,7 +6,7 @@ from collections.abc import AsyncGenerator from typing import Annotated from autogpt_libs import auth -from fastapi import APIRouter, Depends, Header, HTTPException, Query, Security +from fastapi import APIRouter, Depends, Header, HTTPException, Query, Response, Security from fastapi.responses import StreamingResponse from pydantic import BaseModel @@ -17,7 +17,29 @@ from . 
import stream_registry from .completion_handler import process_operation_failure, process_operation_success from .config import ChatConfig from .model import ChatSession, create_chat_session, get_chat_session, get_user_sessions -from .response_model import StreamFinish, StreamHeartbeat, StreamStart +from .response_model import StreamFinish, StreamHeartbeat +from .tools.models import ( + AgentDetailsResponse, + AgentOutputResponse, + AgentPreviewResponse, + AgentSavedResponse, + AgentsFoundResponse, + BlockListResponse, + BlockOutputResponse, + ClarificationNeededResponse, + DocPageResponse, + DocSearchResultsResponse, + ErrorResponse, + ExecutionStartedResponse, + InputValidationErrorResponse, + NeedLoginResponse, + NoResultsResponse, + OperationInProgressResponse, + OperationPendingResponse, + OperationStartedResponse, + SetupRequirementsResponse, + UnderstandingUpdatedResponse, +) config = ChatConfig() @@ -269,8 +291,6 @@ async def stream_chat_post( import time stream_start_time = time.perf_counter() - - # Base log metadata (task_id added after creation) log_meta = {"component": "ChatStream", "session_id": session_id} if user_id: log_meta["user_id"] = user_id @@ -328,24 +348,6 @@ async def stream_chat_post( first_chunk_time, ttfc = None, None chunk_count = 0 try: - # Emit a start event with task_id for reconnection - start_chunk = StreamStart(messageId=task_id, taskId=task_id) - await stream_registry.publish_chunk(task_id, start_chunk) - logger.info( - f"[TIMING] StreamStart published at {(time_module.perf_counter() - gen_start_time)*1000:.1f}ms", - extra={ - "json_fields": { - **log_meta, - "elapsed_ms": (time_module.perf_counter() - gen_start_time) - * 1000, - } - }, - ) - - logger.info( - "[TIMING] Calling stream_chat_completion", - extra={"json_fields": log_meta}, - ) async for chunk in chat_service.stream_chat_completion( session_id, request.message, @@ -353,6 +355,7 @@ async def stream_chat_post( user_id=user_id, session=session, # Pass pre-fetched session 
to avoid double-fetch context=request.context, + _task_id=task_id, # Pass task_id so service emits start with taskId for reconnection ): chunk_count += 1 if first_chunk_time is None: @@ -388,7 +391,6 @@ async def stream_chat_post( } }, ) - await stream_registry.mark_task_completed(task_id, "completed") except Exception as e: elapsed = time_module.perf_counter() - gen_start_time @@ -428,34 +430,13 @@ async def stream_chat_post( chunks_yielded = 0 try: # Subscribe to the task stream (this replays existing messages + live updates) - subscribe_start = time_module.perf_counter() - logger.info( - "[TIMING] Calling subscribe_to_task", - extra={"json_fields": log_meta}, - ) subscriber_queue = await stream_registry.subscribe_to_task( task_id=task_id, user_id=user_id, last_message_id="0-0", # Get all messages from the beginning ) - subscribe_time = (time_module.perf_counter() - subscribe_start) * 1000 - logger.info( - f"[TIMING] subscribe_to_task completed in {subscribe_time:.1f}ms, " - f"queue_ok={subscriber_queue is not None}", - extra={ - "json_fields": { - **log_meta, - "duration_ms": subscribe_time, - "queue_obtained": subscriber_queue is not None, - } - }, - ) if subscriber_queue is None: - logger.info( - "[TIMING] subscriber_queue is None, yielding finish", - extra={"json_fields": log_meta}, - ) yield StreamFinish().to_sse() yield "data: [DONE]\n\n" return @@ -467,11 +448,7 @@ async def stream_chat_post( ) while True: try: - queue_wait_start = time_module.perf_counter() chunk = await asyncio.wait_for(subscriber_queue.get(), timeout=30.0) - queue_wait_time = ( - time_module.perf_counter() - queue_wait_start - ) * 1000 chunks_yielded += 1 if not first_chunk_yielded: @@ -479,26 +456,12 @@ async def stream_chat_post( elapsed = time_module.perf_counter() - event_gen_start logger.info( f"[TIMING] FIRST CHUNK from queue at {elapsed:.2f}s, " - f"type={type(chunk).__name__}, " - f"wait={queue_wait_time:.1f}ms", + f"type={type(chunk).__name__}", extra={ "json_fields": { 
**log_meta, "chunk_type": type(chunk).__name__, "elapsed_ms": elapsed * 1000, - "queue_wait_ms": queue_wait_time, - } - }, - ) - elif chunks_yielded % 50 == 0: - logger.info( - f"[TIMING] Chunk #{chunks_yielded}, " - f"type={type(chunk).__name__}", - extra={ - "json_fields": { - **log_meta, - "chunk_number": chunks_yielded, - "chunk_type": type(chunk).__name__, } }, ) @@ -521,13 +484,6 @@ async def stream_chat_post( ) break except asyncio.TimeoutError: - # Send heartbeat to keep connection alive - logger.info( - f"[TIMING] Heartbeat timeout, chunks_so_far={chunks_yielded}", - extra={ - "json_fields": {**log_meta, "chunks_so_far": chunks_yielded} - }, - ) yield StreamHeartbeat().to_sse() except GeneratorExit: @@ -592,63 +548,90 @@ async def stream_chat_post( @router.get( "/sessions/{session_id}/stream", ) -async def stream_chat_get( +async def resume_session_stream( session_id: str, - message: Annotated[str, Query(min_length=1, max_length=10000)], user_id: str | None = Depends(auth.get_user_id), - is_user_message: bool = Query(default=True), ): """ - Stream chat responses for a session (GET - legacy endpoint). + Resume an active stream for a session. - Streams the AI/completion responses in real time over Server-Sent Events (SSE), including: - - Text fragments as they are generated - - Tool call UI elements (if invoked) - - Tool execution results + Called by the AI SDK's ``useChat(resume: true)`` on page load. + Checks for an active (in-progress) task on the session and either replays + the full SSE stream or returns 204 No Content if nothing is running. Args: - session_id: The chat session identifier to associate with the streamed messages. - message: The user's new message to process. + session_id: The chat session identifier. user_id: Optional authenticated user ID. - is_user_message: Whether the message is a user message. - Returns: - StreamingResponse: SSE-formatted response chunks. 
+ Returns: + StreamingResponse (SSE) when an active stream exists, + or 204 No Content when there is nothing to resume. """ - session = await _validate_and_get_session(session_id, user_id) + import asyncio + + active_task, _last_id = await stream_registry.get_active_task_for_session( + session_id, user_id + ) + + if not active_task: + return Response(status_code=204) + + subscriber_queue = await stream_registry.subscribe_to_task( + task_id=active_task.task_id, + user_id=user_id, + last_message_id="0-0", # Full replay so useChat rebuilds the message + ) + + if subscriber_queue is None: + return Response(status_code=204) async def event_generator() -> AsyncGenerator[str, None]: chunk_count = 0 first_chunk_type: str | None = None - async for chunk in chat_service.stream_chat_completion( - session_id, - message, - is_user_message=is_user_message, - user_id=user_id, - session=session, # Pass pre-fetched session to avoid double-fetch - ): - if chunk_count < 3: - logger.info( - "Chat stream chunk", - extra={ - "session_id": session_id, - "chunk_type": str(chunk.type), - }, + try: + while True: + try: + chunk = await asyncio.wait_for(subscriber_queue.get(), timeout=30.0) + if chunk_count < 3: + logger.info( + "Resume stream chunk", + extra={ + "session_id": session_id, + "chunk_type": str(chunk.type), + }, + ) + if not first_chunk_type: + first_chunk_type = str(chunk.type) + chunk_count += 1 + yield chunk.to_sse() + + if isinstance(chunk, StreamFinish): + break + except asyncio.TimeoutError: + yield StreamHeartbeat().to_sse() + except GeneratorExit: + pass + except Exception as e: + logger.error(f"Error in resume stream for session {session_id}: {e}") + finally: + try: + await stream_registry.unsubscribe_from_task( + active_task.task_id, subscriber_queue ) - if not first_chunk_type: - first_chunk_type = str(chunk.type) - chunk_count += 1 - yield chunk.to_sse() - logger.info( - "Chat stream completed", - extra={ - "session_id": session_id, - "n_chunks": chunk_count, - 
"first_chunk_type": first_chunk_type, - }, - ) - # AI SDK protocol termination - yield "data: [DONE]\n\n" + except Exception as unsub_err: + logger.error( + f"Error unsubscribing from task {active_task.task_id}: {unsub_err}", + exc_info=True, + ) + logger.info( + "Resume stream completed", + extra={ + "session_id": session_id, + "n_chunks": chunk_count, + "first_chunk_type": first_chunk_type, + }, + ) + yield "data: [DONE]\n\n" return StreamingResponse( event_generator(), @@ -656,8 +639,8 @@ async def stream_chat_get( headers={ "Cache-Control": "no-cache", "Connection": "keep-alive", - "X-Accel-Buffering": "no", # Disable nginx buffering - "x-vercel-ai-ui-message-stream": "v1", # AI SDK protocol header + "X-Accel-Buffering": "no", + "x-vercel-ai-ui-message-stream": "v1", }, ) @@ -969,3 +952,42 @@ async def health_check() -> dict: "service": "chat", "version": "0.1.0", } + + +# ========== Schema Export (for OpenAPI / Orval codegen) ========== + +ToolResponseUnion = ( + AgentsFoundResponse + | NoResultsResponse + | AgentDetailsResponse + | SetupRequirementsResponse + | ExecutionStartedResponse + | NeedLoginResponse + | ErrorResponse + | InputValidationErrorResponse + | AgentOutputResponse + | UnderstandingUpdatedResponse + | AgentPreviewResponse + | AgentSavedResponse + | ClarificationNeededResponse + | BlockListResponse + | BlockOutputResponse + | DocSearchResultsResponse + | DocPageResponse + | OperationStartedResponse + | OperationPendingResponse + | OperationInProgressResponse +) + + +@router.get( + "/schema/tool-responses", + response_model=ToolResponseUnion, + include_in_schema=True, + summary="[Dummy] Tool response type export for codegen", + description="This endpoint is not meant to be called. It exists solely to " + "expose tool response models in the OpenAPI schema for frontend codegen.", +) +async def _tool_response_schema() -> ToolResponseUnion: # type: ignore[return] + """Never called at runtime. 
Exists only so Orval generates TS types.""" + raise HTTPException(status_code=501, detail="Schema-only endpoint") diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index da18421b98..49e70265fa 100644 --- a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -52,8 +52,10 @@ from .response_model import ( StreamBaseResponse, StreamError, StreamFinish, + StreamFinishStep, StreamHeartbeat, StreamStart, + StreamStartStep, StreamTextDelta, StreamTextEnd, StreamTextStart, @@ -351,6 +353,10 @@ async def stream_chat_completion( retry_count: int = 0, session: ChatSession | None = None, context: dict[str, str] | None = None, # {url: str, content: str} + _continuation_message_id: ( + str | None + ) = None, # Internal: reuse message ID for tool call continuations + _task_id: str | None = None, # Internal: task ID for SSE reconnection support ) -> AsyncGenerator[StreamBaseResponse, None]: """Main entry point for streaming chat completions with database handling. @@ -517,16 +523,21 @@ async def stream_chat_completion( # Generate unique IDs for AI SDK protocol import uuid as uuid_module - message_id = str(uuid_module.uuid4()) + is_continuation = _continuation_message_id is not None + message_id = _continuation_message_id or str(uuid_module.uuid4()) text_block_id = str(uuid_module.uuid4()) - # Yield message start + # Only yield message start for the initial call, not for continuations. 
setup_time = (time.monotonic() - completion_start) * 1000 logger.info( f"[TIMING] Setup complete, yielding StreamStart at {setup_time:.1f}ms", extra={"json_fields": {**log_meta, "setup_time_ms": setup_time}}, ) - yield StreamStart(messageId=message_id) + if not is_continuation: + yield StreamStart(messageId=message_id, taskId=_task_id) + + # Emit start-step before each LLM call (AI SDK uses this to add step boundaries) + yield StreamStartStep() try: logger.info( @@ -632,6 +643,10 @@ async def stream_chat_completion( ) yield chunk elif isinstance(chunk, StreamFinish): + if has_done_tool_call: + # Tool calls happened β€” close the step but don't send message-level finish. + # The continuation will open a new step, and finish will come at the end. + yield StreamFinishStep() if not has_done_tool_call: # Emit text-end before finish if we received text but haven't closed it if has_received_text and not text_streaming_ended: @@ -663,6 +678,8 @@ async def stream_chat_completion( has_saved_assistant_message = True has_yielded_end = True + # Emit finish-step before finish (resets AI SDK text/reasoning state) + yield StreamFinishStep() yield chunk elif isinstance(chunk, StreamError): has_yielded_error = True @@ -712,6 +729,10 @@ async def stream_chat_completion( logger.info( f"Retryable error encountered. Attempt {retry_count + 1}/{config.max_retries}" ) + # Close the current step before retrying so the recursive call's + # StreamStartStep doesn't produce unbalanced step events. 
+ if not has_yielded_end: + yield StreamFinishStep() should_retry = True else: # Non-retryable error or max retries exceeded @@ -747,6 +768,7 @@ async def stream_chat_completion( error_response = StreamError(errorText=error_message) yield error_response if not has_yielded_end: + yield StreamFinishStep() yield StreamFinish() return @@ -761,6 +783,8 @@ async def stream_chat_completion( retry_count=retry_count + 1, session=session, context=context, + _continuation_message_id=message_id, # Reuse message ID since start was already sent + _task_id=_task_id, ): yield chunk return # Exit after retry to avoid double-saving in finally block @@ -830,6 +854,8 @@ async def stream_chat_completion( session=session, # Pass session object to avoid Redis refetch context=context, tool_call_response=str(tool_response_messages), + _continuation_message_id=message_id, # Reuse message ID to avoid duplicates + _task_id=_task_id, ): yield chunk @@ -1686,6 +1712,7 @@ async def _execute_long_running_tool_with_streaming( task_id, StreamError(errorText=str(e)), ) + await stream_registry.publish_chunk(task_id, StreamFinishStep()) await stream_registry.publish_chunk(task_id, StreamFinish()) await _update_pending_operation( @@ -1943,6 +1970,7 @@ async def _generate_llm_continuation_with_streaming( # Publish start event await stream_registry.publish_chunk(task_id, StreamStart(messageId=message_id)) + await stream_registry.publish_chunk(task_id, StreamStartStep()) await stream_registry.publish_chunk(task_id, StreamTextStart(id=text_block_id)) # Stream the response @@ -1966,6 +1994,7 @@ async def _generate_llm_continuation_with_streaming( # Publish end events await stream_registry.publish_chunk(task_id, StreamTextEnd(id=text_block_id)) + await stream_registry.publish_chunk(task_id, StreamFinishStep()) if assistant_content: # Reload session from DB to avoid race condition with user messages @@ -2007,4 +2036,5 @@ async def _generate_llm_continuation_with_streaming( task_id, 
StreamError(errorText=f"Failed to generate response: {e}"), ) + await stream_registry.publish_chunk(task_id, StreamFinishStep()) await stream_registry.publish_chunk(task_id, StreamFinish()) diff --git a/autogpt_platform/backend/backend/api/features/chat/stream_registry.py b/autogpt_platform/backend/backend/api/features/chat/stream_registry.py index 509d20d9f4..abc34b1fc9 100644 --- a/autogpt_platform/backend/backend/api/features/chat/stream_registry.py +++ b/autogpt_platform/backend/backend/api/features/chat/stream_registry.py @@ -857,8 +857,10 @@ def _reconstruct_chunk(chunk_data: dict) -> StreamBaseResponse | None: ResponseType, StreamError, StreamFinish, + StreamFinishStep, StreamHeartbeat, StreamStart, + StreamStartStep, StreamTextDelta, StreamTextEnd, StreamTextStart, @@ -872,6 +874,8 @@ def _reconstruct_chunk(chunk_data: dict) -> StreamBaseResponse | None: type_to_class: dict[str, type[StreamBaseResponse]] = { ResponseType.START.value: StreamStart, ResponseType.FINISH.value: StreamFinish, + ResponseType.START_STEP.value: StreamStartStep, + ResponseType.FINISH_STEP.value: StreamFinishStep, ResponseType.TEXT_START.value: StreamTextStart, ResponseType.TEXT_DELTA.value: StreamTextDelta, ResponseType.TEXT_END.value: StreamTextEnd, diff --git a/autogpt_platform/frontend/Dockerfile b/autogpt_platform/frontend/Dockerfile index 2b120af5e1..ab2708f1f9 100644 --- a/autogpt_platform/frontend/Dockerfile +++ b/autogpt_platform/frontend/Dockerfile @@ -25,8 +25,12 @@ RUN if [ -f .env.production ]; then \ cp .env.default .env; \ fi RUN pnpm run generate:api +# Disable source-map generation in Docker builds to halve webpack memory usage. +# Source maps are only useful when SENTRY_AUTH_TOKEN is set (Vercel deploys); +# the Docker image never uploads them, so generating them just wastes RAM. 
+ENV NEXT_PUBLIC_SOURCEMAPS="false" # In CI, we want NEXT_PUBLIC_PW_TEST=true during build so Next.js inlines it -RUN if [ "$NEXT_PUBLIC_PW_TEST" = "true" ]; then NEXT_PUBLIC_PW_TEST=true NODE_OPTIONS="--max-old-space-size=4096" pnpm build; else NODE_OPTIONS="--max-old-space-size=4096" pnpm build; fi +RUN if [ "$NEXT_PUBLIC_PW_TEST" = "true" ]; then NEXT_PUBLIC_PW_TEST=true NODE_OPTIONS="--max-old-space-size=8192" pnpm build; else NODE_OPTIONS="--max-old-space-size=8192" pnpm build; fi # Prod stage - based on NextJS reference Dockerfile https://github.com/vercel/next.js/blob/64271354533ed16da51be5dce85f0dbd15f17517/examples/with-docker/Dockerfile FROM node:21-alpine AS prod diff --git a/autogpt_platform/frontend/next.config.mjs b/autogpt_platform/frontend/next.config.mjs index bb4410039d..9bb5983801 100644 --- a/autogpt_platform/frontend/next.config.mjs +++ b/autogpt_platform/frontend/next.config.mjs @@ -1,8 +1,12 @@ import { withSentryConfig } from "@sentry/nextjs"; +// Allow Docker builds to skip source-map generation (halves memory usage). +// Defaults to true so Vercel/local builds are unaffected. +const enableSourceMaps = process.env.NEXT_PUBLIC_SOURCEMAPS !== "false"; + /** @type {import('next').NextConfig} */ const nextConfig = { - productionBrowserSourceMaps: true, + productionBrowserSourceMaps: enableSourceMaps, // Externalize OpenTelemetry packages to fix Turbopack HMR issues serverExternalPackages: [ "@opentelemetry/instrumentation", @@ -14,9 +18,37 @@ const nextConfig = { serverActions: { bodySizeLimit: "256mb", }, - // Increase body size limit for API routes (file uploads) - 256MB to match backend limit - proxyClientMaxBodySize: "256mb", middlewareClientMaxBodySize: "256mb", + // Limit parallel webpack workers to reduce peak memory during builds. + cpus: 2, + }, + // Work around cssnano "Invalid array length" bug in Next.js's bundled + // cssnano-simple comment parser when processing very large CSS chunks. 
+ // CSS is still bundled correctly; gzip handles most of the size savings anyway. + webpack: (config, { dev }) => { + if (!dev) { + // Next.js adds CssMinimizerPlugin internally (after user config), so we + // can't filter it from config.plugins. Instead, intercept the webpack + // compilation hooks and replace the buggy plugin's tap with a no-op. + config.plugins.push({ + apply(compiler) { + compiler.hooks.compilation.tap( + "DisableCssMinimizer", + (compilation) => { + compilation.hooks.processAssets.intercept({ + register: (tap) => { + if (tap.name === "CssMinimizerPlugin") { + return { ...tap, fn: async () => {} }; + } + return tap; + }, + }); + }, + ); + }, + }); + } + return config; }, images: { domains: [ @@ -54,9 +86,16 @@ const nextConfig = { transpilePackages: ["geist"], }; -const isDevelopmentBuild = process.env.NODE_ENV !== "production"; +// Only run the Sentry webpack plugin when we can actually upload source maps +// (i.e. on Vercel with SENTRY_AUTH_TOKEN set). The Sentry *runtime* SDK +// (imported in app code) still captures errors without the plugin. +// Skipping the plugin saves ~1 GB of peak memory during `next build`. +const skipSentryPlugin = + process.env.NODE_ENV !== "production" || + !enableSourceMaps || + !process.env.SENTRY_AUTH_TOKEN; -export default isDevelopmentBuild +export default skipSentryPlugin ? nextConfig : withSentryConfig(nextConfig, { // For all available options, see: @@ -96,7 +135,7 @@ export default isDevelopmentBuild // This helps Sentry with sourcemaps... 
https://docs.sentry.io/platforms/javascript/guides/nextjs/sourcemaps/ sourcemaps: { - disable: false, + disable: !enableSourceMaps, assets: [".next/**/*.js", ".next/**/*.js.map"], ignore: ["**/node_modules/**"], deleteSourcemapsAfterUpload: false, // Source is public anyway :) diff --git a/autogpt_platform/frontend/package.json b/autogpt_platform/frontend/package.json index e8c9871a72..5988e59c90 100644 --- a/autogpt_platform/frontend/package.json +++ b/autogpt_platform/frontend/package.json @@ -7,7 +7,7 @@ }, "scripts": { "dev": "pnpm run generate:api:force && next dev --turbo", - "build": "next build", + "build": "cross-env NODE_OPTIONS=--max-old-space-size=16384 next build", "start": "next start", "start:standalone": "cd .next/standalone && node server.js", "lint": "next lint && prettier --check .", @@ -30,6 +30,7 @@ "defaults" ], "dependencies": { + "@ai-sdk/react": "3.0.61", "@faker-js/faker": "10.0.0", "@hookform/resolvers": "5.2.2", "@next/third-parties": "15.4.6", @@ -60,6 +61,10 @@ "@rjsf/utils": "6.1.2", "@rjsf/validator-ajv8": "6.1.2", "@sentry/nextjs": "10.27.0", + "@streamdown/cjk": "1.0.1", + "@streamdown/code": "1.0.1", + "@streamdown/math": "1.0.1", + "@streamdown/mermaid": "1.0.1", "@supabase/ssr": "0.7.0", "@supabase/supabase-js": "2.78.0", "@tanstack/react-query": "5.90.6", @@ -68,6 +73,7 @@ "@vercel/analytics": "1.5.0", "@vercel/speed-insights": "1.2.0", "@xyflow/react": "12.9.2", + "ai": "6.0.59", "boring-avatars": "1.11.2", "class-variance-authority": "0.7.1", "clsx": "2.1.1", @@ -87,7 +93,6 @@ "launchdarkly-react-client-sdk": "3.9.0", "lodash": "4.17.21", "lucide-react": "0.552.0", - "moment": "2.30.1", "next": "15.4.10", "next-themes": "0.4.6", "nuqs": "2.7.2", @@ -112,9 +117,11 @@ "remark-math": "6.0.0", "shepherd.js": "14.5.1", "sonner": "2.0.7", + "streamdown": "2.1.0", "tailwind-merge": "2.6.0", "tailwind-scrollbar": "3.1.0", "tailwindcss-animate": "1.0.7", + "use-stick-to-bottom": "1.1.2", "uuid": "11.1.0", "vaul": "1.1.2", "zod": 
"3.25.76", @@ -172,7 +179,8 @@ }, "pnpm": { "overrides": { - "@opentelemetry/instrumentation": "0.209.0" + "@opentelemetry/instrumentation": "0.209.0", + "lodash-es": "4.17.23" } }, "packageManager": "pnpm@10.20.0+sha512.cf9998222162dd85864d0a8102e7892e7ba4ceadebbf5a31f9c2fce48dfce317a9c53b9f6464d1ef9042cba2e02ae02a9f7c143a2b438cd93c91840f0192b9dd" diff --git a/autogpt_platform/frontend/pnpm-lock.yaml b/autogpt_platform/frontend/pnpm-lock.yaml index 377a298564..468e2f312d 100644 --- a/autogpt_platform/frontend/pnpm-lock.yaml +++ b/autogpt_platform/frontend/pnpm-lock.yaml @@ -6,11 +6,15 @@ settings: overrides: '@opentelemetry/instrumentation': 0.209.0 + lodash-es: 4.17.23 importers: .: dependencies: + '@ai-sdk/react': + specifier: 3.0.61 + version: 3.0.61(react@18.3.1)(zod@3.25.76) '@faker-js/faker': specifier: 10.0.0 version: 10.0.0 @@ -101,6 +105,18 @@ importers: '@sentry/nextjs': specifier: 10.27.0 version: 10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.10(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.104.1(esbuild@0.25.12)) + '@streamdown/cjk': + specifier: 1.0.1 + version: 1.0.1(@types/mdast@4.0.4)(micromark-util-types@2.0.2)(micromark@4.0.2)(react@18.3.1)(unified@11.0.5) + '@streamdown/code': + specifier: 1.0.1 + version: 1.0.1(react@18.3.1) + '@streamdown/math': + specifier: 1.0.1 + version: 1.0.1(react@18.3.1) + '@streamdown/mermaid': + specifier: 1.0.1 + version: 1.0.1(react@18.3.1) '@supabase/ssr': specifier: 0.7.0 version: 0.7.0(@supabase/supabase-js@2.78.0) @@ -125,6 +141,9 @@ importers: '@xyflow/react': specifier: 12.9.2 version: 12.9.2(@types/react@18.3.17)(immer@11.1.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + ai: + specifier: 6.0.59 + version: 6.0.59(zod@3.25.76) boring-avatars: specifier: 1.11.2 
version: 1.11.2 @@ -182,9 +201,6 @@ importers: lucide-react: specifier: 0.552.0 version: 0.552.0(react@18.3.1) - moment: - specifier: 2.30.1 - version: 2.30.1 next: specifier: 15.4.10 version: 15.4.10(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -257,6 +273,9 @@ importers: sonner: specifier: 2.0.7 version: 2.0.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + streamdown: + specifier: 2.1.0 + version: 2.1.0(react@18.3.1) tailwind-merge: specifier: 2.6.0 version: 2.6.0 @@ -266,6 +285,9 @@ importers: tailwindcss-animate: specifier: 1.0.7 version: 1.0.7(tailwindcss@3.4.17) + use-stick-to-bottom: + specifier: 1.1.2 + version: 1.1.2(react@18.3.1) uuid: specifier: 11.1.0 version: 11.1.0 @@ -417,10 +439,35 @@ packages: '@adobe/css-tools@4.4.4': resolution: {integrity: sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg==} + '@ai-sdk/gateway@3.0.27': + resolution: {integrity: sha512-Pr+ApS9k6/jcR3kNltJNxo60OdYvnVU4DeRhzVtxUAYTXCHx4qO+qTMG9nNRn+El1acJnNRA//Su47srjXkT/w==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + + '@ai-sdk/provider-utils@4.0.10': + resolution: {integrity: sha512-VeDAiCH+ZK8Xs4hb9Cw7pHlujWNL52RKe8TExOkrw6Ir1AmfajBZTb9XUdKOZO08RwQElIKA8+Ltm+Gqfo8djQ==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + + '@ai-sdk/provider@3.0.5': + resolution: {integrity: sha512-2Xmoq6DBJqmSl80U6V9z5jJSJP7ehaJJQMy2iFUqTay06wdCqTnPVBBQbtEL8RCChenL+q5DC5H5WzU3vV3v8w==} + engines: {node: '>=18'} + + '@ai-sdk/react@3.0.61': + resolution: {integrity: sha512-vCjZBnY2+TawFBXamSKt6elAt9n1MXMfcjSd9DSgT9peCJN27qNGVSXgaGNh/B3cUgeOktFfhB2GVmIqOjvmLQ==} + engines: {node: '>=18'} + peerDependencies: + react: ^18 || ~19.0.1 || ~19.1.2 || ^19.2.1 + '@alloc/quick-lru@5.2.0': resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==} engines: {node: '>=10'} + 
'@antfu/install-pkg@1.1.0': + resolution: {integrity: sha512-MGQsmw10ZyI+EJo45CdSER4zEb+p31LpDAFp2Z3gkSd1yqVZGi0Ebx++YTEMonJy4oChEMLsxZ64j8FH6sSqtQ==} + '@apidevtools/json-schema-ref-parser@14.0.1': resolution: {integrity: sha512-Oc96zvmxx1fqoSEdUmfmvvb59/KDOnUoJ7s2t7bISyAn0XEz57LCCw8k2Y4Pf3mwKaZLMciESALORLgfe2frCw==} engines: {node: '>= 16'} @@ -1032,6 +1079,24 @@ packages: resolution: {integrity: sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==} engines: {node: '>=6.9.0'} + '@braintree/sanitize-url@7.1.2': + resolution: {integrity: sha512-jigsZK+sMF/cuiB7sERuo9V7N9jx+dhmHHnQyDSVdpZwVutaBu7WvNYqMDLSgFgfB30n452TP3vjDAvFC973mA==} + + '@chevrotain/cst-dts-gen@11.0.3': + resolution: {integrity: sha512-BvIKpRLeS/8UbfxXxgC33xOumsacaeCKAjAeLyOn7Pcp95HiRbrpl14S+9vaZLolnbssPIUuiUd8IvgkRyt6NQ==} + + '@chevrotain/gast@11.0.3': + resolution: {integrity: sha512-+qNfcoNk70PyS/uxmj3li5NiECO+2YKZZQMbmjTqRI3Qchu8Hig/Q9vgkHpI3alNjr7M+a2St5pw5w5F6NL5/Q==} + + '@chevrotain/regexp-to-ast@11.0.3': + resolution: {integrity: sha512-1fMHaBZxLFvWI067AVbGJav1eRY7N8DDvYCTwGBiE/ytKBgP8azTdgyrKyWZ9Mfh09eHWb5PgTSO8wi7U824RA==} + + '@chevrotain/types@11.0.3': + resolution: {integrity: sha512-gsiM3G8b58kZC2HaWR50gu6Y1440cHiJ+i3JUvcp/35JchYejb2+5MVeJK0iKThYpAa/P2PYFV4hoi44HD+aHQ==} + + '@chevrotain/utils@11.0.3': + resolution: {integrity: sha512-YslZMgtJUyuMbZ+aKvfF3x1f5liK4mWNxghFRv7jqRR9C3R3fAOGTTKvxXDa2Y1s9zSbcpuO0cAxDYsc9SrXoQ==} + '@chromatic-com/storybook@4.1.2': resolution: {integrity: sha512-QAWGtHwib0qsP5CcO64aJCF75zpFgpKK3jNpxILzQiPK3sVo4EmnVGJVdwcZWpWrGdH8E4YkncGoitw4EXzKMg==} engines: {node: '>=20.0.0', yarn: '>=1.22.18'} @@ -1486,6 +1551,12 @@ packages: resolution: {integrity: sha512-oT8USsTulFAA8FiBN0lA2rJqQI2lIt+HP2pdakGQXo3EviL2vqJTgpSCRwjl6mLJL158f1BVcdQUOEFGxomK3w==} engines: {node: '>=16.0.0'} + '@iconify/types@2.0.0': + resolution: {integrity: 
sha512-+wluvCrRhXrhyOmRDJ3q8mux9JkKy5SJ/v8ol2tu4FVjyYvtEzkc/3pK15ET6RKg4b4w4BmTk1+gsCUhf21Ykg==} + + '@iconify/utils@3.1.0': + resolution: {integrity: sha512-Zlzem1ZXhI1iHeeERabLNzBHdOa4VhQbqAcOQaMKuTuyZCpwKbC2R4Dd0Zo3g9EAc+Y4fiarO8HIHRAth7+skw==} + '@img/colour@1.0.0': resolution: {integrity: sha512-A5P/LfWGFSl6nsckYtjw9da+19jB8hkJ6ACTGcDfEJ0aE+l2n2El7dsVM7UVHZQ9s2lmYMWlrS21YLy2IR1LUw==} engines: {node: '>=18'} @@ -1705,6 +1776,9 @@ packages: '@types/react': '>=16' react: '>=16' + '@mermaid-js/parser@0.6.3': + resolution: {integrity: sha512-lnjOhe7zyHjc+If7yT4zoedx2vo4sHaTmtkl1+or8BRTnCtDmcTpAjpzDSfCZrshM5bCoz0GyidzadJAH1xobA==} + '@mswjs/interceptors@0.40.0': resolution: {integrity: sha512-EFd6cVbHsgLa6wa4RljGj6Wk75qoHxUSyc5asLyyPSyuhIcdS2Q3Phw6ImS1q+CkALthJRShiYfKANcQMuMqsQ==} engines: {node: '>=18'} @@ -3019,6 +3093,12 @@ packages: peerDependencies: webpack: '>=4.40.0' + '@shikijs/core@3.21.0': + resolution: {integrity: sha512-AXSQu/2n1UIQekY8euBJlvFYZIw0PHY63jUzGbrOma4wPxzznJXTXkri+QcHeBNaFxiiOljKxxJkVSoB3PjbyA==} + + '@shikijs/engine-javascript@3.21.0': + resolution: {integrity: sha512-ATwv86xlbmfD9n9gKRiwuPpWgPENAWCLwYCGz9ugTJlsO2kOzhOkvoyV/UD+tJ0uT7YRyD530x6ugNSffmvIiQ==} + '@shikijs/engine-oniguruma@3.21.0': resolution: {integrity: sha512-OYknTCct6qiwpQDqDdf3iedRdzj6hFlOPv5hMvI+hkWfCKs5mlJ4TXziBG9nyabLwGulrUjHiCq3xCspSzErYQ==} @@ -3222,6 +3302,26 @@ packages: typescript: optional: true + '@streamdown/cjk@1.0.1': + resolution: {integrity: sha512-ElDoEfad2u8iFzmgmEEab15N4mt19r47xeUIPJtHaHVyEF5baojamGo+xw3MywMj2qUsAY3LnTnKbrUtL5tGkg==} + peerDependencies: + react: ^18.0.0 || ^19.0.0 + + '@streamdown/code@1.0.1': + resolution: {integrity: sha512-U9LITfQ28tZYAoY922jdtw1ryg4kgRBdURopqK9hph7G2fBUwPeHthjH7SvaV0fvFv7EqjqCzARJuWUljLe9Ag==} + peerDependencies: + react: ^18.0.0 || ^19.0.0 + + '@streamdown/math@1.0.1': + resolution: {integrity: sha512-R9WdHbpERiRU7WeO7oT1aIbnLJ/jraDr89F7X9x2OM//Y8G8UMATRnLD/RUwg4VLr8Nu7QSIJ0Pa8lXd2meM4Q==} + peerDependencies: + react: 
^18.0.0 || ^19.0.0 + + '@streamdown/mermaid@1.0.1': + resolution: {integrity: sha512-LVGbxYd6t1DKMCMqm3cpbfsdD4/EKpQelanOlJaBMKv83kbrl8syZJhVBsd/jka+CawhpeR9xsGQJzSJEpjoVw==} + peerDependencies: + react: ^18.0.0 || ^19.0.0 + '@supabase/auth-js@2.78.0': resolution: {integrity: sha512-cXDtu1U0LeZj/xfnFoV7yCze37TcbNo8FCxy1FpqhMbB9u9QxxDSW6pA5gm/07Ei7m260Lof4CZx67Cu6DPeig==} @@ -3344,21 +3444,69 @@ packages: '@types/d3-array@3.2.2': resolution: {integrity: sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==} + '@types/d3-axis@3.0.6': + resolution: {integrity: sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw==} + + '@types/d3-brush@3.0.6': + resolution: {integrity: sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A==} + + '@types/d3-chord@3.0.6': + resolution: {integrity: sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg==} + '@types/d3-color@3.1.3': resolution: {integrity: sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==} + '@types/d3-contour@3.0.6': + resolution: {integrity: sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==} + + '@types/d3-delaunay@6.0.4': + resolution: {integrity: sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw==} + + '@types/d3-dispatch@3.0.7': + resolution: {integrity: sha512-5o9OIAdKkhN1QItV2oqaE5KMIiXAvDWBDPrD85e58Qlz1c1kI/J0NcqbEG88CoTwJrYe7ntUCVfeUl2UJKbWgA==} + '@types/d3-drag@3.0.7': resolution: {integrity: sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==} + '@types/d3-dsv@3.0.7': + resolution: {integrity: sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g==} + '@types/d3-ease@3.0.2': resolution: {integrity: 
sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==} + '@types/d3-fetch@3.0.7': + resolution: {integrity: sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA==} + + '@types/d3-force@3.0.10': + resolution: {integrity: sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw==} + + '@types/d3-format@3.0.4': + resolution: {integrity: sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g==} + + '@types/d3-geo@3.1.0': + resolution: {integrity: sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==} + + '@types/d3-hierarchy@3.1.7': + resolution: {integrity: sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg==} + '@types/d3-interpolate@3.0.4': resolution: {integrity: sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==} '@types/d3-path@3.1.1': resolution: {integrity: sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==} + '@types/d3-polygon@3.0.2': + resolution: {integrity: sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA==} + + '@types/d3-quadtree@3.0.6': + resolution: {integrity: sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg==} + + '@types/d3-random@3.0.3': + resolution: {integrity: sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ==} + + '@types/d3-scale-chromatic@3.1.0': + resolution: {integrity: sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ==} + '@types/d3-scale@4.0.9': resolution: {integrity: sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==} @@ -3368,6 +3516,9 @@ packages: '@types/d3-shape@3.1.7': resolution: 
{integrity: sha512-VLvUQ33C+3J+8p+Daf+nYSOsjB4GXp19/S/aGo60m9h1v6XaxjiT82lKVWJCfzhtuZ3yD7i/TPeC/fuKLLOSmg==} + '@types/d3-time-format@4.0.3': + resolution: {integrity: sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg==} + '@types/d3-time@3.0.4': resolution: {integrity: sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==} @@ -3380,6 +3531,9 @@ packages: '@types/d3-zoom@3.0.8': resolution: {integrity: sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==} + '@types/d3@7.4.3': + resolution: {integrity: sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==} + '@types/debug@4.1.12': resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==} @@ -3404,6 +3558,9 @@ packages: '@types/estree@1.0.8': resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + '@types/geojson@7946.0.16': + resolution: {integrity: sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg==} + '@types/hast@3.0.4': resolution: {integrity: sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==} @@ -3692,6 +3849,10 @@ packages: vue-router: optional: true + '@vercel/oidc@3.1.0': + resolution: {integrity: sha512-Fw28YZpRnA3cAHHDlkt7xQHiJ0fcL+NRcIqsocZQUSmbzeIKRpwttJjik5ZGanXP+vlA4SbTg+AbA3bP363l+w==} + engines: {node: '>= 20'} + '@vercel/speed-insights@1.2.0': resolution: {integrity: sha512-y9GVzrUJ2xmgtQlzFP2KhVRoCglwfRQgjyfY607aU0hh0Un6d0OUyrJkjuAlsV18qR4zfoFPs/BiIj9YDS6Wzw==} peerDependencies: @@ -3873,6 +4034,12 @@ packages: resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} engines: {node: '>= 14'} + ai@6.0.59: + resolution: {integrity: 
sha512-9SfCvcr4kVk4t8ZzIuyHpuL1hFYKsYMQfBSbBq3dipXPa+MphARvI8wHEjNaRqYl3JOsJbWxEBIMqHL0L92mUA==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + ajv-draft-04@1.0.0: resolution: {integrity: sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw==} peerDependencies: @@ -4227,6 +4394,14 @@ packages: resolution: {integrity: sha512-PAJdDJusoxnwm1VwW07VWwUN1sl7smmC3OKggvndJFadxxDRyFJBX/ggnu/KE4kQAB7a3Dp8f/YXC1FlUprWmA==} engines: {node: '>= 16'} + chevrotain-allstar@0.3.1: + resolution: {integrity: sha512-b7g+y9A0v4mxCW1qUhf3BSVPg+/NvGErk/dOkrDaHA0nQIQGAtrOjlX//9OQtRlSCy+x9rfB5N8yC71lH1nvMw==} + peerDependencies: + chevrotain: ^11.0.0 + + chevrotain@11.0.3: + resolution: {integrity: sha512-ci2iJH6LeIkvP9eJW6gpueU8cnZhv85ELY8w8WiFtNjMHA5ad6pQLaJo9mEly/9qUyCpvqX8/POVUTf18/HFdw==} + chokidar@3.6.0: resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==} engines: {node: '>= 8.10.0'} @@ -4325,6 +4500,10 @@ packages: resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} engines: {node: '>= 6'} + commander@7.2.0: + resolution: {integrity: sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==} + engines: {node: '>= 10'} + commander@8.3.0: resolution: {integrity: sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==} engines: {node: '>= 12'} @@ -4346,6 +4525,9 @@ packages: engines: {node: '>=18'} hasBin: true + confbox@0.1.8: + resolution: {integrity: sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==} + console-browserify@1.2.0: resolution: {integrity: sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==} @@ -4374,6 +4556,12 @@ packages: core-util-is@1.0.3: resolution: {integrity: 
sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==} + cose-base@1.0.3: + resolution: {integrity: sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==} + + cose-base@2.2.0: + resolution: {integrity: sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g==} + cosmiconfig@7.1.0: resolution: {integrity: sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==} engines: {node: '>=10'} @@ -4447,14 +4635,51 @@ packages: csstype@3.2.3: resolution: {integrity: sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==} + cytoscape-cose-bilkent@4.1.0: + resolution: {integrity: sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==} + peerDependencies: + cytoscape: ^3.2.0 + + cytoscape-fcose@2.2.0: + resolution: {integrity: sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ==} + peerDependencies: + cytoscape: ^3.2.0 + + cytoscape@3.33.1: + resolution: {integrity: sha512-iJc4TwyANnOGR1OmWhsS9ayRS3s+XQ185FmuHObThD+5AeJCakAAbWv8KimMTt08xCCLNgneQwFp+JRJOr9qGQ==} + engines: {node: '>=0.10'} + + d3-array@2.12.1: + resolution: {integrity: sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ==} + d3-array@3.2.4: resolution: {integrity: sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==} engines: {node: '>=12'} + d3-axis@3.0.0: + resolution: {integrity: sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==} + engines: {node: '>=12'} + + d3-brush@3.0.0: + resolution: {integrity: sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==} + engines: {node: '>=12'} + + d3-chord@3.0.1: + resolution: {integrity: 
sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==} + engines: {node: '>=12'} + d3-color@3.1.0: resolution: {integrity: sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==} engines: {node: '>=12'} + d3-contour@4.0.2: + resolution: {integrity: sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==} + engines: {node: '>=12'} + + d3-delaunay@6.0.4: + resolution: {integrity: sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==} + engines: {node: '>=12'} + d3-dispatch@3.0.1: resolution: {integrity: sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==} engines: {node: '>=12'} @@ -4463,22 +4688,65 @@ packages: resolution: {integrity: sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==} engines: {node: '>=12'} + d3-dsv@3.0.1: + resolution: {integrity: sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==} + engines: {node: '>=12'} + hasBin: true + d3-ease@3.0.1: resolution: {integrity: sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==} engines: {node: '>=12'} + d3-fetch@3.0.1: + resolution: {integrity: sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==} + engines: {node: '>=12'} + + d3-force@3.0.0: + resolution: {integrity: sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==} + engines: {node: '>=12'} + d3-format@3.1.0: resolution: {integrity: sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==} engines: {node: '>=12'} + d3-geo@3.1.1: + resolution: {integrity: sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==} + engines: {node: '>=12'} + + 
d3-hierarchy@3.1.2: + resolution: {integrity: sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==} + engines: {node: '>=12'} + d3-interpolate@3.0.1: resolution: {integrity: sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==} engines: {node: '>=12'} + d3-path@1.0.9: + resolution: {integrity: sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg==} + d3-path@3.1.0: resolution: {integrity: sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==} engines: {node: '>=12'} + d3-polygon@3.0.1: + resolution: {integrity: sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==} + engines: {node: '>=12'} + + d3-quadtree@3.0.1: + resolution: {integrity: sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==} + engines: {node: '>=12'} + + d3-random@3.0.1: + resolution: {integrity: sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==} + engines: {node: '>=12'} + + d3-sankey@0.12.3: + resolution: {integrity: sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ==} + + d3-scale-chromatic@3.1.0: + resolution: {integrity: sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==} + engines: {node: '>=12'} + d3-scale@4.0.2: resolution: {integrity: sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==} engines: {node: '>=12'} @@ -4487,6 +4755,9 @@ packages: resolution: {integrity: sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==} engines: {node: '>=12'} + d3-shape@1.3.7: + resolution: {integrity: sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==} + d3-shape@3.2.0: resolution: {integrity: 
sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==} engines: {node: '>=12'} @@ -4513,6 +4784,13 @@ packages: resolution: {integrity: sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==} engines: {node: '>=12'} + d3@7.9.0: + resolution: {integrity: sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA==} + engines: {node: '>=12'} + + dagre-d3-es@7.0.13: + resolution: {integrity: sha512-efEhnxpSuwpYOKRm/L5KbqoZmNNukHa/Flty4Wp62JRvgH2ojwVgPgdYyr4twpieZnyRDdIH7PY2mopX26+j2Q==} + damerau-levenshtein@1.0.8: resolution: {integrity: sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==} @@ -4538,6 +4816,9 @@ packages: date-fns@4.1.0: resolution: {integrity: sha512-Ukq0owbQXxa/U3EGtsdVBkR1w7KOQ5gIBqdH2hkvknzZPYvBxb/aa6E8L7tmjFtkwZBu3UXBbjIgPo/Ez4xaNg==} + dayjs@1.11.19: + resolution: {integrity: sha512-t5EcLVS6QPBNqM2z8fakk/NKel+Xzshgt8FFKAn+qwlD1pzZWxh0nVCrvFK7ZDb6XucZeF9z8C7CBWTRIVApAw==} + debug@3.2.7: resolution: {integrity: sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==} peerDependencies: @@ -4594,6 +4875,9 @@ packages: resolution: {integrity: sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==} engines: {node: '>= 0.4'} + delaunator@5.0.1: + resolution: {integrity: sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==} + dependency-graph@0.11.0: resolution: {integrity: sha512-JeMq7fEshyepOWDfcfHK06N3MhyPhz++vtqWhMT5O9A3K42rdsEDpfdVqjaqaAhsw6a+ZqeDvQVtD0hFHQWrzg==} engines: {node: '>= 0.6.0'} @@ -4974,6 +5258,10 @@ packages: resolution: {integrity: sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==} engines: {node: '>=0.8.x'} + eventsource-parser@3.0.6: + resolution: {integrity: 
sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==} + engines: {node: '>=18.0.0'} + evp_bytestokey@1.0.3: resolution: {integrity: sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==} @@ -5174,6 +5462,10 @@ packages: resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} engines: {node: 6.* || 8.* || >= 10.*} + get-east-asian-width@1.4.0: + resolution: {integrity: sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q==} + engines: {node: '>=18'} + get-intrinsic@1.3.0: resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} engines: {node: '>= 0.4'} @@ -5213,11 +5505,12 @@ packages: glob@10.5.0: resolution: {integrity: sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==} + deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me hasBin: true glob@7.2.3: resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} - deprecated: Glob versions prior to v9 are no longer supported + deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me globals@13.24.0: resolution: {integrity: sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==} @@ -5248,6 +5541,9 @@ packages: resolution: {integrity: sha512-DKKrynuQRne0PNpEbzuEdHlYOMksHSUI8Zc9Unei5gTsMNA2/vMpoMz/yKba50pejK56qj98qM0SjYxAKi13gQ==} engines: {node: ^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0} + hachure-fill@0.5.2: + resolution: {integrity: sha512-3GKBOn+m2LX9iq+JC1064cSFprJY4jL1jCXTcpnfER5HYE2l/4EfWSGzkPa/ZDBmYI0ZOEj5VHV/eKnPGkHuOg==} + happy-dom@20.3.4: resolution: {integrity: sha512-rfbiwB6OKxZFIFQ7SRnCPB2WL9WhyXsFoTfecYgeCeFSOBxvkWLaXsdv5ehzJrfqwXQmDephAKWLRQoFoJwrew==} engines: {node: '>=20.0.0'} @@ -5311,9 +5607,21 @@ packages: hast-util-parse-selector@4.0.0: resolution: {integrity: sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==} + hast-util-raw@9.1.0: + resolution: {integrity: sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw==} + + hast-util-sanitize@5.0.2: + resolution: {integrity: sha512-3yTWghByc50aGS7JlGhk61SPenfE/p1oaFeNwkOOyrscaOkMGrcW9+Cy/QAIOBpZxP1yqDIzFMR0+Np0i0+usg==} + + hast-util-to-html@9.0.5: + resolution: {integrity: sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw==} + hast-util-to-jsx-runtime@2.3.6: resolution: {integrity: sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==} + hast-util-to-parse5@8.0.1: + resolution: {integrity: sha512-MlWT6Pjt4CG9lFCjiz4BH7l9wmrMkfkJYCxFwKQic8+RTZgWPuWxwAfjJElsXkex7DJjfSJsQIt931ilUgmwdA==} + hast-util-to-string@3.0.1: resolution: {integrity: sha512-XelQVTDWvqcl3axRfI0xSeoVKzyIFPwsAGSLIsKdJKQMXDYJS4WYrBNF/8J7RdhIcFI2BOHgAifggsvsxp/3+A==} @@ -5358,6 +5666,9 @@ packages: html-url-attributes@3.0.1: resolution: {integrity: 
sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==} + html-void-elements@3.0.0: + resolution: {integrity: sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==} + html-webpack-plugin@5.6.5: resolution: {integrity: sha512-4xynFbKNNk+WlzXeQQ+6YYsH2g7mpfPszQZUi3ovKlj+pDmngQ7vRXjrrmGROabmKwyQkcgcX5hqfOwHbFmK5g==} engines: {node: '>=10.13.0'} @@ -5395,6 +5706,10 @@ packages: resolution: {integrity: sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==} engines: {node: '>=10.17.0'} + iconv-lite@0.6.3: + resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} + engines: {node: '>=0.10.0'} + icss-utils@5.1.0: resolution: {integrity: sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==} engines: {node: ^10 || ^12 || >= 14} @@ -5458,6 +5773,9 @@ packages: resolution: {integrity: sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==} engines: {node: '>= 0.4'} + internmap@1.0.1: + resolution: {integrity: sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw==} + internmap@2.0.3: resolution: {integrity: sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==} engines: {node: '>=12'} @@ -5698,6 +6016,9 @@ packages: json-schema-traverse@1.0.0: resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} + json-schema@0.4.0: + resolution: {integrity: sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==} + json-stable-stringify-without-jsonify@1.0.1: resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} @@ -5740,9 +6061,20 @@ packages: resolution: {integrity: 
sha512-woHRUZ/iF23GBP1dkDQMh1QBad9dmr8/PAwNA54VrSOVYgI12MAcE14TqnDdQOdzyEonGzMepYnqBMYdsoAr8Q==} hasBin: true + katex@0.16.28: + resolution: {integrity: sha512-YHzO7721WbmAL6Ov1uzN/l5mY5WWWhJBSW+jq4tkfZfsxmo1hu6frS0EOswvjBUnWE6NtjEs48SFn5CQESRLZg==} + hasBin: true + keyv@4.5.4: resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} + khroma@2.1.0: + resolution: {integrity: sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==} + + langium@3.3.1: + resolution: {integrity: sha512-QJv/h939gDpvT+9SiLVlY7tZC3xB2qK57v0J04Sh9wpMb6MP1q8gB21L3WIo8T5P1MSMg3Ep14L7KkDCFG3y4w==} + engines: {node: '>=16.0.0'} + language-subtag-registry@0.3.23: resolution: {integrity: sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ==} @@ -5762,6 +6094,12 @@ packages: react: ^16.6.3 || ^17.0.0 || ^18.0.0 || ^19.0.0 react-dom: ^16.8.4 || ^17.0.0 || ^18.0.0 || ^19.0.0 + layout-base@1.0.2: + resolution: {integrity: sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg==} + + layout-base@2.0.1: + resolution: {integrity: sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg==} + leven@3.1.0: resolution: {integrity: sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==} engines: {node: '>=6'} @@ -5804,8 +6142,8 @@ packages: resolution: {integrity: sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - lodash-es@4.17.22: - resolution: {integrity: sha512-XEawp1t0gxSi9x01glktRZ5HDy0HXqrM0x5pXQM98EaI0NxO6jVM7omDOxsuEo5UIASAnm2bRp1Jt/e0a2XU8Q==} + lodash-es@4.17.23: + resolution: {integrity: sha512-kVI48u3PZr38HdYz98UmfPnXl2DXrpdctLrFLCd3kOx1xUkOmpFPx7gCWWM5MPkL/fD8zb+Ph0QzjGFs4+hHWg==} lodash.camelcase@4.3.0: resolution: {integrity: 
sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==} @@ -5912,6 +6250,16 @@ packages: react: optional: true + marked@16.4.2: + resolution: {integrity: sha512-TI3V8YYWvkVf3KJe1dRkpnjs68JUPyEa5vjKrp1XEEJUAOaQc+Qj+L1qWbPd0SJuAdQkFU0h73sXXqwDYxsiDA==} + engines: {node: '>= 20'} + hasBin: true + + marked@17.0.1: + resolution: {integrity: sha512-boeBdiS0ghpWcSwoNm/jJBwdpFaMnZWRzjA6SkUMYb40SVaN1x7mmfGKp0jvexGcx+7y2La5zRZsYFZI6Qpypg==} + engines: {node: '>= 20'} + hasBin: true + math-intrinsics@1.1.0: resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} engines: {node: '>= 0.4'} @@ -5984,9 +6332,41 @@ packages: resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} engines: {node: '>= 8'} + mermaid@11.12.2: + resolution: {integrity: sha512-n34QPDPEKmaeCG4WDMGy0OT6PSyxKCfy2pJgShP+Qow2KLrvWjclwbc3yXfSIf4BanqWEhQEpngWwNp/XhZt6w==} + micromark-core-commonmark@2.0.3: resolution: {integrity: sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==} + micromark-extension-cjk-friendly-gfm-strikethrough@1.2.3: + resolution: {integrity: sha512-gSPnxgHDDqXYOBvQRq6lerrq9mjDhdtKn+7XETuXjxWcL62yZEfUdA28Ml1I2vDIPfAOIKLa0h2XDSGkInGHFQ==} + engines: {node: '>=16'} + peerDependencies: + micromark: ^4.0.0 + micromark-util-types: ^2.0.0 + peerDependenciesMeta: + micromark-util-types: + optional: true + + micromark-extension-cjk-friendly-util@2.1.1: + resolution: {integrity: sha512-egs6+12JU2yutskHY55FyR48ZiEcFOJFyk9rsiyIhcJ6IvWB6ABBqVrBw8IobqJTDZ/wdSr9eoXDPb5S2nW1bg==} + engines: {node: '>=16'} + peerDependencies: + micromark-util-types: '*' + peerDependenciesMeta: + micromark-util-types: + optional: true + + micromark-extension-cjk-friendly@1.2.3: + resolution: {integrity: sha512-gRzVLUdjXBLX6zNPSnHGDoo+ZTp5zy+MZm0g3sv+3chPXY7l9gW+DnrcHcZh/jiPR6MjPKO4AEJNp4Aw6V9z5Q==} + engines: 
{node: '>=16'} + peerDependencies: + micromark: ^4.0.0 + micromark-util-types: ^2.0.0 + peerDependenciesMeta: + micromark-util-types: + optional: true + micromark-extension-gfm-autolink-literal@2.1.0: resolution: {integrity: sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==} @@ -6119,12 +6499,12 @@ packages: resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} engines: {node: '>=16 || 14 >=14.17'} + mlly@1.8.0: + resolution: {integrity: sha512-l8D9ODSRWLe2KHJSifWGwBqpTZXIXTeo8mlKjY+E2HAakaTeNpqAyBZ8GSqLzHgw4XmHmC8whvpjJNMbFZN7/g==} + module-details-from-path@1.0.4: resolution: {integrity: sha512-EGWKgxALGMgzvxYF1UyGTy0HXX/2vHLkw6+NvDKW2jypWbHpjQuj4UMcqQWXHERJhVGKikolT06G3bcKe4fi7w==} - moment@2.30.1: - resolution: {integrity: sha512-uEmtNhbDOrWPFS+hdjFCBfy9f2YoyzRpwcl+DqpC6taX21FzsTLQVbMV/W7PzNSX6x/bhC1zA3c2UQ5NzH6how==} - motion-dom@12.24.8: resolution: {integrity: sha512-wX64WITk6gKOhaTqhsFqmIkayLAAx45SVFiMnJIxIrH5uqyrwrxjrfo8WX9Kh8CaUAixjeMn82iH0W0QT9wD5w==} @@ -6339,6 +6719,12 @@ packages: resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==} engines: {node: '>=6'} + oniguruma-parser@0.12.1: + resolution: {integrity: sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w==} + + oniguruma-to-es@4.3.4: + resolution: {integrity: sha512-3VhUGN3w2eYxnTzHn+ikMI+fp/96KoRSVK9/kMTcFqj1NRDh2IhQCKvYxDnWePKRXY/AqH+Fuiyb7VHSzBjHfA==} + open@8.4.2: resolution: {integrity: sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==} engines: {node: '>=12'} @@ -6398,6 +6784,9 @@ packages: package-json-from-dist@1.0.1: resolution: {integrity: sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==} + package-manager-detector@1.6.0: + resolution: {integrity: 
sha512-61A5ThoTiDG/C8s8UMZwSorAGwMJ0ERVGj2OjoW5pAalsNOg15+iQiPzrLJ4jhZ1HJzmC2PIHT2oEiH3R5fzNA==} + pako@1.0.11: resolution: {integrity: sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==} @@ -6434,6 +6823,9 @@ packages: path-browserify@1.0.1: resolution: {integrity: sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==} + path-data-parser@0.1.0: + resolution: {integrity: sha512-NOnmBpt5Y2RWbuv0LMzsayp3lVylAHLPUTut412ZA3l+C4uw4ZVkQbjShYCQ8TCpUMdPapr4YjUqLYD6v68j+w==} + path-exists@4.0.0: resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} engines: {node: '>=8'} @@ -6513,6 +6905,9 @@ packages: resolution: {integrity: sha512-Ie9z/WINcxxLp27BKOCHGde4ITq9UklYKDzVo1nhk5sqGEXU3FpkwP5GM2voTGJkGd9B3Otl+Q4uwSOeSUtOBA==} engines: {node: '>=14.16'} + pkg-types@1.3.1: + resolution: {integrity: sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==} + playwright-core@1.56.1: resolution: {integrity: sha512-hutraynyn31F+Bifme+Ps9Vq59hKuUCz7H1kDOcBs+2oGguKkWTU50bBWrtz34OUWmIwpBTWDxaRPXrIXkgvmQ==} engines: {node: '>=18'} @@ -6523,6 +6918,12 @@ packages: engines: {node: '>=18'} hasBin: true + points-on-curve@0.2.0: + resolution: {integrity: sha512-0mYKnYYe9ZcqMCWhUjItv/oHjvgEsfKvnUTg8sAtnHr3GVy7rGkXCb6d5cSyqrWqL4k81b9CPg3urd+T7aop3A==} + + points-on-path@0.2.1: + resolution: {integrity: sha512-25ClnWWuw7JbWZcgqY/gJ4FQWadKxGWk+3kR/7kD0tCaDtPPMj7oHu2ToLaVhfpnHrZzYby2w6tUA0eOIuUg8g==} + pony-cause@1.1.1: resolution: {integrity: sha512-PxkIc/2ZpLiEzQXu5YRDOUgBlfGYBY8156HY5ZcRAwwonMk5W/MrJP2LLkG/hF7GEQzaHo2aS7ho6ZLCOvf+6g==} engines: {node: '>=12.0.0'} @@ -6962,6 +7363,15 @@ packages: regex-parser@2.3.1: resolution: {integrity: sha512-yXLRqatcCuKtVHsWrNg0JL3l1zGfdXeEvDa0bdu4tCDQw0RpMDZsqbkyRTUnKMR0tXF627V2oEWjBEaEdqTwtQ==} + regex-recursion@6.0.2: + resolution: {integrity: 
sha512-0YCaSCq2VRIebiaUviZNs0cBz1kg5kVS2UKUfNIx8YVs1cN3AV7NTctO5FOKBA+UT2BPJIWZauYHPqJODG50cg==} + + regex-utilities@2.3.0: + resolution: {integrity: sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng==} + + regex@6.1.0: + resolution: {integrity: sha512-6VwtthbV4o/7+OaAF9I5L5V3llLEsoPyq9P1JVXkedTP33c7MfCG0/5NOPcSJn0TzXcG9YUrR0gQSWioew3LDg==} + regexp.prototype.flags@1.5.4: resolution: {integrity: sha512-dYqgNSZbDwkaJ2ceRd9ojCGjBq+mOm9LmtXnAnEGyHhN/5R7iDW2TRw3h+o/jCFxus3P2LfWIIiwowAjANm7IA==} engines: {node: '>= 0.4'} @@ -6980,12 +7390,21 @@ packages: rehype-autolink-headings@7.1.0: resolution: {integrity: sha512-rItO/pSdvnvsP4QRB1pmPiNHUskikqtPojZKJPPPAVx9Hj8i8TwMBhofrrAYRhYOOBZH9tgmG5lPqDLuIWPWmw==} + rehype-harden@1.1.7: + resolution: {integrity: sha512-j5DY0YSK2YavvNGV+qBHma15J9m0WZmRe8posT5AtKDS6TNWtMVTo6RiqF8SidfcASYz8f3k2J/1RWmq5zTXUw==} + rehype-highlight@7.0.2: resolution: {integrity: sha512-k158pK7wdC2qL3M5NcZROZ2tR/l7zOzjxXd5VGdcfIyoijjQqpHd3JKtYSBDpDZ38UI2WJWuFAtkMDxmx5kstA==} rehype-katex@7.0.1: resolution: {integrity: sha512-OiM2wrZ/wuhKkigASodFoo8wimG3H12LWQaH8qSPVJn9apWKFSH3YOCtbKpBorTVw/eI7cuT21XBbvwEswbIOA==} + rehype-raw@7.0.0: + resolution: {integrity: sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==} + + rehype-sanitize@6.0.0: + resolution: {integrity: sha512-CsnhKNsyI8Tub6L4sm5ZFsme4puGfc6pYylvXo1AeqaGbjOYyzNv3qZPwvs0oMJ39eryyeOdmxwUIo94IpEhqg==} + rehype-slug@6.0.0: resolution: {integrity: sha512-lWyvf/jwu+oS5+hL5eClVd3hNdmwM1kAC0BUvEGD19pajQMIzcNUd/k9GsfQ+FfECvX+JE+e9/btsKH0EjJT6A==} @@ -6993,6 +7412,26 @@ packages: resolution: {integrity: sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==} engines: {node: '>= 0.10'} + remark-cjk-friendly-gfm-strikethrough@1.2.3: + resolution: {integrity: sha512-bXfMZtsaomK6ysNN/UGRIcasQAYkC10NtPmP0oOHOV8YOhA2TXmwRXCku4qOzjIFxAPfish5+XS0eIug2PzNZA==} + engines: {node: '>=16'} 
+ peerDependencies: + '@types/mdast': ^4.0.0 + unified: ^11.0.0 + peerDependenciesMeta: + '@types/mdast': + optional: true + + remark-cjk-friendly@1.2.3: + resolution: {integrity: sha512-UvAgxwlNk+l9Oqgl/9MWK2eWRS7zgBW/nXX9AthV7nd/3lNejF138E7Xbmk9Zs4WjTJGs721r7fAEc7tNFoH7g==} + engines: {node: '>=16'} + peerDependencies: + '@types/mdast': ^4.0.0 + unified: ^11.0.0 + peerDependenciesMeta: + '@types/mdast': + optional: true + remark-gfm@4.0.1: resolution: {integrity: sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==} @@ -7008,6 +7447,9 @@ packages: remark-stringify@11.0.0: resolution: {integrity: sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==} + remend@1.1.0: + resolution: {integrity: sha512-JENGyuIhTwzUfCarW43X4r9cehoqTo9QyYxfNDZSud2AmqeuWjZ5pfybasTa4q0dxTJAj5m8NB+wR+YueAFpxQ==} + renderkid@3.0.0: resolution: {integrity: sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==} @@ -7066,14 +7508,23 @@ packages: resolution: {integrity: sha512-5Di9UC0+8h1L6ZD2d7awM7E/T4uA1fJRlx6zk/NvdCCVEoAnFqvHmCuNeIKoCeIixBX/q8uM+6ycDvF8woqosA==} engines: {node: '>= 0.8'} + robust-predicates@3.0.2: + resolution: {integrity: sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==} + rollup@4.55.1: resolution: {integrity: sha512-wDv/Ht1BNHB4upNbK74s9usvl7hObDnvVzknxqY/E/O3X6rW1U1rV1aENEfJ54eFZDTNo7zv1f5N4edCluH7+A==} engines: {node: '>=18.0.0', npm: '>=8.0.0'} hasBin: true + roughjs@4.6.6: + resolution: {integrity: sha512-ZUz/69+SYpFN/g/lUlo2FXcIjRkSu3nDarreVdGGndHEBJ6cXPdKguS8JGxwj5HA5xIbVKSmLgr5b3AWxtRfvQ==} + run-parallel@1.2.0: resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} + rw@1.3.3: + resolution: {integrity: sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==} + rxjs@7.8.2: resolution: 
{integrity: sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==} @@ -7098,6 +7549,9 @@ packages: safe-stable-stringify@1.1.1: resolution: {integrity: sha512-ERq4hUjKDbJfE4+XtZLFPCDi8Vb1JqaxAPTxWFLBx8XcAlf9Bda/ZJdVezs/NAfsMQScyIlUMx+Yeu7P7rx5jw==} + safer-buffer@2.1.2: + resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} + sass-loader@16.0.6: resolution: {integrity: sha512-sglGzId5gmlfxNs4gK2U3h7HlVRfx278YK6Ono5lwzuvi1jxig80YiuHkaDBVsYIKFhx8wN7XSCI0M2IDS/3qA==} engines: {node: '>= 18.12.0'} @@ -7186,6 +7640,9 @@ packages: resolution: {integrity: sha512-VuvPvLG1QjNOLP7AIm2HGyfmxEIz8QdskvWOHwUcxLDibYWjLRBmCWd8LSL5FlwhBW7D/GU+3gNVC/ASxAWdxg==} engines: {node: 18.* || >= 20} + shiki@3.21.0: + resolution: {integrity: sha512-N65B/3bqL/TI2crrXr+4UivctrAGEjmsib5rPMMPpFp1xAx/w03v8WZ9RDDFYteXoEgY7qZ4HGgl5KBIu1153w==} + should-equal@2.0.0: resolution: {integrity: sha512-ZP36TMrK9euEuWQYBig9W55WPC7uo37qzAEmbjHz4gfyuXrEUgF8cUvQVO+w+d3OMfPvSRQJ22lSm8MQJ43LTA==} @@ -7301,6 +7758,11 @@ packages: stream-http@3.2.0: resolution: {integrity: sha512-Oq1bLqisTyK3TSCXpPbT4sdeYNdmyZJv1LxpEm2vu1ZhK89kSE5YXwZc3cWk0MagGaKriBh9mCFbVGtO+vY29A==} + streamdown@2.1.0: + resolution: {integrity: sha512-u9gWd0AmjKg1d+74P44XaPlGrMeC21oDOSIhjGNEYMAttDMzCzlJO6lpTyJ9JkSinQQF65YcK4eOd3q9iTvULw==} + peerDependencies: + react: ^18.0.0 || ^19.0.0 + strict-event-emitter@0.5.1: resolution: {integrity: sha512-vMgjE/GGEPEFnhFub6pa4FmJBRBVOLpIII2hvCZ8Kzb7K0hlHo7mQv6xYrBvCL2LtAIBwFUK8wvuJgTVSQ5MFQ==} @@ -7414,6 +7876,9 @@ packages: babel-plugin-macros: optional: true + stylis@4.3.6: + resolution: {integrity: sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==} + sucrase@3.35.1: resolution: {integrity: sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==} engines: {node: '>=16 || 14 >=14.17'} @@ -7435,12 +7900,20 @@ 
packages: resolution: {integrity: sha512-upi/0ZGkYgEcLeGieoz8gT74oWHA0E7JivX7aN9mAf+Tc7BQoRBvnIGHoPDw+f9TXTW4s6kGYCZJtauP6OYp7g==} hasBin: true + swr@2.3.8: + resolution: {integrity: sha512-gaCPRVoMq8WGDcWj9p4YWzCMPHzE0WNl6W8ADIx9c3JBEIdMkJGMzW+uzXvxHMltwcYACr9jP+32H8/hgwMR7w==} + peerDependencies: + react: ^16.11.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + symbol-tree@3.2.4: resolution: {integrity: sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==} tailwind-merge@2.6.0: resolution: {integrity: sha512-P+Vu1qXfzediirmHOC3xKGAYeZtPcV9g76X+xg2FD4tYgR71ewMA35Y3sCz3zhiN/dwefRpJX0yBcgwi1fXNQA==} + tailwind-merge@3.4.0: + resolution: {integrity: sha512-uSaO4gnW+b3Y2aWoWfFpX62vn2sR3skfhbjsEnaBI81WD1wBLlHZe5sWf0AqjksNdYTbGBEd0UasQMT3SNV15g==} + tailwind-scrollbar@3.1.0: resolution: {integrity: sha512-pmrtDIZeHyu2idTejfV59SbaJyvp1VRjYxAjZBH0jnyrPRo6HL1kD5Glz8VPagasqr6oAx6M05+Tuw429Z8jxg==} engines: {node: '>=12.13.0'} @@ -7495,6 +7968,10 @@ packages: third-party-capital@1.0.20: resolution: {integrity: sha512-oB7yIimd8SuGptespDAZnNkzIz+NWaJCu2RMsbs4Wmp9zSDUM8Nhi3s2OOcqYuv3mN4hitXc8DVx+LyUmbUDiA==} + throttleit@2.1.0: + resolution: {integrity: sha512-nt6AMGKW1p/70DF/hGBdJB57B8Tspmbp5gfJ8ilhLnt7kkr2ye7hzD6NVG8GGErk2HWF34igrL2CXmNIkzKqKw==} + engines: {node: '>=18'} + timers-browserify@2.0.12: resolution: {integrity: sha512-9phl76Cqm6FhSX9Xe1ZUAMLtm1BLkKj2Qd5ApyWkXzsMRaA7dgr81kf4wJmQf/hAvg8EEyJxDo3du/0KlhPiKQ==} engines: {node: '>=0.6.0'} @@ -7677,6 +8154,9 @@ packages: uc.micro@2.1.0: resolution: {integrity: sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==} + ufo@1.6.3: + resolution: {integrity: sha512-yDJTmhydvl5lJzBmy/hyOAA0d+aqCBuwl818haVdYCRrWV84o7YyeVm4QlVHStqNrrJSTb6jKuFAVqAFsr+K3Q==} + unbox-primitive@1.1.0: resolution: {integrity: sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw==} engines: {node: '>= 0.4'} @@ -7781,6 +8261,11 @@ packages: 
'@types/react': optional: true + use-stick-to-bottom@1.1.2: + resolution: {integrity: sha512-ssUfMNvfH8a8hGLoAt5kcOsjbsVORknon2tbkECuf3EsVucFFBbyXl+Xnv3b58P8ZRuZelzO81fgb6M0eRo8cg==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + use-sync-external-store@1.6.0: resolution: {integrity: sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==} peerDependencies: @@ -7918,6 +8403,26 @@ packages: vm-browserify@1.1.2: resolution: {integrity: sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==} + vscode-jsonrpc@8.2.0: + resolution: {integrity: sha512-C+r0eKJUIfiDIfwJhria30+TYWPtuHJXHtI7J0YlOmKAo7ogxP20T0zxB7HZQIFhIyvoBPwWskjxrvAtfjyZfA==} + engines: {node: '>=14.0.0'} + + vscode-languageserver-protocol@3.17.5: + resolution: {integrity: sha512-mb1bvRJN8SVznADSGWM9u/b07H7Ecg0I3OgXDuLdn307rl/J3A9YD6/eYOssqhecL27hK1IPZAsaqh00i/Jljg==} + + vscode-languageserver-textdocument@1.0.12: + resolution: {integrity: sha512-cxWNPesCnQCcMPeenjKKsOCKQZ/L6Tv19DTRIGuLWe32lyzWhihGVJ/rcckZXJxfdKCFvRLS3fpBIsV/ZGX4zA==} + + vscode-languageserver-types@3.17.5: + resolution: {integrity: sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg==} + + vscode-languageserver@9.0.1: + resolution: {integrity: sha512-woByF3PDpkHFUreUa7Hos7+pUWdeWMXRd26+ZX2A8cFx6v/JPTtd4/uN0/jB6XQHYaOlHbio03NTHCqrgG5n7g==} + hasBin: true + + vscode-uri@3.0.8: + resolution: {integrity: sha512-AyFQ0EVmsOZOlAnxoFOGOq1SQDWAB7C6aqMGS23svWAllfOaxbuFvcT8D1i8z3Gyn8fraVeZNNmN6e9bxxXkKw==} + w3c-xmlserializer@5.0.0: resolution: {integrity: sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==} engines: {node: '>=18'} @@ -8147,8 +8652,41 @@ snapshots: '@adobe/css-tools@4.4.4': {} + '@ai-sdk/gateway@3.0.27(zod@3.25.76)': + dependencies: + '@ai-sdk/provider': 3.0.5 + '@ai-sdk/provider-utils': 4.0.10(zod@3.25.76) + '@vercel/oidc': 3.1.0 + zod: 3.25.76 
+ + '@ai-sdk/provider-utils@4.0.10(zod@3.25.76)': + dependencies: + '@ai-sdk/provider': 3.0.5 + '@standard-schema/spec': 1.1.0 + eventsource-parser: 3.0.6 + zod: 3.25.76 + + '@ai-sdk/provider@3.0.5': + dependencies: + json-schema: 0.4.0 + + '@ai-sdk/react@3.0.61(react@18.3.1)(zod@3.25.76)': + dependencies: + '@ai-sdk/provider-utils': 4.0.10(zod@3.25.76) + ai: 6.0.59(zod@3.25.76) + react: 18.3.1 + swr: 2.3.8(react@18.3.1) + throttleit: 2.1.0 + transitivePeerDependencies: + - zod + '@alloc/quick-lru@5.2.0': {} + '@antfu/install-pkg@1.1.0': + dependencies: + package-manager-detector: 1.6.0 + tinyexec: 1.0.2 + '@apidevtools/json-schema-ref-parser@14.0.1': dependencies: '@types/json-schema': 7.0.15 @@ -8962,6 +9500,25 @@ snapshots: '@babel/helper-string-parser': 7.27.1 '@babel/helper-validator-identifier': 7.28.5 + '@braintree/sanitize-url@7.1.2': {} + + '@chevrotain/cst-dts-gen@11.0.3': + dependencies: + '@chevrotain/gast': 11.0.3 + '@chevrotain/types': 11.0.3 + lodash-es: 4.17.23 + + '@chevrotain/gast@11.0.3': + dependencies: + '@chevrotain/types': 11.0.3 + lodash-es: 4.17.23 + + '@chevrotain/regexp-to-ast@11.0.3': {} + + '@chevrotain/types@11.0.3': {} + + '@chevrotain/utils@11.0.3': {} + '@chromatic-com/storybook@4.1.2(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))': dependencies: '@neoconfetti/react': 1.0.0 @@ -9281,6 +9838,14 @@ snapshots: transitivePeerDependencies: - encoding + '@iconify/types@2.0.0': {} + + '@iconify/utils@3.1.0': + dependencies: + '@antfu/install-pkg': 1.1.0 + '@iconify/types': 2.0.0 + mlly: 1.8.0 + '@img/colour@1.0.0': optional: true @@ -9457,6 +10022,10 @@ snapshots: '@types/react': 18.3.17 react: 18.3.1 + '@mermaid-js/parser@0.6.3': + dependencies: + langium: 3.3.1 + '@mswjs/interceptors@0.40.0': dependencies: '@open-draft/deferred-promise': 2.2.0 @@ -10608,7 +11177,7 @@ snapshots: dependencies: 
'@rjsf/utils': 6.1.2(react@18.3.1) lodash: 4.17.21 - lodash-es: 4.17.22 + lodash-es: 4.17.23 markdown-to-jsx: 8.0.0(react@18.3.1) prop-types: 15.8.1 react: 18.3.1 @@ -10619,7 +11188,7 @@ snapshots: fast-uri: 3.1.0 jsonpointer: 5.0.1 lodash: 4.17.21 - lodash-es: 4.17.22 + lodash-es: 4.17.23 react: 18.3.1 react-is: 18.3.1 @@ -10629,7 +11198,7 @@ snapshots: ajv: 8.17.1 ajv-formats: 2.1.1(ajv@8.17.1) lodash: 4.17.21 - lodash-es: 4.17.22 + lodash-es: 4.17.23 '@rolldown/pluginutils@1.0.0-beta.53': {} @@ -10936,6 +11505,19 @@ snapshots: - encoding - supports-color + '@shikijs/core@3.21.0': + dependencies: + '@shikijs/types': 3.21.0 + '@shikijs/vscode-textmate': 10.0.2 + '@types/hast': 3.0.4 + hast-util-to-html: 9.0.5 + + '@shikijs/engine-javascript@3.21.0': + dependencies: + '@shikijs/types': 3.21.0 + '@shikijs/vscode-textmate': 10.0.2 + oniguruma-to-es: 4.3.4 + '@shikijs/engine-oniguruma@3.21.0': dependencies: '@shikijs/types': 3.21.0 @@ -11313,6 +11895,37 @@ snapshots: optionalDependencies: typescript: 5.9.3 + '@streamdown/cjk@1.0.1(@types/mdast@4.0.4)(micromark-util-types@2.0.2)(micromark@4.0.2)(react@18.3.1)(unified@11.0.5)': + dependencies: + react: 18.3.1 + remark-cjk-friendly: 1.2.3(@types/mdast@4.0.4)(micromark-util-types@2.0.2)(micromark@4.0.2)(unified@11.0.5) + remark-cjk-friendly-gfm-strikethrough: 1.2.3(@types/mdast@4.0.4)(micromark-util-types@2.0.2)(micromark@4.0.2)(unified@11.0.5) + unist-util-visit: 5.0.0 + transitivePeerDependencies: + - '@types/mdast' + - micromark + - micromark-util-types + - unified + + '@streamdown/code@1.0.1(react@18.3.1)': + dependencies: + react: 18.3.1 + shiki: 3.21.0 + + '@streamdown/math@1.0.1(react@18.3.1)': + dependencies: + katex: 0.16.28 + react: 18.3.1 + rehype-katex: 7.0.1 + remark-math: 6.0.0 + transitivePeerDependencies: + - supports-color + + '@streamdown/mermaid@1.0.1(react@18.3.1)': + dependencies: + mermaid: 11.12.2 + react: 18.3.1 + '@supabase/auth-js@2.78.0': dependencies: '@supabase/node-fetch': 2.6.15 @@ -11475,20 
+12088,63 @@ snapshots: '@types/d3-array@3.2.2': {} + '@types/d3-axis@3.0.6': + dependencies: + '@types/d3-selection': 3.0.11 + + '@types/d3-brush@3.0.6': + dependencies: + '@types/d3-selection': 3.0.11 + + '@types/d3-chord@3.0.6': {} + '@types/d3-color@3.1.3': {} + '@types/d3-contour@3.0.6': + dependencies: + '@types/d3-array': 3.2.2 + '@types/geojson': 7946.0.16 + + '@types/d3-delaunay@6.0.4': {} + + '@types/d3-dispatch@3.0.7': {} + '@types/d3-drag@3.0.7': dependencies: '@types/d3-selection': 3.0.11 + '@types/d3-dsv@3.0.7': {} + '@types/d3-ease@3.0.2': {} + '@types/d3-fetch@3.0.7': + dependencies: + '@types/d3-dsv': 3.0.7 + + '@types/d3-force@3.0.10': {} + + '@types/d3-format@3.0.4': {} + + '@types/d3-geo@3.1.0': + dependencies: + '@types/geojson': 7946.0.16 + + '@types/d3-hierarchy@3.1.7': {} + '@types/d3-interpolate@3.0.4': dependencies: '@types/d3-color': 3.1.3 '@types/d3-path@3.1.1': {} + '@types/d3-polygon@3.0.2': {} + + '@types/d3-quadtree@3.0.6': {} + + '@types/d3-random@3.0.3': {} + + '@types/d3-scale-chromatic@3.1.0': {} + '@types/d3-scale@4.0.9': dependencies: '@types/d3-time': 3.0.4 @@ -11499,6 +12155,8 @@ snapshots: dependencies: '@types/d3-path': 3.1.1 + '@types/d3-time-format@4.0.3': {} + '@types/d3-time@3.0.4': {} '@types/d3-timer@3.0.2': {} @@ -11512,6 +12170,39 @@ snapshots: '@types/d3-interpolate': 3.0.4 '@types/d3-selection': 3.0.11 + '@types/d3@7.4.3': + dependencies: + '@types/d3-array': 3.2.2 + '@types/d3-axis': 3.0.6 + '@types/d3-brush': 3.0.6 + '@types/d3-chord': 3.0.6 + '@types/d3-color': 3.1.3 + '@types/d3-contour': 3.0.6 + '@types/d3-delaunay': 6.0.4 + '@types/d3-dispatch': 3.0.7 + '@types/d3-drag': 3.0.7 + '@types/d3-dsv': 3.0.7 + '@types/d3-ease': 3.0.2 + '@types/d3-fetch': 3.0.7 + '@types/d3-force': 3.0.10 + '@types/d3-format': 3.0.4 + '@types/d3-geo': 3.1.0 + '@types/d3-hierarchy': 3.1.7 + '@types/d3-interpolate': 3.0.4 + '@types/d3-path': 3.1.1 + '@types/d3-polygon': 3.0.2 + '@types/d3-quadtree': 3.0.6 + '@types/d3-random': 3.0.3 + 
'@types/d3-scale': 4.0.9 + '@types/d3-scale-chromatic': 3.1.0 + '@types/d3-selection': 3.0.11 + '@types/d3-shape': 3.1.7 + '@types/d3-time': 3.0.4 + '@types/d3-time-format': 4.0.3 + '@types/d3-timer': 3.0.2 + '@types/d3-transition': 3.0.9 + '@types/d3-zoom': 3.0.8 + '@types/debug@4.1.12': dependencies: '@types/ms': 2.1.0 @@ -11540,6 +12231,8 @@ snapshots: '@types/estree@1.0.8': {} + '@types/geojson@7946.0.16': {} + '@types/hast@3.0.4': dependencies: '@types/unist': 3.0.3 @@ -11796,6 +12489,8 @@ snapshots: next: 15.4.10(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 + '@vercel/oidc@3.1.0': {} + '@vercel/speed-insights@1.2.0(next@15.4.10(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)': optionalDependencies: next: 15.4.10(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -12023,6 +12718,14 @@ snapshots: agent-base@7.1.4: optional: true + ai@6.0.59(zod@3.25.76): + dependencies: + '@ai-sdk/gateway': 3.0.27(zod@3.25.76) + '@ai-sdk/provider': 3.0.5 + '@ai-sdk/provider-utils': 4.0.10(zod@3.25.76) + '@opentelemetry/api': 1.9.0 + zod: 3.25.76 + ajv-draft-04@1.0.0(ajv@8.17.1): optionalDependencies: ajv: 8.17.1 @@ -12411,6 +13114,20 @@ snapshots: check-error@2.1.3: {} + chevrotain-allstar@0.3.1(chevrotain@11.0.3): + dependencies: + chevrotain: 11.0.3 + lodash-es: 4.17.23 + + chevrotain@11.0.3: + dependencies: + '@chevrotain/cst-dts-gen': 11.0.3 + '@chevrotain/gast': 11.0.3 + '@chevrotain/regexp-to-ast': 11.0.3 + '@chevrotain/types': 11.0.3 + '@chevrotain/utils': 11.0.3 + lodash-es: 4.17.23 + chokidar@3.6.0: dependencies: anymatch: 3.1.3 @@ -12491,6 +13208,8 @@ snapshots: commander@4.1.1: {} + commander@7.2.0: {} + commander@8.3.0: {} common-path-prefix@3.0.0: {} @@ -12510,6 +13229,8 @@ snapshots: tree-kill: 1.2.2 yargs: 17.7.2 + 
confbox@0.1.8: {} + console-browserify@1.2.0: {} constants-browserify@1.0.0: {} @@ -12530,6 +13251,14 @@ snapshots: core-util-is@1.0.3: {} + cose-base@1.0.3: + dependencies: + layout-base: 1.0.2 + + cose-base@2.2.0: + dependencies: + layout-base: 2.0.1 + cosmiconfig@7.1.0: dependencies: '@types/parse-json': 4.0.2 @@ -12638,12 +13367,50 @@ snapshots: csstype@3.2.3: {} + cytoscape-cose-bilkent@4.1.0(cytoscape@3.33.1): + dependencies: + cose-base: 1.0.3 + cytoscape: 3.33.1 + + cytoscape-fcose@2.2.0(cytoscape@3.33.1): + dependencies: + cose-base: 2.2.0 + cytoscape: 3.33.1 + + cytoscape@3.33.1: {} + + d3-array@2.12.1: + dependencies: + internmap: 1.0.1 + d3-array@3.2.4: dependencies: internmap: 2.0.3 + d3-axis@3.0.0: {} + + d3-brush@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-drag: 3.0.0 + d3-interpolate: 3.0.1 + d3-selection: 3.0.0 + d3-transition: 3.0.1(d3-selection@3.0.0) + + d3-chord@3.0.1: + dependencies: + d3-path: 3.1.0 + d3-color@3.1.0: {} + d3-contour@4.0.2: + dependencies: + d3-array: 3.2.4 + + d3-delaunay@6.0.4: + dependencies: + delaunator: 5.0.1 + d3-dispatch@3.0.1: {} d3-drag@3.0.0: @@ -12651,16 +13418,56 @@ snapshots: d3-dispatch: 3.0.1 d3-selection: 3.0.0 + d3-dsv@3.0.1: + dependencies: + commander: 7.2.0 + iconv-lite: 0.6.3 + rw: 1.3.3 + d3-ease@3.0.1: {} + d3-fetch@3.0.1: + dependencies: + d3-dsv: 3.0.1 + + d3-force@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-quadtree: 3.0.1 + d3-timer: 3.0.1 + d3-format@3.1.0: {} + d3-geo@3.1.1: + dependencies: + d3-array: 3.2.4 + + d3-hierarchy@3.1.2: {} + d3-interpolate@3.0.1: dependencies: d3-color: 3.1.0 + d3-path@1.0.9: {} + d3-path@3.1.0: {} + d3-polygon@3.0.1: {} + + d3-quadtree@3.0.1: {} + + d3-random@3.0.1: {} + + d3-sankey@0.12.3: + dependencies: + d3-array: 2.12.1 + d3-shape: 1.3.7 + + d3-scale-chromatic@3.1.0: + dependencies: + d3-color: 3.1.0 + d3-interpolate: 3.0.1 + d3-scale@4.0.2: dependencies: d3-array: 3.2.4 @@ -12671,6 +13478,10 @@ snapshots: d3-selection@3.0.0: {} + d3-shape@1.3.7: + 
dependencies: + d3-path: 1.0.9 + d3-shape@3.2.0: dependencies: d3-path: 3.1.0 @@ -12702,6 +13513,44 @@ snapshots: d3-selection: 3.0.0 d3-transition: 3.0.1(d3-selection@3.0.0) + d3@7.9.0: + dependencies: + d3-array: 3.2.4 + d3-axis: 3.0.0 + d3-brush: 3.0.0 + d3-chord: 3.0.1 + d3-color: 3.1.0 + d3-contour: 4.0.2 + d3-delaunay: 6.0.4 + d3-dispatch: 3.0.1 + d3-drag: 3.0.0 + d3-dsv: 3.0.1 + d3-ease: 3.0.1 + d3-fetch: 3.0.1 + d3-force: 3.0.0 + d3-format: 3.1.0 + d3-geo: 3.1.1 + d3-hierarchy: 3.1.2 + d3-interpolate: 3.0.1 + d3-path: 3.1.0 + d3-polygon: 3.0.1 + d3-quadtree: 3.0.1 + d3-random: 3.0.1 + d3-scale: 4.0.2 + d3-scale-chromatic: 3.1.0 + d3-selection: 3.0.0 + d3-shape: 3.2.0 + d3-time: 3.1.0 + d3-time-format: 4.1.0 + d3-timer: 3.0.1 + d3-transition: 3.0.1(d3-selection@3.0.0) + d3-zoom: 3.0.0 + + dagre-d3-es@7.0.13: + dependencies: + d3: 7.9.0 + lodash-es: 4.17.23 + damerau-levenshtein@1.0.8: {} data-urls@6.0.1: @@ -12732,6 +13581,8 @@ snapshots: date-fns@4.1.0: {} + dayjs@1.11.19: {} + debug@3.2.7: dependencies: ms: 2.1.3 @@ -12773,6 +13624,10 @@ snapshots: has-property-descriptors: 1.0.2 object-keys: 1.1.1 + delaunator@5.0.1: + dependencies: + robust-predicates: 3.0.2 + dependency-graph@0.11.0: {} dequal@2.0.3: {} @@ -13347,6 +14202,8 @@ snapshots: events@3.3.0: {} + eventsource-parser@3.0.6: {} + evp_bytestokey@1.0.3: dependencies: md5.js: 1.3.5 @@ -13553,6 +14410,8 @@ snapshots: get-caller-file@2.0.5: {} + get-east-asian-width@1.4.0: {} + get-intrinsic@1.3.0: dependencies: call-bind-apply-helpers: 1.0.2 @@ -13643,6 +14502,8 @@ snapshots: graphql@16.12.0: {} + hachure-fill@0.5.2: {} + happy-dom@20.3.4: dependencies: '@types/node': 24.10.0 @@ -13739,6 +14600,42 @@ snapshots: dependencies: '@types/hast': 3.0.4 + hast-util-raw@9.1.0: + dependencies: + '@types/hast': 3.0.4 + '@types/unist': 3.0.3 + '@ungap/structured-clone': 1.3.0 + hast-util-from-parse5: 8.0.3 + hast-util-to-parse5: 8.0.1 + html-void-elements: 3.0.0 + mdast-util-to-hast: 13.2.1 + parse5: 7.3.0 + 
unist-util-position: 5.0.0 + unist-util-visit: 5.0.0 + vfile: 6.0.3 + web-namespaces: 2.0.1 + zwitch: 2.0.4 + + hast-util-sanitize@5.0.2: + dependencies: + '@types/hast': 3.0.4 + '@ungap/structured-clone': 1.3.0 + unist-util-position: 5.0.0 + + hast-util-to-html@9.0.5: + dependencies: + '@types/hast': 3.0.4 + '@types/unist': 3.0.3 + ccount: 2.0.1 + comma-separated-tokens: 2.0.3 + hast-util-whitespace: 3.0.0 + html-void-elements: 3.0.0 + mdast-util-to-hast: 13.2.1 + property-information: 7.1.0 + space-separated-tokens: 2.0.2 + stringify-entities: 4.0.4 + zwitch: 2.0.4 + hast-util-to-jsx-runtime@2.3.6: dependencies: '@types/estree': 1.0.8 @@ -13759,6 +14656,16 @@ snapshots: transitivePeerDependencies: - supports-color + hast-util-to-parse5@8.0.1: + dependencies: + '@types/hast': 3.0.4 + comma-separated-tokens: 2.0.3 + devlop: 1.1.0 + property-information: 7.1.0 + space-separated-tokens: 2.0.2 + web-namespaces: 2.0.1 + zwitch: 2.0.4 + hast-util-to-string@3.0.1: dependencies: '@types/hast': 3.0.4 @@ -13819,6 +14726,8 @@ snapshots: html-url-attributes@3.0.1: {} + html-void-elements@3.0.0: {} + html-webpack-plugin@5.6.5(webpack@5.104.1(esbuild@0.25.12)): dependencies: '@types/html-minifier-terser': 6.1.0 @@ -13865,6 +14774,10 @@ snapshots: human-signals@2.1.0: {} + iconv-lite@0.6.3: + dependencies: + safer-buffer: 2.1.2 + icss-utils@5.1.0(postcss@8.5.6): dependencies: postcss: 8.5.6 @@ -13916,6 +14829,8 @@ snapshots: hasown: 2.0.2 side-channel: 1.1.0 + internmap@1.0.1: {} + internmap@2.0.3: {} is-alphabetical@2.0.1: {} @@ -14164,6 +15079,8 @@ snapshots: json-schema-traverse@1.0.0: {} + json-schema@0.4.0: {} + json-stable-stringify-without-jsonify@1.0.1: {} json5@1.0.2: @@ -14207,10 +15124,24 @@ snapshots: dependencies: commander: 8.3.0 + katex@0.16.28: + dependencies: + commander: 8.3.0 + keyv@4.5.4: dependencies: json-buffer: 3.0.1 + khroma@2.1.0: {} + + langium@3.3.1: + dependencies: + chevrotain: 11.0.3 + chevrotain-allstar: 0.3.1(chevrotain@11.0.3) + 
vscode-languageserver: 9.0.1 + vscode-languageserver-textdocument: 1.0.12 + vscode-uri: 3.0.8 + language-subtag-registry@0.3.23: {} language-tags@1.0.9: @@ -14236,6 +15167,10 @@ snapshots: react: 18.3.1 react-dom: 18.3.1(react@18.3.1) + layout-base@1.0.2: {} + + layout-base@2.0.1: {} + leven@3.1.0: {} levn@0.4.1: @@ -14273,7 +15208,7 @@ snapshots: dependencies: p-locate: 6.0.0 - lodash-es@4.17.22: {} + lodash-es@4.17.23: {} lodash.camelcase@4.3.0: {} @@ -14363,6 +15298,10 @@ snapshots: optionalDependencies: react: 18.3.1 + marked@16.4.2: {} + + marked@17.0.1: {} + math-intrinsics@1.1.0: {} md5.js@1.3.5: @@ -14549,6 +15488,29 @@ snapshots: merge2@1.4.1: {} + mermaid@11.12.2: + dependencies: + '@braintree/sanitize-url': 7.1.2 + '@iconify/utils': 3.1.0 + '@mermaid-js/parser': 0.6.3 + '@types/d3': 7.4.3 + cytoscape: 3.33.1 + cytoscape-cose-bilkent: 4.1.0(cytoscape@3.33.1) + cytoscape-fcose: 2.2.0(cytoscape@3.33.1) + d3: 7.9.0 + d3-sankey: 0.12.3 + dagre-d3-es: 7.0.13 + dayjs: 1.11.19 + dompurify: 3.3.1 + katex: 0.16.25 + khroma: 2.1.0 + lodash-es: 4.17.23 + marked: 16.4.2 + roughjs: 4.6.6 + stylis: 4.3.6 + ts-dedent: 2.2.0 + uuid: 11.1.0 + micromark-core-commonmark@2.0.3: dependencies: decode-named-character-reference: 1.2.0 @@ -14568,6 +15530,38 @@ snapshots: micromark-util-symbol: 2.0.1 micromark-util-types: 2.0.2 + micromark-extension-cjk-friendly-gfm-strikethrough@1.2.3(micromark-util-types@2.0.2)(micromark@4.0.2): + dependencies: + devlop: 1.1.0 + get-east-asian-width: 1.4.0 + micromark: 4.0.2 + micromark-extension-cjk-friendly-util: 2.1.1(micromark-util-types@2.0.2) + micromark-util-character: 2.1.1 + micromark-util-chunked: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-symbol: 2.0.1 + optionalDependencies: + micromark-util-types: 2.0.2 + + micromark-extension-cjk-friendly-util@2.1.1(micromark-util-types@2.0.2): + dependencies: + get-east-asian-width: 1.4.0 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + optionalDependencies: + 
micromark-util-types: 2.0.2 + + micromark-extension-cjk-friendly@1.2.3(micromark-util-types@2.0.2)(micromark@4.0.2): + dependencies: + devlop: 1.1.0 + micromark: 4.0.2 + micromark-extension-cjk-friendly-util: 2.1.1(micromark-util-types@2.0.2) + micromark-util-chunked: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-symbol: 2.0.1 + optionalDependencies: + micromark-util-types: 2.0.2 + micromark-extension-gfm-autolink-literal@2.1.0: dependencies: micromark-util-character: 2.1.1 @@ -14790,9 +15784,14 @@ snapshots: minipass@7.1.2: {} - module-details-from-path@1.0.4: {} + mlly@1.8.0: + dependencies: + acorn: 8.15.0 + pathe: 2.0.3 + pkg-types: 1.3.1 + ufo: 1.6.3 - moment@2.30.1: {} + module-details-from-path@1.0.4: {} motion-dom@12.24.8: dependencies: @@ -15049,6 +16048,14 @@ snapshots: dependencies: mimic-fn: 2.1.0 + oniguruma-parser@0.12.1: {} + + oniguruma-to-es@4.3.4: + dependencies: + oniguruma-parser: 0.12.1 + regex: 6.1.0 + regex-recursion: 6.0.2 + open@8.4.2: dependencies: define-lazy-prop: 2.0.0 @@ -15143,6 +16150,8 @@ snapshots: package-json-from-dist@1.0.1: {} + package-manager-detector@1.6.0: {} + pako@1.0.11: {} param-case@3.0.4: @@ -15197,6 +16206,8 @@ snapshots: path-browserify@1.0.1: {} + path-data-parser@0.1.0: {} + path-exists@4.0.0: {} path-exists@5.0.0: {} @@ -15259,6 +16270,12 @@ snapshots: dependencies: find-up: 6.3.0 + pkg-types@1.3.1: + dependencies: + confbox: 0.1.8 + mlly: 1.8.0 + pathe: 2.0.3 + playwright-core@1.56.1: {} playwright@1.56.1: @@ -15267,6 +16284,13 @@ snapshots: optionalDependencies: fsevents: 2.3.2 + points-on-curve@0.2.0: {} + + points-on-path@0.2.1: + dependencies: + path-data-parser: 0.1.0 + points-on-curve: 0.2.0 + pony-cause@1.1.1: {} possible-typed-array-names@1.1.0: {} @@ -15693,6 +16717,16 @@ snapshots: regex-parser@2.3.1: {} + regex-recursion@6.0.2: + dependencies: + regex-utilities: 2.3.0 + + regex-utilities@2.3.0: {} + + regex@6.1.0: + dependencies: + regex-utilities: 2.3.0 + regexp.prototype.flags@1.5.4: 
dependencies: call-bind: 1.0.8 @@ -15726,6 +16760,10 @@ snapshots: unified: 11.0.5 unist-util-visit: 5.0.0 + rehype-harden@1.1.7: + dependencies: + unist-util-visit: 5.0.0 + rehype-highlight@7.0.2: dependencies: '@types/hast': 3.0.4 @@ -15744,6 +16782,17 @@ snapshots: unist-util-visit-parents: 6.0.2 vfile: 6.0.3 + rehype-raw@7.0.0: + dependencies: + '@types/hast': 3.0.4 + hast-util-raw: 9.1.0 + vfile: 6.0.3 + + rehype-sanitize@6.0.0: + dependencies: + '@types/hast': 3.0.4 + hast-util-sanitize: 5.0.2 + rehype-slug@6.0.0: dependencies: '@types/hast': 3.0.4 @@ -15754,6 +16803,26 @@ snapshots: relateurl@0.2.7: {} + remark-cjk-friendly-gfm-strikethrough@1.2.3(@types/mdast@4.0.4)(micromark-util-types@2.0.2)(micromark@4.0.2)(unified@11.0.5): + dependencies: + micromark-extension-cjk-friendly-gfm-strikethrough: 1.2.3(micromark-util-types@2.0.2)(micromark@4.0.2) + unified: 11.0.5 + optionalDependencies: + '@types/mdast': 4.0.4 + transitivePeerDependencies: + - micromark + - micromark-util-types + + remark-cjk-friendly@1.2.3(@types/mdast@4.0.4)(micromark-util-types@2.0.2)(micromark@4.0.2)(unified@11.0.5): + dependencies: + micromark-extension-cjk-friendly: 1.2.3(micromark-util-types@2.0.2)(micromark@4.0.2) + unified: 11.0.5 + optionalDependencies: + '@types/mdast': 4.0.4 + transitivePeerDependencies: + - micromark + - micromark-util-types + remark-gfm@4.0.1: dependencies: '@types/mdast': 4.0.4 @@ -15797,6 +16866,8 @@ snapshots: mdast-util-to-markdown: 2.1.2 unified: 11.0.5 + remend@1.1.0: {} + renderkid@3.0.0: dependencies: css-select: 4.3.0 @@ -15861,6 +16932,8 @@ snapshots: hash-base: 3.1.2 inherits: 2.0.4 + robust-predicates@3.0.2: {} + rollup@4.55.1: dependencies: '@types/estree': 1.0.8 @@ -15892,10 +16965,19 @@ snapshots: '@rollup/rollup-win32-x64-msvc': 4.55.1 fsevents: 2.3.3 + roughjs@4.6.6: + dependencies: + hachure-fill: 0.5.2 + path-data-parser: 0.1.0 + points-on-curve: 0.2.0 + points-on-path: 0.2.1 + run-parallel@1.2.0: dependencies: queue-microtask: 1.2.3 + 
rw@1.3.3: {} + rxjs@7.8.2: dependencies: tslib: 2.8.1 @@ -15925,6 +17007,8 @@ snapshots: safe-stable-stringify@1.1.1: {} + safer-buffer@2.1.2: {} + sass-loader@16.0.6(webpack@5.104.1(esbuild@0.25.12)): dependencies: neo-async: 2.6.2 @@ -16037,6 +17121,17 @@ snapshots: '@scarf/scarf': 1.4.0 deepmerge-ts: 7.1.5 + shiki@3.21.0: + dependencies: + '@shikijs/core': 3.21.0 + '@shikijs/engine-javascript': 3.21.0 + '@shikijs/engine-oniguruma': 3.21.0 + '@shikijs/langs': 3.21.0 + '@shikijs/themes': 3.21.0 + '@shikijs/types': 3.21.0 + '@shikijs/vscode-textmate': 10.0.2 + '@types/hast': 3.0.4 + should-equal@2.0.0: dependencies: should-type: 1.4.0 @@ -16176,6 +17271,26 @@ snapshots: readable-stream: 3.6.2 xtend: 4.0.2 + streamdown@2.1.0(react@18.3.1): + dependencies: + clsx: 2.1.1 + hast-util-to-jsx-runtime: 2.3.6 + html-url-attributes: 3.0.1 + marked: 17.0.1 + react: 18.3.1 + rehype-harden: 1.1.7 + rehype-raw: 7.0.0 + rehype-sanitize: 6.0.0 + remark-gfm: 4.0.1 + remark-parse: 11.0.0 + remark-rehype: 11.1.2 + remend: 1.1.0 + tailwind-merge: 3.4.0 + unified: 11.0.5 + unist-util-visit: 5.0.0 + transitivePeerDependencies: + - supports-color + strict-event-emitter@0.5.1: {} string-argv@0.3.2: {} @@ -16301,6 +17416,8 @@ snapshots: optionalDependencies: '@babel/core': 7.28.5 + stylis@4.3.6: {} + sucrase@3.35.1: dependencies: '@jridgewell/gen-mapping': 0.3.13 @@ -16337,11 +17454,19 @@ snapshots: transitivePeerDependencies: - encoding + swr@2.3.8(react@18.3.1): + dependencies: + dequal: 2.0.3 + react: 18.3.1 + use-sync-external-store: 1.6.0(react@18.3.1) + symbol-tree@3.2.4: optional: true tailwind-merge@2.6.0: {} + tailwind-merge@3.4.0: {} + tailwind-scrollbar@3.1.0(tailwindcss@3.4.17): dependencies: tailwindcss: 3.4.17 @@ -16409,6 +17534,8 @@ snapshots: third-party-capital@1.0.20: {} + throttleit@2.1.0: {} + timers-browserify@2.0.12: dependencies: setimmediate: 1.0.5 @@ -16571,6 +17698,8 @@ snapshots: uc.micro@2.1.0: {} + ufo@1.6.3: {} + unbox-primitive@1.1.0: dependencies: 
call-bound: 1.0.4 @@ -16708,6 +17837,10 @@ snapshots: optionalDependencies: '@types/react': 18.3.17 + use-stick-to-bottom@1.1.2(react@18.3.1): + dependencies: + react: 18.3.1 + use-sync-external-store@1.6.0(react@18.3.1): dependencies: react: 18.3.1 @@ -16843,6 +17976,23 @@ snapshots: vm-browserify@1.1.2: {} + vscode-jsonrpc@8.2.0: {} + + vscode-languageserver-protocol@3.17.5: + dependencies: + vscode-jsonrpc: 8.2.0 + vscode-languageserver-types: 3.17.5 + + vscode-languageserver-textdocument@1.0.12: {} + + vscode-languageserver-types@3.17.5: {} + + vscode-languageserver@9.0.1: + dependencies: + vscode-languageserver-protocol: 3.17.5 + + vscode-uri@3.0.8: {} + w3c-xmlserializer@5.0.0: dependencies: xml-name-validator: 5.0.0 diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearchBar/useBlockMenuSearchBar.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearchBar/useBlockMenuSearchBar.ts index ab1af16584..e98d240215 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearchBar/useBlockMenuSearchBar.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearchBar/useBlockMenuSearchBar.ts @@ -1,4 +1,4 @@ -import { debounce } from "lodash"; +import debounce from "lodash/debounce"; import { useCallback, useEffect, useRef, useState } from "react"; import { useBlockMenuStore } from "../../../../stores/blockMenuStore"; import { getQueryClient } from "@/lib/react-query/queryClient"; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/HorizontalScroll.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/HorizontalScroll.tsx index 0f953394e6..bee0b85721 100644 --- 
a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/HorizontalScroll.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/HorizontalScroll.tsx @@ -70,10 +70,10 @@ export const HorizontalScroll: React.FC = ({ {children} {canScrollLeft && ( -
+
)} {canScrollRight && ( -
+
)} {canScrollLeft && ( +
+ + + )} + + {!isCollapsed && ( + + + Your chats + +
+ +
+
+ )} + + {!isCollapsed && ( + + {isLoadingSessions ? ( +
+ +
+ ) : sessions.length === 0 ? ( +

+ No conversations yet +

+ ) : ( + sessions.map((session) => ( + + )) + )} +
+ )} +
+ {!isCollapsed && sessionId && ( + + + + + + )} + + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotChatActionsProvider/CopilotChatActionsProvider.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotChatActionsProvider/CopilotChatActionsProvider.tsx new file mode 100644 index 0000000000..5c80348e8c --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotChatActionsProvider/CopilotChatActionsProvider.tsx @@ -0,0 +1,16 @@ +"use client"; + +import { CopilotChatActionsContext } from "./useCopilotChatActions"; + +interface Props { + onSend: (message: string) => void | Promise; + children: React.ReactNode; +} + +export function CopilotChatActionsProvider({ onSend, children }: Props) { + return ( + + {children} + + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotChatActionsProvider/useCopilotChatActions.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotChatActionsProvider/useCopilotChatActions.ts new file mode 100644 index 0000000000..31b27c0f6e --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotChatActionsProvider/useCopilotChatActions.ts @@ -0,0 +1,23 @@ +"use client"; + +import { createContext, useContext } from "react"; + +interface CopilotChatActions { + onSend: (message: string) => void | Promise; +} + +const CopilotChatActionsContext = createContext( + null, +); + +export function useCopilotChatActions(): CopilotChatActions { + const ctx = useContext(CopilotChatActionsContext); + if (!ctx) { + throw new Error( + "useCopilotChatActions must be used within CopilotChatActionsProvider", + ); + } + return ctx; +} + +export { CopilotChatActionsContext }; diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx deleted 
file mode 100644 index 3f695da5ed..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx +++ /dev/null @@ -1,99 +0,0 @@ -"use client"; - -import { ChatLoader } from "@/components/contextual/Chat/components/ChatLoader/ChatLoader"; -import { Text } from "@/components/atoms/Text/Text"; -import { NAVBAR_HEIGHT_PX } from "@/lib/constants"; -import type { ReactNode } from "react"; -import { DesktopSidebar } from "./components/DesktopSidebar/DesktopSidebar"; -import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer"; -import { MobileHeader } from "./components/MobileHeader/MobileHeader"; -import { useCopilotShell } from "./useCopilotShell"; - -interface Props { - children: ReactNode; -} - -export function CopilotShell({ children }: Props) { - const { - isMobile, - isDrawerOpen, - isLoading, - isCreatingSession, - isLoggedIn, - hasActiveSession, - sessions, - currentSessionId, - handleOpenDrawer, - handleCloseDrawer, - handleDrawerOpenChange, - handleNewChatClick, - handleSessionClick, - hasNextPage, - isFetchingNextPage, - fetchNextPage, - } = useCopilotShell(); - - if (!isLoggedIn) { - return ( -
- -
- ); - } - - return ( -
- {!isMobile && ( - - )} - -
- {isMobile && } -
- {isCreatingSession ? ( -
-
- - - Creating your chat... - -
-
- ) : ( - children - )} -
-
- - {isMobile && ( - - )} -
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/DesktopSidebar/DesktopSidebar.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/DesktopSidebar/DesktopSidebar.tsx deleted file mode 100644 index 122a09a02f..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/DesktopSidebar/DesktopSidebar.tsx +++ /dev/null @@ -1,70 +0,0 @@ -import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; -import { Button } from "@/components/atoms/Button/Button"; -import { Text } from "@/components/atoms/Text/Text"; -import { scrollbarStyles } from "@/components/styles/scrollbars"; -import { cn } from "@/lib/utils"; -import { Plus } from "@phosphor-icons/react"; -import { SessionsList } from "../SessionsList/SessionsList"; - -interface Props { - sessions: SessionSummaryResponse[]; - currentSessionId: string | null; - isLoading: boolean; - hasNextPage: boolean; - isFetchingNextPage: boolean; - onSelectSession: (sessionId: string) => void; - onFetchNextPage: () => void; - onNewChat: () => void; - hasActiveSession: boolean; -} - -export function DesktopSidebar({ - sessions, - currentSessionId, - isLoading, - hasNextPage, - isFetchingNextPage, - onSelectSession, - onFetchNextPage, - onNewChat, - hasActiveSession, -}: Props) { - return ( - - ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/MobileDrawer.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/MobileDrawer.tsx deleted file mode 100644 index ea3b39f829..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/MobileDrawer.tsx +++ /dev/null @@ -1,91 +0,0 @@ -import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; 
-import { Button } from "@/components/atoms/Button/Button"; -import { scrollbarStyles } from "@/components/styles/scrollbars"; -import { cn } from "@/lib/utils"; -import { PlusIcon, X } from "@phosphor-icons/react"; -import { Drawer } from "vaul"; -import { SessionsList } from "../SessionsList/SessionsList"; - -interface Props { - isOpen: boolean; - sessions: SessionSummaryResponse[]; - currentSessionId: string | null; - isLoading: boolean; - hasNextPage: boolean; - isFetchingNextPage: boolean; - onSelectSession: (sessionId: string) => void; - onFetchNextPage: () => void; - onNewChat: () => void; - onClose: () => void; - onOpenChange: (open: boolean) => void; - hasActiveSession: boolean; -} - -export function MobileDrawer({ - isOpen, - sessions, - currentSessionId, - isLoading, - hasNextPage, - isFetchingNextPage, - onSelectSession, - onFetchNextPage, - onNewChat, - onClose, - onOpenChange, - hasActiveSession, -}: Props) { - return ( - - - - -
-
- - Your chats - - -
-
-
- -
- {hasActiveSession && ( -
- -
- )} -
-
-
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/useMobileDrawer.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/useMobileDrawer.ts deleted file mode 100644 index 2ef63a4422..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/useMobileDrawer.ts +++ /dev/null @@ -1,24 +0,0 @@ -import { useState } from "react"; - -export function useMobileDrawer() { - const [isDrawerOpen, setIsDrawerOpen] = useState(false); - - const handleOpenDrawer = () => { - setIsDrawerOpen(true); - }; - - const handleCloseDrawer = () => { - setIsDrawerOpen(false); - }; - - const handleDrawerOpenChange = (open: boolean) => { - setIsDrawerOpen(open); - }; - - return { - isDrawerOpen, - handleOpenDrawer, - handleCloseDrawer, - handleDrawerOpenChange, - }; -} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/SessionsList.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/SessionsList.tsx deleted file mode 100644 index ef63e1aff4..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/SessionsList.tsx +++ /dev/null @@ -1,80 +0,0 @@ -import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; -import { Skeleton } from "@/components/__legacy__/ui/skeleton"; -import { Text } from "@/components/atoms/Text/Text"; -import { InfiniteList } from "@/components/molecules/InfiniteList/InfiniteList"; -import { cn } from "@/lib/utils"; -import { getSessionTitle } from "../../helpers"; - -interface Props { - sessions: SessionSummaryResponse[]; - currentSessionId: string | null; - isLoading: boolean; - hasNextPage: boolean; - isFetchingNextPage: boolean; - onSelectSession: (sessionId: string) => void; 
- onFetchNextPage: () => void; -} - -export function SessionsList({ - sessions, - currentSessionId, - isLoading, - hasNextPage, - isFetchingNextPage, - onSelectSession, - onFetchNextPage, -}: Props) { - if (isLoading) { - return ( -
- {Array.from({ length: 5 }).map((_, i) => ( -
- -
- ))} -
- ); - } - - if (sessions.length === 0) { - return ( -
- - You don't have previous chats - -
- ); - } - - return ( - { - const isActive = session.id === currentSessionId; - return ( - - ); - }} - /> - ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts deleted file mode 100644 index 61e3e6f37f..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts +++ /dev/null @@ -1,91 +0,0 @@ -import { useGetV2ListSessions } from "@/app/api/__generated__/endpoints/chat/chat"; -import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; -import { okData } from "@/app/api/helpers"; -import { useEffect, useState } from "react"; - -const PAGE_SIZE = 50; - -export interface UseSessionsPaginationArgs { - enabled: boolean; -} - -export function useSessionsPagination({ enabled }: UseSessionsPaginationArgs) { - const [offset, setOffset] = useState(0); - - const [accumulatedSessions, setAccumulatedSessions] = useState< - SessionSummaryResponse[] - >([]); - - const [totalCount, setTotalCount] = useState(null); - - const { data, isLoading, isFetching, isError } = useGetV2ListSessions( - { limit: PAGE_SIZE, offset }, - { - query: { - enabled: enabled && offset >= 0, - }, - }, - ); - - useEffect(() => { - const responseData = okData(data); - if (responseData) { - const newSessions = responseData.sessions; - const total = responseData.total; - setTotalCount(total); - - if (offset === 0) { - setAccumulatedSessions(newSessions); - } else { - setAccumulatedSessions((prev) => [...prev, ...newSessions]); - } - } else if (!enabled) { - setAccumulatedSessions([]); - setTotalCount(null); - } - }, [data, offset, enabled]); - - const hasNextPage = - totalCount !== null && accumulatedSessions.length < totalCount; - - const areAllSessionsLoaded = - 
totalCount !== null && - accumulatedSessions.length >= totalCount && - !isFetching && - !isLoading; - - useEffect(() => { - if ( - hasNextPage && - !isFetching && - !isLoading && - !isError && - totalCount !== null - ) { - setOffset((prev) => prev + PAGE_SIZE); - } - }, [hasNextPage, isFetching, isLoading, isError, totalCount]); - - const fetchNextPage = () => { - if (hasNextPage && !isFetching) { - setOffset((prev) => prev + PAGE_SIZE); - } - }; - - const reset = () => { - // Only reset the offset - keep existing sessions visible during refetch - // The effect will replace sessions when new data arrives at offset 0 - setOffset(0); - }; - - return { - sessions: accumulatedSessions, - isLoading, - isFetching, - hasNextPage, - areAllSessionsLoaded, - totalCount, - fetchNextPage, - reset, - }; -} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts deleted file mode 100644 index ef0d414edf..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts +++ /dev/null @@ -1,106 +0,0 @@ -import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; -import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; -import { format, formatDistanceToNow, isToday } from "date-fns"; - -export function convertSessionDetailToSummary(session: SessionDetailResponse) { - return { - id: session.id, - created_at: session.created_at, - updated_at: session.updated_at, - title: undefined, - }; -} - -export function filterVisibleSessions(sessions: SessionSummaryResponse[]) { - const fiveMinutesAgo = Date.now() - 5 * 60 * 1000; - return sessions.filter((session) => { - const hasBeenUpdated = session.updated_at !== session.created_at; - - if (hasBeenUpdated) return true; - - const isRecentlyCreated = - new Date(session.created_at).getTime() 
> fiveMinutesAgo; - - return isRecentlyCreated; - }); -} - -export function getSessionTitle(session: SessionSummaryResponse) { - if (session.title) return session.title; - - const isNewSession = session.updated_at === session.created_at; - - if (isNewSession) { - const createdDate = new Date(session.created_at); - if (isToday(createdDate)) { - return "Today"; - } - return format(createdDate, "MMM d, yyyy"); - } - - return "Untitled Chat"; -} - -export function getSessionUpdatedLabel(session: SessionSummaryResponse) { - if (!session.updated_at) return ""; - return formatDistanceToNow(new Date(session.updated_at), { addSuffix: true }); -} - -export function mergeCurrentSessionIntoList( - accumulatedSessions: SessionSummaryResponse[], - currentSessionId: string | null, - currentSessionData: SessionDetailResponse | null | undefined, - recentlyCreatedSessions?: Map, -) { - const filteredSessions: SessionSummaryResponse[] = []; - const addedIds = new Set(); - - if (accumulatedSessions.length > 0) { - const visibleSessions = filterVisibleSessions(accumulatedSessions); - - if (currentSessionId) { - const currentInAll = accumulatedSessions.find( - (s) => s.id === currentSessionId, - ); - if (currentInAll) { - const isInVisible = visibleSessions.some( - (s) => s.id === currentSessionId, - ); - if (!isInVisible) { - filteredSessions.push(currentInAll); - addedIds.add(currentInAll.id); - } - } - } - - for (const session of visibleSessions) { - if (!addedIds.has(session.id)) { - filteredSessions.push(session); - addedIds.add(session.id); - } - } - } - - if (currentSessionId && currentSessionData) { - if (!addedIds.has(currentSessionId)) { - const summarySession = convertSessionDetailToSummary(currentSessionData); - filteredSessions.unshift(summarySession); - addedIds.add(currentSessionId); - } - } - - if (recentlyCreatedSessions) { - for (const [sessionId, sessionData] of recentlyCreatedSessions) { - if (!addedIds.has(sessionId)) { - filteredSessions.unshift(sessionData); - 
addedIds.add(sessionId); - } - } - } - - return filteredSessions; -} - -export function getCurrentSessionId(searchParams: URLSearchParams) { - return searchParams.get("sessionId"); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts deleted file mode 100644 index 913c4d7ded..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts +++ /dev/null @@ -1,124 +0,0 @@ -"use client"; - -import { - getGetV2GetSessionQueryKey, - getGetV2ListSessionsQueryKey, - useGetV2GetSession, -} from "@/app/api/__generated__/endpoints/chat/chat"; -import { okData } from "@/app/api/helpers"; -import { useChatStore } from "@/components/contextual/Chat/chat-store"; -import { useBreakpoint } from "@/lib/hooks/useBreakpoint"; -import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; -import { useQueryClient } from "@tanstack/react-query"; -import { usePathname, useSearchParams } from "next/navigation"; -import { useCopilotStore } from "../../copilot-page-store"; -import { useCopilotSessionId } from "../../useCopilotSessionId"; -import { useMobileDrawer } from "./components/MobileDrawer/useMobileDrawer"; -import { getCurrentSessionId } from "./helpers"; -import { useShellSessionList } from "./useShellSessionList"; - -export function useCopilotShell() { - const pathname = usePathname(); - const searchParams = useSearchParams(); - const queryClient = useQueryClient(); - const breakpoint = useBreakpoint(); - const { isLoggedIn } = useSupabase(); - const isMobile = - breakpoint === "base" || breakpoint === "sm" || breakpoint === "md"; - - const { urlSessionId, setUrlSessionId } = useCopilotSessionId(); - - const isOnHomepage = pathname === "/copilot"; - const paramSessionId = searchParams.get("sessionId"); - - const { - isDrawerOpen, - handleOpenDrawer, - handleCloseDrawer, - 
handleDrawerOpenChange, - } = useMobileDrawer(); - - const paginationEnabled = !isMobile || isDrawerOpen || !!paramSessionId; - - const currentSessionId = getCurrentSessionId(searchParams); - - const { data: currentSessionData } = useGetV2GetSession( - currentSessionId || "", - { - query: { - enabled: !!currentSessionId, - select: okData, - }, - }, - ); - - const { - sessions, - isLoading, - isSessionsFetching, - hasNextPage, - fetchNextPage, - resetPagination, - recentlyCreatedSessionsRef, - } = useShellSessionList({ - paginationEnabled, - currentSessionId, - currentSessionData, - isOnHomepage, - paramSessionId, - }); - - const stopStream = useChatStore((s) => s.stopStream); - const isCreatingSession = useCopilotStore((s) => s.isCreatingSession); - - function handleSessionClick(sessionId: string) { - if (sessionId === currentSessionId) return; - - // Stop current stream - SSE reconnection allows resuming later - if (currentSessionId) { - stopStream(currentSessionId); - } - - if (recentlyCreatedSessionsRef.current.has(sessionId)) { - queryClient.invalidateQueries({ - queryKey: getGetV2GetSessionQueryKey(sessionId), - }); - } - setUrlSessionId(sessionId, { shallow: false }); - if (isMobile) handleCloseDrawer(); - } - - function handleNewChatClick() { - // Stop current stream - SSE reconnection allows resuming later - if (currentSessionId) { - stopStream(currentSessionId); - } - - resetPagination(); - queryClient.invalidateQueries({ - queryKey: getGetV2ListSessionsQueryKey(), - }); - setUrlSessionId(null, { shallow: false }); - if (isMobile) handleCloseDrawer(); - } - - return { - isMobile, - isDrawerOpen, - isLoggedIn, - hasActiveSession: - Boolean(currentSessionId) && (!isOnHomepage || Boolean(paramSessionId)), - isLoading: isLoading || isCreatingSession, - isCreatingSession, - sessions, - currentSessionId: urlSessionId, - handleOpenDrawer, - handleCloseDrawer, - handleDrawerOpenChange, - handleNewChatClick, - handleSessionClick, - hasNextPage, - 
isFetchingNextPage: isSessionsFetching, - fetchNextPage, - }; -} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useShellSessionList.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useShellSessionList.ts deleted file mode 100644 index fb39a11096..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useShellSessionList.ts +++ /dev/null @@ -1,113 +0,0 @@ -import { getGetV2ListSessionsQueryKey } from "@/app/api/__generated__/endpoints/chat/chat"; -import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; -import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; -import { useChatStore } from "@/components/contextual/Chat/chat-store"; -import { useQueryClient } from "@tanstack/react-query"; -import { useEffect, useMemo, useRef } from "react"; -import { useSessionsPagination } from "./components/SessionsList/useSessionsPagination"; -import { - convertSessionDetailToSummary, - filterVisibleSessions, - mergeCurrentSessionIntoList, -} from "./helpers"; - -interface UseShellSessionListArgs { - paginationEnabled: boolean; - currentSessionId: string | null; - currentSessionData: SessionDetailResponse | null | undefined; - isOnHomepage: boolean; - paramSessionId: string | null; -} - -export function useShellSessionList({ - paginationEnabled, - currentSessionId, - currentSessionData, - isOnHomepage, - paramSessionId, -}: UseShellSessionListArgs) { - const queryClient = useQueryClient(); - const onStreamComplete = useChatStore((s) => s.onStreamComplete); - - const { - sessions: accumulatedSessions, - isLoading: isSessionsLoading, - isFetching: isSessionsFetching, - hasNextPage, - fetchNextPage, - reset: resetPagination, - } = useSessionsPagination({ - enabled: paginationEnabled, - }); - - const recentlyCreatedSessionsRef = useRef< - Map - >(new Map()); - - useEffect(() => { - if 
(isOnHomepage && !paramSessionId) { - queryClient.invalidateQueries({ - queryKey: getGetV2ListSessionsQueryKey(), - }); - } - }, [isOnHomepage, paramSessionId, queryClient]); - - useEffect(() => { - if (currentSessionId && currentSessionData) { - const isNewSession = - currentSessionData.updated_at === currentSessionData.created_at; - const isNotInAccumulated = !accumulatedSessions.some( - (s) => s.id === currentSessionId, - ); - if (isNewSession || isNotInAccumulated) { - const summary = convertSessionDetailToSummary(currentSessionData); - recentlyCreatedSessionsRef.current.set(currentSessionId, summary); - } - } - }, [currentSessionId, currentSessionData, accumulatedSessions]); - - useEffect(() => { - for (const sessionId of recentlyCreatedSessionsRef.current.keys()) { - if (accumulatedSessions.some((s) => s.id === sessionId)) { - recentlyCreatedSessionsRef.current.delete(sessionId); - } - } - }, [accumulatedSessions]); - - useEffect(() => { - const unsubscribe = onStreamComplete(() => { - queryClient.invalidateQueries({ - queryKey: getGetV2ListSessionsQueryKey(), - }); - }); - return unsubscribe; - }, [onStreamComplete, queryClient]); - - const sessions = useMemo( - () => - mergeCurrentSessionIntoList( - accumulatedSessions, - currentSessionId, - currentSessionData, - recentlyCreatedSessionsRef.current, - ), - [accumulatedSessions, currentSessionId, currentSessionData], - ); - - const visibleSessions = useMemo( - () => filterVisibleSessions(sessions), - [sessions], - ); - - const isLoading = isSessionsLoading && accumulatedSessions.length === 0; - - return { - sessions: visibleSessions, - isLoading, - isSessionsFetching, - hasNextPage, - fetchNextPage, - resetPagination, - recentlyCreatedSessionsRef, - }; -} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/EmptySession/EmptySession.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/EmptySession/EmptySession.tsx new file mode 100644 index 0000000000..cbbe6c570e --- 
/dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/EmptySession/EmptySession.tsx @@ -0,0 +1,111 @@ +"use client"; + +import { ChatInput } from "@/app/(platform)/copilot/components/ChatInput/ChatInput"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; +import { SpinnerGapIcon } from "@phosphor-icons/react"; +import { motion } from "framer-motion"; +import { useEffect, useState } from "react"; +import { + getGreetingName, + getInputPlaceholder, + getQuickActions, +} from "./helpers"; + +interface Props { + inputLayoutId: string; + isCreatingSession: boolean; + onCreateSession: () => void | Promise; + onSend: (message: string) => void | Promise; +} + +export function EmptySession({ + inputLayoutId, + isCreatingSession, + onSend, +}: Props) { + const { user } = useSupabase(); + const greetingName = getGreetingName(user); + const quickActions = getQuickActions(); + const [loadingAction, setLoadingAction] = useState(null); + const [inputPlaceholder, setInputPlaceholder] = useState( + getInputPlaceholder(), + ); + + useEffect(() => { + setInputPlaceholder(getInputPlaceholder(window.innerWidth)); + }, [window.innerWidth]); + + async function handleQuickActionClick(action: string) { + if (isCreatingSession || loadingAction) return; + + setLoadingAction(action); + try { + await onSend(action); + } finally { + setLoadingAction(null); + } + } + + return ( +
+ +
+ + Hey, {greetingName} + + + Tell me about your work β€” I'll find what to automate. + + +
+ + + +
+
+ +
+ {quickActions.map((action) => ( + + ))} +
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/EmptySession/helpers.ts similarity index 72% rename from autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts rename to autogpt_platform/frontend/src/app/(platform)/copilot/components/EmptySession/helpers.ts index c6e479f896..f6f8980fd4 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/EmptySession/helpers.ts @@ -1,6 +1,26 @@ -import type { User } from "@supabase/supabase-js"; +import { User } from "@supabase/supabase-js"; -export function getGreetingName(user?: User | null): string { +export function getInputPlaceholder(width?: number) { + if (!width) return "What's your role and what eats up most of your day?"; + + if (width < 500) { + return "I'm a chef and I hate..."; + } + if (width <= 1080) { + return "What's your role and what eats up most of your day?"; + } + return "What's your role and what eats up most of your day? e.g. 
'I'm a recruiter and I hate...'"; +} + +export function getQuickActions() { + return [ + "I don't know where to start, just ask me stuff", + "I do the same thing every week and it's killing me", + "Help me find where I'm wasting my time", + ]; +} + +export function getGreetingName(user?: User | null) { if (!user) return "there"; const metadata = user.user_metadata as Record | undefined; const fullName = metadata?.full_name; @@ -16,30 +36,3 @@ export function getGreetingName(user?: User | null): string { } return "there"; } - -export function buildCopilotChatUrl(prompt: string): string { - const trimmed = prompt.trim(); - if (!trimmed) return "/copilot/chat"; - const encoded = encodeURIComponent(trimmed); - return `/copilot/chat?prompt=${encoded}`; -} - -export function getQuickActions(): string[] { - return [ - "I don't know where to start, just ask me stuff", - "I do the same thing every week and it's killing me", - "Help me find where I'm wasting my time", - ]; -} - -export function getInputPlaceholder(width?: number) { - if (!width) return "What's your role and what eats up most of your day?"; - - if (width < 500) { - return "I'm a chef and I hate..."; - } - if (width <= 1080) { - return "What's your role and what eats up most of your day?"; - } - return "What's your role and what eats up most of your day? e.g. 
'I'm a recruiter and I hate...'"; -} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/MobileDrawer/MobileDrawer.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/MobileDrawer/MobileDrawer.tsx new file mode 100644 index 0000000000..80ccfc9c03 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/MobileDrawer/MobileDrawer.tsx @@ -0,0 +1,140 @@ +import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { scrollbarStyles } from "@/components/styles/scrollbars"; +import { cn } from "@/lib/utils"; +import { PlusIcon, SpinnerGapIcon, X } from "@phosphor-icons/react"; +import { Drawer } from "vaul"; + +interface Props { + isOpen: boolean; + sessions: SessionSummaryResponse[]; + currentSessionId: string | null; + isLoading: boolean; + onSelectSession: (sessionId: string) => void; + onNewChat: () => void; + onClose: () => void; + onOpenChange: (open: boolean) => void; +} + +function formatDate(dateString: string) { + const date = new Date(dateString); + const now = new Date(); + const diffMs = now.getTime() - date.getTime(); + const diffDays = Math.floor(diffMs / (1000 * 60 * 60 * 24)); + + if (diffDays === 0) return "Today"; + if (diffDays === 1) return "Yesterday"; + if (diffDays < 7) return `${diffDays} days ago`; + + const day = date.getDate(); + const ordinal = + day % 10 === 1 && day !== 11 + ? "st" + : day % 10 === 2 && day !== 12 + ? "nd" + : day % 10 === 3 && day !== 13 + ? "rd" + : "th"; + const month = date.toLocaleDateString("en-US", { month: "short" }); + const year = date.getFullYear(); + + return `${day}${ordinal} ${month} ${year}`; +} + +export function MobileDrawer({ + isOpen, + sessions, + currentSessionId, + isLoading, + onSelectSession, + onNewChat, + onClose, + onOpenChange, +}: Props) { + return ( + + + + +
+
+ + Your chats + + +
+
+
+ {isLoading ? ( +
+ +
+ ) : sessions.length === 0 ? ( +

+ No conversations yet +

+ ) : ( + sessions.map((session) => ( + + )) + )} +
+ {currentSessionId && ( +
+ +
+ )} +
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileHeader/MobileHeader.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/MobileHeader/MobileHeader.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileHeader/MobileHeader.tsx rename to autogpt_platform/frontend/src/app/(platform)/copilot/components/MobileHeader/MobileHeader.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/MorphingTextAnimation/MorphingTextAnimation.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/MorphingTextAnimation/MorphingTextAnimation.tsx new file mode 100644 index 0000000000..aac615bb2b --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/MorphingTextAnimation/MorphingTextAnimation.tsx @@ -0,0 +1,54 @@ +import { cn } from "@/lib/utils"; +import { AnimatePresence, motion } from "framer-motion"; + +interface Props { + text: string; + className?: string; +} + +export function MorphingTextAnimation({ text, className }: Props) { + const letters = text.split(""); + + return ( +
+ + + + {letters.map((char, index) => ( + + {char === " " ? "\u00A0" : char} + + ))} + + + +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/OrbitLoader/OrbitLoader.module.css b/autogpt_platform/frontend/src/app/(platform)/copilot/components/OrbitLoader/OrbitLoader.module.css new file mode 100644 index 0000000000..cd04402348 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/OrbitLoader/OrbitLoader.module.css @@ -0,0 +1,69 @@ +.loader { + position: relative; + animation: rotate 1s infinite; +} + +.loader::before, +.loader::after { + border-radius: 50%; + content: ""; + display: block; + /* 40% of container size */ + height: 40%; + width: 40%; +} + +.loader::before { + animation: ball1 1s infinite; + background-color: #a1a1aa; /* zinc-400 */ + box-shadow: calc(var(--spacing)) 0 0 #18181b; /* zinc-900 */ + margin-bottom: calc(var(--gap)); +} + +.loader::after { + animation: ball2 1s infinite; + background-color: #18181b; /* zinc-900 */ + box-shadow: calc(var(--spacing)) 0 0 #a1a1aa; /* zinc-400 */ +} + +@keyframes rotate { + 0% { + transform: rotate(0deg) scale(0.8); + } + 50% { + transform: rotate(360deg) scale(1.2); + } + 100% { + transform: rotate(720deg) scale(0.8); + } +} + +@keyframes ball1 { + 0% { + box-shadow: calc(var(--spacing)) 0 0 #18181b; + } + 50% { + box-shadow: 0 0 0 #18181b; + margin-bottom: 0; + transform: translate(calc(var(--spacing) / 2), calc(var(--spacing) / 2)); + } + 100% { + box-shadow: calc(var(--spacing)) 0 0 #18181b; + margin-bottom: calc(var(--gap)); + } +} + +@keyframes ball2 { + 0% { + box-shadow: calc(var(--spacing)) 0 0 #a1a1aa; + } + 50% { + box-shadow: 0 0 0 #a1a1aa; + margin-top: calc(var(--ball-size) * -1); + transform: translate(calc(var(--spacing) / 2), calc(var(--spacing) / 2)); + } + 100% { + box-shadow: calc(var(--spacing)) 0 0 #a1a1aa; + margin-top: 0; + } +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/OrbitLoader/OrbitLoader.tsx 
b/autogpt_platform/frontend/src/app/(platform)/copilot/components/OrbitLoader/OrbitLoader.tsx new file mode 100644 index 0000000000..cc47c16132 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/OrbitLoader/OrbitLoader.tsx @@ -0,0 +1,28 @@ +import { cn } from "@/lib/utils"; +import styles from "./OrbitLoader.module.css"; + +interface Props { + size?: number; + className?: string; +} + +export function OrbitLoader({ size = 24, className }: Props) { + const ballSize = Math.round(size * 0.4); + const spacing = Math.round(size * 0.6); + const gap = Math.round(size * 0.2); + + return ( +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ProgressBar/ProgressBar.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ProgressBar/ProgressBar.tsx new file mode 100644 index 0000000000..d251b08640 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ProgressBar/ProgressBar.tsx @@ -0,0 +1,26 @@ +import { cn } from "@/lib/utils"; + +interface Props { + value: number; + label?: string; + className?: string; +} + +export function ProgressBar({ value, label, className }: Props) { + const clamped = Math.min(100, Math.max(0, value)); + + return ( +
+
+ {label ?? "Working on it..."} + {Math.round(clamped)}% +
+
+
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/PulseLoader/PulseLoader.module.css b/autogpt_platform/frontend/src/app/(platform)/copilot/components/PulseLoader/PulseLoader.module.css new file mode 100644 index 0000000000..77ab5ae931 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/PulseLoader/PulseLoader.module.css @@ -0,0 +1,34 @@ +.loader { + position: relative; + display: inline-block; + flex-shrink: 0; +} + +.loader::before, +.loader::after { + content: ""; + box-sizing: border-box; + width: 100%; + height: 100%; + border-radius: 50%; + background: currentColor; + position: absolute; + left: 0; + top: 0; + animation: ripple 2s linear infinite; +} + +.loader::after { + animation-delay: 1s; +} + +@keyframes ripple { + 0% { + transform: scale(0); + opacity: 1; + } + 100% { + transform: scale(1); + opacity: 0; + } +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/PulseLoader/PulseLoader.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/PulseLoader/PulseLoader.tsx new file mode 100644 index 0000000000..599874daaa --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/PulseLoader/PulseLoader.tsx @@ -0,0 +1,16 @@ +import { cn } from "@/lib/utils"; +import styles from "./PulseLoader.module.css"; + +interface Props { + size?: number; + className?: string; +} + +export function PulseLoader({ size = 24, className }: Props) { + return ( +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/SpinnerLoader/SpinnerLoader.module.css b/autogpt_platform/frontend/src/app/(platform)/copilot/components/SpinnerLoader/SpinnerLoader.module.css new file mode 100644 index 0000000000..ee456bfac4 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/SpinnerLoader/SpinnerLoader.module.css @@ -0,0 +1,57 @@ +.loader { + position: relative; + display: inline-block; + flex-shrink: 0; + transform: rotateZ(45deg); + perspective: 1000px; + border-radius: 50%; + color: currentColor; +} + +.loader::before, +.loader::after { + content: ""; + display: block; + position: absolute; + top: 0; + left: 0; + width: inherit; + height: inherit; + border-radius: 50%; + transform: rotateX(70deg); + animation: spin 1s linear infinite; +} + +.loader::after { + color: var(--spinner-accent, #a855f7); + transform: rotateY(70deg); + animation-delay: 0.4s; +} + +@keyframes spin { + 0%, + 100% { + box-shadow: 0.2em 0 0 0 currentColor; + } + 12% { + box-shadow: 0.2em 0.2em 0 0 currentColor; + } + 25% { + box-shadow: 0 0.2em 0 0 currentColor; + } + 37% { + box-shadow: -0.2em 0.2em 0 0 currentColor; + } + 50% { + box-shadow: -0.2em 0 0 0 currentColor; + } + 62% { + box-shadow: -0.2em -0.2em 0 0 currentColor; + } + 75% { + box-shadow: 0 -0.2em 0 0 currentColor; + } + 87% { + box-shadow: 0.2em -0.2em 0 0 currentColor; + } +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/SpinnerLoader/SpinnerLoader.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/SpinnerLoader/SpinnerLoader.tsx new file mode 100644 index 0000000000..d921b5f778 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/SpinnerLoader/SpinnerLoader.tsx @@ -0,0 +1,16 @@ +import { cn } from "@/lib/utils"; +import styles from "./SpinnerLoader.module.css"; + +interface Props { + size?: number; + className?: string; +} + +export function SpinnerLoader({ 
size = 24, className }: Props) { + return ( +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ToolAccordion/AccordionContent.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ToolAccordion/AccordionContent.tsx new file mode 100644 index 0000000000..987941eee1 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ToolAccordion/AccordionContent.tsx @@ -0,0 +1,235 @@ +import { Link } from "@/components/atoms/Link/Link"; +import { Text } from "@/components/atoms/Text/Text"; +import { cn } from "@/lib/utils"; + +/* ------------------------------------------------------------------ */ +/* Layout */ +/* ------------------------------------------------------------------ */ + +export function ContentGrid({ + children, + className, +}: { + children: React.ReactNode; + className?: string; +}) { + return
{children}
; +} + +/* ------------------------------------------------------------------ */ +/* Card */ +/* ------------------------------------------------------------------ */ + +export function ContentCard({ + children, + className, +}: { + children: React.ReactNode; + className?: string; +}) { + return ( +
+
{children}
+
+ ); +} + +/** Flex row with a left content area (`children`) and an optional right‑side `action`. */ +export function ContentCardHeader({ + children, + action, + className, +}: { + children: React.ReactNode; + action?: React.ReactNode; + className?: string; +}) { + return ( +
+
{children}
+ {action} +
+ ); +} + +export function ContentCardTitle({ + children, + className, +}: { + children: React.ReactNode; + className?: string; +}) { + return ( + + {children} + + ); +} + +export function ContentCardSubtitle({ + children, + className, +}: { + children: React.ReactNode; + className?: string; +}) { + return ( + + {children} + + ); +} + +export function ContentCardDescription({ + children, + className, +}: { + children: React.ReactNode; + className?: string; +}) { + return ( + + {children} + + ); +} + +/* ------------------------------------------------------------------ */ +/* Text */ +/* ------------------------------------------------------------------ */ + +export function ContentMessage({ + children, + className, +}: { + children: React.ReactNode; + className?: string; +}) { + return ( + + {children} + + ); +} + +export function ContentHint({ + children, + className, +}: { + children: React.ReactNode; + className?: string; +}) { + return ( + + {children} + + ); +} + +/* ------------------------------------------------------------------ */ +/* Code / data */ +/* ------------------------------------------------------------------ */ + +export function ContentCodeBlock({ + children, + className, +}: { + children: React.ReactNode; + className?: string; +}) { + return ( +
+      {children}
+    
+ ); +} + +/* ------------------------------------------------------------------ */ +/* Inline elements */ +/* ------------------------------------------------------------------ */ + +export function ContentBadge({ + children, + className, +}: { + children: React.ReactNode; + className?: string; +}) { + return ( + + {children} + + ); +} + +export function ContentLink({ + href, + children, + className, + ...rest +}: Omit, "className"> & { + className?: string; +}) { + return ( + + {children} + + ); +} + +/* ------------------------------------------------------------------ */ +/* Lists */ +/* ------------------------------------------------------------------ */ + +export function ContentSuggestionsList({ + items, + max = 5, + className, +}: { + items: string[]; + max?: number; + className?: string; +}) { + if (items.length === 0) return null; + return ( +
    + {items.slice(0, max).map((s) => ( +
  • {s}
  • + ))} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ToolAccordion/ToolAccordion.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ToolAccordion/ToolAccordion.tsx new file mode 100644 index 0000000000..e53df15e6c --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ToolAccordion/ToolAccordion.tsx @@ -0,0 +1,102 @@ +"use client"; + +import { cn } from "@/lib/utils"; +import { CaretDownIcon } from "@phosphor-icons/react"; +import { AnimatePresence, motion, useReducedMotion } from "framer-motion"; +import { useId } from "react"; +import { useToolAccordion } from "./useToolAccordion"; + +interface Props { + icon: React.ReactNode; + title: React.ReactNode; + titleClassName?: string; + description?: React.ReactNode; + children: React.ReactNode; + className?: string; + defaultExpanded?: boolean; + expanded?: boolean; + onExpandedChange?: (expanded: boolean) => void; +} + +export function ToolAccordion({ + icon, + title, + titleClassName, + description, + children, + className, + defaultExpanded, + expanded, + onExpandedChange, +}: Props) { + const shouldReduceMotion = useReducedMotion(); + const contentId = useId(); + const { isExpanded, toggle } = useToolAccordion({ + expanded, + defaultExpanded, + onExpandedChange, + }); + + return ( +
+ + + + {isExpanded && ( + +
{children}
+
+ )} +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ToolAccordion/useToolAccordion.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ToolAccordion/useToolAccordion.ts new file mode 100644 index 0000000000..bc2a177e8d --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ToolAccordion/useToolAccordion.ts @@ -0,0 +1,32 @@ +import { useState } from "react"; + +interface UseToolAccordionOptions { + expanded?: boolean; + defaultExpanded?: boolean; + onExpandedChange?: (expanded: boolean) => void; +} + +interface UseToolAccordionResult { + isExpanded: boolean; + toggle: () => void; +} + +export function useToolAccordion({ + expanded, + defaultExpanded = false, + onExpandedChange, +}: UseToolAccordionOptions): UseToolAccordionResult { + const [uncontrolledExpanded, setUncontrolledExpanded] = + useState(defaultExpanded); + + const isControlled = typeof expanded === "boolean"; + const isExpanded = isControlled ? expanded : uncontrolledExpanded; + + function toggle() { + const next = !isExpanded; + if (!isControlled) setUncontrolledExpanded(next); + onExpandedChange?.(next); + } + + return { isExpanded, toggle }; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts deleted file mode 100644 index 9fc97a14e3..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts +++ /dev/null @@ -1,56 +0,0 @@ -"use client"; - -import { create } from "zustand"; - -interface CopilotStoreState { - isStreaming: boolean; - isSwitchingSession: boolean; - isCreatingSession: boolean; - isInterruptModalOpen: boolean; - pendingAction: (() => void) | null; -} - -interface CopilotStoreActions { - setIsStreaming: (isStreaming: boolean) => void; - setIsSwitchingSession: (isSwitchingSession: boolean) => void; - setIsCreatingSession: (isCreating: boolean) => void; - 
openInterruptModal: (onConfirm: () => void) => void; - confirmInterrupt: () => void; - cancelInterrupt: () => void; -} - -type CopilotStore = CopilotStoreState & CopilotStoreActions; - -export const useCopilotStore = create((set, get) => ({ - isStreaming: false, - isSwitchingSession: false, - isCreatingSession: false, - isInterruptModalOpen: false, - pendingAction: null, - - setIsStreaming(isStreaming) { - set({ isStreaming }); - }, - - setIsSwitchingSession(isSwitchingSession) { - set({ isSwitchingSession }); - }, - - setIsCreatingSession(isCreatingSession) { - set({ isCreatingSession }); - }, - - openInterruptModal(onConfirm) { - set({ isInterruptModalOpen: true, pendingAction: onConfirm }); - }, - - confirmInterrupt() { - const { pendingAction } = get(); - set({ isInterruptModalOpen: false, pendingAction: null }); - if (pendingAction) pendingAction(); - }, - - cancelInterrupt() { - set({ isInterruptModalOpen: false, pendingAction: null }); - }, -})); diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/helpers/convertChatSessionToUiMessages.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/helpers/convertChatSessionToUiMessages.ts new file mode 100644 index 0000000000..a3f2bc28bf --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/helpers/convertChatSessionToUiMessages.ts @@ -0,0 +1,128 @@ +import type { UIMessage, UIDataTypes, UITools } from "ai"; + +interface SessionChatMessage { + role: string; + content: string | null; + tool_call_id: string | null; + tool_calls: unknown[] | null; +} + +function coerceSessionChatMessages( + rawMessages: unknown[], +): SessionChatMessage[] { + return rawMessages + .map((m) => { + if (!m || typeof m !== "object") return null; + const msg = m as Record; + + const role = typeof msg.role === "string" ? msg.role : null; + if (!role) return null; + + return { + role, + content: + typeof msg.content === "string" + ? msg.content + : msg.content == null + ? 
null + : String(msg.content), + tool_call_id: + typeof msg.tool_call_id === "string" + ? msg.tool_call_id + : msg.tool_call_id == null + ? null + : String(msg.tool_call_id), + tool_calls: Array.isArray(msg.tool_calls) ? msg.tool_calls : null, + }; + }) + .filter((m): m is SessionChatMessage => m !== null); +} + +function safeJsonParse(value: string): unknown { + try { + return JSON.parse(value) as unknown; + } catch { + return value; + } +} + +function toToolInput(rawArguments: unknown): unknown { + if (typeof rawArguments === "string") { + const trimmed = rawArguments.trim(); + return trimmed ? safeJsonParse(trimmed) : {}; + } + if (rawArguments && typeof rawArguments === "object") return rawArguments; + return {}; +} + +export function convertChatSessionMessagesToUiMessages( + sessionId: string, + rawMessages: unknown[], +): UIMessage[] { + const messages = coerceSessionChatMessages(rawMessages); + const toolOutputsByCallId = new Map(); + + for (const msg of messages) { + if (msg.role !== "tool") continue; + if (!msg.tool_call_id) continue; + if (msg.content == null) continue; + toolOutputsByCallId.set(msg.tool_call_id, msg.content); + } + + const uiMessages: UIMessage[] = []; + + messages.forEach((msg, index) => { + if (msg.role === "tool") return; + if (msg.role !== "user" && msg.role !== "assistant") return; + + const parts: UIMessage["parts"] = []; + + if (typeof msg.content === "string" && msg.content.trim()) { + parts.push({ type: "text", text: msg.content, state: "done" }); + } + + if (msg.role === "assistant" && Array.isArray(msg.tool_calls)) { + for (const rawToolCall of msg.tool_calls) { + if (!rawToolCall || typeof rawToolCall !== "object") continue; + const toolCall = rawToolCall as { + id?: unknown; + function?: { name?: unknown; arguments?: unknown }; + }; + + const toolCallId = String(toolCall.id ?? "").trim(); + const toolName = String(toolCall.function?.name ?? 
"").trim(); + if (!toolCallId || !toolName) continue; + + const input = toToolInput(toolCall.function?.arguments); + const output = toolOutputsByCallId.get(toolCallId); + + if (output !== undefined) { + parts.push({ + type: `tool-${toolName}`, + toolCallId, + state: "output-available", + input, + output: typeof output === "string" ? safeJsonParse(output) : output, + }); + } else { + parts.push({ + type: `tool-${toolName}`, + toolCallId, + state: "input-available", + input, + }); + } + } + } + + if (parts.length === 0) return; + + uiMessages.push({ + id: `${sessionId}-${index}`, + role: msg.role, + parts, + }); + }); + + return uiMessages; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotSessionId.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/hooks/Untitled similarity index 99% rename from autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotSessionId.ts rename to autogpt_platform/frontend/src/app/(platform)/copilot/hooks/Untitled index 87f9b7d3ae..13769eb726 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotSessionId.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/hooks/Untitled @@ -7,4 +7,4 @@ export function useCopilotSessionId() { ); return { urlSessionId, setUrlSessionId }; -} +} \ No newline at end of file diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolCallMessage/useAsymptoticProgress.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/hooks/useAsymptoticProgress.ts similarity index 83% rename from autogpt_platform/frontend/src/components/contextual/Chat/components/ToolCallMessage/useAsymptoticProgress.ts rename to autogpt_platform/frontend/src/app/(platform)/copilot/hooks/useAsymptoticProgress.ts index cf1b89e7c4..408ec74175 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolCallMessage/useAsymptoticProgress.ts +++ 
b/autogpt_platform/frontend/src/app/(platform)/copilot/hooks/useAsymptoticProgress.ts @@ -5,17 +5,16 @@ import { useEffect, useRef, useState } from "react"; * asymptotically approaching but never reaching the max value. * * Uses a half-life formula: progress = max * (1 - 0.5^(time/halfLife)) - * This creates the "game loading bar" effect where: + * This creates a "loading bar" effect where: * - 50% is reached at halfLifeSeconds * - 75% is reached at 2 * halfLifeSeconds * - 87.5% is reached at 3 * halfLifeSeconds - * - and so on... * * @param isActive - Whether the progress should be animating * @param halfLifeSeconds - Time in seconds to reach 50% progress (default: 30) * @param maxProgress - Maximum progress value to approach (default: 100) * @param intervalMs - Update interval in milliseconds (default: 100) - * @returns Current progress value (0-maxProgress) + * @returns Current progress value (0–maxProgress) */ export function useAsymptoticProgress( isActive: boolean, @@ -35,8 +34,6 @@ export function useAsymptoticProgress( const interval = setInterval(() => { elapsedTimeRef.current += intervalMs / 1000; - // Half-life approach: progress = max * (1 - 0.5^(time/halfLife)) - // At t=halfLife: 50%, at t=2*halfLife: 75%, at t=3*halfLife: 87.5%, etc. 
const newProgress = maxProgress * (1 - Math.pow(0.5, elapsedTimeRef.current / halfLifeSeconds)); diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx deleted file mode 100644 index 876e5accfb..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx +++ /dev/null @@ -1,13 +0,0 @@ -"use client"; -import { FeatureFlagPage } from "@/services/feature-flags/FeatureFlagPage"; -import { Flag } from "@/services/feature-flags/use-get-flag"; -import { type ReactNode } from "react"; -import { CopilotShell } from "./components/CopilotShell/CopilotShell"; - -export default function CopilotLayout({ children }: { children: ReactNode }) { - return ( - - {children} - - ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx index 542173a99c..3e4a81dd51 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx @@ -1,149 +1,13 @@ "use client"; -import { Button } from "@/components/atoms/Button/Button"; -import { Skeleton } from "@/components/atoms/Skeleton/Skeleton"; -import { Text } from "@/components/atoms/Text/Text"; -import { Chat } from "@/components/contextual/Chat/Chat"; -import { ChatInput } from "@/components/contextual/Chat/components/ChatInput/ChatInput"; -import { Dialog } from "@/components/molecules/Dialog/Dialog"; -import { useEffect, useState } from "react"; -import { useCopilotStore } from "./copilot-page-store"; -import { getInputPlaceholder } from "./helpers"; -import { useCopilotPage } from "./useCopilotPage"; - -export default function CopilotPage() { - const { state, handlers } = useCopilotPage(); - const isInterruptModalOpen = useCopilotStore((s) => s.isInterruptModalOpen); - const confirmInterrupt = useCopilotStore((s) => s.confirmInterrupt); - const cancelInterrupt = 
useCopilotStore((s) => s.cancelInterrupt); - - const [inputPlaceholder, setInputPlaceholder] = useState( - getInputPlaceholder(), - ); - - useEffect(() => { - const handleResize = () => { - setInputPlaceholder(getInputPlaceholder(window.innerWidth)); - }; - - handleResize(); - - window.addEventListener("resize", handleResize); - return () => window.removeEventListener("resize", handleResize); - }, []); - - const { greetingName, quickActions, isLoading, hasSession, initialPrompt } = - state; - - const { - handleQuickAction, - startChatWithPrompt, - handleSessionNotFound, - handleStreamingChange, - } = handlers; - - if (hasSession) { - return ( -
- - { - if (!open) cancelInterrupt(); - }, - }} - onClose={cancelInterrupt} - > - -
- - The current chat response will be interrupted. Are you sure you - want to continue? - - - - - -
-
-
-
- ); - } +import { FeatureFlagPage } from "@/services/feature-flags/FeatureFlagPage"; +import { Flag } from "@/services/feature-flags/use-get-flag"; +import { CopilotPage } from "./CopilotPage"; +export default function Page() { return ( -
-
- {isLoading ? ( -
- - -
- -
-
- {Array.from({ length: 4 }).map((_, i) => ( - - ))} -
-
- ) : ( - <> -
- - Hey, {greetingName} - - - Tell me about your work β€” I'll find what to automate. - - -
- -
-
-
- {quickActions.map((action) => ( - - ))} -
- - )} -
-
+ + + ); } diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/styleguide/page.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/styleguide/page.tsx new file mode 100644 index 0000000000..6030665f1c --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/styleguide/page.tsx @@ -0,0 +1,1533 @@ +"use client"; + +import { ResponseType } from "@/app/api/__generated__/models/responseType"; +import { + Conversation, + ConversationContent, +} from "@/components/ai-elements/conversation"; +import { + Message, + MessageContent, + MessageResponse, +} from "@/components/ai-elements/message"; +import { Text } from "@/components/atoms/Text/Text"; +import { CopilotChatActionsProvider } from "../components/CopilotChatActionsProvider/CopilotChatActionsProvider"; +import { CreateAgentTool } from "../tools/CreateAgent/CreateAgent"; +import { EditAgentTool } from "../tools/EditAgent/EditAgent"; +import { FindAgentsTool } from "../tools/FindAgents/FindAgents"; +import { FindBlocksTool } from "../tools/FindBlocks/FindBlocks"; +import { RunAgentTool } from "../tools/RunAgent/RunAgent"; +import { RunBlockTool } from "../tools/RunBlock/RunBlock"; +import { SearchDocsTool } from "../tools/SearchDocs/SearchDocs"; +import { ViewAgentOutputTool } from "../tools/ViewAgentOutput/ViewAgentOutput"; + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +function slugify(text: string) { + return text + .toLowerCase() + .replace(/[^a-z0-9]+/g, "-") + .replace(/(^-|-$)/g, ""); +} + +const SECTIONS = [ + "Messages", + "Tool: Find Blocks", + "Tool: Find Agents (Marketplace)", + "Tool: Find Agents (Library)", + "Tool: Search Docs", + "Tool: Get Doc Page", + "Tool: Run Block", + "Tool: Run Agent", + "Tool: Schedule Agent", + "Tool: Create Agent", + "Tool: Edit Agent", + "Tool: View Agent Output", + "Full Conversation Example", +] as const; + 
+function Section({ + title, + children, +}: { + title: string; + children: React.ReactNode; +}) { + return ( +
+

+ {title} +

+
{children}
+
+ ); +} + +function SubSection({ + label, + children, +}: { + label: string; + children: React.ReactNode; +}) { + return ( +
+

+ {label} +

+ {children} +
+ ); +} + +// --------------------------------------------------------------------------- +// Mock data factories +// --------------------------------------------------------------------------- + +let _id = 0; +function uid() { + return `sg-${++_id}`; +} + +// --------------------------------------------------------------------------- +// Page +// --------------------------------------------------------------------------- + +export default function StyleguidePage() { + return ( + alert(`onSend: ${msg}`)}> +
+ {/* Sidebar */} + + + {/* Content */} +
+
+ Copilot Styleguide +

+ Static showcase of all chat message types, tool states & + variants. +

+ + {/* ============================================================= */} + {/* MESSAGE TYPES */} + {/* ============================================================= */} + +
+ + + + + Find me an agent that can summarize YouTube videos + + + + + + + + + + I found a few agents that can help with YouTube video + summarization. Let me search for the best options for you. + + + + + + + + + + {`Here's what I found:\n\n1. **YouTube Summarizer** β€” Extracts key points from any YouTube video\n2. **Video Digest** β€” Creates bullet-point summaries with timestamps\n\n> Both agents support videos up to 2 hours long.\n\n\`\`\`python\n# Example usage\nresult = agent.run(url="https://youtube.com/watch?v=...")\nprint(result.summary)\n\`\`\``} + + + + + + + + + + Thinking... + + + + + + +
+ Error: Connection timed out. Please try again. +
+
+
+ + {/* ============================================================= */} + {/* FIND BLOCKS */} + {/* ============================================================= */} + +
+ + + + + + + + + + + + + + + +
+ + {/* ============================================================= */} + {/* FIND AGENTS (Marketplace) */} + {/* ============================================================= */} + +
+ + + + + + + + + + + + + + + +
+ + {/* ============================================================= */} + {/* FIND AGENTS (Library) */} + {/* ============================================================= */} + +
+ + + + + + + +
+ + {/* ============================================================= */} + {/* SEARCH DOCS */} + {/* ============================================================= */} + +
+ + + + + + + + + + + + + + + +
+ + {/* ============================================================= */} + {/* GET DOC PAGE */} + {/* ============================================================= */} + +
+ + + + + + + +
+ + {/* ============================================================= */} + {/* RUN BLOCK */} + {/* ============================================================= */} + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Overall, this was our strongest quarter to date.\n\n| Metric | Q3 | Q4 | Change |\n|--------|-----|-----|--------|\n| Revenue | $2.1M | $2.6M | +23% |\n| Users | 10k | 20k | +100% |\n| NPS | 72 | 78 | +6 |", + ], + }, + }, + }} + /> + + + + + + + + + + + + + +
+ + {/* ============================================================= */} + {/* RUN AGENT */} + {/* ============================================================= */} + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + {/* ============================================================= */} + {/* SCHEDULE AGENT */} + {/* ============================================================= */} + +
+ + + + + + + +
+ + {/* ============================================================= */} + {/* CREATE AGENT */} + {/* ============================================================= */} + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + {/* ============================================================= */} + {/* EDIT AGENT */} + {/* ============================================================= */} + +
+ + + + + + + + + + + + + + + + + + + + + + + +
+ + {/* ============================================================= */} + {/* VIEW AGENT OUTPUT */} + {/* ============================================================= */} + +
+ + + + + + + + + + AI is not replacing doctors β€” it's augmenting their capabilities.\n\n### Adoption by Region\n\n| Region | Adoption Rate | Growth |\n|--------|--------------|--------|\n| North America | 78% | +15% |\n| Europe | 62% | +22% |\n| Asia Pacific | 71% | +31% |", + ], + metadata: [ + { + sources_analyzed: 142, + confidence_score: 0.94, + processing_time_ms: 3420, + model_version: "v2.3.1", + categories: [ + "healthcare", + "machine-learning", + "diagnostics", + ], + }, + ], + chart: [ + "https://picsum.photos/seed/chart-demo/500/300", + ], + }, + }, + }, + }} + /> + + + + + + + + + + + + + + + + + +
+ + {/* ============================================================= */} + {/* FULL CONVERSATION EXAMPLE */} + {/* ============================================================= */} + +
+ + + + + + Find me a block that can fetch weather data + + + + + + + + Let me search for weather-related blocks for you. + + + + + + I found 2 blocks related to weather. The **Get Weather** + block fetches current conditions, while **Weather + Forecast** provides a 5-day outlook. Would you like me + to run one of these? + + + + + + + + Yes, run the Get Weather block for San Francisco + + + + + + + + + + The current weather in San Francisco is **68Β°F** and + **Foggy** with 85% humidity and winds from the west at + 12 mph. + + + + + +
+
+
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx new file mode 100644 index 0000000000..5dc2f40dfe --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx @@ -0,0 +1,237 @@ +"use client"; + +import { WarningDiamondIcon } from "@phosphor-icons/react"; +import type { ToolUIPart } from "ai"; +import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions"; +import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation"; +import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader"; +import { ProgressBar } from "../../components/ProgressBar/ProgressBar"; +import { + ContentCardDescription, + ContentCodeBlock, + ContentGrid, + ContentHint, + ContentLink, + ContentMessage, +} from "../../components/ToolAccordion/AccordionContent"; +import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion"; +import { useAsymptoticProgress } from "../../hooks/useAsymptoticProgress"; +import { + ClarificationQuestionsCard, + ClarifyingQuestion, +} from "./components/ClarificationQuestionsCard"; +import { + AccordionIcon, + formatMaybeJson, + getAnimationText, + getCreateAgentToolOutput, + isAgentPreviewOutput, + isAgentSavedOutput, + isClarificationNeededOutput, + isErrorOutput, + isOperationInProgressOutput, + isOperationPendingOutput, + isOperationStartedOutput, + ToolIcon, + truncateText, + type CreateAgentToolOutput, +} from "./helpers"; + +export interface CreateAgentToolPart { + type: string; + toolCallId: string; + state: ToolUIPart["state"]; + input?: unknown; + output?: unknown; +} + +interface Props { + part: CreateAgentToolPart; +} + +function getAccordionMeta(output: CreateAgentToolOutput): { + icon: React.ReactNode; + title: React.ReactNode; + titleClassName?: string; + 
description?: string; +} { + const icon = ; + + if (isAgentSavedOutput(output)) { + return { icon, title: output.agent_name }; + } + if (isAgentPreviewOutput(output)) { + return { + icon, + title: output.agent_name, + description: `${output.node_count} block${output.node_count === 1 ? "" : "s"}`, + }; + } + if (isClarificationNeededOutput(output)) { + const questions = output.questions ?? []; + return { + icon, + title: "Needs clarification", + description: `${questions.length} question${questions.length === 1 ? "" : "s"}`, + }; + } + if ( + isOperationStartedOutput(output) || + isOperationPendingOutput(output) || + isOperationInProgressOutput(output) + ) { + return { + icon: , + title: "Creating agent, this may take a few minutes. Sit back and relax.", + }; + } + return { + icon: ( + + ), + title: "Error", + titleClassName: "text-red-500", + }; +} + +export function CreateAgentTool({ part }: Props) { + const text = getAnimationText(part); + const { onSend } = useCopilotChatActions(); + const isStreaming = + part.state === "input-streaming" || part.state === "input-available"; + + const output = getCreateAgentToolOutput(part); + const isError = + part.state === "output-error" || (!!output && isErrorOutput(output)); + const isOperating = + !!output && + (isOperationStartedOutput(output) || + isOperationPendingOutput(output) || + isOperationInProgressOutput(output)); + const progress = useAsymptoticProgress(isOperating); + const hasExpandableContent = + part.state === "output-available" && + !!output && + (isOperationStartedOutput(output) || + isOperationPendingOutput(output) || + isOperationInProgressOutput(output) || + isAgentPreviewOutput(output) || + isAgentSavedOutput(output) || + isClarificationNeededOutput(output) || + isErrorOutput(output)); + + function handleClarificationAnswers(answers: Record) { + const questions = + output && isClarificationNeededOutput(output) + ? (output.questions ?? 
[]) + : []; + + const contextMessage = questions + .map((q) => { + const answer = answers[q.keyword] || ""; + return `> ${q.question}\n\n${answer}`; + }) + .join("\n\n"); + + onSend( + `**Here are my answers:**\n\n${contextMessage}\n\nPlease proceed with creating the agent.`, + ); + } + + return ( +
+
+ + +
+ + {hasExpandableContent && output && ( + + {isOperating && ( + + + + This could take a few minutes, grab a coffee β˜• + + + )} + + {isAgentSavedOutput(output) && ( + + {output.message} +
+ + Open in library + + + Open in builder + +
+ + {truncateText( + formatMaybeJson({ agent_id: output.agent_id }), + 800, + )} + +
+ )} + + {isAgentPreviewOutput(output) && ( + + {output.message} + {output.description?.trim() && ( + + {output.description} + + )} + + {truncateText(formatMaybeJson(output.agent_json), 1600)} + + + )} + + {isClarificationNeededOutput(output) && ( + { + const item: ClarifyingQuestion = { + question: q.question, + keyword: q.keyword, + }; + const example = + typeof q.example === "string" && q.example.trim() + ? q.example.trim() + : null; + if (example) item.example = example; + return item; + })} + message={output.message} + onSubmitAnswers={handleClarificationAnswers} + /> + )} + + {isErrorOutput(output) && ( + + {output.message} + {output.error && ( + + {formatMaybeJson(output.error)} + + )} + {output.details && ( + + {formatMaybeJson(output.details)} + + )} + + )} +
+ )} +
+ ); +} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ClarificationQuestionsWidget/ClarificationQuestionsWidget.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/ClarificationQuestionsCard.tsx similarity index 98% rename from autogpt_platform/frontend/src/components/contextual/Chat/components/ClarificationQuestionsWidget/ClarificationQuestionsWidget.tsx rename to autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/ClarificationQuestionsCard.tsx index 3b225d1ef1..abcb04731e 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ClarificationQuestionsWidget/ClarificationQuestionsWidget.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/ClarificationQuestionsCard.tsx @@ -6,7 +6,7 @@ import { Input } from "@/components/atoms/Input/Input"; import { Text } from "@/components/atoms/Text/Text"; import { cn } from "@/lib/utils"; import { CheckCircleIcon, QuestionIcon } from "@phosphor-icons/react"; -import { useState, useEffect, useRef } from "react"; +import { useEffect, useRef, useState } from "react"; export interface ClarifyingQuestion { question: string; @@ -24,12 +24,7 @@ interface Props { className?: string; } -function getStorageKey(sessionId?: string): string | null { - if (!sessionId) return null; - return `clarification_answers_${sessionId}`; -} - -export function ClarificationQuestionsWidget({ +export function ClarificationQuestionsCard({ questions, message, sessionId, @@ -241,3 +236,8 @@ export function ClarificationQuestionsWidget({
); } + +function getStorageKey(sessionId?: string): string | null { + if (!sessionId) return null; + return `clarification_answers_${sessionId}`; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/helpers.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/helpers.tsx new file mode 100644 index 0000000000..bd47eac051 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/helpers.tsx @@ -0,0 +1,186 @@ +import type { AgentPreviewResponse } from "@/app/api/__generated__/models/agentPreviewResponse"; +import type { AgentSavedResponse } from "@/app/api/__generated__/models/agentSavedResponse"; +import type { ClarificationNeededResponse } from "@/app/api/__generated__/models/clarificationNeededResponse"; +import type { ErrorResponse } from "@/app/api/__generated__/models/errorResponse"; +import type { OperationInProgressResponse } from "@/app/api/__generated__/models/operationInProgressResponse"; +import type { OperationPendingResponse } from "@/app/api/__generated__/models/operationPendingResponse"; +import type { OperationStartedResponse } from "@/app/api/__generated__/models/operationStartedResponse"; +import { ResponseType } from "@/app/api/__generated__/models/responseType"; +import { + PlusCircleIcon, + PlusIcon, + WarningDiamondIcon, +} from "@phosphor-icons/react"; +import type { ToolUIPart } from "ai"; +import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader"; + +export type CreateAgentToolOutput = + | OperationStartedResponse + | OperationPendingResponse + | OperationInProgressResponse + | AgentPreviewResponse + | AgentSavedResponse + | ClarificationNeededResponse + | ErrorResponse; + +function parseOutput(output: unknown): CreateAgentToolOutput | null { + if (!output) return null; + if (typeof output === "string") { + const trimmed = output.trim(); + if (!trimmed) return null; + try { + return parseOutput(JSON.parse(trimmed) as unknown); + } catch { + 
return null; + } + } + if (typeof output === "object") { + const type = (output as { type?: unknown }).type; + if ( + type === ResponseType.operation_started || + type === ResponseType.operation_pending || + type === ResponseType.operation_in_progress || + type === ResponseType.agent_preview || + type === ResponseType.agent_saved || + type === ResponseType.clarification_needed || + type === ResponseType.error + ) { + return output as CreateAgentToolOutput; + } + if ("operation_id" in output && "tool_name" in output) + return output as OperationStartedResponse | OperationPendingResponse; + if ("tool_call_id" in output) return output as OperationInProgressResponse; + if ("agent_json" in output && "agent_name" in output) + return output as AgentPreviewResponse; + if ("agent_id" in output && "library_agent_id" in output) + return output as AgentSavedResponse; + if ("questions" in output) return output as ClarificationNeededResponse; + if ("error" in output || "details" in output) + return output as ErrorResponse; + } + return null; +} + +export function getCreateAgentToolOutput( + part: unknown, +): CreateAgentToolOutput | null { + if (!part || typeof part !== "object") return null; + return parseOutput((part as { output?: unknown }).output); +} + +export function isOperationStartedOutput( + output: CreateAgentToolOutput, +): output is OperationStartedResponse { + return ( + output.type === ResponseType.operation_started || + ("operation_id" in output && "tool_name" in output) + ); +} + +export function isOperationPendingOutput( + output: CreateAgentToolOutput, +): output is OperationPendingResponse { + return output.type === ResponseType.operation_pending; +} + +export function isOperationInProgressOutput( + output: CreateAgentToolOutput, +): output is OperationInProgressResponse { + return ( + output.type === ResponseType.operation_in_progress || + "tool_call_id" in output + ); +} + +export function isAgentPreviewOutput( + output: CreateAgentToolOutput, +): output is 
AgentPreviewResponse { + return output.type === ResponseType.agent_preview || "agent_json" in output; +} + +export function isAgentSavedOutput( + output: CreateAgentToolOutput, +): output is AgentSavedResponse { + return ( + output.type === ResponseType.agent_saved || "agent_page_link" in output + ); +} + +export function isClarificationNeededOutput( + output: CreateAgentToolOutput, +): output is ClarificationNeededResponse { + return ( + output.type === ResponseType.clarification_needed || "questions" in output + ); +} + +export function isErrorOutput( + output: CreateAgentToolOutput, +): output is ErrorResponse { + return output.type === ResponseType.error || "error" in output; +} + +export function getAnimationText(part: { + state: ToolUIPart["state"]; + input?: unknown; + output?: unknown; +}): string { + switch (part.state) { + case "input-streaming": + case "input-available": + return "Creating a new agent"; + case "output-available": { + const output = parseOutput(part.output); + if (!output) return "Creating a new agent"; + if (isOperationStartedOutput(output)) return "Agent creation started"; + if (isOperationPendingOutput(output)) return "Agent creation in progress"; + if (isOperationInProgressOutput(output)) + return "Agent creation already in progress"; + if (isAgentSavedOutput(output)) return `Saved "${output.agent_name}"`; + if (isAgentPreviewOutput(output)) return `Preview "${output.agent_name}"`; + if (isClarificationNeededOutput(output)) return "Needs clarification"; + return "Error creating agent"; + } + case "output-error": + return "Error creating agent"; + default: + return "Creating a new agent"; + } +} + +export function ToolIcon({ + isStreaming, + isError, +}: { + isStreaming?: boolean; + isError?: boolean; +}) { + if (isError) { + return ( + + ); + } + if (isStreaming) { + return ; + } + return ; +} + +export function AccordionIcon() { + return ; +} + +export function formatMaybeJson(value: unknown): string { + if (typeof value === 
"string") return value; + try { + return JSON.stringify(value, null, 2); + } catch { + return String(value); + } +} + +export function truncateText(text: string, maxChars: number): string { + const trimmed = text.trim(); + if (trimmed.length <= maxChars) return trimmed; + return `${trimmed.slice(0, maxChars).trimEnd()}…`; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/EditAgent/EditAgent.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/EditAgent/EditAgent.tsx new file mode 100644 index 0000000000..3beb9e7e1e --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/EditAgent/EditAgent.tsx @@ -0,0 +1,234 @@ +"use client"; + +import { WarningDiamondIcon } from "@phosphor-icons/react"; +import type { ToolUIPart } from "ai"; +import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions"; +import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation"; +import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader"; +import { ProgressBar } from "../../components/ProgressBar/ProgressBar"; +import { + ContentCardDescription, + ContentCodeBlock, + ContentGrid, + ContentHint, + ContentLink, + ContentMessage, +} from "../../components/ToolAccordion/AccordionContent"; +import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion"; +import { useAsymptoticProgress } from "../../hooks/useAsymptoticProgress"; +import { + ClarificationQuestionsCard, + ClarifyingQuestion, +} from "../CreateAgent/components/ClarificationQuestionsCard"; +import { + AccordionIcon, + formatMaybeJson, + getAnimationText, + getEditAgentToolOutput, + isAgentPreviewOutput, + isAgentSavedOutput, + isClarificationNeededOutput, + isErrorOutput, + isOperationInProgressOutput, + isOperationPendingOutput, + isOperationStartedOutput, + ToolIcon, + truncateText, + type EditAgentToolOutput, +} from "./helpers"; + +export interface EditAgentToolPart 
{ + type: string; + toolCallId: string; + state: ToolUIPart["state"]; + input?: unknown; + output?: unknown; +} + +interface Props { + part: EditAgentToolPart; +} + +function getAccordionMeta(output: EditAgentToolOutput): { + icon: React.ReactNode; + title: string; + titleClassName?: string; + description?: string; +} { + const icon = ; + + if (isAgentSavedOutput(output)) { + return { icon, title: output.agent_name }; + } + if (isAgentPreviewOutput(output)) { + return { + icon, + title: output.agent_name, + description: `${output.node_count} block${output.node_count === 1 ? "" : "s"}`, + }; + } + if (isClarificationNeededOutput(output)) { + const questions = output.questions ?? []; + return { + icon, + title: "Needs clarification", + description: `${questions.length} question${questions.length === 1 ? "" : "s"}`, + }; + } + if ( + isOperationStartedOutput(output) || + isOperationPendingOutput(output) || + isOperationInProgressOutput(output) + ) { + return { icon: , title: "Editing agent" }; + } + return { + icon: ( + + ), + title: "Error", + titleClassName: "text-red-500", + }; +} + +export function EditAgentTool({ part }: Props) { + const text = getAnimationText(part); + const { onSend } = useCopilotChatActions(); + const isStreaming = + part.state === "input-streaming" || part.state === "input-available"; + + const output = getEditAgentToolOutput(part); + const isError = + part.state === "output-error" || (!!output && isErrorOutput(output)); + const isOperating = + !!output && + (isOperationStartedOutput(output) || + isOperationPendingOutput(output) || + isOperationInProgressOutput(output)); + const progress = useAsymptoticProgress(isOperating); + const hasExpandableContent = + part.state === "output-available" && + !!output && + (isOperationStartedOutput(output) || + isOperationPendingOutput(output) || + isOperationInProgressOutput(output) || + isAgentPreviewOutput(output) || + isAgentSavedOutput(output) || + isClarificationNeededOutput(output) || + 
isErrorOutput(output)); + + function handleClarificationAnswers(answers: Record) { + const questions = + output && isClarificationNeededOutput(output) + ? (output.questions ?? []) + : []; + + const contextMessage = questions + .map((q) => { + const answer = answers[q.keyword] || ""; + return `> ${q.question}\n\n${answer}`; + }) + .join("\n\n"); + + onSend( + `**Here are my answers:**\n\n${contextMessage}\n\nPlease proceed with editing the agent.`, + ); + } + + return ( +
+
+ + +
+ + {hasExpandableContent && output && ( + + {isOperating && ( + + + + This could take a few minutes, grab a coffee β˜• + + + )} + + {isAgentSavedOutput(output) && ( + + {output.message} +
+ + Open in library + + + Open in builder + +
+ + {truncateText( + formatMaybeJson({ agent_id: output.agent_id }), + 800, + )} + +
+ )} + + {isAgentPreviewOutput(output) && ( + + {output.message} + {output.description?.trim() && ( + + {output.description} + + )} + + {truncateText(formatMaybeJson(output.agent_json), 1600)} + + + )} + + {isClarificationNeededOutput(output) && ( + { + const item: ClarifyingQuestion = { + question: q.question, + keyword: q.keyword, + }; + const example = + typeof q.example === "string" && q.example.trim() + ? q.example.trim() + : null; + if (example) item.example = example; + return item; + })} + message={output.message} + onSubmitAnswers={handleClarificationAnswers} + /> + )} + + {isErrorOutput(output) && ( + + {output.message} + {output.error && ( + + {formatMaybeJson(output.error)} + + )} + {output.details && ( + + {formatMaybeJson(output.details)} + + )} + + )} +
+ )} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/EditAgent/helpers.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/EditAgent/helpers.tsx new file mode 100644 index 0000000000..a0db50cddc --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/EditAgent/helpers.tsx @@ -0,0 +1,188 @@ +import type { AgentPreviewResponse } from "@/app/api/__generated__/models/agentPreviewResponse"; +import type { AgentSavedResponse } from "@/app/api/__generated__/models/agentSavedResponse"; +import type { ClarificationNeededResponse } from "@/app/api/__generated__/models/clarificationNeededResponse"; +import type { ErrorResponse } from "@/app/api/__generated__/models/errorResponse"; +import type { OperationInProgressResponse } from "@/app/api/__generated__/models/operationInProgressResponse"; +import type { OperationPendingResponse } from "@/app/api/__generated__/models/operationPendingResponse"; +import type { OperationStartedResponse } from "@/app/api/__generated__/models/operationStartedResponse"; +import { ResponseType } from "@/app/api/__generated__/models/responseType"; +import { + NotePencilIcon, + PencilLineIcon, + WarningDiamondIcon, +} from "@phosphor-icons/react"; +import type { ToolUIPart } from "ai"; +import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader"; + +export type EditAgentToolOutput = + | OperationStartedResponse + | OperationPendingResponse + | OperationInProgressResponse + | AgentPreviewResponse + | AgentSavedResponse + | ClarificationNeededResponse + | ErrorResponse; + +function parseOutput(output: unknown): EditAgentToolOutput | null { + if (!output) return null; + if (typeof output === "string") { + const trimmed = output.trim(); + if (!trimmed) return null; + try { + return parseOutput(JSON.parse(trimmed) as unknown); + } catch { + return null; + } + } + if (typeof output === "object") { + const type = (output as { type?: unknown }).type; + if ( + type === 
ResponseType.operation_started || + type === ResponseType.operation_pending || + type === ResponseType.operation_in_progress || + type === ResponseType.agent_preview || + type === ResponseType.agent_saved || + type === ResponseType.clarification_needed || + type === ResponseType.error + ) { + return output as EditAgentToolOutput; + } + if ("operation_id" in output && "tool_name" in output) + return output as OperationStartedResponse | OperationPendingResponse; + if ("tool_call_id" in output) return output as OperationInProgressResponse; + if ("agent_json" in output && "agent_name" in output) + return output as AgentPreviewResponse; + if ("agent_id" in output && "library_agent_id" in output) + return output as AgentSavedResponse; + if ("questions" in output) return output as ClarificationNeededResponse; + if ("error" in output || "details" in output) + return output as ErrorResponse; + } + return null; +} + +export function getEditAgentToolOutput( + part: unknown, +): EditAgentToolOutput | null { + if (!part || typeof part !== "object") return null; + return parseOutput((part as { output?: unknown }).output); +} + +export function isOperationStartedOutput( + output: EditAgentToolOutput, +): output is OperationStartedResponse { + return ( + output.type === ResponseType.operation_started || + ("operation_id" in output && "tool_name" in output) + ); +} + +export function isOperationPendingOutput( + output: EditAgentToolOutput, +): output is OperationPendingResponse { + return output.type === ResponseType.operation_pending; +} + +export function isOperationInProgressOutput( + output: EditAgentToolOutput, +): output is OperationInProgressResponse { + return ( + output.type === ResponseType.operation_in_progress || + "tool_call_id" in output + ); +} + +export function isAgentPreviewOutput( + output: EditAgentToolOutput, +): output is AgentPreviewResponse { + return output.type === ResponseType.agent_preview || "agent_json" in output; +} + +export function 
isAgentSavedOutput( + output: EditAgentToolOutput, +): output is AgentSavedResponse { + return ( + output.type === ResponseType.agent_saved || "agent_page_link" in output + ); +} + +export function isClarificationNeededOutput( + output: EditAgentToolOutput, +): output is ClarificationNeededResponse { + return ( + output.type === ResponseType.clarification_needed || "questions" in output + ); +} + +export function isErrorOutput( + output: EditAgentToolOutput, +): output is ErrorResponse { + return output.type === ResponseType.error || "error" in output; +} + +export function getAnimationText(part: { + state: ToolUIPart["state"]; + input?: unknown; + output?: unknown; +}): string { + switch (part.state) { + case "input-streaming": + case "input-available": + return "Editing the agent"; + case "output-available": { + const output = parseOutput(part.output); + if (!output) return "Editing the agent"; + if (isOperationStartedOutput(output)) return "Agent update started"; + if (isOperationPendingOutput(output)) return "Agent update in progress"; + if (isOperationInProgressOutput(output)) + return "Agent update already in progress"; + if (isAgentSavedOutput(output)) return `Saved "${output.agent_name}"`; + if (isAgentPreviewOutput(output)) return `Preview "${output.agent_name}"`; + if (isClarificationNeededOutput(output)) return "Needs clarification"; + return "Error editing agent"; + } + case "output-error": + return "Error editing agent"; + default: + return "Editing the agent"; + } +} + +export function ToolIcon({ + isStreaming, + isError, +}: { + isStreaming?: boolean; + isError?: boolean; +}) { + if (isError) { + return ( + + ); + } + if (isStreaming) { + return ; + } + return ( + + ); +} + +export function AccordionIcon() { + return ; +} + +export function formatMaybeJson(value: unknown): string { + if (typeof value === "string") return value; + try { + return JSON.stringify(value, null, 2); + } catch { + return String(value); + } +} + +export function 
truncateText(text: string, maxChars: number): string { + const trimmed = text.trim(); + if (trimmed.length <= maxChars) return trimmed; + return `${trimmed.slice(0, maxChars).trimEnd()}…`; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FindAgents/FindAgents.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FindAgents/FindAgents.tsx new file mode 100644 index 0000000000..4f0068b2c5 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FindAgents/FindAgents.tsx @@ -0,0 +1,127 @@ +"use client"; + +import { ToolUIPart } from "ai"; +import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation"; +import { + ContentBadge, + ContentCard, + ContentCardDescription, + ContentCardHeader, + ContentCardTitle, + ContentGrid, + ContentLink, +} from "../../components/ToolAccordion/AccordionContent"; +import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion"; +import { + AccordionIcon, + getAgentHref, + getAnimationText, + getFindAgentsOutput, + getSourceLabelFromToolType, + isAgentsFoundOutput, + isErrorOutput, + ToolIcon, +} from "./helpers"; + +export interface FindAgentsToolPart { + type: string; + toolCallId: string; + state: ToolUIPart["state"]; + input?: unknown; + output?: unknown; +} + +interface Props { + part: FindAgentsToolPart; +} + +export function FindAgentsTool({ part }: Props) { + const text = getAnimationText(part); + const output = getFindAgentsOutput(part); + const isStreaming = + part.state === "input-streaming" || part.state === "input-available"; + const isError = + part.state === "output-error" || (!!output && isErrorOutput(output)); + + const query = + typeof part.input === "object" && part.input !== null + ? String((part.input as { query?: unknown }).query ?? "").trim() + : ""; + + const agentsFoundOutput = + part.state === "output-available" && output && isAgentsFoundOutput(output) + ? 
output + : null; + + const hasAgents = + !!agentsFoundOutput && + agentsFoundOutput.agents.length > 0 && + (typeof agentsFoundOutput.count !== "number" || + agentsFoundOutput.count > 0); + const totalCount = agentsFoundOutput ? agentsFoundOutput.count : 0; + const { source } = getSourceLabelFromToolType(part.type); + const scopeText = + source === "library" + ? "in your library" + : source === "marketplace" + ? "in marketplace" + : ""; + const accordionDescription = `Found ${totalCount}${scopeText ? ` ${scopeText}` : ""}${ + query ? ` for "${query}"` : "" + }`; + + return ( +
+
+ + +
+ + {hasAgents && agentsFoundOutput && ( + } + title="Agent results" + description={accordionDescription} + > + + {agentsFoundOutput.agents.map((agent) => { + const href = getAgentHref(agent); + const agentSource = + agent.source === "library" + ? "Library" + : agent.source === "marketplace" + ? "Marketplace" + : null; + return ( + + Open : null + } + > +
+ {agent.name} + {agentSource && ( + {agentSource} + )} +
+ + {agent.description} + +
+
+ ); + })} +
+
+ )} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FindAgents/helpers.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FindAgents/helpers.tsx new file mode 100644 index 0000000000..f253947953 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FindAgents/helpers.tsx @@ -0,0 +1,187 @@ +import type { AgentInfo } from "@/app/api/__generated__/models/agentInfo"; +import type { AgentsFoundResponse } from "@/app/api/__generated__/models/agentsFoundResponse"; +import type { ErrorResponse } from "@/app/api/__generated__/models/errorResponse"; +import type { NoResultsResponse } from "@/app/api/__generated__/models/noResultsResponse"; +import { ResponseType } from "@/app/api/__generated__/models/responseType"; +import { + FolderOpenIcon, + MagnifyingGlassIcon, + SquaresFourIcon, + StorefrontIcon, +} from "@phosphor-icons/react"; +import { ToolUIPart } from "ai"; + +export interface FindAgentInput { + query: string; +} + +export type FindAgentsOutput = + | AgentsFoundResponse + | NoResultsResponse + | ErrorResponse; + +export type FindAgentsToolType = + | "tool-find_agent" + | "tool-find_library_agent" + | (string & {}); + +function parseOutput(output: unknown): FindAgentsOutput | null { + if (!output) return null; + if (typeof output === "string") { + const trimmed = output.trim(); + if (!trimmed) return null; + try { + return parseOutput(JSON.parse(trimmed) as unknown); + } catch { + return null; + } + } + if (typeof output === "object") { + const type = (output as { type?: unknown }).type; + if ( + type === ResponseType.agents_found || + type === ResponseType.no_results || + type === ResponseType.error + ) { + return output as FindAgentsOutput; + } + if ("agents" in output && "count" in output) + return output as AgentsFoundResponse; + if ("suggestions" in output && !("error" in output)) + return output as NoResultsResponse; + if ("error" in output || "details" in output) + return output as 
ErrorResponse; + } + return null; +} + +export function getFindAgentsOutput(part: unknown): FindAgentsOutput | null { + if (!part || typeof part !== "object") return null; + return parseOutput((part as { output?: unknown }).output); +} + +export function isAgentsFoundOutput( + output: FindAgentsOutput, +): output is AgentsFoundResponse { + return output.type === ResponseType.agents_found || "agents" in output; +} + +export function isNoResultsOutput( + output: FindAgentsOutput, +): output is NoResultsResponse { + return ( + output.type === ResponseType.no_results || + ("suggestions" in output && !("error" in output)) + ); +} + +export function isErrorOutput( + output: FindAgentsOutput, +): output is ErrorResponse { + return output.type === ResponseType.error || "error" in output; +} + +export function getSourceLabelFromToolType(toolType?: FindAgentsToolType): { + source: "marketplace" | "library" | "unknown"; + label: string; +} { + if (toolType === "tool-find_library_agent") { + return { source: "library", label: "Library" }; + } + if (toolType === "tool-find_agent") { + return { source: "marketplace", label: "Marketplace" }; + } + return { source: "unknown", label: "Agents" }; +} + +export function getAnimationText(part: { + type?: FindAgentsToolType; + state: ToolUIPart["state"]; + input?: unknown; + output?: unknown; +}): string { + const { source } = getSourceLabelFromToolType(part.type); + const query = (part.input as FindAgentInput | undefined)?.query?.trim(); + + // Action phrase matching legacy ToolCallMessage + const actionPhrase = + source === "library" + ? "Looking for library agents" + : "Looking for agents in the marketplace"; + + const queryText = query ? 
` matching "${query}"` : ""; + + switch (part.state) { + case "input-streaming": + case "input-available": + return `${actionPhrase}${queryText}`; + + case "output-available": { + const output = parseOutput(part.output); + if (!output) { + return `${actionPhrase}${queryText}`; + } + if (isNoResultsOutput(output)) { + return `No agents found${queryText}`; + } + if (isAgentsFoundOutput(output)) { + const count = output.count ?? output.agents?.length ?? 0; + return `Found ${count} agent${count === 1 ? "" : "s"}${queryText}`; + } + if (isErrorOutput(output)) { + return `Error finding agents${queryText}`; + } + return `${actionPhrase}${queryText}`; + } + + case "output-error": + return `Error finding agents${queryText}`; + + default: + return actionPhrase; + } +} + +export function getAgentHref(agent: AgentInfo): string | null { + if (agent.source === "library") { + return `/library/agents/${encodeURIComponent(agent.id)}`; + } + + const [creator, slug, ...rest] = agent.id.split("/"); + if (!creator || !slug || rest.length > 0) return null; + return `/marketplace/agent/${encodeURIComponent(creator)}/${encodeURIComponent(slug)}`; +} + +export function ToolIcon({ + toolType, + isStreaming, + isError, +}: { + toolType?: FindAgentsToolType; + isStreaming?: boolean; + isError?: boolean; +}) { + const { source } = getSourceLabelFromToolType(toolType); + const IconComponent = + source === "library" ? MagnifyingGlassIcon : SquaresFourIcon; + + return ( + + ); +} + +export function AccordionIcon({ toolType }: { toolType?: FindAgentsToolType }) { + const { source } = getSourceLabelFromToolType(toolType); + const IconComponent = source === "library" ? 
FolderOpenIcon : StorefrontIcon; + return ; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FindBlocks/FindBlocks.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FindBlocks/FindBlocks.tsx new file mode 100644 index 0000000000..3684a2da14 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FindBlocks/FindBlocks.tsx @@ -0,0 +1,92 @@ +"use client"; + +import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation"; +import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion"; +import { + ContentCard, + ContentCardDescription, + ContentCardTitle, +} from "../../components/ToolAccordion/AccordionContent"; +import type { BlockListResponse } from "@/app/api/__generated__/models/blockListResponse"; +import type { BlockInfoSummary } from "@/app/api/__generated__/models/blockInfoSummary"; +import { ToolUIPart } from "ai"; +import { HorizontalScroll } from "@/app/(platform)/build/components/NewControlPanel/NewBlockMenu/HorizontalScroll"; +import { + AccordionIcon, + getAnimationText, + parseOutput, + ToolIcon, +} from "./helpers"; + +export interface FindBlockInput { + query: string; +} + +export type FindBlockOutput = BlockListResponse; + +export interface FindBlockToolPart { + type: string; + toolName?: string; + toolCallId: string; + state: ToolUIPart["state"]; + input?: FindBlockInput | unknown; + output?: string | FindBlockOutput | unknown; + title?: string; +} + +interface Props { + part: FindBlockToolPart; +} + +function BlockCard({ block }: { block: BlockInfoSummary }) { + return ( + + {block.name} + + {block.description} + + + ); +} + +export function FindBlocksTool({ part }: Props) { + const text = getAnimationText(part); + const isStreaming = + part.state === "input-streaming" || part.state === "input-available"; + const isError = part.state === "output-error"; + + const parsed = + part.state === "output-available" ? 
parseOutput(part.output) : null; + const hasBlocks = !!parsed && parsed.blocks.length > 0; + + const query = (part.input as FindBlockInput | undefined)?.query?.trim(); + const accordionDescription = parsed + ? `Found ${parsed.count} block${parsed.count === 1 ? "" : "s"}${query ? ` for "${query}"` : ""}` + : undefined; + + return ( +
+
+ + +
+ + {hasBlocks && parsed && ( + } + title="Block results" + description={accordionDescription} + > + + {parsed.blocks.map((block) => ( + + ))} + + + )} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FindBlocks/helpers.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FindBlocks/helpers.tsx new file mode 100644 index 0000000000..eaebe98ea5 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/FindBlocks/helpers.tsx @@ -0,0 +1,75 @@ +import type { BlockListResponse } from "@/app/api/__generated__/models/blockListResponse"; +import { ResponseType } from "@/app/api/__generated__/models/responseType"; +import { CubeIcon, PackageIcon } from "@phosphor-icons/react"; +import { FindBlockInput, FindBlockToolPart } from "./FindBlocks"; + +export function parseOutput(output: unknown): BlockListResponse | null { + if (!output) return null; + if (typeof output === "string") { + const trimmed = output.trim(); + if (!trimmed) return null; + try { + return parseOutput(JSON.parse(trimmed) as unknown); + } catch { + return null; + } + } + if (typeof output === "object") { + const type = (output as { type?: unknown }).type; + if (type === ResponseType.block_list || "blocks" in output) { + return output as BlockListResponse; + } + } + return null; +} + +export function getAnimationText(part: FindBlockToolPart): string { + const query = (part.input as FindBlockInput | undefined)?.query?.trim(); + const queryText = query ? ` matching "${query}"` : ""; + + switch (part.state) { + case "input-streaming": + case "input-available": + return `Searching for blocks${queryText}`; + + case "output-available": { + const parsed = parseOutput(part.output); + if (parsed) { + return `Found ${parsed.count} block${parsed.count === 1 ? 
"" : "s"}${queryText}`; + } + return `Searching for blocks${queryText}`; + } + + case "output-error": + return `Error finding blocks${queryText}`; + + default: + return "Searching for blocks"; + } +} + +export function ToolIcon({ + isStreaming, + isError, +}: { + isStreaming?: boolean; + isError?: boolean; +}) { + return ( + + ); +} + +export function AccordionIcon() { + return ; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/RunAgent.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/RunAgent.tsx new file mode 100644 index 0000000000..51044848b9 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/RunAgent.tsx @@ -0,0 +1,93 @@ +"use client"; + +import type { ToolUIPart } from "ai"; +import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation"; +import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion"; +import { ContentMessage } from "../../components/ToolAccordion/AccordionContent"; +import { + getAccordionMeta, + getAnimationText, + getRunAgentToolOutput, + isRunAgentAgentDetailsOutput, + isRunAgentErrorOutput, + isRunAgentExecutionStartedOutput, + isRunAgentNeedLoginOutput, + isRunAgentSetupRequirementsOutput, + ToolIcon, +} from "./helpers"; +import { ExecutionStartedCard } from "./components/ExecutionStartedCard/ExecutionStartedCard"; +import { AgentDetailsCard } from "./components/AgentDetailsCard/AgentDetailsCard"; +import { SetupRequirementsCard } from "./components/SetupRequirementsCard/SetupRequirementsCard"; +import { ErrorCard } from "./components/ErrorCard/ErrorCard"; + +export interface RunAgentToolPart { + type: string; + toolCallId: string; + state: ToolUIPart["state"]; + input?: unknown; + output?: unknown; +} + +interface Props { + part: RunAgentToolPart; +} + +export function RunAgentTool({ part }: Props) { + const text = getAnimationText(part); + const isStreaming = + part.state === 
"input-streaming" || part.state === "input-available"; + + const output = getRunAgentToolOutput(part); + const isError = + part.state === "output-error" || + (!!output && isRunAgentErrorOutput(output)); + const hasExpandableContent = + part.state === "output-available" && + !!output && + (isRunAgentExecutionStartedOutput(output) || + isRunAgentAgentDetailsOutput(output) || + isRunAgentSetupRequirementsOutput(output) || + isRunAgentNeedLoginOutput(output) || + isRunAgentErrorOutput(output)); + + return ( +
+
+ + +
+ + {hasExpandableContent && output && ( + + {isRunAgentExecutionStartedOutput(output) && ( + + )} + + {isRunAgentAgentDetailsOutput(output) && ( + + )} + + {isRunAgentSetupRequirementsOutput(output) && ( + + )} + + {isRunAgentNeedLoginOutput(output) && ( + {output.message} + )} + + {isRunAgentErrorOutput(output) && } + + )} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/AgentDetailsCard/AgentDetailsCard.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/AgentDetailsCard/AgentDetailsCard.tsx new file mode 100644 index 0000000000..f18568faec --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/AgentDetailsCard/AgentDetailsCard.tsx @@ -0,0 +1,116 @@ +"use client"; + +import type { AgentDetailsResponse } from "@/app/api/__generated__/models/agentDetailsResponse"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { FormRenderer } from "@/components/renderers/InputRenderer/FormRenderer"; +import { AnimatePresence, motion } from "framer-motion"; +import { useState } from "react"; +import { useCopilotChatActions } from "../../../../components/CopilotChatActionsProvider/useCopilotChatActions"; +import { ContentMessage } from "../../../../components/ToolAccordion/AccordionContent"; +import { buildInputSchema } from "./helpers"; + +interface Props { + output: AgentDetailsResponse; +} + +export function AgentDetailsCard({ output }: Props) { + const { onSend } = useCopilotChatActions(); + const [showInputForm, setShowInputForm] = useState(false); + const [inputValues, setInputValues] = useState>({}); + + function handleRunWithExamples() { + onSend( + `Run the agent "${output.agent.name}" with placeholder/example values so I can test it.`, + ); + } + + function handleRunWithInputs() { + const nonEmpty = Object.fromEntries( + Object.entries(inputValues).filter( + ([, v]) => v !== undefined && v !== null && v !== "", + ), + ); + onSend( + `Run the agent "${output.agent.name}" with these inputs: ${JSON.stringify(nonEmpty, null, 2)}`, + ); + setShowInputForm(false); + setInputValues({}); + } + + return ( +
+ + Run this agent with example values or your own inputs. + + +
+ + +
+ + + {showInputForm && buildInputSchema(output.agent.inputs) && ( + +
+ Enter your inputs + setInputValues(v.formData ?? {})} + uiSchema={{ + "ui:submitButtonOptions": { norender: true }, + }} + initialValues={inputValues} + formContext={{ + showHandles: false, + size: "small", + }} + /> +
+ + +
+
+
+ )} +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/AgentDetailsCard/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/AgentDetailsCard/helpers.ts new file mode 100644 index 0000000000..635b8d20d7 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/AgentDetailsCard/helpers.ts @@ -0,0 +1,8 @@ +import type { RJSFSchema } from "@rjsf/utils"; + +export function buildInputSchema(inputs: unknown): RJSFSchema | null { + if (!inputs || typeof inputs !== "object") return null; + const properties = inputs as RJSFSchema["properties"]; + if (!properties || Object.keys(properties).length === 0) return null; + return inputs as RJSFSchema; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/ErrorCard/ErrorCard.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/ErrorCard/ErrorCard.tsx new file mode 100644 index 0000000000..7990428947 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/ErrorCard/ErrorCard.tsx @@ -0,0 +1,27 @@ +"use client"; + +import type { ErrorResponse } from "@/app/api/__generated__/models/errorResponse"; +import { + ContentCodeBlock, + ContentGrid, + ContentMessage, +} from "../../../../components/ToolAccordion/AccordionContent"; +import { formatMaybeJson } from "../../helpers"; + +interface Props { + output: ErrorResponse; +} + +export function ErrorCard({ output }: Props) { + return ( + + {output.message} + {output.error && ( + {formatMaybeJson(output.error)} + )} + {output.details && ( + {formatMaybeJson(output.details)} + )} + + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/ExecutionStartedCard/ExecutionStartedCard.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/ExecutionStartedCard/ExecutionStartedCard.tsx new 
file mode 100644 index 0000000000..f98656e5ff --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/ExecutionStartedCard/ExecutionStartedCard.tsx @@ -0,0 +1,39 @@ +"use client"; + +import type { ExecutionStartedResponse } from "@/app/api/__generated__/models/executionStartedResponse"; +import { Button } from "@/components/atoms/Button/Button"; +import { useRouter } from "next/navigation"; +import { + ContentCard, + ContentCardDescription, + ContentCardSubtitle, + ContentCardTitle, + ContentGrid, +} from "../../../../components/ToolAccordion/AccordionContent"; + +interface Props { + output: ExecutionStartedResponse; +} + +export function ExecutionStartedCard({ output }: Props) { + const router = useRouter(); + + return ( + + + Execution started + {output.execution_id} + {output.message} + {output.library_agent_link && ( + + )} + + + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/SetupRequirementsCard/SetupRequirementsCard.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/SetupRequirementsCard/SetupRequirementsCard.tsx new file mode 100644 index 0000000000..c6d116e62a --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/SetupRequirementsCard/SetupRequirementsCard.tsx @@ -0,0 +1,105 @@ +"use client"; + +import { useState } from "react"; +import { CredentialsGroupedView } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView"; +import { Button } from "@/components/atoms/Button/Button"; +import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types"; +import type { SetupRequirementsResponse } from "@/app/api/__generated__/models/setupRequirementsResponse"; +import { useCopilotChatActions } from "../../../../components/CopilotChatActionsProvider/useCopilotChatActions"; +import { + ContentBadge, + ContentCardDescription, + 
ContentCardTitle, + ContentMessage, +} from "../../../../components/ToolAccordion/AccordionContent"; +import { coerceCredentialFields, coerceExpectedInputs } from "./helpers"; + +interface Props { + output: SetupRequirementsResponse; +} + +export function SetupRequirementsCard({ output }: Props) { + const { onSend } = useCopilotChatActions(); + + const [inputCredentials, setInputCredentials] = useState< + Record + >({}); + const [hasSent, setHasSent] = useState(false); + + const { credentialFields, requiredCredentials } = coerceCredentialFields( + output.setup_info.user_readiness?.missing_credentials, + ); + + const expectedInputs = coerceExpectedInputs( + (output.setup_info.requirements as Record)?.inputs, + ); + + function handleCredentialChange(key: string, value?: CredentialsMetaInput) { + setInputCredentials((prev) => ({ ...prev, [key]: value })); + } + + const isAllComplete = + credentialFields.length > 0 && + [...requiredCredentials].every((key) => !!inputCredentials[key]); + + function handleProceed() { + setHasSent(true); + onSend( + "I've configured the required credentials. Please check if everything is ready and proceed with running the agent.", + ); + } + + return ( +
+ {output.message} + + {credentialFields.length > 0 && ( +
+ + {isAllComplete && !hasSent && ( + + )} +
+ )} + + {expectedInputs.length > 0 && ( +
+ + Expected inputs + +
+ {expectedInputs.map((input) => ( +
+
+ + {input.title} + + + {input.required ? "Required" : "Optional"} + +
+ + {input.name} • {input.type} + {input.description ? ` \u2022 ${input.description}` : ""} + +
+ ))} +
+
+ )} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/SetupRequirementsCard/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/SetupRequirementsCard/helpers.ts new file mode 100644 index 0000000000..6bb10751f0 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/components/SetupRequirementsCard/helpers.ts @@ -0,0 +1,116 @@ +import type { CredentialField } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers"; + +const VALID_CREDENTIAL_TYPES = new Set([ + "api_key", + "oauth2", + "user_password", + "host_scoped", +]); + +/** + * Transforms raw missing_credentials from SetupRequirementsResponse + * into CredentialField[] tuples compatible with CredentialsGroupedView. + * + * Each CredentialField is [key, schema] where schema matches + * BlockIOCredentialsSubSchema shape. + */ +export function coerceCredentialFields(rawMissingCredentials: unknown): { + credentialFields: CredentialField[]; + requiredCredentials: Set; +} { + const missing = + rawMissingCredentials && typeof rawMissingCredentials === "object" + ? (rawMissingCredentials as Record) + : {}; + + const credentialFields: CredentialField[] = []; + const requiredCredentials = new Set(); + + Object.entries(missing).forEach(([key, value]) => { + if (!value || typeof value !== "object") return; + const cred = value as Record; + + const provider = + typeof cred.provider === "string" ? cred.provider.trim() : ""; + if (!provider) return; + + const types = + Array.isArray(cred.types) && cred.types.length > 0 + ? cred.types + : typeof cred.type === "string" + ? [cred.type] + : []; + + const credentialTypes = types + .map((t) => (typeof t === "string" ? t.trim() : "")) + .filter((t) => VALID_CREDENTIAL_TYPES.has(t)); + + if (credentialTypes.length === 0) return; + + const scopes = Array.isArray(cred.scopes) + ? 
cred.scopes.filter((s): s is string => typeof s === "string") + : undefined; + + const schema = { + type: "object" as const, + properties: {}, + credentials_provider: [provider], + credentials_types: credentialTypes, + credentials_scopes: scopes, + }; + + credentialFields.push([key, schema]); + requiredCredentials.add(key); + }); + + return { credentialFields, requiredCredentials }; +} + +export function coerceExpectedInputs(rawInputs: unknown): Array<{ + name: string; + title: string; + type: string; + description?: string; + required: boolean; +}> { + if (!Array.isArray(rawInputs)) return []; + const results: Array<{ + name: string; + title: string; + type: string; + description?: string; + required: boolean; + }> = []; + + rawInputs.forEach((value, index) => { + if (!value || typeof value !== "object") return; + const input = value as Record; + + const name = + typeof input.name === "string" && input.name.trim() + ? input.name.trim() + : `input-${index}`; + const title = + typeof input.title === "string" && input.title.trim() + ? input.title.trim() + : name; + const type = typeof input.type === "string" ? input.type : "unknown"; + const description = + typeof input.description === "string" && input.description.trim() + ? 
input.description.trim() + : undefined; + const required = Boolean(input.required); + + const item: { + name: string; + title: string; + type: string; + description?: string; + required: boolean; + } = { name, title, type, required }; + if (description) item.description = description; + results.push(item); + }); + + return results; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/helpers.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/helpers.tsx new file mode 100644 index 0000000000..0a117a71f2 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/helpers.tsx @@ -0,0 +1,248 @@ +import type { AgentDetailsResponse } from "@/app/api/__generated__/models/agentDetailsResponse"; +import type { ErrorResponse } from "@/app/api/__generated__/models/errorResponse"; +import type { ExecutionStartedResponse } from "@/app/api/__generated__/models/executionStartedResponse"; +import type { NeedLoginResponse } from "@/app/api/__generated__/models/needLoginResponse"; +import { ResponseType } from "@/app/api/__generated__/models/responseType"; +import type { SetupRequirementsResponse } from "@/app/api/__generated__/models/setupRequirementsResponse"; +import { + PlayIcon, + RocketLaunchIcon, + WarningDiamondIcon, +} from "@phosphor-icons/react"; +import type { ToolUIPart } from "ai"; +import { SpinnerLoader } from "../../components/SpinnerLoader/SpinnerLoader"; + +export interface RunAgentInput { + username_agent_slug?: string; + library_agent_id?: string; + inputs?: Record; + use_defaults?: boolean; + schedule_name?: string; + cron?: string; + timezone?: string; +} + +export type RunAgentToolOutput = + | SetupRequirementsResponse + | ExecutionStartedResponse + | AgentDetailsResponse + | NeedLoginResponse + | ErrorResponse; + +const RUN_AGENT_OUTPUT_TYPES = new Set([ + ResponseType.setup_requirements, + ResponseType.execution_started, + ResponseType.agent_details, + ResponseType.need_login, 
+ ResponseType.error, +]); + +export function isRunAgentSetupRequirementsOutput( + output: RunAgentToolOutput, +): output is SetupRequirementsResponse { + return ( + output.type === ResponseType.setup_requirements || + ("setup_info" in output && typeof output.setup_info === "object") + ); +} + +export function isRunAgentExecutionStartedOutput( + output: RunAgentToolOutput, +): output is ExecutionStartedResponse { + return ( + output.type === ResponseType.execution_started || "execution_id" in output + ); +} + +export function isRunAgentAgentDetailsOutput( + output: RunAgentToolOutput, +): output is AgentDetailsResponse { + return output.type === ResponseType.agent_details || "agent" in output; +} + +export function isRunAgentNeedLoginOutput( + output: RunAgentToolOutput, +): output is NeedLoginResponse { + return output.type === ResponseType.need_login; +} + +export function isRunAgentErrorOutput( + output: RunAgentToolOutput, +): output is ErrorResponse { + return output.type === ResponseType.error || "error" in output; +} + +function parseOutput(output: unknown): RunAgentToolOutput | null { + if (!output) return null; + if (typeof output === "string") { + const trimmed = output.trim(); + if (!trimmed) return null; + try { + return parseOutput(JSON.parse(trimmed) as unknown); + } catch { + return null; + } + } + if (typeof output === "object") { + const type = (output as { type?: unknown }).type; + if (typeof type === "string" && RUN_AGENT_OUTPUT_TYPES.has(type)) { + return output as RunAgentToolOutput; + } + if ("execution_id" in output) return output as ExecutionStartedResponse; + if ("setup_info" in output) return output as SetupRequirementsResponse; + if ("agent" in output) return output as AgentDetailsResponse; + if ("error" in output || "details" in output) + return output as ErrorResponse; + if (type === ResponseType.need_login) return output as NeedLoginResponse; + } + return null; +} + +export function getRunAgentToolOutput( + part: unknown, +): 
RunAgentToolOutput | null { + if (!part || typeof part !== "object") return null; + return parseOutput((part as { output?: unknown }).output); +} + +function getAgentIdentifierText( + input: RunAgentInput | undefined, +): string | null { + if (!input) return null; + const slug = input.username_agent_slug?.trim(); + if (slug) return slug; + const libraryId = input.library_agent_id?.trim(); + if (libraryId) return `Library agent ${libraryId}`; + return null; +} + +export function getAnimationText(part: { + state: ToolUIPart["state"]; + input?: unknown; + output?: unknown; +}): string { + const input = part.input as RunAgentInput | undefined; + const agentIdentifier = getAgentIdentifierText(input); + const isSchedule = Boolean( + input?.schedule_name?.trim() || input?.cron?.trim(), + ); + const actionPhrase = isSchedule + ? "Scheduling the agent to run" + : "Running the agent"; + const identifierText = agentIdentifier ? ` "${agentIdentifier}"` : ""; + + switch (part.state) { + case "input-streaming": + case "input-available": + return `${actionPhrase}${identifierText}`; + case "output-available": { + const output = parseOutput(part.output); + if (!output) return `${actionPhrase}${identifierText}`; + if (isRunAgentExecutionStartedOutput(output)) { + return `Started "${output.graph_name}"`; + } + if (isRunAgentAgentDetailsOutput(output)) { + return `Agent inputs needed for "${output.agent.name}"`; + } + if (isRunAgentSetupRequirementsOutput(output)) { + return `Setup needed for "${output.setup_info.agent_name}"`; + } + if (isRunAgentNeedLoginOutput(output)) + return "Sign in required to run agent"; + return "Error running agent"; + } + case "output-error": + return "Error running agent"; + default: + return actionPhrase; + } +} + +export function ToolIcon({ + isStreaming, + isError, +}: { + isStreaming?: boolean; + isError?: boolean; +}) { + if (isError) { + return ( + + ); + } + if (isStreaming) { + return ; + } + return ; +} + +export function AccordionIcon() { + 
return ; +} + +export function formatMaybeJson(value: unknown): string { + if (typeof value === "string") return value; + try { + return JSON.stringify(value, null, 2); + } catch { + return String(value); + } +} + +export function getAccordionMeta(output: RunAgentToolOutput): { + icon: React.ReactNode; + title: string; + titleClassName?: string; + description?: string; +} { + const icon = ; + + if (isRunAgentExecutionStartedOutput(output)) { + const statusText = + typeof output.status === "string" && output.status.trim() + ? output.status.trim() + : "started"; + return { + icon: , + title: output.graph_name, + description: `Status: ${statusText}`, + }; + } + + if (isRunAgentAgentDetailsOutput(output)) { + return { + icon, + title: output.agent.name, + description: "Inputs required", + }; + } + + if (isRunAgentSetupRequirementsOutput(output)) { + const missingCredsCount = Object.keys( + (output.setup_info.user_readiness?.missing_credentials ?? {}) as Record< + string, + unknown + >, + ).length; + return { + icon, + title: output.setup_info.agent_name, + description: + missingCredsCount > 0 + ? `Missing ${missingCredsCount} credential${missingCredsCount === 1 ? 
"" : "s"}` + : output.message, + }; + } + + if (isRunAgentNeedLoginOutput(output)) { + return { icon, title: "Sign in required" }; + } + + return { + icon: ( + + ), + title: "Error", + titleClassName: "text-red-500", + }; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/RunBlock.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/RunBlock.tsx new file mode 100644 index 0000000000..ded344efa2 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/RunBlock.tsx @@ -0,0 +1,76 @@ +"use client"; + +import type { ToolUIPart } from "ai"; +import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation"; +import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion"; +import { BlockOutputCard } from "./components/BlockOutputCard/BlockOutputCard"; +import { ErrorCard } from "./components/ErrorCard/ErrorCard"; +import { SetupRequirementsCard } from "./components/SetupRequirementsCard/SetupRequirementsCard"; +import { + getAccordionMeta, + getAnimationText, + getRunBlockToolOutput, + isRunBlockBlockOutput, + isRunBlockErrorOutput, + isRunBlockSetupRequirementsOutput, + ToolIcon, +} from "./helpers"; + +export interface RunBlockToolPart { + type: string; + toolCallId: string; + state: ToolUIPart["state"]; + input?: unknown; + output?: unknown; +} + +interface Props { + part: RunBlockToolPart; +} + +export function RunBlockTool({ part }: Props) { + const text = getAnimationText(part); + const isStreaming = + part.state === "input-streaming" || part.state === "input-available"; + + const output = getRunBlockToolOutput(part); + const isError = + part.state === "output-error" || + (!!output && isRunBlockErrorOutput(output)); + const hasExpandableContent = + part.state === "output-available" && + !!output && + (isRunBlockBlockOutput(output) || + isRunBlockSetupRequirementsOutput(output) || + isRunBlockErrorOutput(output)); + + return ( +
+
+ + +
+ + {hasExpandableContent && output && ( + + {isRunBlockBlockOutput(output) && } + + {isRunBlockSetupRequirementsOutput(output) && ( + + )} + + {isRunBlockErrorOutput(output) && } + + )} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/components/BlockOutputCard/BlockOutputCard.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/components/BlockOutputCard/BlockOutputCard.tsx new file mode 100644 index 0000000000..4051927653 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/components/BlockOutputCard/BlockOutputCard.tsx @@ -0,0 +1,133 @@ +"use client"; + +import React, { useState } from "react"; +import { getGetWorkspaceDownloadFileByIdUrl } from "@/app/api/__generated__/endpoints/workspace/workspace"; +import { Button } from "@/components/atoms/Button/Button"; +import type { BlockOutputResponse } from "@/app/api/__generated__/models/blockOutputResponse"; +import { + globalRegistry, + OutputItem, +} from "@/components/contextual/OutputRenderers"; +import type { OutputMetadata } from "@/components/contextual/OutputRenderers"; +import { + ContentBadge, + ContentCard, + ContentCardTitle, + ContentGrid, + ContentMessage, +} from "../../../../components/ToolAccordion/AccordionContent"; + +interface Props { + output: BlockOutputResponse; +} + +const COLLAPSED_LIMIT = 3; + +function isWorkspaceRef(value: unknown): value is string { + return typeof value === "string" && value.startsWith("workspace://"); +} + +function resolveForRenderer(value: unknown): { + value: unknown; + metadata?: OutputMetadata; +} { + if (!isWorkspaceRef(value)) return { value }; + + const withoutPrefix = value.replace("workspace://", ""); + const fileId = withoutPrefix.split("#")[0]; + const apiPath = getGetWorkspaceDownloadFileByIdUrl(fileId); + const url = `/api/proxy${apiPath}`; + + const hashIndex = value.indexOf("#"); + const mimeHint = + hashIndex !== -1 ? 
value.slice(hashIndex + 1) || undefined : undefined; + + const metadata: OutputMetadata = {}; + if (mimeHint) { + metadata.mimeType = mimeHint; + if (mimeHint.startsWith("image/")) metadata.type = "image"; + else if (mimeHint.startsWith("video/")) metadata.type = "video"; + } + + return { value: url, metadata }; +} + +function RenderOutputValue({ value }: { value: unknown }) { + const resolved = resolveForRenderer(value); + const renderer = globalRegistry.getRenderer( + resolved.value, + resolved.metadata, + ); + + if (renderer) { + return ( + + ); + } + + // Fallback for audio workspace refs + if ( + isWorkspaceRef(value) && + resolved.metadata?.mimeType?.startsWith("audio/") + ) { + return ( +

Started:{" "} - {moment(data.started_at).format("YYYY-MM-DD HH:mm:ss")} + {data.started_at + ? format(data.started_at, "yyyy-MM-dd HH:mm:ss") + : "β€”"}

{data.stats && (

diff --git a/autogpt_platform/frontend/src/app/api/chat/sessions/[sessionId]/stream/route.ts b/autogpt_platform/frontend/src/app/api/chat/sessions/[sessionId]/stream/route.ts index d63eed0ca2..6facf80c58 100644 --- a/autogpt_platform/frontend/src/app/api/chat/sessions/[sessionId]/stream/route.ts +++ b/autogpt_platform/frontend/src/app/api/chat/sessions/[sessionId]/stream/route.ts @@ -88,39 +88,27 @@ export async function POST( } /** - * Legacy GET endpoint for backward compatibility + * Resume an active stream for a session. + * + * Called by the AI SDK's `useChat(resume: true)` on page load. + * Proxies to the backend which checks for an active stream and either + * replays it (200 + SSE) or returns 204 No Content. */ export async function GET( - request: NextRequest, + _request: NextRequest, { params }: { params: Promise<{ sessionId: string }> }, ) { const { sessionId } = await params; - const searchParams = request.nextUrl.searchParams; - const message = searchParams.get("message"); - const isUserMessage = searchParams.get("is_user_message"); - - if (!message) { - return new Response("Missing message parameter", { status: 400 }); - } try { - // Get auth token from server-side session const token = await getServerAuthToken(); - // Build backend URL const backendUrl = environment.getAGPTServerBaseUrl(); const streamUrl = new URL( `/api/chat/sessions/${sessionId}/stream`, backendUrl, ); - streamUrl.searchParams.set("message", message); - // Pass is_user_message parameter if provided - if (isUserMessage !== null) { - streamUrl.searchParams.set("is_user_message", isUserMessage); - } - - // Forward request to backend with auth header const headers: Record = { Accept: "text/event-stream", "Cache-Control": "no-cache", @@ -136,6 +124,11 @@ export async function GET( headers, }); + // 204 = no active stream to resume + if (response.status === 204) { + return new Response(null, { status: 204 }); + } + if (!response.ok) { const error = await response.text(); return new 
Response(error, { @@ -144,17 +137,17 @@ export async function GET( }); } - // Return the SSE stream directly return new Response(response.body, { headers: { "Content-Type": "text/event-stream", "Cache-Control": "no-cache, no-transform", Connection: "keep-alive", "X-Accel-Buffering": "no", + "x-vercel-ai-ui-message-stream": "v1", }, }); } catch (error) { - console.error("SSE proxy error:", error); + console.error("Resume stream proxy error:", error); return new Response( JSON.stringify({ error: "Failed to connect to chat service", diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index ccf5ad3e34..172419d27e 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -1018,6 +1018,58 @@ } } }, + "/api/chat/schema/tool-responses": { + "get": { + "tags": ["v2", "chat", "chat"], + "summary": "[Dummy] Tool response type export for codegen", + "description": "This endpoint is not meant to be called. 
It exists solely to expose tool response models in the OpenAPI schema for frontend codegen.", + "operationId": "getV2[dummy] tool response type export for codegen", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "anyOf": [ + { "$ref": "#/components/schemas/AgentsFoundResponse" }, + { "$ref": "#/components/schemas/NoResultsResponse" }, + { "$ref": "#/components/schemas/AgentDetailsResponse" }, + { + "$ref": "#/components/schemas/SetupRequirementsResponse" + }, + { "$ref": "#/components/schemas/ExecutionStartedResponse" }, + { "$ref": "#/components/schemas/NeedLoginResponse" }, + { "$ref": "#/components/schemas/ErrorResponse" }, + { + "$ref": "#/components/schemas/InputValidationErrorResponse" + }, + { "$ref": "#/components/schemas/AgentOutputResponse" }, + { + "$ref": "#/components/schemas/UnderstandingUpdatedResponse" + }, + { "$ref": "#/components/schemas/AgentPreviewResponse" }, + { "$ref": "#/components/schemas/AgentSavedResponse" }, + { + "$ref": "#/components/schemas/ClarificationNeededResponse" + }, + { "$ref": "#/components/schemas/BlockListResponse" }, + { "$ref": "#/components/schemas/BlockOutputResponse" }, + { "$ref": "#/components/schemas/DocSearchResultsResponse" }, + { "$ref": "#/components/schemas/DocPageResponse" }, + { "$ref": "#/components/schemas/OperationStartedResponse" }, + { "$ref": "#/components/schemas/OperationPendingResponse" }, + { + "$ref": "#/components/schemas/OperationInProgressResponse" + } + ], + "title": "Response Getv2[Dummy] Tool Response Type Export For Codegen" + } + } + } + } + } + } + }, "/api/chat/sessions": { "get": { "tags": ["v2", "chat", "chat"], @@ -1182,9 +1234,9 @@ "/api/chat/sessions/{session_id}/stream": { "get": { "tags": ["v2", "chat", "chat"], - "summary": "Stream Chat Get", - "description": "Stream chat responses for a session (GET - legacy endpoint).\n\nStreams the AI/completion responses in real time over Server-Sent Events (SSE), 
including:\n - Text fragments as they are generated\n - Tool call UI elements (if invoked)\n - Tool execution results\n\nArgs:\n session_id: The chat session identifier to associate with the streamed messages.\n message: The user's new message to process.\n user_id: Optional authenticated user ID.\n is_user_message: Whether the message is a user message.\nReturns:\n StreamingResponse: SSE-formatted response chunks.", - "operationId": "getV2StreamChatGet", + "summary": "Resume Session Stream", + "description": "Resume an active stream for a session.\n\nCalled by the AI SDK's ``useChat(resume: true)`` on page load.\nChecks for an active (in-progress) task on the session and either replays\nthe full SSE stream or returns 204 No Content if nothing is running.\n\nArgs:\n session_id: The chat session identifier.\n user_id: Optional authenticated user ID.\n\nReturns:\n StreamingResponse (SSE) when an active stream exists,\n or 204 No Content when there is nothing to resume.", + "operationId": "getV2ResumeSessionStream", "security": [{ "HTTPBearerJWT": [] }], "parameters": [ { @@ -1192,27 +1244,6 @@ "in": "path", "required": true, "schema": { "type": "string", "title": "Session Id" } - }, - { - "name": "message", - "in": "query", - "required": true, - "schema": { - "type": "string", - "minLength": 1, - "maxLength": 10000, - "title": "Message" - } - }, - { - "name": "is_user_message", - "in": "query", - "required": false, - "schema": { - "type": "boolean", - "default": true, - "title": "Is User Message" - } } ], "responses": { @@ -6358,6 +6389,75 @@ "required": ["new_balance", "transaction_key"], "title": "AddUserCreditsResponse" }, + "AgentDetails": { + "properties": { + "id": { "type": "string", "title": "Id" }, + "name": { "type": "string", "title": "Name" }, + "description": { "type": "string", "title": "Description" }, + "in_library": { + "type": "boolean", + "title": "In Library", + "default": false + }, + "inputs": { + "additionalProperties": true, + "type": 
"object", + "title": "Inputs", + "default": {} + }, + "credentials": { + "items": { "$ref": "#/components/schemas/CredentialsMetaInput" }, + "type": "array", + "title": "Credentials", + "default": [] + }, + "execution_options": { + "$ref": "#/components/schemas/ExecutionOptions" + }, + "trigger_info": { + "anyOf": [ + { "additionalProperties": true, "type": "object" }, + { "type": "null" } + ], + "title": "Trigger Info" + } + }, + "type": "object", + "required": ["id", "name", "description"], + "title": "AgentDetails", + "description": "Detailed agent information." + }, + "AgentDetailsResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "agent_details" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "agent": { "$ref": "#/components/schemas/AgentDetails" }, + "user_authenticated": { + "type": "boolean", + "title": "User Authenticated", + "default": false + }, + "graph_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Graph Id" + }, + "graph_version": { + "anyOf": [{ "type": "integer" }, { "type": "null" }], + "title": "Graph Version" + } + }, + "type": "object", + "required": ["message", "agent"], + "title": "AgentDetailsResponse", + "description": "Response for get_details action." 
+ }, "AgentExecutionStatus": { "type": "string", "enum": [ @@ -6371,6 +6471,224 @@ ], "title": "AgentExecutionStatus" }, + "AgentInfo": { + "properties": { + "id": { "type": "string", "title": "Id" }, + "name": { "type": "string", "title": "Name" }, + "description": { "type": "string", "title": "Description" }, + "source": { + "type": "string", + "title": "Source", + "description": "marketplace or library" + }, + "in_library": { + "type": "boolean", + "title": "In Library", + "default": false + }, + "creator": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Creator" + }, + "category": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Category" + }, + "rating": { + "anyOf": [{ "type": "number" }, { "type": "null" }], + "title": "Rating" + }, + "runs": { + "anyOf": [{ "type": "integer" }, { "type": "null" }], + "title": "Runs" + }, + "is_featured": { + "anyOf": [{ "type": "boolean" }, { "type": "null" }], + "title": "Is Featured" + }, + "status": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Status" + }, + "can_access_graph": { + "anyOf": [{ "type": "boolean" }, { "type": "null" }], + "title": "Can Access Graph" + }, + "has_external_trigger": { + "anyOf": [{ "type": "boolean" }, { "type": "null" }], + "title": "Has External Trigger" + }, + "new_output": { + "anyOf": [{ "type": "boolean" }, { "type": "null" }], + "title": "New Output" + }, + "graph_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Graph Id" + }, + "inputs": { + "anyOf": [ + { "additionalProperties": true, "type": "object" }, + { "type": "null" } + ], + "title": "Inputs", + "description": "Input schema for the agent, including field names, types, and defaults" + } + }, + "type": "object", + "required": ["id", "name", "description", "source"], + "title": "AgentInfo", + "description": "Information about an agent." 
+ }, + "AgentOutputResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "agent_output" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "agent_name": { "type": "string", "title": "Agent Name" }, + "agent_id": { "type": "string", "title": "Agent Id" }, + "library_agent_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Library Agent Id" + }, + "library_agent_link": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Library Agent Link" + }, + "execution": { + "anyOf": [ + { "$ref": "#/components/schemas/ExecutionOutputInfo" }, + { "type": "null" } + ] + }, + "available_executions": { + "anyOf": [ + { + "items": { "additionalProperties": true, "type": "object" }, + "type": "array" + }, + { "type": "null" } + ], + "title": "Available Executions" + }, + "total_executions": { + "type": "integer", + "title": "Total Executions", + "default": 0 + } + }, + "type": "object", + "required": ["message", "agent_name", "agent_id"], + "title": "AgentOutputResponse", + "description": "Response for agent_output tool." 
+ }, + "AgentPreviewResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "agent_preview" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "agent_json": { + "additionalProperties": true, + "type": "object", + "title": "Agent Json" + }, + "agent_name": { "type": "string", "title": "Agent Name" }, + "description": { "type": "string", "title": "Description" }, + "node_count": { "type": "integer", "title": "Node Count" }, + "link_count": { + "type": "integer", + "title": "Link Count", + "default": 0 + } + }, + "type": "object", + "required": [ + "message", + "agent_json", + "agent_name", + "description", + "node_count" + ], + "title": "AgentPreviewResponse", + "description": "Response for previewing a generated agent before saving." + }, + "AgentSavedResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "agent_saved" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "agent_id": { "type": "string", "title": "Agent Id" }, + "agent_name": { "type": "string", "title": "Agent Name" }, + "library_agent_id": { "type": "string", "title": "Library Agent Id" }, + "library_agent_link": { + "type": "string", + "title": "Library Agent Link" + }, + "agent_page_link": { "type": "string", "title": "Agent Page Link" } + }, + "type": "object", + "required": [ + "message", + "agent_id", + "agent_name", + "library_agent_id", + "library_agent_link", + "agent_page_link" + ], + "title": "AgentSavedResponse", + "description": "Response when an agent is saved to the library." 
+ }, + "AgentsFoundResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "agents_found" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "title": { + "type": "string", + "title": "Title", + "default": "Available Agents" + }, + "agents": { + "items": { "$ref": "#/components/schemas/AgentInfo" }, + "type": "array", + "title": "Agents" + }, + "count": { "type": "integer", "title": "Count" }, + "name": { + "type": "string", + "title": "Name", + "default": "agents_found" + } + }, + "type": "object", + "required": ["message", "agents", "count"], + "title": "AgentsFoundResponse", + "description": "Response for find_agent tool." + }, "ApiResponse": { "properties": { "answer": { "type": "string", "title": "Answer" }, @@ -6691,6 +7009,120 @@ ], "title": "BlockInfo" }, + "BlockInfoSummary": { + "properties": { + "id": { "type": "string", "title": "Id" }, + "name": { "type": "string", "title": "Name" }, + "description": { "type": "string", "title": "Description" }, + "categories": { + "items": { "type": "string" }, + "type": "array", + "title": "Categories" + }, + "input_schema": { + "additionalProperties": true, + "type": "object", + "title": "Input Schema" + }, + "output_schema": { + "additionalProperties": true, + "type": "object", + "title": "Output Schema" + }, + "required_inputs": { + "items": { "$ref": "#/components/schemas/BlockInputFieldInfo" }, + "type": "array", + "title": "Required Inputs", + "description": "List of required input fields for this block" + } + }, + "type": "object", + "required": [ + "id", + "name", + "description", + "categories", + "input_schema", + "output_schema" + ], + "title": "BlockInfoSummary", + "description": "Summary of a block for search results." 
+ }, + "BlockInputFieldInfo": { + "properties": { + "name": { "type": "string", "title": "Name" }, + "type": { "type": "string", "title": "Type" }, + "description": { + "type": "string", + "title": "Description", + "default": "" + }, + "required": { + "type": "boolean", + "title": "Required", + "default": false + }, + "default": { "anyOf": [{}, { "type": "null" }], "title": "Default" } + }, + "type": "object", + "required": ["name", "type"], + "title": "BlockInputFieldInfo", + "description": "Information about a block input field." + }, + "BlockListResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "block_list" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "blocks": { + "items": { "$ref": "#/components/schemas/BlockInfoSummary" }, + "type": "array", + "title": "Blocks" + }, + "count": { "type": "integer", "title": "Count" }, + "query": { "type": "string", "title": "Query" }, + "usage_hint": { + "type": "string", + "title": "Usage Hint", + "default": "To execute a block, call run_block with block_id set to the block's 'id' field and input_data containing the required fields from input_schema." + } + }, + "type": "object", + "required": ["message", "blocks", "count", "query"], + "title": "BlockListResponse", + "description": "Response for find_block tool." 
+ }, + "BlockOutputResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "block_output" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "block_id": { "type": "string", "title": "Block Id" }, + "block_name": { "type": "string", "title": "Block Name" }, + "outputs": { + "additionalProperties": { "items": {}, "type": "array" }, + "type": "object", + "title": "Outputs" + }, + "success": { "type": "boolean", "title": "Success", "default": true } + }, + "type": "object", + "required": ["message", "block_id", "block_name", "outputs"], + "title": "BlockOutputResponse", + "description": "Response for run_block tool." + }, "BlockResponse": { "properties": { "blocks": { @@ -6937,6 +7369,42 @@ "required": ["query", "conversation_history", "message_id"], "title": "ChatRequest" }, + "ClarificationNeededResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "clarification_needed" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "questions": { + "items": { "$ref": "#/components/schemas/ClarifyingQuestion" }, + "type": "array", + "title": "Questions" + } + }, + "type": "object", + "required": ["message"], + "title": "ClarificationNeededResponse", + "description": "Response when the LLM needs more information from the user." + }, + "ClarifyingQuestion": { + "properties": { + "question": { "type": "string", "title": "Question" }, + "keyword": { "type": "string", "title": "Keyword" }, + "example": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Example" + } + }, + "type": "object", + "required": ["question", "keyword"], + "title": "ClarifyingQuestion", + "description": "A question that needs user clarification." 
+ }, "CountResponse": { "properties": { "all_blocks": { "type": "integer", "title": "All Blocks" }, @@ -7195,6 +7663,71 @@ "required": ["version_counts"], "title": "DeleteGraphResponse" }, + "DocPageResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "doc_page" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "title": { "type": "string", "title": "Title" }, + "path": { "type": "string", "title": "Path" }, + "content": { "type": "string", "title": "Content" }, + "doc_url": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Doc Url" + } + }, + "type": "object", + "required": ["message", "title", "path", "content"], + "title": "DocPageResponse", + "description": "Response for get_doc_page tool." + }, + "DocSearchResult": { + "properties": { + "title": { "type": "string", "title": "Title" }, + "path": { "type": "string", "title": "Path" }, + "section": { "type": "string", "title": "Section" }, + "snippet": { "type": "string", "title": "Snippet" }, + "score": { "type": "number", "title": "Score" }, + "doc_url": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Doc Url" + } + }, + "type": "object", + "required": ["title", "path", "section", "snippet", "score"], + "title": "DocSearchResult", + "description": "A single documentation search result." 
+ }, + "DocSearchResultsResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "doc_search_results" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "results": { + "items": { "$ref": "#/components/schemas/DocSearchResult" }, + "type": "array", + "title": "Results" + }, + "count": { "type": "integer", "title": "Count" }, + "query": { "type": "string", "title": "Query" } + }, + "type": "object", + "required": ["message", "results", "count", "query"], + "title": "DocSearchResultsResponse", + "description": "Response for search_docs tool." + }, "Document": { "properties": { "url": { "type": "string", "title": "Url" }, @@ -7204,6 +7737,34 @@ "required": ["url", "relevance_score"], "title": "Document" }, + "ErrorResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "error" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "error": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Error" + }, + "details": { + "anyOf": [ + { "additionalProperties": true, "type": "object" }, + { "type": "null" } + ], + "title": "Details" + } + }, + "type": "object", + "required": ["message"], + "title": "ErrorResponse", + "description": "Response for errors." 
+ }, "ExecutionAnalyticsConfig": { "properties": { "available_models": { @@ -7380,6 +7941,85 @@ ], "title": "ExecutionAnalyticsResult" }, + "ExecutionOptions": { + "properties": { + "manual": { "type": "boolean", "title": "Manual", "default": true }, + "scheduled": { + "type": "boolean", + "title": "Scheduled", + "default": true + }, + "webhook": { "type": "boolean", "title": "Webhook", "default": false } + }, + "type": "object", + "title": "ExecutionOptions", + "description": "Available execution options for an agent." + }, + "ExecutionOutputInfo": { + "properties": { + "execution_id": { "type": "string", "title": "Execution Id" }, + "status": { "type": "string", "title": "Status" }, + "started_at": { + "anyOf": [ + { "type": "string", "format": "date-time" }, + { "type": "null" } + ], + "title": "Started At" + }, + "ended_at": { + "anyOf": [ + { "type": "string", "format": "date-time" }, + { "type": "null" } + ], + "title": "Ended At" + }, + "outputs": { + "additionalProperties": { "items": {}, "type": "array" }, + "type": "object", + "title": "Outputs" + }, + "inputs_summary": { + "anyOf": [ + { "additionalProperties": true, "type": "object" }, + { "type": "null" } + ], + "title": "Inputs Summary" + } + }, + "type": "object", + "required": ["execution_id", "status", "outputs"], + "title": "ExecutionOutputInfo", + "description": "Summary of a single execution's outputs." 
+ }, + "ExecutionStartedResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "execution_started" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "execution_id": { "type": "string", "title": "Execution Id" }, + "graph_id": { "type": "string", "title": "Graph Id" }, + "graph_name": { "type": "string", "title": "Graph Name" }, + "library_agent_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Library Agent Id" + }, + "library_agent_link": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Library Agent Link" + }, + "status": { "type": "string", "title": "Status", "default": "QUEUED" } + }, + "type": "object", + "required": ["message", "execution_id", "graph_id", "graph_name"], + "title": "ExecutionStartedResponse", + "description": "Response for run/schedule actions." + }, "Graph": { "properties": { "id": { "type": "string", "title": "Id" }, @@ -8131,6 +8771,43 @@ "required": ["provider", "host"], "title": "HostScopedCredentials" }, + "InputValidationErrorResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "input_validation_error" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "unrecognized_fields": { + "items": { "type": "string" }, + "type": "array", + "title": "Unrecognized Fields", + "description": "List of input field names that were not recognized" + }, + "inputs": { + "additionalProperties": true, + "type": "object", + "title": "Inputs", + "description": "The agent's valid input schema for reference" + }, + "graph_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Graph Id" + }, + "graph_version": { + "anyOf": [{ "type": "integer" }, { "type": "null" }], + "title": 
"Graph Version" + } + }, + "type": "object", + "required": ["message", "unrecognized_fields", "inputs"], + "title": "InputValidationErrorResponse", + "description": "Response when run_agent receives unknown input fields." + }, "LibraryAgent": { "properties": { "id": { "type": "string", "title": "Id" }, @@ -8649,6 +9326,54 @@ "required": ["agents", "pagination"], "title": "MyAgentsResponse" }, + "NeedLoginResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "need_login" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "agent_info": { + "anyOf": [ + { "additionalProperties": true, "type": "object" }, + { "type": "null" } + ], + "title": "Agent Info" + } + }, + "type": "object", + "required": ["message"], + "title": "NeedLoginResponse", + "description": "Response when login is needed." + }, + "NoResultsResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "no_results" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "suggestions": { + "items": { "type": "string" }, + "type": "array", + "title": "Suggestions", + "default": [] + }, + "name": { "type": "string", "title": "Name", "default": "no_results" } + }, + "type": "object", + "required": ["message"], + "title": "NoResultsResponse", + "description": "Response when no agents found." + }, "Node": { "properties": { "id": { "type": "string", "title": "Id" }, @@ -9058,6 +9783,66 @@ "title": "OperationCompleteRequest", "description": "Request model for external completion webhook." 
}, + "OperationInProgressResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "operation_in_progress" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "tool_call_id": { "type": "string", "title": "Tool Call Id" } + }, + "type": "object", + "required": ["message", "tool_call_id"], + "title": "OperationInProgressResponse", + "description": "Response when an operation is already in progress.\n\nReturned for idempotency when the same tool_call_id is requested again\nwhile the background task is still running." + }, + "OperationPendingResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "operation_pending" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "operation_id": { "type": "string", "title": "Operation Id" }, + "tool_name": { "type": "string", "title": "Tool Name" } + }, + "type": "object", + "required": ["message", "operation_id", "tool_name"], + "title": "OperationPendingResponse", + "description": "Response stored in chat history while a long-running operation is executing.\n\nThis is persisted to the database so users see a pending state when they\nrefresh before the operation completes." 
+ }, + "OperationStartedResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "operation_started" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "operation_id": { "type": "string", "title": "Operation Id" }, + "tool_name": { "type": "string", "title": "Tool Name" }, + "task_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Task Id" + } + }, + "type": "object", + "required": ["message", "operation_id", "tool_name"], + "title": "OperationStartedResponse", + "description": "Response when a long-running operation has been started in the background.\n\nThis is returned immediately to the client while the operation continues\nto execute. The user can close the tab and check back later.\n\nThe task_id can be used to reconnect to the SSE stream via\nGET /chat/tasks/{task_id}/stream?last_idx=0" + }, "Pagination": { "properties": { "total_items": { @@ -9689,6 +10474,38 @@ "required": ["credit_amount"], "title": "RequestTopUp" }, + "ResponseType": { + "type": "string", + "enum": [ + "agents_found", + "agent_details", + "setup_requirements", + "execution_started", + "need_login", + "error", + "no_results", + "agent_output", + "understanding_updated", + "agent_preview", + "agent_saved", + "clarification_needed", + "block_list", + "block_output", + "doc_search_results", + "doc_page", + "workspace_file_list", + "workspace_file_content", + "workspace_file_metadata", + "workspace_file_written", + "workspace_file_deleted", + "operation_started", + "operation_pending", + "operation_in_progress", + "input_validation_error" + ], + "title": "ResponseType", + "description": "Types of tool responses." 
+ }, "ReviewItem": { "properties": { "node_exec_id": { @@ -9952,6 +10769,48 @@ "required": ["active_graph_version"], "title": "SetGraphActiveVersion" }, + "SetupInfo": { + "properties": { + "agent_id": { "type": "string", "title": "Agent Id" }, + "agent_name": { "type": "string", "title": "Agent Name" }, + "requirements": { + "additionalProperties": { "items": {}, "type": "array" }, + "type": "object", + "title": "Requirements" + }, + "user_readiness": { "$ref": "#/components/schemas/UserReadiness" } + }, + "type": "object", + "required": ["agent_id", "agent_name"], + "title": "SetupInfo", + "description": "Complete setup information." + }, + "SetupRequirementsResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "setup_requirements" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "setup_info": { "$ref": "#/components/schemas/SetupInfo" }, + "graph_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Graph Id" + }, + "graph_version": { + "anyOf": [{ "type": "integer" }, { "type": "null" }], + "title": "Graph Version" + } + }, + "type": "object", + "required": ["message", "setup_info"], + "title": "SetupRequirementsResponse", + "description": "Response for validate action." 
+ }, "ShareRequest": { "properties": {}, "type": "object", @@ -11348,6 +12207,33 @@ "required": ["name", "graph_id", "graph_version", "trigger_config"], "title": "TriggeredPresetSetupRequest" }, + "UnderstandingUpdatedResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "understanding_updated" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "updated_fields": { + "items": { "type": "string" }, + "type": "array", + "title": "Updated Fields" + }, + "current_understanding": { + "additionalProperties": true, + "type": "object", + "title": "Current Understanding" + } + }, + "type": "object", + "required": ["message"], + "title": "UnderstandingUpdatedResponse", + "description": "Response for add_understanding tool." + }, "UnifiedSearchResponse": { "properties": { "results": { @@ -12226,6 +13112,29 @@ "required": ["provider", "username", "password"], "title": "UserPasswordCredentials" }, + "UserReadiness": { + "properties": { + "has_all_credentials": { + "type": "boolean", + "title": "Has All Credentials", + "default": false + }, + "missing_credentials": { + "additionalProperties": true, + "type": "object", + "title": "Missing Credentials", + "default": {} + }, + "ready_to_run": { + "type": "boolean", + "title": "Ready To Run", + "default": false + } + }, + "type": "object", + "title": "UserReadiness", + "description": "User readiness status." 
+ }, "UserTransaction": { "properties": { "transaction_key": { diff --git a/autogpt_platform/frontend/src/app/globals.css b/autogpt_platform/frontend/src/app/globals.css index 1f782f753b..dd1d17cde7 100644 --- a/autogpt_platform/frontend/src/app/globals.css +++ b/autogpt_platform/frontend/src/app/globals.css @@ -1,6 +1,7 @@ @tailwind base; @tailwind components; @tailwind utilities; +@source "../node_modules/streamdown/dist/*.js"; @layer base { :root { @@ -29,6 +30,14 @@ --chart-3: 197 37% 24%; --chart-4: 43 74% 66%; --chart-5: 27 87% 67%; + --sidebar-background: 0 0% 98%; + --sidebar-foreground: 240 5.3% 26.1%; + --sidebar-primary: 240 5.9% 10%; + --sidebar-primary-foreground: 0 0% 98%; + --sidebar-accent: 240 4.8% 95.9%; + --sidebar-accent-foreground: 240 5.9% 10%; + --sidebar-border: 220 13% 91%; + --sidebar-ring: 217.2 91.2% 59.8%; } .dark { @@ -56,6 +65,14 @@ --chart-3: 30 80% 55%; --chart-4: 280 65% 60%; --chart-5: 340 75% 55%; + --sidebar-background: 240 5.9% 10%; + --sidebar-foreground: 240 4.8% 95.9%; + --sidebar-primary: 224.3 76.3% 48%; + --sidebar-primary-foreground: 0 0% 100%; + --sidebar-accent: 240 3.7% 15.9%; + --sidebar-accent-foreground: 240 4.8% 95.9%; + --sidebar-border: 240 3.7% 15.9%; + --sidebar-ring: 217.2 91.2% 59.8%; } * { diff --git a/autogpt_platform/frontend/src/components/ai-elements/conversation.tsx b/autogpt_platform/frontend/src/components/ai-elements/conversation.tsx new file mode 100644 index 0000000000..92e940c715 --- /dev/null +++ b/autogpt_platform/frontend/src/components/ai-elements/conversation.tsx @@ -0,0 +1,109 @@ +"use client"; + +import { Button } from "@/components/ui/button"; +import { scrollbarStyles } from "@/components/styles/scrollbars"; +import { cn } from "@/lib/utils"; +import { ArrowDownIcon } from "lucide-react"; +import type { ComponentProps } from "react"; +import { useCallback } from "react"; +import { StickToBottom, useStickToBottomContext } from "use-stick-to-bottom"; + +export type ConversationProps = 
ComponentProps; + +export const Conversation = ({ className, ...props }: ConversationProps) => ( + +); + +export type ConversationContentProps = ComponentProps< + typeof StickToBottom.Content +>; + +export const ConversationContent = ({ + className, + ...props +}: ConversationContentProps) => ( + +); + +export type ConversationEmptyStateProps = ComponentProps<"div"> & { + title?: string; + description?: string; + icon?: React.ReactNode; +}; + +export const ConversationEmptyState = ({ + className, + title = "No messages yet", + description = "Start a conversation to see messages here", + icon, + children, + ...props +}: ConversationEmptyStateProps) => ( +

+ {children ?? ( + <> + {icon && ( +
{icon}
+ )} +
+

{title}

+ {description && ( +

+ {description} +

+ )} +
+ + )} +
+); + +export type ConversationScrollButtonProps = ComponentProps; + +export const ConversationScrollButton = ({ + className, + ...props +}: ConversationScrollButtonProps) => { + const { isAtBottom, scrollToBottom } = useStickToBottomContext(); + + const handleScrollToBottom = useCallback(() => { + scrollToBottom(); + }, [scrollToBottom]); + + return ( + !isAtBottom && ( + + ) + ); +}; diff --git a/autogpt_platform/frontend/src/components/ai-elements/message.tsx b/autogpt_platform/frontend/src/components/ai-elements/message.tsx new file mode 100644 index 0000000000..5cc330e57c --- /dev/null +++ b/autogpt_platform/frontend/src/components/ai-elements/message.tsx @@ -0,0 +1,338 @@ +"use client"; + +import { Button } from "@/components/ui/button"; +import { ButtonGroup, ButtonGroupText } from "@/components/ui/button-group"; +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from "@/components/ui/tooltip"; +import { cn } from "@/lib/utils"; +import { cjk } from "@streamdown/cjk"; +import { code } from "@streamdown/code"; +import { math } from "@streamdown/math"; +import { mermaid } from "@streamdown/mermaid"; +import type { UIMessage } from "ai"; +import { ChevronLeftIcon, ChevronRightIcon } from "lucide-react"; +import type { ComponentProps, HTMLAttributes, ReactElement } from "react"; +import { createContext, memo, useContext, useEffect, useState } from "react"; +import { Streamdown } from "streamdown"; + +export type MessageProps = HTMLAttributes & { + from: UIMessage["role"]; +}; + +export const Message = ({ className, from, ...props }: MessageProps) => ( +
+); + +export type MessageContentProps = HTMLAttributes; + +export const MessageContent = ({ + children, + className, + ...props +}: MessageContentProps) => ( +
+ {children} +
+); + +export type MessageActionsProps = ComponentProps<"div">; + +export const MessageActions = ({ + className, + children, + ...props +}: MessageActionsProps) => ( +
+ {children} +
+); + +export type MessageActionProps = ComponentProps & { + tooltip?: string; + label?: string; +}; + +export const MessageAction = ({ + tooltip, + children, + label, + variant = "ghost", + size = "icon-sm", + ...props +}: MessageActionProps) => { + const button = ( + + ); + + if (tooltip) { + return ( + + + {button} + +

{tooltip}

+
+
+
+ ); + } + + return button; +}; + +interface MessageBranchContextType { + currentBranch: number; + totalBranches: number; + goToPrevious: () => void; + goToNext: () => void; + branches: ReactElement[]; + setBranches: (branches: ReactElement[]) => void; +} + +const MessageBranchContext = createContext( + null, +); + +const useMessageBranch = () => { + const context = useContext(MessageBranchContext); + + if (!context) { + throw new Error("MessageBranch components must be used within"); + } + + return context; +}; + +export type MessageBranchProps = HTMLAttributes & { + defaultBranch?: number; + onBranchChange?: (branchIndex: number) => void; +}; + +export const MessageBranch = ({ + defaultBranch = 0, + onBranchChange, + className, + ...props +}: MessageBranchProps) => { + const [currentBranch, setCurrentBranch] = useState(defaultBranch); + const [branches, setBranches] = useState([]); + + const handleBranchChange = (newBranch: number) => { + setCurrentBranch(newBranch); + onBranchChange?.(newBranch); + }; + + const goToPrevious = () => { + const newBranch = + currentBranch > 0 ? currentBranch - 1 : branches.length - 1; + handleBranchChange(newBranch); + }; + + const goToNext = () => { + const newBranch = + currentBranch < branches.length - 1 ? currentBranch + 1 : 0; + handleBranchChange(newBranch); + }; + + const contextValue: MessageBranchContextType = { + currentBranch, + totalBranches: branches.length, + goToPrevious, + goToNext, + branches, + setBranches, + }; + + return ( + +
div]:pb-0", className)} + {...props} + /> + + ); +}; + +export type MessageBranchContentProps = HTMLAttributes; + +export const MessageBranchContent = ({ + children, + ...props +}: MessageBranchContentProps) => { + const { currentBranch, setBranches, branches } = useMessageBranch(); + const childrenArray = Array.isArray(children) ? children : [children]; + + // Use useEffect to update branches when they change + useEffect(() => { + if (branches.length !== childrenArray.length) { + setBranches(childrenArray); + } + }, [childrenArray, branches, setBranches]); + + return childrenArray.map((branch, index) => ( +
div]:pb-0", + index === currentBranch ? "block" : "hidden", + )} + key={branch.key} + {...props} + > + {branch} +
+ )); +}; + +export type MessageBranchSelectorProps = HTMLAttributes & { + from: UIMessage["role"]; +}; + +export const MessageBranchSelector = ({ + className, + from: _from, + ...props +}: MessageBranchSelectorProps) => { + const { totalBranches } = useMessageBranch(); + + // Don't render if there's only one branch + if (totalBranches <= 1) { + return null; + } + + return ( + *:not(:first-child)]:rounded-l-md [&>*:not(:last-child)]:rounded-r-md", + className, + )} + orientation="horizontal" + {...props} + /> + ); +}; + +export type MessageBranchPreviousProps = ComponentProps; + +export const MessageBranchPrevious = ({ + children, + ...props +}: MessageBranchPreviousProps) => { + const { goToPrevious, totalBranches } = useMessageBranch(); + + return ( + + ); +}; + +export type MessageBranchNextProps = ComponentProps; + +export const MessageBranchNext = ({ + children, + ...props +}: MessageBranchNextProps) => { + const { goToNext, totalBranches } = useMessageBranch(); + + return ( + + ); +}; + +export type MessageBranchPageProps = HTMLAttributes; + +export const MessageBranchPage = ({ + className, + ...props +}: MessageBranchPageProps) => { + const { currentBranch, totalBranches } = useMessageBranch(); + + return ( + + {currentBranch + 1} of {totalBranches} + + ); +}; + +export type MessageResponseProps = ComponentProps; + +export const MessageResponse = memo( + ({ className, ...props }: MessageResponseProps) => ( + *:first-child]:mt-0 [&>*:last-child]:mb-0 [&_pre]:!bg-white", + className, + )} + plugins={{ code, mermaid, math, cjk }} + {...props} + /> + ), + (prevProps, nextProps) => prevProps.children === nextProps.children, +); + +MessageResponse.displayName = "MessageResponse"; + +export type MessageToolbarProps = ComponentProps<"div">; + +export const MessageToolbar = ({ + className, + children, + ...props +}: MessageToolbarProps) => ( +
+ {children} +
+); diff --git a/autogpt_platform/frontend/src/components/atoms/OverflowText/OverflowText.tsx b/autogpt_platform/frontend/src/components/atoms/OverflowText/OverflowText.tsx index efc345f79c..b118cc5aa0 100644 --- a/autogpt_platform/frontend/src/components/atoms/OverflowText/OverflowText.tsx +++ b/autogpt_platform/frontend/src/components/atoms/OverflowText/OverflowText.tsx @@ -77,7 +77,7 @@ export function OverflowText(props: Props) { "block min-w-0 overflow-hidden text-ellipsis whitespace-nowrap", )} > - + {value} diff --git a/autogpt_platform/frontend/src/components/atoms/Text/Text.tsx b/autogpt_platform/frontend/src/components/atoms/Text/Text.tsx index 8bae184e5b..86c39b6436 100644 --- a/autogpt_platform/frontend/src/components/atoms/Text/Text.tsx +++ b/autogpt_platform/frontend/src/components/atoms/Text/Text.tsx @@ -1,4 +1,5 @@ import React from "react"; +import { cn } from "@/lib/utils"; import { As, Variant, variantElementMap, variants } from "./helpers"; type CustomProps = { @@ -22,7 +23,7 @@ export function Text({ }: TextProps) { const variantClasses = variants[size || variant] || variants.body; const Element = outerAs || variantElementMap[variant]; - const combinedClassName = `${variantClasses} ${className}`.trim(); + const combinedClassName = cn(variantClasses, className); return React.createElement( Element, diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx deleted file mode 100644 index da454150bf..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx +++ /dev/null @@ -1,114 +0,0 @@ -"use client"; - -import { useCopilotSessionId } from "@/app/(platform)/copilot/useCopilotSessionId"; -import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; -import { Text } from "@/components/atoms/Text/Text"; -import { cn } from "@/lib/utils"; -import { useEffect, useRef } from "react"; -import { ChatContainer } from 
"./components/ChatContainer/ChatContainer"; -import { ChatErrorState } from "./components/ChatErrorState/ChatErrorState"; -import { useChat } from "./useChat"; - -export interface ChatProps { - className?: string; - initialPrompt?: string; - onSessionNotFound?: () => void; - onStreamingChange?: (isStreaming: boolean) => void; -} - -export function Chat({ - className, - initialPrompt, - onSessionNotFound, - onStreamingChange, -}: ChatProps) { - const { urlSessionId } = useCopilotSessionId(); - const hasHandledNotFoundRef = useRef(false); - const { - session, - messages, - isLoading, - isCreating, - error, - isSessionNotFound, - sessionId, - createSession, - showLoader, - startPollingForOperation, - } = useChat({ urlSessionId }); - - // Extract active stream info for reconnection - const activeStream = ( - session as { - active_stream?: { - task_id: string; - last_message_id: string; - operation_id: string; - tool_name: string; - }; - } - )?.active_stream; - - useEffect(() => { - if (!onSessionNotFound) return; - if (!urlSessionId) return; - if (!isSessionNotFound || isLoading || isCreating) return; - if (hasHandledNotFoundRef.current) return; - hasHandledNotFoundRef.current = true; - onSessionNotFound(); - }, [ - onSessionNotFound, - urlSessionId, - isSessionNotFound, - isLoading, - isCreating, - ]); - - const shouldShowLoader = showLoader && (isLoading || isCreating); - - return ( -
- {/* Main Content */} -
- {/* Loading State */} - {shouldShowLoader && ( -
-
- - - Loading your chat... - -
-
- )} - - {/* Error State */} - {error && !isLoading && ( - - )} - - {/* Session Content */} - {sessionId && !isLoading && !error && ( - - )} -
-
- ); -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/SSE_RECONNECTION.md b/autogpt_platform/frontend/src/components/contextual/Chat/SSE_RECONNECTION.md deleted file mode 100644 index 9e78679f4e..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/SSE_RECONNECTION.md +++ /dev/null @@ -1,159 +0,0 @@ -# SSE Reconnection Contract for Long-Running Operations - -This document describes the client-side contract for handling SSE (Server-Sent Events) disconnections and reconnecting to long-running background tasks. - -## Overview - -When a user triggers a long-running operation (like agent generation), the backend: - -1. Spawns a background task that survives SSE disconnections -2. Returns an `operation_started` response with a `task_id` -3. Stores stream messages in Redis Streams for replay - -Clients can reconnect to the task stream at any time to receive missed messages. - -## Client-Side Flow - -### 1. Receiving Operation Started - -When you receive an `operation_started` tool response: - -```typescript -// The response includes a task_id for reconnection -{ - type: "operation_started", - tool_name: "generate_agent", - operation_id: "uuid-...", - task_id: "task-uuid-...", // <-- Store this for reconnection - message: "Operation started. You can close this tab." -} -``` - -### 2. Storing Task Info - -Use the chat store to track the active task: - -```typescript -import { useChatStore } from "./chat-store"; - -// When operation_started is received: -useChatStore.getState().setActiveTask(sessionId, { - taskId: response.task_id, - operationId: response.operation_id, - toolName: response.tool_name, - lastMessageId: "0", -}); -``` - -### 3. 
Reconnecting to a Task - -To reconnect (e.g., after page refresh or tab reopen): - -```typescript -const { reconnectToTask, getActiveTask } = useChatStore.getState(); - -// Check if there's an active task for this session -const activeTask = getActiveTask(sessionId); - -if (activeTask) { - // Reconnect to the task stream - await reconnectToTask( - sessionId, - activeTask.taskId, - activeTask.lastMessageId, // Resume from last position - (chunk) => { - // Handle incoming chunks - console.log("Received chunk:", chunk); - }, - ); -} -``` - -### 4. Tracking Message Position - -To enable precise replay, update the last message ID as chunks arrive: - -```typescript -const { updateTaskLastMessageId } = useChatStore.getState(); - -function handleChunk(chunk: StreamChunk) { - // If chunk has an index/id, track it - if (chunk.idx !== undefined) { - updateTaskLastMessageId(sessionId, String(chunk.idx)); - } -} -``` - -## API Endpoints - -### Task Stream Reconnection - -``` -GET /api/chat/tasks/{taskId}/stream?last_message_id={idx} -``` - -- `taskId`: The task ID from `operation_started` -- `last_message_id`: Last received message index (default: "0" for full replay) - -Returns: SSE stream of missed messages + live updates - -## Chunk Types - -The reconnected stream follows the same Vercel AI SDK protocol: - -| Type | Description | -| ----------------------- | ----------------------- | -| `start` | Message lifecycle start | -| `text-delta` | Streaming text content | -| `text-end` | Text block completed | -| `tool-output-available` | Tool result available | -| `finish` | Stream completed | -| `error` | Error occurred | - -## Error Handling - -If reconnection fails: - -1. Check if task still exists (may have expired - default TTL: 1 hour) -2. Fall back to polling the session for final state -3. 
Show appropriate UI message to user - -## Persistence Considerations - -For robust reconnection across browser restarts: - -```typescript -// Store in localStorage/sessionStorage -const ACTIVE_TASKS_KEY = "chat_active_tasks"; - -function persistActiveTask(sessionId: string, task: ActiveTaskInfo) { - const tasks = JSON.parse(localStorage.getItem(ACTIVE_TASKS_KEY) || "{}"); - tasks[sessionId] = task; - localStorage.setItem(ACTIVE_TASKS_KEY, JSON.stringify(tasks)); -} - -function loadPersistedTasks(): Record { - return JSON.parse(localStorage.getItem(ACTIVE_TASKS_KEY) || "{}"); -} -``` - -## Backend Configuration - -The following backend settings affect reconnection behavior: - -| Setting | Default | Description | -| ------------------- | ------- | ---------------------------------- | -| `stream_ttl` | 3600s | How long streams are kept in Redis | -| `stream_max_length` | 1000 | Max messages per stream | - -## Testing - -To test reconnection locally: - -1. Start a long-running operation (e.g., agent generation) -2. Note the `task_id` from the `operation_started` response -3. Close the browser tab -4. Reopen and call `reconnectToTask` with the saved `task_id` -5. Verify that missed messages are replayed - -See the main README for full local development setup. diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/chat-constants.ts b/autogpt_platform/frontend/src/components/contextual/Chat/chat-constants.ts deleted file mode 100644 index 8802de2155..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/chat-constants.ts +++ /dev/null @@ -1,16 +0,0 @@ -/** - * Constants for the chat system. - * - * Centralizes magic strings and values used across chat components. 
- */ - -// LocalStorage keys -export const STORAGE_KEY_ACTIVE_TASKS = "chat_active_tasks"; - -// Redis Stream IDs -export const INITIAL_MESSAGE_ID = "0"; -export const INITIAL_STREAM_ID = "0-0"; - -// TTL values (in milliseconds) -export const COMPLETED_STREAM_TTL_MS = 5 * 60 * 1000; // 5 minutes -export const ACTIVE_TASK_TTL_MS = 60 * 60 * 1000; // 1 hour diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/chat-store.ts b/autogpt_platform/frontend/src/components/contextual/Chat/chat-store.ts deleted file mode 100644 index 3083f65d2c..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/chat-store.ts +++ /dev/null @@ -1,501 +0,0 @@ -"use client"; - -import { create } from "zustand"; -import { - ACTIVE_TASK_TTL_MS, - COMPLETED_STREAM_TTL_MS, - INITIAL_STREAM_ID, - STORAGE_KEY_ACTIVE_TASKS, -} from "./chat-constants"; -import type { - ActiveStream, - StreamChunk, - StreamCompleteCallback, - StreamResult, - StreamStatus, -} from "./chat-types"; -import { executeStream, executeTaskReconnect } from "./stream-executor"; - -export interface ActiveTaskInfo { - taskId: string; - sessionId: string; - operationId: string; - toolName: string; - lastMessageId: string; - startedAt: number; -} - -/** Load active tasks from localStorage */ -function loadPersistedTasks(): Map { - if (typeof window === "undefined") return new Map(); - try { - const stored = localStorage.getItem(STORAGE_KEY_ACTIVE_TASKS); - if (!stored) return new Map(); - const parsed = JSON.parse(stored) as Record; - const now = Date.now(); - const tasks = new Map(); - // Filter out expired tasks - for (const [sessionId, task] of Object.entries(parsed)) { - if (now - task.startedAt < ACTIVE_TASK_TTL_MS) { - tasks.set(sessionId, task); - } - } - return tasks; - } catch { - return new Map(); - } -} - -/** Save active tasks to localStorage */ -function persistTasks(tasks: Map): void { - if (typeof window === "undefined") return; - try { - const obj: Record = {}; - for (const 
[sessionId, task] of tasks) { - obj[sessionId] = task; - } - localStorage.setItem(STORAGE_KEY_ACTIVE_TASKS, JSON.stringify(obj)); - } catch { - // Ignore storage errors - } -} - -interface ChatStoreState { - activeStreams: Map; - completedStreams: Map; - activeSessions: Set; - streamCompleteCallbacks: Set; - /** Active tasks for SSE reconnection - keyed by sessionId */ - activeTasks: Map; -} - -interface ChatStoreActions { - startStream: ( - sessionId: string, - message: string, - isUserMessage: boolean, - context?: { url: string; content: string }, - onChunk?: (chunk: StreamChunk) => void, - ) => Promise; - stopStream: (sessionId: string) => void; - subscribeToStream: ( - sessionId: string, - onChunk: (chunk: StreamChunk) => void, - skipReplay?: boolean, - ) => () => void; - getStreamStatus: (sessionId: string) => StreamStatus; - getCompletedStream: (sessionId: string) => StreamResult | undefined; - clearCompletedStream: (sessionId: string) => void; - isStreaming: (sessionId: string) => boolean; - registerActiveSession: (sessionId: string) => void; - unregisterActiveSession: (sessionId: string) => void; - isSessionActive: (sessionId: string) => boolean; - onStreamComplete: (callback: StreamCompleteCallback) => () => void; - /** Track active task for SSE reconnection */ - setActiveTask: ( - sessionId: string, - taskInfo: Omit, - ) => void; - /** Get active task for a session */ - getActiveTask: (sessionId: string) => ActiveTaskInfo | undefined; - /** Clear active task when operation completes */ - clearActiveTask: (sessionId: string) => void; - /** Reconnect to an existing task stream */ - reconnectToTask: ( - sessionId: string, - taskId: string, - lastMessageId?: string, - onChunk?: (chunk: StreamChunk) => void, - ) => Promise; - /** Update last message ID for a task (for tracking replay position) */ - updateTaskLastMessageId: (sessionId: string, lastMessageId: string) => void; -} - -type ChatStore = ChatStoreState & ChatStoreActions; - -function 
notifyStreamComplete( - callbacks: Set, - sessionId: string, -) { - for (const callback of callbacks) { - try { - callback(sessionId); - } catch (err) { - console.warn("[ChatStore] Stream complete callback error:", err); - } - } -} - -function cleanupExpiredStreams( - completedStreams: Map, -): Map { - const now = Date.now(); - const cleaned = new Map(completedStreams); - for (const [sessionId, result] of cleaned) { - if (now - result.completedAt > COMPLETED_STREAM_TTL_MS) { - cleaned.delete(sessionId); - } - } - return cleaned; -} - -/** - * Finalize a stream by moving it from activeStreams to completedStreams. - * Also handles cleanup and notifications. - */ -function finalizeStream( - sessionId: string, - stream: ActiveStream, - onChunk: ((chunk: StreamChunk) => void) | undefined, - get: () => ChatStoreState & ChatStoreActions, - set: (state: Partial) => void, -): void { - if (onChunk) stream.onChunkCallbacks.delete(onChunk); - - if (stream.status !== "streaming") { - const currentState = get(); - const finalActiveStreams = new Map(currentState.activeStreams); - let finalCompletedStreams = new Map(currentState.completedStreams); - - const storedStream = finalActiveStreams.get(sessionId); - if (storedStream === stream) { - const result: StreamResult = { - sessionId, - status: stream.status, - chunks: stream.chunks, - completedAt: Date.now(), - error: stream.error, - }; - finalCompletedStreams.set(sessionId, result); - finalActiveStreams.delete(sessionId); - finalCompletedStreams = cleanupExpiredStreams(finalCompletedStreams); - set({ - activeStreams: finalActiveStreams, - completedStreams: finalCompletedStreams, - }); - - if (stream.status === "completed" || stream.status === "error") { - notifyStreamComplete(currentState.streamCompleteCallbacks, sessionId); - } - } - } -} - -/** - * Clean up an existing stream for a session and move it to completed streams. - * Returns updated maps for both active and completed streams. 
- */ -function cleanupExistingStream( - sessionId: string, - activeStreams: Map, - completedStreams: Map, - callbacks: Set, -): { - activeStreams: Map; - completedStreams: Map; -} { - const newActiveStreams = new Map(activeStreams); - let newCompletedStreams = new Map(completedStreams); - - const existingStream = newActiveStreams.get(sessionId); - if (existingStream) { - existingStream.abortController.abort(); - const normalizedStatus = - existingStream.status === "streaming" - ? "completed" - : existingStream.status; - const result: StreamResult = { - sessionId, - status: normalizedStatus, - chunks: existingStream.chunks, - completedAt: Date.now(), - error: existingStream.error, - }; - newCompletedStreams.set(sessionId, result); - newActiveStreams.delete(sessionId); - newCompletedStreams = cleanupExpiredStreams(newCompletedStreams); - if (normalizedStatus === "completed" || normalizedStatus === "error") { - notifyStreamComplete(callbacks, sessionId); - } - } - - return { - activeStreams: newActiveStreams, - completedStreams: newCompletedStreams, - }; -} - -/** - * Create a new active stream with initial state. 
- */ -function createActiveStream( - sessionId: string, - onChunk?: (chunk: StreamChunk) => void, -): ActiveStream { - const abortController = new AbortController(); - const initialCallbacks = new Set<(chunk: StreamChunk) => void>(); - if (onChunk) initialCallbacks.add(onChunk); - - return { - sessionId, - abortController, - status: "streaming", - startedAt: Date.now(), - chunks: [], - onChunkCallbacks: initialCallbacks, - }; -} - -export const useChatStore = create((set, get) => ({ - activeStreams: new Map(), - completedStreams: new Map(), - activeSessions: new Set(), - streamCompleteCallbacks: new Set(), - activeTasks: loadPersistedTasks(), - - startStream: async function startStream( - sessionId, - message, - isUserMessage, - context, - onChunk, - ) { - const state = get(); - const callbacks = state.streamCompleteCallbacks; - - // Clean up any existing stream for this session - const { - activeStreams: newActiveStreams, - completedStreams: newCompletedStreams, - } = cleanupExistingStream( - sessionId, - state.activeStreams, - state.completedStreams, - callbacks, - ); - - // Create new stream - const stream = createActiveStream(sessionId, onChunk); - newActiveStreams.set(sessionId, stream); - set({ - activeStreams: newActiveStreams, - completedStreams: newCompletedStreams, - }); - - try { - await executeStream(stream, message, isUserMessage, context); - } finally { - finalizeStream(sessionId, stream, onChunk, get, set); - } - }, - - stopStream: function stopStream(sessionId) { - const state = get(); - const stream = state.activeStreams.get(sessionId); - if (!stream) return; - - stream.abortController.abort(); - stream.status = "completed"; - - const newActiveStreams = new Map(state.activeStreams); - let newCompletedStreams = new Map(state.completedStreams); - - const result: StreamResult = { - sessionId, - status: stream.status, - chunks: stream.chunks, - completedAt: Date.now(), - error: stream.error, - }; - newCompletedStreams.set(sessionId, result); - 
newActiveStreams.delete(sessionId); - newCompletedStreams = cleanupExpiredStreams(newCompletedStreams); - - set({ - activeStreams: newActiveStreams, - completedStreams: newCompletedStreams, - }); - - notifyStreamComplete(state.streamCompleteCallbacks, sessionId); - }, - - subscribeToStream: function subscribeToStream( - sessionId, - onChunk, - skipReplay = false, - ) { - const state = get(); - const stream = state.activeStreams.get(sessionId); - - if (stream) { - if (!skipReplay) { - for (const chunk of stream.chunks) { - onChunk(chunk); - } - } - - stream.onChunkCallbacks.add(onChunk); - - return function unsubscribe() { - stream.onChunkCallbacks.delete(onChunk); - }; - } - - return function noop() {}; - }, - - getStreamStatus: function getStreamStatus(sessionId) { - const { activeStreams, completedStreams } = get(); - - const active = activeStreams.get(sessionId); - if (active) return active.status; - - const completed = completedStreams.get(sessionId); - if (completed) return completed.status; - - return "idle"; - }, - - getCompletedStream: function getCompletedStream(sessionId) { - return get().completedStreams.get(sessionId); - }, - - clearCompletedStream: function clearCompletedStream(sessionId) { - const state = get(); - if (!state.completedStreams.has(sessionId)) return; - - const newCompletedStreams = new Map(state.completedStreams); - newCompletedStreams.delete(sessionId); - set({ completedStreams: newCompletedStreams }); - }, - - isStreaming: function isStreaming(sessionId) { - const stream = get().activeStreams.get(sessionId); - return stream?.status === "streaming"; - }, - - registerActiveSession: function registerActiveSession(sessionId) { - const state = get(); - if (state.activeSessions.has(sessionId)) return; - - const newActiveSessions = new Set(state.activeSessions); - newActiveSessions.add(sessionId); - set({ activeSessions: newActiveSessions }); - }, - - unregisterActiveSession: function unregisterActiveSession(sessionId) { - const state = 
get(); - if (!state.activeSessions.has(sessionId)) return; - - const newActiveSessions = new Set(state.activeSessions); - newActiveSessions.delete(sessionId); - set({ activeSessions: newActiveSessions }); - }, - - isSessionActive: function isSessionActive(sessionId) { - return get().activeSessions.has(sessionId); - }, - - onStreamComplete: function onStreamComplete(callback) { - const state = get(); - const newCallbacks = new Set(state.streamCompleteCallbacks); - newCallbacks.add(callback); - set({ streamCompleteCallbacks: newCallbacks }); - - return function unsubscribe() { - const currentState = get(); - const cleanedCallbacks = new Set(currentState.streamCompleteCallbacks); - cleanedCallbacks.delete(callback); - set({ streamCompleteCallbacks: cleanedCallbacks }); - }; - }, - - setActiveTask: function setActiveTask(sessionId, taskInfo) { - const state = get(); - const newActiveTasks = new Map(state.activeTasks); - newActiveTasks.set(sessionId, { - ...taskInfo, - sessionId, - startedAt: Date.now(), - }); - set({ activeTasks: newActiveTasks }); - persistTasks(newActiveTasks); - }, - - getActiveTask: function getActiveTask(sessionId) { - return get().activeTasks.get(sessionId); - }, - - clearActiveTask: function clearActiveTask(sessionId) { - const state = get(); - if (!state.activeTasks.has(sessionId)) return; - - const newActiveTasks = new Map(state.activeTasks); - newActiveTasks.delete(sessionId); - set({ activeTasks: newActiveTasks }); - persistTasks(newActiveTasks); - }, - - reconnectToTask: async function reconnectToTask( - sessionId, - taskId, - lastMessageId = INITIAL_STREAM_ID, - onChunk, - ) { - const state = get(); - const callbacks = state.streamCompleteCallbacks; - - // Clean up any existing stream for this session - const { - activeStreams: newActiveStreams, - completedStreams: newCompletedStreams, - } = cleanupExistingStream( - sessionId, - state.activeStreams, - state.completedStreams, - callbacks, - ); - - // Create new stream for reconnection - 
const stream = createActiveStream(sessionId, onChunk); - newActiveStreams.set(sessionId, stream); - set({ - activeStreams: newActiveStreams, - completedStreams: newCompletedStreams, - }); - - try { - await executeTaskReconnect(stream, taskId, lastMessageId); - } finally { - finalizeStream(sessionId, stream, onChunk, get, set); - - // Clear active task on completion - if (stream.status === "completed" || stream.status === "error") { - const taskState = get(); - if (taskState.activeTasks.has(sessionId)) { - const newActiveTasks = new Map(taskState.activeTasks); - newActiveTasks.delete(sessionId); - set({ activeTasks: newActiveTasks }); - persistTasks(newActiveTasks); - } - } - } - }, - - updateTaskLastMessageId: function updateTaskLastMessageId( - sessionId, - lastMessageId, - ) { - const state = get(); - const task = state.activeTasks.get(sessionId); - if (!task) return; - - const newActiveTasks = new Map(state.activeTasks); - newActiveTasks.set(sessionId, { - ...task, - lastMessageId, - }); - set({ activeTasks: newActiveTasks }); - persistTasks(newActiveTasks); - }, -})); diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/chat-types.ts b/autogpt_platform/frontend/src/components/contextual/Chat/chat-types.ts deleted file mode 100644 index 34813e17fe..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/chat-types.ts +++ /dev/null @@ -1,163 +0,0 @@ -import type { ToolArguments, ToolResult } from "@/types/chat"; - -export type StreamStatus = "idle" | "streaming" | "completed" | "error"; - -export interface StreamChunk { - type: - | "stream_start" - | "text_chunk" - | "text_ended" - | "tool_call" - | "tool_call_start" - | "tool_response" - | "login_needed" - | "need_login" - | "credentials_needed" - | "error" - | "usage" - | "stream_end"; - taskId?: string; - timestamp?: string; - content?: string; - message?: string; - code?: string; - details?: Record; - tool_id?: string; - tool_name?: string; - arguments?: ToolArguments; - 
result?: ToolResult; - success?: boolean; - idx?: number; - session_id?: string; - agent_info?: { - graph_id: string; - name: string; - trigger_type: string; - }; - provider?: string; - provider_name?: string; - credential_type?: string; - scopes?: string[]; - title?: string; - [key: string]: unknown; -} - -export type VercelStreamChunk = - | { type: "start"; messageId: string; taskId?: string } - | { type: "finish" } - | { type: "text-start"; id: string } - | { type: "text-delta"; id: string; delta: string } - | { type: "text-end"; id: string } - | { type: "tool-input-start"; toolCallId: string; toolName: string } - | { - type: "tool-input-available"; - toolCallId: string; - toolName: string; - input: Record; - } - | { - type: "tool-output-available"; - toolCallId: string; - toolName?: string; - output: unknown; - success?: boolean; - } - | { - type: "usage"; - promptTokens: number; - completionTokens: number; - totalTokens: number; - } - | { - type: "error"; - errorText: string; - code?: string; - details?: Record; - }; - -export interface ActiveStream { - sessionId: string; - abortController: AbortController; - status: StreamStatus; - startedAt: number; - chunks: StreamChunk[]; - error?: Error; - onChunkCallbacks: Set<(chunk: StreamChunk) => void>; -} - -export interface StreamResult { - sessionId: string; - status: StreamStatus; - chunks: StreamChunk[]; - completedAt: number; - error?: Error; -} - -export type StreamCompleteCallback = (sessionId: string) => void; - -// Type guards for message types - -/** - * Check if a message has a toolId property. - */ -export function hasToolId( - msg: T, -): msg is T & { toolId: string } { - return ( - "toolId" in msg && - typeof (msg as Record).toolId === "string" - ); -} - -/** - * Check if a message has an operationId property. 
- */ -export function hasOperationId( - msg: T, -): msg is T & { operationId: string } { - return ( - "operationId" in msg && - typeof (msg as Record).operationId === "string" - ); -} - -/** - * Check if a message has a toolCallId property. - */ -export function hasToolCallId( - msg: T, -): msg is T & { toolCallId: string } { - return ( - "toolCallId" in msg && - typeof (msg as Record).toolCallId === "string" - ); -} - -/** - * Check if a message is an operation message type. - */ -export function isOperationMessage( - msg: T, -): msg is T & { - type: "operation_started" | "operation_pending" | "operation_in_progress"; -} { - return ( - msg.type === "operation_started" || - msg.type === "operation_pending" || - msg.type === "operation_in_progress" - ); -} - -/** - * Get the tool ID from a message if available. - * Checks toolId, operationId, and toolCallId properties. - */ -export function getToolIdFromMessage( - msg: T, -): string | undefined { - const record = msg as Record; - if (typeof record.toolId === "string") return record.toolId; - if (typeof record.operationId === "string") return record.operationId; - if (typeof record.toolCallId === "string") return record.toolCallId; - return undefined; -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/AIChatBubble/AIChatBubble.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/AIChatBubble/AIChatBubble.tsx deleted file mode 100644 index f5d56fcb15..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/AIChatBubble/AIChatBubble.tsx +++ /dev/null @@ -1,15 +0,0 @@ -import { cn } from "@/lib/utils"; -import { ReactNode } from "react"; - -export interface AIChatBubbleProps { - children: ReactNode; - className?: string; -} - -export function AIChatBubble({ children, className }: AIChatBubbleProps) { - return ( -
- {children} -
- ); -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentCarouselMessage/AgentCarouselMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentCarouselMessage/AgentCarouselMessage.tsx deleted file mode 100644 index 582b24de5e..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentCarouselMessage/AgentCarouselMessage.tsx +++ /dev/null @@ -1,119 +0,0 @@ -import { Button } from "@/components/atoms/Button/Button"; -import { Card } from "@/components/atoms/Card/Card"; -import { Text } from "@/components/atoms/Text/Text"; -import { cn } from "@/lib/utils"; -import { ArrowRight, List, Robot } from "@phosphor-icons/react"; -import Image from "next/image"; - -export interface Agent { - id: string; - name: string; - description: string; - version?: number; - image_url?: string; -} - -export interface AgentCarouselMessageProps { - agents: Agent[]; - totalCount?: number; - onSelectAgent?: (agentId: string) => void; - className?: string; -} - -export function AgentCarouselMessage({ - agents, - totalCount, - onSelectAgent, - className, -}: AgentCarouselMessageProps) { - const displayCount = totalCount ?? agents.length; - - return ( -
- {/* Header */} -
-
- -
-
- - Found {displayCount} {displayCount === 1 ? "Agent" : "Agents"} - - - Select an agent to view details or run it - -
-
- - {/* Agent Cards */} -
- {agents.map((agent) => ( - -
-
- {agent.image_url ? ( - {`${agent.name} - ) : ( -
- -
- )} -
-
-
- - {agent.name} - - {agent.version && ( - - v{agent.version} - - )} -
- - {agent.description} - - {onSelectAgent && ( - - )} -
-
-
- ))} -
- - {totalCount && totalCount > agents.length && ( - - Showing {agents.length} of {totalCount} results - - )} -
- ); -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/AgentInputsSetup.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/AgentInputsSetup.tsx deleted file mode 100644 index 3ef71eca09..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/AgentInputsSetup.tsx +++ /dev/null @@ -1,246 +0,0 @@ -"use client"; - -import { Button } from "@/components/atoms/Button/Button"; -import { Card } from "@/components/atoms/Card/Card"; -import { Text } from "@/components/atoms/Text/Text"; -import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput"; -import { RunAgentInputs } from "@/components/contextual/RunAgentInputs/RunAgentInputs"; - -import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import { - BlockIOCredentialsSubSchema, - BlockIOSubSchema, -} from "@/lib/autogpt-server-api/types"; -import { cn, isEmpty } from "@/lib/utils"; -import { PlayIcon, WarningIcon } from "@phosphor-icons/react"; -import { useMemo } from "react"; -import { useAgentInputsSetup } from "./useAgentInputsSetup"; - -type LibraryAgentInputSchemaProperties = LibraryAgent["input_schema"] extends { - properties: infer P; -} - ? P extends Record - ? P - : Record - : Record; - -type LibraryAgentCredentialsInputSchemaProperties = - LibraryAgent["credentials_input_schema"] extends { - properties: infer P; - } - ? P extends Record - ? 
P - : Record - : Record; - -interface Props { - agentName?: string; - inputSchema: LibraryAgentInputSchemaProperties | Record; - credentialsSchema?: - | LibraryAgentCredentialsInputSchemaProperties - | Record; - message: string; - requiredFields?: string[]; - onRun: ( - inputs: Record, - credentials: Record, - ) => void; - onCancel?: () => void; - className?: string; -} - -export function AgentInputsSetup({ - agentName, - inputSchema, - credentialsSchema, - message, - requiredFields, - onRun, - onCancel, - className, -}: Props) { - const { inputValues, setInputValue, credentialsValues, setCredentialsValue } = - useAgentInputsSetup(); - - const inputSchemaObj = useMemo(() => { - if (!inputSchema) return { properties: {}, required: [] }; - if ("properties" in inputSchema && "type" in inputSchema) { - return inputSchema as { - properties: Record; - required?: string[]; - }; - } - return { properties: inputSchema as Record, required: [] }; - }, [inputSchema]); - - const credentialsSchemaObj = useMemo(() => { - if (!credentialsSchema) return { properties: {}, required: [] }; - if ("properties" in credentialsSchema && "type" in credentialsSchema) { - return credentialsSchema as { - properties: Record; - required?: string[]; - }; - } - return { - properties: credentialsSchema as Record, - required: [], - }; - }, [credentialsSchema]); - - const agentInputFields = useMemo(() => { - const properties = inputSchemaObj.properties || {}; - return Object.fromEntries( - Object.entries(properties).filter( - ([_, subSchema]: [string, any]) => !subSchema.hidden, - ), - ); - }, [inputSchemaObj]); - - const agentCredentialsInputFields = useMemo(() => { - return credentialsSchemaObj.properties || {}; - }, [credentialsSchemaObj]); - - const inputFields = Object.entries(agentInputFields); - const credentialFields = Object.entries(agentCredentialsInputFields); - - const defaultsFromSchema = useMemo(() => { - const defaults: Record = {}; - Object.entries(agentInputFields).forEach(([key, 
schema]) => { - if ("default" in schema && schema.default !== undefined) { - defaults[key] = schema.default; - } - }); - return defaults; - }, [agentInputFields]); - - const defaultsFromCredentialsSchema = useMemo(() => { - const defaults: Record = {}; - Object.entries(agentCredentialsInputFields).forEach(([key, schema]) => { - if ("default" in schema && schema.default !== undefined) { - defaults[key] = schema.default; - } - }); - return defaults; - }, [agentCredentialsInputFields]); - - const mergedInputValues = useMemo(() => { - return { ...defaultsFromSchema, ...inputValues }; - }, [defaultsFromSchema, inputValues]); - - const mergedCredentialsValues = useMemo(() => { - return { ...defaultsFromCredentialsSchema, ...credentialsValues }; - }, [defaultsFromCredentialsSchema, credentialsValues]); - - const allRequiredInputsAreSet = useMemo(() => { - const requiredInputs = new Set( - requiredFields || (inputSchemaObj.required as string[]) || [], - ); - const nonEmptyInputs = new Set( - Object.keys(mergedInputValues).filter( - (k) => !isEmpty(mergedInputValues[k]), - ), - ); - const missing = [...requiredInputs].filter( - (input) => !nonEmptyInputs.has(input), - ); - return missing.length === 0; - }, [inputSchemaObj.required, mergedInputValues, requiredFields]); - - const allCredentialsAreSet = useMemo(() => { - const requiredCredentials = new Set( - (credentialsSchemaObj.required as string[]) || [], - ); - if (requiredCredentials.size === 0) { - return true; - } - const missing = [...requiredCredentials].filter((key) => { - const cred = mergedCredentialsValues[key]; - return !cred || !cred.id; - }); - return missing.length === 0; - }, [credentialsSchemaObj.required, mergedCredentialsValues]); - - const canRun = allRequiredInputsAreSet && allCredentialsAreSet; - - function handleRun() { - if (canRun) { - onRun(mergedInputValues, mergedCredentialsValues); - } - } - - return ( - -
-
- -
-
- - {agentName ? `Configure ${agentName}` : "Agent Configuration"} - - - {message} - - - {inputFields.length > 0 && ( -
- {inputFields.map(([key, inputSubSchema]) => ( - setInputValue(key, value)} - /> - ))} -
- )} - - {credentialFields.length > 0 && ( -
- {credentialFields.map(([key, schema]) => { - const requiredCredentials = new Set( - (credentialsSchemaObj.required as string[]) || [], - ); - return ( - - setCredentialsValue(key, value) - } - siblingInputs={mergedInputValues} - isOptional={!requiredCredentials.has(key)} - /> - ); - })} -
- )} - -
- - {onCancel && ( - - )} -
-
-
-
- ); -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/useAgentInputsSetup.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/useAgentInputsSetup.ts deleted file mode 100644 index e36a3f3c5d..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/useAgentInputsSetup.ts +++ /dev/null @@ -1,38 +0,0 @@ -import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types"; -import { useState } from "react"; - -export function useAgentInputsSetup() { - const [inputValues, setInputValues] = useState>({}); - const [credentialsValues, setCredentialsValues] = useState< - Record - >({}); - - function setInputValue(key: string, value: any) { - setInputValues((prev) => ({ - ...prev, - [key]: value, - })); - } - - function setCredentialsValue(key: string, value?: CredentialsMetaInput) { - if (value) { - setCredentialsValues((prev) => ({ - ...prev, - [key]: value, - })); - } else { - setCredentialsValues((prev) => { - const next = { ...prev }; - delete next[key]; - return next; - }); - } - } - - return { - inputValues, - setInputValue, - credentialsValues, - setCredentialsValue, - }; -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx deleted file mode 100644 index b2cf92ec56..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx +++ /dev/null @@ -1,120 +0,0 @@ -"use client"; - -import { Button } from "@/components/atoms/Button/Button"; -import { cn } from "@/lib/utils"; -import { ShieldIcon, SignInIcon, UserPlusIcon } from "@phosphor-icons/react"; -import { useRouter } from "next/navigation"; - -export interface AuthPromptWidgetProps { - message: string; - sessionId: string; - agentInfo?: { - 
graph_id: string; - name: string; - trigger_type: string; - }; - returnUrl?: string; - className?: string; -} - -export function AuthPromptWidget({ - message, - sessionId, - agentInfo, - returnUrl = "/copilot/chat", - className, -}: AuthPromptWidgetProps) { - const router = useRouter(); - - function handleSignIn() { - if (typeof window !== "undefined") { - localStorage.setItem("pending_chat_session", sessionId); - if (agentInfo) { - localStorage.setItem("pending_agent_setup", JSON.stringify(agentInfo)); - } - } - const returnUrlWithSession = `${returnUrl}?session_id=${sessionId}`; - const encodedReturnUrl = encodeURIComponent(returnUrlWithSession); - router.push(`/login?returnUrl=${encodedReturnUrl}`); - } - - function handleSignUp() { - if (typeof window !== "undefined") { - localStorage.setItem("pending_chat_session", sessionId); - if (agentInfo) { - localStorage.setItem("pending_agent_setup", JSON.stringify(agentInfo)); - } - } - const returnUrlWithSession = `${returnUrl}?session_id=${sessionId}`; - const encodedReturnUrl = encodeURIComponent(returnUrlWithSession); - router.push(`/signup?returnUrl=${encodedReturnUrl}`); - } - - return ( -
-
-
-
- -
-
-

- Authentication Required -

-

- Sign in to set up and manage agents -

-
-
- -
-

{message}

- {agentInfo && ( -
-

- Ready to set up:{" "} - {agentInfo.name} -

-

- Type:{" "} - {agentInfo.trigger_type} -

-
- )} -
- -
- - -
- -
- Your chat session will be preserved after signing in -
-
-
- ); -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx deleted file mode 100644 index fbf2d5d143..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx +++ /dev/null @@ -1,130 +0,0 @@ -import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; -import { Button } from "@/components/atoms/Button/Button"; -import { Text } from "@/components/atoms/Text/Text"; -import { Dialog } from "@/components/molecules/Dialog/Dialog"; -import { cn } from "@/lib/utils"; -import { GlobeHemisphereEastIcon } from "@phosphor-icons/react"; -import { useEffect } from "react"; -import { ChatInput } from "../ChatInput/ChatInput"; -import { MessageList } from "../MessageList/MessageList"; -import { useChatContainer } from "./useChatContainer"; - -export interface ChatContainerProps { - sessionId: string | null; - initialMessages: SessionDetailResponse["messages"]; - initialPrompt?: string; - className?: string; - onStreamingChange?: (isStreaming: boolean) => void; - onOperationStarted?: () => void; - /** Active stream info from the server for reconnection */ - activeStream?: { - taskId: string; - lastMessageId: string; - operationId: string; - toolName: string; - }; -} - -export function ChatContainer({ - sessionId, - initialMessages, - initialPrompt, - className, - onStreamingChange, - onOperationStarted, - activeStream, -}: ChatContainerProps) { - const { - messages, - streamingChunks, - isStreaming, - stopStreaming, - isRegionBlockedModalOpen, - sendMessageWithContext, - handleRegionModalOpenChange, - handleRegionModalClose, - } = useChatContainer({ - sessionId, - initialMessages, - initialPrompt, - onOperationStarted, - activeStream, - }); - - useEffect(() => { - onStreamingChange?.(isStreaming); - }, [isStreaming, 
onStreamingChange]); - - return ( -
- - - - Service unavailable - -
- } - controlled={{ - isOpen: isRegionBlockedModalOpen, - set: handleRegionModalOpenChange, - }} - onClose={handleRegionModalClose} - styling={{ maxWidth: 550, width: "100%", minWidth: "auto" }} - > - -
- - The Autogpt AI model is not available in your region or your - connection is blocking it. Please try again with a different - connection. - -
- -
-
-
- - {/* Messages - Scrollable */} -
-
- -
-
- - {/* Input - Fixed at bottom */} -
-
- -
-
- ); -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts deleted file mode 100644 index af3b3329b7..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts +++ /dev/null @@ -1,89 +0,0 @@ -import { toast } from "sonner"; -import type { StreamChunk } from "../../chat-types"; -import type { HandlerDependencies } from "./handlers"; -import { - getErrorDisplayMessage, - handleError, - handleLoginNeeded, - handleStreamEnd, - handleTextChunk, - handleTextEnded, - handleToolCallStart, - handleToolResponse, - isRegionBlockedError, -} from "./handlers"; - -export function createStreamEventDispatcher( - deps: HandlerDependencies, -): (chunk: StreamChunk) => void { - return function dispatchStreamEvent(chunk: StreamChunk): void { - if ( - chunk.type === "text_chunk" || - chunk.type === "tool_call_start" || - chunk.type === "tool_response" || - chunk.type === "login_needed" || - chunk.type === "need_login" || - chunk.type === "error" - ) { - deps.hasResponseRef.current = true; - } - - switch (chunk.type) { - case "stream_start": - // Store task ID for SSE reconnection - if (chunk.taskId && deps.onActiveTaskStarted) { - deps.onActiveTaskStarted({ - taskId: chunk.taskId, - operationId: chunk.taskId, - toolName: "chat", - toolCallId: "chat_stream", - }); - } - break; - - case "text_chunk": - handleTextChunk(chunk, deps); - break; - - case "text_ended": - handleTextEnded(chunk, deps); - break; - - case "tool_call_start": - handleToolCallStart(chunk, deps); - break; - - case "tool_response": - handleToolResponse(chunk, deps); - break; - - case "login_needed": - case "need_login": - handleLoginNeeded(chunk, deps); - break; - - case "stream_end": - // Note: "finish" type from backend gets normalized to "stream_end" by 
normalizeStreamChunk - handleStreamEnd(chunk, deps); - break; - - case "error": - const isRegionBlocked = isRegionBlockedError(chunk); - handleError(chunk, deps); - // Show toast at dispatcher level to avoid circular dependencies - if (!isRegionBlocked) { - toast.error("Chat Error", { - description: getErrorDisplayMessage(chunk), - }); - } - break; - - case "usage": - // TODO: Handle usage for display - break; - - default: - console.warn("Unknown stream chunk type:", chunk); - } - }; -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts deleted file mode 100644 index 5aec5b9818..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts +++ /dev/null @@ -1,362 +0,0 @@ -import type { Dispatch, MutableRefObject, SetStateAction } from "react"; -import { StreamChunk } from "../../useChatStream"; -import type { ChatMessageData } from "../ChatMessage/useChatMessage"; -import { - extractCredentialsNeeded, - extractInputsNeeded, - parseToolResponse, -} from "./helpers"; - -function isToolCallMessage( - message: ChatMessageData, -): message is Extract { - return message.type === "tool_call"; -} - -export interface HandlerDependencies { - setHasTextChunks: Dispatch>; - setStreamingChunks: Dispatch>; - streamingChunksRef: MutableRefObject; - hasResponseRef: MutableRefObject; - textFinalizedRef: MutableRefObject; - streamEndedRef: MutableRefObject; - setMessages: Dispatch>; - setIsStreamingInitiated: Dispatch>; - setIsRegionBlockedModalOpen: Dispatch>; - sessionId: string; - onOperationStarted?: () => void; - onActiveTaskStarted?: (taskInfo: { - taskId: string; - operationId: string; - toolName: string; - toolCallId: string; - }) => void; -} - -export function isRegionBlockedError(chunk: StreamChunk): boolean { - if (chunk.code === "MODEL_NOT_AVAILABLE_REGION") return true; - 
const message = chunk.message || chunk.content; - if (typeof message !== "string") return false; - return message.toLowerCase().includes("not available in your region"); -} - -export function getUserFriendlyErrorMessage( - code: string | undefined, -): string | undefined { - switch (code) { - case "TASK_EXPIRED": - return "This operation has expired. Please try again."; - case "TASK_NOT_FOUND": - return "Could not find the requested operation."; - case "ACCESS_DENIED": - return "You do not have access to this operation."; - case "QUEUE_OVERFLOW": - return "Connection was interrupted. Please refresh to continue."; - case "MODEL_NOT_AVAILABLE_REGION": - return "This model is not available in your region."; - default: - return undefined; - } -} - -export function handleTextChunk(chunk: StreamChunk, deps: HandlerDependencies) { - if (!chunk.content) return; - deps.setHasTextChunks(true); - deps.setStreamingChunks((prev) => { - const updated = [...prev, chunk.content!]; - deps.streamingChunksRef.current = updated; - return updated; - }); -} - -export function handleTextEnded( - _chunk: StreamChunk, - deps: HandlerDependencies, -) { - if (deps.textFinalizedRef.current) { - return; - } - - const completedText = deps.streamingChunksRef.current.join(""); - if (completedText.trim()) { - deps.textFinalizedRef.current = true; - - deps.setMessages((prev) => { - const exists = prev.some( - (msg) => - msg.type === "message" && - msg.role === "assistant" && - msg.content === completedText, - ); - if (exists) return prev; - - const assistantMessage: ChatMessageData = { - type: "message", - role: "assistant", - content: completedText, - timestamp: new Date(), - }; - return [...prev, assistantMessage]; - }); - } - deps.setStreamingChunks([]); - deps.streamingChunksRef.current = []; - deps.setHasTextChunks(false); -} - -export function handleToolCallStart( - chunk: StreamChunk, - deps: HandlerDependencies, -) { - // Use deterministic fallback instead of Date.now() to ensure same ID on 
replay - const toolId = - chunk.tool_id || - `tool-${deps.sessionId}-${chunk.idx ?? "unknown"}-${chunk.tool_name || "unknown"}`; - - const toolCallMessage: Extract = { - type: "tool_call", - toolId, - toolName: chunk.tool_name || "Executing", - arguments: chunk.arguments || {}, - timestamp: new Date(), - }; - - function updateToolCallMessages(prev: ChatMessageData[]) { - const existingIndex = prev.findIndex(function findToolCallIndex(msg) { - return isToolCallMessage(msg) && msg.toolId === toolCallMessage.toolId; - }); - if (existingIndex === -1) { - return [...prev, toolCallMessage]; - } - const nextMessages = [...prev]; - const existing = nextMessages[existingIndex]; - if (!isToolCallMessage(existing)) return prev; - const nextArguments = - toolCallMessage.arguments && - Object.keys(toolCallMessage.arguments).length > 0 - ? toolCallMessage.arguments - : existing.arguments; - nextMessages[existingIndex] = { - ...existing, - toolName: toolCallMessage.toolName || existing.toolName, - arguments: nextArguments, - timestamp: toolCallMessage.timestamp, - }; - return nextMessages; - } - - deps.setMessages(updateToolCallMessages); -} - -const TOOL_RESPONSE_TYPES = new Set([ - "tool_response", - "operation_started", - "operation_pending", - "operation_in_progress", - "execution_started", - "agent_carousel", - "clarification_needed", -]); - -function hasResponseForTool( - messages: ChatMessageData[], - toolId: string, -): boolean { - return messages.some((msg) => { - if (!TOOL_RESPONSE_TYPES.has(msg.type)) return false; - const msgToolId = - (msg as { toolId?: string }).toolId || - (msg as { toolCallId?: string }).toolCallId; - return msgToolId === toolId; - }); -} - -export function handleToolResponse( - chunk: StreamChunk, - deps: HandlerDependencies, -) { - let toolName = chunk.tool_name || "unknown"; - if (!chunk.tool_name || chunk.tool_name === "unknown") { - deps.setMessages((prev) => { - const matchingToolCall = [...prev] - .reverse() - .find( - (msg) => msg.type === 
"tool_call" && msg.toolId === chunk.tool_id, - ); - if (matchingToolCall && matchingToolCall.type === "tool_call") { - toolName = matchingToolCall.toolName; - } - return prev; - }); - } - const responseMessage = parseToolResponse( - chunk.result!, - chunk.tool_id!, - toolName, - new Date(), - ); - if (!responseMessage) { - let parsedResult: Record | null = null; - try { - parsedResult = - typeof chunk.result === "string" - ? JSON.parse(chunk.result) - : (chunk.result as Record); - } catch { - parsedResult = null; - } - if ( - (chunk.tool_name === "run_agent" || chunk.tool_name === "run_block") && - chunk.success && - parsedResult?.type === "setup_requirements" - ) { - const inputsMessage = extractInputsNeeded(parsedResult, chunk.tool_name); - if (inputsMessage) { - deps.setMessages((prev) => { - // Check for duplicate inputs_needed message - const exists = prev.some((msg) => msg.type === "inputs_needed"); - if (exists) return prev; - return [...prev, inputsMessage]; - }); - } - const credentialsMessage = extractCredentialsNeeded( - parsedResult, - chunk.tool_name, - ); - if (credentialsMessage) { - deps.setMessages((prev) => { - // Check for duplicate credentials_needed message - const exists = prev.some((msg) => msg.type === "credentials_needed"); - if (exists) return prev; - return [...prev, credentialsMessage]; - }); - } - } - return; - } - if (responseMessage.type === "operation_started") { - deps.onOperationStarted?.(); - const taskId = (responseMessage as { taskId?: string }).taskId; - if (taskId && deps.onActiveTaskStarted) { - deps.onActiveTaskStarted({ - taskId, - operationId: - (responseMessage as { operationId?: string }).operationId || "", - toolName: (responseMessage as { toolName?: string }).toolName || "", - toolCallId: (responseMessage as { toolId?: string }).toolId || "", - }); - } - } - - deps.setMessages((prev) => { - const toolCallIndex = prev.findIndex( - (msg) => msg.type === "tool_call" && msg.toolId === chunk.tool_id, - ); - if 
(hasResponseForTool(prev, chunk.tool_id!)) { - return prev; - } - if (toolCallIndex !== -1) { - const newMessages = [...prev]; - newMessages.splice(toolCallIndex + 1, 0, responseMessage); - return newMessages; - } - return [...prev, responseMessage]; - }); -} - -export function handleLoginNeeded( - chunk: StreamChunk, - deps: HandlerDependencies, -) { - const loginNeededMessage: ChatMessageData = { - type: "login_needed", - toolName: "login_needed", - message: chunk.message || "Please sign in to use chat and agent features", - sessionId: chunk.session_id || deps.sessionId, - agentInfo: chunk.agent_info, - timestamp: new Date(), - }; - deps.setMessages((prev) => { - // Check for duplicate login_needed message - const exists = prev.some((msg) => msg.type === "login_needed"); - if (exists) return prev; - return [...prev, loginNeededMessage]; - }); -} - -export function handleStreamEnd( - _chunk: StreamChunk, - deps: HandlerDependencies, -) { - if (deps.streamEndedRef.current) { - return; - } - deps.streamEndedRef.current = true; - - const completedContent = deps.streamingChunksRef.current.join(""); - if (!completedContent.trim() && !deps.hasResponseRef.current) { - deps.setMessages((prev) => { - const exists = prev.some( - (msg) => - msg.type === "message" && - msg.role === "assistant" && - msg.content === "No response received. Please try again.", - ); - if (exists) return prev; - return [ - ...prev, - { - type: "message", - role: "assistant", - content: "No response received. 
Please try again.", - timestamp: new Date(), - }, - ]; - }); - } - if (completedContent.trim() && !deps.textFinalizedRef.current) { - deps.textFinalizedRef.current = true; - - deps.setMessages((prev) => { - const exists = prev.some( - (msg) => - msg.type === "message" && - msg.role === "assistant" && - msg.content === completedContent, - ); - if (exists) return prev; - - const assistantMessage: ChatMessageData = { - type: "message", - role: "assistant", - content: completedContent, - timestamp: new Date(), - }; - return [...prev, assistantMessage]; - }); - } - deps.setStreamingChunks([]); - deps.streamingChunksRef.current = []; - deps.setHasTextChunks(false); - deps.setIsStreamingInitiated(false); -} - -export function handleError(chunk: StreamChunk, deps: HandlerDependencies) { - if (isRegionBlockedError(chunk)) { - deps.setIsRegionBlockedModalOpen(true); - } - deps.setIsStreamingInitiated(false); - deps.setHasTextChunks(false); - deps.setStreamingChunks([]); - deps.streamingChunksRef.current = []; - deps.textFinalizedRef.current = false; - deps.streamEndedRef.current = true; -} - -export function getErrorDisplayMessage(chunk: StreamChunk): string { - const friendlyMessage = getUserFriendlyErrorMessage(chunk.code); - if (friendlyMessage) { - return friendlyMessage; - } - return chunk.message || chunk.content || "An error occurred"; -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts deleted file mode 100644 index f1e94cea17..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts +++ /dev/null @@ -1,607 +0,0 @@ -import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; -import { SessionKey, sessionStorage } from "@/services/storage/session-storage"; -import type { ToolResult } from "@/types/chat"; -import type { 
ChatMessageData } from "../ChatMessage/useChatMessage"; - -export function processInitialMessages( - initialMessages: SessionDetailResponse["messages"], -): ChatMessageData[] { - const processedMessages: ChatMessageData[] = []; - const toolCallMap = new Map(); - - for (const msg of initialMessages) { - if (!isValidMessage(msg)) { - console.warn("Invalid message structure from backend:", msg); - continue; - } - - let content = String(msg.content || ""); - const role = String(msg.role || "assistant").toLowerCase(); - const toolCalls = msg.tool_calls; - const timestamp = msg.timestamp - ? new Date(msg.timestamp as string) - : undefined; - - if (role === "user") { - content = removePageContext(content); - if (!content.trim()) continue; - processedMessages.push({ - type: "message", - role: "user", - content, - timestamp, - }); - continue; - } - - if (role === "assistant") { - content = content - .replace(/[\s\S]*?<\/thinking>/gi, "") - .replace(/[\s\S]*?<\/internal_reasoning>/gi, "") - .trim(); - - if (toolCalls && isToolCallArray(toolCalls) && toolCalls.length > 0) { - for (const toolCall of toolCalls) { - const toolName = toolCall.function.name; - const toolId = toolCall.id; - toolCallMap.set(toolId, toolName); - - try { - const args = JSON.parse(toolCall.function.arguments || "{}"); - processedMessages.push({ - type: "tool_call", - toolId, - toolName, - arguments: args, - timestamp, - }); - } catch (err) { - console.warn("Failed to parse tool call arguments:", err); - processedMessages.push({ - type: "tool_call", - toolId, - toolName, - arguments: {}, - timestamp, - }); - } - } - if (content.trim()) { - processedMessages.push({ - type: "message", - role: "assistant", - content, - timestamp, - }); - } - } else if (content.trim()) { - processedMessages.push({ - type: "message", - role: "assistant", - content, - timestamp, - }); - } - continue; - } - - if (role === "tool") { - const toolCallId = (msg.tool_call_id as string) || ""; - const toolName = 
toolCallMap.get(toolCallId) || "unknown"; - const toolResponse = parseToolResponse( - content, - toolCallId, - toolName, - timestamp, - ); - if (toolResponse) { - processedMessages.push(toolResponse); - } - continue; - } - - if (content.trim()) { - processedMessages.push({ - type: "message", - role: role as "user" | "assistant" | "system", - content, - timestamp, - }); - } - } - - return processedMessages; -} - -export function hasSentInitialPrompt(sessionId: string): boolean { - try { - const sent = JSON.parse( - sessionStorage.get(SessionKey.CHAT_SENT_INITIAL_PROMPTS) || "{}", - ); - return sent[sessionId] === true; - } catch { - return false; - } -} - -export function markInitialPromptSent(sessionId: string): void { - try { - const sent = JSON.parse( - sessionStorage.get(SessionKey.CHAT_SENT_INITIAL_PROMPTS) || "{}", - ); - sent[sessionId] = true; - sessionStorage.set( - SessionKey.CHAT_SENT_INITIAL_PROMPTS, - JSON.stringify(sent), - ); - } catch { - // Ignore storage errors - } -} - -export function removePageContext(content: string): string { - // Remove "Page URL: ..." 
pattern at start of line (case insensitive, handles various formats) - let cleaned = content.replace(/^\s*Page URL:\s*[^\n\r]*/gim, ""); - - // Find "User Message:" marker at start of line to preserve the actual user message - const userMessageMatch = cleaned.match(/^\s*User Message:\s*([\s\S]*)$/im); - if (userMessageMatch) { - // If we found "User Message:", extract everything after it - cleaned = userMessageMatch[1]; - } else { - // If no "User Message:" marker, remove "Page Content:" and everything after it at start of line - cleaned = cleaned.replace(/^\s*Page Content:[\s\S]*$/gim, ""); - } - - // Clean up extra whitespace and newlines - cleaned = cleaned.replace(/\n\s*\n\s*\n+/g, "\n\n").trim(); - return cleaned; -} - -export function createUserMessage(content: string): ChatMessageData { - return { - type: "message", - role: "user", - content, - timestamp: new Date(), - }; -} - -export function filterAuthMessages( - messages: ChatMessageData[], -): ChatMessageData[] { - return messages.filter( - (msg) => msg.type !== "credentials_needed" && msg.type !== "login_needed", - ); -} - -export function isValidMessage(msg: unknown): msg is Record { - if (typeof msg !== "object" || msg === null) { - return false; - } - const m = msg as Record; - if (typeof m.role !== "string") { - return false; - } - if (m.content !== undefined && typeof m.content !== "string") { - return false; - } - return true; -} - -export function isToolCallArray(value: unknown): value is Array<{ - id: string; - type: string; - function: { name: string; arguments: string }; -}> { - if (!Array.isArray(value)) { - return false; - } - return value.every( - (item) => - typeof item === "object" && - item !== null && - "id" in item && - typeof item.id === "string" && - "type" in item && - typeof item.type === "string" && - "function" in item && - typeof item.function === "object" && - item.function !== null && - "name" in item.function && - typeof item.function.name === "string" && - "arguments" in 
item.function && - typeof item.function.arguments === "string", - ); -} - -export function isAgentArray(value: unknown): value is Array<{ - id: string; - name: string; - description: string; - version?: number; - image_url?: string; -}> { - if (!Array.isArray(value)) { - return false; - } - return value.every( - (item) => - typeof item === "object" && - item !== null && - "id" in item && - typeof item.id === "string" && - "name" in item && - typeof item.name === "string" && - "description" in item && - typeof item.description === "string" && - (!("version" in item) || typeof item.version === "number") && - (!("image_url" in item) || typeof item.image_url === "string"), - ); -} - -export function extractJsonFromErrorMessage( - message: string, -): Record | null { - try { - const start = message.indexOf("{"); - if (start === -1) { - return null; - } - let depth = 0; - let end = -1; - for (let i = start; i < message.length; i++) { - const ch = message[i]; - if (ch === "{") { - depth++; - } else if (ch === "}") { - depth--; - if (depth === 0) { - end = i; - break; - } - } - } - if (end === -1) { - return null; - } - const jsonStr = message.slice(start, end + 1); - return JSON.parse(jsonStr) as Record; - } catch { - return null; - } -} - -export function parseToolResponse( - result: ToolResult, - toolId: string, - toolName: string, - timestamp?: Date, -): ChatMessageData | null { - let parsedResult: Record | null = null; - try { - parsedResult = - typeof result === "string" - ? 
JSON.parse(result) - : (result as Record); - } catch { - parsedResult = null; - } - if (parsedResult && typeof parsedResult === "object") { - const responseType = parsedResult.type as string | undefined; - if (responseType === "no_results") { - return { - type: "tool_response", - toolId, - toolName, - result: (parsedResult.message as string) || "No results found", - success: true, - timestamp: timestamp || new Date(), - }; - } - if (responseType === "agent_carousel") { - const agentsData = parsedResult.agents; - if (isAgentArray(agentsData)) { - return { - type: "agent_carousel", - toolId, - toolName: "agent_carousel", - agents: agentsData, - totalCount: parsedResult.total_count as number | undefined, - timestamp: timestamp || new Date(), - }; - } else { - console.warn("Invalid agents array in agent_carousel response"); - } - } - if (responseType === "execution_started") { - return { - type: "execution_started", - toolId, - toolName: "execution_started", - executionId: (parsedResult.execution_id as string) || "", - agentName: (parsedResult.graph_name as string) || undefined, - message: parsedResult.message as string | undefined, - libraryAgentLink: parsedResult.library_agent_link as string | undefined, - timestamp: timestamp || new Date(), - }; - } - if (responseType === "clarification_needed") { - return { - type: "clarification_needed", - toolName, - questions: - (parsedResult.questions as Array<{ - question: string; - keyword: string; - example?: string; - }>) || [], - message: - (parsedResult.message as string) || - "I need more information to proceed.", - sessionId: (parsedResult.session_id as string) || "", - timestamp: timestamp || new Date(), - }; - } - if (responseType === "operation_started") { - return { - type: "operation_started", - toolName: (parsedResult.tool_name as string) || toolName, - toolId, - operationId: (parsedResult.operation_id as string) || "", - taskId: (parsedResult.task_id as string) || undefined, // For SSE reconnection - message: - 
(parsedResult.message as string) || - "Operation started. You can close this tab.", - timestamp: timestamp || new Date(), - }; - } - if (responseType === "operation_pending") { - return { - type: "operation_pending", - toolName: (parsedResult.tool_name as string) || toolName, - toolId, - operationId: (parsedResult.operation_id as string) || "", - message: - (parsedResult.message as string) || - "Operation in progress. Please wait...", - timestamp: timestamp || new Date(), - }; - } - if (responseType === "operation_in_progress") { - return { - type: "operation_in_progress", - toolName: (parsedResult.tool_name as string) || toolName, - toolCallId: (parsedResult.tool_call_id as string) || toolId, - message: - (parsedResult.message as string) || - "Operation already in progress. Please wait...", - timestamp: timestamp || new Date(), - }; - } - if (responseType === "need_login") { - return { - type: "login_needed", - toolName: "login_needed", - message: - (parsedResult.message as string) || - "Please sign in to use chat and agent features", - sessionId: (parsedResult.session_id as string) || "", - agentInfo: parsedResult.agent_info as - | { - graph_id: string; - name: string; - trigger_type: string; - } - | undefined, - timestamp: timestamp || new Date(), - }; - } - if (responseType === "setup_requirements") { - return null; - } - if (responseType === "understanding_updated") { - return { - type: "tool_response", - toolId, - toolName, - result: (parsedResult || result) as ToolResult, - success: true, - timestamp: timestamp || new Date(), - }; - } - } - return { - type: "tool_response", - toolId, - toolName, - result: parsedResult ? 
(parsedResult as ToolResult) : result, - success: true, - timestamp: timestamp || new Date(), - }; -} - -export function isUserReadiness( - value: unknown, -): value is { missing_credentials?: Record } { - return ( - typeof value === "object" && - value !== null && - (!("missing_credentials" in value) || - typeof (value as any).missing_credentials === "object") - ); -} - -export function isMissingCredentials( - value: unknown, -): value is Record> { - if (typeof value !== "object" || value === null) { - return false; - } - return Object.values(value).every((v) => typeof v === "object" && v !== null); -} - -export function isSetupInfo(value: unknown): value is { - user_readiness?: Record; - agent_name?: string; -} { - return ( - typeof value === "object" && - value !== null && - (!("user_readiness" in value) || - typeof (value as any).user_readiness === "object") && - (!("agent_name" in value) || typeof (value as any).agent_name === "string") - ); -} - -export function extractCredentialsNeeded( - parsedResult: Record, - toolName: string = "run_agent", -): ChatMessageData | null { - try { - const setupInfo = parsedResult?.setup_info as - | Record - | undefined; - const userReadiness = setupInfo?.user_readiness as - | Record - | undefined; - const missingCreds = userReadiness?.missing_credentials as - | Record> - | undefined; - if (missingCreds && Object.keys(missingCreds).length > 0) { - const agentName = (setupInfo?.agent_name as string) || "this block"; - const credentials = Object.values(missingCreds).map((credInfo) => { - // Normalize to array at boundary - prefer 'types' array, fall back to single 'type' - const typesArray = credInfo.types as - | Array<"api_key" | "oauth2" | "user_password" | "host_scoped"> - | undefined; - const singleType = - (credInfo.type as - | "api_key" - | "oauth2" - | "user_password" - | "host_scoped" - | undefined) || "api_key"; - const credentialTypes = - typesArray && typesArray.length > 0 ? 
typesArray : [singleType]; - - return { - provider: (credInfo.provider as string) || "unknown", - providerName: - (credInfo.provider_name as string) || - (credInfo.provider as string) || - "Unknown Provider", - credentialTypes, - title: - (credInfo.title as string) || - `${(credInfo.provider_name as string) || (credInfo.provider as string)} credentials`, - scopes: credInfo.scopes as string[] | undefined, - }; - }); - return { - type: "credentials_needed", - toolName, - credentials, - message: `To run ${agentName}, you need to add ${credentials.length === 1 ? "credentials" : `${credentials.length} credentials`}.`, - agentName, - timestamp: new Date(), - }; - } - return null; - } catch (err) { - console.error("Failed to extract credentials from setup info:", err); - return null; - } -} - -export function extractInputsNeeded( - parsedResult: Record, - toolName: string = "run_agent", -): ChatMessageData | null { - try { - const setupInfo = parsedResult?.setup_info as - | Record - | undefined; - const requirements = setupInfo?.requirements as - | Record - | undefined; - const inputs = requirements?.inputs as - | Array> - | undefined; - const credentials = requirements?.credentials as - | Array> - | undefined; - - if (!inputs || inputs.length === 0) { - return null; - } - - const agentName = (setupInfo?.agent_name as string) || "this agent"; - const agentId = parsedResult?.graph_id as string | undefined; - const graphVersion = parsedResult?.graph_version as number | undefined; - - const properties: Record = {}; - const requiredProps: string[] = []; - inputs.forEach((input) => { - const name = input.name as string; - if (name) { - properties[name] = { - title: input.name as string, - description: (input.description as string) || "", - type: (input.type as string) || "string", - default: input.default, - enum: input.options, - format: input.format, - }; - if ((input.required as boolean) === true) { - requiredProps.push(name); - } - } - }); - - const inputSchema: Record = { 
- type: "object", - properties, - }; - if (requiredProps.length > 0) { - inputSchema.required = requiredProps; - } - - const credentialsSchema: Record = {}; - if (credentials && credentials.length > 0) { - credentials.forEach((cred) => { - const id = cred.id as string; - if (id) { - const credentialTypes = Array.isArray(cred.types) - ? cred.types - : [(cred.type as string) || "api_key"]; - credentialsSchema[id] = { - type: "object", - properties: {}, - credentials_provider: [cred.provider as string], - credentials_types: credentialTypes, - credentials_scopes: cred.scopes as string[] | undefined, - }; - } - }); - } - - return { - type: "inputs_needed", - toolName, - agentName, - agentId, - graphVersion, - inputSchema, - credentialsSchema: - Object.keys(credentialsSchema).length > 0 - ? credentialsSchema - : undefined, - message: `Please provide the required inputs to run ${agentName}.`, - timestamp: new Date(), - }; - } catch (err) { - console.error("Failed to extract inputs from setup info:", err); - return null; - } -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts deleted file mode 100644 index 248383df42..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts +++ /dev/null @@ -1,517 +0,0 @@ -import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; -import { useEffect, useMemo, useRef, useState } from "react"; -import { INITIAL_STREAM_ID } from "../../chat-constants"; -import { useChatStore } from "../../chat-store"; -import { toast } from "sonner"; -import { useChatStream } from "../../useChatStream"; -import { usePageContext } from "../../usePageContext"; -import type { ChatMessageData } from "../ChatMessage/useChatMessage"; -import { - getToolIdFromMessage, - hasToolId, - 
isOperationMessage, - type StreamChunk, -} from "../../chat-types"; -import { createStreamEventDispatcher } from "./createStreamEventDispatcher"; -import { - createUserMessage, - filterAuthMessages, - hasSentInitialPrompt, - markInitialPromptSent, - processInitialMessages, -} from "./helpers"; - -const TOOL_RESULT_TYPES = new Set([ - "tool_response", - "agent_carousel", - "execution_started", - "clarification_needed", -]); - -// Helper to generate deduplication key for a message -function getMessageKey(msg: ChatMessageData): string { - if (msg.type === "message") { - // Don't include timestamp - dedupe by role + content only - // This handles the case where local and server timestamps differ - // Server messages are authoritative, so duplicates from local state are filtered - return `msg:${msg.role}:${msg.content}`; - } else if (msg.type === "tool_call") { - return `toolcall:${msg.toolId}`; - } else if (TOOL_RESULT_TYPES.has(msg.type)) { - // Unified key for all tool result types - same toolId with different types - // (tool_response vs agent_carousel) should deduplicate to the same key - const toolId = getToolIdFromMessage(msg); - // If no toolId, fall back to content-based key to avoid empty key collisions - if (!toolId) { - return `toolresult:content:${JSON.stringify(msg).slice(0, 200)}`; - } - return `toolresult:${toolId}`; - } else if (isOperationMessage(msg)) { - const toolId = getToolIdFromMessage(msg) || ""; - return `op:${toolId}:${msg.toolName}`; - } else { - return `${msg.type}:${JSON.stringify(msg).slice(0, 100)}`; - } -} - -interface Args { - sessionId: string | null; - initialMessages: SessionDetailResponse["messages"]; - initialPrompt?: string; - onOperationStarted?: () => void; - /** Active stream info from the server for reconnection */ - activeStream?: { - taskId: string; - lastMessageId: string; - operationId: string; - toolName: string; - }; -} - -export function useChatContainer({ - sessionId, - initialMessages, - initialPrompt, - 
onOperationStarted, - activeStream, -}: Args) { - const [messages, setMessages] = useState([]); - const [streamingChunks, setStreamingChunks] = useState([]); - const [hasTextChunks, setHasTextChunks] = useState(false); - const [isStreamingInitiated, setIsStreamingInitiated] = useState(false); - const [isRegionBlockedModalOpen, setIsRegionBlockedModalOpen] = - useState(false); - const hasResponseRef = useRef(false); - const streamingChunksRef = useRef([]); - const textFinalizedRef = useRef(false); - const streamEndedRef = useRef(false); - const previousSessionIdRef = useRef(null); - const { - error, - sendMessage: sendStreamMessage, - stopStreaming, - } = useChatStream(); - const activeStreams = useChatStore((s) => s.activeStreams); - const subscribeToStream = useChatStore((s) => s.subscribeToStream); - const setActiveTask = useChatStore((s) => s.setActiveTask); - const getActiveTask = useChatStore((s) => s.getActiveTask); - const reconnectToTask = useChatStore((s) => s.reconnectToTask); - const isStreaming = isStreamingInitiated || hasTextChunks; - // Track whether we've already connected to this activeStream to avoid duplicate connections - const connectedActiveStreamRef = useRef(null); - // Track if component is mounted to prevent state updates after unmount - const isMountedRef = useRef(true); - // Track current dispatcher to prevent multiple dispatchers from adding messages - const currentDispatcherIdRef = useRef(0); - - // Set mounted flag - reset on every mount, cleanup on unmount - useEffect(function trackMountedState() { - isMountedRef.current = true; - return function cleanup() { - isMountedRef.current = false; - }; - }, []); - - // Callback to store active task info for SSE reconnection - function handleActiveTaskStarted(taskInfo: { - taskId: string; - operationId: string; - toolName: string; - toolCallId: string; - }) { - if (!sessionId) return; - setActiveTask(sessionId, { - taskId: taskInfo.taskId, - operationId: taskInfo.operationId, - toolName: 
taskInfo.toolName, - lastMessageId: INITIAL_STREAM_ID, - }); - } - - // Create dispatcher for stream events - stable reference for current sessionId - // Each dispatcher gets a unique ID to prevent stale dispatchers from updating state - function createDispatcher() { - if (!sessionId) return () => {}; - // Increment dispatcher ID - only the most recent dispatcher should update state - const dispatcherId = ++currentDispatcherIdRef.current; - - const baseDispatcher = createStreamEventDispatcher({ - setHasTextChunks, - setStreamingChunks, - streamingChunksRef, - hasResponseRef, - textFinalizedRef, - streamEndedRef, - setMessages, - setIsRegionBlockedModalOpen, - sessionId, - setIsStreamingInitiated, - onOperationStarted, - onActiveTaskStarted: handleActiveTaskStarted, - }); - - // Wrap dispatcher to check if it's still the current one - return function guardedDispatcher(chunk: StreamChunk) { - // Skip if component unmounted or this is a stale dispatcher - if (!isMountedRef.current) { - return; - } - if (dispatcherId !== currentDispatcherIdRef.current) { - return; - } - baseDispatcher(chunk); - }; - } - - useEffect( - function handleSessionChange() { - const isSessionChange = sessionId !== previousSessionIdRef.current; - - // Handle session change - reset state - if (isSessionChange) { - const prevSession = previousSessionIdRef.current; - if (prevSession) { - stopStreaming(prevSession); - } - previousSessionIdRef.current = sessionId; - connectedActiveStreamRef.current = null; - setMessages([]); - setStreamingChunks([]); - streamingChunksRef.current = []; - setHasTextChunks(false); - setIsStreamingInitiated(false); - hasResponseRef.current = false; - textFinalizedRef.current = false; - streamEndedRef.current = false; - } - - if (!sessionId) return; - - // Priority 1: Check if server told us there's an active stream (most authoritative) - if (activeStream) { - const streamKey = `${sessionId}:${activeStream.taskId}`; - - if (connectedActiveStreamRef.current === streamKey) 
{ - return; - } - - // Skip if there's already an active stream for this session in the store - const existingStream = activeStreams.get(sessionId); - if (existingStream && existingStream.status === "streaming") { - connectedActiveStreamRef.current = streamKey; - return; - } - - connectedActiveStreamRef.current = streamKey; - - // Clear all state before reconnection to prevent duplicates - // Server's initialMessages is authoritative; local state will be rebuilt from SSE replay - setMessages([]); - setStreamingChunks([]); - streamingChunksRef.current = []; - setHasTextChunks(false); - textFinalizedRef.current = false; - streamEndedRef.current = false; - hasResponseRef.current = false; - - setIsStreamingInitiated(true); - setActiveTask(sessionId, { - taskId: activeStream.taskId, - operationId: activeStream.operationId, - toolName: activeStream.toolName, - lastMessageId: activeStream.lastMessageId, - }); - reconnectToTask( - sessionId, - activeStream.taskId, - activeStream.lastMessageId, - createDispatcher(), - ); - // Don't return cleanup here - the guarded dispatcher handles stale events - // and the stream will complete naturally. Cleanup would prematurely stop - // the stream when effect re-runs due to activeStreams changing. 
- return; - } - - // Only check localStorage/in-memory on session change - if (!isSessionChange) return; - - // Priority 2: Check localStorage for active task - const activeTask = getActiveTask(sessionId); - if (activeTask) { - // Clear all state before reconnection to prevent duplicates - // Server's initialMessages is authoritative; local state will be rebuilt from SSE replay - setMessages([]); - setStreamingChunks([]); - streamingChunksRef.current = []; - setHasTextChunks(false); - textFinalizedRef.current = false; - streamEndedRef.current = false; - hasResponseRef.current = false; - - setIsStreamingInitiated(true); - reconnectToTask( - sessionId, - activeTask.taskId, - activeTask.lastMessageId, - createDispatcher(), - ); - // Don't return cleanup here - the guarded dispatcher handles stale events - return; - } - - // Priority 3: Check for an in-memory active stream (same-tab scenario) - const inMemoryStream = activeStreams.get(sessionId); - if (!inMemoryStream || inMemoryStream.status !== "streaming") { - return; - } - - setIsStreamingInitiated(true); - const skipReplay = initialMessages.length > 0; - return subscribeToStream(sessionId, createDispatcher(), skipReplay); - }, - [ - sessionId, - stopStreaming, - activeStreams, - subscribeToStream, - onOperationStarted, - getActiveTask, - reconnectToTask, - activeStream, - setActiveTask, - ], - ); - - // Collect toolIds from completed tool results in initialMessages - // Used to filter out operation messages when their results arrive - const completedToolIds = useMemo(() => { - const processedInitial = processInitialMessages(initialMessages); - const ids = new Set(); - for (const msg of processedInitial) { - if ( - msg.type === "tool_response" || - msg.type === "agent_carousel" || - msg.type === "execution_started" - ) { - const toolId = hasToolId(msg) ? 
msg.toolId : undefined; - if (toolId) { - ids.add(toolId); - } - } - } - return ids; - }, [initialMessages]); - - // Clean up local operation messages when their completed results arrive from polling - // This effect runs when completedToolIds changes (i.e., when polling brings new results) - useEffect( - function cleanupCompletedOperations() { - if (completedToolIds.size === 0) return; - - setMessages((prev) => { - const filtered = prev.filter((msg) => { - if (isOperationMessage(msg)) { - const toolId = getToolIdFromMessage(msg); - if (toolId && completedToolIds.has(toolId)) { - return false; // Remove - operation completed - } - } - return true; - }); - // Only update state if something was actually filtered - return filtered.length === prev.length ? prev : filtered; - }); - }, - [completedToolIds], - ); - - // Combine initial messages from backend with local streaming messages, - // Server messages maintain correct order; only append truly new local messages - const allMessages = useMemo(() => { - const processedInitial = processInitialMessages(initialMessages); - - // Build a set of keys from server messages for deduplication - const serverKeys = new Set(); - for (const msg of processedInitial) { - serverKeys.add(getMessageKey(msg)); - } - - // Filter local messages: remove duplicates and completed operation messages - const newLocalMessages = messages.filter((msg) => { - // Remove operation messages for completed tools - if (isOperationMessage(msg)) { - const toolId = getToolIdFromMessage(msg); - if (toolId && completedToolIds.has(toolId)) { - return false; - } - } - // Remove messages that already exist in server data - const key = getMessageKey(msg); - return !serverKeys.has(key); - }); - - // Server messages first (correct order), then new local messages - const combined = [...processedInitial, ...newLocalMessages]; - - // Post-processing: Remove duplicate assistant messages that can occur during - // race conditions (e.g., rapid screen switching during SSE 
reconnection). - // Two assistant messages are considered duplicates if: - // - They are both text messages with role "assistant" - // - One message's content starts with the other's content (partial vs complete) - // - Or they have very similar content (>80% overlap at the start) - const deduplicated: ChatMessageData[] = []; - for (let i = 0; i < combined.length; i++) { - const current = combined[i]; - - // Check if this is an assistant text message - if (current.type !== "message" || current.role !== "assistant") { - deduplicated.push(current); - continue; - } - - // Look for duplicate assistant messages in the rest of the array - let dominated = false; - for (let j = 0; j < combined.length; j++) { - if (i === j) continue; - const other = combined[j]; - if (other.type !== "message" || other.role !== "assistant") continue; - - const currentContent = current.content || ""; - const otherContent = other.content || ""; - - // Skip empty messages - if (!currentContent.trim() || !otherContent.trim()) continue; - - // Check if current is a prefix of other (current is incomplete version) - if ( - otherContent.length > currentContent.length && - otherContent.startsWith(currentContent.slice(0, 100)) - ) { - // Current is a shorter/incomplete version of other - skip it - dominated = true; - break; - } - - // Check if messages are nearly identical (within a small difference) - // This catches cases where content differs only slightly - const minLen = Math.min(currentContent.length, otherContent.length); - const compareLen = Math.min(minLen, 200); // Compare first 200 chars - if ( - compareLen > 50 && - currentContent.slice(0, compareLen) === - otherContent.slice(0, compareLen) - ) { - // Same prefix - keep the longer one - if (otherContent.length > currentContent.length) { - dominated = true; - break; - } - } - } - - if (!dominated) { - deduplicated.push(current); - } - } - - return deduplicated; - }, [initialMessages, messages, completedToolIds]); - - async function 
sendMessage( - content: string, - isUserMessage: boolean = true, - context?: { url: string; content: string }, - ) { - if (!sessionId) return; - - setIsRegionBlockedModalOpen(false); - if (isUserMessage) { - const userMessage = createUserMessage(content); - setMessages((prev) => [...filterAuthMessages(prev), userMessage]); - } else { - setMessages((prev) => filterAuthMessages(prev)); - } - setStreamingChunks([]); - streamingChunksRef.current = []; - setHasTextChunks(false); - setIsStreamingInitiated(true); - hasResponseRef.current = false; - textFinalizedRef.current = false; - streamEndedRef.current = false; - - try { - await sendStreamMessage( - sessionId, - content, - createDispatcher(), - isUserMessage, - context, - ); - } catch (err) { - setIsStreamingInitiated(false); - if (err instanceof Error && err.name === "AbortError") return; - - const errorMessage = - err instanceof Error ? err.message : "Failed to send message"; - toast.error("Failed to send message", { - description: errorMessage, - }); - } - } - - function handleStopStreaming() { - stopStreaming(); - setStreamingChunks([]); - streamingChunksRef.current = []; - setHasTextChunks(false); - setIsStreamingInitiated(false); - } - - const { capturePageContext } = usePageContext(); - const sendMessageRef = useRef(sendMessage); - sendMessageRef.current = sendMessage; - - useEffect( - function handleInitialPrompt() { - if (!initialPrompt || !sessionId) return; - if (initialMessages.length > 0) return; - if (hasSentInitialPrompt(sessionId)) return; - - markInitialPromptSent(sessionId); - const context = capturePageContext(); - sendMessageRef.current(initialPrompt, true, context); - }, - [initialPrompt, sessionId, initialMessages.length, capturePageContext], - ); - - async function sendMessageWithContext( - content: string, - isUserMessage: boolean = true, - ) { - const context = capturePageContext(); - await sendMessage(content, isUserMessage, context); - } - - function handleRegionModalOpenChange(open: 
boolean) { - setIsRegionBlockedModalOpen(open); - } - - function handleRegionModalClose() { - setIsRegionBlockedModalOpen(false); - } - - return { - messages: allMessages, - streamingChunks, - isStreaming, - error, - isRegionBlockedModalOpen, - setIsRegionBlockedModalOpen, - sendMessageWithContext, - handleRegionModalOpenChange, - handleRegionModalClose, - sendMessage, - stopStreaming: handleStopStreaming, - }; -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx deleted file mode 100644 index f0dfadd1f7..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx +++ /dev/null @@ -1,151 +0,0 @@ -import { Text } from "@/components/atoms/Text/Text"; -import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput"; -import type { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api"; -import { cn } from "@/lib/utils"; -import { CheckIcon, RobotIcon, WarningIcon } from "@phosphor-icons/react"; -import { useEffect, useRef } from "react"; -import { useChatCredentialsSetup } from "./useChatCredentialsSetup"; - -export interface CredentialInfo { - provider: string; - providerName: string; - credentialTypes: Array< - "api_key" | "oauth2" | "user_password" | "host_scoped" - >; - title: string; - scopes?: string[]; -} - -interface Props { - credentials: CredentialInfo[]; - agentName?: string; - message: string; - onAllCredentialsComplete: () => void; - onCancel: () => void; - className?: string; -} - -function createSchemaFromCredentialInfo( - credential: CredentialInfo, -): BlockIOCredentialsSubSchema { - return { - type: "object", - properties: {}, - credentials_provider: [credential.provider], - credentials_types: credential.credentialTypes, - credentials_scopes: 
credential.scopes, - discriminator: undefined, - discriminator_mapping: undefined, - discriminator_values: undefined, - }; -} - -export function ChatCredentialsSetup({ - credentials, - agentName: _agentName, - message, - onAllCredentialsComplete, - onCancel: _onCancel, -}: Props) { - const { selectedCredentials, isAllComplete, handleCredentialSelect } = - useChatCredentialsSetup(credentials); - - // Track if we've already called completion to prevent double calls - const hasCalledCompleteRef = useRef(false); - - // Reset the completion flag when credentials change (new credential setup flow) - useEffect( - function resetCompletionFlag() { - hasCalledCompleteRef.current = false; - }, - [credentials], - ); - - // Auto-call completion when all credentials are configured - useEffect( - function autoCompleteWhenReady() { - if (isAllComplete && !hasCalledCompleteRef.current) { - hasCalledCompleteRef.current = true; - onAllCredentialsComplete(); - } - }, - [isAllComplete, onAllCredentialsComplete], - ); - - return ( -
-
-
-
- -
-
- -
-
-
-
-
- - Credentials Required - - - {message} - -
- -
- {credentials.map((cred, index) => { - const schema = createSchemaFromCredentialInfo(cred); - const isSelected = !!selectedCredentials[cred.provider]; - - return ( -
-
- {isSelected ? ( - - ) : ( - - )} - - {cred.providerName} - -
- - - handleCredentialSelect(cred.provider, credMeta) - } - /> -
- ); - })} -
-
-
-
-
-
- ); -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatCredentialsSetup/useChatCredentialsSetup.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatCredentialsSetup/useChatCredentialsSetup.ts deleted file mode 100644 index 6b4b26e834..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatCredentialsSetup/useChatCredentialsSetup.ts +++ /dev/null @@ -1,36 +0,0 @@ -import { useState, useMemo } from "react"; -import type { CredentialInfo } from "./ChatCredentialsSetup"; -import type { CredentialsMetaInput } from "@/lib/autogpt-server-api"; - -export function useChatCredentialsSetup(credentials: CredentialInfo[]) { - const [selectedCredentials, setSelectedCredentials] = useState< - Record - >({}); - - // Check if all credentials are configured - const isAllComplete = useMemo( - function checkAllComplete() { - if (credentials.length === 0) return false; - return credentials.every((cred) => selectedCredentials[cred.provider]); - }, - [credentials, selectedCredentials], - ); - - function handleCredentialSelect( - provider: string, - credential?: CredentialsMetaInput, - ) { - if (credential) { - setSelectedCredentials((prev) => ({ - ...prev, - [provider]: credential, - })); - } - } - - return { - selectedCredentials, - isAllComplete, - handleCredentialSelect, - }; -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatErrorState/ChatErrorState.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatErrorState/ChatErrorState.tsx deleted file mode 100644 index bac13d1b0c..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatErrorState/ChatErrorState.tsx +++ /dev/null @@ -1,30 +0,0 @@ -import React from "react"; -import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; -import { cn } from "@/lib/utils"; - -export interface ChatErrorStateProps { - error: Error; - onRetry?: () 
=> void; - className?: string; -} - -export function ChatErrorState({ - error, - onRetry, - className, -}: ChatErrorStateProps) { - return ( -
- -
- ); -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatLoader/ChatLoader.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatLoader/ChatLoader.tsx deleted file mode 100644 index 76cee8dbae..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatLoader/ChatLoader.tsx +++ /dev/null @@ -1,7 +0,0 @@ -export function ChatLoader() { - return ( -
-
-
- ); -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatLoadingState/ChatLoadingState.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatLoadingState/ChatLoadingState.tsx deleted file mode 100644 index c0cdb33c50..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatLoadingState/ChatLoadingState.tsx +++ /dev/null @@ -1,19 +0,0 @@ -import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; -import { cn } from "@/lib/utils"; - -export interface ChatLoadingStateProps { - message?: string; - className?: string; -} - -export function ChatLoadingState({ className }: ChatLoadingStateProps) { - return ( -
-
- -
-
- ); -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx deleted file mode 100644 index 44dae40eb4..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx +++ /dev/null @@ -1,448 +0,0 @@ -"use client"; - -import { Button } from "@/components/atoms/Button/Button"; -import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; -import { cn } from "@/lib/utils"; -import { - ArrowsClockwiseIcon, - CheckCircleIcon, - CheckIcon, -} from "@phosphor-icons/react"; -import { useRouter } from "next/navigation"; -import { useCallback, useState } from "react"; -import { AgentCarouselMessage } from "../AgentCarouselMessage/AgentCarouselMessage"; -import { AIChatBubble } from "../AIChatBubble/AIChatBubble"; -import { AuthPromptWidget } from "../AuthPromptWidget/AuthPromptWidget"; -import { ChatCredentialsSetup } from "../ChatCredentialsSetup/ChatCredentialsSetup"; -import { ClarificationQuestionsWidget } from "../ClarificationQuestionsWidget/ClarificationQuestionsWidget"; -import { ExecutionStartedMessage } from "../ExecutionStartedMessage/ExecutionStartedMessage"; -import { PendingOperationWidget } from "../PendingOperationWidget/PendingOperationWidget"; -import { MarkdownContent } from "../MarkdownContent/MarkdownContent"; -import { NoResultsMessage } from "../NoResultsMessage/NoResultsMessage"; -import { ToolCallMessage } from "../ToolCallMessage/ToolCallMessage"; -import { ToolResponseMessage } from "../ToolResponseMessage/ToolResponseMessage"; -import { UserChatBubble } from "../UserChatBubble/UserChatBubble"; -import { useChatMessage, type ChatMessageData } from "./useChatMessage"; - -function stripInternalReasoning(content: string): string { - const cleaned = content.replace( - /[\s\S]*?<\/internal_reasoning>/gi, - "", - ); - return cleaned.replace(/\n{3,}/g, 
"\n\n").trim(); -} - -function getDisplayContent(message: ChatMessageData, isUser: boolean): string { - if (message.type !== "message") return ""; - if (isUser) return message.content; - return stripInternalReasoning(message.content); -} - -export interface ChatMessageProps { - message: ChatMessageData; - messages?: ChatMessageData[]; - index?: number; - isStreaming?: boolean; - className?: string; - onDismissLogin?: () => void; - onDismissCredentials?: () => void; - onSendMessage?: (content: string, isUserMessage?: boolean) => void; - agentOutput?: ChatMessageData; - isFinalMessage?: boolean; -} - -export function ChatMessage({ - message, - messages = [], - index = -1, - isStreaming = false, - className, - onDismissCredentials, - onSendMessage, - agentOutput, - isFinalMessage = true, -}: ChatMessageProps) { - const { user } = useSupabase(); - const router = useRouter(); - const [copied, setCopied] = useState(false); - const { - isUser, - isToolCall, - isToolResponse, - isLoginNeeded, - isCredentialsNeeded, - isClarificationNeeded, - isOperationStarted, - isOperationPending, - isOperationInProgress, - } = useChatMessage(message); - const displayContent = getDisplayContent(message, isUser); - - const handleAllCredentialsComplete = useCallback( - function handleAllCredentialsComplete() { - // Send a user message that explicitly asks to retry the setup - // This ensures the LLM calls get_required_setup_info again and proceeds with execution - if (onSendMessage) { - onSendMessage( - "I've configured the required credentials. 
Please check if everything is ready and proceed with setting up the agent.", - ); - } - // Optionally dismiss the credentials prompt - if (onDismissCredentials) { - onDismissCredentials(); - } - }, - [onSendMessage, onDismissCredentials], - ); - - function handleCancelCredentials() { - // Dismiss the credentials prompt - if (onDismissCredentials) { - onDismissCredentials(); - } - } - - const handleCopy = useCallback( - async function handleCopy() { - if (message.type !== "message") return; - if (!displayContent) return; - - try { - await navigator.clipboard.writeText(displayContent); - setCopied(true); - setTimeout(() => setCopied(false), 2000); - } catch (error) { - console.error("Failed to copy:", error); - } - }, - [displayContent, message], - ); - - const handleTryAgain = useCallback(() => { - if (message.type !== "message" || !onSendMessage) return; - onSendMessage(message.content, message.role === "user"); - }, [message, onSendMessage]); - - const handleViewExecution = useCallback(() => { - if (message.type === "execution_started" && message.libraryAgentLink) { - router.push(message.libraryAgentLink); - } - }, [message, router]); - - // Render credentials needed messages - if (isCredentialsNeeded && message.type === "credentials_needed") { - return ( - - ); - } - - if (isClarificationNeeded && message.type === "clarification_needed") { - const hasUserReplyAfter = - index >= 0 && - messages - .slice(index + 1) - .some((m) => m.type === "message" && m.role === "user"); - - const handleClarificationAnswers = (answers: Record) => { - if (onSendMessage) { - // Iterate over questions (preserves original order) instead of answers - const contextMessage = message.questions - .map((q) => { - const answer = answers[q.keyword] || ""; - return `> ${q.question}\n\n${answer}`; - }) - .join("\n\n"); - - onSendMessage( - `**Here are my answers:**\n\n${contextMessage}\n\nPlease proceed with creating the agent.`, - ); - } - }; - - return ( - - ); - } - - // Render login needed 
messages - if (isLoginNeeded && message.type === "login_needed") { - // If user is already logged in, show success message instead of auth prompt - if (user) { - return ( -
-
-
-
-
- -
-
-

- Successfully Authenticated -

-

- You're now signed in and ready to continue -

-
-
-
-
-
- ); - } - - // Show auth prompt if not logged in - return ( -
- -
- ); - } - - // Render tool call messages - if (isToolCall && message.type === "tool_call") { - // Check if this tool call is currently streaming - // A tool call is streaming if: - // 1. isStreaming is true - // 2. This is the last tool_call message - // 3. There's no tool_response for this tool call yet - const isToolCallStreaming = - isStreaming && - index >= 0 && - (() => { - // Find the last tool_call index - let lastToolCallIndex = -1; - for (let i = messages.length - 1; i >= 0; i--) { - if (messages[i].type === "tool_call") { - lastToolCallIndex = i; - break; - } - } - // Check if this is the last tool_call and there's no response yet - if (index === lastToolCallIndex) { - // Check if there's a tool_response for this tool call - const hasResponse = messages - .slice(index + 1) - .some( - (msg) => - msg.type === "tool_response" && msg.toolId === message.toolId, - ); - return !hasResponse; - } - return false; - })(); - - return ( -
- -
- ); - } - - // Render no_results messages - use dedicated component, not ToolResponseMessage - if (message.type === "no_results") { - return ( -
- -
- ); - } - - // Render agent_carousel messages - use dedicated component, not ToolResponseMessage - if (message.type === "agent_carousel") { - return ( -
- -
- ); - } - - // Render execution_started messages - use dedicated component, not ToolResponseMessage - if (message.type === "execution_started") { - return ( -
- -
- ); - } - - // Render operation_started messages (long-running background operations) - if (isOperationStarted && message.type === "operation_started") { - return ( - - ); - } - - // Render operation_pending messages (operations in progress when refreshing) - if (isOperationPending && message.type === "operation_pending") { - return ( - - ); - } - - // Render operation_in_progress messages (duplicate request while operation running) - if (isOperationInProgress && message.type === "operation_in_progress") { - return ( - - ); - } - - // Render tool response messages (but skip agent_output if it's being rendered inside assistant message) - if (isToolResponse && message.type === "tool_response") { - return ( -
- -
- ); - } - - // Render regular chat messages - if (message.type === "message") { - return ( -
-
-
- {isUser ? ( - - - - ) : ( - - - {agentOutput && agentOutput.type === "tool_response" && ( -
- -
- )} -
- )} -
- {isUser && onSendMessage && ( - - )} - {!isUser && isFinalMessage && !isStreaming && ( - - )} -
-
-
-
- ); - } - - // Fallback for unknown message types - return null; -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/useChatMessage.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/useChatMessage.ts deleted file mode 100644 index 6809497a93..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/useChatMessage.ts +++ /dev/null @@ -1,157 +0,0 @@ -import type { ToolArguments, ToolResult } from "@/types/chat"; -import { formatDistanceToNow } from "date-fns"; - -export type ChatMessageData = - | { - type: "message"; - role: "user" | "assistant" | "system"; - content: string; - timestamp?: string | Date; - } - | { - type: "tool_call"; - toolId: string; - toolName: string; - arguments?: ToolArguments; - timestamp?: string | Date; - } - | { - type: "tool_response"; - toolId: string; - toolName: string; - result: ToolResult; - success?: boolean; - timestamp?: string | Date; - } - | { - type: "login_needed"; - toolName: string; - message: string; - sessionId: string; - agentInfo?: { - graph_id: string; - name: string; - trigger_type: string; - }; - timestamp?: string | Date; - } - | { - type: "credentials_needed"; - toolName: string; - credentials: Array<{ - provider: string; - providerName: string; - credentialTypes: Array< - "api_key" | "oauth2" | "user_password" | "host_scoped" - >; - title: string; - scopes?: string[]; - }>; - message: string; - agentName?: string; - timestamp?: string | Date; - } - | { - type: "no_results"; - toolName: string; - message: string; - suggestions?: string[]; - sessionId?: string; - timestamp?: string | Date; - } - | { - type: "agent_carousel"; - toolId: string; - toolName: string; - agents: Array<{ - id: string; - name: string; - description: string; - version?: number; - image_url?: string; - }>; - totalCount?: number; - timestamp?: string | Date; - } - | { - type: "execution_started"; - toolId: string; - toolName: 
string; - executionId: string; - agentName?: string; - message?: string; - libraryAgentLink?: string; - timestamp?: string | Date; - } - | { - type: "inputs_needed"; - toolName: string; - agentName?: string; - agentId?: string; - graphVersion?: number; - inputSchema: Record; - credentialsSchema?: Record; - message: string; - timestamp?: string | Date; - } - | { - type: "clarification_needed"; - toolName: string; - questions: Array<{ - question: string; - keyword: string; - example?: string; - }>; - message: string; - sessionId: string; - timestamp?: string | Date; - } - | { - type: "operation_started"; - toolName: string; - toolId: string; - operationId: string; - taskId?: string; // For SSE reconnection - message: string; - timestamp?: string | Date; - } - | { - type: "operation_pending"; - toolName: string; - toolId: string; - operationId: string; - message: string; - timestamp?: string | Date; - } - | { - type: "operation_in_progress"; - toolName: string; - toolCallId: string; - message: string; - timestamp?: string | Date; - }; - -export function useChatMessage(message: ChatMessageData) { - const formattedTimestamp = message.timestamp - ? 
formatDistanceToNow(new Date(message.timestamp), { addSuffix: true }) - : "Just now"; - - return { - formattedTimestamp, - isUser: message.type === "message" && message.role === "user", - isAssistant: message.type === "message" && message.role === "assistant", - isSystem: message.type === "message" && message.role === "system", - isToolCall: message.type === "tool_call", - isToolResponse: message.type === "tool_response", - isLoginNeeded: message.type === "login_needed", - isCredentialsNeeded: message.type === "credentials_needed", - isNoResults: message.type === "no_results", - isAgentCarousel: message.type === "agent_carousel", - isExecutionStarted: message.type === "execution_started", - isInputsNeeded: message.type === "inputs_needed", - isClarificationNeeded: message.type === "clarification_needed", - isOperationStarted: message.type === "operation_started", - isOperationPending: message.type === "operation_pending", - isOperationInProgress: message.type === "operation_in_progress", - }; -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ExecutionStartedMessage/ExecutionStartedMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ExecutionStartedMessage/ExecutionStartedMessage.tsx deleted file mode 100644 index 1ac3b440e0..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ExecutionStartedMessage/ExecutionStartedMessage.tsx +++ /dev/null @@ -1,90 +0,0 @@ -import { Button } from "@/components/atoms/Button/Button"; -import { Text } from "@/components/atoms/Text/Text"; -import { cn } from "@/lib/utils"; -import { ArrowSquareOut, CheckCircle, Play } from "@phosphor-icons/react"; - -export interface ExecutionStartedMessageProps { - executionId: string; - agentName?: string; - message?: string; - onViewExecution?: () => void; - className?: string; -} - -export function ExecutionStartedMessage({ - executionId, - agentName, - message = "Agent execution started successfully", 
- onViewExecution, - className, -}: ExecutionStartedMessageProps) { - return ( -
- {/* Icon & Header */} -
-
- -
-
- - Execution Started - - - {message} - -
-
- - {/* Details */} -
-
- {agentName && ( -
- - Agent: - - - {agentName} - -
- )} -
- - Execution ID: - - - {executionId.slice(0, 16)}... - -
-
-
- - {/* Action Buttons */} - {onViewExecution && ( -
- -
- )} - -
- - - Your agent is now running. You can monitor its progress in the monitor - page. - -
-
- ); -} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/MarkdownContent/MarkdownContent.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/MarkdownContent/MarkdownContent.tsx deleted file mode 100644 index ecadbe938b..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/MarkdownContent/MarkdownContent.tsx +++ /dev/null @@ -1,349 +0,0 @@ -"use client"; - -import { getGetWorkspaceDownloadFileByIdUrl } from "@/app/api/__generated__/endpoints/workspace/workspace"; -import { cn } from "@/lib/utils"; -import { EyeSlash } from "@phosphor-icons/react"; -import React, { useState } from "react"; -import ReactMarkdown from "react-markdown"; -import remarkGfm from "remark-gfm"; - -interface MarkdownContentProps { - content: string; - className?: string; -} - -interface CodeProps extends React.HTMLAttributes { - children?: React.ReactNode; - className?: string; -} - -interface ListProps extends React.HTMLAttributes { - children?: React.ReactNode; - className?: string; -} - -interface ListItemProps extends React.HTMLAttributes { - children?: React.ReactNode; - className?: string; -} - -interface InputProps extends React.InputHTMLAttributes { - type?: string; -} - -/** - * Converts a workspace:// URL to a proxy URL that routes through Next.js to the backend. - * workspace://abc123 -> /api/proxy/api/workspace/files/abc123/download - * - * Uses the generated API URL helper and routes through the Next.js proxy - * which handles authentication and proper backend routing. - */ -/** - * URL transformer for ReactMarkdown. - * Converts workspace:// URLs to proxy URLs that route through Next.js to the backend. - * workspace://abc123 -> /api/proxy/api/workspace/files/abc123/download - * - * This is needed because ReactMarkdown sanitizes URLs and only allows - * http, https, mailto, and tel protocols by default. 
- */ -function resolveWorkspaceUrl(src: string): string { - if (src.startsWith("workspace://")) { - // Strip MIME type fragment if present (e.g., workspace://abc123#video/mp4 β†’ abc123) - const withoutPrefix = src.replace("workspace://", ""); - const fileId = withoutPrefix.split("#")[0]; - // Use the generated API URL helper to get the correct path - const apiPath = getGetWorkspaceDownloadFileByIdUrl(fileId); - // Route through the Next.js proxy (same pattern as customMutator for client-side) - return `/api/proxy${apiPath}`; - } - return src; -} - -/** - * Check if the image URL is a workspace file (AI cannot see these yet). - * After URL transformation, workspace files have URLs like /api/proxy/api/workspace/files/... - */ -function isWorkspaceImage(src: string | undefined): boolean { - return src?.includes("/workspace/files/") ?? false; -} - -/** - * Renders a workspace video with controls and an optional "AI cannot see" badge. - */ -function WorkspaceVideo({ - src, - aiCannotSee, -}: { - src: string; - aiCannotSee: boolean; -}) { - return ( - - - {aiCannotSee && ( - - - AI cannot see this video - - )} - - ); -} - -/** - * Custom image component that shows an indicator when the AI cannot see the image. - * Also handles the "video:" alt-text prefix convention to render