From cfb7dc5aca31b675dd1b634fcd2bc7df7b328b2b Mon Sep 17 00:00:00 2001
From: Swifty
Date: Mon, 26 Jan 2026 13:26:15 +0100
Subject: [PATCH 01/21] feat(backend): Add PostHog analytics and OpenRouter tracing to chat system (#11828)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Adds analytics tracking to the chat copilot system for better observability of user interactions and agent operations.

### Changes 🏗️

**PostHog Analytics Integration:**

- Added `posthog` dependency (v7.6.0) to track chat events
- Created new tracking module (`backend/api/features/chat/tracking.py`) with events:
  - `copilot_message_sent` - When a user sends a message
  - `copilot_tool_called` - When a tool is called (includes tool name)
  - `copilot_agent_run_success` - When an agent runs successfully
  - `copilot_agent_scheduled` - When an agent is scheduled
  - `copilot_trigger_setup` - When a trigger is set up
- Added PostHog configuration to settings:
  - `POSTHOG_API_KEY` - API key for PostHog
  - `POSTHOG_HOST` - PostHog host URL (defaults to `https://eu.i.posthog.com`)

**OpenRouter Tracing:**

- Added `user` and `session_id` fields to chat completion API calls for OpenRouter tracing
- Added `posthogDistinctId` and `posthogProperties` (with environment) to API calls

**Files Changed:**

- `backend/api/features/chat/tracking.py` - New PostHog tracking module
- `backend/api/features/chat/service.py` - Added user message tracking and OpenRouter tracing
- `backend/api/features/chat/tools/__init__.py` - Added tool call tracking
- `backend/api/features/chat/tools/run_agent.py` - Added agent run/schedule tracking
- `backend/util/settings.py` - Added PostHog configuration fields
- `pyproject.toml` - Added posthog dependency
- `frontend/src/providers/posthog/posthog-provider.tsx` - New frontend PostHog provider (init, user identification, pageview tracking)
- `frontend/src/app/providers.tsx` - Mounted the PostHog provider in the app provider tree

### Checklist 📋

#### For code changes:

- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
  - [x] Verified code passes linting and formatting
  - [x] Verified PostHog client initializes correctly when API key is provided
  - [x] Verified tracking is gracefully skipped when PostHog is not configured

#### For configuration changes:

- [x] `.env.default` is updated or already compatible with my changes
- [x] `docker-compose.yml` is updated or already compatible with my changes
- [x] I have included a list of my configuration changes in the PR description (under **Changes**)

**New environment variables (optional):**

- `POSTHOG_API_KEY` - PostHog project API key
- `POSTHOG_HOST` - PostHog host URL (optional, defaults to EU cloud)
---
 autogpt_platform/backend/.env.default        |   5 +
 .../backend/api/features/chat/service.py     |  51 +++-
 .../api/features/chat/tools/__init__.py      |  17 ++
 .../api/features/chat/tools/run_agent.py     |  26 ++
 .../backend/api/features/chat/tracking.py    | 250 ++++++++++++++++++
 .../backend/backend/util/settings.py         |   6 +
 autogpt_platform/backend/poetry.lock         |  12 +-
 autogpt_platform/backend/pyproject.toml      |   1 +
 autogpt_platform/frontend/.env.default       |   4 +
 autogpt_platform/frontend/package.json       |   4 +-
 autogpt_platform/frontend/pnpm-lock.yaml     | 246 +++++++++++++++++
 .../frontend/src/app/providers.tsx           |  36 ++-
 .../providers/posthog/posthog-provider.tsx   |  64 +++++
 13 files changed, 701 insertions(+), 21 deletions(-)
 create mode 100644 autogpt_platform/backend/backend/api/features/chat/tracking.py
 create mode 100644 autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx

diff --git a/autogpt_platform/backend/.env.default b/autogpt_platform/backend/.env.default
index 49689d7ad6..b393f13017
100644 --- a/autogpt_platform/backend/.env.default +++ b/autogpt_platform/backend/.env.default @@ -178,5 +178,10 @@ AYRSHARE_JWT_KEY= SMARTLEAD_API_KEY= ZEROBOUNCE_API_KEY= +# PostHog Analytics +# Get API key from https://posthog.com - Project Settings > Project API Key +POSTHOG_API_KEY= +POSTHOG_HOST=https://eu.i.posthog.com + # Other Services AUTOMOD_API_KEY= diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index 3daf378f65..43fb7b35a7 100644 --- a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -48,6 +48,7 @@ from .response_model import ( StreamUsage, ) from .tools import execute_tool, tools +from .tracking import track_user_message logger = logging.getLogger(__name__) @@ -103,16 +104,33 @@ async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]: return compiled, understanding -async def _generate_session_title(message: str) -> str | None: +async def _generate_session_title( + message: str, + user_id: str | None = None, + session_id: str | None = None, +) -> str | None: """Generate a concise title for a chat session based on the first message. Args: message: The first user message in the session + user_id: User ID for OpenRouter tracing (optional) + session_id: Session ID for OpenRouter tracing (optional) Returns: A short title (3-6 words) or None if generation fails """ try: + # Build extra_body for OpenRouter tracing and PostHog analytics + extra_body: dict[str, Any] = {} + if user_id: + extra_body["user"] = user_id[:128] # OpenRouter limit + extra_body["posthogDistinctId"] = user_id + if session_id: + extra_body["session_id"] = session_id[:128] # OpenRouter limit + extra_body["posthogProperties"] = { + "environment": settings.config.app_env.value, + } + response = await client.chat.completions.create( model=config.title_model, messages=[ @@ -127,6 +145,7 @@ async def _generate_session_title(message: str) -> str | None: {"role": "user", "content": message[:500]}, # Limit input length ], max_tokens=20, + extra_body=extra_body, ) title = response.choices[0].message.content if title: @@ -237,6 +256,14 @@ async def stream_chat_completion( f"new message_count={len(session.messages)}" ) + # Track user message in PostHog + if is_user_message: + track_user_message( + user_id=user_id, + session_id=session_id, + message_length=len(message), + ) + logger.info( f"Upserting session: {session.session_id} with user id {session.user_id}, " f"message_count={len(session.messages)}" @@ -256,10 +283,15 @@ async def stream_chat_completion( # stale data issues when the main flow modifies the session captured_session_id = session_id captured_message = message + captured_user_id = user_id async def _update_title(): try: - title = await _generate_session_title(captured_message) + title = await _generate_session_title( + captured_message, + user_id=captured_user_id, + session_id=captured_session_id, + ) if title: # Use dedicated title update function that doesn't # touch messages, avoiding race conditions @@ -698,6 +730,20 @@ async def _stream_chat_chunks( f"{f' (retry {retry_count}/{MAX_RETRIES})' if retry_count > 0 else ''}" ) + # Build extra_body for OpenRouter tracing and PostHog analytics + extra_body: dict[str, Any] = { + "posthogProperties": { + "environment": settings.config.app_env.value, + }, + } + if session.user_id: + extra_body["user"] = session.user_id[:128] # OpenRouter limit + extra_body["posthogDistinctId"] 
= session.user_id + if session.session_id: + extra_body["session_id"] = session.session_id[ + :128 + ] # OpenRouter limit + # Create the stream with proper types stream = await client.chat.completions.create( model=model, @@ -706,6 +752,7 @@ async def _stream_chat_chunks( tool_choice="auto", stream=True, stream_options={"include_usage": True}, + extra_body=extra_body, ) # Variables to accumulate tool calls diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py b/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py index 82ce5cfd6f..284273f3b9 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py @@ -1,8 +1,10 @@ +import logging from typing import TYPE_CHECKING, Any from openai.types.chat import ChatCompletionToolParam from backend.api.features.chat.model import ChatSession +from backend.api.features.chat.tracking import track_tool_called from .add_understanding import AddUnderstandingTool from .agent_output import AgentOutputTool @@ -20,6 +22,8 @@ from .search_docs import SearchDocsTool if TYPE_CHECKING: from backend.api.features.chat.response_model import StreamToolOutputAvailable +logger = logging.getLogger(__name__) + # Single source of truth for all tools TOOL_REGISTRY: dict[str, BaseTool] = { "add_understanding": AddUnderstandingTool(), @@ -56,4 +60,17 @@ async def execute_tool( tool = TOOL_REGISTRY.get(tool_name) if not tool: raise ValueError(f"Tool {tool_name} not found") + + # Track tool call in PostHog + logger.info( + f"Tracking tool call: tool={tool_name}, user={user_id}, " + f"session={session.session_id}, call_id={tool_call_id}" + ) + track_tool_called( + user_id=user_id, + session_id=session.session_id, + tool_name=tool_name, + tool_call_id=tool_call_id, + ) + return await tool.execute(user_id, session, tool_call_id, **parameters) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py index b212c11e8a..88d432a797 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py @@ -8,6 +8,10 @@ from pydantic import BaseModel, Field, field_validator from backend.api.features.chat.config import ChatConfig from backend.api.features.chat.model import ChatSession +from backend.api.features.chat.tracking import ( + track_agent_run_success, + track_agent_scheduled, +) from backend.api.features.library import db as library_db from backend.data.graph import GraphModel from backend.data.model import CredentialsMetaInput @@ -453,6 +457,16 @@ class RunAgentTool(BaseTool): session.successful_agent_runs.get(library_agent.graph_id, 0) + 1 ) + # Track in PostHog + track_agent_run_success( + user_id=user_id, + session_id=session_id, + graph_id=library_agent.graph_id, + graph_name=library_agent.name, + execution_id=execution.id, + library_agent_id=library_agent.id, + ) + library_agent_link = f"/library/agents/{library_agent.id}" return ExecutionStartedResponse( message=( @@ -534,6 +548,18 @@ class RunAgentTool(BaseTool): session.successful_agent_schedules.get(library_agent.graph_id, 0) + 1 ) + # Track in PostHog + track_agent_scheduled( + user_id=user_id, + session_id=session_id, + graph_id=library_agent.graph_id, + graph_name=library_agent.name, + schedule_id=result.id, + schedule_name=schedule_name, + cron=cron, + library_agent_id=library_agent.id, + ) + 
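The `track_*` calls above are safe to sprinkle through tool code because every helper in `tracking.py` (added later in this patch) follows the same fail-safe shape: no configured client means a no-op, and capture errors are logged, never raised. A condensed sketch of that pattern (the helper name and log wording are illustrative, not the exact module code):

```python
import logging
from typing import Any

from posthog import Posthog

logger = logging.getLogger(__name__)


def _capture_safely(
    client: Posthog | None,
    distinct_id: str,
    event: str,
    properties: dict[str, Any],
) -> None:
    """Fire-and-forget capture: analytics must never break a tool call."""
    if not client:
        # PostHog not configured -> tracking is silently skipped
        return
    try:
        client.capture(distinct_id=distinct_id, event=event, properties=properties)
    except Exception as e:
        # Log and swallow: a PostHog outage should not fail the agent run
        logger.warning(f"Failed to track {event}: {e}")
```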
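Similarly, both chat-completion call sites in `service.py` assemble the same `extra_body` shape; a minimal sketch of that builder, extracted purely for illustration (the function name `build_tracing_extra_body` is hypothetical and not part of the patch):

```python
from typing import Any


def build_tracing_extra_body(
    user_id: str | None,
    session_id: str | None,
    environment: str,
) -> dict[str, Any]:
    """Collect the OpenRouter tracing + PostHog forwarding fields."""
    extra_body: dict[str, Any] = {
        # Forwarded to PostHog by OpenRouter alongside the LLM call
        "posthogProperties": {"environment": environment},
    }
    if user_id:
        extra_body["user"] = user_id[:128]  # OpenRouter caps this field at 128 chars
        extra_body["posthogDistinctId"] = user_id
    if session_id:
        extra_body["session_id"] = session_id[:128]  # OpenRouter limit
    return extra_body
```

Passing the result via `client.chat.completions.create(..., extra_body=...)` works because the OpenAI SDK rejects unknown top-level kwargs but merges `extra_body` into the request payload, which is how the OpenRouter-specific fields reach the provider.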
library_agent_link = f"/library/agents/{library_agent.id}" return ExecutionStartedResponse( message=( diff --git a/autogpt_platform/backend/backend/api/features/chat/tracking.py b/autogpt_platform/backend/backend/api/features/chat/tracking.py new file mode 100644 index 0000000000..b2c0fd032f --- /dev/null +++ b/autogpt_platform/backend/backend/api/features/chat/tracking.py @@ -0,0 +1,250 @@ +"""PostHog analytics tracking for the chat system.""" + +import atexit +import logging +from typing import Any + +from posthog import Posthog + +from backend.util.settings import Settings + +logger = logging.getLogger(__name__) +settings = Settings() + +# PostHog client instance (lazily initialized) +_posthog_client: Posthog | None = None + + +def _shutdown_posthog() -> None: + """Flush and shutdown PostHog client on process exit.""" + if _posthog_client is not None: + _posthog_client.flush() + _posthog_client.shutdown() + + +atexit.register(_shutdown_posthog) + + +def _get_posthog_client() -> Posthog | None: + """Get or create the PostHog client instance.""" + global _posthog_client + if _posthog_client is not None: + return _posthog_client + + if not settings.secrets.posthog_api_key: + logger.debug("PostHog API key not configured, analytics disabled") + return None + + _posthog_client = Posthog( + settings.secrets.posthog_api_key, + host=settings.secrets.posthog_host, + ) + logger.info( + f"PostHog client initialized with host: {settings.secrets.posthog_host}" + ) + return _posthog_client + + +def _get_base_properties() -> dict[str, Any]: + """Get base properties included in all events.""" + return { + "environment": settings.config.app_env.value, + "source": "chat_copilot", + } + + +def track_user_message( + user_id: str | None, + session_id: str, + message_length: int, +) -> None: + """Track when a user sends a message in chat. + + Args: + user_id: The user's ID (or None for anonymous) + session_id: The chat session ID + message_length: Length of the user's message + """ + client = _get_posthog_client() + if not client: + return + + try: + properties = { + **_get_base_properties(), + "session_id": session_id, + "message_length": message_length, + } + client.capture( + distinct_id=user_id or f"anonymous_{session_id}", + event="copilot_message_sent", + properties=properties, + ) + except Exception as e: + logger.warning(f"Failed to track user message: {e}") + + +def track_tool_called( + user_id: str | None, + session_id: str, + tool_name: str, + tool_call_id: str, +) -> None: + """Track when a tool is called in chat. 
+ + Args: + user_id: The user's ID (or None for anonymous) + session_id: The chat session ID + tool_name: Name of the tool being called + tool_call_id: Unique ID of the tool call + """ + client = _get_posthog_client() + if not client: + logger.info("PostHog client not available for tool tracking") + return + + try: + properties = { + **_get_base_properties(), + "session_id": session_id, + "tool_name": tool_name, + "tool_call_id": tool_call_id, + } + distinct_id = user_id or f"anonymous_{session_id}" + logger.info( + f"Sending copilot_tool_called event to PostHog: distinct_id={distinct_id}, " + f"tool_name={tool_name}" + ) + client.capture( + distinct_id=distinct_id, + event="copilot_tool_called", + properties=properties, + ) + except Exception as e: + logger.warning(f"Failed to track tool call: {e}") + + +def track_agent_run_success( + user_id: str, + session_id: str, + graph_id: str, + graph_name: str, + execution_id: str, + library_agent_id: str, +) -> None: + """Track when an agent is successfully run. + + Args: + user_id: The user's ID + session_id: The chat session ID + graph_id: ID of the agent graph + graph_name: Name of the agent + execution_id: ID of the execution + library_agent_id: ID of the library agent + """ + client = _get_posthog_client() + if not client: + return + + try: + properties = { + **_get_base_properties(), + "session_id": session_id, + "graph_id": graph_id, + "graph_name": graph_name, + "execution_id": execution_id, + "library_agent_id": library_agent_id, + } + client.capture( + distinct_id=user_id, + event="copilot_agent_run_success", + properties=properties, + ) + except Exception as e: + logger.warning(f"Failed to track agent run: {e}") + + +def track_agent_scheduled( + user_id: str, + session_id: str, + graph_id: str, + graph_name: str, + schedule_id: str, + schedule_name: str, + cron: str, + library_agent_id: str, +) -> None: + """Track when an agent is successfully scheduled. + + Args: + user_id: The user's ID + session_id: The chat session ID + graph_id: ID of the agent graph + graph_name: Name of the agent + schedule_id: ID of the schedule + schedule_name: Name of the schedule + cron: Cron expression for the schedule + library_agent_id: ID of the library agent + """ + client = _get_posthog_client() + if not client: + return + + try: + properties = { + **_get_base_properties(), + "session_id": session_id, + "graph_id": graph_id, + "graph_name": graph_name, + "schedule_id": schedule_id, + "schedule_name": schedule_name, + "cron": cron, + "library_agent_id": library_agent_id, + } + client.capture( + distinct_id=user_id, + event="copilot_agent_scheduled", + properties=properties, + ) + except Exception as e: + logger.warning(f"Failed to track agent schedule: {e}") + + +def track_trigger_setup( + user_id: str, + session_id: str, + graph_id: str, + graph_name: str, + trigger_type: str, + library_agent_id: str, +) -> None: + """Track when a trigger is set up for an agent. 
+ + Args: + user_id: The user's ID + session_id: The chat session ID + graph_id: ID of the agent graph + graph_name: Name of the agent + trigger_type: Type of trigger (e.g., 'webhook') + library_agent_id: ID of the library agent + """ + client = _get_posthog_client() + if not client: + return + + try: + properties = { + **_get_base_properties(), + "session_id": session_id, + "graph_id": graph_id, + "graph_name": graph_name, + "trigger_type": trigger_type, + "library_agent_id": library_agent_id, + } + client.capture( + distinct_id=user_id, + event="copilot_trigger_setup", + properties=properties, + ) + except Exception as e: + logger.warning(f"Failed to track trigger setup: {e}") diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index 2972fc07c2..8d34292803 100644 --- a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -679,6 +679,12 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings): default="https://cloud.langfuse.com", description="Langfuse host URL" ) + # PostHog analytics + posthog_api_key: str = Field(default="", description="PostHog API key") + posthog_host: str = Field( + default="https://eu.i.posthog.com", description="PostHog host URL" + ) + # Add more secret fields as needed model_config = SettingsConfigDict( env_file=".env", diff --git a/autogpt_platform/backend/poetry.lock b/autogpt_platform/backend/poetry.lock index 2aa55ce5b6..91ac358ade 100644 --- a/autogpt_platform/backend/poetry.lock +++ b/autogpt_platform/backend/poetry.lock @@ -4204,14 +4204,14 @@ strenum = {version = ">=0.4.9,<0.5.0", markers = "python_version < \"3.11\""} [[package]] name = "posthog" -version = "6.1.1" +version = "7.6.0" description = "Integrate PostHog into any python application." 
optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["main"] files = [ - {file = "posthog-6.1.1-py3-none-any.whl", hash = "sha256:329fd3d06b4d54cec925f47235bd8e327c91403c2f9ec38f1deb849535934dba"}, - {file = "posthog-6.1.1.tar.gz", hash = "sha256:b453f54c4a2589da859fd575dd3bf86fcb40580727ec399535f268b1b9f318b8"}, + {file = "posthog-7.6.0-py3-none-any.whl", hash = "sha256:c4dd78cf77c4fecceb965f86066e5ac37886ef867d68ffe75a1db5d681d7d9ad"}, + {file = "posthog-7.6.0.tar.gz", hash = "sha256:941dfd278ee427c9b14640f09b35b5bb52a71bdf028d7dbb7307e1838fd3002e"}, ] [package.dependencies] @@ -4225,7 +4225,7 @@ typing-extensions = ">=4.2.0" [package.extras] dev = ["django-stubs", "lxml", "mypy", "mypy-baseline", "packaging", "pre-commit", "pydantic", "ruff", "setuptools", "tomli", "tomli_w", "twine", "types-mock", "types-python-dateutil", "types-requests", "types-setuptools", "types-six", "wheel"] langchain = ["langchain (>=0.2.0)"] -test = ["anthropic", "coverage", "django", "freezegun (==1.5.1)", "google-genai", "langchain-anthropic (>=0.3.15)", "langchain-community (>=0.3.25)", "langchain-core (>=0.3.65)", "langchain-openai (>=0.3.22)", "langgraph (>=0.4.8)", "mock (>=2.0.0)", "openai", "parameterized (>=0.8.1)", "pydantic", "pytest", "pytest-asyncio", "pytest-timeout"] +test = ["anthropic (>=0.72)", "coverage", "django", "freezegun (==1.5.1)", "google-genai", "langchain-anthropic (>=1.0)", "langchain-community (>=0.4)", "langchain-core (>=1.0)", "langchain-openai (>=1.0)", "langgraph (>=1.0)", "mock (>=2.0.0)", "openai (>=2.0)", "parameterized (>=0.8.1)", "pydantic", "pytest", "pytest-asyncio", "pytest-timeout"] [[package]] name = "postmarker" @@ -7512,4 +7512,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.1" python-versions = ">=3.10,<3.14" -content-hash = "18b92e09596298c82432e4d0a85cb6d80a40b4229bee0a0c15f0529fd6cb21a4" +content-hash = "ee5742dc1a9df50dfc06d4b26a1682cbb2b25cab6b79ce5625ec272f93e4f4bf" diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml index 1f489d2bc4..fe263e47c0 100644 --- a/autogpt_platform/backend/pyproject.toml +++ b/autogpt_platform/backend/pyproject.toml @@ -85,6 +85,7 @@ exa-py = "^1.14.20" croniter = "^6.0.0" stagehand = "^0.5.1" gravitas-md2gdocs = "^0.1.0" +posthog = "^7.6.0" [tool.poetry.group.dev.dependencies] aiohappyeyeballs = "^2.6.1" diff --git a/autogpt_platform/frontend/.env.default b/autogpt_platform/frontend/.env.default index 197a37e8bb..af250fb8bf 100644 --- a/autogpt_platform/frontend/.env.default +++ b/autogpt_platform/frontend/.env.default @@ -30,3 +30,7 @@ NEXT_PUBLIC_TURNSTILE=disabled # PR previews NEXT_PUBLIC_PREVIEW_STEALING_DEV= + +# PostHog Analytics +NEXT_PUBLIC_POSTHOG_KEY= +NEXT_PUBLIC_POSTHOG_HOST=https://eu.i.posthog.com diff --git a/autogpt_platform/frontend/package.json b/autogpt_platform/frontend/package.json index bc1e2d7443..f22a182d20 100644 --- a/autogpt_platform/frontend/package.json +++ b/autogpt_platform/frontend/package.json @@ -34,6 +34,7 @@ "@hookform/resolvers": "5.2.2", "@next/third-parties": "15.4.6", "@phosphor-icons/react": "2.1.10", + "@posthog/react": "1.7.0", "@radix-ui/react-accordion": "1.2.12", "@radix-ui/react-alert-dialog": "1.1.15", "@radix-ui/react-avatar": "1.1.10", @@ -91,6 +92,7 @@ "next-themes": "0.4.6", "nuqs": "2.7.2", "party-js": "2.2.0", + "posthog-js": "1.334.1", "react": "18.3.1", "react-currency-input-field": "4.0.3", "react-day-picker": "9.11.1", @@ -120,7 +122,6 @@ }, "devDependencies": { "@chromatic-com/storybook": 
"4.1.2", - "happy-dom": "20.3.4", "@opentelemetry/instrumentation": "0.209.0", "@playwright/test": "1.56.1", "@storybook/addon-a11y": "9.1.5", @@ -148,6 +149,7 @@ "eslint": "8.57.1", "eslint-config-next": "15.5.7", "eslint-plugin-storybook": "9.1.5", + "happy-dom": "20.3.4", "import-in-the-middle": "2.0.2", "msw": "2.11.6", "msw-storybook-addon": "2.0.6", diff --git a/autogpt_platform/frontend/pnpm-lock.yaml b/autogpt_platform/frontend/pnpm-lock.yaml index 8e83289f03..db891ccf3f 100644 --- a/autogpt_platform/frontend/pnpm-lock.yaml +++ b/autogpt_platform/frontend/pnpm-lock.yaml @@ -23,6 +23,9 @@ importers: '@phosphor-icons/react': specifier: 2.1.10 version: 2.1.10(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@posthog/react': + specifier: 1.7.0 + version: 1.7.0(@types/react@18.3.17)(posthog-js@1.334.1)(react@18.3.1) '@radix-ui/react-accordion': specifier: 1.2.12 version: 1.2.12(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -194,6 +197,9 @@ importers: party-js: specifier: 2.2.0 version: 2.2.0 + posthog-js: + specifier: 1.334.1 + version: 1.334.1 react: specifier: 18.3.1 version: 18.3.1 @@ -1794,6 +1800,10 @@ packages: '@open-draft/until@2.1.0': resolution: {integrity: sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg==} + '@opentelemetry/api-logs@0.208.0': + resolution: {integrity: sha512-CjruKY9V6NMssL/T1kAFgzosF1v9o6oeN+aX5JB/C/xPNtmgIJqcXHG7fA82Ou1zCpWGl4lROQUKwUNE1pMCyg==} + engines: {node: '>=8.0.0'} + '@opentelemetry/api-logs@0.209.0': resolution: {integrity: sha512-xomnUNi7TiAGtOgs0tb54LyrjRZLu9shJGGwkcN7NgtiPYOpNnKLkRJtzZvTjD/w6knSZH9sFZcUSUovYOPg6A==} engines: {node: '>=8.0.0'} @@ -1814,6 +1824,12 @@ packages: peerDependencies: '@opentelemetry/api': '>=1.0.0 <1.10.0' + '@opentelemetry/exporter-logs-otlp-http@0.208.0': + resolution: {integrity: sha512-jOv40Bs9jy9bZVLo/i8FwUiuCvbjWDI+ZW13wimJm4LjnlwJxGgB+N/VWOZUTpM+ah/awXeQqKdNlpLf2EjvYg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + '@opentelemetry/instrumentation-amqplib@0.55.0': resolution: {integrity: sha512-5ULoU8p+tWcQw5PDYZn8rySptGSLZHNX/7srqo2TioPnAAcvTy6sQFQXsNPrAnyRRtYGMetXVyZUy5OaX1+IfA==} engines: {node: ^18.19.0 || >=20.6.0} @@ -1952,6 +1968,18 @@ packages: peerDependencies: '@opentelemetry/api': ^1.3.0 + '@opentelemetry/otlp-exporter-base@0.208.0': + resolution: {integrity: sha512-gMd39gIfVb2OgxldxUtOwGJYSH8P1kVFFlJLuut32L6KgUC4gl1dMhn+YC2mGn0bDOiQYSk/uHOdSjuKp58vvA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/otlp-transformer@0.208.0': + resolution: {integrity: sha512-DCFPY8C6lAQHUNkzcNT9R+qYExvsk6C5Bto2pbNxgicpcSWbe2WHShLxkOxIdNcBiYPdVHv/e7vH7K6TI+C+fQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + '@opentelemetry/redis-common@0.38.2': resolution: {integrity: sha512-1BCcU93iwSRZvDAgwUxC/DV4T/406SkMfxGqu5ojc3AvNI+I9GhV7v0J1HljsczuuhcnFLYqD5VmwVXfCGHzxA==} engines: {node: ^18.19.0 || >=20.6.0} @@ -1962,6 +1990,18 @@ packages: peerDependencies: '@opentelemetry/api': '>=1.3.0 <1.10.0' + '@opentelemetry/sdk-logs@0.208.0': + resolution: {integrity: sha512-QlAyL1jRpOeaqx7/leG1vJMp84g0xKP6gJmfELBpnI4O/9xPX+Hu5m1POk9Kl+veNkyth5t19hRlN6tNY1sjbA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.4.0 <1.10.0' + + '@opentelemetry/sdk-metrics@2.2.0': + resolution: {integrity: 
sha512-G5KYP6+VJMZzpGipQw7Giif48h6SGQ2PFKEYCybeXJsOCB4fp8azqMAAzE5lnnHK3ZVwYQrgmFbsUJO/zOnwGw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.9.0 <1.10.0' + '@opentelemetry/sdk-trace-base@2.2.0': resolution: {integrity: sha512-xWQgL0Bmctsalg6PaXExmzdedSp3gyKV8mQBwK/j9VGdCDu2fmXIb2gAehBKbkXCpJ4HPkgv3QfoJWRT4dHWbw==} engines: {node: ^18.19.0 || >=20.6.0} @@ -2050,11 +2090,57 @@ packages: webpack-plugin-serve: optional: true + '@posthog/core@1.13.0': + resolution: {integrity: sha512-knjncrk7qRmssFRbGzBl1Tunt21GRpe0Wv+uVelyL0Rh7PdQUsgguulzXFTps8hA6wPwTU4kq85qnbAJ3eH6Wg==} + + '@posthog/react@1.7.0': + resolution: {integrity: sha512-pM7GL7z/rKjiIwosbRiQA3buhLI6vUo+wg+T/ZrVZC7O5bVU07TfgNZTcuOj8E9dx7vDbfNrc1kjDN7PKMM8ug==} + peerDependencies: + '@types/react': '>=16.8.0' + posthog-js: '>=1.257.2' + react: '>=16.8.0' + peerDependenciesMeta: + '@types/react': + optional: true + + '@posthog/types@1.334.1': + resolution: {integrity: sha512-ypFnwTO7qbV7icylLbujbamPdQXbJq0a61GUUBnJAeTbBw/qYPIss5IRYICcbCj0uunQrwD7/CGxVb5TOYKWgA==} + '@prisma/instrumentation@6.19.0': resolution: {integrity: sha512-QcuYy25pkXM8BJ37wVFBO7Zh34nyRV1GOb2n3lPkkbRYfl4hWl3PTcImP41P0KrzVXfa/45p6eVCos27x3exIg==} peerDependencies: '@opentelemetry/api': ^1.8 + '@protobufjs/aspromise@1.1.2': + resolution: {integrity: sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==} + + '@protobufjs/base64@1.1.2': + resolution: {integrity: sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==} + + '@protobufjs/codegen@2.0.4': + resolution: {integrity: sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==} + + '@protobufjs/eventemitter@1.1.0': + resolution: {integrity: sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==} + + '@protobufjs/fetch@1.1.0': + resolution: {integrity: sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==} + + '@protobufjs/float@1.0.2': + resolution: {integrity: sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==} + + '@protobufjs/inquire@1.1.0': + resolution: {integrity: sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==} + + '@protobufjs/path@1.1.2': + resolution: {integrity: sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==} + + '@protobufjs/pool@1.1.0': + resolution: {integrity: sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==} + + '@protobufjs/utf8@1.1.0': + resolution: {integrity: sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==} + '@radix-ui/number@1.1.1': resolution: {integrity: sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g==} @@ -3401,6 +3487,9 @@ packages: '@types/tedious@4.0.14': resolution: {integrity: sha512-KHPsfX/FoVbUGbyYvk1q9MMQHLPeRZhRJZdO45Q4YjvFkv4hMNghCWTvy7rdKessBsmtz4euWCWAB6/tVpI1Iw==} + '@types/trusted-types@2.0.7': + resolution: {integrity: sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==} + '@types/unist@2.0.11': resolution: {integrity: sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==} @@ -4278,6 +4367,9 @@ packages: core-js-pure@3.47.0: resolution: {integrity: 
sha512-BcxeDbzUrRnXGYIVAGFtcGQVNpFcUhVjr6W7F8XktvQW2iJP9e66GP6xdKotCRFlrxBvNIBrhwKteRXqMV86Nw==} + core-js@3.48.0: + resolution: {integrity: sha512-zpEHTy1fjTMZCKLHUZoVeylt9XrzaIN2rbPXEt0k+q7JE5CkCZdo6bNq55bn24a69CH7ErAVLKijxJja4fw+UQ==} + core-util-is@1.0.3: resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==} @@ -4569,6 +4661,9 @@ packages: resolution: {integrity: sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==} engines: {node: '>= 4'} + dompurify@3.3.1: + resolution: {integrity: sha512-qkdCKzLNtrgPFP1Vo+98FRzJnBRGe4ffyCea9IwHB1fyxPOeNTHpLKYGd4Uk9xvNoH0ZoOjwZxNptyMwqrId1Q==} + domutils@2.8.0: resolution: {integrity: sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==} @@ -4939,6 +5034,9 @@ packages: picomatch: optional: true + fflate@0.4.8: + resolution: {integrity: sha512-FJqqoDBR00Mdj9ppamLa/Y7vxm+PRmNWA67N846RvsoYVMKB4q3y/de5PA7gUmRMYK/8CMz2GDZQmCRN1wBcWA==} + file-entry-cache@6.0.1: resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==} engines: {node: ^10.12.0 || >=12.0.0} @@ -5745,6 +5843,9 @@ packages: resolution: {integrity: sha512-HgMmCqIJSAKqo68l0rS2AanEWfkxaZ5wNiEFb5ggm08lDs9Xl2KxBlX3PTcaD2chBM1gXAYf491/M2Rv8Jwayg==} engines: {node: '>= 0.6.0'} + long@5.3.2: + resolution: {integrity: sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==} + longest-streak@3.1.0: resolution: {integrity: sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==} @@ -6534,6 +6635,12 @@ packages: resolution: {integrity: sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==} engines: {node: '>=0.10.0'} + posthog-js@1.334.1: + resolution: {integrity: sha512-5cDzLICr2afnwX/cR9fwoLC0vN0Nb5gP5HiCigzHkgHdO+E3WsYefla3EFMQz7U4r01CBPZ+nZ9/srkzeACxtQ==} + + preact@10.28.2: + resolution: {integrity: sha512-lbteaWGzGHdlIuiJ0l2Jq454m6kcpI1zNje6d8MlGAFlYvP2GO4ibnat7P74Esfz4sPTdM6UxtTwh/d3pwM9JA==} + prelude-ls@1.2.1: resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} engines: {node: '>= 0.8.0'} @@ -6622,6 +6729,10 @@ packages: property-information@7.1.0: resolution: {integrity: sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==} + protobufjs@7.5.4: + resolution: {integrity: sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==} + engines: {node: '>=12.0.0'} + proxy-from-env@1.1.0: resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} @@ -6643,6 +6754,9 @@ packages: resolution: {integrity: sha512-4EK3+xJl8Ts67nLYNwqw/dsFVnCf+qR7RgXSK9jEEm9unao3njwMDdmsdvoKBKHzxd7tCYz5e5M+SnMjdtXGQQ==} engines: {node: '>=0.6'} + query-selector-shadow-dom@1.0.1: + resolution: {integrity: sha512-lT5yCqEBgfoMYpf3F2xQRK7zEr1rhIIZuceDK6+xRkJQ4NMbHTwXqk4NkwDwQMNqXgG9r9fyHnzwNVs6zV5KRw==} + querystring-es3@0.2.1: resolution: {integrity: sha512-773xhDQnZBMFobEiztv8LIl70ch5MSF/jUQVlhwFyBILqq96anmoctVIYz+ZRp0qbCKATTn6ev02M3r7Ga5vqA==} engines: {node: '>=0.4.x'} @@ -7821,6 +7935,9 @@ packages: web-namespaces@2.0.1: resolution: {integrity: sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==} + web-vitals@5.1.0: + resolution: {integrity: 
sha512-ArI3kx5jI0atlTtmV0fWU3fjpLmq/nD3Zr1iFFlJLaqa5wLBkUSzINwBPySCX/8jRyjlmy1Volw1kz1g9XE4Jg==} + webidl-conversions@3.0.1: resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==} @@ -9420,6 +9537,10 @@ snapshots: '@open-draft/until@2.1.0': {} + '@opentelemetry/api-logs@0.208.0': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs@0.209.0': dependencies: '@opentelemetry/api': 1.9.0 @@ -9435,6 +9556,15 @@ snapshots: '@opentelemetry/api': 1.9.0 '@opentelemetry/semantic-conventions': 1.38.0 + '@opentelemetry/exporter-logs-otlp-http@0.208.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.208.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-amqplib@0.55.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 @@ -9629,6 +9759,23 @@ snapshots: transitivePeerDependencies: - supports-color + '@opentelemetry/otlp-exporter-base@0.208.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.208.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/otlp-transformer@0.208.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.208.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0) + protobufjs: 7.5.4 + '@opentelemetry/redis-common@0.38.2': {} '@opentelemetry/resources@2.2.0(@opentelemetry/api@1.9.0)': @@ -9637,6 +9784,19 @@ snapshots: '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.38.0 + '@opentelemetry/sdk-logs@0.208.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.208.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-metrics@2.2.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 @@ -9801,6 +9961,19 @@ snapshots: type-fest: 4.41.0 webpack-hot-middleware: 2.26.1 + '@posthog/core@1.13.0': + dependencies: + cross-spawn: 7.0.6 + + '@posthog/react@1.7.0(@types/react@18.3.17)(posthog-js@1.334.1)(react@18.3.1)': + dependencies: + posthog-js: 1.334.1 + react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.17 + + '@posthog/types@1.334.1': {} + '@prisma/instrumentation@6.19.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 @@ -9808,6 +9981,29 @@ snapshots: transitivePeerDependencies: - supports-color + '@protobufjs/aspromise@1.1.2': {} + + '@protobufjs/base64@1.1.2': {} + + '@protobufjs/codegen@2.0.4': {} + + '@protobufjs/eventemitter@1.1.0': {} + + '@protobufjs/fetch@1.1.0': + dependencies: + '@protobufjs/aspromise': 
1.1.2 + '@protobufjs/inquire': 1.1.0 + + '@protobufjs/float@1.0.2': {} + + '@protobufjs/inquire@1.1.0': {} + + '@protobufjs/path@1.1.2': {} + + '@protobufjs/pool@1.1.0': {} + + '@protobufjs/utf8@1.1.0': {} + '@radix-ui/number@1.1.1': {} '@radix-ui/primitive@1.1.3': {} @@ -11426,6 +11622,9 @@ snapshots: dependencies: '@types/node': 24.10.0 + '@types/trusted-types@2.0.7': + optional: true + '@types/unist@2.0.11': {} '@types/unist@3.0.3': {} @@ -12327,6 +12526,8 @@ snapshots: core-js-pure@3.47.0: {} + core-js@3.48.0: {} + core-util-is@1.0.3: {} cosmiconfig@7.1.0: @@ -12636,6 +12837,10 @@ snapshots: dependencies: domelementtype: 2.3.0 + dompurify@3.3.1: + optionalDependencies: + '@types/trusted-types': 2.0.7 + domutils@2.8.0: dependencies: dom-serializer: 1.4.1 @@ -13205,6 +13410,8 @@ snapshots: optionalDependencies: picomatch: 4.0.3 + fflate@0.4.8: {} + file-entry-cache@6.0.1: dependencies: flat-cache: 3.2.0 @@ -14092,6 +14299,8 @@ snapshots: loglevel@1.9.2: {} + long@5.3.2: {} + longest-streak@3.1.0: {} loose-envify@1.4.0: @@ -15154,6 +15363,24 @@ snapshots: dependencies: xtend: 4.0.2 + posthog-js@1.334.1: + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.208.0 + '@opentelemetry/exporter-logs-otlp-http': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.208.0(@opentelemetry/api@1.9.0) + '@posthog/core': 1.13.0 + '@posthog/types': 1.334.1 + core-js: 3.48.0 + dompurify: 3.3.1 + fflate: 0.4.8 + preact: 10.28.2 + query-selector-shadow-dom: 1.0.1 + web-vitals: 5.1.0 + + preact@10.28.2: {} + prelude-ls@1.2.1: {} prettier-plugin-tailwindcss@0.7.1(prettier@3.6.2): @@ -15187,6 +15414,21 @@ snapshots: property-information@7.1.0: {} + protobufjs@7.5.4: + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/base64': 1.1.2 + '@protobufjs/codegen': 2.0.4 + '@protobufjs/eventemitter': 1.1.0 + '@protobufjs/fetch': 1.1.0 + '@protobufjs/float': 1.0.2 + '@protobufjs/inquire': 1.1.0 + '@protobufjs/path': 1.1.2 + '@protobufjs/pool': 1.1.0 + '@protobufjs/utf8': 1.1.0 + '@types/node': 24.10.0 + long: 5.3.2 + proxy-from-env@1.1.0: {} public-encrypt@4.0.3: @@ -15208,6 +15450,8 @@ snapshots: dependencies: side-channel: 1.1.0 + query-selector-shadow-dom@1.0.1: {} + querystring-es3@0.2.1: {} queue-microtask@1.2.3: {} @@ -16619,6 +16863,8 @@ snapshots: web-namespaces@2.0.1: {} + web-vitals@5.1.0: {} + webidl-conversions@3.0.1: {} webidl-conversions@8.0.1: diff --git a/autogpt_platform/frontend/src/app/providers.tsx b/autogpt_platform/frontend/src/app/providers.tsx index 8ea199abc8..267814e7c2 100644 --- a/autogpt_platform/frontend/src/app/providers.tsx +++ b/autogpt_platform/frontend/src/app/providers.tsx @@ -6,28 +6,40 @@ import { BackendAPIProvider } from "@/lib/autogpt-server-api/context"; import { getQueryClient } from "@/lib/react-query/queryClient"; import CredentialsProvider from "@/providers/agent-credentials/credentials-provider"; import OnboardingProvider from "@/providers/onboarding/onboarding-provider"; +import { + PostHogPageViewTracker, + PostHogProvider, + PostHogUserTracker, +} from "@/providers/posthog/posthog-provider"; import { LaunchDarklyProvider } from "@/services/feature-flags/feature-flag-provider"; import { QueryClientProvider } from "@tanstack/react-query"; import { ThemeProvider, ThemeProviderProps } from "next-themes"; import { NuqsAdapter } from "nuqs/adapters/next/app"; +import { Suspense } from "react"; export function Providers({ children, ...props }: ThemeProviderProps) { 
const queryClient = getQueryClient(); return ( - - - - - - - {children} - - - - - + + + + + + + + + + + + {children} + + + + + + ); diff --git a/autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx b/autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx new file mode 100644 index 0000000000..58ee2394ef --- /dev/null +++ b/autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx @@ -0,0 +1,64 @@ +"use client"; + +import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; +import { PostHogProvider as PHProvider } from "@posthog/react"; +import { usePathname, useSearchParams } from "next/navigation"; +import posthog from "posthog-js"; +import { ReactNode, useEffect, useRef } from "react"; + +export function PostHogProvider({ children }: { children: ReactNode }) { + useEffect(() => { + if (process.env.NEXT_PUBLIC_POSTHOG_KEY) { + posthog.init(process.env.NEXT_PUBLIC_POSTHOG_KEY, { + api_host: process.env.NEXT_PUBLIC_POSTHOG_HOST, + defaults: "2025-11-30", + capture_pageview: false, + capture_pageleave: true, + autocapture: true, + }); + } + }, []); + + return {children}; +} + +export function PostHogUserTracker() { + const { user, isUserLoading } = useSupabase(); + const previousUserIdRef = useRef(null); + + useEffect(() => { + if (isUserLoading) return; + + if (user) { + if (previousUserIdRef.current !== user.id) { + posthog.identify(user.id, { + email: user.email, + ...(user.user_metadata?.name && { name: user.user_metadata.name }), + }); + previousUserIdRef.current = user.id; + } + } else if (previousUserIdRef.current !== null) { + posthog.reset(); + previousUserIdRef.current = null; + } + }, [user, isUserLoading]); + + return null; +} + +export function PostHogPageViewTracker() { + const pathname = usePathname(); + const searchParams = useSearchParams(); + + useEffect(() => { + if (pathname) { + let url = window.origin + pathname; + if (searchParams && searchParams.toString()) { + url = url + `?${searchParams.toString()}`; + } + posthog.capture("$pageview", { $current_url: url }); + } + }, [pathname, searchParams]); + + return null; +} From f0c25036082a5e53650ae7230bcfcf9309bbb5d5 Mon Sep 17 00:00:00 2001 From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com> Date: Mon, 26 Jan 2026 18:03:22 +0530 Subject: [PATCH 02/21] feat(frontend): support multiple node execution results and accumulated data display (#11834) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes 🏗️ - Refactored node execution results storage to maintain a history of executions instead of just the latest result - Added support for viewing accumulated output data across multiple executions - Implemented a cleaner UI for viewing historical execution results with proper grouping - Added functionality to clear execution results when starting a new run - Created helper functions to normalize and process execution data consistently - Updated the NodeDataViewer component to display both latest and historical execution data - Added ability to view input data alongside output data in the execution history ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Create and run a flow with multiple blocks that produce output - [x] Verify that execution results are properly accumulated and displayed - [x] Run the same flow multiple times and confirm historical data is preserved - [x] Test 
the "View more data" functionality to ensure it displays all execution history - [x] Verify that execution results are properly cleared when starting a new run --- .../components/AgentOutputs/AgentOutputs.tsx | 8 +- .../RunInputDialog/useRunInputDialog.ts | 3 + .../nodes/CustomNode/CustomNode.tsx | 8 +- .../components/NodeOutput/NodeOutput.tsx | 136 +++++------ .../NodeDataViewer/NodeDataViewer.tsx | 199 +++++++++++++--- .../NodeDataViewer/useNodeDataViewer.ts | 160 +++++++------ .../NodeOutput/components/ViewMoreData.tsx | 189 +++++++++------ .../components/NodeOutput/helpers.ts | 83 +++++++ .../components/NodeOutput/useNodeOutput.tsx | 22 +- .../SubAgentUpdate/useSubAgentUpdateState.ts | 6 +- .../FlowEditor/nodes/CustomNode/helpers.ts | 2 +- .../app/(platform)/build/stores/helpers.ts | 16 ++ .../app/(platform)/build/stores/nodeStore.ts | 217 +++++++++++++++--- .../src/app/(platform)/build/stores/types.ts | 14 ++ 14 files changed, 782 insertions(+), 281 deletions(-) create mode 100644 autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/helpers.ts create mode 100644 autogpt_platform/frontend/src/app/(platform)/build/stores/helpers.ts create mode 100644 autogpt_platform/frontend/src/app/(platform)/build/stores/types.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx index cfea5d9452..8ec1ba8be3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx @@ -38,8 +38,12 @@ export const AgentOutputs = ({ flowID }: { flowID: string | null }) => { return outputNodes .map((node) => { - const executionResult = node.data.nodeExecutionResult; - const outputData = executionResult?.output_data?.output; + const executionResults = node.data.nodeExecutionResults || []; + const latestResult = + executionResults.length > 0 + ? executionResults[executionResults.length - 1] + : undefined; + const outputData = latestResult?.output_data?.output; const renderer = globalRegistry.getRenderer(outputData); diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/useRunInputDialog.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/useRunInputDialog.ts index 0eba6e8188..629d4662a9 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/useRunInputDialog.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/useRunInputDialog.ts @@ -153,6 +153,9 @@ export const useRunInputDialog = ({ Object.entries(credentialValues).filter(([_, cred]) => cred && cred.id), ); + useNodeStore.getState().clearAllNodeExecutionResults(); + useNodeStore.getState().cleanNodesStatuses(); + await executeGraph({ graphId: flowID ?? 
"", graphVersion: flowVersion || null, diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx index 6306582c3b..d4aa26480d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx @@ -34,7 +34,7 @@ export type CustomNodeData = { uiType: BlockUIType; block_id: string; status?: AgentExecutionStatus; - nodeExecutionResult?: NodeExecutionResult; + nodeExecutionResults?: NodeExecutionResult[]; staticOutput?: boolean; // TODO : We need better type safety for the following backend fields. costs: BlockCost[]; @@ -75,7 +75,11 @@ export const CustomNode: React.FC> = React.memo( (value) => value !== null && value !== undefined && value !== "", ); - const outputData = data.nodeExecutionResult?.output_data; + const latestResult = + data.nodeExecutionResults && data.nodeExecutionResults.length > 0 + ? data.nodeExecutionResults[data.nodeExecutionResults.length - 1] + : undefined; + const outputData = latestResult?.output_data; const hasOutputError = typeof outputData === "object" && outputData !== null && diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx index 17134ae299..c5df24e0e6 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx @@ -14,10 +14,15 @@ import { useNodeOutput } from "./useNodeOutput"; import { ViewMoreData } from "./components/ViewMoreData"; export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => { - const { outputData, copiedKey, handleCopy, executionResultId, inputData } = - useNodeOutput(nodeId); + const { + latestOutputData, + copiedKey, + handleCopy, + executionResultId, + latestInputData, + } = useNodeOutput(nodeId); - if (Object.keys(outputData).length === 0) { + if (Object.keys(latestOutputData).length === 0) { return null; } @@ -41,18 +46,19 @@ export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => {
Input - +
- {Object.entries(outputData) + {Object.entries(latestOutputData) .slice(0, 2) - .map(([key, value]) => ( -
-
- - Pin: - - - {beautifyString(key)} - -
-
- - Data: - -
- {value.map((item, index) => ( -
- -
- ))} + .map(([key, value]) => { + return ( +
+
+ + Pin: + + + {beautifyString(key)} + +
+
+ + Data: + +
+ {value.map((item, index) => ( +
+ +
+ ))} -
- - +
+ + +
-
- ))} + ); + })}
- - {Object.keys(outputData).length > 2 && ( - - )} + diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/NodeDataViewer.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/NodeDataViewer.tsx index 0858db8f0e..680b6bc44a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/NodeDataViewer.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/NodeDataViewer.tsx @@ -19,22 +19,51 @@ import { CopyIcon, DownloadIcon, } from "@phosphor-icons/react"; -import { FC } from "react"; +import React, { FC } from "react"; import { useNodeDataViewer } from "./useNodeDataViewer"; +import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore"; +import { useShallow } from "zustand/react/shallow"; +import { NodeDataType } from "../../helpers"; -interface NodeDataViewerProps { - data: any; +export interface NodeDataViewerProps { + data?: any; pinName: string; + nodeId?: string; execId?: string; isViewMoreData?: boolean; + dataType?: NodeDataType; } export const NodeDataViewer: FC = ({ data, pinName, + nodeId, execId = "N/A", isViewMoreData = false, + dataType = "output", }) => { + const executionResults = useNodeStore( + useShallow((state) => + nodeId ? state.getNodeExecutionResults(nodeId) : [], + ), + ); + const latestInputData = useNodeStore( + useShallow((state) => + nodeId ? state.getLatestNodeInputData(nodeId) : undefined, + ), + ); + const accumulatedOutputData = useNodeStore( + useShallow((state) => + nodeId ? state.getAccumulatedNodeOutputData(nodeId) : {}, + ), + ); + + const resolvedData = + data ?? + (dataType === "input" + ? (latestInputData ?? {}) + : (accumulatedOutputData[pinName] ?? [])); + const { outputItems, copyExecutionId, @@ -42,7 +71,20 @@ export const NodeDataViewer: FC = ({ handleDownloadItem, dataArray, copiedIndex, - } = useNodeDataViewer(data, pinName, execId); + groupedExecutions, + totalGroupedItems, + handleCopyGroupedItem, + handleDownloadGroupedItem, + copiedKey, + } = useNodeDataViewer( + resolvedData, + pinName, + execId, + executionResults, + dataType, + ); + + const shouldGroupExecutions = groupedExecutions.length > 0; return ( @@ -68,44 +110,141 @@ export const NodeDataViewer: FC = ({
- Full Output Preview + Full {dataType === "input" ? "Input" : "Output"} Preview
- {dataArray.length} item{dataArray.length !== 1 ? "s" : ""} total + {shouldGroupExecutions ? totalGroupedItems : dataArray.length}{" "} + item + {shouldGroupExecutions + ? totalGroupedItems !== 1 + ? "s" + : "" + : dataArray.length !== 1 + ? "s" + : ""}{" "} + total
-
- - Execution ID: - - - {execId} - - -
-
- Pin:{" "} - {beautifyString(pinName)} -
+ {shouldGroupExecutions ? ( +
+ Pin:{" "} + {beautifyString(pinName)} +
+ ) : ( + <> +
+ + Execution ID: + + + {execId} + + +
+
+ Pin:{" "} + + {beautifyString(pinName)} + +
+ + )}
- {dataArray.length > 0 ? ( + {shouldGroupExecutions ? ( +
+ {groupedExecutions.map((execution) => ( +
+
+ + Execution ID: + + + {execution.execId} + +
+
+ {execution.outputItems.length > 0 ? ( + execution.outputItems.map((item, index) => ( +
+
+ +
+ +
+ + +
+
+ )) + ) : ( +
+ No data available +
+ )} +
+
+ ))} +
+ ) : dataArray.length > 0 ? (
{outputItems.map((item, index) => (
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/useNodeDataViewer.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/useNodeDataViewer.ts index d3c555970c..818d1266c1 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/useNodeDataViewer.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/useNodeDataViewer.ts @@ -1,82 +1,70 @@ -import type { OutputMetadata } from "@/components/contextual/OutputRenderers"; -import { globalRegistry } from "@/components/contextual/OutputRenderers"; import { downloadOutputs } from "@/components/contextual/OutputRenderers/utils/download"; import { useToast } from "@/components/molecules/Toast/use-toast"; import { beautifyString } from "@/lib/utils"; -import React, { useMemo, useState } from "react"; +import { useState } from "react"; +import type { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult"; +import { + NodeDataType, + createOutputItems, + getExecutionData, + normalizeToArray, + type OutputItem, +} from "../../helpers"; + +export type GroupedExecution = { + execId: string; + outputItems: Array; +}; export const useNodeDataViewer = ( data: any, pinName: string, execId: string, + executionResults?: NodeExecutionResult[], + dataType?: NodeDataType, ) => { const { toast } = useToast(); const [copiedIndex, setCopiedIndex] = useState(null); + const [copiedKey, setCopiedKey] = useState(null); - // Normalize data to array format - const dataArray = useMemo(() => { - return Array.isArray(data) ? data : [data]; - }, [data]); + const dataArray = Array.isArray(data) ? data : [data]; - // Prepare items for the enhanced renderer system - const outputItems = useMemo(() => { - if (!dataArray) return []; - - const items: Array<{ - key: string; - label: string; - value: unknown; - metadata?: OutputMetadata; - renderer: any; - }> = []; - - dataArray.forEach((value, index) => { - const metadata: OutputMetadata = {}; - - // Extract metadata from the value if it's an object - if ( - typeof value === "object" && - value !== null && - !React.isValidElement(value) - ) { - const objValue = value as any; - if (objValue.type) metadata.type = objValue.type; - if (objValue.mimeType) metadata.mimeType = objValue.mimeType; - if (objValue.filename) metadata.filename = objValue.filename; - if (objValue.language) metadata.language = objValue.language; - } - - const renderer = globalRegistry.getRenderer(value, metadata); - if (renderer) { - items.push({ - key: `item-${index}`, + const outputItems = + !dataArray || dataArray.length === 0 + ? [] + : createOutputItems(dataArray).map((item, index) => ({ + ...item, label: index === 0 ? beautifyString(pinName) : "", - value, - metadata, - renderer, - }); - } else { - // Fallback to text renderer - const textRenderer = globalRegistry - .getAllRenderers() - .find((r) => r.name === "TextRenderer"); - if (textRenderer) { - items.push({ - key: `item-${index}`, - label: index === 0 ? beautifyString(pinName) : "", - value: - typeof value === "string" - ? 
value - : JSON.stringify(value, null, 2), - metadata, - renderer: textRenderer, - }); - } - } - }); + })); - return items; - }, [dataArray, pinName]); + const groupedExecutions = + !executionResults || executionResults.length === 0 + ? [] + : [...executionResults].reverse().map((result) => { + const rawData = getExecutionData( + result, + dataType || "output", + pinName, + ); + let dataArray: unknown[]; + if (dataType === "input") { + dataArray = + rawData !== undefined && rawData !== null ? [rawData] : []; + } else { + dataArray = normalizeToArray(rawData); + } + + const outputItems = createOutputItems(dataArray); + return { + execId: result.node_exec_id, + outputItems, + }; + }); + + const totalGroupedItems = groupedExecutions.reduce( + (total, execution) => total + execution.outputItems.length, + 0, + ); const copyExecutionId = () => { navigator.clipboard.writeText(execId).then(() => { @@ -122,6 +110,45 @@ export const useNodeDataViewer = ( ]); }; + const handleCopyGroupedItem = async ( + execId: string, + index: number, + item: OutputItem, + ) => { + const copyContent = item.renderer.getCopyContent(item.value, item.metadata); + + if (!copyContent) { + return; + } + + try { + let text: string; + if (typeof copyContent.data === "string") { + text = copyContent.data; + } else if (copyContent.fallbackText) { + text = copyContent.fallbackText; + } else { + return; + } + + await navigator.clipboard.writeText(text); + setCopiedKey(`${execId}-${index}`); + setTimeout(() => setCopiedKey(null), 2000); + } catch (error) { + console.error("Failed to copy:", error); + } + }; + + const handleDownloadGroupedItem = (item: OutputItem) => { + downloadOutputs([ + { + value: item.value, + metadata: item.metadata, + renderer: item.renderer, + }, + ]); + }; + return { outputItems, dataArray, @@ -129,5 +156,10 @@ export const useNodeDataViewer = ( handleCopyItem, handleDownloadItem, copiedIndex, + groupedExecutions, + totalGroupedItems, + handleCopyGroupedItem, + handleDownloadGroupedItem, + copiedKey, }; }; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/ViewMoreData.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/ViewMoreData.tsx index 7bf026fe43..74d0da06c2 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/ViewMoreData.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/ViewMoreData.tsx @@ -8,16 +8,28 @@ import { useState } from "react"; import { NodeDataViewer } from "./NodeDataViewer/NodeDataViewer"; import { useToast } from "@/components/molecules/Toast/use-toast"; import { CheckIcon, CopyIcon } from "@phosphor-icons/react"; +import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore"; +import { useShallow } from "zustand/react/shallow"; +import { + NodeDataType, + getExecutionEntries, + normalizeToArray, +} from "../helpers"; export const ViewMoreData = ({ - outputData, - execId, + nodeId, + dataType = "output", }: { - outputData: Record>; - execId?: string; + nodeId: string; + dataType?: NodeDataType; }) => { const [copiedKey, setCopiedKey] = useState(null); const { toast } = useToast(); + const executionResults = useNodeStore( + useShallow((state) => state.getNodeExecutionResults(nodeId)), + ); + + const reversedExecutionResults = 
[...executionResults].reverse(); const handleCopy = (key: string, value: any) => { const textToCopy = @@ -29,8 +41,8 @@ export const ViewMoreData = ({ setTimeout(() => setCopiedKey(null), 2000); }; - const copyExecutionId = () => { - navigator.clipboard.writeText(execId || "N/A").then(() => { + const copyExecutionId = (executionId: string) => { + navigator.clipboard.writeText(executionId || "N/A").then(() => { toast({ title: "Execution ID copied to clipboard!", duration: 2000, @@ -42,7 +54,7 @@ export const ViewMoreData = ({ -
-
- {Object.entries(outputData).map(([key, value]) => ( -
+ {reversedExecutionResults.map((result) => ( +
+ + Execution ID: + - Pin: - - - {beautifyString(key)} + {result.node_exec_id} +
-
- - Data: - -
- {value.map((item, index) => ( -
- -
- ))} -
- - -
-
+
+ {getExecutionEntries(result, dataType).map( + ([key, value]) => { + const normalizedValue = normalizeToArray(value); + return ( +
+
+ + Pin: + + + {beautifyString(key)} + +
+
+ + Data: + +
+ {normalizedValue.map((item, index) => ( +
+ +
+ ))} + +
+ + +
+
+
+
+ ); + }, + )}
))} diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/helpers.ts new file mode 100644 index 0000000000..c75cd83cac --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/helpers.ts @@ -0,0 +1,83 @@ +import type { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult"; +import type { OutputMetadata } from "@/components/contextual/OutputRenderers"; +import { globalRegistry } from "@/components/contextual/OutputRenderers"; +import React from "react"; + +export type NodeDataType = "input" | "output"; + +export type OutputItem = { + key: string; + value: unknown; + metadata?: OutputMetadata; + renderer: any; +}; + +export const normalizeToArray = (value: unknown) => { + if (value === undefined) return []; + return Array.isArray(value) ? value : [value]; +}; + +export const getExecutionData = ( + result: NodeExecutionResult, + dataType: NodeDataType, + pinName: string, +) => { + if (dataType === "input") { + return result.input_data; + } + + return result.output_data?.[pinName]; +}; + +export const createOutputItems = (dataArray: unknown[]): Array => { + const items: Array = []; + + dataArray.forEach((value, index) => { + const metadata: OutputMetadata = {}; + + if ( + typeof value === "object" && + value !== null && + !React.isValidElement(value) + ) { + const objValue = value as any; + if (objValue.type) metadata.type = objValue.type; + if (objValue.mimeType) metadata.mimeType = objValue.mimeType; + if (objValue.filename) metadata.filename = objValue.filename; + if (objValue.language) metadata.language = objValue.language; + } + + const renderer = globalRegistry.getRenderer(value, metadata); + if (renderer) { + items.push({ + key: `item-${index}`, + value, + metadata, + renderer, + }); + } else { + const textRenderer = globalRegistry + .getAllRenderers() + .find((r) => r.name === "TextRenderer"); + if (textRenderer) { + items.push({ + key: `item-${index}`, + value: + typeof value === "string" ? value : JSON.stringify(value, null, 2), + metadata, + renderer: textRenderer, + }); + } + } + }); + + return items; +}; + +export const getExecutionEntries = ( + result: NodeExecutionResult, + dataType: NodeDataType, +) => { + const data = dataType === "input" ? 
result.input_data : result.output_data; + return Object.entries(data || {}); +}; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx index cfc599c6e4..8ebf1dfaf3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx @@ -7,15 +7,18 @@ export const useNodeOutput = (nodeId: string) => { const [copiedKey, setCopiedKey] = useState(null); const { toast } = useToast(); - const nodeExecutionResult = useNodeStore( - useShallow((state) => state.getNodeExecutionResult(nodeId)), + const latestResult = useNodeStore( + useShallow((state) => state.getLatestNodeExecutionResult(nodeId)), ); - const inputData = nodeExecutionResult?.input_data; + const latestInputData = useNodeStore( + useShallow((state) => state.getLatestNodeInputData(nodeId)), + ); + + const latestOutputData: Record> = useNodeStore( + useShallow((state) => state.getLatestNodeOutputData(nodeId) || {}), + ); - const outputData: Record> = { - ...nodeExecutionResult?.output_data, - }; const handleCopy = async (key: string, value: any) => { try { const text = JSON.stringify(value, null, 2); @@ -35,11 +38,12 @@ export const useNodeOutput = (nodeId: string) => { }); } }; + return { - outputData, - inputData, + latestOutputData, + latestInputData, copiedKey, handleCopy, - executionResultId: nodeExecutionResult?.node_exec_id, + executionResultId: latestResult?.node_exec_id, }; }; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/SubAgentUpdate/useSubAgentUpdateState.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/SubAgentUpdate/useSubAgentUpdateState.ts index d4ba538172..143cd58509 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/SubAgentUpdate/useSubAgentUpdateState.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/SubAgentUpdate/useSubAgentUpdateState.ts @@ -1,10 +1,7 @@ import { useState, useCallback, useEffect } from "react"; import { useShallow } from "zustand/react/shallow"; import { useGraphStore } from "@/app/(platform)/build/stores/graphStore"; -import { - useNodeStore, - NodeResolutionData, -} from "@/app/(platform)/build/stores/nodeStore"; +import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore"; import { useEdgeStore } from "@/app/(platform)/build/stores/edgeStore"; import { useSubAgentUpdate, @@ -13,6 +10,7 @@ import { } from "@/app/(platform)/build/hooks/useSubAgentUpdate"; import { GraphInputSchema, GraphOutputSchema } from "@/lib/autogpt-server-api"; import { CustomNodeData } from "../../CustomNode"; +import { NodeResolutionData } from "@/app/(platform)/build/stores/types"; // Stable empty set to avoid creating new references in selectors const EMPTY_SET: Set = new Set(); diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/helpers.ts index 54ddf2a61d..50326a03e6 100644 --- 
a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/helpers.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/helpers.ts @@ -1,5 +1,5 @@ import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus"; -import { NodeResolutionData } from "@/app/(platform)/build/stores/nodeStore"; +import { NodeResolutionData } from "@/app/(platform)/build/stores/types"; import { RJSFSchema } from "@rjsf/utils"; export const nodeStyleBasedOnStatus: Record = { diff --git a/autogpt_platform/frontend/src/app/(platform)/build/stores/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/build/stores/helpers.ts new file mode 100644 index 0000000000..bcdfd4c313 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/build/stores/helpers.ts @@ -0,0 +1,16 @@ +export const accumulateExecutionData = ( + accumulated: Record, + data: Record | undefined, +) => { + if (!data) return { ...accumulated }; + const next = { ...accumulated }; + Object.entries(data).forEach(([key, values]) => { + const nextValues = Array.isArray(values) ? values : [values]; + if (next[key]) { + next[key] = [...next[key], ...nextValues]; + } else { + next[key] = [...nextValues]; + } + }); + return next; +}; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts b/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts index 5502a8780d..f7a52636f3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts @@ -10,6 +10,8 @@ import { import { Node } from "@/app/api/__generated__/models/node"; import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus"; import { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult"; +import { NodeExecutionResultInputData } from "@/app/api/__generated__/models/nodeExecutionResultInputData"; +import { NodeExecutionResultOutputData } from "@/app/api/__generated__/models/nodeExecutionResultOutputData"; import { useHistoryStore } from "./historyStore"; import { useEdgeStore } from "./edgeStore"; import { BlockUIType } from "../components/types"; @@ -18,31 +20,10 @@ import { ensurePathExists, parseHandleIdToPath, } from "@/components/renderers/InputRenderer/helpers"; -import { IncompatibilityInfo } from "../hooks/useSubAgentUpdate/types"; +import { accumulateExecutionData } from "./helpers"; +import { NodeResolutionData } from "./types"; -// Resolution mode data stored per node -export type NodeResolutionData = { - incompatibilities: IncompatibilityInfo; - // The NEW schema from the update (what we're updating TO) - pendingUpdate: { - input_schema: Record; - output_schema: Record; - }; - // The OLD schema before the update (what we're updating FROM) - // Needed to merge and show removed inputs during resolution - currentSchema: { - input_schema: Record; - output_schema: Record; - }; - // The full updated hardcoded values to apply when resolution completes - pendingHardcodedValues: Record; -}; - -// Minimum movement (in pixels) required before logging position change to history -// Prevents spamming history with small movements when clicking on inputs inside blocks const MINIMUM_MOVE_BEFORE_LOG = 50; - -// Track initial positions when drag starts (outside store to avoid re-renders) const dragStartPositions: Record = {}; let dragStartState: { nodes: CustomNode[]; edges: CustomEdge[] } | null = null; 
@@ -52,6 +33,15 @@ type NodeStore = { nodeCounter: number; setNodeCounter: (nodeCounter: number) => void; nodeAdvancedStates: Record; + + latestNodeInputData: Record; + latestNodeOutputData: Record< + string, + NodeExecutionResultOutputData | undefined + >; + accumulatedNodeInputData: Record>; + accumulatedNodeOutputData: Record>; + setNodes: (nodes: CustomNode[]) => void; onNodesChange: (changes: NodeChange[]) => void; addNode: (node: CustomNode) => void; @@ -72,12 +62,26 @@ type NodeStore = { updateNodeStatus: (nodeId: string, status: AgentExecutionStatus) => void; getNodeStatus: (nodeId: string) => AgentExecutionStatus | undefined; + cleanNodesStatuses: () => void; updateNodeExecutionResult: ( nodeId: string, result: NodeExecutionResult, ) => void; - getNodeExecutionResult: (nodeId: string) => NodeExecutionResult | undefined; + getNodeExecutionResults: (nodeId: string) => NodeExecutionResult[]; + getLatestNodeInputData: ( + nodeId: string, + ) => NodeExecutionResultInputData | undefined; + getLatestNodeOutputData: ( + nodeId: string, + ) => NodeExecutionResultOutputData | undefined; + getAccumulatedNodeInputData: (nodeId: string) => Record; + getAccumulatedNodeOutputData: (nodeId: string) => Record; + getLatestNodeExecutionResult: ( + nodeId: string, + ) => NodeExecutionResult | undefined; + clearAllNodeExecutionResults: () => void; + getNodeBlockUIType: (nodeId: string) => BlockUIType; hasWebhookNodes: () => boolean; @@ -122,6 +126,10 @@ export const useNodeStore = create((set, get) => ({ nodeCounter: 0, setNodeCounter: (nodeCounter) => set({ nodeCounter }), nodeAdvancedStates: {}, + latestNodeInputData: {}, + latestNodeOutputData: {}, + accumulatedNodeInputData: {}, + accumulatedNodeOutputData: {}, incrementNodeCounter: () => set((state) => ({ nodeCounter: state.nodeCounter + 1, @@ -317,17 +325,162 @@ export const useNodeStore = create((set, get) => ({ return get().nodes.find((n) => n.id === nodeId)?.data?.status; }, - updateNodeExecutionResult: (nodeId: string, result: NodeExecutionResult) => { + cleanNodesStatuses: () => { set((state) => ({ - nodes: state.nodes.map((n) => - n.id === nodeId - ? 
{ ...n, data: { ...n.data, nodeExecutionResult: result } } - : n, - ), + nodes: state.nodes.map((n) => ({ + ...n, + data: { ...n.data, status: undefined }, + })), })); }, - getNodeExecutionResult: (nodeId: string) => { - return get().nodes.find((n) => n.id === nodeId)?.data?.nodeExecutionResult; + + updateNodeExecutionResult: (nodeId: string, result: NodeExecutionResult) => { + set((state) => { + let latestNodeInputData = state.latestNodeInputData; + let latestNodeOutputData = state.latestNodeOutputData; + let accumulatedNodeInputData = state.accumulatedNodeInputData; + let accumulatedNodeOutputData = state.accumulatedNodeOutputData; + + const nodes = state.nodes.map((n) => { + if (n.id !== nodeId) return n; + + const existingResults = n.data.nodeExecutionResults || []; + const duplicateIndex = existingResults.findIndex( + (r) => r.node_exec_id === result.node_exec_id, + ); + + if (duplicateIndex !== -1) { + const oldResult = existingResults[duplicateIndex]; + const inputDataChanged = + JSON.stringify(oldResult.input_data) !== + JSON.stringify(result.input_data); + const outputDataChanged = + JSON.stringify(oldResult.output_data) !== + JSON.stringify(result.output_data); + + if (!inputDataChanged && !outputDataChanged) { + return n; + } + + const updatedResults = [...existingResults]; + updatedResults[duplicateIndex] = result; + + const recomputedAccumulatedInput = updatedResults.reduce( + (acc, r) => accumulateExecutionData(acc, r.input_data), + {} as Record, + ); + const recomputedAccumulatedOutput = updatedResults.reduce( + (acc, r) => accumulateExecutionData(acc, r.output_data), + {} as Record, + ); + + const mostRecentResult = updatedResults[updatedResults.length - 1]; + latestNodeInputData = { + ...latestNodeInputData, + [nodeId]: mostRecentResult.input_data, + }; + latestNodeOutputData = { + ...latestNodeOutputData, + [nodeId]: mostRecentResult.output_data, + }; + + accumulatedNodeInputData = { + ...accumulatedNodeInputData, + [nodeId]: recomputedAccumulatedInput, + }; + accumulatedNodeOutputData = { + ...accumulatedNodeOutputData, + [nodeId]: recomputedAccumulatedOutput, + }; + + return { + ...n, + data: { + ...n.data, + nodeExecutionResults: updatedResults, + }, + }; + } + + accumulatedNodeInputData = { + ...accumulatedNodeInputData, + [nodeId]: accumulateExecutionData( + accumulatedNodeInputData[nodeId] || {}, + result.input_data, + ), + }; + accumulatedNodeOutputData = { + ...accumulatedNodeOutputData, + [nodeId]: accumulateExecutionData( + accumulatedNodeOutputData[nodeId] || {}, + result.output_data, + ), + }; + + latestNodeInputData = { + ...latestNodeInputData, + [nodeId]: result.input_data, + }; + latestNodeOutputData = { + ...latestNodeOutputData, + [nodeId]: result.output_data, + }; + + return { + ...n, + data: { + ...n.data, + nodeExecutionResults: [...existingResults, result], + }, + }; + }); + + return { + nodes, + latestNodeInputData, + latestNodeOutputData, + accumulatedNodeInputData, + accumulatedNodeOutputData, + }; + }); + }, + getNodeExecutionResults: (nodeId: string) => { + return ( + get().nodes.find((n) => n.id === nodeId)?.data?.nodeExecutionResults || [] + ); + }, + getLatestNodeInputData: (nodeId: string) => { + return get().latestNodeInputData[nodeId]; + }, + getLatestNodeOutputData: (nodeId: string) => { + return get().latestNodeOutputData[nodeId]; + }, + getAccumulatedNodeInputData: (nodeId: string) => { + return get().accumulatedNodeInputData[nodeId] || {}; + }, + getAccumulatedNodeOutputData: (nodeId: string) => { + return 
get().accumulatedNodeOutputData[nodeId] || {}; + }, + getLatestNodeExecutionResult: (nodeId: string) => { + const results = + get().nodes.find((n) => n.id === nodeId)?.data?.nodeExecutionResults || + []; + return results.length > 0 ? results[results.length - 1] : undefined; + }, + clearAllNodeExecutionResults: () => { + set((state) => ({ + nodes: state.nodes.map((n) => ({ + ...n, + data: { + ...n.data, + nodeExecutionResults: [], + }, + })), + latestNodeInputData: {}, + latestNodeOutputData: {}, + accumulatedNodeInputData: {}, + accumulatedNodeOutputData: {}, + })); }, getNodeBlockUIType: (nodeId: string) => { return ( diff --git a/autogpt_platform/frontend/src/app/(platform)/build/stores/types.ts b/autogpt_platform/frontend/src/app/(platform)/build/stores/types.ts new file mode 100644 index 0000000000..f0ec7e6c1c --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/build/stores/types.ts @@ -0,0 +1,14 @@ +import { IncompatibilityInfo } from "../hooks/useSubAgentUpdate/types"; + +export type NodeResolutionData = { + incompatibilities: IncompatibilityInfo; + pendingUpdate: { + input_schema: Record; + output_schema: Record; + }; + currentSchema: { + input_schema: Record; + output_schema: Record; + }; + pendingHardcodedValues: Record; +}; From 75ecc4de92281d72f256993989763b66a7acd8a5 Mon Sep 17 00:00:00 2001 From: Swifty Date: Mon, 26 Jan 2026 14:56:24 +0100 Subject: [PATCH 03/21] fix(backend): enforce block disabled flag on execution endpoints (#11839) ## Summary This PR adds security checks to prevent execution of disabled blocks across all block execution endpoints. - Add `disabled` flag check to main web API endpoint (`/api/blocks/{block_id}/execute`) - Add `disabled` flag check to external API endpoint (`/api/blocks/{block_id}/execute`) - Add `disabled` flag check to chat tool block execution Previously, block execution endpoints only checked if a block existed but did not verify the `disabled` flag, allowing any authenticated user to execute disabled blocks. 
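For reviewers, a minimal sketch of the client-visible contract after this change. The request shape below is illustrative only; the endpoint path and the 404/403 status codes are the parts taken from this PR.

```ts
// Illustrative sketch, not actual client code from this repo.
// After this PR: 404 = unknown block, 403 = block exists but is disabled.
async function executeBlock(blockId: string, input: unknown): Promise<unknown> {
  const res = await fetch(`/api/blocks/${blockId}/execute`, {
    method: "POST", // assumption: the execution endpoint accepts POST
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(input),
  });
  if (res.status === 404) throw new Error(`Block #${blockId} not found.`);
  if (res.status === 403) throw new Error(`Block #${blockId} is disabled.`);
  return res.json();
}
```
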
## Test plan - [x] Verify disabled blocks return 403 Forbidden on main API endpoint - [x] Verify disabled blocks return 403 Forbidden on external API endpoint - [x] Verify disabled blocks return error response in chat tool execution - [x] Verify enabled blocks continue to execute normally --- autogpt_platform/backend/backend/api/external/v1/routes.py | 2 ++ .../backend/backend/api/features/chat/tools/run_block.py | 5 +++++ autogpt_platform/backend/backend/api/features/v1.py | 2 ++ autogpt_platform/backend/backend/api/features/v1_test.py | 1 + 4 files changed, 10 insertions(+) diff --git a/autogpt_platform/backend/backend/api/external/v1/routes.py b/autogpt_platform/backend/backend/api/external/v1/routes.py index 58e15dc6a3..00933c1899 100644 --- a/autogpt_platform/backend/backend/api/external/v1/routes.py +++ b/autogpt_platform/backend/backend/api/external/v1/routes.py @@ -86,6 +86,8 @@ async def execute_graph_block( obj = backend.data.block.get_block(block_id) if not obj: raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.") + if obj.disabled: + raise HTTPException(status_code=403, detail=f"Block #{block_id} is disabled.") output = defaultdict(list) async for name, data in obj.execute(data): diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py index c29cc92556..0d233fcfec 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py @@ -179,6 +179,11 @@ class RunBlockTool(BaseTool): message=f"Block '{block_id}' not found", session_id=session_id, ) + if block.disabled: + return ErrorResponse( + message=f"Block '{block_id}' is disabled", + session_id=session_id, + ) logger.info(f"Executing block {block.name} ({block_id}) for user {user_id}") diff --git a/autogpt_platform/backend/backend/api/features/v1.py b/autogpt_platform/backend/backend/api/features/v1.py index 3a5dd3ec12..51789f9e2b 100644 --- a/autogpt_platform/backend/backend/api/features/v1.py +++ b/autogpt_platform/backend/backend/api/features/v1.py @@ -364,6 +364,8 @@ async def execute_graph_block( obj = get_block(block_id) if not obj: raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.") + if obj.disabled: + raise HTTPException(status_code=403, detail=f"Block #{block_id} is disabled.") user = await get_user_by_id(user_id) if not user: diff --git a/autogpt_platform/backend/backend/api/features/v1_test.py b/autogpt_platform/backend/backend/api/features/v1_test.py index a186d38810..d57ad49949 100644 --- a/autogpt_platform/backend/backend/api/features/v1_test.py +++ b/autogpt_platform/backend/backend/api/features/v1_test.py @@ -138,6 +138,7 @@ def test_execute_graph_block( """Test execute block endpoint""" # Mock block mock_block = Mock() + mock_block.disabled = False async def mock_execute(*args, **kwargs): yield "output1", {"data": "result1"} From fbc2da36e630a98c699f42dacb788b41432df780 Mon Sep 17 00:00:00 2001 From: Ubbe Date: Mon, 26 Jan 2026 22:54:19 +0700 Subject: [PATCH 04/21] fix(analytics): only try to init Posthog when on cloud (#11843) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes 🏗️ This prevents Posthog from being initialised locally, where we should not be collecting analytics during local development. 
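Conceptually, the gate added to `src/services/environment/index.ts` (see the diff below) reduces to the following sketch; `isCloud()` is the existing environment helper, and this is a simplified illustration rather than the exact implementation:

```ts
// Sketch of the new gate: PostHog only initialises when running in the
// cloud environment AND both public env vars are configured.
declare function isCloud(): boolean; // existing helper in the environment service

function isPostHogEnabled(): boolean {
  const key = process.env.NEXT_PUBLIC_POSTHOG_KEY;
  const host = process.env.NEXT_PUBLIC_POSTHOG_HOST;
  return Boolean(isCloud() && key && host);
}
```

When this predicate is false, the provider renders its children unchanged and none of the PostHog tracking hooks fire.
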
## Checklist 📋 ### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Run locally and test the above --- .../src/providers/posthog/posthog-provider.tsx | 15 +++++++++++---- .../frontend/src/services/environment/index.ts | 16 ++++++++++++++++ 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx b/autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx index 58ee2394ef..249d74596a 100644 --- a/autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx +++ b/autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx @@ -1,12 +1,15 @@ "use client"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; +import { environment } from "@/services/environment"; import { PostHogProvider as PHProvider } from "@posthog/react"; import { usePathname, useSearchParams } from "next/navigation"; import posthog from "posthog-js"; import { ReactNode, useEffect, useRef } from "react"; export function PostHogProvider({ children }: { children: ReactNode }) { + const isPostHogEnabled = environment.isPostHogEnabled(); + useEffect(() => { if (process.env.NEXT_PUBLIC_POSTHOG_KEY) { posthog.init(process.env.NEXT_PUBLIC_POSTHOG_KEY, { @@ -19,15 +22,18 @@ export function PostHogProvider({ children }: { children: ReactNode }) { } }, []); + if (!isPostHogEnabled) return <>{children}; + return {children}; } export function PostHogUserTracker() { const { user, isUserLoading } = useSupabase(); const previousUserIdRef = useRef(null); + const isPostHogEnabled = environment.isPostHogEnabled(); useEffect(() => { - if (isUserLoading) return; + if (isUserLoading || !isPostHogEnabled) return; if (user) { if (previousUserIdRef.current !== user.id) { @@ -41,7 +47,7 @@ export function PostHogUserTracker() { posthog.reset(); previousUserIdRef.current = null; } - }, [user, isUserLoading]); + }, [user, isUserLoading, isPostHogEnabled]); return null; } @@ -49,16 +55,17 @@ export function PostHogUserTracker() { export function PostHogPageViewTracker() { const pathname = usePathname(); const searchParams = useSearchParams(); + const isPostHogEnabled = environment.isPostHogEnabled(); useEffect(() => { - if (pathname) { + if (pathname && isPostHogEnabled) { let url = window.origin + pathname; if (searchParams && searchParams.toString()) { url = url + `?${searchParams.toString()}`; } posthog.capture("$pageview", { $current_url: url }); } - }, [pathname, searchParams]); + }, [pathname, searchParams, isPostHogEnabled]); return null; } diff --git a/autogpt_platform/frontend/src/services/environment/index.ts b/autogpt_platform/frontend/src/services/environment/index.ts index cdd5b421b5..f19bc417e3 100644 --- a/autogpt_platform/frontend/src/services/environment/index.ts +++ b/autogpt_platform/frontend/src/services/environment/index.ts @@ -76,6 +76,13 @@ function getPreviewStealingDev() { return branch; } +function getPostHogCredentials() { + return { + key: process.env.NEXT_PUBLIC_POSTHOG_KEY, + host: process.env.NEXT_PUBLIC_POSTHOG_HOST, + }; +} + function isProductionBuild() { return process.env.NODE_ENV === "production"; } @@ -116,6 +123,13 @@ function areFeatureFlagsEnabled() { return process.env.NEXT_PUBLIC_LAUNCHDARKLY_ENABLED === "enabled"; } +function isPostHogEnabled() { + const inCloud = isCloud(); + const key = process.env.NEXT_PUBLIC_POSTHOG_KEY; + const host = 
process.env.NEXT_PUBLIC_POSTHOG_HOST; + return inCloud && key && host; +} + export const environment = { // Generic getEnvironmentStr, @@ -128,6 +142,7 @@ export const environment = { getSupabaseUrl, getSupabaseAnonKey, getPreviewStealingDev, + getPostHogCredentials, // Assertions isServerSide, isClientSide, @@ -138,5 +153,6 @@ export const environment = { isCloud, isLocal, isVercelPreview, + isPostHogEnabled, areFeatureFlagsEnabled, }; From d5c0f5b2df67b5eeb31d32f2982a6ca5af739b49 Mon Sep 17 00:00:00 2001 From: Swifty Date: Mon, 26 Jan 2026 17:00:48 +0100 Subject: [PATCH 05/21] refactor(backend): remove page context from chat service (#11844) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Background The chat service previously supported including page context (URL and content) in user messages. This functionality is being removed. ### Changes 🏗️ - Removed page context handling from `stream_chat_completion` in the chat service - User messages are now passed directly without URL/content context injection - Removed associated logging for page context ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Verify chat functionality works without page context - [x] Confirm no regressions in basic chat message handling --- .../backend/backend/api/features/chat/service.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index 43fb7b35a7..e10343fff6 100644 --- a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -237,18 +237,9 @@ async def stream_chat_completion( ) if message: - # Build message content with context if provided - message_content = message - if context and context.get("url") and context.get("content"): - context_text = f"Page URL: {context['url']}\n\nPage Content:\n{context['content']}\n\n---\n\nUser Message: {message}" - message_content = context_text - logger.info( - f"Including page context: URL={context['url']}, content_length={len(context['content'])}" - ) - session.messages.append( ChatMessage( - role="user" if is_user_message else "assistant", content=message_content + role="user" if is_user_message else "assistant", content=message ) ) logger.info( From 859f3f8c06a862abc068df7c3bd04f30a8a93325 Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Tue, 27 Jan 2026 03:22:30 -0600 Subject: [PATCH 06/21] feat(frontend): implement clarification questions UI for agent generation (#11833) ## Summary Add interactive UI to collect user answers when the agent-generator service returns clarifying questions during agent creation/editing. Previously, when the backend asked clarifying questions, the frontend would just display them as text with no way for users to answer. This caused the chat to keep retrying without the necessary context. ## Changes - **ChatMessageData type**: Add `clarification_needed` variant with questions field - **ClarificationQuestionsWidget**: New component with interactive form to collect answers - **parseToolResponse**: Detect and parse `clarification_needed` responses from backend - **ChatMessage**: Render the widget when clarification is needed ## How It Works 1. User requests to create/edit agent 2. 
Backend returns `ClarificationNeededResponse` with list of questions 3. Frontend shows interactive form with text inputs for each question 4. User fills in answers and clicks "Submit Answers" 5. Answers are sent back as context to the tool 6. Backend receives full context and continues ## UI Features - Shows all questions with examples (if provided) - Input validation (all questions must be answered to submit) - Visual feedback (checkmarks when answered) - Numbered questions for clarity - Submit button disabled until all answered - Follows same design pattern as `credentials_needed` flow ## Related - Backend support for clarification was added in #11819 - Fixes the issue shown in the screenshot where users couldn't answer clarifying questions ## Test plan - [ ] Test creating agent that requires clarifying questions - [ ] Verify questions are displayed in interactive form - [ ] Verify all questions must be answered before submitting - [ ] Verify answers are sent back to backend as context - [ ] Verify agent creation continues with full context --- .../Chat/components/ChatContainer/helpers.ts | 17 ++ .../components/ChatMessage/ChatMessage.tsx | 25 +++ .../components/ChatMessage/useChatMessage.ts | 13 ++ .../ClarificationQuestionsWidget.tsx | 154 ++++++++++++++++++ 4 files changed, 209 insertions(+) create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ClarificationQuestionsWidget/ClarificationQuestionsWidget.tsx diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts index 9d51003a93..0edd1b411a 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts @@ -213,6 +213,23 @@ export function parseToolResponse( timestamp: timestamp || new Date(), }; } + if (responseType === "clarification_needed") { + return { + type: "clarification_needed", + toolName, + questions: + (parsedResult.questions as Array<{ + question: string; + keyword: string; + example?: string; + }>) || [], + message: + (parsedResult.message as string) || + "I need more information to proceed.", + sessionId: (parsedResult.session_id as string) || "", + timestamp: timestamp || new Date(), + }; + } if (responseType === "need_login") { return { type: "login_needed", diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx index a2827ce611..0fee33dbc0 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx @@ -14,6 +14,7 @@ import { AgentCarouselMessage } from "../AgentCarouselMessage/AgentCarouselMessa import { AIChatBubble } from "../AIChatBubble/AIChatBubble"; import { AuthPromptWidget } from "../AuthPromptWidget/AuthPromptWidget"; import { ChatCredentialsSetup } from "../ChatCredentialsSetup/ChatCredentialsSetup"; +import { ClarificationQuestionsWidget } from "../ClarificationQuestionsWidget/ClarificationQuestionsWidget"; import { ExecutionStartedMessage } from "../ExecutionStartedMessage/ExecutionStartedMessage"; import { MarkdownContent } from "../MarkdownContent/MarkdownContent"; import { NoResultsMessage } 
from "../NoResultsMessage/NoResultsMessage"; @@ -69,6 +70,7 @@ export function ChatMessage({ isToolResponse, isLoginNeeded, isCredentialsNeeded, + isClarificationNeeded, } = useChatMessage(message); const displayContent = getDisplayContent(message, isUser); @@ -96,6 +98,18 @@ export function ChatMessage({ } } + function handleClarificationAnswers(answers: Record) { + if (onSendMessage) { + const contextMessage = Object.entries(answers) + .map(([keyword, answer]) => `${keyword}: ${answer}`) + .join("\n"); + + onSendMessage( + `I have the answers to your questions:\n\n${contextMessage}\n\nPlease proceed with creating the agent.`, + ); + } + } + const handleCopy = useCallback( async function handleCopy() { if (message.type !== "message") return; @@ -141,6 +155,17 @@ export function ChatMessage({ ); } + if (isClarificationNeeded && message.type === "clarification_needed") { + return ( + + ); + } + // Render login needed messages if (isLoginNeeded && message.type === "login_needed") { // If user is already logged in, show success message instead of auth prompt diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/useChatMessage.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/useChatMessage.ts index 5ee61bc554..142b140c8b 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/useChatMessage.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/useChatMessage.ts @@ -91,6 +91,18 @@ export type ChatMessageData = credentialsSchema?: Record; message: string; timestamp?: string | Date; + } + | { + type: "clarification_needed"; + toolName: string; + questions: Array<{ + question: string; + keyword: string; + example?: string; + }>; + message: string; + sessionId: string; + timestamp?: string | Date; }; export function useChatMessage(message: ChatMessageData) { @@ -111,5 +123,6 @@ export function useChatMessage(message: ChatMessageData) { isAgentCarousel: message.type === "agent_carousel", isExecutionStarted: message.type === "execution_started", isInputsNeeded: message.type === "inputs_needed", + isClarificationNeeded: message.type === "clarification_needed", }; } diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ClarificationQuestionsWidget/ClarificationQuestionsWidget.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ClarificationQuestionsWidget/ClarificationQuestionsWidget.tsx new file mode 100644 index 0000000000..b2d3608254 --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ClarificationQuestionsWidget/ClarificationQuestionsWidget.tsx @@ -0,0 +1,154 @@ +"use client"; + +import { Button } from "@/components/atoms/Button/Button"; +import { Card } from "@/components/atoms/Card/Card"; +import { Input } from "@/components/atoms/Input/Input"; +import { Text } from "@/components/atoms/Text/Text"; +import { cn } from "@/lib/utils"; +import { CheckCircleIcon, QuestionIcon } from "@phosphor-icons/react"; +import { useState } from "react"; + +export interface ClarifyingQuestion { + question: string; + keyword: string; + example?: string; +} + +interface Props { + questions: ClarifyingQuestion[]; + message: string; + onSubmitAnswers: (answers: Record) => void; + onCancel?: () => void; + className?: string; +} + +export function ClarificationQuestionsWidget({ + questions, + message, + onSubmitAnswers, + onCancel, + className, +}: Props) { + const [answers, setAnswers] = 
useState>({}); + + function handleAnswerChange(keyword: string, value: string) { + setAnswers((prev) => ({ ...prev, [keyword]: value })); + } + + function handleSubmit() { + // Check if all questions are answered + const allAnswered = questions.every((q) => answers[q.keyword]?.trim()); + if (!allAnswered) { + return; + } + onSubmitAnswers(answers); + } + + const allAnswered = questions.every((q) => answers[q.keyword]?.trim()); + + return ( +
+
+
+
+ +
+
+ +
+ +
+ + I need more information + + + {message} + +
+ +
+ {questions.map((q, index) => { + const isAnswered = !!answers[q.keyword]?.trim(); + + return ( +
+
+ {isAnswered ? ( + + ) : ( +
+ {index + 1} +
+ )} +
+ + {q.question} + + {q.example && ( + + Example: {q.example} + + )} + + handleAnswerChange(q.keyword, e.target.value) + } + /> +
+
+
+ ); + })} +
+ +
+ + {onCancel && ( + + )} +
+
+
+
+
+ ); +} From bab436231a1e330e4213cca0993ad5dbcdc2efea Mon Sep 17 00:00:00 2001 From: Swifty Date: Tue, 27 Jan 2026 13:07:42 +0100 Subject: [PATCH 07/21] refactor(backend): remove Langfuse tracing from chat system (#11829) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We are removing Langfuse tracing from the chat/copilot system in favor of using OpenRouter's broadcast feature, which keeps our codebase simpler. Langfuse prompt management is retained for fetching system prompts. ### Changes 🏗️ **Removed Langfuse tracing:** - Removed `@observe` decorators from all 11 chat tool files - Removed `langfuse.openai` wrapper (now using standard `openai` client) - Removed `start_as_current_observation` and `propagate_attributes` context managers from `service.py` - Removed `update_current_trace()`, `update_current_span()`, `span.update()` calls **Retained Langfuse prompt management:** - `langfuse.get_prompt()` for fetching system prompts - `_is_langfuse_configured()` check for prompt availability - Configuration for `langfuse_prompt_name` **Files modified:** - `backend/api/features/chat/service.py` - `backend/api/features/chat/tools/*.py` (11 tool files) ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Verified `poetry run format` passes - [x] Verified no `@observe` decorators remain in chat tools - [x] Verified Langfuse prompt fetching is still functional (code preserved) --- .../backend/api/features/chat/service.py | 609 ++++++++---------- .../features/chat/tools/add_understanding.py | 3 - .../api/features/chat/tools/agent_output.py | 2 - .../api/features/chat/tools/create_agent.py | 3 - .../api/features/chat/tools/edit_agent.py | 3 - .../api/features/chat/tools/find_agent.py | 3 - .../api/features/chat/tools/find_block.py | 2 - .../features/chat/tools/find_library_agent.py | 3 - .../api/features/chat/tools/get_doc_page.py | 3 - .../api/features/chat/tools/run_agent.py | 2 - .../api/features/chat/tools/run_block.py | 3 - .../api/features/chat/tools/search_docs.py | 2 - 12 files changed, 282 insertions(+), 356 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index e10343fff6..3976cd5f38 100644 --- a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -5,9 +5,9 @@ from asyncio import CancelledError from collections.abc import AsyncGenerator from typing import Any +import openai import orjson -from langfuse import get_client, propagate_attributes -from langfuse.openai import openai # type: ignore +from langfuse import get_client from openai import ( APIConnectionError, APIError, @@ -299,347 +299,302 @@ async def stream_chat_completion( # Build system prompt with business understanding system_prompt, understanding = await _build_system_prompt(user_id) - # Create Langfuse trace for this LLM call (each call gets its own trace, grouped by session_id) - # Using v3 SDK: start_observation creates a root span, update_trace sets trace-level attributes - input = message - if not message and tool_call_response: - input = tool_call_response + # Initialize variables for streaming + assistant_response = ChatMessage( + role="assistant", + content="", + ) + accumulated_tool_calls: list[dict[str, Any]] = [] + has_saved_assistant_message = False + 
has_appended_streaming_message = False + last_cache_time = 0.0 + last_cache_content_len = 0 - langfuse = get_client() - with langfuse.start_as_current_observation( - as_type="span", - name="user-copilot-request", - input=input, - ) as span: - with propagate_attributes( - session_id=session_id, - user_id=user_id, - tags=["copilot"], - metadata={ - "users_information": format_understanding_for_prompt(understanding)[ - :200 - ] # langfuse only accepts upto to 200 chars - }, + has_yielded_end = False + has_yielded_error = False + has_done_tool_call = False + has_received_text = False + text_streaming_ended = False + tool_response_messages: list[ChatMessage] = [] + should_retry = False + + # Generate unique IDs for AI SDK protocol + import uuid as uuid_module + + message_id = str(uuid_module.uuid4()) + text_block_id = str(uuid_module.uuid4()) + + # Yield message start + yield StreamStart(messageId=message_id) + + try: + async for chunk in _stream_chat_chunks( + session=session, + tools=tools, + system_prompt=system_prompt, + text_block_id=text_block_id, ): - # Initialize variables that will be used in finally block (must be defined before try) - assistant_response = ChatMessage( - role="assistant", - content="", - ) - accumulated_tool_calls: list[dict[str, Any]] = [] - has_saved_assistant_message = False - has_appended_streaming_message = False - last_cache_time = 0.0 - last_cache_content_len = 0 - - # Wrap main logic in try/finally to ensure Langfuse observations are always ended - has_yielded_end = False - has_yielded_error = False - has_done_tool_call = False - has_received_text = False - text_streaming_ended = False - tool_response_messages: list[ChatMessage] = [] - should_retry = False - - # Generate unique IDs for AI SDK protocol - import uuid as uuid_module - - message_id = str(uuid_module.uuid4()) - text_block_id = str(uuid_module.uuid4()) - - # Yield message start - yield StreamStart(messageId=message_id) - - try: - async for chunk in _stream_chat_chunks( - session=session, - tools=tools, - system_prompt=system_prompt, - text_block_id=text_block_id, + if isinstance(chunk, StreamTextStart): + # Emit text-start before first text delta + if not has_received_text: + yield chunk + elif isinstance(chunk, StreamTextDelta): + delta = chunk.delta or "" + assert assistant_response.content is not None + assistant_response.content += delta + has_received_text = True + if not has_appended_streaming_message: + session.messages.append(assistant_response) + has_appended_streaming_message = True + current_time = time.monotonic() + content_len = len(assistant_response.content) + if ( + current_time - last_cache_time >= 1.0 + and content_len > last_cache_content_len ): - - if isinstance(chunk, StreamTextStart): - # Emit text-start before first text delta - if not has_received_text: - yield chunk - elif isinstance(chunk, StreamTextDelta): - delta = chunk.delta or "" - assert assistant_response.content is not None - assistant_response.content += delta - has_received_text = True - if not has_appended_streaming_message: - session.messages.append(assistant_response) - has_appended_streaming_message = True - current_time = time.monotonic() - content_len = len(assistant_response.content) - if ( - current_time - last_cache_time >= 1.0 - and content_len > last_cache_content_len - ): - try: - await cache_chat_session(session) - except Exception as e: - logger.warning( - f"Failed to cache partial session {session.session_id}: {e}" - ) - last_cache_time = current_time - last_cache_content_len = content_len - yield 
chunk - elif isinstance(chunk, StreamTextEnd): - # Emit text-end after text completes - if has_received_text and not text_streaming_ended: - text_streaming_ended = True - if assistant_response.content: - logger.warn( - f"StreamTextEnd: Attempting to set output {assistant_response.content}" - ) - span.update_trace(output=assistant_response.content) - span.update(output=assistant_response.content) - yield chunk - elif isinstance(chunk, StreamToolInputStart): - # Emit text-end before first tool call, but only if we've received text - if has_received_text and not text_streaming_ended: - yield StreamTextEnd(id=text_block_id) - text_streaming_ended = True - yield chunk - elif isinstance(chunk, StreamToolInputAvailable): - # Accumulate tool calls in OpenAI format - accumulated_tool_calls.append( - { - "id": chunk.toolCallId, - "type": "function", - "function": { - "name": chunk.toolName, - "arguments": orjson.dumps(chunk.input).decode( - "utf-8" - ), - }, - } - ) - elif isinstance(chunk, StreamToolOutputAvailable): - result_content = ( - chunk.output - if isinstance(chunk.output, str) - else orjson.dumps(chunk.output).decode("utf-8") - ) - tool_response_messages.append( - ChatMessage( - role="tool", - content=result_content, - tool_call_id=chunk.toolCallId, - ) - ) - has_done_tool_call = True - # Track if any tool execution failed - if not chunk.success: - logger.warning( - f"Tool {chunk.toolName} (ID: {chunk.toolCallId}) execution failed" - ) - yield chunk - elif isinstance(chunk, StreamFinish): - if not has_done_tool_call: - # Emit text-end before finish if we received text but haven't closed it - if has_received_text and not text_streaming_ended: - yield StreamTextEnd(id=text_block_id) - text_streaming_ended = True - - # Save assistant message before yielding finish to ensure it's persisted - # even if client disconnects immediately after receiving StreamFinish - if not has_saved_assistant_message: - messages_to_save_early: list[ChatMessage] = [] - if accumulated_tool_calls: - assistant_response.tool_calls = ( - accumulated_tool_calls - ) - if not has_appended_streaming_message and ( - assistant_response.content - or assistant_response.tool_calls - ): - messages_to_save_early.append(assistant_response) - messages_to_save_early.extend(tool_response_messages) - - if messages_to_save_early: - session.messages.extend(messages_to_save_early) - logger.info( - f"Saving assistant message before StreamFinish: " - f"content_len={len(assistant_response.content or '')}, " - f"tool_calls={len(assistant_response.tool_calls or [])}, " - f"tool_responses={len(tool_response_messages)}" - ) - if ( - messages_to_save_early - or has_appended_streaming_message - ): - await upsert_chat_session(session) - has_saved_assistant_message = True - - has_yielded_end = True - yield chunk - elif isinstance(chunk, StreamError): - has_yielded_error = True - yield chunk - elif isinstance(chunk, StreamUsage): - session.usage.append( - Usage( - prompt_tokens=chunk.promptTokens, - completion_tokens=chunk.completionTokens, - total_tokens=chunk.totalTokens, - ) - ) - else: - logger.error( - f"Unknown chunk type: {type(chunk)}", exc_info=True - ) - if assistant_response.content: - langfuse.update_current_trace(output=assistant_response.content) - langfuse.update_current_span(output=assistant_response.content) - elif tool_response_messages: - langfuse.update_current_trace(output=str(tool_response_messages)) - langfuse.update_current_span(output=str(tool_response_messages)) - - except CancelledError: - if not 
has_saved_assistant_message: - if accumulated_tool_calls: - assistant_response.tool_calls = accumulated_tool_calls - if assistant_response.content: - assistant_response.content = ( - f"{assistant_response.content}\n\n[interrupted]" - ) - else: - assistant_response.content = "[interrupted]" - if not has_appended_streaming_message: - session.messages.append(assistant_response) - if tool_response_messages: - session.messages.extend(tool_response_messages) try: - await upsert_chat_session(session) + await cache_chat_session(session) except Exception as e: logger.warning( - f"Failed to save interrupted session {session.session_id}: {e}" + f"Failed to cache partial session {session.session_id}: {e}" ) - raise - except Exception as e: - logger.error(f"Error during stream: {e!s}", exc_info=True) - - # Check if this is a retryable error (JSON parsing, incomplete tool calls, etc.) - is_retryable = isinstance( - e, (orjson.JSONDecodeError, KeyError, TypeError) - ) - - if is_retryable and retry_count < config.max_retries: - logger.info( - f"Retryable error encountered. Attempt {retry_count + 1}/{config.max_retries}" - ) - should_retry = True - else: - # Non-retryable error or max retries exceeded - # Save any partial progress before reporting error - messages_to_save: list[ChatMessage] = [] - - # Add assistant message if it has content or tool calls - if accumulated_tool_calls: - assistant_response.tool_calls = accumulated_tool_calls - if not has_appended_streaming_message and ( - assistant_response.content or assistant_response.tool_calls - ): - messages_to_save.append(assistant_response) - - # Add tool response messages after assistant message - messages_to_save.extend(tool_response_messages) - - if not has_saved_assistant_message: - if messages_to_save: - session.messages.extend(messages_to_save) - if messages_to_save or has_appended_streaming_message: - await upsert_chat_session(session) - - if not has_yielded_error: - error_message = str(e) - if not is_retryable: - error_message = f"Non-retryable error: {error_message}" - elif retry_count >= config.max_retries: - error_message = f"Max retries ({config.max_retries}) exceeded: {error_message}" - - error_response = StreamError(errorText=error_message) - yield error_response - if not has_yielded_end: - yield StreamFinish() - return - - # Handle retry outside of exception handler to avoid nesting - if should_retry and retry_count < config.max_retries: - logger.info( - f"Retrying stream_chat_completion for session {session_id}, attempt {retry_count + 1}" - ) - async for chunk in stream_chat_completion( - session_id=session.session_id, - user_id=user_id, - retry_count=retry_count + 1, - session=session, - context=context, - ): + last_cache_time = current_time + last_cache_content_len = content_len + yield chunk + elif isinstance(chunk, StreamTextEnd): + # Emit text-end after text completes + if has_received_text and not text_streaming_ended: + text_streaming_ended = True yield chunk - return # Exit after retry to avoid double-saving in finally block + elif isinstance(chunk, StreamToolInputStart): + # Emit text-end before first tool call, but only if we've received text + if has_received_text and not text_streaming_ended: + yield StreamTextEnd(id=text_block_id) + text_streaming_ended = True + yield chunk + elif isinstance(chunk, StreamToolInputAvailable): + # Accumulate tool calls in OpenAI format + accumulated_tool_calls.append( + { + "id": chunk.toolCallId, + "type": "function", + "function": { + "name": chunk.toolName, + "arguments": 
orjson.dumps(chunk.input).decode("utf-8"), + }, + } + ) + yield chunk + elif isinstance(chunk, StreamToolOutputAvailable): + result_content = ( + chunk.output + if isinstance(chunk.output, str) + else orjson.dumps(chunk.output).decode("utf-8") + ) + tool_response_messages.append( + ChatMessage( + role="tool", + content=result_content, + tool_call_id=chunk.toolCallId, + ) + ) + has_done_tool_call = True + # Track if any tool execution failed + if not chunk.success: + logger.warning( + f"Tool {chunk.toolName} (ID: {chunk.toolCallId}) execution failed" + ) + yield chunk + elif isinstance(chunk, StreamFinish): + if not has_done_tool_call: + # Emit text-end before finish if we received text but haven't closed it + if has_received_text and not text_streaming_ended: + yield StreamTextEnd(id=text_block_id) + text_streaming_ended = True + + # Save assistant message before yielding finish to ensure it's persisted + # even if client disconnects immediately after receiving StreamFinish + if not has_saved_assistant_message: + messages_to_save_early: list[ChatMessage] = [] + if accumulated_tool_calls: + assistant_response.tool_calls = accumulated_tool_calls + if not has_appended_streaming_message and ( + assistant_response.content or assistant_response.tool_calls + ): + messages_to_save_early.append(assistant_response) + messages_to_save_early.extend(tool_response_messages) + + if messages_to_save_early: + session.messages.extend(messages_to_save_early) + logger.info( + f"Saving assistant message before StreamFinish: " + f"content_len={len(assistant_response.content or '')}, " + f"tool_calls={len(assistant_response.tool_calls or [])}, " + f"tool_responses={len(tool_response_messages)}" + ) + if messages_to_save_early or has_appended_streaming_message: + await upsert_chat_session(session) + has_saved_assistant_message = True + + has_yielded_end = True + yield chunk + elif isinstance(chunk, StreamError): + has_yielded_error = True + yield chunk + elif isinstance(chunk, StreamUsage): + session.usage.append( + Usage( + prompt_tokens=chunk.promptTokens, + completion_tokens=chunk.completionTokens, + total_tokens=chunk.totalTokens, + ) + ) + else: + logger.error(f"Unknown chunk type: {type(chunk)}", exc_info=True) + + except CancelledError: + if not has_saved_assistant_message: + if accumulated_tool_calls: + assistant_response.tool_calls = accumulated_tool_calls + if assistant_response.content: + assistant_response.content = ( + f"{assistant_response.content}\n\n[interrupted]" + ) + else: + assistant_response.content = "[interrupted]" + if not has_appended_streaming_message: + session.messages.append(assistant_response) + if tool_response_messages: + session.messages.extend(tool_response_messages) + try: + await upsert_chat_session(session) + except Exception as e: + logger.warning( + f"Failed to save interrupted session {session.session_id}: {e}" + ) + raise + except Exception as e: + logger.error(f"Error during stream: {e!s}", exc_info=True) + + # Check if this is a retryable error (JSON parsing, incomplete tool calls, etc.) + is_retryable = isinstance(e, (orjson.JSONDecodeError, KeyError, TypeError)) + + if is_retryable and retry_count < config.max_retries: + logger.info( + f"Retryable error encountered. 
Attempt {retry_count + 1}/{config.max_retries}" + ) + should_retry = True + else: + # Non-retryable error or max retries exceeded + # Save any partial progress before reporting error + messages_to_save: list[ChatMessage] = [] + + # Add assistant message if it has content or tool calls + if accumulated_tool_calls: + assistant_response.tool_calls = accumulated_tool_calls + if not has_appended_streaming_message and ( + assistant_response.content or assistant_response.tool_calls + ): + messages_to_save.append(assistant_response) + + # Add tool response messages after assistant message + messages_to_save.extend(tool_response_messages) - # Normal completion path - save session and handle tool call continuation - # Only save if we haven't already saved when StreamFinish was received if not has_saved_assistant_message: - logger.info( - f"Normal completion path: session={session.session_id}, " - f"current message_count={len(session.messages)}" - ) - - # Build the messages list in the correct order - messages_to_save: list[ChatMessage] = [] - - # Add assistant message with tool_calls if any - if accumulated_tool_calls: - assistant_response.tool_calls = accumulated_tool_calls - logger.info( - f"Added {len(accumulated_tool_calls)} tool calls to assistant message" - ) - if not has_appended_streaming_message and ( - assistant_response.content or assistant_response.tool_calls - ): - messages_to_save.append(assistant_response) - logger.info( - f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}" - ) - - # Add tool response messages after assistant message - messages_to_save.extend(tool_response_messages) - logger.info( - f"Saving {len(tool_response_messages)} tool response messages, " - f"total_to_save={len(messages_to_save)}" - ) - if messages_to_save: session.messages.extend(messages_to_save) - logger.info( - f"Extended session messages, new message_count={len(session.messages)}" - ) if messages_to_save or has_appended_streaming_message: await upsert_chat_session(session) - else: - logger.info( - "Assistant message already saved when StreamFinish was received, " - "skipping duplicate save" - ) - # If we did a tool call, stream the chat completion again to get the next response - if has_done_tool_call: - logger.info( - "Tool call executed, streaming chat completion again to get assistant response" - ) - async for chunk in stream_chat_completion( - session_id=session.session_id, - user_id=user_id, - session=session, # Pass session object to avoid Redis refetch - context=context, - tool_call_response=str(tool_response_messages), - ): - yield chunk + if not has_yielded_error: + error_message = str(e) + if not is_retryable: + error_message = f"Non-retryable error: {error_message}" + elif retry_count >= config.max_retries: + error_message = ( + f"Max retries ({config.max_retries}) exceeded: {error_message}" + ) + + error_response = StreamError(errorText=error_message) + yield error_response + if not has_yielded_end: + yield StreamFinish() + return + + # Handle retry outside of exception handler to avoid nesting + if should_retry and retry_count < config.max_retries: + logger.info( + f"Retrying stream_chat_completion for session {session_id}, attempt {retry_count + 1}" + ) + async for chunk in stream_chat_completion( + session_id=session.session_id, + user_id=user_id, + retry_count=retry_count + 1, + session=session, + context=context, + ): + yield chunk + return # Exit after retry to avoid double-saving in finally block + + 
# Normal completion path - save session and handle tool call continuation + # Only save if we haven't already saved when StreamFinish was received + if not has_saved_assistant_message: + logger.info( + f"Normal completion path: session={session.session_id}, " + f"current message_count={len(session.messages)}" + ) + + # Build the messages list in the correct order + messages_to_save: list[ChatMessage] = [] + + # Add assistant message with tool_calls if any + if accumulated_tool_calls: + assistant_response.tool_calls = accumulated_tool_calls + logger.info( + f"Added {len(accumulated_tool_calls)} tool calls to assistant message" + ) + if not has_appended_streaming_message and ( + assistant_response.content or assistant_response.tool_calls + ): + messages_to_save.append(assistant_response) + logger.info( + f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}" + ) + + # Add tool response messages after assistant message + messages_to_save.extend(tool_response_messages) + logger.info( + f"Saving {len(tool_response_messages)} tool response messages, " + f"total_to_save={len(messages_to_save)}" + ) + + if messages_to_save: + session.messages.extend(messages_to_save) + logger.info( + f"Extended session messages, new message_count={len(session.messages)}" + ) + if messages_to_save or has_appended_streaming_message: + await upsert_chat_session(session) + else: + logger.info( + "Assistant message already saved when StreamFinish was received, " + "skipping duplicate save" + ) + + # If we did a tool call, stream the chat completion again to get the next response + if has_done_tool_call: + logger.info( + "Tool call executed, streaming chat completion again to get assistant response" + ) + async for chunk in stream_chat_completion( + session_id=session.session_id, + user_id=user_id, + session=session, # Pass session object to avoid Redis refetch + context=context, + tool_call_response=str(tool_response_messages), + ): + yield chunk # Retry configuration for OpenAI API calls diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py b/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py index bd93f0e2a6..fe3d5e8984 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py @@ -3,8 +3,6 @@ import logging from typing import Any -from langfuse import observe - from backend.api.features.chat.model import ChatSession from backend.data.understanding import ( BusinessUnderstandingInput, @@ -61,7 +59,6 @@ and automations for the user's specific needs.""" """Requires authentication to store user-specific data.""" return True - @observe(as_type="tool", name="add_understanding") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py index 00c6d8499b..457e4a4f9b 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py @@ -5,7 +5,6 @@ import re from datetime import datetime, timedelta, timezone from typing import Any -from langfuse import observe from pydantic import BaseModel, field_validator from backend.api.features.chat.model import ChatSession @@ -329,7 +328,6 @@ class AgentOutputTool(BaseTool): 
total_executions=len(available_executions) if available_executions else 1, ) - @observe(as_type="tool", name="view_agent_output") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py index 5a3c44fb94..6469cc4442 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py @@ -3,8 +3,6 @@ import logging from typing import Any -from langfuse import observe - from backend.api.features.chat.model import ChatSession from .agent_generator import ( @@ -75,7 +73,6 @@ class CreateAgentTool(BaseTool): "required": ["description"], } - @observe(as_type="tool", name="create_agent") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py index 777c39a254..df1e4a9c3e 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py @@ -3,8 +3,6 @@ import logging from typing import Any -from langfuse import observe - from backend.api.features.chat.model import ChatSession from .agent_generator import ( @@ -81,7 +79,6 @@ class EditAgentTool(BaseTool): "required": ["agent_id", "changes"], } - @observe(as_type="tool", name="edit_agent") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py index f231ef4484..477522757d 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py @@ -2,8 +2,6 @@ from typing import Any -from langfuse import observe - from backend.api.features.chat.model import ChatSession from .agent_search import search_agents @@ -37,7 +35,6 @@ class FindAgentTool(BaseTool): "required": ["query"], } - @observe(as_type="tool", name="find_agent") async def _execute( self, user_id: str | None, session: ChatSession, **kwargs ) -> ToolResponseBase: diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py index fc20fdfc4a..a5e66f0a1c 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py @@ -1,7 +1,6 @@ import logging from typing import Any -from langfuse import observe from prisma.enums import ContentType from backend.api.features.chat.model import ChatSession @@ -56,7 +55,6 @@ class FindBlockTool(BaseTool): def requires_auth(self) -> bool: return True - @observe(as_type="tool", name="find_block") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py index d9b5edfa9b..108fba75ae 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py @@ -2,8 +2,6 @@ from typing import Any -from langfuse import observe - from backend.api.features.chat.model import ChatSession from .agent_search 
import search_agents @@ -43,7 +41,6 @@ class FindLibraryAgentTool(BaseTool): def requires_auth(self) -> bool: return True - @observe(as_type="tool", name="find_library_agent") async def _execute( self, user_id: str | None, session: ChatSession, **kwargs ) -> ToolResponseBase: diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py b/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py index b2fdcccfcd..7040cd7db5 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py @@ -4,8 +4,6 @@ import logging from pathlib import Path from typing import Any -from langfuse import observe - from backend.api.features.chat.model import ChatSession from backend.api.features.chat.tools.base import BaseTool from backend.api.features.chat.tools.models import ( @@ -73,7 +71,6 @@ class GetDocPageTool(BaseTool): url_path = path.rsplit(".", 1)[0] if "." in path else path return f"{DOCS_BASE_URL}/{url_path}" - @observe(as_type="tool", name="get_doc_page") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py index 88d432a797..a7fa65348a 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py @@ -3,7 +3,6 @@ import logging from typing import Any -from langfuse import observe from pydantic import BaseModel, Field, field_validator from backend.api.features.chat.config import ChatConfig @@ -159,7 +158,6 @@ class RunAgentTool(BaseTool): """All operations require authentication.""" return True - @observe(as_type="tool", name="run_agent") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py index 0d233fcfec..3f57236564 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py @@ -4,8 +4,6 @@ import logging from collections import defaultdict from typing import Any -from langfuse import observe - from backend.api.features.chat.model import ChatSession from backend.data.block import get_block from backend.data.execution import ExecutionContext @@ -130,7 +128,6 @@ class RunBlockTool(BaseTool): return matched_credentials, missing_credentials - @observe(as_type="tool", name="run_block") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py b/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py index 4903230b40..edb0c0de1e 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py @@ -3,7 +3,6 @@ import logging from typing import Any -from langfuse import observe from prisma.enums import ContentType from backend.api.features.chat.model import ChatSession @@ -88,7 +87,6 @@ class SearchDocsTool(BaseTool): url_path = path.rsplit(".", 1)[0] if "." 
in path else path return f"{DOCS_BASE_URL}/{url_path}" - @observe(as_type="tool", name="search_docs") async def _execute( self, user_id: str | None, From 91c78968599a8bbe9ac15925a798a4023fb5acf9 Mon Sep 17 00:00:00 2001 From: Bently Date: Tue, 27 Jan 2026 15:37:17 +0100 Subject: [PATCH 08/21] fix(backend): implement context window management for long chat sessions (#11848) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes 🏗️ Implements automatic context window management to prevent chat failures when conversations exceed token limits. ### Problem - **Issue**: [SECRT-1800] Long chat conversations stop working when context grows beyond model limits (~113k tokens observed) - **Root Cause**: Chat service sends ALL messages to LLM without token-aware compression, eventually exceeding Claude Opus 4.5's 200k context window ### Solution Implements a sliding window with summarization strategy: 1. Monitors token count before sending to LLM (triggers at 120k tokens) 2. Keeps last 15 messages completely intact (preserves recent conversation flow) 3. Summarizes older messages using the same model as the chat (for higher-quality summaries) 4. Rebuilds context: `[system_prompt] + [summary] + [recent_15_messages]` 5. Full history preserved in database (only compresses when sending to LLM) ### Changes Made - **Added** `_summarize_messages()` helper function to create concise summaries using the chat model - **Modified** `_stream_chat_chunks()` to implement token counting and conditional summarization - **Integrated** existing `estimate_token_count()` utility for accurate token measurement - **Added** graceful fallback - continues with original messages if summarization fails ## Motivation and Context 🎯 Without context management, users with long chat sessions (250+ messages) experience: - Complete chat failure when hitting 200k token limit - Lost conversation context - Poor user experience This fix enables: - ✅ Unlimited conversation length - ✅ Transparent operation (no UX changes) - ✅ Preserved conversation quality (recent messages intact) - ✅ Cost-efficient (one extra summarization call per compaction) ## Testing 🧪 ### Expected Behavior - Conversations < 120k tokens: No change (normal operation) - Conversations > 120k tokens: - Log message: `Context summarized: {tokens} tokens, summarized {n} old messages, kept last 15 messages` - Chat continues working smoothly - Recent context remains intact ### How to Verify 1. Start a chat session in copilot 2. Send 250-600 messages (or 50+ with large code blocks) 3. Check logs for "Context summarized:" message 4. Verify chat continues working without errors 5. Verify conversation quality remains good
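A condensed sketch of the compaction step may help here. This is a simplified model of the logic added to `_stream_chat_chunks()` (the progressive-truncation fallback is omitted), where `count_tokens` and `summarize` are hypothetical stand-ins for the real `estimate_token_count()` and `_summarize_messages()` helpers:

```python
TOKEN_THRESHOLD = 120_000  # trigger point used by this patch
KEEP_RECENT = 15           # recent messages kept verbatim

def compact_context(messages: list[dict], count_tokens, summarize) -> list[dict]:
    """Return a message list small enough to send to the LLM."""
    if count_tokens(messages) <= TOKEN_THRESHOLD:
        return messages  # under the limit: send the full history unchanged

    has_system = bool(messages) and messages[0].get("role") == "system"
    system = messages[:1] if has_system else []
    old = messages[len(system):-KEEP_RECENT]
    if not old:
        return messages  # nothing older than the window; truncation handles this case

    summary = {
        # assistant role, not system: avoids promoting user-influenced
        # content to instruction-level authority
        "role": "assistant",
        "content": f"[Previous conversation summary — for context only]: {summarize(old)}",
    }
    return system + [summary] + messages[-KEEP_RECENT:]
```

The full history still lives in the database; only the request payload sent to the model is compacted.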
## Checklist ✅ - [x] My code follows the style guidelines of this project - [x] I have performed a self-review of my own code - [x] I have commented my code, particularly in hard-to-understand areas - [x] My changes generate no new warnings - [x] I have tested my changes and verified they work as expected --- .../backend/api/features/chat/service.py | 392 +++++++++++++++++- 1 file changed, 390 insertions(+), 2 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index 3976cd5f38..f8336b9107 100644 --- a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -628,6 +628,101 @@ def _is_region_blocked_error(error: Exception) -> bool: return "not available in your region" in str(error).lower() +async def _summarize_messages( + messages: list, + model: str, + api_key: str | None = None, + base_url: str | None = None, + timeout: float = 30.0, +) -> str: + """Summarize a list of messages into concise context. + + Uses the same model as the chat for higher quality summaries. + + Args: + messages: List of message dicts to summarize + model: Model to use for summarization (same as chat model) + api_key: API key for OpenAI client + base_url: Base URL for OpenAI client + timeout: Request timeout in seconds (default: 30.0) + + Returns: + Summarized text + """ + # Format messages for summarization + conversation = [] + for msg in messages: + role = msg.get("role", "") + content = msg.get("content", "") + # Include user, assistant, and tool messages (tool outputs are important context) + if content and role in ("user", "assistant", "tool"): + conversation.append(f"{role.upper()}: {content}") + + conversation_text = "\n\n".join(conversation) + + # Handle empty conversation + if not conversation_text: + return "No conversation history available." + + # Truncate conversation to fit within the summarization request's context + # Limit to ~25k tokens (~100k chars) so the request stays well within the chat model's context window + MAX_CHARS = 100_000 + if len(conversation_text) > MAX_CHARS: + conversation_text = conversation_text[:MAX_CHARS] + "\n\n[truncated]" + + # Call LLM to summarize + import openai + + summarization_client = openai.AsyncOpenAI( + api_key=api_key, base_url=base_url, timeout=timeout + ) + + response = await summarization_client.chat.completions.create( + model=model, + messages=[ + { + "role": "system", + "content": ( + "Create a detailed summary of the conversation so far. " + "This summary will be used as context when continuing the conversation.\n\n" + "Before writing the summary, analyze each message chronologically to identify:\n" + "- User requests and their explicit goals\n" + "- Your approach and key decisions made\n" + "- Technical specifics (file names, tool outputs, function signatures)\n" + "- Errors encountered and resolutions applied\n\n" + "You MUST include ALL of the following sections:\n\n" + "## 1. Primary Request and Intent\n" + "The user's explicit goals and what they are trying to accomplish.\n\n" + "## 2. Key Technical Concepts\n" + "Technologies, frameworks, tools, and patterns being used or discussed.\n\n" + "## 3. Files and Resources Involved\n" + "Specific files examined or modified, with relevant snippets and identifiers.\n\n" + "## 4. Errors and Fixes\n" + "Problems encountered, error messages, and their resolutions. " + "Include any user feedback on fixes.\n\n" + "## 5. 
Problem Solving\n" + "Issues that have been resolved and how they were addressed.\n\n" + "## 6. All User Messages\n" + "A complete list of all user inputs (excluding tool outputs) to preserve their exact requests.\n\n" + "## 7. Pending Tasks\n" + "Work items the user explicitly requested that have not yet been completed.\n\n" + "## 8. Current Work\n" + "Precise description of what was being worked on most recently, including relevant context.\n\n" + "## 9. Next Steps\n" + "What should happen next, aligned with the user's most recent requests. " + "Include verbatim quotes of recent instructions if relevant." + ), + }, + {"role": "user", "content": f"Summarize:\n\n{conversation_text}"}, + ], + max_tokens=1500, + temperature=0.3, + ) + + summary = response.choices[0].message.content + return summary or "No summary available." + + async def _stream_chat_chunks( session: ChatSession, tools: list[ChatCompletionToolParam], @@ -664,6 +759,292 @@ async def _stream_chat_chunks( ) messages = [system_message] + messages + # Apply context window management + token_count = 0 # Initialize for exception handler + try: + from backend.util.prompt import estimate_token_count + + # Convert to dict for token counting + # OpenAI message types are TypedDicts, so they're already dict-like + messages_dict = [] + for msg in messages: + # TypedDict objects are already dicts, just filter None values + if isinstance(msg, dict): + msg_dict = {k: v for k, v in msg.items() if v is not None} + else: + # Fallback for unexpected types + msg_dict = dict(msg) + messages_dict.append(msg_dict) + + # Estimate tokens using appropriate tokenizer + # Normalize model name for token counting (tiktoken only supports OpenAI models) + token_count_model = model + if "/" in model: + # Strip provider prefix (e.g., "anthropic/claude-opus-4.5" -> "claude-opus-4.5") + token_count_model = model.split("/")[-1] + + # For Claude and other non-OpenAI models, approximate with gpt-4o tokenizer + # Most modern LLMs have similar tokenization (~1 token per 4 chars) + if "claude" in token_count_model.lower() or not any( + known in token_count_model.lower() + for known in ["gpt", "o1", "chatgpt", "text-"] + ): + token_count_model = "gpt-4o" + + # Attempt token counting with error handling + try: + token_count = estimate_token_count(messages_dict, model=token_count_model) + except Exception as token_error: + # If token counting fails, use gpt-4o as fallback approximation + logger.warning( + f"Token counting failed for model {token_count_model}: {token_error}. " + "Using gpt-4o approximation." 
+ ) + token_count = estimate_token_count(messages_dict, model="gpt-4o") + + # If over threshold, summarize old messages + if token_count > 120_000: + KEEP_RECENT = 15 + + # Check if we have a system prompt at the start + has_system_prompt = ( + len(messages) > 0 and messages[0].get("role") == "system" + ) + + # Always attempt mitigation when over limit, even with few messages + if messages: + # Split messages based on whether system prompt exists + recent_messages = messages[-KEEP_RECENT:] + + if has_system_prompt: + # Keep system prompt separate, summarize everything between system and recent + system_msg = messages[0] + old_messages_dict = messages_dict[1:-KEEP_RECENT] + else: + # No system prompt, summarize everything except recent + system_msg = None + old_messages_dict = messages_dict[:-KEEP_RECENT] + + # Summarize any non-empty old messages (no minimum threshold) + # If we're over the token limit, we need to compress whatever we can + if old_messages_dict: + # Summarize old messages using the same model as chat + summary_text = await _summarize_messages( + old_messages_dict, + model=model, + api_key=config.api_key, + base_url=config.base_url, + ) + + # Build new message list + # Use assistant role (not system) to prevent privilege escalation + # of user-influenced content to instruction-level authority + from openai.types.chat import ChatCompletionAssistantMessageParam + + summary_msg = ChatCompletionAssistantMessageParam( + role="assistant", + content=( + "[Previous conversation summary — for context only]: " + f"{summary_text}" + ), + ) + + # Rebuild messages based on whether we have a system prompt + if has_system_prompt: + # system_prompt + summary + recent_messages + messages = [system_msg, summary_msg] + recent_messages + else: + # summary + recent_messages (no original system prompt) + messages = [summary_msg] + recent_messages + + logger.info( + f"Context summarized: {token_count} tokens, " + f"summarized {len(old_messages_dict)} old messages, " + f"kept last {KEEP_RECENT} messages" + ) + + # Fallback: If still over limit after summarization, progressively drop recent messages + # This handles edge cases where recent messages are extremely large + new_messages_dict = [] + for msg in messages: + if isinstance(msg, dict): + msg_dict = {k: v for k, v in msg.items() if v is not None} + else: + msg_dict = dict(msg) + new_messages_dict.append(msg_dict) + + new_token_count = estimate_token_count( + new_messages_dict, model=token_count_model + ) + + if new_token_count > 120_000: + # Still over limit - progressively reduce KEEP_RECENT + logger.warning( + f"Still over limit after summarization: {new_token_count} tokens. " + "Reducing number of recent messages kept." 
+ ) + + for keep_count in [12, 10, 8, 5, 3, 2, 1, 0]: + if keep_count == 0: + # Try with just system prompt + summary (no recent messages) + if has_system_prompt: + messages = [system_msg, summary_msg] + else: + messages = [summary_msg] + logger.info( + "Trying with 0 recent messages (system + summary only)" + ) + else: + # Slice from ORIGINAL recent_messages to avoid duplicating summary + reduced_recent = ( + recent_messages[-keep_count:] + if len(recent_messages) >= keep_count + else recent_messages + ) + if has_system_prompt: + messages = [ + system_msg, + summary_msg, + ] + reduced_recent + else: + messages = [summary_msg] + reduced_recent + + new_messages_dict = [] + for msg in messages: + if isinstance(msg, dict): + msg_dict = { + k: v for k, v in msg.items() if v is not None + } + else: + msg_dict = dict(msg) + new_messages_dict.append(msg_dict) + + new_token_count = estimate_token_count( + new_messages_dict, model=token_count_model + ) + + if new_token_count <= 120_000: + logger.info( + f"Reduced to {keep_count} recent messages, " + f"now {new_token_count} tokens" + ) + break + else: + logger.error( + f"Unable to reduce token count below threshold even with 0 messages. " + f"Final count: {new_token_count} tokens" + ) + # ABSOLUTE LAST RESORT: Drop system prompt + # This should only happen if summary itself is massive + if has_system_prompt and len(messages) > 1: + messages = messages[1:] # Drop system prompt + logger.critical( + "CRITICAL: Dropped system prompt as absolute last resort. " + "Behavioral consistency may be affected." + ) + # Yield error to user + yield StreamError( + errorText=( + "Warning: System prompt dropped due to size constraints. " + "Assistant behavior may be affected." + ) + ) + else: + # No old messages to summarize - all messages are "recent" + # Apply progressive truncation to reduce token count + logger.warning( + f"Token count {token_count} exceeds threshold but no old messages to summarize. " + f"Applying progressive truncation to recent messages." 
+ ) + + # Create a base list excluding system prompt to avoid duplication + # This is the pool of messages we'll slice from in the loop + base_msgs = messages[1:] if has_system_prompt else messages + + # Try progressively smaller keep counts + new_token_count = token_count # Initialize with current count + for keep_count in [12, 10, 8, 5, 3, 2, 1, 0]: + if keep_count == 0: + # Try with just system prompt (no recent messages) + if has_system_prompt: + messages = [system_msg] + logger.info( + "Trying with 0 recent messages (system prompt only)" + ) + else: + # No system prompt and no recent messages = empty messages list + # This is invalid, skip this iteration + continue + else: + if len(base_msgs) < keep_count: + continue # Skip if we don't have enough messages + + # Slice from base_msgs to get recent messages (without system prompt) + recent_messages = base_msgs[-keep_count:] + + if has_system_prompt: + messages = [system_msg] + recent_messages + else: + messages = recent_messages + + new_messages_dict = [] + for msg in messages: + if msg is None: + continue # Skip None messages (type safety) + if isinstance(msg, dict): + msg_dict = { + k: v for k, v in msg.items() if v is not None + } + else: + msg_dict = dict(msg) + new_messages_dict.append(msg_dict) + + new_token_count = estimate_token_count( + new_messages_dict, model=token_count_model + ) + + if new_token_count <= 120_000: + logger.info( + f"Reduced to {keep_count} recent messages, " + f"now {new_token_count} tokens" + ) + break + else: + # Even with 0 messages still over limit + logger.error( + f"Unable to reduce token count below threshold even with 0 messages. " + f"Final count: {new_token_count} tokens. Messages may be extremely large." + ) + # ABSOLUTE LAST RESORT: Drop system prompt + if has_system_prompt and len(messages) > 1: + messages = messages[1:] # Drop system prompt + logger.critical( + "CRITICAL: Dropped system prompt as absolute last resort. " + "Behavioral consistency may be affected." + ) + # Yield error to user + yield StreamError( + errorText=( + "Warning: System prompt dropped due to size constraints. " + "Assistant behavior may be affected." + ) + ) + + except Exception as e: + logger.error(f"Context summarization failed: {e}", exc_info=True) + # If we were over the token limit, yield error to user + # Don't silently continue with oversized messages that will fail + if token_count > 120_000: + yield StreamError( + errorText=( + f"Unable to manage context window (token limit exceeded: {token_count} tokens). " + "Context summarization failed. Please start a new conversation." 
+ ) + ) + yield StreamFinish() + return + # Otherwise, continue with original messages (under limit) + # Loop to handle tool calls and continue conversation while True: retry_count = 0 @@ -691,13 +1072,20 @@ async def _stream_chat_chunks( ] # OpenRouter limit # Create the stream with proper types + from typing import cast + + from openai.types.chat import ( + ChatCompletionMessageParam, + ChatCompletionStreamOptionsParam, + ) + stream = await client.chat.completions.create( model=model, - messages=messages, + messages=cast(list[ChatCompletionMessageParam], messages), tools=tools, tool_choice="auto", stream=True, - stream_options={"include_usage": True}, + stream_options=ChatCompletionStreamOptionsParam(include_usage=True), extra_body=extra_body, ) From fac10c422bb49b164ab205142694199990136122 Mon Sep 17 00:00:00 2001 From: Swifty Date: Tue, 27 Jan 2026 15:41:58 +0100 Subject: [PATCH 09/21] fix(backend): add SSE heartbeats to prevent tool execution timeouts (#11855) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary Long-running chat tools (like `create_agent` and `edit_agent`) were timing out because no SSE data was sent during tool execution. GCP load balancers and proxies have idle connection timeouts (~60 seconds), and when the external Agent Generator service takes longer than this, the connection would drop. This PR adds SSE heartbeat comments during tool execution to keep connections alive. ### Changes 🏗️ - **response_model.py**: Added `StreamHeartbeat` response type that emits SSE comments (`: heartbeat\n\n`) - **service.py**: Modified `_yield_tool_call()` to: - Run tool execution in a background asyncio task - Yield heartbeat events every 15 seconds while waiting - Handle task failures with explicit error responses (no silent failures) - Handle cancellation gracefully - **create_agent.py**: Improved error messages with more context and details - **edit_agent.py**: Improved error messages with more context and details ### How It Works ``` Tool Call → Background Task Started │ ├── Every 15 seconds: yield `: heartbeat\n\n` (SSE comment) │ └── Task Complete → yield tool result OR error response ``` SSE comments (`: heartbeat\n\n`) are: - Ignored by SSE clients (don't trigger events) - Keep TCP connections alive through proxies/load balancers - Don't affect the AI SDK data protocol ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] All chat service tests pass (17 tests) - [x] Verified heartbeats are sent during long tool execution - [x] Verified errors are properly reported to frontend --- .../api/features/chat/response_model.py | 18 ++++++ .../backend/api/features/chat/service.py | 58 +++++++++++++++++-- .../api/features/chat/tools/create_agent.py | 14 +++-- .../api/features/chat/tools/edit_agent.py | 5 +- 4 files changed, 83 insertions(+), 12 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/chat/response_model.py b/autogpt_platform/backend/backend/api/features/chat/response_model.py index 49a9b38e8f..53a8cf3a1f 100644 --- a/autogpt_platform/backend/backend/api/features/chat/response_model.py +++ b/autogpt_platform/backend/backend/api/features/chat/response_model.py @@ -31,6 +31,7 @@ class ResponseType(str, Enum): # Other ERROR = "error" USAGE = "usage" + HEARTBEAT = "heartbeat" class StreamBaseResponse(BaseModel): @@ -142,3 +143,20 @@ class 
StreamError(StreamBaseResponse): details: dict[str, Any] | None = Field( default=None, description="Additional error details" ) + + +class StreamHeartbeat(StreamBaseResponse): + """Heartbeat to keep SSE connection alive during long-running operations. + + Uses SSE comment format (: comment) which is ignored by clients but keeps + the connection alive through proxies and load balancers. + """ + + type: ResponseType = ResponseType.HEARTBEAT + toolCallId: str | None = Field( + default=None, description="Tool call ID if heartbeat is for a specific tool" + ) + + def to_sse(self) -> str: + """Convert to SSE comment format to keep connection alive.""" + return ": heartbeat\n\n" diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index f8336b9107..386b37784d 100644 --- a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -38,6 +38,7 @@ from .response_model import ( StreamBaseResponse, StreamError, StreamFinish, + StreamHeartbeat, StreamStart, StreamTextDelta, StreamTextEnd, @@ -48,6 +49,7 @@ from .response_model import ( StreamUsage, ) from .tools import execute_tool, tools +from .tools.models import ErrorResponse from .tracking import track_user_message logger = logging.getLogger(__name__) @@ -1258,6 +1260,9 @@ async def _yield_tool_call( """ Yield a tool call and its execution result. + For long-running tools, yields heartbeat events every 15 seconds to keep + the SSE connection alive through proxies and load balancers. + Raises: orjson.JSONDecodeError: If tool call arguments cannot be parsed as JSON KeyError: If expected tool call fields are missing @@ -1280,12 +1285,53 @@ async def _yield_tool_call( input=arguments, ) - tool_execution_response: StreamToolOutputAvailable = await execute_tool( - tool_name=tool_name, - parameters=arguments, - tool_call_id=tool_call_id, - user_id=session.user_id, - session=session, + # Run tool execution in background task with heartbeats to keep connection alive + tool_task = asyncio.create_task( + execute_tool( + tool_name=tool_name, + parameters=arguments, + tool_call_id=tool_call_id, + user_id=session.user_id, + session=session, + ) ) + # Yield heartbeats every 15 seconds while waiting for tool to complete + heartbeat_interval = 15.0 # seconds + while not tool_task.done(): + try: + # Wait for either the task to complete or the heartbeat interval + await asyncio.wait_for( + asyncio.shield(tool_task), timeout=heartbeat_interval + ) + except asyncio.TimeoutError: + # Task still running, send heartbeat to keep connection alive + logger.debug(f"Sending heartbeat for tool {tool_name} ({tool_call_id})") + yield StreamHeartbeat(toolCallId=tool_call_id) + except CancelledError: + # Task was cancelled, clean up and propagate + tool_task.cancel() + logger.warning(f"Tool execution cancelled: {tool_name} ({tool_call_id})") + raise + + # Get the result - handle any exceptions that occurred during execution + try: + tool_execution_response: StreamToolOutputAvailable = await tool_task + except Exception as e: + # Task raised an exception - ensure we send an error response to the frontend + logger.error( + f"Tool execution failed: {tool_name} ({tool_call_id}): {e}", exc_info=True + ) + error_response = ErrorResponse( + message=f"Tool execution failed: {e!s}", + error=type(e).__name__, + session_id=session.session_id, + ) + tool_execution_response = StreamToolOutputAvailable( + toolCallId=tool_call_id, + 
toolName=tool_name, + output=error_response.model_dump_json(), + success=False, + ) + yield tool_execution_response diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py index 6469cc4442..87ca5ebca7 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py @@ -113,8 +113,11 @@ class CreateAgentTool(BaseTool): if decomposition_result is None: return ErrorResponse( - message="Failed to analyze the goal. Please try rephrasing.", - error="Decomposition failed", + message="Failed to analyze the goal. The agent generation service may be unavailable or timed out. Please try again.", + error="decomposition_failed", + details={ + "description": description[:100] + }, # Include context for debugging session_id=session_id, ) @@ -179,8 +182,11 @@ class CreateAgentTool(BaseTool): if agent_json is None: return ErrorResponse( - message="Failed to generate the agent. Please try again.", - error="Generation failed", + message="Failed to generate the agent. The agent generation service may be unavailable or timed out. Please try again.", + error="generation_failed", + details={ + "description": description[:100] + }, # Include context for debugging session_id=session_id, ) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py index df1e4a9c3e..d65b050f06 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py @@ -142,8 +142,9 @@ class EditAgentTool(BaseTool): if result is None: return ErrorResponse( - message="Failed to generate changes. Please try rephrasing.", - error="Update generation failed", + message="Failed to generate changes. The agent generation service may be unavailable or timed out. Please try again.", + error="update_generation_failed", + details={"agent_id": agent_id, "changes": changes[:100]}, session_id=session_id, )
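To make the heartbeat pattern from this patch concrete, here is a minimal, hedged sketch of wrapping a long-running coroutine so the SSE stream emits keep-alive comments while it runs. `slow_tool` is a hypothetical stand-in for the real `execute_tool()` call, the interval mirrors the 15 seconds used in `_yield_tool_call()`, and the error-to-`ErrorResponse` conversion from the patch is left out for brevity:

```python
import asyncio
from typing import AsyncIterator, Awaitable

HEARTBEAT_INTERVAL = 15.0  # seconds, matching _yield_tool_call() in this patch


async def with_heartbeats(work: Awaitable[str]) -> AsyncIterator[str]:
    """Yield SSE keep-alive comments until `work` finishes, then its result."""
    task = asyncio.ensure_future(work)
    while not task.done():
        try:
            # shield() keeps the timeout from cancelling the underlying task
            await asyncio.wait_for(asyncio.shield(task), timeout=HEARTBEAT_INTERVAL)
        except asyncio.TimeoutError:
            # SSE comment: ignored by clients, keeps proxies from idling out
            yield ": heartbeat\n\n"
    yield task.result()  # task finished; a failure would have raised above


async def slow_tool() -> str:  # hypothetical stand-in for a slow tool call
    await asyncio.sleep(40)
    return 'data: {"done": true}\n\n'


async def main() -> None:
    async for event in with_heartbeats(slow_tool()):
        print(repr(event))


asyncio.run(main())
```

Because the heartbeat is an SSE comment rather than a data event, the client-side protocol never sees it; only the TCP connection benefits.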
From 3e9d5d0d50a81d87f2f7de3e1e91db91e21c0d3d Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Tue, 27 Jan 2026 08:43:31 -0600 Subject: [PATCH 10/21] fix(backend): handle race condition in review processing gracefully (#11845) ## Summary - Fixes race condition when multiple concurrent requests try to process the same reviews (e.g., double-click, multiple browser tabs) - Previously the second request would fail with "Reviews not found, access denied, or not in WAITING status" - Now handles this gracefully by treating already-processed reviews with the same decision as success ## Changes - Renamed `get_pending_reviews_by_node_exec_ids()` to `get_reviews_by_node_exec_ids()`, which fetches reviews regardless of status - Modified `process_all_reviews_for_execution()` to handle already-processed reviews - Updated route to use idempotent validation ## Test plan - [x] Linter passes (`poetry run ruff check`) - [x] Type checker passes (`poetry run pyright`) - [x] Formatter passes (`poetry run format`) - [ ] Manual testing: double-click approve button should not cause errors Fixes AUTOGPT-SERVER-7HE --- .../executions/review/review_routes_test.py | 44 ++++++------- .../api/features/executions/review/routes.py | 10 +-- .../backend/backend/data/human_review.py | 65 ++++++++++++++----- 3 files changed, 77 insertions(+), 42 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/executions/review/review_routes_test.py b/autogpt_platform/backend/backend/api/features/executions/review/review_routes_test.py index d0c24f2cf8..c8bbfe4bb0 100644 --- a/autogpt_platform/backend/backend/api/features/executions/review/review_routes_test.py +++ b/autogpt_platform/backend/backend/api/features/executions/review/review_routes_test.py @@ -164,9 +164,9 @@ async def test_process_review_action_approve_success( """Test successful review approval""" # Mock the route functions - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) mock_get_reviews_for_user.return_value = {"test_node_123": sample_pending_review} @@ -244,9 +244,9 @@ async def test_process_review_action_reject_success( """Test successful review rejection""" # Mock the route functions - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) mock_get_reviews_for_user.return_value = {"test_node_123": sample_pending_review} @@ -339,9 +339,9 @@ async def test_process_review_action_mixed_success( # Mock the route functions - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) mock_get_reviews_for_user.return_value = { "test_node_123":
sample_pending_review, @@ -463,9 +463,9 @@ async def test_process_review_action_review_not_found( test_user_id: str, ) -> None: """Test error when review is not found""" - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) # Return empty dict to simulate review not found mock_get_reviews_for_user.return_value = {} @@ -506,7 +506,7 @@ async def test_process_review_action_review_not_found( response = await client.post("/api/review/action", json=request_data) assert response.status_code == 404 - assert "No pending review found" in response.json()["detail"] + assert "Review(s) not found" in response.json()["detail"] @pytest.mark.asyncio(loop_scope="session") @@ -517,9 +517,9 @@ async def test_process_review_action_partial_failure( test_user_id: str, ) -> None: """Test handling of partial failures in review processing""" - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) mock_get_reviews_for_user.return_value = {"test_node_123": sample_pending_review} @@ -567,9 +567,9 @@ async def test_process_review_action_invalid_node_exec_id( test_user_id: str, ) -> None: """Test failure when trying to process review with invalid node execution ID""" - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) # Return empty dict to simulate review not found mock_get_reviews_for_user.return_value = {} @@ -596,7 +596,7 @@ async def test_process_review_action_invalid_node_exec_id( # Returns 404 when review is not found assert response.status_code == 404 - assert "No pending review found" in response.json()["detail"] + assert "Review(s) not found" in response.json()["detail"] @pytest.mark.asyncio(loop_scope="session") @@ -607,9 +607,9 @@ async def test_process_review_action_auto_approve_creates_auto_approval_records( test_user_id: str, ) -> None: """Test that auto_approve_future_actions flag creates auto-approval records""" - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) mock_get_reviews_for_user.return_value = {"test_node_123": sample_pending_review} @@ -737,9 +737,9 @@ async def test_process_review_action_without_auto_approve_still_loads_settings( test_user_id: str, ) -> None: """Test that execution context is created with settings even without auto-approve""" - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to 
find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) mock_get_reviews_for_user.return_value = {"test_node_123": sample_pending_review} @@ -885,9 +885,9 @@ async def test_process_review_action_auto_approve_only_applies_to_approved_revie reviewed_at=FIXED_NOW, ) - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) # Need to return both reviews in WAITING state (before processing) approved_review_waiting = PendingHumanReviewModel( @@ -1031,9 +1031,9 @@ async def test_process_review_action_per_review_auto_approve_granularity( test_user_id: str, ) -> None: """Test that auto-approval can be set per-review (granular control)""" - # Mock get_pending_reviews_by_node_exec_ids - return different reviews based on node_exec_id + # Mock get_reviews_by_node_exec_ids - return different reviews based on node_exec_id mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) # Create a mapping of node_exec_id to review diff --git a/autogpt_platform/backend/backend/api/features/executions/review/routes.py b/autogpt_platform/backend/backend/api/features/executions/review/routes.py index a10071e9cb..539c7fd87b 100644 --- a/autogpt_platform/backend/backend/api/features/executions/review/routes.py +++ b/autogpt_platform/backend/backend/api/features/executions/review/routes.py @@ -14,9 +14,9 @@ from backend.data.execution import ( from backend.data.graph import get_graph_settings from backend.data.human_review import ( create_auto_approval_record, - get_pending_reviews_by_node_exec_ids, get_pending_reviews_for_execution, get_pending_reviews_for_user, + get_reviews_by_node_exec_ids, has_pending_reviews_for_graph_exec, process_all_reviews_for_execution, ) @@ -137,17 +137,17 @@ async def process_review_action( detail="At least one review must be provided", ) - # Batch fetch all requested reviews - reviews_map = await get_pending_reviews_by_node_exec_ids( + # Batch fetch all requested reviews (regardless of status for idempotent handling) + reviews_map = await get_reviews_by_node_exec_ids( list(all_request_node_ids), user_id ) - # Validate all reviews were found + # Validate all reviews were found (must exist, any status is OK for now) missing_ids = all_request_node_ids - set(reviews_map.keys()) if missing_ids: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, - detail=f"No pending review found for node execution(s): {', '.join(missing_ids)}", + detail=f"Review(s) not found: {', '.join(missing_ids)}", ) # Validate all reviews belong to the same execution diff --git a/autogpt_platform/backend/backend/data/human_review.py b/autogpt_platform/backend/backend/data/human_review.py index c70eaa7b64..f198043a38 100644 --- a/autogpt_platform/backend/backend/data/human_review.py +++ b/autogpt_platform/backend/backend/data/human_review.py @@ -263,11 +263,14 @@ async def get_pending_review_by_node_exec_id( return PendingHumanReviewModel.from_db(review, node_id=node_id) -async def 
get_pending_reviews_by_node_exec_ids( +async def get_reviews_by_node_exec_ids( node_exec_ids: list[str], user_id: str ) -> dict[str, "PendingHumanReviewModel"]: """ - Get multiple pending reviews by their node execution IDs in a single batch query. + Get multiple reviews by their node execution IDs regardless of status. + + Unlike the pending-only lookup it replaces, this returns reviews in any status + (WAITING, APPROVED, REJECTED). Used for validation in idempotent operations. Args: node_exec_ids: List of node execution IDs to look up @@ -283,7 +286,6 @@ async def get_pending_reviews_by_node_exec_ids( where={ "nodeExecId": {"in": node_exec_ids}, "userId": user_id, - "status": ReviewStatus.WAITING, } ) @@ -407,38 +409,68 @@ async def process_all_reviews_for_execution( ) -> dict[str, PendingHumanReviewModel]: """Process all pending reviews for an execution with approve/reject decisions. + Handles race conditions gracefully: if a review was already processed with the + same decision by a concurrent request, it's treated as success rather than error. + Args: user_id: User ID for ownership validation review_decisions: Map of node_exec_id -> (status, reviewed_data, message) Returns: - Dict of node_exec_id -> updated review model + Dict of node_exec_id -> updated review model (includes already-processed reviews) """ if not review_decisions: return {} node_exec_ids = list(review_decisions.keys()) - # Get all reviews for validation - reviews = await PendingHumanReview.prisma().find_many( + # Get all reviews (both WAITING and already processed) for the user + all_reviews = await PendingHumanReview.prisma().find_many( where={ "nodeExecId": {"in": node_exec_ids}, "userId": user_id, - "status": ReviewStatus.WAITING, }, ) - # Validate all reviews can be processed - if len(reviews) != len(node_exec_ids): - missing_ids = set(node_exec_ids) - {review.nodeExecId for review in reviews} + # Separate into pending and already-processed reviews + reviews_to_process = [] + already_processed = [] + for review in all_reviews: + if review.status == ReviewStatus.WAITING: + reviews_to_process.append(review) + else: + already_processed.append(review) + + # Check for truly missing reviews (not found at all) + found_ids = {review.nodeExecId for review in all_reviews} + missing_ids = set(node_exec_ids) - found_ids + if missing_ids: raise ValueError( - f"Reviews not found, access denied, or not in WAITING status: {', '.join(missing_ids)}" + f"Reviews not found or access denied: {', '.join(missing_ids)}" ) - # Create parallel update tasks + # Validate already-processed reviews have compatible status (same decision) + # This handles race conditions where another request processed the same reviews + for review in already_processed: + requested_status = review_decisions[review.nodeExecId][0] + if review.status != requested_status: + raise ValueError( + f"Review {review.nodeExecId} was already processed with status " + f"{review.status}, cannot change to {requested_status}" + ) + + # Log if we're handling a race condition (some reviews already processed) + if already_processed: + already_processed_ids = [r.nodeExecId for r in already_processed] + logger.info( + f"Race condition handled: {len(already_processed)} review(s) already " + f"processed by concurrent request: {already_processed_ids}" + ) + + # Create parallel update tasks for reviews that still need processing update_tasks = [] - for review in reviews: + for review in reviews_to_process: new_status, reviewed_data, message = review_decisions[review.nodeExecId] has_data_changes = reviewed_data is not None and reviewed_data != review.payload @@ -463,7 +495,7 @@ async def process_all_reviews_for_execution( update_tasks.append(task) # Execute all updates in parallel and get updated reviews - updated_reviews = await asyncio.gather(*update_tasks) + updated_reviews = await asyncio.gather(*update_tasks) if update_tasks else [] # Note: Execution resumption is now handled at the API layer after ALL reviews # for an execution are processed (both approved and rejected) @@ -472,8 +504,11 @@ async def process_all_reviews_for_execution( # Local import to avoid event loop conflicts in tests from backend.data.execution import get_node_execution + # Combine updated reviews with already-processed ones (for idempotent response) + all_result_reviews = list(updated_reviews) + already_processed + result = {} - for review in updated_reviews: + for review in all_result_reviews: node_exec = await get_node_execution(review.nodeExecId) node_id = node_exec.node_id if node_exec else review.nodeExecId result[review.nodeExecId] = PendingHumanReviewModel.from_db(
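As a rough illustration of the idempotency rule this patch adopts (not the actual data layer), the decision check reduces to the following sketch, where `Review` and the in-memory dict stand in for the Prisma model and database:

```python
from dataclasses import dataclass


@dataclass
class Review:  # stand-in for the persisted review row
    node_exec_id: str
    status: str  # "WAITING", "APPROVED", or "REJECTED"


def apply_decisions(reviews: dict[str, Review], decisions: dict[str, str]) -> list[str]:
    """Apply approve/reject decisions; repeating an identical decision is a no-op."""
    missing = decisions.keys() - reviews.keys()
    if missing:
        raise ValueError(f"Reviews not found: {', '.join(sorted(missing))}")

    updated = []
    for node_exec_id, wanted in decisions.items():
        review = reviews[node_exec_id]
        if review.status == "WAITING":
            review.status = wanted  # first request to arrive wins
            updated.append(node_exec_id)
        elif review.status == wanted:
            continue  # concurrent duplicate with the same decision: treat as success
        else:
            raise ValueError(
                f"Review {node_exec_id} was already processed with status "
                f"{review.status}, cannot change to {wanted}"
            )
    return updated
```

Calling `apply_decisions` a second time with the same decision returns cleanly instead of raising, which mirrors how the route now treats a double-click as success; only a conflicting decision is rejected.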
From 962824c8afd9a566531ee123262dc6fe57dcb679 Mon Sep 17 00:00:00 2001 From: Ubbe Date: Tue, 27 Jan 2026 22:09:25 +0700 Subject: [PATCH 11/21] refactor(frontend): copilot session management stream updates (#11853) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes 🏗️ - **Fix infinite loop in copilot page** - use Zustand selectors instead of full store object to get stable function references - **Centralize chat streaming logic** - move all streaming files from `providers/chat-stream/` to `components/contextual/Chat/` for better colocation and reusability - **Rename `copilot-store` → `copilot-page-store`** - clarify that the store is scoped to the copilot page - **Fix message duplication** - Only replay chunks from active streams (not completed ones) since backend already provides persisted messages in `initialMessages` - **Auto-focus chat input** - Focus textarea when streaming ends and input is re-enabled - **Graceful error display** - Render tool response errors in muted style (small text + warning icon) instead of raw "Error: ..."
text ## Checklist 📋 ### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Navigate to copilot page - no infinite loop errors - [x] Start a new chat, send message, verify streaming works - [x] Navigate away and back to a completed session - no duplicate messages - [x] After stream completes, verify chat input receives focus - [x] Trigger a tool error - verify it displays with muted styling --- .../app/(platform)/copilot/NewChatContext.tsx | 41 -- .../components/CopilotShell/CopilotShell.tsx | 22 +- .../SessionsList/useSessionsPagination.ts | 73 ++- .../components/CopilotShell/helpers.ts | 70 +- .../CopilotShell/useCopilotShell.ts | 45 +- .../(platform)/copilot/copilot-page-store.ts | 54 ++ .../src/app/(platform)/copilot/layout.tsx | 7 +- .../src/app/(platform)/copilot/page.tsx | 30 +- .../app/(platform)/copilot/useCopilotPage.ts | 74 +-- .../frontend/src/app/(platform)/layout.tsx | 2 + .../components/atoms/Skeleton/Skeleton.tsx | 14 + .../atoms/Skeleton/skeleton.stories.tsx | 2 +- .../components/contextual/Chat/chat-store.ts | 234 +++++++ .../components/contextual/Chat/chat-types.ts | 94 +++ .../ChatContainer/ChatContainer.tsx | 24 +- .../createStreamEventDispatcher.ts | 2 +- .../Chat/components/ChatContainer/helpers.ts | 111 ++++ .../ChatContainer/useChatContainer.ts | 257 +++----- .../Chat/components/ChatInput/ChatInput.tsx | 11 +- .../Chat/components/ChatInput/useChatInput.ts | 55 +- .../LastToolResponse/LastToolResponse.tsx | 15 +- .../ToolResponseMessage.tsx | 27 +- .../components/ToolResponseMessage/helpers.ts | 43 +- .../contextual/Chat/stream-executor.ts | 142 ++++ .../contextual/Chat/stream-utils.ts | 84 +++ .../src/components/contextual/Chat/useChat.ts | 41 +- .../contextual/Chat/useChatDrawer.ts | 17 - .../contextual/Chat/useChatSession.ts | 12 + .../contextual/Chat/useChatStream.ts | 615 +++--------------- .../providers/posthog/posthog-provider.tsx | 7 +- .../network-status/NetworkStatusMonitor.tsx | 8 + .../network-status/useNetworkStatus.ts | 28 + 32 files changed, 1274 insertions(+), 987 deletions(-) delete mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/NewChatContext.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts create mode 100644 autogpt_platform/frontend/src/components/atoms/Skeleton/Skeleton.tsx create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/chat-store.ts create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/chat-types.ts create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/stream-executor.ts create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/stream-utils.ts delete mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/useChatDrawer.ts create mode 100644 autogpt_platform/frontend/src/services/network-status/NetworkStatusMonitor.tsx create mode 100644 autogpt_platform/frontend/src/services/network-status/useNetworkStatus.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/NewChatContext.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/NewChatContext.tsx deleted file mode 100644 index 0826637043..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/NewChatContext.tsx +++ /dev/null @@ -1,41 +0,0 @@ -"use client"; - -import { createContext, useContext, useRef, type ReactNode } from "react"; - -interface NewChatContextValue { - 
onNewChatClick: () => void; - setOnNewChatClick: (handler?: () => void) => void; - performNewChat?: () => void; - setPerformNewChat: (handler?: () => void) => void; -} - -const NewChatContext = createContext(null); - -export function NewChatProvider({ children }: { children: ReactNode }) { - const onNewChatRef = useRef<(() => void) | undefined>(); - const performNewChatRef = useRef<(() => void) | undefined>(); - const contextValueRef = useRef({ - onNewChatClick() { - onNewChatRef.current?.(); - }, - setOnNewChatClick(handler?: () => void) { - onNewChatRef.current = handler; - }, - performNewChat() { - performNewChatRef.current?.(); - }, - setPerformNewChat(handler?: () => void) { - performNewChatRef.current = handler; - }, - }); - - return ( - - {children} - - ); -} - -export function useNewChat() { - return useContext(NewChatContext); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx index 44e32024a8..fb22640302 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx @@ -4,7 +4,7 @@ import { ChatLoader } from "@/components/contextual/Chat/components/ChatLoader/C import { NAVBAR_HEIGHT_PX } from "@/lib/constants"; import type { ReactNode } from "react"; import { useEffect } from "react"; -import { useNewChat } from "../../NewChatContext"; +import { useCopilotStore } from "../../copilot-page-store"; import { DesktopSidebar } from "./components/DesktopSidebar/DesktopSidebar"; import { LoadingState } from "./components/LoadingState/LoadingState"; import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer"; @@ -35,21 +35,23 @@ export function CopilotShell({ children }: Props) { isReadyToShowContent, } = useCopilotShell(); - const newChatContext = useNewChat(); - const handleNewChatClickWrapper = - newChatContext?.onNewChatClick || handleNewChat; + const setNewChatHandler = useCopilotStore((s) => s.setNewChatHandler); + const requestNewChat = useCopilotStore((s) => s.requestNewChat); useEffect( function registerNewChatHandler() { - if (!newChatContext) return; - newChatContext.setPerformNewChat(handleNewChat); + setNewChatHandler(handleNewChat); return function cleanup() { - newChatContext.setPerformNewChat(undefined); + setNewChatHandler(null); }; }, - [newChatContext, handleNewChat], + [handleNewChat], ); + function handleNewChatClick() { + requestNewChat(); + } + if (!isLoggedIn) { return (
@@ -72,7 +74,7 @@ export function CopilotShell({ children }: Props) { isFetchingNextPage={isFetchingNextPage} onSelectSession={handleSelectSession} onFetchNextPage={fetchNextPage} - onNewChat={handleNewChatClickWrapper} + onNewChat={handleNewChatClick} hasActiveSession={Boolean(hasActiveSession)} /> )} @@ -94,7 +96,7 @@ export function CopilotShell({ children }: Props) { isFetchingNextPage={isFetchingNextPage} onSelectSession={handleSelectSession} onFetchNextPage={fetchNextPage} - onNewChat={handleNewChatClickWrapper} + onNewChat={handleNewChatClick} onClose={handleCloseDrawer} onOpenChange={handleDrawerOpenChange} hasActiveSession={Boolean(hasActiveSession)} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts index 8833a419c1..1f241f992a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts @@ -1,7 +1,12 @@ -import { useGetV2ListSessions } from "@/app/api/__generated__/endpoints/chat/chat"; +import { + getGetV2ListSessionsQueryKey, + useGetV2ListSessions, +} from "@/app/api/__generated__/endpoints/chat/chat"; import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; import { okData } from "@/app/api/helpers"; -import { useEffect, useMemo, useState } from "react"; +import { useChatStore } from "@/components/contextual/Chat/chat-store"; +import { useQueryClient } from "@tanstack/react-query"; +import { useEffect, useState } from "react"; const PAGE_SIZE = 50; @@ -15,6 +20,8 @@ export function useSessionsPagination({ enabled }: UseSessionsPaginationArgs) { SessionSummaryResponse[] >([]); const [totalCount, setTotalCount] = useState(null); + const queryClient = useQueryClient(); + const onStreamComplete = useChatStore((state) => state.onStreamComplete); const { data, isLoading, isFetching, isError } = useGetV2ListSessions( { limit: PAGE_SIZE, offset }, @@ -25,35 +32,47 @@ export function useSessionsPagination({ enabled }: UseSessionsPaginationArgs) { }, ); - useEffect(() => { - const responseData = okData(data); - if (responseData) { - const newSessions = responseData.sessions; - const total = responseData.total; - setTotalCount(total); - - if (offset === 0) { - setAccumulatedSessions(newSessions); - } else { - setAccumulatedSessions((prev) => [...prev, ...newSessions]); - } - } else if (!enabled) { + useEffect(function refreshOnStreamComplete() { + const unsubscribe = onStreamComplete(function handleStreamComplete() { + setOffset(0); setAccumulatedSessions([]); setTotalCount(null); - } - }, [data, offset, enabled]); + queryClient.invalidateQueries({ + queryKey: getGetV2ListSessionsQueryKey(), + }); + }); + return unsubscribe; + }, []); - const hasNextPage = useMemo(() => { - if (totalCount === null) return false; - return accumulatedSessions.length < totalCount; - }, [accumulatedSessions.length, totalCount]); + useEffect( + function updateSessionsFromResponse() { + const responseData = okData(data); + if (responseData) { + const newSessions = responseData.sessions; + const total = responseData.total; + setTotalCount(total); - const areAllSessionsLoaded = useMemo(() => { - if (totalCount === null) return false; - return ( - 
accumulatedSessions.length >= totalCount && !isFetching && !isLoading - ); - }, [accumulatedSessions.length, totalCount, isFetching, isLoading]); + if (offset === 0) { + setAccumulatedSessions(newSessions); + } else { + setAccumulatedSessions((prev) => [...prev, ...newSessions]); + } + } else if (!enabled) { + setAccumulatedSessions([]); + setTotalCount(null); + } + }, + [data, offset, enabled], + ); + + const hasNextPage = + totalCount !== null && accumulatedSessions.length < totalCount; + + const areAllSessionsLoaded = + totalCount !== null && + accumulatedSessions.length >= totalCount && + !isFetching && + !isLoading; useEffect(() => { if ( diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts index bf4eb70ccb..3e932848a0 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts @@ -2,9 +2,7 @@ import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessi import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; import { format, formatDistanceToNow, isToday } from "date-fns"; -export function convertSessionDetailToSummary( - session: SessionDetailResponse, -): SessionSummaryResponse { +export function convertSessionDetailToSummary(session: SessionDetailResponse) { return { id: session.id, created_at: session.created_at, @@ -13,17 +11,25 @@ export function convertSessionDetailToSummary( }; } -export function filterVisibleSessions( - sessions: SessionSummaryResponse[], -): SessionSummaryResponse[] { - return sessions.filter( - (session) => session.updated_at !== session.created_at, - ); +export function filterVisibleSessions(sessions: SessionSummaryResponse[]) { + const fiveMinutesAgo = Date.now() - 5 * 60 * 1000; + return sessions.filter((session) => { + const hasBeenUpdated = session.updated_at !== session.created_at; + + if (hasBeenUpdated) return true; + + const isRecentlyCreated = + new Date(session.created_at).getTime() > fiveMinutesAgo; + + return isRecentlyCreated; + }); } -export function getSessionTitle(session: SessionSummaryResponse): string { +export function getSessionTitle(session: SessionSummaryResponse) { if (session.title) return session.title; + const isNewSession = session.updated_at === session.created_at; + if (isNewSession) { const createdDate = new Date(session.created_at); if (isToday(createdDate)) { @@ -31,12 +37,11 @@ export function getSessionTitle(session: SessionSummaryResponse): string { } return format(createdDate, "MMM d, yyyy"); } + return "Untitled Chat"; } -export function getSessionUpdatedLabel( - session: SessionSummaryResponse, -): string { +export function getSessionUpdatedLabel(session: SessionSummaryResponse) { if (!session.updated_at) return ""; return formatDistanceToNow(new Date(session.updated_at), { addSuffix: true }); } @@ -45,8 +50,10 @@ export function mergeCurrentSessionIntoList( accumulatedSessions: SessionSummaryResponse[], currentSessionId: string | null, currentSessionData: SessionDetailResponse | null | undefined, -): SessionSummaryResponse[] { + recentlyCreatedSessions?: Map, +) { const filteredSessions: SessionSummaryResponse[] = []; + const addedIds = new Set(); if (accumulatedSessions.length > 0) { const visibleSessions = filterVisibleSessions(accumulatedSessions); @@ -61,29 +68,40 @@ export function 
mergeCurrentSessionIntoList( ); if (!isInVisible) { filteredSessions.push(currentInAll); + addedIds.add(currentInAll.id); } } } - filteredSessions.push(...visibleSessions); + for (const session of visibleSessions) { + if (!addedIds.has(session.id)) { + filteredSessions.push(session); + addedIds.add(session.id); + } + } } if (currentSessionId && currentSessionData) { - const isCurrentInList = filteredSessions.some( - (s) => s.id === currentSessionId, - ); - if (!isCurrentInList) { + if (!addedIds.has(currentSessionId)) { const summarySession = convertSessionDetailToSummary(currentSessionData); filteredSessions.unshift(summarySession); + addedIds.add(currentSessionId); + } + } + + if (recentlyCreatedSessions) { + for (const [sessionId, sessionData] of recentlyCreatedSessions) { + if (!addedIds.has(sessionId)) { + filteredSessions.unshift(sessionData); + addedIds.add(sessionId); + } } } return filteredSessions; } -export function getCurrentSessionId( - searchParams: URLSearchParams, -): string | null { +export function getCurrentSessionId(searchParams: URLSearchParams) { return searchParams.get("sessionId"); } @@ -95,11 +113,7 @@ export function shouldAutoSelectSession( accumulatedSessions: SessionSummaryResponse[], isLoading: boolean, totalCount: number | null, -): { - shouldSelect: boolean; - sessionIdToSelect: string | null; - shouldCreate: boolean; -} { +) { if (!areAllSessionsLoaded || hasAutoSelectedSession) { return { shouldSelect: false, @@ -146,7 +160,7 @@ export function checkReadyToShowContent( isCurrentSessionLoading: boolean, currentSessionData: SessionDetailResponse | null | undefined, hasAutoSelectedSession: boolean, -): boolean { +) { if (!areAllSessionsLoaded) return false; if (paramSessionId) { diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts index cadd98da3e..a3aa0b55b2 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts @@ -4,23 +4,25 @@ import { getGetV2ListSessionsQueryKey, useGetV2GetSession, } from "@/app/api/__generated__/endpoints/chat/chat"; +import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; import { okData } from "@/app/api/helpers"; import { useBreakpoint } from "@/lib/hooks/useBreakpoint"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { useQueryClient } from "@tanstack/react-query"; -import { usePathname, useRouter, useSearchParams } from "next/navigation"; +import { parseAsString, useQueryState } from "nuqs"; +import { usePathname, useSearchParams } from "next/navigation"; import { useEffect, useRef, useState } from "react"; import { useMobileDrawer } from "./components/MobileDrawer/useMobileDrawer"; import { useSessionsPagination } from "./components/SessionsList/useSessionsPagination"; import { checkReadyToShowContent, + convertSessionDetailToSummary, filterVisibleSessions, getCurrentSessionId, mergeCurrentSessionIntoList, } from "./helpers"; export function useCopilotShell() { - const router = useRouter(); const pathname = usePathname(); const searchParams = useSearchParams(); const queryClient = useQueryClient(); @@ -29,6 +31,8 @@ export function useCopilotShell() { const isMobile = breakpoint === "base" || breakpoint === "sm" || breakpoint === "md"; + const [, 
setUrlSessionId] = useQueryState("sessionId", parseAsString); + const isOnHomepage = pathname === "/copilot"; const paramSessionId = searchParams.get("sessionId"); @@ -65,6 +69,9 @@ export function useCopilotShell() { const [hasAutoSelectedSession, setHasAutoSelectedSession] = useState(false); const hasAutoSelectedRef = useRef(false); + const recentlyCreatedSessionsRef = useRef< + Map + >(new Map()); // Mark as auto-selected when sessionId is in URL useEffect(() => { @@ -91,6 +98,30 @@ export function useCopilotShell() { } }, [isOnHomepage, paramSessionId, queryClient]); + // Track newly created sessions to ensure they stay visible even when switching away + useEffect(() => { + if (currentSessionId && currentSessionData) { + const isNewSession = + currentSessionData.updated_at === currentSessionData.created_at; + const isNotInAccumulated = !accumulatedSessions.some( + (s) => s.id === currentSessionId, + ); + if (isNewSession || isNotInAccumulated) { + const summary = convertSessionDetailToSummary(currentSessionData); + recentlyCreatedSessionsRef.current.set(currentSessionId, summary); + } + } + }, [currentSessionId, currentSessionData, accumulatedSessions]); + + // Clean up recently created sessions that are now in the accumulated list + useEffect(() => { + for (const sessionId of recentlyCreatedSessionsRef.current.keys()) { + if (accumulatedSessions.some((s) => s.id === sessionId)) { + recentlyCreatedSessionsRef.current.delete(sessionId); + } + } + }, [accumulatedSessions]); + // Reset pagination when query becomes disabled const prevPaginationEnabledRef = useRef(paginationEnabled); useEffect(() => { @@ -105,6 +136,7 @@ export function useCopilotShell() { accumulatedSessions, currentSessionId, currentSessionData, + recentlyCreatedSessionsRef.current, ); const visibleSessions = filterVisibleSessions(sessions); @@ -124,22 +156,17 @@ export function useCopilotShell() { ); function handleSelectSession(sessionId: string) { - // Navigate using replaceState to avoid full page reload - window.history.replaceState(null, "", `/copilot?sessionId=${sessionId}`); - // Force a re-render by updating the URL through router - router.replace(`/copilot?sessionId=${sessionId}`); + setUrlSessionId(sessionId, { shallow: false }); if (isMobile) handleCloseDrawer(); } function handleNewChat() { resetAutoSelect(); resetPagination(); - // Invalidate and refetch sessions list to ensure newly created sessions appear queryClient.invalidateQueries({ queryKey: getGetV2ListSessionsQueryKey(), }); - window.history.replaceState(null, "", "/copilot"); - router.replace("/copilot"); + setUrlSessionId(null, { shallow: false }); if (isMobile) handleCloseDrawer(); } diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts new file mode 100644 index 0000000000..22bf5000a1 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts @@ -0,0 +1,54 @@ +"use client"; + +import { create } from "zustand"; + +interface CopilotStoreState { + isStreaming: boolean; + isNewChatModalOpen: boolean; + newChatHandler: (() => void) | null; +} + +interface CopilotStoreActions { + setIsStreaming: (isStreaming: boolean) => void; + setNewChatHandler: (handler: (() => void) | null) => void; + requestNewChat: () => void; + confirmNewChat: () => void; + cancelNewChat: () => void; +} + +type CopilotStore = CopilotStoreState & CopilotStoreActions; + +export const useCopilotStore = create((set, get) => ({ + 
isStreaming: false, + isNewChatModalOpen: false, + newChatHandler: null, + + setIsStreaming(isStreaming) { + set({ isStreaming }); + }, + + setNewChatHandler(handler) { + set({ newChatHandler: handler }); + }, + + requestNewChat() { + const { isStreaming, newChatHandler } = get(); + if (isStreaming) { + set({ isNewChatModalOpen: true }); + } else if (newChatHandler) { + newChatHandler(); + } + }, + + confirmNewChat() { + const { newChatHandler } = get(); + set({ isNewChatModalOpen: false }); + if (newChatHandler) { + newChatHandler(); + } + }, + + cancelNewChat() { + set({ isNewChatModalOpen: false }); + }, +})); diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx index 0f40de8f25..89cf72e2ba 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx @@ -1,11 +1,6 @@ import type { ReactNode } from "react"; -import { NewChatProvider } from "./NewChatContext"; import { CopilotShell } from "./components/CopilotShell/CopilotShell"; export default function CopilotLayout({ children }: { children: ReactNode }) { - return ( - - {children} - - ); + return {children}; } diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx index 3bbafd087b..83b21bf82e 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx @@ -1,16 +1,19 @@ "use client"; -import { Skeleton } from "@/components/__legacy__/ui/skeleton"; import { Button } from "@/components/atoms/Button/Button"; + +import { Skeleton } from "@/components/atoms/Skeleton/Skeleton"; import { Text } from "@/components/atoms/Text/Text"; import { Chat } from "@/components/contextual/Chat/Chat"; import { ChatInput } from "@/components/contextual/Chat/components/ChatInput/ChatInput"; import { ChatLoader } from "@/components/contextual/Chat/components/ChatLoader/ChatLoader"; import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { useCopilotStore } from "./copilot-page-store"; import { useCopilotPage } from "./useCopilotPage"; export default function CopilotPage() { const { state, handlers } = useCopilotPage(); + const confirmNewChat = useCopilotStore((s) => s.confirmNewChat); const { greetingName, quickActions, @@ -25,15 +28,11 @@ export default function CopilotPage() { handleSessionNotFound, handleStreamingChange, handleCancelNewChat, - proceedWithNewChat, handleNewChatModalOpen, } = handlers; - if (!isReady) { - return null; - } + if (!isReady) return null; - // Show Chat when we have an active session if (pageState.type === "chat") { return (
@@ -71,7 +70,7 @@ export default function CopilotPage() { @@ -83,7 +82,7 @@ export default function CopilotPage() { ); } - if (pageState.type === "newChat") { + if (pageState.type === "newChat" || pageState.type === "creating") { return (
@@ -96,21 +95,6 @@ export default function CopilotPage() { ); } - // Show loading state while creating session and sending first message - if (pageState.type === "creating") { - return ( -
-
- - - Loading your chats... - -
-
- ); - } - - // Show Welcome screen return (
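The `NewChatContext` ref plumbing deleted above is replaced by the plain zustand store in `copilot-page-store.ts`, which makes the confirm-before-abandoning-a-stream flow exercisable without rendering anything. A minimal sketch of the gating behavior (the relative import path is illustrative):

```ts
// Sketch: driving copilot-page-store imperatively, e.g. from a unit test.
import { useCopilotStore } from "./copilot-page-store";

const store = useCopilotStore.getState();

// The shell registers its reset handler on mount.
store.setNewChatHandler(() => console.log("reset to a fresh chat"));

// While a response is streaming, "new chat" opens the confirmation modal
// instead of resetting immediately.
store.setIsStreaming(true);
store.requestNewChat();
console.log(useCopilotStore.getState().isNewChatModalOpen); // true

// Confirming closes the modal and invokes the registered handler.
useCopilotStore.getState().confirmNewChat(); // logs "reset to a fresh chat"
```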
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts index cb13137432..1d9c843d7d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts @@ -1,4 +1,7 @@ -import { postV2CreateSession } from "@/app/api/__generated__/endpoints/chat/chat"; +import { + getGetV2ListSessionsQueryKey, + postV2CreateSession, +} from "@/app/api/__generated__/endpoints/chat/chat"; import { useToast } from "@/components/molecules/Toast/use-toast"; import { getHomepageRoute } from "@/lib/constants"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; @@ -8,25 +11,22 @@ import { useGetFlag, } from "@/services/feature-flags/use-get-flag"; import * as Sentry from "@sentry/nextjs"; +import { useQueryClient } from "@tanstack/react-query"; import { useFlags } from "launchdarkly-react-client-sdk"; import { useRouter } from "next/navigation"; import { useEffect, useReducer } from "react"; -import { useNewChat } from "./NewChatContext"; +import { useCopilotStore } from "./copilot-page-store"; import { getGreetingName, getQuickActions, type PageState } from "./helpers"; import { useCopilotURLState } from "./useCopilotURLState"; type CopilotState = { pageState: PageState; - isStreaming: boolean; - isNewChatModalOpen: boolean; initialPrompts: Record; previousSessionId: string | null; }; type CopilotAction = | { type: "setPageState"; pageState: PageState } - | { type: "setStreaming"; isStreaming: boolean } - | { type: "setNewChatModalOpen"; isOpen: boolean } | { type: "setInitialPrompt"; sessionId: string; prompt: string } | { type: "setPreviousSessionId"; sessionId: string | null }; @@ -52,14 +52,6 @@ function copilotReducer( if (isSamePageState(action.pageState, state.pageState)) return state; return { ...state, pageState: action.pageState }; } - if (action.type === "setStreaming") { - if (action.isStreaming === state.isStreaming) return state; - return { ...state, isStreaming: action.isStreaming }; - } - if (action.type === "setNewChatModalOpen") { - if (action.isOpen === state.isNewChatModalOpen) return state; - return { ...state, isNewChatModalOpen: action.isOpen }; - } if (action.type === "setInitialPrompt") { if (state.initialPrompts[action.sessionId] === action.prompt) return state; return { @@ -79,9 +71,14 @@ function copilotReducer( export function useCopilotPage() { const router = useRouter(); + const queryClient = useQueryClient(); const { user, isLoggedIn, isUserLoading } = useSupabase(); const { toast } = useToast(); + const isNewChatModalOpen = useCopilotStore((s) => s.isNewChatModalOpen); + const setIsStreaming = useCopilotStore((s) => s.setIsStreaming); + const cancelNewChat = useCopilotStore((s) => s.cancelNewChat); + const isChatEnabled = useGetFlag(Flag.CHAT); const flags = useFlags(); const homepageRoute = getHomepageRoute(isChatEnabled); @@ -93,13 +90,10 @@ export function useCopilotPage() { const [state, dispatch] = useReducer(copilotReducer, { pageState: { type: "welcome" }, - isStreaming: false, - isNewChatModalOpen: false, initialPrompts: {}, previousSessionId: null, }); - const newChatContext = useNewChat(); const greetingName = getGreetingName(user); const quickActions = getQuickActions(); @@ -124,17 +118,6 @@ export function useCopilotPage() { setPreviousSessionId, }); - useEffect( - function registerNewChatHandler() { - if (!newChatContext) return; - 
newChatContext.setOnNewChatClick(handleNewChatClick); - return function cleanup() { - newChatContext.setOnNewChatClick(undefined); - }; - }, - [newChatContext, handleNewChatClick], - ); - useEffect( function transitionNewChatToWelcome() { if (state.pageState.type === "newChat") { @@ -189,6 +172,10 @@ export function useCopilotPage() { prompt: trimmedPrompt, }); + await queryClient.invalidateQueries({ + queryKey: getGetV2ListSessionsQueryKey(), + }); + await setUrlSessionId(sessionId, { shallow: false }); dispatch({ type: "setPageState", @@ -211,37 +198,15 @@ export function useCopilotPage() { } function handleStreamingChange(isStreamingValue: boolean) { - dispatch({ type: "setStreaming", isStreaming: isStreamingValue }); - } - - async function proceedWithNewChat() { - dispatch({ type: "setNewChatModalOpen", isOpen: false }); - if (newChatContext?.performNewChat) { - newChatContext.performNewChat(); - return; - } - try { - await setUrlSessionId(null, { shallow: false }); - } catch (error) { - console.error("[CopilotPage] Failed to clear session:", error); - } - router.replace("/copilot"); + setIsStreaming(isStreamingValue); } function handleCancelNewChat() { - dispatch({ type: "setNewChatModalOpen", isOpen: false }); + cancelNewChat(); } function handleNewChatModalOpen(isOpen: boolean) { - dispatch({ type: "setNewChatModalOpen", isOpen }); - } - - function handleNewChatClick() { - if (state.isStreaming) { - dispatch({ type: "setNewChatModalOpen", isOpen: true }); - } else { - proceedWithNewChat(); - } + if (!isOpen) cancelNewChat(); } return { @@ -250,7 +215,7 @@ export function useCopilotPage() { quickActions, isLoading: isUserLoading, pageState: state.pageState, - isNewChatModalOpen: state.isNewChatModalOpen, + isNewChatModalOpen, isReady: isFlagReady && isChatEnabled !== false && isLoggedIn, }, handlers: { @@ -259,7 +224,6 @@ export function useCopilotPage() { handleSessionNotFound, handleStreamingChange, handleCancelNewChat, - proceedWithNewChat, handleNewChatModalOpen, }, }; diff --git a/autogpt_platform/frontend/src/app/(platform)/layout.tsx b/autogpt_platform/frontend/src/app/(platform)/layout.tsx index f5e3f3b99b..048110f8b2 100644 --- a/autogpt_platform/frontend/src/app/(platform)/layout.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/layout.tsx @@ -1,10 +1,12 @@ import { Navbar } from "@/components/layout/Navbar/Navbar"; +import { NetworkStatusMonitor } from "@/services/network-status/NetworkStatusMonitor"; import { ReactNode } from "react"; import { AdminImpersonationBanner } from "./admin/components/AdminImpersonationBanner"; export default function PlatformLayout({ children }: { children: ReactNode }) { return (
+      <NetworkStatusMonitor />
{children}
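`NetworkStatusMonitor` is mounted once at the platform layout level; its implementation is not part of this hunk. The usual shape of such a monitor is a hook over the browser `online`/`offline` events - a hypothetical sketch, not the actual `useNetworkStatus` code:

```ts
import { useEffect, useState } from "react";

// Hypothetical sketch of a browser connectivity hook.
export function useOnlineStatus(): boolean {
  const [isOnline, setIsOnline] = useState(
    typeof navigator === "undefined" ? true : navigator.onLine,
  );

  useEffect(() => {
    const goOnline = () => setIsOnline(true);
    const goOffline = () => setIsOnline(false);
    window.addEventListener("online", goOnline);
    window.addEventListener("offline", goOffline);
    return () => {
      window.removeEventListener("online", goOnline);
      window.removeEventListener("offline", goOffline);
    };
  }, []);

  return isOnline;
}
```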
diff --git a/autogpt_platform/frontend/src/components/atoms/Skeleton/Skeleton.tsx b/autogpt_platform/frontend/src/components/atoms/Skeleton/Skeleton.tsx
new file mode 100644
index 0000000000..4789e281ce
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/atoms/Skeleton/Skeleton.tsx
@@ -0,0 +1,14 @@
+import { cn } from "@/lib/utils";
+
+interface Props extends React.HTMLAttributes<HTMLDivElement> {
+  className?: string;
+}
+
+export function Skeleton({ className, ...props }: Props) {
+  return (
+    <div
+      className={cn("animate-pulse rounded-md bg-zinc-200", className)}
+      {...props}
+    />
+ ); +} diff --git a/autogpt_platform/frontend/src/components/atoms/Skeleton/skeleton.stories.tsx b/autogpt_platform/frontend/src/components/atoms/Skeleton/skeleton.stories.tsx index 04d87a6e0e..69bb7c3440 100644 --- a/autogpt_platform/frontend/src/components/atoms/Skeleton/skeleton.stories.tsx +++ b/autogpt_platform/frontend/src/components/atoms/Skeleton/skeleton.stories.tsx @@ -1,4 +1,4 @@ -import { Skeleton } from "@/components/__legacy__/ui/skeleton"; +import { Skeleton } from "./Skeleton"; import type { Meta, StoryObj } from "@storybook/nextjs"; const meta: Meta = { diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/chat-store.ts b/autogpt_platform/frontend/src/components/contextual/Chat/chat-store.ts new file mode 100644 index 0000000000..28028369a9 --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/chat-store.ts @@ -0,0 +1,234 @@ +"use client"; + +import { create } from "zustand"; +import type { + ActiveStream, + StreamChunk, + StreamCompleteCallback, + StreamResult, + StreamStatus, +} from "./chat-types"; +import { executeStream } from "./stream-executor"; + +const COMPLETED_STREAM_TTL = 5 * 60 * 1000; // 5 minutes + +interface ChatStoreState { + activeStreams: Map; + completedStreams: Map; + activeSessions: Set; + streamCompleteCallbacks: Set; +} + +interface ChatStoreActions { + startStream: ( + sessionId: string, + message: string, + isUserMessage: boolean, + context?: { url: string; content: string }, + onChunk?: (chunk: StreamChunk) => void, + ) => Promise; + stopStream: (sessionId: string) => void; + subscribeToStream: ( + sessionId: string, + onChunk: (chunk: StreamChunk) => void, + skipReplay?: boolean, + ) => () => void; + getStreamStatus: (sessionId: string) => StreamStatus; + getCompletedStream: (sessionId: string) => StreamResult | undefined; + clearCompletedStream: (sessionId: string) => void; + isStreaming: (sessionId: string) => boolean; + registerActiveSession: (sessionId: string) => void; + unregisterActiveSession: (sessionId: string) => void; + isSessionActive: (sessionId: string) => boolean; + onStreamComplete: (callback: StreamCompleteCallback) => () => void; +} + +type ChatStore = ChatStoreState & ChatStoreActions; + +function notifyStreamComplete( + callbacks: Set, + sessionId: string, +) { + for (const callback of callbacks) { + try { + callback(sessionId); + } catch (err) { + console.warn("[ChatStore] Stream complete callback error:", err); + } + } +} + +function cleanupCompletedStreams(completedStreams: Map) { + const now = Date.now(); + for (const [sessionId, result] of completedStreams) { + if (now - result.completedAt > COMPLETED_STREAM_TTL) { + completedStreams.delete(sessionId); + } + } +} + +function moveToCompleted( + activeStreams: Map, + completedStreams: Map, + streamCompleteCallbacks: Set, + sessionId: string, +) { + const stream = activeStreams.get(sessionId); + if (!stream) return; + + const result: StreamResult = { + sessionId, + status: stream.status, + chunks: stream.chunks, + completedAt: Date.now(), + error: stream.error, + }; + + completedStreams.set(sessionId, result); + activeStreams.delete(sessionId); + cleanupCompletedStreams(completedStreams); + + if (stream.status === "completed" || stream.status === "error") { + notifyStreamComplete(streamCompleteCallbacks, sessionId); + } +} + +export const useChatStore = create((set, get) => ({ + activeStreams: new Map(), + completedStreams: new Map(), + activeSessions: new Set(), + streamCompleteCallbacks: new Set(), + + startStream: async function 
startStream( + sessionId, + message, + isUserMessage, + context, + onChunk, + ) { + const { activeStreams, completedStreams, streamCompleteCallbacks } = get(); + + const existingStream = activeStreams.get(sessionId); + if (existingStream) { + existingStream.abortController.abort(); + moveToCompleted( + activeStreams, + completedStreams, + streamCompleteCallbacks, + sessionId, + ); + } + + const abortController = new AbortController(); + const initialCallbacks = new Set<(chunk: StreamChunk) => void>(); + if (onChunk) initialCallbacks.add(onChunk); + + const stream: ActiveStream = { + sessionId, + abortController, + status: "streaming", + startedAt: Date.now(), + chunks: [], + onChunkCallbacks: initialCallbacks, + }; + + activeStreams.set(sessionId, stream); + + try { + await executeStream(stream, message, isUserMessage, context); + } finally { + if (onChunk) stream.onChunkCallbacks.delete(onChunk); + if (stream.status !== "streaming") { + moveToCompleted( + activeStreams, + completedStreams, + streamCompleteCallbacks, + sessionId, + ); + } + } + }, + + stopStream: function stopStream(sessionId) { + const { activeStreams, completedStreams, streamCompleteCallbacks } = get(); + const stream = activeStreams.get(sessionId); + if (stream) { + stream.abortController.abort(); + stream.status = "completed"; + moveToCompleted( + activeStreams, + completedStreams, + streamCompleteCallbacks, + sessionId, + ); + } + }, + + subscribeToStream: function subscribeToStream( + sessionId, + onChunk, + skipReplay = false, + ) { + const { activeStreams } = get(); + + const stream = activeStreams.get(sessionId); + if (stream) { + if (!skipReplay) { + for (const chunk of stream.chunks) { + onChunk(chunk); + } + } + stream.onChunkCallbacks.add(onChunk); + return function unsubscribe() { + stream.onChunkCallbacks.delete(onChunk); + }; + } + + return function noop() {}; + }, + + getStreamStatus: function getStreamStatus(sessionId) { + const { activeStreams, completedStreams } = get(); + + const active = activeStreams.get(sessionId); + if (active) return active.status; + + const completed = completedStreams.get(sessionId); + if (completed) return completed.status; + + return "idle"; + }, + + getCompletedStream: function getCompletedStream(sessionId) { + return get().completedStreams.get(sessionId); + }, + + clearCompletedStream: function clearCompletedStream(sessionId) { + get().completedStreams.delete(sessionId); + }, + + isStreaming: function isStreaming(sessionId) { + const stream = get().activeStreams.get(sessionId); + return stream?.status === "streaming"; + }, + + registerActiveSession: function registerActiveSession(sessionId) { + get().activeSessions.add(sessionId); + }, + + unregisterActiveSession: function unregisterActiveSession(sessionId) { + get().activeSessions.delete(sessionId); + }, + + isSessionActive: function isSessionActive(sessionId) { + return get().activeSessions.has(sessionId); + }, + + onStreamComplete: function onStreamComplete(callback) { + const { streamCompleteCallbacks } = get(); + streamCompleteCallbacks.add(callback); + return function unsubscribe() { + streamCompleteCallbacks.delete(callback); + }; + }, +})); diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/chat-types.ts b/autogpt_platform/frontend/src/components/contextual/Chat/chat-types.ts new file mode 100644 index 0000000000..8c8aa7b704 --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/chat-types.ts @@ -0,0 +1,94 @@ +import type { ToolArguments, ToolResult } from "@/types/chat"; + 
+export type StreamStatus = "idle" | "streaming" | "completed" | "error"; + +export interface StreamChunk { + type: + | "text_chunk" + | "text_ended" + | "tool_call" + | "tool_call_start" + | "tool_response" + | "login_needed" + | "need_login" + | "credentials_needed" + | "error" + | "usage" + | "stream_end"; + timestamp?: string; + content?: string; + message?: string; + code?: string; + details?: Record; + tool_id?: string; + tool_name?: string; + arguments?: ToolArguments; + result?: ToolResult; + success?: boolean; + idx?: number; + session_id?: string; + agent_info?: { + graph_id: string; + name: string; + trigger_type: string; + }; + provider?: string; + provider_name?: string; + credential_type?: string; + scopes?: string[]; + title?: string; + [key: string]: unknown; +} + +export type VercelStreamChunk = + | { type: "start"; messageId: string } + | { type: "finish" } + | { type: "text-start"; id: string } + | { type: "text-delta"; id: string; delta: string } + | { type: "text-end"; id: string } + | { type: "tool-input-start"; toolCallId: string; toolName: string } + | { + type: "tool-input-available"; + toolCallId: string; + toolName: string; + input: Record; + } + | { + type: "tool-output-available"; + toolCallId: string; + toolName?: string; + output: unknown; + success?: boolean; + } + | { + type: "usage"; + promptTokens: number; + completionTokens: number; + totalTokens: number; + } + | { + type: "error"; + errorText: string; + code?: string; + details?: Record; + }; + +export interface ActiveStream { + sessionId: string; + abortController: AbortController; + status: StreamStatus; + startedAt: number; + chunks: StreamChunk[]; + error?: Error; + onChunkCallbacks: Set<(chunk: StreamChunk) => void>; +} + +export interface StreamResult { + sessionId: string; + status: StreamStatus; + chunks: StreamChunk[]; + completedAt: number; + error?: Error; +} + +export type StreamCompleteCallback = (sessionId: string) => void; diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx index 17748f8dbc..f062df1397 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx @@ -4,6 +4,7 @@ import { Text } from "@/components/atoms/Text/Text"; import { Dialog } from "@/components/molecules/Dialog/Dialog"; import { useBreakpoint } from "@/lib/hooks/useBreakpoint"; import { cn } from "@/lib/utils"; +import { GlobeHemisphereEastIcon } from "@phosphor-icons/react"; import { useEffect } from "react"; import { ChatInput } from "../ChatInput/ChatInput"; import { MessageList } from "../MessageList/MessageList"; @@ -55,24 +56,37 @@ export function ChatContainer({ )} > + + + Service unavailable + +
+ } controlled={{ isOpen: isRegionBlockedModalOpen, set: handleRegionModalOpenChange, }} onClose={handleRegionModalClose} + styling={{ maxWidth: 550, width: "100%", minWidth: "auto" }} > -
+
-            This model is not available in your region. Please connect via VPN
-            and try again.
+            The AutoGPT AI model is not available in your region or your
+            connection is blocking it. Please try again with a different
+            connection.
+
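Because streams are now owned by `chat-store.ts` rather than component state, a remounted `ChatContainer` can catch up on an in-flight response: `subscribeToStream` replays the buffered chunks synchronously (unless `skipReplay` is set), then keeps delivering live ones. A minimal sketch of attaching an observer to an active session (the logging is illustrative):

```ts
import { useChatStore } from "@/components/contextual/Chat/chat-store";

// Sketch: observe an in-flight stream; returns an unsubscribe function.
function observeSession(sessionId: string): () => void {
  const { getStreamStatus, subscribeToStream } = useChatStore.getState();

  // Nothing to attach to when the session has no active stream.
  if (getStreamStatus(sessionId) !== "streaming") return () => {};

  // Buffered chunks are replayed first, then live chunks as they arrive.
  return subscribeToStream(sessionId, (chunk) => {
    if (chunk.type === "text_chunk") console.log(chunk.content ?? "");
  });
}
```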
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts index 791cf046d5..82e9b05e88 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts @@ -1,5 +1,5 @@ import { toast } from "sonner"; -import { StreamChunk } from "../../useChatStream"; +import type { StreamChunk } from "../../chat-types"; import type { HandlerDependencies } from "./handlers"; import { handleError, diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts index 0edd1b411a..7dee924634 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts @@ -1,7 +1,118 @@ +import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; import { SessionKey, sessionStorage } from "@/services/storage/session-storage"; import type { ToolResult } from "@/types/chat"; import type { ChatMessageData } from "../ChatMessage/useChatMessage"; +export function processInitialMessages( + initialMessages: SessionDetailResponse["messages"], +): ChatMessageData[] { + const processedMessages: ChatMessageData[] = []; + const toolCallMap = new Map(); + + for (const msg of initialMessages) { + if (!isValidMessage(msg)) { + console.warn("Invalid message structure from backend:", msg); + continue; + } + + let content = String(msg.content || ""); + const role = String(msg.role || "assistant").toLowerCase(); + const toolCalls = msg.tool_calls; + const timestamp = msg.timestamp + ? 
new Date(msg.timestamp as string) + : undefined; + + if (role === "user") { + content = removePageContext(content); + if (!content.trim()) continue; + processedMessages.push({ + type: "message", + role: "user", + content, + timestamp, + }); + continue; + } + + if (role === "assistant") { + content = content + .replace(/[\s\S]*?<\/thinking>/gi, "") + .replace(/[\s\S]*?<\/internal_reasoning>/gi, "") + .trim(); + + if (toolCalls && isToolCallArray(toolCalls) && toolCalls.length > 0) { + for (const toolCall of toolCalls) { + const toolName = toolCall.function.name; + const toolId = toolCall.id; + toolCallMap.set(toolId, toolName); + + try { + const args = JSON.parse(toolCall.function.arguments || "{}"); + processedMessages.push({ + type: "tool_call", + toolId, + toolName, + arguments: args, + timestamp, + }); + } catch (err) { + console.warn("Failed to parse tool call arguments:", err); + processedMessages.push({ + type: "tool_call", + toolId, + toolName, + arguments: {}, + timestamp, + }); + } + } + if (content.trim()) { + processedMessages.push({ + type: "message", + role: "assistant", + content, + timestamp, + }); + } + } else if (content.trim()) { + processedMessages.push({ + type: "message", + role: "assistant", + content, + timestamp, + }); + } + continue; + } + + if (role === "tool") { + const toolCallId = (msg.tool_call_id as string) || ""; + const toolName = toolCallMap.get(toolCallId) || "unknown"; + const toolResponse = parseToolResponse( + content, + toolCallId, + toolName, + timestamp, + ); + if (toolResponse) { + processedMessages.push(toolResponse); + } + continue; + } + + if (content.trim()) { + processedMessages.push({ + type: "message", + role: role as "user" | "assistant" | "system", + content, + timestamp, + }); + } + } + + return processedMessages; +} + export function hasSentInitialPrompt(sessionId: string): boolean { try { const sent = JSON.parse( diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts index 42dd04670d..b7f9d305dd 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts @@ -1,5 +1,6 @@ import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; -import { useCallback, useEffect, useMemo, useRef, useState } from "react"; +import { useEffect, useMemo, useRef, useState } from "react"; +import { useChatStore } from "../../chat-store"; import { toast } from "sonner"; import { useChatStream } from "../../useChatStream"; import { usePageContext } from "../../usePageContext"; @@ -9,11 +10,8 @@ import { createUserMessage, filterAuthMessages, hasSentInitialPrompt, - isToolCallArray, - isValidMessage, markInitialPromptSent, - parseToolResponse, - removePageContext, + processInitialMessages, } from "./helpers"; interface Args { @@ -41,11 +39,18 @@ export function useChatContainer({ sendMessage: sendStreamMessage, stopStreaming, } = useChatStream(); + const activeStreams = useChatStore((s) => s.activeStreams); + const subscribeToStream = useChatStore((s) => s.subscribeToStream); const isStreaming = isStreamingInitiated || hasTextChunks; - useEffect(() => { - if (sessionId !== previousSessionIdRef.current) { - stopStreaming(previousSessionIdRef.current ?? 
undefined, true); + useEffect( + function handleSessionChange() { + if (sessionId === previousSessionIdRef.current) return; + + const prevSession = previousSessionIdRef.current; + if (prevSession) { + stopStreaming(prevSession); + } previousSessionIdRef.current = sessionId; setMessages([]); setStreamingChunks([]); @@ -53,138 +58,11 @@ export function useChatContainer({ setHasTextChunks(false); setIsStreamingInitiated(false); hasResponseRef.current = false; - } - }, [sessionId, stopStreaming]); - const allMessages = useMemo(() => { - const processedInitialMessages: ChatMessageData[] = []; - const toolCallMap = new Map(); + if (!sessionId) return; - for (const msg of initialMessages) { - if (!isValidMessage(msg)) { - console.warn("Invalid message structure from backend:", msg); - continue; - } - - let content = String(msg.content || ""); - const role = String(msg.role || "assistant").toLowerCase(); - const toolCalls = msg.tool_calls; - const timestamp = msg.timestamp - ? new Date(msg.timestamp as string) - : undefined; - - if (role === "user") { - content = removePageContext(content); - if (!content.trim()) continue; - processedInitialMessages.push({ - type: "message", - role: "user", - content, - timestamp, - }); - continue; - } - - if (role === "assistant") { - content = content - .replace(/[\s\S]*?<\/thinking>/gi, "") - .trim(); - - if (toolCalls && isToolCallArray(toolCalls) && toolCalls.length > 0) { - for (const toolCall of toolCalls) { - const toolName = toolCall.function.name; - const toolId = toolCall.id; - toolCallMap.set(toolId, toolName); - - try { - const args = JSON.parse(toolCall.function.arguments || "{}"); - processedInitialMessages.push({ - type: "tool_call", - toolId, - toolName, - arguments: args, - timestamp, - }); - } catch (err) { - console.warn("Failed to parse tool call arguments:", err); - processedInitialMessages.push({ - type: "tool_call", - toolId, - toolName, - arguments: {}, - timestamp, - }); - } - } - if (content.trim()) { - processedInitialMessages.push({ - type: "message", - role: "assistant", - content, - timestamp, - }); - } - } else if (content.trim()) { - processedInitialMessages.push({ - type: "message", - role: "assistant", - content, - timestamp, - }); - } - continue; - } - - if (role === "tool") { - const toolCallId = (msg.tool_call_id as string) || ""; - const toolName = toolCallMap.get(toolCallId) || "unknown"; - const toolResponse = parseToolResponse( - content, - toolCallId, - toolName, - timestamp, - ); - if (toolResponse) { - processedInitialMessages.push(toolResponse); - } - continue; - } - - if (content.trim()) { - processedInitialMessages.push({ - type: "message", - role: role as "user" | "assistant" | "system", - content, - timestamp, - }); - } - } - - return [...processedInitialMessages, ...messages]; - }, [initialMessages, messages]); - - const sendMessage = useCallback( - async function sendMessage( - content: string, - isUserMessage: boolean = true, - context?: { url: string; content: string }, - ) { - if (!sessionId) { - console.error("[useChatContainer] Cannot send message: no session ID"); - return; - } - setIsRegionBlockedModalOpen(false); - if (isUserMessage) { - const userMessage = createUserMessage(content); - setMessages((prev) => [...filterAuthMessages(prev), userMessage]); - } else { - setMessages((prev) => filterAuthMessages(prev)); - } - setStreamingChunks([]); - streamingChunksRef.current = []; - setHasTextChunks(false); - setIsStreamingInitiated(true); - hasResponseRef.current = false; + const activeStream = 
activeStreams.get(sessionId); + if (!activeStream || activeStream.status !== "streaming") return; const dispatcher = createStreamEventDispatcher({ setHasTextChunks, @@ -197,42 +75,85 @@ export function useChatContainer({ setIsStreamingInitiated, }); - try { - await sendStreamMessage( - sessionId, - content, - dispatcher, - isUserMessage, - context, - ); - } catch (err) { - console.error("[useChatContainer] Failed to send message:", err); - setIsStreamingInitiated(false); - - // Don't show error toast for AbortError (expected during cleanup) - if (err instanceof Error && err.name === "AbortError") return; - - const errorMessage = - err instanceof Error ? err.message : "Failed to send message"; - toast.error("Failed to send message", { - description: errorMessage, - }); - } + setIsStreamingInitiated(true); + const skipReplay = initialMessages.length > 0; + return subscribeToStream(sessionId, dispatcher, skipReplay); }, - [sessionId, sendStreamMessage], + [sessionId, stopStreaming, activeStreams, subscribeToStream], ); - const handleStopStreaming = useCallback(() => { + const allMessages = useMemo( + () => [...processInitialMessages(initialMessages), ...messages], + [initialMessages, messages], + ); + + async function sendMessage( + content: string, + isUserMessage: boolean = true, + context?: { url: string; content: string }, + ) { + if (!sessionId) { + console.error("[useChatContainer] Cannot send message: no session ID"); + return; + } + setIsRegionBlockedModalOpen(false); + if (isUserMessage) { + const userMessage = createUserMessage(content); + setMessages((prev) => [...filterAuthMessages(prev), userMessage]); + } else { + setMessages((prev) => filterAuthMessages(prev)); + } + setStreamingChunks([]); + streamingChunksRef.current = []; + setHasTextChunks(false); + setIsStreamingInitiated(true); + hasResponseRef.current = false; + + const dispatcher = createStreamEventDispatcher({ + setHasTextChunks, + setStreamingChunks, + streamingChunksRef, + hasResponseRef, + setMessages, + setIsRegionBlockedModalOpen, + sessionId, + setIsStreamingInitiated, + }); + + try { + await sendStreamMessage( + sessionId, + content, + dispatcher, + isUserMessage, + context, + ); + } catch (err) { + console.error("[useChatContainer] Failed to send message:", err); + setIsStreamingInitiated(false); + + if (err instanceof Error && err.name === "AbortError") return; + + const errorMessage = + err instanceof Error ? 
err.message : "Failed to send message"; + toast.error("Failed to send message", { + description: errorMessage, + }); + } + } + + function handleStopStreaming() { stopStreaming(); setStreamingChunks([]); streamingChunksRef.current = []; setHasTextChunks(false); setIsStreamingInitiated(false); - }, [stopStreaming]); + } const { capturePageContext } = usePageContext(); + const sendMessageRef = useRef(sendMessage); + sendMessageRef.current = sendMessage; - // Send initial prompt if provided (for new sessions from homepage) useEffect( function handleInitialPrompt() { if (!initialPrompt || !sessionId) return; @@ -241,15 +162,9 @@ export function useChatContainer({ markInitialPromptSent(sessionId); const context = capturePageContext(); - sendMessage(initialPrompt, true, context); + sendMessageRef.current(initialPrompt, true, context); }, - [ - initialPrompt, - sessionId, - initialMessages.length, - sendMessage, - capturePageContext, - ], + [initialPrompt, sessionId, initialMessages.length, capturePageContext], ); async function sendMessageWithContext( diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx index 8cdecf0bf4..c45e8dc250 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx @@ -21,7 +21,7 @@ export function ChatInput({ className, }: Props) { const inputId = "chat-input"; - const { value, setValue, handleKeyDown, handleSend, hasMultipleLines } = + const { value, handleKeyDown, handleSubmit, handleChange, hasMultipleLines } = useChatInput({ onSend, disabled: disabled || isStreaming, @@ -29,15 +29,6 @@ export function ChatInput({ inputId, }); - function handleSubmit(e: React.FormEvent) { - e.preventDefault(); - handleSend(); - } - - function handleChange(e: React.ChangeEvent) { - setValue(e.target.value); - } - return (
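The `sendMessageRef` indirection in `useChatContainer` above is the standard "latest ref" pattern: the initial-prompt effect calls the newest `sendMessage` without listing it as a dependency, so the prompt fires once per session instead of re-running whenever the callback identity changes. Extracted as a reusable sketch (the hook names are illustrative):

```ts
import { useEffect, useRef, type MutableRefObject } from "react";

// Always points at the value from the latest render.
function useLatest<T>(value: T): MutableRefObject<T> {
  const ref = useRef(value);
  ref.current = value;
  return ref;
}

// Runs the latest callback once per truthy `key`, without re-running when
// only the callback identity changes between renders.
function useRunOncePerKey(key: string | null, callback: () => void) {
  const latest = useLatest(callback);
  useEffect(() => {
    if (!key) return;
    latest.current();
  }, [key, latest]);
}
```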
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useChatInput.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useChatInput.ts index 93d764b026..6fa8e7252b 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useChatInput.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useChatInput.ts @@ -1,4 +1,10 @@ -import { KeyboardEvent, useCallback, useEffect, useState } from "react"; +import { + ChangeEvent, + FormEvent, + KeyboardEvent, + useEffect, + useState, +} from "react"; interface UseChatInputArgs { onSend: (message: string) => void; @@ -16,6 +22,23 @@ export function useChatInput({ const [value, setValue] = useState(""); const [hasMultipleLines, setHasMultipleLines] = useState(false); + useEffect( + function focusOnMount() { + const textarea = document.getElementById(inputId) as HTMLTextAreaElement; + if (textarea) textarea.focus(); + }, + [inputId], + ); + + useEffect( + function focusWhenEnabled() { + if (disabled) return; + const textarea = document.getElementById(inputId) as HTMLTextAreaElement; + if (textarea) textarea.focus(); + }, + [disabled, inputId], + ); + useEffect(() => { const textarea = document.getElementById(inputId) as HTMLTextAreaElement; const wrapper = document.getElementById( @@ -77,7 +100,7 @@ export function useChatInput({ } }, [value, maxRows, inputId]); - const handleSend = useCallback(() => { + const handleSend = () => { if (disabled || !value.trim()) return; onSend(value.trim()); setValue(""); @@ -93,23 +116,31 @@ export function useChatInput({ wrapper.style.height = ""; wrapper.style.maxHeight = ""; } - }, [value, onSend, disabled, inputId]); + }; - const handleKeyDown = useCallback( - (event: KeyboardEvent) => { - if (event.key === "Enter" && !event.shiftKey) { - event.preventDefault(); - handleSend(); - } - }, - [handleSend], - ); + function handleKeyDown(event: KeyboardEvent) { + if (event.key === "Enter" && !event.shiftKey) { + event.preventDefault(); + handleSend(); + } + } + + function handleSubmit(e: FormEvent) { + e.preventDefault(); + handleSend(); + } + + function handleChange(e: ChangeEvent) { + setValue(e.target.value); + } return { value, setValue, handleKeyDown, handleSend, + handleSubmit, + handleChange, hasMultipleLines, }; } diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx index 3e6bf91ad2..15b10e5715 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx @@ -1,7 +1,5 @@ -import { AIChatBubble } from "../../../AIChatBubble/AIChatBubble"; import type { ChatMessageData } from "../../../ChatMessage/useChatMessage"; -import { MarkdownContent } from "../../../MarkdownContent/MarkdownContent"; -import { formatToolResponse } from "../../../ToolResponseMessage/helpers"; +import { ToolResponseMessage } from "../../../ToolResponseMessage/ToolResponseMessage"; import { shouldSkipAgentOutput } from "../../helpers"; export interface LastToolResponseProps { @@ -15,16 +13,15 @@ export function LastToolResponse({ }: LastToolResponseProps) { if (message.type !== 
"tool_response") return null; - // Skip if this is an agent_output that should be rendered inside assistant message if (shouldSkipAgentOutput(message, prevMessage)) return null; - const formattedText = formatToolResponse(message.result, message.toolName); - return (
- - - +
); } diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx index 1ba10dd248..27da02beb8 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx @@ -1,7 +1,14 @@ +import { Text } from "@/components/atoms/Text/Text"; +import { cn } from "@/lib/utils"; import type { ToolResult } from "@/types/chat"; +import { WarningCircleIcon } from "@phosphor-icons/react"; import { AIChatBubble } from "../AIChatBubble/AIChatBubble"; import { MarkdownContent } from "../MarkdownContent/MarkdownContent"; -import { formatToolResponse } from "./helpers"; +import { + formatToolResponse, + getErrorMessage, + isErrorResponse, +} from "./helpers"; export interface ToolResponseMessageProps { toolId?: string; @@ -18,6 +25,24 @@ export function ToolResponseMessage({ success: _success, className, }: ToolResponseMessageProps) { + if (isErrorResponse(result)) { + const errorMessage = getErrorMessage(result); + return ( + +
+ + + {errorMessage} + +
+
+ ); + } + const formattedText = formatToolResponse(result, toolName); return ( diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts index cf2bca95f7..400f32936e 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts @@ -1,3 +1,42 @@ +function stripInternalReasoning(content: string): string { + return content + .replace(/[\s\S]*?<\/internal_reasoning>/gi, "") + .replace(/[\s\S]*?<\/thinking>/gi, "") + .replace(/\n{3,}/g, "\n\n") + .trim(); +} + +export function isErrorResponse(result: unknown): boolean { + if (typeof result === "string") { + const lower = result.toLowerCase(); + return ( + lower.startsWith("error:") || + lower.includes("not found") || + lower.includes("does not exist") || + lower.includes("failed to") || + lower.includes("unable to") + ); + } + if (typeof result === "object" && result !== null) { + const response = result as Record; + return response.type === "error" || response.error !== undefined; + } + return false; +} + +export function getErrorMessage(result: unknown): string { + if (typeof result === "string") { + return stripInternalReasoning(result.replace(/^error:\s*/i, "")); + } + if (typeof result === "object" && result !== null) { + const response = result as Record; + if (response.error) return stripInternalReasoning(String(response.error)); + if (response.message) + return stripInternalReasoning(String(response.message)); + } + return "An error occurred"; +} + function getToolCompletionPhrase(toolName: string): string { const toolCompletionPhrases: Record = { add_understanding: "Updated your business information", @@ -28,10 +67,10 @@ export function formatToolResponse(result: unknown, toolName: string): string { const parsed = JSON.parse(trimmed); return formatToolResponse(parsed, toolName); } catch { - return trimmed; + return stripInternalReasoning(trimmed); } } - return result; + return stripInternalReasoning(result); } if (typeof result !== "object" || result === null) { diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/stream-executor.ts b/autogpt_platform/frontend/src/components/contextual/Chat/stream-executor.ts new file mode 100644 index 0000000000..b0d970c286 --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/stream-executor.ts @@ -0,0 +1,142 @@ +import type { + ActiveStream, + StreamChunk, + VercelStreamChunk, +} from "./chat-types"; +import { + INITIAL_RETRY_DELAY, + MAX_RETRIES, + normalizeStreamChunk, + parseSSELine, +} from "./stream-utils"; + +function notifySubscribers(stream: ActiveStream, chunk: StreamChunk) { + stream.chunks.push(chunk); + for (const callback of stream.onChunkCallbacks) { + try { + callback(chunk); + } catch (err) { + console.warn("[StreamExecutor] Subscriber callback error:", err); + } + } +} + +export async function executeStream( + stream: ActiveStream, + message: string, + isUserMessage: boolean, + context?: { url: string; content: string }, + retryCount: number = 0, +): Promise { + const { sessionId, abortController } = stream; + + try { + const url = `/api/chat/sessions/${sessionId}/stream`; + const body = JSON.stringify({ + message, + is_user_message: isUserMessage, + context: context || null, + }); + + const response = await fetch(url, { + method: "POST", 
+ headers: { + "Content-Type": "application/json", + Accept: "text/event-stream", + }, + body, + signal: abortController.signal, + }); + + if (!response.ok) { + const errorText = await response.text(); + throw new Error(errorText || `HTTP ${response.status}`); + } + + if (!response.body) { + throw new Error("Response body is null"); + } + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + + while (true) { + const { done, value } = await reader.read(); + + if (done) { + notifySubscribers(stream, { type: "stream_end" }); + stream.status = "completed"; + return; + } + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop() || ""; + + for (const line of lines) { + const data = parseSSELine(line); + if (data !== null) { + if (data === "[DONE]") { + notifySubscribers(stream, { type: "stream_end" }); + stream.status = "completed"; + return; + } + + try { + const rawChunk = JSON.parse(data) as + | StreamChunk + | VercelStreamChunk; + const chunk = normalizeStreamChunk(rawChunk); + if (!chunk) continue; + + notifySubscribers(stream, chunk); + + if (chunk.type === "stream_end") { + stream.status = "completed"; + return; + } + + if (chunk.type === "error") { + stream.status = "error"; + stream.error = new Error( + chunk.message || chunk.content || "Stream error", + ); + return; + } + } catch (err) { + console.warn("[StreamExecutor] Failed to parse SSE chunk:", err); + } + } + } + } + } catch (err) { + if (err instanceof Error && err.name === "AbortError") { + notifySubscribers(stream, { type: "stream_end" }); + stream.status = "completed"; + return; + } + + if (retryCount < MAX_RETRIES) { + const retryDelay = INITIAL_RETRY_DELAY * Math.pow(2, retryCount); + console.log( + `[StreamExecutor] Retrying in ${retryDelay}ms (attempt ${retryCount + 1}/${MAX_RETRIES})`, + ); + await new Promise((resolve) => setTimeout(resolve, retryDelay)); + return executeStream( + stream, + message, + isUserMessage, + context, + retryCount + 1, + ); + } + + stream.status = "error"; + stream.error = err instanceof Error ? 
err : new Error("Stream failed"); + notifySubscribers(stream, { + type: "error", + message: stream.error.message, + }); + } +} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/stream-utils.ts b/autogpt_platform/frontend/src/components/contextual/Chat/stream-utils.ts new file mode 100644 index 0000000000..4100926e79 --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/stream-utils.ts @@ -0,0 +1,84 @@ +import type { ToolArguments, ToolResult } from "@/types/chat"; +import type { StreamChunk, VercelStreamChunk } from "./chat-types"; + +const LEGACY_STREAM_TYPES = new Set([ + "text_chunk", + "text_ended", + "tool_call", + "tool_call_start", + "tool_response", + "login_needed", + "need_login", + "credentials_needed", + "error", + "usage", + "stream_end", +]); + +export function isLegacyStreamChunk( + chunk: StreamChunk | VercelStreamChunk, +): chunk is StreamChunk { + return LEGACY_STREAM_TYPES.has(chunk.type as StreamChunk["type"]); +} + +export function normalizeStreamChunk( + chunk: StreamChunk | VercelStreamChunk, +): StreamChunk | null { + if (isLegacyStreamChunk(chunk)) return chunk; + + switch (chunk.type) { + case "text-delta": + return { type: "text_chunk", content: chunk.delta }; + case "text-end": + return { type: "text_ended" }; + case "tool-input-available": + return { + type: "tool_call_start", + tool_id: chunk.toolCallId, + tool_name: chunk.toolName, + arguments: chunk.input as ToolArguments, + }; + case "tool-output-available": + return { + type: "tool_response", + tool_id: chunk.toolCallId, + tool_name: chunk.toolName, + result: chunk.output as ToolResult, + success: chunk.success ?? true, + }; + case "usage": + return { + type: "usage", + promptTokens: chunk.promptTokens, + completionTokens: chunk.completionTokens, + totalTokens: chunk.totalTokens, + }; + case "error": + return { + type: "error", + message: chunk.errorText, + code: chunk.code, + details: chunk.details, + }; + case "finish": + return { type: "stream_end" }; + case "start": + case "text-start": + return null; + case "tool-input-start": + return { + type: "tool_call_start", + tool_id: chunk.toolCallId, + tool_name: chunk.toolName, + arguments: {}, + }; + } +} + +export const MAX_RETRIES = 3; +export const INITIAL_RETRY_DELAY = 1000; + +export function parseSSELine(line: string): string | null { + if (line.startsWith("data: ")) return line.slice(6); + return null; +} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts b/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts index cf629a287c..f6b2031059 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts @@ -2,7 +2,6 @@ import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { useEffect, useRef, useState } from "react"; -import { toast } from "sonner"; import { useChatSession } from "./useChatSession"; import { useChatStream } from "./useChatStream"; @@ -67,38 +66,16 @@ export function useChat({ urlSessionId }: UseChatArgs = {}) { ], ); - useEffect(() => { - if (isLoading || isCreating) { - const timer = setTimeout(() => { - setShowLoader(true); - }, 300); - return () => clearTimeout(timer); - } else { + useEffect( + function showLoaderWithDelay() { + if (isLoading || isCreating) { + const timer = setTimeout(() => setShowLoader(true), 300); + return () => clearTimeout(timer); + } setShowLoader(false); - } - }, [isLoading, isCreating]); - - useEffect(function 
monitorNetworkStatus() { - function handleOnline() { - toast.success("Connection restored", { - description: "You're back online", - }); - } - - function handleOffline() { - toast.error("You're offline", { - description: "Check your internet connection", - }); - } - - window.addEventListener("online", handleOnline); - window.addEventListener("offline", handleOffline); - - return () => { - window.removeEventListener("online", handleOnline); - window.removeEventListener("offline", handleOffline); - }; - }, []); + }, + [isLoading, isCreating], + ); function clearSession() { clearSessionBase(); diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/useChatDrawer.ts b/autogpt_platform/frontend/src/components/contextual/Chat/useChatDrawer.ts deleted file mode 100644 index 62e1a5a569..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/useChatDrawer.ts +++ /dev/null @@ -1,17 +0,0 @@ -"use client"; - -import { create } from "zustand"; - -interface ChatDrawerState { - isOpen: boolean; - open: () => void; - close: () => void; - toggle: () => void; -} - -export const useChatDrawer = create((set) => ({ - isOpen: false, - open: () => set({ isOpen: true }), - close: () => set({ isOpen: false }), - toggle: () => set((state) => ({ isOpen: !state.isOpen })), -})); diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts b/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts index 553e348f79..dd743874f7 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts @@ -1,6 +1,7 @@ import { getGetV2GetSessionQueryKey, getGetV2GetSessionQueryOptions, + getGetV2ListSessionsQueryKey, postV2CreateSession, useGetV2GetSession, usePatchV2SessionAssignUser, @@ -101,6 +102,17 @@ export function useChatSession({ } }, [createError, loadError]); + useEffect( + function refreshSessionsListOnLoad() { + if (sessionId && sessionData && !isLoadingSession) { + queryClient.invalidateQueries({ + queryKey: getGetV2ListSessionsQueryKey(), + }); + } + }, + [sessionId, sessionData, isLoadingSession, queryClient], + ); + async function createSession() { try { setError(null); diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/useChatStream.ts b/autogpt_platform/frontend/src/components/contextual/Chat/useChatStream.ts index 903c19cd30..5a9f637457 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/useChatStream.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/useChatStream.ts @@ -1,543 +1,110 @@ -import type { ToolArguments, ToolResult } from "@/types/chat"; -import { useCallback, useEffect, useRef, useState } from "react"; +"use client"; + +import { useEffect, useRef, useState } from "react"; import { toast } from "sonner"; +import { useChatStore } from "./chat-store"; +import type { StreamChunk } from "./chat-types"; -const MAX_RETRIES = 3; -const INITIAL_RETRY_DELAY = 1000; - -export interface StreamChunk { - type: - | "text_chunk" - | "text_ended" - | "tool_call" - | "tool_call_start" - | "tool_response" - | "login_needed" - | "need_login" - | "credentials_needed" - | "error" - | "usage" - | "stream_end"; - timestamp?: string; - content?: string; - message?: string; - code?: string; - details?: Record; - tool_id?: string; - tool_name?: string; - arguments?: ToolArguments; - result?: ToolResult; - success?: boolean; - idx?: number; - session_id?: string; - agent_info?: { - graph_id: 
string;
-    name: string;
-    trigger_type: string;
-  };
-  provider?: string;
-  provider_name?: string;
-  credential_type?: string;
-  scopes?: string[];
-  title?: string;
-  [key: string]: unknown;
-}
-
-type VercelStreamChunk =
-  | { type: "start"; messageId: string }
-  | { type: "finish" }
-  | { type: "text-start"; id: string }
-  | { type: "text-delta"; id: string; delta: string }
-  | { type: "text-end"; id: string }
-  | { type: "tool-input-start"; toolCallId: string; toolName: string }
-  | {
-      type: "tool-input-available";
-      toolCallId: string;
-      toolName: string;
-      input: ToolArguments;
-    }
-  | {
-      type: "tool-output-available";
-      toolCallId: string;
-      toolName?: string;
-      output: ToolResult;
-      success?: boolean;
-    }
-  | {
-      type: "usage";
-      promptTokens: number;
-      completionTokens: number;
-      totalTokens: number;
-    }
-  | {
-      type: "error";
-      errorText: string;
-      code?: string;
-      details?: Record<string, unknown>;
-    };
-
-const LEGACY_STREAM_TYPES = new Set([
-  "text_chunk",
-  "text_ended",
-  "tool_call",
-  "tool_call_start",
-  "tool_response",
-  "login_needed",
-  "need_login",
-  "credentials_needed",
-  "error",
-  "usage",
-  "stream_end",
-]);
-
-function isLegacyStreamChunk(
-  chunk: StreamChunk | VercelStreamChunk,
-): chunk is StreamChunk {
-  return LEGACY_STREAM_TYPES.has(chunk.type as StreamChunk["type"]);
-}
-
-function normalizeStreamChunk(
-  chunk: StreamChunk | VercelStreamChunk,
-): StreamChunk | null {
-  if (isLegacyStreamChunk(chunk)) {
-    return chunk;
-  }
-  switch (chunk.type) {
-    case "text-delta":
-      return { type: "text_chunk", content: chunk.delta };
-    case "text-end":
-      return { type: "text_ended" };
-    case "tool-input-available":
-      return {
-        type: "tool_call_start",
-        tool_id: chunk.toolCallId,
-        tool_name: chunk.toolName,
-        arguments: chunk.input,
-      };
-    case "tool-output-available":
-      return {
-        type: "tool_response",
-        tool_id: chunk.toolCallId,
-        tool_name: chunk.toolName,
-        result: chunk.output,
-        success: chunk.success ?? true,
-      };
-    case "usage":
-      return {
-        type: "usage",
-        promptTokens: chunk.promptTokens,
-        completionTokens: chunk.completionTokens,
-        totalTokens: chunk.totalTokens,
-      };
-    case "error":
-      return {
-        type: "error",
-        message: chunk.errorText,
-        code: chunk.code,
-        details: chunk.details,
-      };
-    case "finish":
-      return { type: "stream_end" };
-    case "start":
-    case "text-start":
-      return null;
-    case "tool-input-start":
-      const toolInputStart = chunk as Extract<
-        VercelStreamChunk,
-        { type: "tool-input-start" }
-      >;
-      return {
-        type: "tool_call_start",
-        tool_id: toolInputStart.toolCallId,
-        tool_name: toolInputStart.toolName,
-        arguments: {},
-      };
-  }
-}
+export type { StreamChunk } from "./chat-types";
 
 export function useChatStream() {
   const [isStreaming, setIsStreaming] = useState(false);
   const [error, setError] = useState<Error | null>(null);
-  const retryCountRef = useRef(0);
-  const retryTimeoutRef = useRef<NodeJS.Timeout | null>(null);
-  const abortControllerRef = useRef<AbortController | null>(null);
   const currentSessionIdRef = useRef<string | null>(null);
-  const requestStartTimeRef = useRef<number | null>(null);
-
-  const stopStreaming = useCallback(
-    (sessionId?: string, force: boolean = false) => {
-      console.log("[useChatStream] stopStreaming called", {
-        hasAbortController: !!abortControllerRef.current,
-        isAborted: abortControllerRef.current?.signal.aborted,
-        currentSessionId: currentSessionIdRef.current,
-        requestedSessionId: sessionId,
-        requestStartTime: requestStartTimeRef.current,
-        timeSinceStart: requestStartTimeRef.current
-          ? 
Date.now() - requestStartTimeRef.current - : null, - force, - stack: new Error().stack, - }); - - if ( - sessionId && - currentSessionIdRef.current && - currentSessionIdRef.current !== sessionId - ) { - console.log( - "[useChatStream] Session changed, aborting previous stream", - { - oldSessionId: currentSessionIdRef.current, - newSessionId: sessionId, - }, - ); - } - - const controller = abortControllerRef.current; - if (controller) { - const timeSinceStart = requestStartTimeRef.current - ? Date.now() - requestStartTimeRef.current - : null; - - if (!force && timeSinceStart !== null && timeSinceStart < 100) { - console.log( - "[useChatStream] Request just started (<100ms), skipping abort to prevent race condition", - { - timeSinceStart, - }, - ); - return; - } - - try { - const signal = controller.signal; - - if ( - signal && - typeof signal.aborted === "boolean" && - !signal.aborted - ) { - console.log("[useChatStream] Aborting stream"); - controller.abort(); - } else { - console.log( - "[useChatStream] Stream already aborted or signal invalid", - ); - } - } catch (error) { - if (error instanceof Error && error.name === "AbortError") { - console.log( - "[useChatStream] AbortError caught (expected during cleanup)", - ); - } else { - console.warn("[useChatStream] Error aborting stream:", error); - } - } finally { - abortControllerRef.current = null; - requestStartTimeRef.current = null; - } - } - if (retryTimeoutRef.current) { - clearTimeout(retryTimeoutRef.current); - retryTimeoutRef.current = null; - } - setIsStreaming(false); - }, - [], + const onChunkCallbackRef = useRef<((chunk: StreamChunk) => void) | null>( + null, ); + const stopStream = useChatStore((s) => s.stopStream); + const unregisterActiveSession = useChatStore( + (s) => s.unregisterActiveSession, + ); + const isSessionActive = useChatStore((s) => s.isSessionActive); + const onStreamComplete = useChatStore((s) => s.onStreamComplete); + const getCompletedStream = useChatStore((s) => s.getCompletedStream); + const registerActiveSession = useChatStore((s) => s.registerActiveSession); + const startStream = useChatStore((s) => s.startStream); + const getStreamStatus = useChatStore((s) => s.getStreamStatus); + + function stopStreaming(sessionId?: string) { + const targetSession = sessionId || currentSessionIdRef.current; + if (targetSession) { + stopStream(targetSession); + unregisterActiveSession(targetSession); + } + setIsStreaming(false); + } + useEffect(() => { - console.log("[useChatStream] Component mounted"); - return () => { - const sessionIdAtUnmount = currentSessionIdRef.current; - console.log( - "[useChatStream] Component unmounting, calling stopStreaming", - { - sessionIdAtUnmount, - }, - ); - stopStreaming(undefined, false); + return function cleanup() { + const sessionId = currentSessionIdRef.current; + if (sessionId && !isSessionActive(sessionId)) { + stopStream(sessionId); + } currentSessionIdRef.current = null; + onChunkCallbackRef.current = null; }; - }, [stopStreaming]); + }, []); - const sendMessage = useCallback( - async ( - sessionId: string, - message: string, - onChunk: (chunk: StreamChunk) => void, - isUserMessage: boolean = true, - context?: { url: string; content: string }, - isRetry: boolean = false, - ) => { - console.log("[useChatStream] sendMessage called", { - sessionId, - message: message.substring(0, 50), - isUserMessage, - isRetry, - stack: new Error().stack, - }); + useEffect(() => { + const unsubscribe = onStreamComplete( + function handleStreamComplete(completedSessionId) { + if 
(completedSessionId !== currentSessionIdRef.current) return; - const previousSessionId = currentSessionIdRef.current; - stopStreaming(sessionId, true); - currentSessionIdRef.current = sessionId; - - const abortController = new AbortController(); - abortControllerRef.current = abortController; - requestStartTimeRef.current = Date.now(); - console.log("[useChatStream] Created new AbortController", { - sessionId, - previousSessionId, - requestStartTime: requestStartTimeRef.current, - }); - - if (abortController.signal.aborted) { - console.warn( - "[useChatStream] AbortController was aborted before request started", - ); - requestStartTimeRef.current = null; - return Promise.reject(new Error("Request aborted")); - } - - if (!isRetry) { - retryCountRef.current = 0; - } - setIsStreaming(true); - setError(null); - - try { - const url = `/api/chat/sessions/${sessionId}/stream`; - const body = JSON.stringify({ - message, - is_user_message: isUserMessage, - context: context || null, - }); - - const response = await fetch(url, { - method: "POST", - headers: { - "Content-Type": "application/json", - Accept: "text/event-stream", - }, - body, - signal: abortController.signal, - }); - - console.info("[useChatStream] Stream response", { - sessionId, - status: response.status, - ok: response.ok, - contentType: response.headers.get("content-type"), - }); - - if (!response.ok) { - const errorText = await response.text(); - console.warn("[useChatStream] Stream response error", { - sessionId, - status: response.status, - errorText, - }); - throw new Error(errorText || `HTTP ${response.status}`); - } - - if (!response.body) { - console.warn("[useChatStream] Response body is null", { sessionId }); - throw new Error("Response body is null"); - } - - const reader = response.body.getReader(); - const decoder = new TextDecoder(); - let buffer = ""; - let receivedChunkCount = 0; - let firstChunkAt: number | null = null; - let loggedLineCount = 0; - - return new Promise((resolve, reject) => { - let didDispatchStreamEnd = false; - - function dispatchStreamEnd() { - if (didDispatchStreamEnd) return; - didDispatchStreamEnd = true; - onChunk({ type: "stream_end" }); - } - - const cleanup = () => { - reader.cancel().catch(() => { - // Ignore cancel errors - }); - }; - - async function readStream() { - try { - while (true) { - const { done, value } = await reader.read(); - - if (done) { - cleanup(); - console.info("[useChatStream] Stream closed", { - sessionId, - receivedChunkCount, - timeSinceStart: requestStartTimeRef.current - ? Date.now() - requestStartTimeRef.current - : null, - }); - dispatchStreamEnd(); - retryCountRef.current = 0; - stopStreaming(); - resolve(); - return; - } - - buffer += decoder.decode(value, { stream: true }); - const lines = buffer.split("\n"); - buffer = lines.pop() || ""; - - for (const line of lines) { - if (line.startsWith("data: ")) { - const data = line.slice(6); - if (loggedLineCount < 3) { - console.info("[useChatStream] Raw stream line", { - sessionId, - data: - data.length > 300 ? `${data.slice(0, 300)}...` : data, - }); - loggedLineCount += 1; - } - if (data === "[DONE]") { - cleanup(); - console.info("[useChatStream] Stream done marker", { - sessionId, - receivedChunkCount, - timeSinceStart: requestStartTimeRef.current - ? 
Date.now() - requestStartTimeRef.current - : null, - }); - dispatchStreamEnd(); - retryCountRef.current = 0; - stopStreaming(); - resolve(); - return; - } - - try { - const rawChunk = JSON.parse(data) as - | StreamChunk - | VercelStreamChunk; - const chunk = normalizeStreamChunk(rawChunk); - if (!chunk) { - continue; - } - - if (!firstChunkAt) { - firstChunkAt = Date.now(); - console.info("[useChatStream] First stream chunk", { - sessionId, - chunkType: chunk.type, - timeSinceStart: requestStartTimeRef.current - ? firstChunkAt - requestStartTimeRef.current - : null, - }); - } - receivedChunkCount += 1; - - // Call the chunk handler - onChunk(chunk); - - // Handle stream lifecycle - if (chunk.type === "stream_end") { - didDispatchStreamEnd = true; - cleanup(); - console.info("[useChatStream] Stream end chunk", { - sessionId, - receivedChunkCount, - timeSinceStart: requestStartTimeRef.current - ? Date.now() - requestStartTimeRef.current - : null, - }); - retryCountRef.current = 0; - stopStreaming(); - resolve(); - return; - } else if (chunk.type === "error") { - cleanup(); - reject( - new Error( - chunk.message || chunk.content || "Stream error", - ), - ); - return; - } - } catch (err) { - // Skip invalid JSON lines - console.warn("Failed to parse SSE chunk:", err, data); - } - } - } - } - } catch (err) { - if (err instanceof Error && err.name === "AbortError") { - cleanup(); - dispatchStreamEnd(); - stopStreaming(); - resolve(); - return; - } - - const streamError = - err instanceof Error ? err : new Error("Failed to read stream"); - - if (retryCountRef.current < MAX_RETRIES) { - retryCountRef.current += 1; - const retryDelay = - INITIAL_RETRY_DELAY * Math.pow(2, retryCountRef.current - 1); - - toast.info("Connection interrupted", { - description: `Retrying in ${retryDelay / 1000} seconds...`, - }); - - retryTimeoutRef.current = setTimeout(() => { - sendMessage( - sessionId, - message, - onChunk, - isUserMessage, - context, - true, - ).catch((_err) => { - // Retry failed - }); - }, retryDelay); - } else { - setError(streamError); - toast.error("Connection Failed", { - description: - "Unable to connect to chat service. Please try again.", - }); - cleanup(); - dispatchStreamEnd(); - retryCountRef.current = 0; - stopStreaming(); - reject(streamError); - } - } - } - - readStream(); - }); - } catch (err) { - if (err instanceof Error && err.name === "AbortError") { - setIsStreaming(false); - return Promise.resolve(); - } - const streamError = - err instanceof Error ? 
err : new Error("Failed to start stream"); - setError(streamError); setIsStreaming(false); - throw streamError; + const completed = getCompletedStream(completedSessionId); + if (completed?.error) { + setError(completed.error); + } + unregisterActiveSession(completedSessionId); + }, + ); + + return unsubscribe; + }, []); + + async function sendMessage( + sessionId: string, + message: string, + onChunk: (chunk: StreamChunk) => void, + isUserMessage: boolean = true, + context?: { url: string; content: string }, + ) { + const previousSessionId = currentSessionIdRef.current; + if (previousSessionId && previousSessionId !== sessionId) { + stopStreaming(previousSessionId); + } + + currentSessionIdRef.current = sessionId; + onChunkCallbackRef.current = onChunk; + setIsStreaming(true); + setError(null); + + registerActiveSession(sessionId); + + try { + await startStream(sessionId, message, isUserMessage, context, onChunk); + + const status = getStreamStatus(sessionId); + if (status === "error") { + const completed = getCompletedStream(sessionId); + if (completed?.error) { + setError(completed.error); + toast.error("Connection Failed", { + description: "Unable to connect to chat service. Please try again.", + }); + throw completed.error; + } } - }, - [stopStreaming], - ); + } catch (err) { + const streamError = + err instanceof Error ? err : new Error("Failed to start stream"); + setError(streamError); + throw streamError; + } finally { + setIsStreaming(false); + } + } return { isStreaming, diff --git a/autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx b/autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx index 249d74596a..674f6c55eb 100644 --- a/autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx +++ b/autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx @@ -9,11 +9,12 @@ import { ReactNode, useEffect, useRef } from "react"; export function PostHogProvider({ children }: { children: ReactNode }) { const isPostHogEnabled = environment.isPostHogEnabled(); + const postHogCredentials = environment.getPostHogCredentials(); useEffect(() => { - if (process.env.NEXT_PUBLIC_POSTHOG_KEY) { - posthog.init(process.env.NEXT_PUBLIC_POSTHOG_KEY, { - api_host: process.env.NEXT_PUBLIC_POSTHOG_HOST, + if (postHogCredentials.key) { + posthog.init(postHogCredentials.key, { + api_host: postHogCredentials.host, defaults: "2025-11-30", capture_pageview: false, capture_pageleave: true, diff --git a/autogpt_platform/frontend/src/services/network-status/NetworkStatusMonitor.tsx b/autogpt_platform/frontend/src/services/network-status/NetworkStatusMonitor.tsx new file mode 100644 index 0000000000..7552bbf78c --- /dev/null +++ b/autogpt_platform/frontend/src/services/network-status/NetworkStatusMonitor.tsx @@ -0,0 +1,8 @@ +"use client"; + +import { useNetworkStatus } from "./useNetworkStatus"; + +export function NetworkStatusMonitor() { + useNetworkStatus(); + return null; +} diff --git a/autogpt_platform/frontend/src/services/network-status/useNetworkStatus.ts b/autogpt_platform/frontend/src/services/network-status/useNetworkStatus.ts new file mode 100644 index 0000000000..472a6e0e90 --- /dev/null +++ b/autogpt_platform/frontend/src/services/network-status/useNetworkStatus.ts @@ -0,0 +1,28 @@ +"use client"; + +import { useEffect } from "react"; +import { toast } from "sonner"; + +export function useNetworkStatus() { + useEffect(function monitorNetworkStatus() { + function handleOnline() { + toast.success("Connection restored", { + description: "You're back 
online", + }); + } + + function handleOffline() { + toast.error("You're offline", { + description: "Check your internet connection", + }); + } + + window.addEventListener("online", handleOnline); + window.addEventListener("offline", handleOffline); + + return function cleanup() { + window.removeEventListener("online", handleOnline); + window.removeEventListener("offline", handleOffline); + }; + }, []); +} From 2134d777bef69cc9d64a11beb843d3165dadadb7 Mon Sep 17 00:00:00 2001 From: Swifty Date: Tue, 27 Jan 2026 16:21:13 +0100 Subject: [PATCH 12/21] fix(backend): exclude disabled blocks from chat search and indexing (#11854) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary Disabled blocks (e.g., webhook blocks without `platform_base_url` configured) were being indexed and returned in chat tool search results. This PR ensures they are properly filtered out. ### Changes 🏗️ - **find_block.py**: Skip disabled blocks when enriching search results - **content_handlers.py**: - Skip disabled blocks during embedding indexing - Update `get_stats()` to only count enabled blocks for accurate coverage metrics ### Why Blocks can be disabled for various reasons (missing OAuth config, no platform URL for webhooks, etc.). These blocks shouldn't appear in search results since users cannot use them. ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Verified disabled blocks are filtered from search results - [x] Verified disabled blocks are not indexed - [x] Verified stats accurately reflect enabled block count --- .../api/features/chat/tools/find_block.py | 3 ++- .../api/features/store/content_handlers.py | 15 +++++++++++++-- .../features/store/content_handlers_test.py | 18 ++++++++++++++---- 3 files changed, 29 insertions(+), 7 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py index a5e66f0a1c..7ca85961f9 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py @@ -107,7 +107,8 @@ class FindBlockTool(BaseTool): block_id = result["content_id"] block = get_block(block_id) - if block: + # Skip disabled blocks + if block and not block.disabled: # Get input/output schemas input_schema = {} output_schema = {} diff --git a/autogpt_platform/backend/backend/api/features/store/content_handlers.py b/autogpt_platform/backend/backend/api/features/store/content_handlers.py index 1560db421c..cbbdcfbebf 100644 --- a/autogpt_platform/backend/backend/api/features/store/content_handlers.py +++ b/autogpt_platform/backend/backend/api/features/store/content_handlers.py @@ -188,6 +188,10 @@ class BlockHandler(ContentHandler): try: block_instance = block_cls() + # Skip disabled blocks - they shouldn't be indexed + if block_instance.disabled: + continue + # Build searchable text from block metadata parts = [] if hasattr(block_instance, "name") and block_instance.name: @@ -248,12 +252,19 @@ class BlockHandler(ContentHandler): from backend.data.block import get_blocks all_blocks = get_blocks() - total_blocks = len(all_blocks) + + # Filter out disabled blocks - they're not indexed + enabled_block_ids = [ + block_id + for block_id, block_cls in all_blocks.items() + if not block_cls().disabled + ] + total_blocks = 
len(enabled_block_ids)
 
         if total_blocks == 0:
             return {"total": 0, "with_embeddings": 0, "without_embeddings": 0}
 
-        block_ids = list(all_blocks.keys())
+        block_ids = enabled_block_ids
         placeholders = ",".join([f"${i+1}" for i in range(len(block_ids))])
 
         embedded_result = await query_raw_with_schema(
diff --git a/autogpt_platform/backend/backend/api/features/store/content_handlers_test.py b/autogpt_platform/backend/backend/api/features/store/content_handlers_test.py
index 28bc88e270..fee879fae0 100644
--- a/autogpt_platform/backend/backend/api/features/store/content_handlers_test.py
+++ b/autogpt_platform/backend/backend/api/features/store/content_handlers_test.py
@@ -81,6 +81,7 @@ async def test_block_handler_get_missing_items(mocker):
     mock_block_instance.name = "Calculator Block"
     mock_block_instance.description = "Performs calculations"
     mock_block_instance.categories = [MagicMock(value="MATH")]
+    mock_block_instance.disabled = False
     mock_block_instance.input_schema.model_json_schema.return_value = {
         "properties": {"expression": {"description": "Math expression to evaluate"}}
     }
@@ -116,11 +117,18 @@ async def test_block_handler_get_stats(mocker):
     """Test BlockHandler returns correct stats."""
     handler = BlockHandler()
 
-    # Mock get_blocks
+    # Mock get_blocks - each block class returns an instance with disabled=False
+    def make_mock_block_class():
+        mock_class = MagicMock()
+        mock_instance = MagicMock()
+        mock_instance.disabled = False
+        mock_class.return_value = mock_instance
+        return mock_class
+
     mock_blocks = {
-        "block-1": MagicMock(),
-        "block-2": MagicMock(),
-        "block-3": MagicMock(),
+        "block-1": make_mock_block_class(),
+        "block-2": make_mock_block_class(),
+        "block-3": make_mock_block_class(),
     }
 
     # Mock embedded count query (2 blocks have embeddings)
@@ -309,6 +317,7 @@ async def test_block_handler_handles_missing_attributes():
     mock_block_class = MagicMock()
     mock_block_instance = MagicMock()
     mock_block_instance.name = "Minimal Block"
+    mock_block_instance.disabled = False
     # No description, categories, or schema
     del mock_block_instance.description
     del mock_block_instance.categories
@@ -342,6 +351,7 @@ async def test_block_handler_skips_failed_blocks():
     good_instance.name = "Good Block"
     good_instance.description = "Works fine"
     good_instance.categories = []
+    good_instance.disabled = False
     good_block.return_value = good_instance
 
     bad_block = MagicMock()

From 071b3bb5cd3c85f99d12970e60f83672ceab1c08 Mon Sep 17 00:00:00 2001
From: Ubbe 
Date: Wed, 28 Jan 2026 00:49:28 +0700
Subject: [PATCH 13/21] fix(frontend): more copilot refinements (#11858)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Changes 🏗️

On the **Copilot** page:

- prevent unnecessary sidebar repaints
- show a confirmation dialog before switching chats from the sidebar terminates the stream that is currently running
- improve loading states, including a dedicated state while switching between chats
- preserve in-progress stream state more reliably when the client disconnects

### Checklist 📋

#### For code changes:

- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
  - [x] Run the app locally and test the above

---
 .../components/CopilotShell/CopilotShell.tsx  |  63 +++++-
 .../CopilotShell/useCopilotShell.ts           | 106 +++++++++-
 .../(platform)/copilot/copilot-page-store.ts  |  81 ++++++--
 .../src/app/(platform)/copilot/page.tsx       |  31 ++-
 .../app/(platform)/copilot/useCopilotPage.ts  |  13 --
 .../src/components/contextual/Chat/Chat.tsx   |  21 +-
 .../components/contextual/Chat/chat-store.ts  | 181 ++++++++++++------
.../components/ChatMessage/ChatMessage.tsx | 6 +- .../UserChatBubble/UserChatBubble.tsx | 2 +- .../contextual/Chat/useChatSession.ts | 12 -- 10 files changed, 376 insertions(+), 140 deletions(-) diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx index fb22640302..8c9f9d528c 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx @@ -3,7 +3,7 @@ import { ChatLoader } from "@/components/contextual/Chat/components/ChatLoader/ChatLoader"; import { NAVBAR_HEIGHT_PX } from "@/lib/constants"; import type { ReactNode } from "react"; -import { useEffect } from "react"; +import { useCallback, useEffect } from "react"; import { useCopilotStore } from "../../copilot-page-store"; import { DesktopSidebar } from "./components/DesktopSidebar/DesktopSidebar"; import { LoadingState } from "./components/LoadingState/LoadingState"; @@ -25,10 +25,12 @@ export function CopilotShell({ children }: Props) { sessions, currentSessionId, handleSelectSession, + performSelectSession, handleOpenDrawer, handleCloseDrawer, handleDrawerOpenChange, handleNewChat, + performNewChat, hasNextPage, isFetchingNextPage, fetchNextPage, @@ -36,22 +38,71 @@ export function CopilotShell({ children }: Props) { } = useCopilotShell(); const setNewChatHandler = useCopilotStore((s) => s.setNewChatHandler); + const setNewChatWithInterruptHandler = useCopilotStore( + (s) => s.setNewChatWithInterruptHandler, + ); + const setSelectSessionHandler = useCopilotStore( + (s) => s.setSelectSessionHandler, + ); + const setSelectSessionWithInterruptHandler = useCopilotStore( + (s) => s.setSelectSessionWithInterruptHandler, + ); const requestNewChat = useCopilotStore((s) => s.requestNewChat); + const requestSelectSession = useCopilotStore((s) => s.requestSelectSession); + + const stableHandleNewChat = useCallback(handleNewChat, [handleNewChat]); + const stablePerformNewChat = useCallback(performNewChat, [performNewChat]); useEffect( - function registerNewChatHandler() { - setNewChatHandler(handleNewChat); + function registerNewChatHandlers() { + setNewChatHandler(stableHandleNewChat); + setNewChatWithInterruptHandler(stablePerformNewChat); return function cleanup() { setNewChatHandler(null); + setNewChatWithInterruptHandler(null); }; }, - [handleNewChat], + [ + stableHandleNewChat, + stablePerformNewChat, + setNewChatHandler, + setNewChatWithInterruptHandler, + ], + ); + + const stableHandleSelectSession = useCallback(handleSelectSession, [ + handleSelectSession, + ]); + + const stablePerformSelectSession = useCallback(performSelectSession, [ + performSelectSession, + ]); + + useEffect( + function registerSelectSessionHandlers() { + setSelectSessionHandler(stableHandleSelectSession); + setSelectSessionWithInterruptHandler(stablePerformSelectSession); + return function cleanup() { + setSelectSessionHandler(null); + setSelectSessionWithInterruptHandler(null); + }; + }, + [ + stableHandleSelectSession, + stablePerformSelectSession, + setSelectSessionHandler, + setSelectSessionWithInterruptHandler, + ], ); function handleNewChatClick() { requestNewChat(); } + function handleSessionClick(sessionId: string) { + requestSelectSession(sessionId); + } + if (!isLoggedIn) { return (
@@ -72,7 +123,7 @@ export function CopilotShell({ children }: Props) {
         isLoading={isLoading}
         hasNextPage={hasNextPage}
         isFetchingNextPage={isFetchingNextPage}
-        onSelectSession={handleSelectSession}
+        onSelectSession={handleSessionClick}
         onFetchNextPage={fetchNextPage}
         onNewChat={handleNewChatClick}
         hasActiveSession={Boolean(hasActiveSession)}
@@ -94,7 +145,7 @@ export function CopilotShell({ children }: Props) {
         isLoading={isLoading}
         hasNextPage={hasNextPage}
         isFetchingNextPage={isFetchingNextPage}
-        onSelectSession={handleSelectSession}
+        onSelectSession={handleSessionClick}
         onFetchNextPage={fetchNextPage}
         onNewChat={handleNewChatClick}
         onClose={handleCloseDrawer}
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts
index a3aa0b55b2..3154df2975 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts
@@ -1,17 +1,20 @@
 "use client";
 
 import {
+  getGetV2GetSessionQueryKey,
   getGetV2ListSessionsQueryKey,
   useGetV2GetSession,
 } from "@/app/api/__generated__/endpoints/chat/chat";
 import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse";
 import { okData } from "@/app/api/helpers";
+import { useChatStore } from "@/components/contextual/Chat/chat-store";
 import { useBreakpoint } from "@/lib/hooks/useBreakpoint";
 import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
 import { useQueryClient } from "@tanstack/react-query";
 import { parseAsString, useQueryState } from "nuqs";
 import { usePathname, useSearchParams } from "next/navigation";
 import { useEffect, useRef, useState } from "react";
+import { useCopilotStore } from "../../copilot-page-store";
 import { useMobileDrawer } from "./components/MobileDrawer/useMobileDrawer";
 import { useSessionsPagination } from "./components/SessionsList/useSessionsPagination";
 import {
@@ -73,6 +76,19 @@ export function useCopilotShell() {
     Map
   >(new Map());
 
+  const [optimisticSessionId, setOptimisticSessionId] = useState<string | null>(
+    null,
+  );
+
+  useEffect(
+    function clearOptimisticWhenUrlMatches() {
+      if (optimisticSessionId && currentSessionId === optimisticSessionId) {
+        setOptimisticSessionId(null);
+      }
+    },
+    [currentSessionId, optimisticSessionId],
+  );
+
   // Mark as auto-selected when sessionId is in URL
   useEffect(() => {
     if (paramSessionId && !hasAutoSelectedRef.current) {
@@ -142,7 +158,9 @@ export function useCopilotShell() {
   const visibleSessions = filterVisibleSessions(sessions);
 
   const sidebarSelectedSessionId =
-    isOnHomepage && !paramSessionId ? null : currentSessionId;
+    isOnHomepage && !paramSessionId && !optimisticSessionId
+      ? null
+      : optimisticSessionId || currentSessionId;
 
   const isReadyToShowContent = isOnHomepage
     ? 
true @@ -155,8 +173,89 @@ export function useCopilotShell() { hasAutoSelectedSession, ); - function handleSelectSession(sessionId: string) { + const stopStream = useChatStore((s) => s.stopStream); + const onStreamComplete = useChatStore((s) => s.onStreamComplete); + const setIsSwitchingSession = useCopilotStore((s) => s.setIsSwitchingSession); + + async function performSelectSession(sessionId: string) { + if (sessionId === currentSessionId) return; + + const sourceSessionId = currentSessionId; + + if (sourceSessionId) { + setIsSwitchingSession(true); + + await new Promise(function waitForStreamComplete(resolve) { + const unsubscribe = onStreamComplete( + function handleComplete(completedId) { + if (completedId === sourceSessionId) { + clearTimeout(timeout); + unsubscribe(); + resolve(); + } + }, + ); + const timeout = setTimeout(function handleTimeout() { + unsubscribe(); + resolve(); + }, 3000); + stopStream(sourceSessionId); + }); + + queryClient.invalidateQueries({ + queryKey: getGetV2GetSessionQueryKey(sourceSessionId), + }); + } + + setOptimisticSessionId(sessionId); setUrlSessionId(sessionId, { shallow: false }); + setIsSwitchingSession(false); + if (isMobile) handleCloseDrawer(); + } + + function handleSelectSession(sessionId: string) { + if (sessionId === currentSessionId) return; + setOptimisticSessionId(sessionId); + setUrlSessionId(sessionId, { shallow: false }); + if (isMobile) handleCloseDrawer(); + } + + async function performNewChat() { + const sourceSessionId = currentSessionId; + + if (sourceSessionId) { + setIsSwitchingSession(true); + + await new Promise(function waitForStreamComplete(resolve) { + const unsubscribe = onStreamComplete( + function handleComplete(completedId) { + if (completedId === sourceSessionId) { + clearTimeout(timeout); + unsubscribe(); + resolve(); + } + }, + ); + const timeout = setTimeout(function handleTimeout() { + unsubscribe(); + resolve(); + }, 3000); + stopStream(sourceSessionId); + }); + + queryClient.invalidateQueries({ + queryKey: getGetV2GetSessionQueryKey(sourceSessionId), + }); + setIsSwitchingSession(false); + } + + resetAutoSelect(); + resetPagination(); + queryClient.invalidateQueries({ + queryKey: getGetV2ListSessionsQueryKey(), + }); + setUrlSessionId(null, { shallow: false }); + setOptimisticSessionId(null); if (isMobile) handleCloseDrawer(); } @@ -167,6 +266,7 @@ export function useCopilotShell() { queryKey: getGetV2ListSessionsQueryKey(), }); setUrlSessionId(null, { shallow: false }); + setOptimisticSessionId(null); if (isMobile) handleCloseDrawer(); } @@ -187,10 +287,12 @@ export function useCopilotShell() { sessions: visibleSessions, currentSessionId: sidebarSelectedSessionId, handleSelectSession, + performSelectSession, handleOpenDrawer, handleCloseDrawer, handleDrawerOpenChange, handleNewChat, + performNewChat, hasNextPage, isFetchingNextPage: isSessionsFetching, fetchNextPage, diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts index 22bf5000a1..486d31865b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts @@ -4,51 +4,106 @@ import { create } from "zustand"; interface CopilotStoreState { isStreaming: boolean; - isNewChatModalOpen: boolean; + isSwitchingSession: boolean; + isInterruptModalOpen: boolean; + pendingAction: (() => void) | null; newChatHandler: (() => void) | null; + 
newChatWithInterruptHandler: (() => void) | null; + selectSessionHandler: ((sessionId: string) => void) | null; + selectSessionWithInterruptHandler: ((sessionId: string) => void) | null; } interface CopilotStoreActions { setIsStreaming: (isStreaming: boolean) => void; + setIsSwitchingSession: (isSwitchingSession: boolean) => void; setNewChatHandler: (handler: (() => void) | null) => void; + setNewChatWithInterruptHandler: (handler: (() => void) | null) => void; + setSelectSessionHandler: ( + handler: ((sessionId: string) => void) | null, + ) => void; + setSelectSessionWithInterruptHandler: ( + handler: ((sessionId: string) => void) | null, + ) => void; requestNewChat: () => void; - confirmNewChat: () => void; - cancelNewChat: () => void; + requestSelectSession: (sessionId: string) => void; + confirmInterrupt: () => void; + cancelInterrupt: () => void; } type CopilotStore = CopilotStoreState & CopilotStoreActions; export const useCopilotStore = create((set, get) => ({ isStreaming: false, - isNewChatModalOpen: false, + isSwitchingSession: false, + isInterruptModalOpen: false, + pendingAction: null, newChatHandler: null, + newChatWithInterruptHandler: null, + selectSessionHandler: null, + selectSessionWithInterruptHandler: null, setIsStreaming(isStreaming) { set({ isStreaming }); }, + setIsSwitchingSession(isSwitchingSession) { + set({ isSwitchingSession }); + }, + setNewChatHandler(handler) { set({ newChatHandler: handler }); }, + setNewChatWithInterruptHandler(handler) { + set({ newChatWithInterruptHandler: handler }); + }, + + setSelectSessionHandler(handler) { + set({ selectSessionHandler: handler }); + }, + + setSelectSessionWithInterruptHandler(handler) { + set({ selectSessionWithInterruptHandler: handler }); + }, + requestNewChat() { - const { isStreaming, newChatHandler } = get(); + const { isStreaming, newChatHandler, newChatWithInterruptHandler } = get(); if (isStreaming) { - set({ isNewChatModalOpen: true }); + if (!newChatWithInterruptHandler) return; + set({ + isInterruptModalOpen: true, + pendingAction: newChatWithInterruptHandler, + }); } else if (newChatHandler) { newChatHandler(); } }, - confirmNewChat() { - const { newChatHandler } = get(); - set({ isNewChatModalOpen: false }); - if (newChatHandler) { - newChatHandler(); + requestSelectSession(sessionId) { + const { + isStreaming, + selectSessionHandler, + selectSessionWithInterruptHandler, + } = get(); + if (isStreaming) { + if (!selectSessionWithInterruptHandler) return; + set({ + isInterruptModalOpen: true, + pendingAction: () => selectSessionWithInterruptHandler(sessionId), + }); + } else { + if (!selectSessionHandler) return; + selectSessionHandler(sessionId); } }, - cancelNewChat() { - set({ isNewChatModalOpen: false }); + confirmInterrupt() { + const { pendingAction } = get(); + set({ isInterruptModalOpen: false, pendingAction: null }); + if (pendingAction) pendingAction(); + }, + + cancelInterrupt() { + set({ isInterruptModalOpen: false, pendingAction: null }); }, })); diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx index 83b21bf82e..008b06fcda 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx @@ -13,22 +13,15 @@ import { useCopilotPage } from "./useCopilotPage"; export default function CopilotPage() { const { state, handlers } = useCopilotPage(); - const confirmNewChat = useCopilotStore((s) => s.confirmNewChat); - const { - 
greetingName, - quickActions, - isLoading, - pageState, - isNewChatModalOpen, - isReady, - } = state; + const isInterruptModalOpen = useCopilotStore((s) => s.isInterruptModalOpen); + const confirmInterrupt = useCopilotStore((s) => s.confirmInterrupt); + const cancelInterrupt = useCopilotStore((s) => s.cancelInterrupt); + const { greetingName, quickActions, isLoading, pageState, isReady } = state; const { handleQuickAction, startChatWithPrompt, handleSessionNotFound, handleStreamingChange, - handleCancelNewChat, - handleNewChatModalOpen, } = handlers; if (!isReady) return null; @@ -48,31 +41,33 @@ export default function CopilotPage() { title="Interrupt current chat?" styling={{ maxWidth: 300, width: "100%" }} controlled={{ - isOpen: isNewChatModalOpen, - set: handleNewChatModalOpen, + isOpen: isInterruptModalOpen, + set: (open) => { + if (!open) cancelInterrupt(); + }, }} - onClose={handleCancelNewChat} + onClose={cancelInterrupt} >
The current chat response will be interrupted. Are you sure you - want to start a new chat? + want to continue?
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts index 1d9c843d7d..8cf4599a12 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts @@ -75,9 +75,7 @@ export function useCopilotPage() { const { user, isLoggedIn, isUserLoading } = useSupabase(); const { toast } = useToast(); - const isNewChatModalOpen = useCopilotStore((s) => s.isNewChatModalOpen); const setIsStreaming = useCopilotStore((s) => s.setIsStreaming); - const cancelNewChat = useCopilotStore((s) => s.cancelNewChat); const isChatEnabled = useGetFlag(Flag.CHAT); const flags = useFlags(); @@ -201,21 +199,12 @@ export function useCopilotPage() { setIsStreaming(isStreamingValue); } - function handleCancelNewChat() { - cancelNewChat(); - } - - function handleNewChatModalOpen(isOpen: boolean) { - if (!isOpen) cancelNewChat(); - } - return { state: { greetingName, quickActions, isLoading: isUserLoading, pageState: state.pageState, - isNewChatModalOpen, isReady: isFlagReady && isChatEnabled !== false && isLoggedIn, }, handlers: { @@ -223,8 +212,6 @@ export function useCopilotPage() { startChatWithPrompt, handleSessionNotFound, handleStreamingChange, - handleCancelNewChat, - handleNewChatModalOpen, }, }; } diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx index ba7584765d..a7a5f61674 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx @@ -1,11 +1,12 @@ "use client"; +import { useCopilotStore } from "@/app/(platform)/copilot/copilot-page-store"; +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; import { Text } from "@/components/atoms/Text/Text"; import { cn } from "@/lib/utils"; import { useEffect, useRef } from "react"; import { ChatContainer } from "./components/ChatContainer/ChatContainer"; import { ChatErrorState } from "./components/ChatErrorState/ChatErrorState"; -import { ChatLoader } from "./components/ChatLoader/ChatLoader"; import { useChat } from "./useChat"; export interface ChatProps { @@ -24,6 +25,7 @@ export function Chat({ onStreamingChange, }: ChatProps) { const hasHandledNotFoundRef = useRef(false); + const isSwitchingSession = useCopilotStore((s) => s.isSwitchingSession); const { messages, isLoading, @@ -47,29 +49,34 @@ export function Chat({ [onSessionNotFound, urlSessionId, isSessionNotFound, isLoading, isCreating], ); + const shouldShowLoader = + (showLoader && (isLoading || isCreating)) || isSwitchingSession; + return (
{/* Main Content */}
{/* Loading State */} - {showLoader && (isLoading || isCreating) && ( + {shouldShowLoader && (
-
- +
+ - Loading your chats... + {isSwitchingSession + ? "Switching chat..." + : "Loading your chat..."}
)} {/* Error State */} - {error && !isLoading && ( + {error && !isLoading && !isSwitchingSession && ( )} {/* Session Content */} - {sessionId && !isLoading && !error && ( + {sessionId && !isLoading && !error && !isSwitchingSession && ( ) { +function cleanupExpiredStreams( + completedStreams: Map, +): Map { const now = Date.now(); - for (const [sessionId, result] of completedStreams) { + const cleaned = new Map(completedStreams); + for (const [sessionId, result] of cleaned) { if (now - result.completedAt > COMPLETED_STREAM_TTL) { - completedStreams.delete(sessionId); + cleaned.delete(sessionId); } } -} - -function moveToCompleted( - activeStreams: Map, - completedStreams: Map, - streamCompleteCallbacks: Set, - sessionId: string, -) { - const stream = activeStreams.get(sessionId); - if (!stream) return; - - const result: StreamResult = { - sessionId, - status: stream.status, - chunks: stream.chunks, - completedAt: Date.now(), - error: stream.error, - }; - - completedStreams.set(sessionId, result); - activeStreams.delete(sessionId); - cleanupCompletedStreams(completedStreams); - - if (stream.status === "completed" || stream.status === "error") { - notifyStreamComplete(streamCompleteCallbacks, sessionId); - } + return cleaned; } export const useChatStore = create((set, get) => ({ @@ -106,17 +84,31 @@ export const useChatStore = create((set, get) => ({ context, onChunk, ) { - const { activeStreams, completedStreams, streamCompleteCallbacks } = get(); + const state = get(); + const newActiveStreams = new Map(state.activeStreams); + let newCompletedStreams = new Map(state.completedStreams); + const callbacks = state.streamCompleteCallbacks; - const existingStream = activeStreams.get(sessionId); + const existingStream = newActiveStreams.get(sessionId); if (existingStream) { existingStream.abortController.abort(); - moveToCompleted( - activeStreams, - completedStreams, - streamCompleteCallbacks, + const normalizedStatus = + existingStream.status === "streaming" + ? 
"completed" + : existingStream.status; + const result: StreamResult = { sessionId, - ); + status: normalizedStatus, + chunks: existingStream.chunks, + completedAt: Date.now(), + error: existingStream.error, + }; + newCompletedStreams.set(sessionId, result); + newActiveStreams.delete(sessionId); + newCompletedStreams = cleanupExpiredStreams(newCompletedStreams); + if (normalizedStatus === "completed" || normalizedStatus === "error") { + notifyStreamComplete(callbacks, sessionId); + } } const abortController = new AbortController(); @@ -132,36 +124,76 @@ export const useChatStore = create((set, get) => ({ onChunkCallbacks: initialCallbacks, }; - activeStreams.set(sessionId, stream); + newActiveStreams.set(sessionId, stream); + set({ + activeStreams: newActiveStreams, + completedStreams: newCompletedStreams, + }); try { await executeStream(stream, message, isUserMessage, context); } finally { if (onChunk) stream.onChunkCallbacks.delete(onChunk); if (stream.status !== "streaming") { - moveToCompleted( - activeStreams, - completedStreams, - streamCompleteCallbacks, - sessionId, - ); + const currentState = get(); + const finalActiveStreams = new Map(currentState.activeStreams); + let finalCompletedStreams = new Map(currentState.completedStreams); + + const storedStream = finalActiveStreams.get(sessionId); + if (storedStream === stream) { + const result: StreamResult = { + sessionId, + status: stream.status, + chunks: stream.chunks, + completedAt: Date.now(), + error: stream.error, + }; + finalCompletedStreams.set(sessionId, result); + finalActiveStreams.delete(sessionId); + finalCompletedStreams = cleanupExpiredStreams(finalCompletedStreams); + set({ + activeStreams: finalActiveStreams, + completedStreams: finalCompletedStreams, + }); + if (stream.status === "completed" || stream.status === "error") { + notifyStreamComplete( + currentState.streamCompleteCallbacks, + sessionId, + ); + } + } } } }, stopStream: function stopStream(sessionId) { - const { activeStreams, completedStreams, streamCompleteCallbacks } = get(); - const stream = activeStreams.get(sessionId); - if (stream) { - stream.abortController.abort(); - stream.status = "completed"; - moveToCompleted( - activeStreams, - completedStreams, - streamCompleteCallbacks, - sessionId, - ); - } + const state = get(); + const stream = state.activeStreams.get(sessionId); + if (!stream) return; + + stream.abortController.abort(); + stream.status = "completed"; + + const newActiveStreams = new Map(state.activeStreams); + let newCompletedStreams = new Map(state.completedStreams); + + const result: StreamResult = { + sessionId, + status: stream.status, + chunks: stream.chunks, + completedAt: Date.now(), + error: stream.error, + }; + newCompletedStreams.set(sessionId, result); + newActiveStreams.delete(sessionId); + newCompletedStreams = cleanupExpiredStreams(newCompletedStreams); + + set({ + activeStreams: newActiveStreams, + completedStreams: newCompletedStreams, + }); + + notifyStreamComplete(state.streamCompleteCallbacks, sessionId); }, subscribeToStream: function subscribeToStream( @@ -169,16 +201,18 @@ export const useChatStore = create((set, get) => ({ onChunk, skipReplay = false, ) { - const { activeStreams } = get(); + const state = get(); + const stream = state.activeStreams.get(sessionId); - const stream = activeStreams.get(sessionId); if (stream) { if (!skipReplay) { for (const chunk of stream.chunks) { onChunk(chunk); } } + stream.onChunkCallbacks.add(onChunk); + return function unsubscribe() { stream.onChunkCallbacks.delete(onChunk); }; 
@@ -204,7 +238,12 @@ export const useChatStore = create((set, get) => ({ }, clearCompletedStream: function clearCompletedStream(sessionId) { - get().completedStreams.delete(sessionId); + const state = get(); + if (!state.completedStreams.has(sessionId)) return; + + const newCompletedStreams = new Map(state.completedStreams); + newCompletedStreams.delete(sessionId); + set({ completedStreams: newCompletedStreams }); }, isStreaming: function isStreaming(sessionId) { @@ -213,11 +252,21 @@ export const useChatStore = create((set, get) => ({ }, registerActiveSession: function registerActiveSession(sessionId) { - get().activeSessions.add(sessionId); + const state = get(); + if (state.activeSessions.has(sessionId)) return; + + const newActiveSessions = new Set(state.activeSessions); + newActiveSessions.add(sessionId); + set({ activeSessions: newActiveSessions }); }, unregisterActiveSession: function unregisterActiveSession(sessionId) { - get().activeSessions.delete(sessionId); + const state = get(); + if (!state.activeSessions.has(sessionId)) return; + + const newActiveSessions = new Set(state.activeSessions); + newActiveSessions.delete(sessionId); + set({ activeSessions: newActiveSessions }); }, isSessionActive: function isSessionActive(sessionId) { @@ -225,10 +274,16 @@ export const useChatStore = create((set, get) => ({ }, onStreamComplete: function onStreamComplete(callback) { - const { streamCompleteCallbacks } = get(); - streamCompleteCallbacks.add(callback); + const state = get(); + const newCallbacks = new Set(state.streamCompleteCallbacks); + newCallbacks.add(callback); + set({ streamCompleteCallbacks: newCallbacks }); + return function unsubscribe() { - streamCompleteCallbacks.delete(callback); + const currentState = get(); + const cleanedCallbacks = new Set(currentState.streamCompleteCallbacks); + cleanedCallbacks.delete(callback); + set({ streamCompleteCallbacks: cleanedCallbacks }); }; }, })); diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx index 0fee33dbc0..29e3a60a8c 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx @@ -126,10 +126,6 @@ export function ChatMessage({ [displayContent, message], ); - function isLongResponse(content: string): boolean { - return content.split("\n").length > 5; - } - const handleTryAgain = useCallback(() => { if (message.type !== "message" || !onSendMessage) return; onSendMessage(message.content, message.role === "user"); @@ -358,7 +354,7 @@ export function ChatMessage({ )} - {!isUser && isFinalMessage && isLongResponse(displayContent) && ( + {!isUser && isFinalMessage && !isStreaming && (
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx index f062df1397..dec221338a 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx @@ -16,6 +16,7 @@ export interface ChatContainerProps { initialPrompt?: string; className?: string; onStreamingChange?: (isStreaming: boolean) => void; + onOperationStarted?: () => void; } export function ChatContainer({ @@ -24,6 +25,7 @@ export function ChatContainer({ initialPrompt, className, onStreamingChange, + onOperationStarted, }: ChatContainerProps) { const { messages, @@ -38,6 +40,7 @@ export function ChatContainer({ sessionId, initialMessages, initialPrompt, + onOperationStarted, }); useEffect(() => { diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts index f406d33db4..f3cac01f96 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts @@ -22,6 +22,7 @@ export interface HandlerDependencies { setIsStreamingInitiated: Dispatch<SetStateAction<boolean>>; setIsRegionBlockedModalOpen: Dispatch<SetStateAction<boolean>>; sessionId: string; + onOperationStarted?: () => void; } export function isRegionBlockedError(chunk: StreamChunk): boolean { @@ -163,6 +164,11 @@ export function handleToolResponse( } return; } + // Trigger polling when operation_started is received + if (responseMessage.type === "operation_started") { + deps.onOperationStarted?.(); + } + deps.setMessages((prev) => { const toolCallIndex = prev.findIndex( (msg) => msg.type === "tool_call" && msg.toolId === chunk.tool_id, diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts index 83730cc308..46f384d055 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts @@ -14,16 +14,40 @@ import { processInitialMessages, } from "./helpers"; +// Helper to generate deduplication key for a message +function getMessageKey(msg: ChatMessageData): string { + if (msg.type === "message") { + // Don't include timestamp - dedupe by role + content only + // This handles the case where local and server timestamps differ + // Server messages are authoritative, so duplicates from local state are filtered + return `msg:${msg.role}:${msg.content}`; + } else if (msg.type === "tool_call") { + return `toolcall:${msg.toolId}`; + } else if (msg.type === "tool_response") { + return `toolresponse:${(msg as any).toolId}`; + } else if ( + msg.type === "operation_started" || + msg.type === "operation_pending" || + msg.type === "operation_in_progress" + ) { + return `op:${(msg as any).toolId || (msg as any).operationId || (msg as any).toolCallId || ""}:${msg.toolName}`; + } else { + return `${msg.type}:${JSON.stringify(msg).slice(0, 100)}`; + } +} + interface Args { sessionId: string | null; initialMessages:
SessionDetailResponse["messages"]; initialPrompt?: string; + onOperationStarted?: () => void; } export function useChatContainer({ sessionId, initialMessages, initialPrompt, + onOperationStarted, }: Args) { const [messages, setMessages] = useState<ChatMessageData[]>([]); const [streamingChunks, setStreamingChunks] = useState<StreamChunk[]>([]); @@ -73,13 +97,20 @@ export function useChatContainer({ setIsRegionBlockedModalOpen, sessionId, setIsStreamingInitiated, + onOperationStarted, }); setIsStreamingInitiated(true); const skipReplay = initialMessages.length > 0; return subscribeToStream(sessionId, dispatcher, skipReplay); }, - [sessionId, stopStreaming, activeStreams, subscribeToStream], + [ + sessionId, + stopStreaming, + activeStreams, + subscribeToStream, + onOperationStarted, + ], ); // Collect toolIds from completed tool results in initialMessages @@ -130,12 +161,19 @@ ); // Combine initial messages from backend with local streaming messages, - // then deduplicate to prevent duplicates when polling refreshes initialMessages + // Server messages maintain correct order; only append truly new local messages const allMessages = useMemo(() => { const processedInitial = processInitialMessages(initialMessages); - // Filter local messages to remove operation messages for completed tools - const filteredLocalMessages = messages.filter((msg) => { + // Build a set of keys from server messages for deduplication + const serverKeys = new Set<string>(); + for (const msg of processedInitial) { + serverKeys.add(getMessageKey(msg)); + } + + // Filter local messages: remove duplicates and completed operation messages + const newLocalMessages = messages.filter((msg) => { + // Remove operation messages for completed tools if ( msg.type === "operation_started" || msg.type === "operation_pending" || @@ -143,48 +181,17 @@ ) { const toolId = (msg as any).toolId || (msg as any).toolCallId; if (toolId && completedToolIds.has(toolId)) { - return false; // Filter out - operation completed + return false; } } - return true; + // Remove messages that already exist in server data + const key = getMessageKey(msg); + return !serverKeys.has(key); }); - const combined = [...processedInitial, ...filteredLocalMessages]; - - // Deduplicate by content+role+timestamp. When initialMessages is refreshed via polling, - // it may contain messages that are also in the local `messages` state. - // Including timestamp prevents dropping legitimate repeated messages (e.g., user sends "yes" twice) - const seen = new Set<string>(); - return combined.filter((msg) => { - // Create a key based on type, role, content, and timestamp for deduplication - let key: string; - if (msg.type === "message") { - // Use timestamp (rounded to nearest second) to allow slight variations - // while still catching true duplicates from SSE/polling overlap - const ts = msg.timestamp - ?
Math.floor(new Date(msg.timestamp).getTime() / 1000) - : ""; - key = `msg:${msg.role}:${ts}:${msg.content}`; - } else if (msg.type === "tool_call") { - key = `toolcall:${msg.toolId}`; - } else if ( - msg.type === "operation_started" || - msg.type === "operation_pending" || - msg.type === "operation_in_progress" - ) { - // Dedupe operation messages by toolId or operationId - key = `op:${(msg as any).toolId || (msg as any).operationId || (msg as any).toolCallId || ""}:${msg.toolName}`; - } else { - // For other types, use a combination of type and first few fields - key = `${msg.type}:${JSON.stringify(msg).slice(0, 100)}`; - } - if (seen.has(key)) { - return false; - } - seen.add(key); - return true; - }); - }, [initialMessages, messages]); + // Server messages first (correct order), then new local messages + return [...processedInitial, ...newLocalMessages]; + }, [initialMessages, messages, completedToolIds]); async function sendMessage( content: string, @@ -217,6 +224,7 @@ export function useChatContainer({ setIsRegionBlockedModalOpen, sessionId, setIsStreamingInitiated, + onOperationStarted, }); try { diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts b/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts index f6b2031059..124301abc4 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts @@ -26,6 +26,7 @@ export function useChat({ urlSessionId }: UseChatArgs = {}) { claimSession, clearSession: clearSessionBase, loadSession, + startPollingForOperation, } = useChatSession({ urlSessionId, autoCreate: false, @@ -94,5 +95,6 @@ export function useChat({ urlSessionId }: UseChatArgs = {}) { loadSession, sessionId: sessionIdFromHook, showLoader, + startPollingForOperation, }; } diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts b/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts index 3fe4f801c6..936a49936c 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts @@ -103,9 +103,14 @@ export function useChatSession({ } }, [createError, loadError]); + // Track if we should be polling (set by external callers when they receive operation_started via SSE) + const [forcePolling, setForcePolling] = useState(false); + // Track if we've seen server acknowledge the pending operation (to avoid clearing forcePolling prematurely) + const hasSeenServerPendingRef = useRef(false); + // Check if there are any pending operations in the messages // Must check all operation types: operation_pending, operation_started, operation_in_progress - const hasPendingOperations = useMemo(() => { + const hasPendingOperationsFromServer = useMemo(() => { if (!messages || messages.length === 0) return false; const pendingTypes = new Set([ "operation_pending", @@ -126,6 +131,35 @@ export function useChatSession({ }); }, [messages]); + // Track when server has acknowledged the pending operation + useEffect(() => { + if (hasPendingOperationsFromServer) { + hasSeenServerPendingRef.current = true; + } + }, [hasPendingOperationsFromServer]); + + // Combined: poll if server has pending ops OR if we received operation_started via SSE + const hasPendingOperations = hasPendingOperationsFromServer || forcePolling; + + // Clear forcePolling only after server has acknowledged AND completed the operation + useEffect(() => { + if ( 
+ forcePolling && + !hasPendingOperationsFromServer && + hasSeenServerPendingRef.current + ) { + // Server acknowledged the operation and it's now complete + setForcePolling(false); + hasSeenServerPendingRef.current = false; + } + }, [forcePolling, hasPendingOperationsFromServer]); + + // Function to trigger polling (called when operation_started is received via SSE) + function startPollingForOperation() { + setForcePolling(true); + hasSeenServerPendingRef.current = false; // Reset for new operation + } + // Refresh sessions list when a pending operation completes // (hasPendingOperations transitions from true to false) const prevHasPendingOperationsRef = useRef(hasPendingOperations); @@ -144,7 +178,8 @@ [hasPendingOperations, sessionId, queryClient], ); - // Poll for updates when there are pending operations (long poll - 10s intervals with backoff) + // Poll for updates when there are pending operations + // Backoff: 2s, 4s, 6s, 8s, 10s, ... up to 30s max const pollAttemptRef = useRef(0); const hasPendingOperationsRef = useRef(hasPendingOperations); hasPendingOperationsRef.current = hasPendingOperations; @@ -159,27 +194,17 @@ let cancelled = false; let timeoutId: ReturnType<typeof setTimeout> | null = null; - // Calculate delay with exponential backoff: 10s, 15s, 20s, 25s, 30s (max) - const baseDelay = 10000; - const maxDelay = 30000; - function schedule() { - const delay = Math.min( - baseDelay + pollAttemptRef.current * 5000, - maxDelay, - ); + // 2s, 4s, 6s, 8s, 10s, ... 30s (max) + const delay = Math.min((pollAttemptRef.current + 1) * 2000, 30000); timeoutId = setTimeout(async () => { if (cancelled) return; - console.info( - `[useChatSession] Polling for pending operation updates (attempt ${pollAttemptRef.current + 1})`, - ); pollAttemptRef.current += 1; try { await refetch(); } catch (err) { console.error("[useChatSession] Poll failed:", err); } finally { - // Continue polling if still pending and not cancelled if (!cancelled && hasPendingOperationsRef.current) { schedule(); } @@ -329,6 +354,7 @@ refreshSession, claimSession, clearSession, + startPollingForOperation, }; } From 09539839441e86f0684ed6bc5bebefe7ab4bf03f Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Wed, 28 Jan 2026 01:22:46 -0600 Subject: [PATCH 21/21] feat(platform): disable onboarding redirects and add $5 signup bonus (#11862) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Disable automatic onboarding redirects on signup/login while keeping the checklist/wallet functional. Users now receive $5 (500 credits) on their first visit to /copilot. ### Changes 🏗️ - **Frontend**: `shouldShowOnboarding()` now returns `false`, disabling auto-redirects to `/onboarding` - **Backend**: Added `VISIT_COPILOT` onboarding step with a 500-credit ($5) reward - **Frontend**: Copilot page automatically completes `VISIT_COPILOT` step on mount - **Database**: Migration to add `VISIT_COPILOT` to `OnboardingStep` enum NOTE: `/onboarding/1-welcome` now redirects to `/library`, since `shouldShowOnboarding` always returns `false` Users land directly on `/copilot` after signup/login and receive $5 invisibly (not shown in checklist UI).
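Since the copilot page calls the step-completion hook on every mount, the one-time nature of the bonus depends on the onboarding reward mechanism being idempotent. A rough sketch of that contract (hypothetical names; the real check lives server-side in the onboarding reward logic):

```typescript
// Hypothetical sketch of an idempotent one-time reward grant.
interface OnboardingState {
  completedSteps: Set<string>;
  credits: number;
}

const STEP_REWARDS: Record<string, number> = {
  VISIT_COPILOT: 500, // 500 credits == $5 welcome bonus
};

function completeStep(state: OnboardingState, step: string): OnboardingState {
  // Repeated calls (e.g., on every page mount) grant the reward at most once.
  if (state.completedSteps.has(step)) return state;
  return {
    completedSteps: new Set(state.completedSteps).add(step),
    credits: state.credits + (STEP_REWARDS[step] ?? 0),
  };
}
```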
### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] New user signup (email/password) → lands on `/copilot`, wallet shows 500 credits - [x] Verified credits are only granted once (idempotent via onboarding reward mechanism) - [x] Existing user login (already granted flag set) → lands on `/copilot`, no duplicate credits - [x] Checklist/wallet remains functional #### For configuration changes: - [x] `.env.default` is updated or already compatible with my changes - [x] `docker-compose.yml` is updated or already compatible with my changes - [x] I have included a list of my configuration changes in the PR description (under **Changes**) No configuration changes required. --- OPEN-2967 🤖 Generated with [Claude Code](https://claude.ai/code) --- > [!NOTE] > Introduces a new onboarding step and adjusts onboarding flow. > > - Adds `VISIT_COPILOT` onboarding step (+500 credits) with DB enum migration and API/type updates > - Copilot page auto-completes `VISIT_COPILOT` on mount to grant the welcome bonus > - Changes `/onboarding/enabled` to require user context and return `false` when `CHAT` feature is enabled (skips legacy onboarding) > - Wallet now refreshes credits on any onboarding `step_completed` notification; confetti limited to visible tasks > - Test flows updated to accept redirects to `copilot`/`library` and verify authenticated state > > Written by [Cursor Bugbot](https://cursor.com/dashboard?tab=bugbot) for commit ec5a5a4dfdd76f8bd9b5918c38cee9b9d6832247. This will update automatically on new commits. Configure [here](https://cursor.com/dashboard?tab=bugbot). --------- Co-authored-by: Claude Opus 4.5 Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: claude[bot] <41898282+claude[bot]@users.noreply.github.com> Co-authored-by: Nicholas Tindle --- .../backend/backend/api/features/v1.py | 8 ++++-- .../backend/backend/data/onboarding.py | 4 +++ .../migration.sql | 2 ++ autogpt_platform/backend/schema.prisma | 1 + .../app/(platform)/copilot/useCopilotPage.ts | 9 +++++++ .../frontend/src/app/api/openapi.json | 2 ++ .../Navbar/components/Wallet/Wallet.tsx | 14 +++++++---- .../src/lib/autogpt-server-api/types.ts | 1 + .../frontend/src/tests/pages/login.page.ts | 6 ++++- .../frontend/src/tests/utils/signup.ts | 25 ++++++++++++------- 10 files changed, 55 insertions(+), 17 deletions(-) create mode 100644 autogpt_platform/backend/migrations/20260127211502_add_visit_copilot_onboarding_step/migration.sql diff --git a/autogpt_platform/backend/backend/api/features/v1.py b/autogpt_platform/backend/backend/api/features/v1.py index 51789f9e2b..62b532089c 100644 --- a/autogpt_platform/backend/backend/api/features/v1.py +++ b/autogpt_platform/backend/backend/api/features/v1.py @@ -265,9 +265,13 @@ async def get_onboarding_agents( "/onboarding/enabled", summary="Is onboarding enabled", tags=["onboarding", "public"], - dependencies=[Security(requires_user)], ) -async def is_onboarding_enabled() -> bool: +async def is_onboarding_enabled( + user_id: Annotated[str, Security(get_user_id)], +) -> bool: + # If chat is enabled for user, skip legacy onboarding + if await is_feature_enabled(Flag.CHAT, user_id, False): + return False return await onboarding_enabled() diff --git a/autogpt_platform/backend/backend/data/onboarding.py b/autogpt_platform/backend/backend/data/onboarding.py index 6a842d1022..4af8e8dffd 100644 --- 
a/autogpt_platform/backend/backend/data/onboarding.py +++ b/autogpt_platform/backend/backend/data/onboarding.py @@ -41,6 +41,7 @@ FrontendOnboardingStep = Literal[ OnboardingStep.AGENT_NEW_RUN, OnboardingStep.AGENT_INPUT, OnboardingStep.CONGRATS, + OnboardingStep.VISIT_COPILOT, OnboardingStep.MARKETPLACE_VISIT, OnboardingStep.BUILDER_OPEN, ] @@ -122,6 +123,9 @@ async def update_user_onboarding(user_id: str, data: UserOnboardingUpdate): async def _reward_user(user_id: str, onboarding: UserOnboarding, step: OnboardingStep): reward = 0 match step: + # Welcome bonus for visiting copilot ($5 = 500 credits) + case OnboardingStep.VISIT_COPILOT: + reward = 500 # Reward user when they clicked New Run during onboarding # This is because they need credits before scheduling a run (next step) # This is seen as a reward for the GET_RESULTS step in the wallet diff --git a/autogpt_platform/backend/migrations/20260127211502_add_visit_copilot_onboarding_step/migration.sql b/autogpt_platform/backend/migrations/20260127211502_add_visit_copilot_onboarding_step/migration.sql new file mode 100644 index 0000000000..6a08d9231b --- /dev/null +++ b/autogpt_platform/backend/migrations/20260127211502_add_visit_copilot_onboarding_step/migration.sql @@ -0,0 +1,2 @@ +-- AlterEnum +ALTER TYPE "OnboardingStep" ADD VALUE 'VISIT_COPILOT'; diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma index de94600820..2c52528e3f 100644 --- a/autogpt_platform/backend/schema.prisma +++ b/autogpt_platform/backend/schema.prisma @@ -81,6 +81,7 @@ enum OnboardingStep { AGENT_INPUT CONGRATS // First Wins + VISIT_COPILOT GET_RESULTS MARKETPLACE_VISIT MARKETPLACE_ADD_AGENT diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts index 38796946f4..e4713cd24a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts @@ -5,6 +5,7 @@ import { import { useToast } from "@/components/molecules/Toast/use-toast"; import { getHomepageRoute } from "@/lib/constants"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; +import { useOnboarding } from "@/providers/onboarding/onboarding-provider"; import { Flag, type FlagValues, @@ -25,12 +26,20 @@ export function useCopilotPage() { const queryClient = useQueryClient(); const { user, isLoggedIn, isUserLoading } = useSupabase(); const { toast } = useToast(); + const { completeStep } = useOnboarding(); const { urlSessionId, setUrlSessionId } = useCopilotSessionId(); const setIsStreaming = useCopilotStore((s) => s.setIsStreaming); const isCreating = useCopilotStore((s) => s.isCreatingSession); const setIsCreating = useCopilotStore((s) => s.setIsCreatingSession); + // Complete VISIT_COPILOT onboarding step to grant $5 welcome bonus + useEffect(() => { + if (isLoggedIn) { + completeStep("VISIT_COPILOT"); + } + }, [completeStep, isLoggedIn]); + const isChatEnabled = useGetFlag(Flag.CHAT); const flags = useFlags(); const homepageRoute = getHomepageRoute(isChatEnabled); diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index b4e2bc80bd..d1ecd91702 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -4594,6 +4594,7 @@ "AGENT_NEW_RUN", "AGENT_INPUT", "CONGRATS", + "VISIT_COPILOT", "MARKETPLACE_VISIT", "BUILDER_OPEN" ], @@ -8754,6 
+8755,7 @@ "AGENT_NEW_RUN", "AGENT_INPUT", "CONGRATS", + "VISIT_COPILOT", "GET_RESULTS", "MARKETPLACE_VISIT", "MARKETPLACE_ADD_AGENT", diff --git a/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx b/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx index 0a3c7de6c8..4a25c84f92 100644 --- a/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx +++ b/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx @@ -255,13 +255,18 @@ export function Wallet() { (notification: WebSocketNotification) => { if ( notification.type !== "onboarding" || - notification.event !== "step_completed" || - !walletRef.current + notification.event !== "step_completed" ) { return; } - // Only trigger confetti for tasks that are in groups + // Always refresh credits when any onboarding step completes + fetchCredits(); + + // Only trigger confetti for tasks that are in displayed groups + if (!walletRef.current) { + return; + } const taskIds = groups .flatMap((group) => group.tasks) .map((task) => task.id); @@ -274,7 +279,6 @@ export function Wallet() { return; } - fetchCredits(); party.confetti(walletRef.current, { count: 30, spread: 120, @@ -284,7 +288,7 @@ export function Wallet() { modules: [fadeOut], }); }, - [fetchCredits, fadeOut], + [fetchCredits, fadeOut, groups], ); // WebSocket setup for onboarding notifications diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts index 82c03bc9f1..2d583d2062 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts @@ -1003,6 +1003,7 @@ export type OnboardingStep = | "AGENT_INPUT" | "CONGRATS" // First Wins + | "VISIT_COPILOT" | "GET_RESULTS" | "MARKETPLACE_VISIT" | "MARKETPLACE_ADD_AGENT" diff --git a/autogpt_platform/frontend/src/tests/pages/login.page.ts b/autogpt_platform/frontend/src/tests/pages/login.page.ts index 9082cc6219..adcb8d908b 100644 --- a/autogpt_platform/frontend/src/tests/pages/login.page.ts +++ b/autogpt_platform/frontend/src/tests/pages/login.page.ts @@ -37,9 +37,13 @@ export class LoginPage { this.page.on("load", (page) => console.log(`ℹ️ Now at URL: ${page.url()}`)); // Start waiting for navigation before clicking + // Wait for redirect to marketplace, onboarding, library, or copilot (new landing pages) const leaveLoginPage = this.page .waitForURL( - (url) => /^\/(marketplace|onboarding(\/.*)?)?$/.test(url.pathname), + (url: URL) => + /^\/(marketplace|onboarding(\/.*)?|library|copilot)?$/.test( + url.pathname, + ), { timeout: 10_000 }, ) .catch((reason) => { diff --git a/autogpt_platform/frontend/src/tests/utils/signup.ts b/autogpt_platform/frontend/src/tests/utils/signup.ts index 7c8fdbe01b..192a9129b9 100644 --- a/autogpt_platform/frontend/src/tests/utils/signup.ts +++ b/autogpt_platform/frontend/src/tests/utils/signup.ts @@ -36,14 +36,16 @@ export async function signupTestUser( const signupButton = getButton("Sign up"); await signupButton.click(); - // Wait for successful signup - could redirect to onboarding or marketplace + // Wait for successful signup - could redirect to various pages depending on onboarding state try { - // Wait for either onboarding or marketplace redirect - await Promise.race([ - page.waitForURL(/\/onboarding/, { timeout: 15000 }), - page.waitForURL(/\/marketplace/, { timeout: 15000 }), - ]); + // Wait for redirect to onboarding, 
marketplace, copilot, or library + // Use a single waitForURL with a callback to avoid Promise.race race conditions + await page.waitForURL( + (url: URL) => + /\/(onboarding|marketplace|copilot|library)/.test(url.pathname), + { timeout: 15000 }, + ); } catch (error) { console.error( "❌ Timeout waiting for redirect, current URL:", @@ -54,14 +56,19 @@ export async function signupTestUser( const currentUrl = page.url(); - // Handle onboarding or marketplace redirect + // Handle onboarding redirect if needed if (currentUrl.includes("/onboarding") && ignoreOnboarding) { await page.goto("http://localhost:3000/marketplace"); await page.waitForLoadState("domcontentloaded", { timeout: 10000 }); } - // Verify we're on the expected final page - if (ignoreOnboarding || currentUrl.includes("/marketplace")) { + // Verify we're on an expected final page and user is authenticated + if (currentUrl.includes("/copilot") || currentUrl.includes("/library")) { + // For copilot/library landing pages, just verify user is authenticated + await page + .getByTestId("profile-popout-menu-trigger") + .waitFor({ state: "visible", timeout: 10000 }); + } else if (ignoreOnboarding || currentUrl.includes("/marketplace")) { // Verify we're on marketplace await page .getByText(