diff --git a/autogpt_platform/backend/.env.default b/autogpt_platform/backend/.env.default index 49689d7ad6..b393f13017 100644 --- a/autogpt_platform/backend/.env.default +++ b/autogpt_platform/backend/.env.default @@ -178,5 +178,10 @@ AYRSHARE_JWT_KEY= SMARTLEAD_API_KEY= ZEROBOUNCE_API_KEY= +# PostHog Analytics +# Get API key from https://posthog.com - Project Settings > Project API Key +POSTHOG_API_KEY= +POSTHOG_HOST=https://eu.i.posthog.com + # Other Services AUTOMOD_API_KEY= diff --git a/autogpt_platform/backend/backend/api/external/v1/routes.py b/autogpt_platform/backend/backend/api/external/v1/routes.py index 58e15dc6a3..00933c1899 100644 --- a/autogpt_platform/backend/backend/api/external/v1/routes.py +++ b/autogpt_platform/backend/backend/api/external/v1/routes.py @@ -86,6 +86,8 @@ async def execute_graph_block( obj = backend.data.block.get_block(block_id) if not obj: raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.") + if obj.disabled: + raise HTTPException(status_code=403, detail=f"Block #{block_id} is disabled.") output = defaultdict(list) async for name, data in obj.execute(data): diff --git a/autogpt_platform/backend/backend/api/features/chat/config.py b/autogpt_platform/backend/backend/api/features/chat/config.py index 95aef7f2ed..dba7934877 100644 --- a/autogpt_platform/backend/backend/api/features/chat/config.py +++ b/autogpt_platform/backend/backend/api/features/chat/config.py @@ -33,9 +33,15 @@ class ChatConfig(BaseSettings): stream_timeout: int = Field(default=300, description="Stream timeout in seconds") max_retries: int = Field(default=3, description="Maximum number of retries") - max_agent_runs: int = Field(default=3, description="Maximum number of agent runs") + max_agent_runs: int = Field(default=30, description="Maximum number of agent runs") max_agent_schedules: int = Field( - default=3, description="Maximum number of agent schedules" + default=30, description="Maximum number of agent schedules" + ) + + # Long-running operation configuration + long_running_operation_ttl: int = Field( + default=600, + description="TTL in seconds for long-running operation tracking in Redis (safety net if pod dies)", ) # Langfuse Prompt Management Configuration diff --git a/autogpt_platform/backend/backend/api/features/chat/db.py b/autogpt_platform/backend/backend/api/features/chat/db.py index 05a3553cc8..d34b4e5b07 100644 --- a/autogpt_platform/backend/backend/api/features/chat/db.py +++ b/autogpt_platform/backend/backend/api/features/chat/db.py @@ -247,3 +247,45 @@ async def get_chat_session_message_count(session_id: str) -> int: """Get the number of messages in a chat session.""" count = await PrismaChatMessage.prisma().count(where={"sessionId": session_id}) return count + + +async def update_tool_message_content( + session_id: str, + tool_call_id: str, + new_content: str, +) -> bool: + """Update the content of a tool message in chat history. + + Used by background tasks to update pending operation messages with final results. + + Args: + session_id: The chat session ID. + tool_call_id: The tool call ID to find the message. + new_content: The new content to set. + + Returns: + True if a message was updated, False otherwise. 
+ """ + try: + result = await PrismaChatMessage.prisma().update_many( + where={ + "sessionId": session_id, + "toolCallId": tool_call_id, + }, + data={ + "content": new_content, + }, + ) + if result == 0: + logger.warning( + f"No message found to update for session {session_id}, " + f"tool_call_id {tool_call_id}" + ) + return False + return True + except Exception as e: + logger.error( + f"Failed to update tool message for session {session_id}, " + f"tool_call_id {tool_call_id}: {e}" + ) + return False diff --git a/autogpt_platform/backend/backend/api/features/chat/model.py b/autogpt_platform/backend/backend/api/features/chat/model.py index 75bda11127..7318ef88d7 100644 --- a/autogpt_platform/backend/backend/api/features/chat/model.py +++ b/autogpt_platform/backend/backend/api/features/chat/model.py @@ -295,6 +295,21 @@ async def cache_chat_session(session: ChatSession) -> None: await _cache_session(session) +async def invalidate_session_cache(session_id: str) -> None: + """Invalidate a chat session from Redis cache. + + Used by background tasks to ensure fresh data is loaded on next access. + This is best-effort - Redis failures are logged but don't fail the operation. + """ + try: + redis_key = _get_session_cache_key(session_id) + async_redis = await get_redis_async() + await async_redis.delete(redis_key) + except Exception as e: + # Best-effort: log but don't fail - cache will expire naturally + logger.warning(f"Failed to invalidate session cache for {session_id}: {e}") + + async def _get_session_from_db(session_id: str) -> ChatSession | None: """Get a chat session from the database.""" prisma_session = await chat_db.get_chat_session(session_id) diff --git a/autogpt_platform/backend/backend/api/features/chat/response_model.py b/autogpt_platform/backend/backend/api/features/chat/response_model.py index 49a9b38e8f..53a8cf3a1f 100644 --- a/autogpt_platform/backend/backend/api/features/chat/response_model.py +++ b/autogpt_platform/backend/backend/api/features/chat/response_model.py @@ -31,6 +31,7 @@ class ResponseType(str, Enum): # Other ERROR = "error" USAGE = "usage" + HEARTBEAT = "heartbeat" class StreamBaseResponse(BaseModel): @@ -142,3 +143,20 @@ class StreamError(StreamBaseResponse): details: dict[str, Any] | None = Field( default=None, description="Additional error details" ) + + +class StreamHeartbeat(StreamBaseResponse): + """Heartbeat to keep SSE connection alive during long-running operations. + + Uses SSE comment format (: comment) which is ignored by clients but keeps + the connection alive through proxies and load balancers. 
+ """ + + type: ResponseType = ResponseType.HEARTBEAT + toolCallId: str | None = Field( + default=None, description="Tool call ID if heartbeat is for a specific tool" + ) + + def to_sse(self) -> str: + """Convert to SSE comment format to keep connection alive.""" + return ": heartbeat\n\n" diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index 3daf378f65..20216162b5 100644 --- a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -5,9 +5,9 @@ from asyncio import CancelledError from collections.abc import AsyncGenerator from typing import Any +import openai import orjson -from langfuse import get_client, propagate_attributes -from langfuse.openai import openai # type: ignore +from langfuse import get_client from openai import ( APIConnectionError, APIError, @@ -17,6 +17,7 @@ from openai import ( ) from openai.types.chat import ChatCompletionChunk, ChatCompletionToolParam +from backend.data.redis_client import get_redis_async from backend.data.understanding import ( format_understanding_for_prompt, get_business_understanding, @@ -24,6 +25,7 @@ from backend.data.understanding import ( from backend.util.exceptions import NotFoundError from backend.util.settings import Settings +from . import db as chat_db from .config import ChatConfig from .model import ( ChatMessage, @@ -31,6 +33,7 @@ from .model import ( Usage, cache_chat_session, get_chat_session, + invalidate_session_cache, update_session_title, upsert_chat_session, ) @@ -38,6 +41,7 @@ from .response_model import ( StreamBaseResponse, StreamError, StreamFinish, + StreamHeartbeat, StreamStart, StreamTextDelta, StreamTextEnd, @@ -47,7 +51,14 @@ from .response_model import ( StreamToolOutputAvailable, StreamUsage, ) -from .tools import execute_tool, tools +from .tools import execute_tool, get_tool, tools +from .tools.models import ( + ErrorResponse, + OperationInProgressResponse, + OperationPendingResponse, + OperationStartedResponse, +) +from .tracking import track_user_message logger = logging.getLogger(__name__) @@ -58,11 +69,126 @@ client = openai.AsyncOpenAI(api_key=config.api_key, base_url=config.base_url) langfuse = get_client() +# Redis key prefix for tracking running long-running operations +# Used for idempotency across Kubernetes pods - prevents duplicate executions on browser refresh +RUNNING_OPERATION_PREFIX = "chat:running_operation:" -class LangfuseNotConfiguredError(Exception): - """Raised when Langfuse is required but not configured.""" +# Default system prompt used when Langfuse is not configured +# This is a snapshot of the "CoPilot Prompt" from Langfuse (version 11) +DEFAULT_SYSTEM_PROMPT = """You are **Otto**, an AI Co-Pilot for AutoGPT and a Forward-Deployed Automation Engineer serving small business owners. Your mission is to help users automate business tasks with AI by delivering tangible value through working automations—not through documentation or lengthy explanations. - pass +Here is everything you know about the current user from previous interactions: + + +{users_information} + + +## YOUR CORE MANDATE + +You are action-oriented. Your success is measured by: +- **Value Delivery**: Does the user think "wow, that was amazing" or "what was the point"? 
+- **Demonstrable Proof**: Show working automations, not descriptions of what's possible +- **Time Saved**: Focus on tangible efficiency gains +- **Quality Output**: Deliver results that meet or exceed expectations + +## YOUR WORKFLOW + +Adapt flexibly to the conversation context. Not every interaction requires all stages: + +1. **Explore & Understand**: Learn about the user's business, tasks, and goals. Use `add_understanding` to capture important context that will improve future conversations. + +2. **Assess Automation Potential**: Help the user understand whether and how AI can automate their task. + +3. **Prepare for AI**: Provide brief, actionable guidance on prerequisites (data, access, etc.). + +4. **Discover or Create Agents**: + - **Always check the user's library first** with `find_library_agent` (these may be customized to their needs) + - Search the marketplace with `find_agent` for pre-built automations + - Find reusable components with `find_block` + - Create custom solutions with `create_agent` if nothing suitable exists + - Modify existing library agents with `edit_agent` + +5. **Execute**: Run automations immediately, schedule them, or set up webhooks using `run_agent`. Test specific components with `run_block`. + +6. **Show Results**: Display outputs using `agent_output`. + +## AVAILABLE TOOLS + +**Understanding & Discovery:** +- `add_understanding`: Create a memory about the user's business or use cases for future sessions +- `search_docs`: Search platform documentation for specific technical information +- `get_doc_page`: Retrieve full text of a specific documentation page + +**Agent Discovery:** +- `find_library_agent`: Search the user's existing agents (CHECK HERE FIRST—these may be customized) +- `find_agent`: Search the marketplace for pre-built automations +- `find_block`: Find pre-written code units that perform specific tasks (agents are built from blocks) + +**Agent Creation & Editing:** +- `create_agent`: Create a new automation agent +- `edit_agent`: Modify an agent in the user's library + +**Execution & Output:** +- `run_agent`: Run an agent now, schedule it, or set up a webhook trigger +- `run_block`: Test or run a specific block independently +- `agent_output`: View results from previous agent runs + +## BEHAVIORAL GUIDELINES + +**Be Concise:** +- Target 2-5 short lines maximum +- Make every word count—no repetition or filler +- Use lightweight structure for scannability (bullets, numbered lists, short prompts) +- Avoid jargon (blocks, slugs, cron) unless the user asks + +**Be Proactive:** +- Suggest next steps before being asked +- Anticipate needs based on conversation context and user information +- Look for opportunities to expand scope when relevant +- Reveal capabilities through action, not explanation + +**Use Tools Effectively:** +- Select the right tool for each task +- **Always check `find_library_agent` before searching the marketplace** +- Use `add_understanding` to capture valuable business context +- When tool calls fail, try alternative approaches + +## CRITICAL REMINDER + +You are NOT a chatbot. You are NOT documentation. You are a partner who helps busy business owners get value quickly by showing proof through working automations. Bias toward action over explanation.""" + +# Module-level set to hold strong references to background tasks. +# This prevents asyncio from garbage collecting tasks before they complete. +# Tasks are automatically removed on completion via done_callback. 
+_background_tasks: set[asyncio.Task] = set() + + +async def _mark_operation_started(tool_call_id: str) -> bool: + """Mark a long-running operation as started (Redis-based). + + Returns True if successfully marked (operation was not already running), + False if operation was already running (lost race condition). + Raises exception if Redis is unavailable (fail-closed). + """ + redis = await get_redis_async() + key = f"{RUNNING_OPERATION_PREFIX}{tool_call_id}" + # SETNX with TTL - atomic "set if not exists" + result = await redis.set(key, "1", ex=config.long_running_operation_ttl, nx=True) + return result is not None + + +async def _mark_operation_completed(tool_call_id: str) -> None: + """Mark a long-running operation as completed (remove Redis key). + + This is best-effort - if Redis fails, the TTL will eventually clean up. + """ + try: + redis = await get_redis_async() + key = f"{RUNNING_OPERATION_PREFIX}{tool_call_id}" + await redis.delete(key) + except Exception as e: + # Non-critical: TTL will clean up eventually + logger.warning(f"Failed to delete running operation key {tool_call_id}: {e}") def _is_langfuse_configured() -> bool: @@ -72,6 +198,30 @@ def _is_langfuse_configured() -> bool: ) +async def _get_system_prompt_template(context: str) -> str: + """Get the system prompt, trying Langfuse first with fallback to default. + + Args: + context: The user context/information to compile into the prompt. + + Returns: + The compiled system prompt string. + """ + if _is_langfuse_configured(): + try: + # cache_ttl_seconds=0 disables SDK caching to always get the latest prompt + # Use asyncio.to_thread to avoid blocking the event loop + prompt = await asyncio.to_thread( + langfuse.get_prompt, config.langfuse_prompt_name, cache_ttl_seconds=0 + ) + return prompt.compile(users_information=context) + except Exception as e: + logger.warning(f"Failed to fetch prompt from Langfuse, using default: {e}") + + # Fallback to default prompt + return DEFAULT_SYSTEM_PROMPT.format(users_information=context) + + async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]: """Build the full system prompt including business understanding if available. @@ -80,12 +230,8 @@ async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]: If "default" and this is the user's first session, will use "onboarding" instead. Returns: - Tuple of (compiled prompt string, Langfuse prompt object for tracing) + Tuple of (compiled prompt string, business understanding object) """ - - # cache_ttl_seconds=0 disables SDK caching to always get the latest prompt - prompt = langfuse.get_prompt(config.langfuse_prompt_name, cache_ttl_seconds=0) - # If user is authenticated, try to fetch their business understanding understanding = None if user_id: @@ -94,25 +240,43 @@ async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]: except Exception as e: logger.warning(f"Failed to fetch business understanding: {e}") understanding = None + if understanding: context = format_understanding_for_prompt(understanding) else: context = "This is the first time you are meeting the user. 
Greet them and introduce them to the platform" - compiled = prompt.compile(users_information=context) + compiled = await _get_system_prompt_template(context) return compiled, understanding -async def _generate_session_title(message: str) -> str | None: +async def _generate_session_title( + message: str, + user_id: str | None = None, + session_id: str | None = None, +) -> str | None: """Generate a concise title for a chat session based on the first message. Args: message: The first user message in the session + user_id: User ID for OpenRouter tracing (optional) + session_id: Session ID for OpenRouter tracing (optional) Returns: A short title (3-6 words) or None if generation fails """ try: + # Build extra_body for OpenRouter tracing and PostHog analytics + extra_body: dict[str, Any] = {} + if user_id: + extra_body["user"] = user_id[:128] # OpenRouter limit + extra_body["posthogDistinctId"] = user_id + if session_id: + extra_body["session_id"] = session_id[:128] # OpenRouter limit + extra_body["posthogProperties"] = { + "environment": settings.config.app_env.value, + } + response = await client.chat.completions.create( model=config.title_model, messages=[ @@ -127,6 +291,7 @@ async def _generate_session_title(message: str) -> str | None: {"role": "user", "content": message[:500]}, # Limit input length ], max_tokens=20, + extra_body=extra_body, ) title = response.choices[0].message.content if title: @@ -189,16 +354,6 @@ async def stream_chat_completion( f"Streaming chat completion for session {session_id} for message {message} and user id {user_id}. Message is user message: {is_user_message}" ) - # Check if Langfuse is configured - required for chat functionality - if not _is_langfuse_configured(): - logger.error("Chat request failed: Langfuse is not configured") - yield StreamError( - errorText="Chat service is not available. Langfuse must be configured " - "with LANGFUSE_PUBLIC_KEY and LANGFUSE_SECRET_KEY environment variables." 
- ) - yield StreamFinish() - return - # Only fetch from Redis if session not provided (initial call) if session is None: session = await get_chat_session(session_id, user_id) @@ -218,18 +373,9 @@ async def stream_chat_completion( ) if message: - # Build message content with context if provided - message_content = message - if context and context.get("url") and context.get("content"): - context_text = f"Page URL: {context['url']}\n\nPage Content:\n{context['content']}\n\n---\n\nUser Message: {message}" - message_content = context_text - logger.info( - f"Including page context: URL={context['url']}, content_length={len(context['content'])}" - ) - session.messages.append( ChatMessage( - role="user" if is_user_message else "assistant", content=message_content + role="user" if is_user_message else "assistant", content=message ) ) logger.info( @@ -237,6 +383,14 @@ async def stream_chat_completion( f"new message_count={len(session.messages)}" ) + # Track user message in PostHog + if is_user_message: + track_user_message( + user_id=user_id, + session_id=session_id, + message_length=len(message), + ) + logger.info( f"Upserting session: {session.session_id} with user id {session.user_id}, " f"message_count={len(session.messages)}" @@ -256,10 +410,15 @@ async def stream_chat_completion( # stale data issues when the main flow modifies the session captured_session_id = session_id captured_message = message + captured_user_id = user_id async def _update_title(): try: - title = await _generate_session_title(captured_message) + title = await _generate_session_title( + captured_message, + user_id=captured_user_id, + session_id=captured_session_id, + ) if title: # Use dedicated title update function that doesn't # touch messages, avoiding race conditions @@ -276,347 +435,332 @@ async def stream_chat_completion( # Build system prompt with business understanding system_prompt, understanding = await _build_system_prompt(user_id) - # Create Langfuse trace for this LLM call (each call gets its own trace, grouped by session_id) - # Using v3 SDK: start_observation creates a root span, update_trace sets trace-level attributes - input = message - if not message and tool_call_response: - input = tool_call_response + # Initialize variables for streaming + assistant_response = ChatMessage( + role="assistant", + content="", + ) + accumulated_tool_calls: list[dict[str, Any]] = [] + has_saved_assistant_message = False + has_appended_streaming_message = False + last_cache_time = 0.0 + last_cache_content_len = 0 - langfuse = get_client() - with langfuse.start_as_current_observation( - as_type="span", - name="user-copilot-request", - input=input, - ) as span: - with propagate_attributes( - session_id=session_id, - user_id=user_id, - tags=["copilot"], - metadata={ - "users_information": format_understanding_for_prompt(understanding)[ - :200 - ] # langfuse only accepts upto to 200 chars - }, + has_yielded_end = False + has_yielded_error = False + has_done_tool_call = False + has_long_running_tool_call = False # Track if we had a long-running tool call + has_received_text = False + text_streaming_ended = False + tool_response_messages: list[ChatMessage] = [] + should_retry = False + + # Generate unique IDs for AI SDK protocol + import uuid as uuid_module + + message_id = str(uuid_module.uuid4()) + text_block_id = str(uuid_module.uuid4()) + + # Yield message start + yield StreamStart(messageId=message_id) + + try: + async for chunk in _stream_chat_chunks( + session=session, + tools=tools, + system_prompt=system_prompt, + 
text_block_id=text_block_id, ): - - # Initialize variables that will be used in finally block (must be defined before try) - assistant_response = ChatMessage( - role="assistant", - content="", - ) - accumulated_tool_calls: list[dict[str, Any]] = [] - has_saved_assistant_message = False - has_appended_streaming_message = False - last_cache_time = 0.0 - last_cache_content_len = 0 - - # Wrap main logic in try/finally to ensure Langfuse observations are always ended - has_yielded_end = False - has_yielded_error = False - has_done_tool_call = False - has_received_text = False - text_streaming_ended = False - tool_response_messages: list[ChatMessage] = [] - should_retry = False - - # Generate unique IDs for AI SDK protocol - import uuid as uuid_module - - message_id = str(uuid_module.uuid4()) - text_block_id = str(uuid_module.uuid4()) - - # Yield message start - yield StreamStart(messageId=message_id) - - try: - async for chunk in _stream_chat_chunks( - session=session, - tools=tools, - system_prompt=system_prompt, - text_block_id=text_block_id, + if isinstance(chunk, StreamTextStart): + # Emit text-start before first text delta + if not has_received_text: + yield chunk + elif isinstance(chunk, StreamTextDelta): + delta = chunk.delta or "" + assert assistant_response.content is not None + assistant_response.content += delta + has_received_text = True + if not has_appended_streaming_message: + session.messages.append(assistant_response) + has_appended_streaming_message = True + current_time = time.monotonic() + content_len = len(assistant_response.content) + if ( + current_time - last_cache_time >= 1.0 + and content_len > last_cache_content_len ): - - if isinstance(chunk, StreamTextStart): - # Emit text-start before first text delta - if not has_received_text: - yield chunk - elif isinstance(chunk, StreamTextDelta): - delta = chunk.delta or "" - assert assistant_response.content is not None - assistant_response.content += delta - has_received_text = True - if not has_appended_streaming_message: - session.messages.append(assistant_response) - has_appended_streaming_message = True - current_time = time.monotonic() - content_len = len(assistant_response.content) - if ( - current_time - last_cache_time >= 1.0 - and content_len > last_cache_content_len - ): - try: - await cache_chat_session(session) - except Exception as e: - logger.warning( - f"Failed to cache partial session {session.session_id}: {e}" - ) - last_cache_time = current_time - last_cache_content_len = content_len - yield chunk - elif isinstance(chunk, StreamTextEnd): - # Emit text-end after text completes - if has_received_text and not text_streaming_ended: - text_streaming_ended = True - if assistant_response.content: - logger.warn( - f"StreamTextEnd: Attempting to set output {assistant_response.content}" - ) - span.update_trace(output=assistant_response.content) - span.update(output=assistant_response.content) - yield chunk - elif isinstance(chunk, StreamToolInputStart): - # Emit text-end before first tool call, but only if we've received text - if has_received_text and not text_streaming_ended: - yield StreamTextEnd(id=text_block_id) - text_streaming_ended = True - yield chunk - elif isinstance(chunk, StreamToolInputAvailable): - # Accumulate tool calls in OpenAI format - accumulated_tool_calls.append( - { - "id": chunk.toolCallId, - "type": "function", - "function": { - "name": chunk.toolName, - "arguments": orjson.dumps(chunk.input).decode( - "utf-8" - ), - }, - } - ) - elif isinstance(chunk, StreamToolOutputAvailable): - 
result_content = ( - chunk.output - if isinstance(chunk.output, str) - else orjson.dumps(chunk.output).decode("utf-8") - ) - tool_response_messages.append( - ChatMessage( - role="tool", - content=result_content, - tool_call_id=chunk.toolCallId, - ) - ) - has_done_tool_call = True - # Track if any tool execution failed - if not chunk.success: - logger.warning( - f"Tool {chunk.toolName} (ID: {chunk.toolCallId}) execution failed" - ) - yield chunk - elif isinstance(chunk, StreamFinish): - if not has_done_tool_call: - # Emit text-end before finish if we received text but haven't closed it - if has_received_text and not text_streaming_ended: - yield StreamTextEnd(id=text_block_id) - text_streaming_ended = True - - # Save assistant message before yielding finish to ensure it's persisted - # even if client disconnects immediately after receiving StreamFinish - if not has_saved_assistant_message: - messages_to_save_early: list[ChatMessage] = [] - if accumulated_tool_calls: - assistant_response.tool_calls = ( - accumulated_tool_calls - ) - if not has_appended_streaming_message and ( - assistant_response.content - or assistant_response.tool_calls - ): - messages_to_save_early.append(assistant_response) - messages_to_save_early.extend(tool_response_messages) - - if messages_to_save_early: - session.messages.extend(messages_to_save_early) - logger.info( - f"Saving assistant message before StreamFinish: " - f"content_len={len(assistant_response.content or '')}, " - f"tool_calls={len(assistant_response.tool_calls or [])}, " - f"tool_responses={len(tool_response_messages)}" - ) - if ( - messages_to_save_early - or has_appended_streaming_message - ): - await upsert_chat_session(session) - has_saved_assistant_message = True - - has_yielded_end = True - yield chunk - elif isinstance(chunk, StreamError): - has_yielded_error = True - yield chunk - elif isinstance(chunk, StreamUsage): - session.usage.append( - Usage( - prompt_tokens=chunk.promptTokens, - completion_tokens=chunk.completionTokens, - total_tokens=chunk.totalTokens, - ) - ) - else: - logger.error( - f"Unknown chunk type: {type(chunk)}", exc_info=True - ) - if assistant_response.content: - langfuse.update_current_trace(output=assistant_response.content) - langfuse.update_current_span(output=assistant_response.content) - elif tool_response_messages: - langfuse.update_current_trace(output=str(tool_response_messages)) - langfuse.update_current_span(output=str(tool_response_messages)) - - except CancelledError: - if not has_saved_assistant_message: - if accumulated_tool_calls: - assistant_response.tool_calls = accumulated_tool_calls - if assistant_response.content: - assistant_response.content = ( - f"{assistant_response.content}\n\n[interrupted]" - ) - else: - assistant_response.content = "[interrupted]" - if not has_appended_streaming_message: - session.messages.append(assistant_response) - if tool_response_messages: - session.messages.extend(tool_response_messages) try: - await upsert_chat_session(session) + await cache_chat_session(session) except Exception as e: logger.warning( - f"Failed to save interrupted session {session.session_id}: {e}" + f"Failed to cache partial session {session.session_id}: {e}" ) - raise - except Exception as e: - logger.error(f"Error during stream: {e!s}", exc_info=True) - - # Check if this is a retryable error (JSON parsing, incomplete tool calls, etc.) 
- is_retryable = isinstance( - e, (orjson.JSONDecodeError, KeyError, TypeError) - ) - - if is_retryable and retry_count < config.max_retries: - logger.info( - f"Retryable error encountered. Attempt {retry_count + 1}/{config.max_retries}" - ) - should_retry = True - else: - # Non-retryable error or max retries exceeded - # Save any partial progress before reporting error - messages_to_save: list[ChatMessage] = [] - - # Add assistant message if it has content or tool calls - if accumulated_tool_calls: - assistant_response.tool_calls = accumulated_tool_calls - if not has_appended_streaming_message and ( - assistant_response.content or assistant_response.tool_calls - ): - messages_to_save.append(assistant_response) - - # Add tool response messages after assistant message - messages_to_save.extend(tool_response_messages) - - if not has_saved_assistant_message: - if messages_to_save: - session.messages.extend(messages_to_save) - if messages_to_save or has_appended_streaming_message: - await upsert_chat_session(session) - - if not has_yielded_error: - error_message = str(e) - if not is_retryable: - error_message = f"Non-retryable error: {error_message}" - elif retry_count >= config.max_retries: - error_message = f"Max retries ({config.max_retries}) exceeded: {error_message}" - - error_response = StreamError(errorText=error_message) - yield error_response - if not has_yielded_end: - yield StreamFinish() - return - - # Handle retry outside of exception handler to avoid nesting - if should_retry and retry_count < config.max_retries: - logger.info( - f"Retrying stream_chat_completion for session {session_id}, attempt {retry_count + 1}" - ) - async for chunk in stream_chat_completion( - session_id=session.session_id, - user_id=user_id, - retry_count=retry_count + 1, - session=session, - context=context, - ): + last_cache_time = current_time + last_cache_content_len = content_len + yield chunk + elif isinstance(chunk, StreamTextEnd): + # Emit text-end after text completes + if has_received_text and not text_streaming_ended: + text_streaming_ended = True yield chunk - return # Exit after retry to avoid double-saving in finally block + elif isinstance(chunk, StreamToolInputStart): + # Emit text-end before first tool call, but only if we've received text + if has_received_text and not text_streaming_ended: + yield StreamTextEnd(id=text_block_id) + text_streaming_ended = True + yield chunk + elif isinstance(chunk, StreamToolInputAvailable): + # Accumulate tool calls in OpenAI format + accumulated_tool_calls.append( + { + "id": chunk.toolCallId, + "type": "function", + "function": { + "name": chunk.toolName, + "arguments": orjson.dumps(chunk.input).decode("utf-8"), + }, + } + ) + yield chunk + elif isinstance(chunk, StreamToolOutputAvailable): + result_content = ( + chunk.output + if isinstance(chunk.output, str) + else orjson.dumps(chunk.output).decode("utf-8") + ) + # Skip saving long-running operation responses - messages already saved in _yield_tool_call + # Use JSON parsing instead of substring matching to avoid false positives + is_long_running_response = False + try: + parsed = orjson.loads(result_content) + if isinstance(parsed, dict) and parsed.get("type") in ( + "operation_started", + "operation_in_progress", + ): + is_long_running_response = True + except (orjson.JSONDecodeError, TypeError): + pass # Not JSON or not a dict - treat as regular response + if is_long_running_response: + # Remove from accumulated_tool_calls since assistant message was already saved + accumulated_tool_calls[:] = [ + tc 
+ for tc in accumulated_tool_calls + if tc["id"] != chunk.toolCallId + ] + has_long_running_tool_call = True + else: + tool_response_messages.append( + ChatMessage( + role="tool", + content=result_content, + tool_call_id=chunk.toolCallId, + ) + ) + has_done_tool_call = True + # Track if any tool execution failed + if not chunk.success: + logger.warning( + f"Tool {chunk.toolName} (ID: {chunk.toolCallId}) execution failed" + ) + yield chunk + elif isinstance(chunk, StreamFinish): + if not has_done_tool_call: + # Emit text-end before finish if we received text but haven't closed it + if has_received_text and not text_streaming_ended: + yield StreamTextEnd(id=text_block_id) + text_streaming_ended = True + + # Save assistant message before yielding finish to ensure it's persisted + # even if client disconnects immediately after receiving StreamFinish + if not has_saved_assistant_message: + messages_to_save_early: list[ChatMessage] = [] + if accumulated_tool_calls: + assistant_response.tool_calls = accumulated_tool_calls + if not has_appended_streaming_message and ( + assistant_response.content or assistant_response.tool_calls + ): + messages_to_save_early.append(assistant_response) + messages_to_save_early.extend(tool_response_messages) + + if messages_to_save_early: + session.messages.extend(messages_to_save_early) + logger.info( + f"Saving assistant message before StreamFinish: " + f"content_len={len(assistant_response.content or '')}, " + f"tool_calls={len(assistant_response.tool_calls or [])}, " + f"tool_responses={len(tool_response_messages)}" + ) + if messages_to_save_early or has_appended_streaming_message: + await upsert_chat_session(session) + has_saved_assistant_message = True + + has_yielded_end = True + yield chunk + elif isinstance(chunk, StreamError): + has_yielded_error = True + yield chunk + elif isinstance(chunk, StreamUsage): + session.usage.append( + Usage( + prompt_tokens=chunk.promptTokens, + completion_tokens=chunk.completionTokens, + total_tokens=chunk.totalTokens, + ) + ) + else: + logger.error(f"Unknown chunk type: {type(chunk)}", exc_info=True) + + except CancelledError: + if not has_saved_assistant_message: + if accumulated_tool_calls: + assistant_response.tool_calls = accumulated_tool_calls + if assistant_response.content: + assistant_response.content = ( + f"{assistant_response.content}\n\n[interrupted]" + ) + else: + assistant_response.content = "[interrupted]" + if not has_appended_streaming_message: + session.messages.append(assistant_response) + if tool_response_messages: + session.messages.extend(tool_response_messages) + try: + await upsert_chat_session(session) + except Exception as e: + logger.warning( + f"Failed to save interrupted session {session.session_id}: {e}" + ) + raise + except Exception as e: + logger.error(f"Error during stream: {e!s}", exc_info=True) + + # Check if this is a retryable error (JSON parsing, incomplete tool calls, etc.) + is_retryable = isinstance(e, (orjson.JSONDecodeError, KeyError, TypeError)) + + if is_retryable and retry_count < config.max_retries: + logger.info( + f"Retryable error encountered. 
Attempt {retry_count + 1}/{config.max_retries}" + ) + should_retry = True + else: + # Non-retryable error or max retries exceeded + # Save any partial progress before reporting error + messages_to_save: list[ChatMessage] = [] + + # Add assistant message if it has content or tool calls + if accumulated_tool_calls: + assistant_response.tool_calls = accumulated_tool_calls + if not has_appended_streaming_message and ( + assistant_response.content or assistant_response.tool_calls + ): + messages_to_save.append(assistant_response) + + # Add tool response messages after assistant message + messages_to_save.extend(tool_response_messages) - # Normal completion path - save session and handle tool call continuation - # Only save if we haven't already saved when StreamFinish was received if not has_saved_assistant_message: - logger.info( - f"Normal completion path: session={session.session_id}, " - f"current message_count={len(session.messages)}" - ) - - # Build the messages list in the correct order - messages_to_save: list[ChatMessage] = [] - - # Add assistant message with tool_calls if any - if accumulated_tool_calls: - assistant_response.tool_calls = accumulated_tool_calls - logger.info( - f"Added {len(accumulated_tool_calls)} tool calls to assistant message" - ) - if not has_appended_streaming_message and ( - assistant_response.content or assistant_response.tool_calls - ): - messages_to_save.append(assistant_response) - logger.info( - f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}" - ) - - # Add tool response messages after assistant message - messages_to_save.extend(tool_response_messages) - logger.info( - f"Saving {len(tool_response_messages)} tool response messages, " - f"total_to_save={len(messages_to_save)}" - ) - if messages_to_save: session.messages.extend(messages_to_save) - logger.info( - f"Extended session messages, new message_count={len(session.messages)}" - ) if messages_to_save or has_appended_streaming_message: await upsert_chat_session(session) - else: - logger.info( - "Assistant message already saved when StreamFinish was received, " - "skipping duplicate save" - ) - # If we did a tool call, stream the chat completion again to get the next response - if has_done_tool_call: - logger.info( - "Tool call executed, streaming chat completion again to get assistant response" - ) - async for chunk in stream_chat_completion( - session_id=session.session_id, - user_id=user_id, - session=session, # Pass session object to avoid Redis refetch - context=context, - tool_call_response=str(tool_response_messages), - ): - yield chunk + if not has_yielded_error: + error_message = str(e) + if not is_retryable: + error_message = f"Non-retryable error: {error_message}" + elif retry_count >= config.max_retries: + error_message = ( + f"Max retries ({config.max_retries}) exceeded: {error_message}" + ) + + error_response = StreamError(errorText=error_message) + yield error_response + if not has_yielded_end: + yield StreamFinish() + return + + # Handle retry outside of exception handler to avoid nesting + if should_retry and retry_count < config.max_retries: + logger.info( + f"Retrying stream_chat_completion for session {session_id}, attempt {retry_count + 1}" + ) + async for chunk in stream_chat_completion( + session_id=session.session_id, + user_id=user_id, + retry_count=retry_count + 1, + session=session, + context=context, + ): + yield chunk + return # Exit after retry to avoid double-saving in finally block + + 
# Normal completion path - save session and handle tool call continuation + # Only save if we haven't already saved when StreamFinish was received + if not has_saved_assistant_message: + logger.info( + f"Normal completion path: session={session.session_id}, " + f"current message_count={len(session.messages)}" + ) + + # Build the messages list in the correct order + messages_to_save: list[ChatMessage] = [] + + # Add assistant message with tool_calls if any + if accumulated_tool_calls: + assistant_response.tool_calls = accumulated_tool_calls + logger.info( + f"Added {len(accumulated_tool_calls)} tool calls to assistant message" + ) + if not has_appended_streaming_message and ( + assistant_response.content or assistant_response.tool_calls + ): + messages_to_save.append(assistant_response) + logger.info( + f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}" + ) + + # Add tool response messages after assistant message + messages_to_save.extend(tool_response_messages) + logger.info( + f"Saving {len(tool_response_messages)} tool response messages, " + f"total_to_save={len(messages_to_save)}" + ) + + if messages_to_save: + session.messages.extend(messages_to_save) + logger.info( + f"Extended session messages, new message_count={len(session.messages)}" + ) + # Save if there are regular (non-long-running) tool responses or streaming message. + # Long-running tools save their own state, but we still need to save regular tools + # that may be in the same response. + has_regular_tool_responses = len(tool_response_messages) > 0 + if has_regular_tool_responses or ( + not has_long_running_tool_call + and (messages_to_save or has_appended_streaming_message) + ): + await upsert_chat_session(session) + else: + logger.info( + "Assistant message already saved when StreamFinish was received, " + "skipping duplicate save" + ) + + # If we did a tool call, stream the chat completion again to get the next response + # Skip only if ALL tools were long-running (they handle their own completion) + has_regular_tools = len(tool_response_messages) > 0 + if has_done_tool_call and (has_regular_tools or not has_long_running_tool_call): + logger.info( + "Tool call executed, streaming chat completion again to get assistant response" + ) + async for chunk in stream_chat_completion( + session_id=session.session_id, + user_id=user_id, + session=session, # Pass session object to avoid Redis refetch + context=context, + tool_call_response=str(tool_response_messages), + ): + yield chunk # Retry configuration for OpenAI API calls @@ -650,6 +794,209 @@ def _is_region_blocked_error(error: Exception) -> bool: return "not available in your region" in str(error).lower() +async def _summarize_messages( + messages: list, + model: str, + api_key: str | None = None, + base_url: str | None = None, + timeout: float = 30.0, +) -> str: + """Summarize a list of messages into concise context. + + Uses the same model as the chat for higher quality summaries. 
+ + Args: + messages: List of message dicts to summarize + model: Model to use for summarization (same as chat model) + api_key: API key for OpenAI client + base_url: Base URL for OpenAI client + timeout: Request timeout in seconds (default: 30.0) + + Returns: + Summarized text + """ + # Format messages for summarization + conversation = [] + for msg in messages: + role = msg.get("role", "") + content = msg.get("content", "") + # Include user, assistant, and tool messages (tool outputs are important context) + if content and role in ("user", "assistant", "tool"): + conversation.append(f"{role.upper()}: {content}") + + conversation_text = "\n\n".join(conversation) + + # Handle empty conversation + if not conversation_text: + return "No conversation history available." + + # Truncate conversation to fit within summarization model's context + # gpt-4o-mini has 128k context, but we limit to ~25k tokens (~100k chars) for safety + MAX_CHARS = 100_000 + if len(conversation_text) > MAX_CHARS: + conversation_text = conversation_text[:MAX_CHARS] + "\n\n[truncated]" + + # Call LLM to summarize + import openai + + summarization_client = openai.AsyncOpenAI( + api_key=api_key, base_url=base_url, timeout=timeout + ) + + response = await summarization_client.chat.completions.create( + model=model, + messages=[ + { + "role": "system", + "content": ( + "Create a detailed summary of the conversation so far. " + "This summary will be used as context when continuing the conversation.\n\n" + "Before writing the summary, analyze each message chronologically to identify:\n" + "- User requests and their explicit goals\n" + "- Your approach and key decisions made\n" + "- Technical specifics (file names, tool outputs, function signatures)\n" + "- Errors encountered and resolutions applied\n\n" + "You MUST include ALL of the following sections:\n\n" + "## 1. Primary Request and Intent\n" + "The user's explicit goals and what they are trying to accomplish.\n\n" + "## 2. Key Technical Concepts\n" + "Technologies, frameworks, tools, and patterns being used or discussed.\n\n" + "## 3. Files and Resources Involved\n" + "Specific files examined or modified, with relevant snippets and identifiers.\n\n" + "## 4. Errors and Fixes\n" + "Problems encountered, error messages, and their resolutions. " + "Include any user feedback on fixes.\n\n" + "## 5. Problem Solving\n" + "Issues that have been resolved and how they were addressed.\n\n" + "## 6. All User Messages\n" + "A complete list of all user inputs (excluding tool outputs) to preserve their exact requests.\n\n" + "## 7. Pending Tasks\n" + "Work items the user explicitly requested that have not yet been completed.\n\n" + "## 8. Current Work\n" + "Precise description of what was being worked on most recently, including relevant context.\n\n" + "## 9. Next Steps\n" + "What should happen next, aligned with the user's most recent requests. " + "Include verbatim quotes of recent instructions if relevant." + ), + }, + {"role": "user", "content": f"Summarize:\n\n{conversation_text}"}, + ], + max_tokens=1500, + temperature=0.3, + ) + + summary = response.choices[0].message.content + return summary or "No summary available." + + +def _ensure_tool_pairs_intact( + recent_messages: list[dict], + all_messages: list[dict], + start_index: int, +) -> list[dict]: + """ + Ensure tool_call/tool_response pairs stay together after slicing. 
+ + When slicing messages for context compaction, a naive slice can separate + an assistant message containing tool_calls from its corresponding tool + response messages. This causes API validation errors (e.g., Anthropic's + "unexpected tool_use_id found in tool_result blocks"). + + This function checks for orphan tool responses in the slice and extends + backwards to include their corresponding assistant messages. + + Args: + recent_messages: The sliced messages to validate + all_messages: The complete message list (for looking up missing assistants) + start_index: The index in all_messages where recent_messages begins + + Returns: + A potentially extended list of messages with tool pairs intact + """ + if not recent_messages: + return recent_messages + + # Collect all tool_call_ids from assistant messages in the slice + available_tool_call_ids: set[str] = set() + for msg in recent_messages: + if msg.get("role") == "assistant" and msg.get("tool_calls"): + for tc in msg["tool_calls"]: + tc_id = tc.get("id") + if tc_id: + available_tool_call_ids.add(tc_id) + + # Find orphan tool responses (tool messages whose tool_call_id is missing) + orphan_tool_call_ids: set[str] = set() + for msg in recent_messages: + if msg.get("role") == "tool": + tc_id = msg.get("tool_call_id") + if tc_id and tc_id not in available_tool_call_ids: + orphan_tool_call_ids.add(tc_id) + + if not orphan_tool_call_ids: + # No orphans, slice is valid + return recent_messages + + # Find the assistant messages that contain the orphan tool_call_ids + # Search backwards from start_index in all_messages + messages_to_prepend: list[dict] = [] + for i in range(start_index - 1, -1, -1): + msg = all_messages[i] + if msg.get("role") == "assistant" and msg.get("tool_calls"): + msg_tool_ids = {tc.get("id") for tc in msg["tool_calls"] if tc.get("id")} + if msg_tool_ids & orphan_tool_call_ids: + # This assistant message has tool_calls we need + # Also collect its contiguous tool responses that follow it + assistant_and_responses: list[dict] = [msg] + + # Scan forward from this assistant to collect tool responses + for j in range(i + 1, start_index): + following_msg = all_messages[j] + if following_msg.get("role") == "tool": + tool_id = following_msg.get("tool_call_id") + if tool_id and tool_id in msg_tool_ids: + assistant_and_responses.append(following_msg) + else: + # Stop at first non-tool message + break + + # Prepend the assistant and its tool responses (maintain order) + messages_to_prepend = assistant_and_responses + messages_to_prepend + # Mark these as found + orphan_tool_call_ids -= msg_tool_ids + # Also add this assistant's tool_call_ids to available set + available_tool_call_ids |= msg_tool_ids + + if not orphan_tool_call_ids: + # Found all missing assistants + break + + if orphan_tool_call_ids: + # Some tool_call_ids couldn't be resolved - remove those tool responses + # This shouldn't happen in normal operation but handles edge cases + logger.warning( + f"Could not find assistant messages for tool_call_ids: {orphan_tool_call_ids}. " + "Removing orphan tool responses." 
+ ) + recent_messages = [ + msg + for msg in recent_messages + if not ( + msg.get("role") == "tool" + and msg.get("tool_call_id") in orphan_tool_call_ids + ) + ] + + if messages_to_prepend: + logger.info( + f"Extended recent messages by {len(messages_to_prepend)} to preserve " + f"tool_call/tool_response pairs" + ) + return messages_to_prepend + recent_messages + + return recent_messages + + async def _stream_chat_chunks( session: ChatSession, tools: list[ChatCompletionToolParam], @@ -686,6 +1033,316 @@ async def _stream_chat_chunks( ) messages = [system_message] + messages + # Apply context window management + token_count = 0 # Initialize for exception handler + try: + from backend.util.prompt import estimate_token_count + + # Convert to dict for token counting + # OpenAI message types are TypedDicts, so they're already dict-like + messages_dict = [] + for msg in messages: + # TypedDict objects are already dicts, just filter None values + if isinstance(msg, dict): + msg_dict = {k: v for k, v in msg.items() if v is not None} + else: + # Fallback for unexpected types + msg_dict = dict(msg) + messages_dict.append(msg_dict) + + # Estimate tokens using appropriate tokenizer + # Normalize model name for token counting (tiktoken only supports OpenAI models) + token_count_model = model + if "/" in model: + # Strip provider prefix (e.g., "anthropic/claude-opus-4.5" -> "claude-opus-4.5") + token_count_model = model.split("/")[-1] + + # For Claude and other non-OpenAI models, approximate with gpt-4o tokenizer + # Most modern LLMs have similar tokenization (~1 token per 4 chars) + if "claude" in token_count_model.lower() or not any( + known in token_count_model.lower() + for known in ["gpt", "o1", "chatgpt", "text-"] + ): + token_count_model = "gpt-4o" + + # Attempt token counting with error handling + try: + token_count = estimate_token_count(messages_dict, model=token_count_model) + except Exception as token_error: + # If token counting fails, use gpt-4o as fallback approximation + logger.warning( + f"Token counting failed for model {token_count_model}: {token_error}. " + "Using gpt-4o approximation." 
+ ) + token_count = estimate_token_count(messages_dict, model="gpt-4o") + + # If over threshold, summarize old messages + if token_count > 120_000: + KEEP_RECENT = 15 + + # Check if we have a system prompt at the start + has_system_prompt = ( + len(messages) > 0 and messages[0].get("role") == "system" + ) + + # Always attempt mitigation when over limit, even with few messages + if messages: + # Split messages based on whether system prompt exists + # Calculate start index for the slice + slice_start = max(0, len(messages_dict) - KEEP_RECENT) + recent_messages = messages_dict[-KEEP_RECENT:] + + # Ensure tool_call/tool_response pairs stay together + # This prevents API errors from orphan tool responses + recent_messages = _ensure_tool_pairs_intact( + recent_messages, messages_dict, slice_start + ) + + if has_system_prompt: + # Keep system prompt separate, summarize everything between system and recent + system_msg = messages[0] + old_messages_dict = messages_dict[1:-KEEP_RECENT] + else: + # No system prompt, summarize everything except recent + system_msg = None + old_messages_dict = messages_dict[:-KEEP_RECENT] + + # Summarize any non-empty old messages (no minimum threshold) + # If we're over the token limit, we need to compress whatever we can + if old_messages_dict: + # Summarize old messages using the same model as chat + summary_text = await _summarize_messages( + old_messages_dict, + model=model, + api_key=config.api_key, + base_url=config.base_url, + ) + + # Build new message list + # Use assistant role (not system) to prevent privilege escalation + # of user-influenced content to instruction-level authority + from openai.types.chat import ChatCompletionAssistantMessageParam + + summary_msg = ChatCompletionAssistantMessageParam( + role="assistant", + content=( + "[Previous conversation summary — for context only]: " + f"{summary_text}" + ), + ) + + # Rebuild messages based on whether we have a system prompt + if has_system_prompt: + # system_prompt + summary + recent_messages + messages = [system_msg, summary_msg] + recent_messages + else: + # summary + recent_messages (no original system prompt) + messages = [summary_msg] + recent_messages + + logger.info( + f"Context summarized: {token_count} tokens, " + f"summarized {len(old_messages_dict)} old messages, " + f"kept last {KEEP_RECENT} messages" + ) + + # Fallback: If still over limit after summarization, progressively drop recent messages + # This handles edge cases where recent messages are extremely large + new_messages_dict = [] + for msg in messages: + if isinstance(msg, dict): + msg_dict = {k: v for k, v in msg.items() if v is not None} + else: + msg_dict = dict(msg) + new_messages_dict.append(msg_dict) + + new_token_count = estimate_token_count( + new_messages_dict, model=token_count_model + ) + + if new_token_count > 120_000: + # Still over limit - progressively reduce KEEP_RECENT + logger.warning( + f"Still over limit after summarization: {new_token_count} tokens. " + "Reducing number of recent messages kept." 
+ ) + + for keep_count in [12, 10, 8, 5, 3, 2, 1, 0]: + if keep_count == 0: + # Try with just system prompt + summary (no recent messages) + if has_system_prompt: + messages = [system_msg, summary_msg] + else: + messages = [summary_msg] + logger.info( + "Trying with 0 recent messages (system + summary only)" + ) + else: + # Slice from ORIGINAL recent_messages to avoid duplicating summary + reduced_recent = ( + recent_messages[-keep_count:] + if len(recent_messages) >= keep_count + else recent_messages + ) + # Ensure tool pairs stay intact in the reduced slice + reduced_slice_start = max( + 0, len(recent_messages) - keep_count + ) + reduced_recent = _ensure_tool_pairs_intact( + reduced_recent, recent_messages, reduced_slice_start + ) + if has_system_prompt: + messages = [ + system_msg, + summary_msg, + ] + reduced_recent + else: + messages = [summary_msg] + reduced_recent + + new_messages_dict = [] + for msg in messages: + if isinstance(msg, dict): + msg_dict = { + k: v for k, v in msg.items() if v is not None + } + else: + msg_dict = dict(msg) + new_messages_dict.append(msg_dict) + + new_token_count = estimate_token_count( + new_messages_dict, model=token_count_model + ) + + if new_token_count <= 120_000: + logger.info( + f"Reduced to {keep_count} recent messages, " + f"now {new_token_count} tokens" + ) + break + else: + logger.error( + f"Unable to reduce token count below threshold even with 0 messages. " + f"Final count: {new_token_count} tokens" + ) + # ABSOLUTE LAST RESORT: Drop system prompt + # This should only happen if summary itself is massive + if has_system_prompt and len(messages) > 1: + messages = messages[1:] # Drop system prompt + logger.critical( + "CRITICAL: Dropped system prompt as absolute last resort. " + "Behavioral consistency may be affected." + ) + # Yield error to user + yield StreamError( + errorText=( + "Warning: System prompt dropped due to size constraints. " + "Assistant behavior may be affected." + ) + ) + else: + # No old messages to summarize - all messages are "recent" + # Apply progressive truncation to reduce token count + logger.warning( + f"Token count {token_count} exceeds threshold but no old messages to summarize. " + f"Applying progressive truncation to recent messages." 
+ ) + + # Create a base list excluding system prompt to avoid duplication + # This is the pool of messages we'll slice from in the loop + # Use messages_dict for type consistency with _ensure_tool_pairs_intact + base_msgs = ( + messages_dict[1:] if has_system_prompt else messages_dict + ) + + # Try progressively smaller keep counts + new_token_count = token_count # Initialize with current count + for keep_count in [12, 10, 8, 5, 3, 2, 1, 0]: + if keep_count == 0: + # Try with just system prompt (no recent messages) + if has_system_prompt: + messages = [system_msg] + logger.info( + "Trying with 0 recent messages (system prompt only)" + ) + else: + # No system prompt and no recent messages = empty messages list + # This is invalid, skip this iteration + continue + else: + if len(base_msgs) < keep_count: + continue # Skip if we don't have enough messages + + # Slice from base_msgs to get recent messages (without system prompt) + recent_messages = base_msgs[-keep_count:] + + # Ensure tool pairs stay intact in the reduced slice + reduced_slice_start = max(0, len(base_msgs) - keep_count) + recent_messages = _ensure_tool_pairs_intact( + recent_messages, base_msgs, reduced_slice_start + ) + + if has_system_prompt: + messages = [system_msg] + recent_messages + else: + messages = recent_messages + + new_messages_dict = [] + for msg in messages: + if msg is None: + continue # Skip None messages (type safety) + if isinstance(msg, dict): + msg_dict = { + k: v for k, v in msg.items() if v is not None + } + else: + msg_dict = dict(msg) + new_messages_dict.append(msg_dict) + + new_token_count = estimate_token_count( + new_messages_dict, model=token_count_model + ) + + if new_token_count <= 120_000: + logger.info( + f"Reduced to {keep_count} recent messages, " + f"now {new_token_count} tokens" + ) + break + else: + # Even with 0 messages still over limit + logger.error( + f"Unable to reduce token count below threshold even with 0 messages. " + f"Final count: {new_token_count} tokens. Messages may be extremely large." + ) + # ABSOLUTE LAST RESORT: Drop system prompt + if has_system_prompt and len(messages) > 1: + messages = messages[1:] # Drop system prompt + logger.critical( + "CRITICAL: Dropped system prompt as absolute last resort. " + "Behavioral consistency may be affected." + ) + # Yield error to user + yield StreamError( + errorText=( + "Warning: System prompt dropped due to size constraints. " + "Assistant behavior may be affected." + ) + ) + + except Exception as e: + logger.error(f"Context summarization failed: {e}", exc_info=True) + # If we were over the token limit, yield error to user + # Don't silently continue with oversized messages that will fail + if token_count > 120_000: + yield StreamError( + errorText=( + f"Unable to manage context window (token limit exceeded: {token_count} tokens). " + "Context summarization failed. Please start a new conversation." 
+ ) + ) + yield StreamFinish() + return + # Otherwise, continue with original messages (under limit) + # Loop to handle tool calls and continue conversation while True: retry_count = 0 @@ -698,14 +1355,36 @@ async def _stream_chat_chunks( f"{f' (retry {retry_count}/{MAX_RETRIES})' if retry_count > 0 else ''}" ) + # Build extra_body for OpenRouter tracing and PostHog analytics + extra_body: dict[str, Any] = { + "posthogProperties": { + "environment": settings.config.app_env.value, + }, + } + if session.user_id: + extra_body["user"] = session.user_id[:128] # OpenRouter limit + extra_body["posthogDistinctId"] = session.user_id + if session.session_id: + extra_body["session_id"] = session.session_id[ + :128 + ] # OpenRouter limit + # Create the stream with proper types + from typing import cast + + from openai.types.chat import ( + ChatCompletionMessageParam, + ChatCompletionStreamOptionsParam, + ) + stream = await client.chat.completions.create( model=model, - messages=messages, + messages=cast(list[ChatCompletionMessageParam], messages), tools=tools, tool_choice="auto", stream=True, - stream_options={"include_usage": True}, + stream_options=ChatCompletionStreamOptionsParam(include_usage=True), + extra_body=extra_body, ) # Variables to accumulate tool calls @@ -877,14 +1556,19 @@ async def _yield_tool_call( """ Yield a tool call and its execution result. + For tools marked with `is_long_running=True` (like agent generation), spawns a + background task so the operation survives SSE disconnections. For other tools, + yields heartbeat events every 15 seconds to keep the SSE connection alive. + Raises: orjson.JSONDecodeError: If tool call arguments cannot be parsed as JSON KeyError: If expected tool call fields are missing TypeError: If tool call structure is invalid """ + import uuid as uuid_module + tool_name = tool_calls[yield_idx]["function"]["name"] tool_call_id = tool_calls[yield_idx]["id"] - logger.info(f"Yielding tool call: {tool_calls[yield_idx]}") # Parse tool call arguments - handle empty arguments gracefully raw_arguments = tool_calls[yield_idx]["function"]["arguments"] @@ -899,12 +1583,384 @@ async def _yield_tool_call( input=arguments, ) - tool_execution_response: StreamToolOutputAvailable = await execute_tool( - tool_name=tool_name, - parameters=arguments, - tool_call_id=tool_call_id, - user_id=session.user_id, - session=session, + # Check if this tool is long-running (survives SSE disconnection) + tool = get_tool(tool_name) + if tool and tool.is_long_running: + # Atomic check-and-set: returns False if operation already running (lost race) + if not await _mark_operation_started(tool_call_id): + logger.info( + f"Tool call {tool_call_id} already in progress, returning status" + ) + # Build dynamic message based on tool name + if tool_name == "create_agent": + in_progress_msg = "Agent creation already in progress. Please wait..." + elif tool_name == "edit_agent": + in_progress_msg = "Agent edit already in progress. Please wait..." + else: + in_progress_msg = f"{tool_name} already in progress. Please wait..." 
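+            # Idempotent duplicate handling: this tool_call_id is already being
+            # processed by a background task, so report progress back to the
+            # client instead of executing the tool a second time.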
+ + yield StreamToolOutputAvailable( + toolCallId=tool_call_id, + toolName=tool_name, + output=OperationInProgressResponse( + message=in_progress_msg, + tool_call_id=tool_call_id, + ).model_dump_json(), + success=True, + ) + return + + # Generate operation ID + operation_id = str(uuid_module.uuid4()) + + # Build a user-friendly message based on tool and arguments + if tool_name == "create_agent": + agent_desc = arguments.get("description", "") + # Truncate long descriptions for the message + desc_preview = ( + (agent_desc[:100] + "...") if len(agent_desc) > 100 else agent_desc + ) + pending_msg = ( + f"Creating your agent: {desc_preview}" + if desc_preview + else "Creating agent... This may take a few minutes." + ) + started_msg = ( + "Agent creation started. You can close this tab - " + "check your library in a few minutes." + ) + elif tool_name == "edit_agent": + changes = arguments.get("changes", "") + changes_preview = (changes[:100] + "...") if len(changes) > 100 else changes + pending_msg = ( + f"Editing agent: {changes_preview}" + if changes_preview + else "Editing agent... This may take a few minutes." + ) + started_msg = ( + "Agent edit started. You can close this tab - " + "check your library in a few minutes." + ) + else: + pending_msg = f"Running {tool_name}... This may take a few minutes." + started_msg = ( + f"{tool_name} started. You can close this tab - " + "check back in a few minutes." + ) + + # Track appended messages for rollback on failure + assistant_message: ChatMessage | None = None + pending_message: ChatMessage | None = None + + # Wrap session save and task creation in try-except to release lock on failure + try: + # Save assistant message with tool_call FIRST (required by LLM) + assistant_message = ChatMessage( + role="assistant", + content="", + tool_calls=[tool_calls[yield_idx]], + ) + session.messages.append(assistant_message) + + # Then save pending tool result + pending_message = ChatMessage( + role="tool", + content=OperationPendingResponse( + message=pending_msg, + operation_id=operation_id, + tool_name=tool_name, + ).model_dump_json(), + tool_call_id=tool_call_id, + ) + session.messages.append(pending_message) + await upsert_chat_session(session) + logger.info( + f"Saved pending operation {operation_id} for tool {tool_name} " + f"in session {session.session_id}" + ) + + # Store task reference in module-level set to prevent GC before completion + task = asyncio.create_task( + _execute_long_running_tool( + tool_name=tool_name, + parameters=arguments, + tool_call_id=tool_call_id, + operation_id=operation_id, + session_id=session.session_id, + user_id=session.user_id, + ) + ) + _background_tasks.add(task) + task.add_done_callback(_background_tasks.discard) + except Exception as e: + # Roll back appended messages to prevent data corruption on subsequent saves + if ( + pending_message + and session.messages + and session.messages[-1] == pending_message + ): + session.messages.pop() + if ( + assistant_message + and session.messages + and session.messages[-1] == assistant_message + ): + session.messages.pop() + + # Release the Redis lock since the background task won't be spawned + await _mark_operation_completed(tool_call_id) + logger.error( + f"Failed to setup long-running tool {tool_name}: {e}", exc_info=True + ) + raise + + # Return immediately - don't wait for completion + yield StreamToolOutputAvailable( + toolCallId=tool_call_id, + toolName=tool_name, + output=OperationStartedResponse( + message=started_msg, + operation_id=operation_id, + 
tool_name=tool_name, + ).model_dump_json(), + success=True, + ) + return + + # Normal flow: Run tool execution in background task with heartbeats + tool_task = asyncio.create_task( + execute_tool( + tool_name=tool_name, + parameters=arguments, + tool_call_id=tool_call_id, + user_id=session.user_id, + session=session, + ) ) + # Yield heartbeats every 15 seconds while waiting for tool to complete + heartbeat_interval = 15.0 # seconds + while not tool_task.done(): + try: + # Wait for either the task to complete or the heartbeat interval + await asyncio.wait_for( + asyncio.shield(tool_task), timeout=heartbeat_interval + ) + except asyncio.TimeoutError: + # Task still running, send heartbeat to keep connection alive + logger.debug(f"Sending heartbeat for tool {tool_name} ({tool_call_id})") + yield StreamHeartbeat(toolCallId=tool_call_id) + except CancelledError: + # Task was cancelled, clean up and propagate + tool_task.cancel() + logger.warning(f"Tool execution cancelled: {tool_name} ({tool_call_id})") + raise + + # Get the result - handle any exceptions that occurred during execution + try: + tool_execution_response: StreamToolOutputAvailable = await tool_task + except Exception as e: + # Task raised an exception - ensure we send an error response to the frontend + logger.error( + f"Tool execution failed: {tool_name} ({tool_call_id}): {e}", exc_info=True + ) + error_response = ErrorResponse( + message=f"Tool execution failed: {e!s}", + error=type(e).__name__, + session_id=session.session_id, + ) + tool_execution_response = StreamToolOutputAvailable( + toolCallId=tool_call_id, + toolName=tool_name, + output=error_response.model_dump_json(), + success=False, + ) + yield tool_execution_response + + +async def _execute_long_running_tool( + tool_name: str, + parameters: dict[str, Any], + tool_call_id: str, + operation_id: str, + session_id: str, + user_id: str | None, +) -> None: + """Execute a long-running tool in background and update chat history with result. + + This function runs independently of the SSE connection, so the operation + survives if the user closes their browser tab. + """ + try: + # Load fresh session (not stale reference) + session = await get_chat_session(session_id, user_id) + if not session: + logger.error(f"Session {session_id} not found for background tool") + return + + # Execute the actual tool + result = await execute_tool( + tool_name=tool_name, + parameters=parameters, + tool_call_id=tool_call_id, + user_id=user_id, + session=session, + ) + + # Update the pending message with result + await _update_pending_operation( + session_id=session_id, + tool_call_id=tool_call_id, + result=( + result.output + if isinstance(result.output, str) + else orjson.dumps(result.output).decode("utf-8") + ), + ) + + logger.info(f"Background tool {tool_name} completed for session {session_id}") + + # Generate LLM continuation so user sees response when they poll/refresh + await _generate_llm_continuation(session_id=session_id, user_id=user_id) + + except Exception as e: + logger.error(f"Background tool {tool_name} failed: {e}", exc_info=True) + error_response = ErrorResponse( + message=f"Tool {tool_name} failed: {str(e)}", + ) + await _update_pending_operation( + session_id=session_id, + tool_call_id=tool_call_id, + result=error_response.model_dump_json(), + ) + finally: + await _mark_operation_completed(tool_call_id) + + +async def _update_pending_operation( + session_id: str, + tool_call_id: str, + result: str, +) -> None: + """Update the pending tool message with final result. 
+ + This is called by background tasks when long-running operations complete. + """ + # Update the message in database + updated = await chat_db.update_tool_message_content( + session_id=session_id, + tool_call_id=tool_call_id, + new_content=result, + ) + + if updated: + # Invalidate Redis cache so next load gets fresh data + # Wrap in try/except to prevent cache failures from triggering error handling + # that would overwrite our successful DB update + try: + await invalidate_session_cache(session_id) + except Exception as e: + # Non-critical: cache will eventually be refreshed on next load + logger.warning(f"Failed to invalidate cache for session {session_id}: {e}") + logger.info( + f"Updated pending operation for tool_call_id {tool_call_id} " + f"in session {session_id}" + ) + else: + logger.warning( + f"Failed to update pending operation for tool_call_id {tool_call_id} " + f"in session {session_id}" + ) + + +async def _generate_llm_continuation( + session_id: str, + user_id: str | None, +) -> None: + """Generate an LLM response after a long-running tool completes. + + This is called by background tasks to continue the conversation + after a tool result is saved. The response is saved to the database + so users see it when they refresh or poll. + """ + try: + # Load fresh session from DB (bypass cache to get the updated tool result) + await invalidate_session_cache(session_id) + session = await get_chat_session(session_id, user_id) + if not session: + logger.error(f"Session {session_id} not found for LLM continuation") + return + + # Build system prompt + system_prompt, _ = await _build_system_prompt(user_id) + + # Build messages in OpenAI format + messages = session.to_openai_messages() + if system_prompt: + from openai.types.chat import ChatCompletionSystemMessageParam + + system_message = ChatCompletionSystemMessageParam( + role="system", + content=system_prompt, + ) + messages = [system_message] + messages + + # Build extra_body for tracing + extra_body: dict[str, Any] = { + "posthogProperties": { + "environment": settings.config.app_env.value, + }, + } + if user_id: + extra_body["user"] = user_id[:128] + extra_body["posthogDistinctId"] = user_id + if session_id: + extra_body["session_id"] = session_id[:128] + + # Make non-streaming LLM call (no tools - just text response) + from typing import cast + + from openai.types.chat import ChatCompletionMessageParam + + # No tools parameter = text-only response (no tool calls) + response = await client.chat.completions.create( + model=config.model, + messages=cast(list[ChatCompletionMessageParam], messages), + extra_body=extra_body, + ) + + if response.choices and response.choices[0].message.content: + assistant_content = response.choices[0].message.content + + # Reload session from DB to avoid race condition with user messages + # that may have been sent while we were generating the LLM response + fresh_session = await get_chat_session(session_id, user_id) + if not fresh_session: + logger.error( + f"Session {session_id} disappeared during LLM continuation" + ) + return + + # Save assistant message to database + assistant_message = ChatMessage( + role="assistant", + content=assistant_content, + ) + fresh_session.messages.append(assistant_message) + + # Save to database (not cache) to persist the response + await upsert_chat_session(fresh_session) + + # Invalidate cache so next poll/refresh gets fresh data + await invalidate_session_cache(session_id) + + logger.info( + f"Generated LLM continuation for session {session_id}, " + f"response 
length: {len(assistant_content)}" + ) + else: + logger.warning(f"LLM continuation returned empty response for {session_id}") + + except Exception as e: + logger.error(f"Failed to generate LLM continuation: {e}", exc_info=True) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py b/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py index 82ce5cfd6f..beeb128ae9 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py @@ -1,8 +1,10 @@ +import logging from typing import TYPE_CHECKING, Any from openai.types.chat import ChatCompletionToolParam from backend.api.features.chat.model import ChatSession +from backend.api.features.chat.tracking import track_tool_called from .add_understanding import AddUnderstandingTool from .agent_output import AgentOutputTool @@ -20,6 +22,8 @@ from .search_docs import SearchDocsTool if TYPE_CHECKING: from backend.api.features.chat.response_model import StreamToolOutputAvailable +logger = logging.getLogger(__name__) + # Single source of truth for all tools TOOL_REGISTRY: dict[str, BaseTool] = { "add_understanding": AddUnderstandingTool(), @@ -45,6 +49,11 @@ tools: list[ChatCompletionToolParam] = [ ] +def get_tool(tool_name: str) -> BaseTool | None: + """Get a tool instance by name.""" + return TOOL_REGISTRY.get(tool_name) + + async def execute_tool( tool_name: str, parameters: dict[str, Any], @@ -53,7 +62,20 @@ async def execute_tool( tool_call_id: str, ) -> "StreamToolOutputAvailable": """Execute a tool by name.""" - tool = TOOL_REGISTRY.get(tool_name) + tool = get_tool(tool_name) if not tool: raise ValueError(f"Tool {tool_name} not found") + + # Track tool call in PostHog + logger.info( + f"Tracking tool call: tool={tool_name}, user={user_id}, " + f"session={session.session_id}, call_id={tool_call_id}" + ) + track_tool_called( + user_id=user_id, + session_id=session.session_id, + tool_name=tool_name, + tool_call_id=tool_call_id, + ) + return await tool.execute(user_id, session, tool_call_id, **parameters) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py b/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py index bd93f0e2a6..fe3d5e8984 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py @@ -3,8 +3,6 @@ import logging from typing import Any -from langfuse import observe - from backend.api.features.chat.model import ChatSession from backend.data.understanding import ( BusinessUnderstandingInput, @@ -61,7 +59,6 @@ and automations for the user's specific needs.""" """Requires authentication to store user-specific data.""" return True - @observe(as_type="tool", name="add_understanding") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py index 00c6d8499b..457e4a4f9b 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py @@ -5,7 +5,6 @@ import re from datetime import datetime, timedelta, timezone from typing import Any -from langfuse import observe from pydantic import BaseModel, field_validator from backend.api.features.chat.model import ChatSession @@ -329,7 +328,6 @@ class 
AgentOutputTool(BaseTool): total_executions=len(available_executions) if available_executions else 1, ) - @observe(as_type="tool", name="view_agent_output") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/base.py b/autogpt_platform/backend/backend/api/features/chat/tools/base.py index 1dc40c18c7..809e06632b 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/base.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/base.py @@ -36,6 +36,16 @@ class BaseTool: """Whether this tool requires authentication.""" return False + @property + def is_long_running(self) -> bool: + """Whether this tool is long-running and should execute in background. + + Long-running tools (like agent generation) are executed via background + tasks to survive SSE disconnections. The result is persisted to chat + history and visible when the user refreshes. + """ + return False + def as_openai_tool(self) -> ChatCompletionToolParam: """Convert to OpenAI tool format.""" return ChatCompletionToolParam( diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py index 5a3c44fb94..6b3784e323 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py @@ -3,8 +3,6 @@ import logging from typing import Any -from langfuse import observe - from backend.api.features.chat.model import ChatSession from .agent_generator import ( @@ -44,6 +42,10 @@ class CreateAgentTool(BaseTool): def requires_auth(self) -> bool: return True + @property + def is_long_running(self) -> bool: + return True + @property def parameters(self) -> dict[str, Any]: return { @@ -75,7 +77,6 @@ class CreateAgentTool(BaseTool): "required": ["description"], } - @observe(as_type="tool", name="create_agent") async def _execute( self, user_id: str | None, @@ -116,8 +117,11 @@ class CreateAgentTool(BaseTool): if decomposition_result is None: return ErrorResponse( - message="Failed to analyze the goal. Please try rephrasing.", - error="Decomposition failed", + message="Failed to analyze the goal. The agent generation service may be unavailable or timed out. Please try again.", + error="decomposition_failed", + details={ + "description": description[:100] + }, # Include context for debugging session_id=session_id, ) @@ -182,8 +186,11 @@ class CreateAgentTool(BaseTool): if agent_json is None: return ErrorResponse( - message="Failed to generate the agent. Please try again.", - error="Generation failed", + message="Failed to generate the agent. The agent generation service may be unavailable or timed out. 
Please try again.", + error="generation_failed", + details={ + "description": description[:100] + }, # Include context for debugging session_id=session_id, ) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py index 777c39a254..7c4da8ad43 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py @@ -3,8 +3,6 @@ import logging from typing import Any -from langfuse import observe - from backend.api.features.chat.model import ChatSession from .agent_generator import ( @@ -44,6 +42,10 @@ class EditAgentTool(BaseTool): def requires_auth(self) -> bool: return True + @property + def is_long_running(self) -> bool: + return True + @property def parameters(self) -> dict[str, Any]: return { @@ -81,7 +83,6 @@ class EditAgentTool(BaseTool): "required": ["agent_id", "changes"], } - @observe(as_type="tool", name="edit_agent") async def _execute( self, user_id: str | None, @@ -145,8 +146,9 @@ class EditAgentTool(BaseTool): if result is None: return ErrorResponse( - message="Failed to generate changes. Please try rephrasing.", - error="Update generation failed", + message="Failed to generate changes. The agent generation service may be unavailable or timed out. Please try again.", + error="update_generation_failed", + details={"agent_id": agent_id, "changes": changes[:100]}, session_id=session_id, ) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py index f231ef4484..477522757d 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py @@ -2,8 +2,6 @@ from typing import Any -from langfuse import observe - from backend.api.features.chat.model import ChatSession from .agent_search import search_agents @@ -37,7 +35,6 @@ class FindAgentTool(BaseTool): "required": ["query"], } - @observe(as_type="tool", name="find_agent") async def _execute( self, user_id: str | None, session: ChatSession, **kwargs ) -> ToolResponseBase: diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py index fc20fdfc4a..7ca85961f9 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py @@ -1,7 +1,6 @@ import logging from typing import Any -from langfuse import observe from prisma.enums import ContentType from backend.api.features.chat.model import ChatSession @@ -56,7 +55,6 @@ class FindBlockTool(BaseTool): def requires_auth(self) -> bool: return True - @observe(as_type="tool", name="find_block") async def _execute( self, user_id: str | None, @@ -109,7 +107,8 @@ class FindBlockTool(BaseTool): block_id = result["content_id"] block = get_block(block_id) - if block: + # Skip disabled blocks + if block and not block.disabled: # Get input/output schemas input_schema = {} output_schema = {} diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py index d9b5edfa9b..108fba75ae 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py +++ 
b/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py @@ -2,8 +2,6 @@ from typing import Any -from langfuse import observe - from backend.api.features.chat.model import ChatSession from .agent_search import search_agents @@ -43,7 +41,6 @@ class FindLibraryAgentTool(BaseTool): def requires_auth(self) -> bool: return True - @observe(as_type="tool", name="find_library_agent") async def _execute( self, user_id: str | None, session: ChatSession, **kwargs ) -> ToolResponseBase: diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py b/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py index b2fdcccfcd..7040cd7db5 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py @@ -4,8 +4,6 @@ import logging from pathlib import Path from typing import Any -from langfuse import observe - from backend.api.features.chat.model import ChatSession from backend.api.features.chat.tools.base import BaseTool from backend.api.features.chat.tools.models import ( @@ -73,7 +71,6 @@ class GetDocPageTool(BaseTool): url_path = path.rsplit(".", 1)[0] if "." in path else path return f"{DOCS_BASE_URL}/{url_path}" - @observe(as_type="tool", name="get_doc_page") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/models.py b/autogpt_platform/backend/backend/api/features/chat/tools/models.py index 1736ddb9a8..8552681d03 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/models.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/models.py @@ -28,6 +28,10 @@ class ResponseType(str, Enum): BLOCK_OUTPUT = "block_output" DOC_SEARCH_RESULTS = "doc_search_results" DOC_PAGE = "doc_page" + # Long-running operation types + OPERATION_STARTED = "operation_started" + OPERATION_PENDING = "operation_pending" + OPERATION_IN_PROGRESS = "operation_in_progress" # Base response model @@ -334,3 +338,39 @@ class BlockOutputResponse(ToolResponseBase): block_name: str outputs: dict[str, list[Any]] success: bool = True + + +# Long-running operation models +class OperationStartedResponse(ToolResponseBase): + """Response when a long-running operation has been started in the background. + + This is returned immediately to the client while the operation continues + to execute. The user can close the tab and check back later. + """ + + type: ResponseType = ResponseType.OPERATION_STARTED + operation_id: str + tool_name: str + + +class OperationPendingResponse(ToolResponseBase): + """Response stored in chat history while a long-running operation is executing. + + This is persisted to the database so users see a pending state when they + refresh before the operation completes. + """ + + type: ResponseType = ResponseType.OPERATION_PENDING + operation_id: str + tool_name: str + + +class OperationInProgressResponse(ToolResponseBase): + """Response when an operation is already in progress. + + Returned for idempotency when the same tool_call_id is requested again + while the background task is still running. 
+ """ + + type: ResponseType = ResponseType.OPERATION_IN_PROGRESS + tool_call_id: str diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py index b212c11e8a..a7fa65348a 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py @@ -3,11 +3,14 @@ import logging from typing import Any -from langfuse import observe from pydantic import BaseModel, Field, field_validator from backend.api.features.chat.config import ChatConfig from backend.api.features.chat.model import ChatSession +from backend.api.features.chat.tracking import ( + track_agent_run_success, + track_agent_scheduled, +) from backend.api.features.library import db as library_db from backend.data.graph import GraphModel from backend.data.model import CredentialsMetaInput @@ -155,7 +158,6 @@ class RunAgentTool(BaseTool): """All operations require authentication.""" return True - @observe(as_type="tool", name="run_agent") async def _execute( self, user_id: str | None, @@ -453,6 +455,16 @@ class RunAgentTool(BaseTool): session.successful_agent_runs.get(library_agent.graph_id, 0) + 1 ) + # Track in PostHog + track_agent_run_success( + user_id=user_id, + session_id=session_id, + graph_id=library_agent.graph_id, + graph_name=library_agent.name, + execution_id=execution.id, + library_agent_id=library_agent.id, + ) + library_agent_link = f"/library/agents/{library_agent.id}" return ExecutionStartedResponse( message=( @@ -534,6 +546,18 @@ class RunAgentTool(BaseTool): session.successful_agent_schedules.get(library_agent.graph_id, 0) + 1 ) + # Track in PostHog + track_agent_scheduled( + user_id=user_id, + session_id=session_id, + graph_id=library_agent.graph_id, + graph_name=library_agent.name, + schedule_id=result.id, + schedule_name=schedule_name, + cron=cron, + library_agent_id=library_agent.id, + ) + library_agent_link = f"/library/agents/{library_agent.id}" return ExecutionStartedResponse( message=( diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py index c29cc92556..3f57236564 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py @@ -4,8 +4,6 @@ import logging from collections import defaultdict from typing import Any -from langfuse import observe - from backend.api.features.chat.model import ChatSession from backend.data.block import get_block from backend.data.execution import ExecutionContext @@ -130,7 +128,6 @@ class RunBlockTool(BaseTool): return matched_credentials, missing_credentials - @observe(as_type="tool", name="run_block") async def _execute( self, user_id: str | None, @@ -179,6 +176,11 @@ class RunBlockTool(BaseTool): message=f"Block '{block_id}' not found", session_id=session_id, ) + if block.disabled: + return ErrorResponse( + message=f"Block '{block_id}' is disabled", + session_id=session_id, + ) logger.info(f"Executing block {block.name} ({block_id}) for user {user_id}") diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py b/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py index 4903230b40..edb0c0de1e 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py 
@@ -3,7 +3,6 @@ import logging from typing import Any -from langfuse import observe from prisma.enums import ContentType from backend.api.features.chat.model import ChatSession @@ -88,7 +87,6 @@ class SearchDocsTool(BaseTool): url_path = path.rsplit(".", 1)[0] if "." in path else path return f"{DOCS_BASE_URL}/{url_path}" - @observe(as_type="tool", name="search_docs") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tracking.py b/autogpt_platform/backend/backend/api/features/chat/tracking.py new file mode 100644 index 0000000000..b2c0fd032f --- /dev/null +++ b/autogpt_platform/backend/backend/api/features/chat/tracking.py @@ -0,0 +1,250 @@ +"""PostHog analytics tracking for the chat system.""" + +import atexit +import logging +from typing import Any + +from posthog import Posthog + +from backend.util.settings import Settings + +logger = logging.getLogger(__name__) +settings = Settings() + +# PostHog client instance (lazily initialized) +_posthog_client: Posthog | None = None + + +def _shutdown_posthog() -> None: + """Flush and shutdown PostHog client on process exit.""" + if _posthog_client is not None: + _posthog_client.flush() + _posthog_client.shutdown() + + +atexit.register(_shutdown_posthog) + + +def _get_posthog_client() -> Posthog | None: + """Get or create the PostHog client instance.""" + global _posthog_client + if _posthog_client is not None: + return _posthog_client + + if not settings.secrets.posthog_api_key: + logger.debug("PostHog API key not configured, analytics disabled") + return None + + _posthog_client = Posthog( + settings.secrets.posthog_api_key, + host=settings.secrets.posthog_host, + ) + logger.info( + f"PostHog client initialized with host: {settings.secrets.posthog_host}" + ) + return _posthog_client + + +def _get_base_properties() -> dict[str, Any]: + """Get base properties included in all events.""" + return { + "environment": settings.config.app_env.value, + "source": "chat_copilot", + } + + +def track_user_message( + user_id: str | None, + session_id: str, + message_length: int, +) -> None: + """Track when a user sends a message in chat. + + Args: + user_id: The user's ID (or None for anonymous) + session_id: The chat session ID + message_length: Length of the user's message + """ + client = _get_posthog_client() + if not client: + return + + try: + properties = { + **_get_base_properties(), + "session_id": session_id, + "message_length": message_length, + } + client.capture( + distinct_id=user_id or f"anonymous_{session_id}", + event="copilot_message_sent", + properties=properties, + ) + except Exception as e: + logger.warning(f"Failed to track user message: {e}") + + +def track_tool_called( + user_id: str | None, + session_id: str, + tool_name: str, + tool_call_id: str, +) -> None: + """Track when a tool is called in chat. 
+ + Args: + user_id: The user's ID (or None for anonymous) + session_id: The chat session ID + tool_name: Name of the tool being called + tool_call_id: Unique ID of the tool call + """ + client = _get_posthog_client() + if not client: + logger.info("PostHog client not available for tool tracking") + return + + try: + properties = { + **_get_base_properties(), + "session_id": session_id, + "tool_name": tool_name, + "tool_call_id": tool_call_id, + } + distinct_id = user_id or f"anonymous_{session_id}" + logger.info( + f"Sending copilot_tool_called event to PostHog: distinct_id={distinct_id}, " + f"tool_name={tool_name}" + ) + client.capture( + distinct_id=distinct_id, + event="copilot_tool_called", + properties=properties, + ) + except Exception as e: + logger.warning(f"Failed to track tool call: {e}") + + +def track_agent_run_success( + user_id: str, + session_id: str, + graph_id: str, + graph_name: str, + execution_id: str, + library_agent_id: str, +) -> None: + """Track when an agent is successfully run. + + Args: + user_id: The user's ID + session_id: The chat session ID + graph_id: ID of the agent graph + graph_name: Name of the agent + execution_id: ID of the execution + library_agent_id: ID of the library agent + """ + client = _get_posthog_client() + if not client: + return + + try: + properties = { + **_get_base_properties(), + "session_id": session_id, + "graph_id": graph_id, + "graph_name": graph_name, + "execution_id": execution_id, + "library_agent_id": library_agent_id, + } + client.capture( + distinct_id=user_id, + event="copilot_agent_run_success", + properties=properties, + ) + except Exception as e: + logger.warning(f"Failed to track agent run: {e}") + + +def track_agent_scheduled( + user_id: str, + session_id: str, + graph_id: str, + graph_name: str, + schedule_id: str, + schedule_name: str, + cron: str, + library_agent_id: str, +) -> None: + """Track when an agent is successfully scheduled. + + Args: + user_id: The user's ID + session_id: The chat session ID + graph_id: ID of the agent graph + graph_name: Name of the agent + schedule_id: ID of the schedule + schedule_name: Name of the schedule + cron: Cron expression for the schedule + library_agent_id: ID of the library agent + """ + client = _get_posthog_client() + if not client: + return + + try: + properties = { + **_get_base_properties(), + "session_id": session_id, + "graph_id": graph_id, + "graph_name": graph_name, + "schedule_id": schedule_id, + "schedule_name": schedule_name, + "cron": cron, + "library_agent_id": library_agent_id, + } + client.capture( + distinct_id=user_id, + event="copilot_agent_scheduled", + properties=properties, + ) + except Exception as e: + logger.warning(f"Failed to track agent schedule: {e}") + + +def track_trigger_setup( + user_id: str, + session_id: str, + graph_id: str, + graph_name: str, + trigger_type: str, + library_agent_id: str, +) -> None: + """Track when a trigger is set up for an agent. 
+ + Args: + user_id: The user's ID + session_id: The chat session ID + graph_id: ID of the agent graph + graph_name: Name of the agent + trigger_type: Type of trigger (e.g., 'webhook') + library_agent_id: ID of the library agent + """ + client = _get_posthog_client() + if not client: + return + + try: + properties = { + **_get_base_properties(), + "session_id": session_id, + "graph_id": graph_id, + "graph_name": graph_name, + "trigger_type": trigger_type, + "library_agent_id": library_agent_id, + } + client.capture( + distinct_id=user_id, + event="copilot_trigger_setup", + properties=properties, + ) + except Exception as e: + logger.warning(f"Failed to track trigger setup: {e}") diff --git a/autogpt_platform/backend/backend/api/features/executions/review/review_routes_test.py b/autogpt_platform/backend/backend/api/features/executions/review/review_routes_test.py index d0c24f2cf8..c8bbfe4bb0 100644 --- a/autogpt_platform/backend/backend/api/features/executions/review/review_routes_test.py +++ b/autogpt_platform/backend/backend/api/features/executions/review/review_routes_test.py @@ -164,9 +164,9 @@ async def test_process_review_action_approve_success( """Test successful review approval""" # Mock the route functions - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) mock_get_reviews_for_user.return_value = {"test_node_123": sample_pending_review} @@ -244,9 +244,9 @@ async def test_process_review_action_reject_success( """Test successful review rejection""" # Mock the route functions - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) mock_get_reviews_for_user.return_value = {"test_node_123": sample_pending_review} @@ -339,9 +339,9 @@ async def test_process_review_action_mixed_success( # Mock the route functions - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) mock_get_reviews_for_user.return_value = { "test_node_123": sample_pending_review, @@ -463,9 +463,9 @@ async def test_process_review_action_review_not_found( test_user_id: str, ) -> None: """Test error when review is not found""" - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) # Return empty dict to simulate review not found mock_get_reviews_for_user.return_value = {} @@ -506,7 +506,7 @@ async def test_process_review_action_review_not_found( response = await client.post("/api/review/action", 
json=request_data) assert response.status_code == 404 - assert "No pending review found" in response.json()["detail"] + assert "Review(s) not found" in response.json()["detail"] @pytest.mark.asyncio(loop_scope="session") @@ -517,9 +517,9 @@ async def test_process_review_action_partial_failure( test_user_id: str, ) -> None: """Test handling of partial failures in review processing""" - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) mock_get_reviews_for_user.return_value = {"test_node_123": sample_pending_review} @@ -567,9 +567,9 @@ async def test_process_review_action_invalid_node_exec_id( test_user_id: str, ) -> None: """Test failure when trying to process review with invalid node execution ID""" - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) # Return empty dict to simulate review not found mock_get_reviews_for_user.return_value = {} @@ -596,7 +596,7 @@ async def test_process_review_action_invalid_node_exec_id( # Returns 404 when review is not found assert response.status_code == 404 - assert "No pending review found" in response.json()["detail"] + assert "Review(s) not found" in response.json()["detail"] @pytest.mark.asyncio(loop_scope="session") @@ -607,9 +607,9 @@ async def test_process_review_action_auto_approve_creates_auto_approval_records( test_user_id: str, ) -> None: """Test that auto_approve_future_actions flag creates auto-approval records""" - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) mock_get_reviews_for_user.return_value = {"test_node_123": sample_pending_review} @@ -737,9 +737,9 @@ async def test_process_review_action_without_auto_approve_still_loads_settings( test_user_id: str, ) -> None: """Test that execution context is created with settings even without auto-approve""" - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) mock_get_reviews_for_user.return_value = {"test_node_123": sample_pending_review} @@ -885,9 +885,9 @@ async def test_process_review_action_auto_approve_only_applies_to_approved_revie reviewed_at=FIXED_NOW, ) - # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + # Mock get_reviews_by_node_exec_ids (called to find the graph_exec_id) mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + 
"backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) # Need to return both reviews in WAITING state (before processing) approved_review_waiting = PendingHumanReviewModel( @@ -1031,9 +1031,9 @@ async def test_process_review_action_per_review_auto_approve_granularity( test_user_id: str, ) -> None: """Test that auto-approval can be set per-review (granular control)""" - # Mock get_pending_reviews_by_node_exec_ids - return different reviews based on node_exec_id + # Mock get_reviews_by_node_exec_ids - return different reviews based on node_exec_id mock_get_reviews_for_user = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + "backend.api.features.executions.review.routes.get_reviews_by_node_exec_ids" ) # Create a mapping of node_exec_id to review diff --git a/autogpt_platform/backend/backend/api/features/executions/review/routes.py b/autogpt_platform/backend/backend/api/features/executions/review/routes.py index a10071e9cb..539c7fd87b 100644 --- a/autogpt_platform/backend/backend/api/features/executions/review/routes.py +++ b/autogpt_platform/backend/backend/api/features/executions/review/routes.py @@ -14,9 +14,9 @@ from backend.data.execution import ( from backend.data.graph import get_graph_settings from backend.data.human_review import ( create_auto_approval_record, - get_pending_reviews_by_node_exec_ids, get_pending_reviews_for_execution, get_pending_reviews_for_user, + get_reviews_by_node_exec_ids, has_pending_reviews_for_graph_exec, process_all_reviews_for_execution, ) @@ -137,17 +137,17 @@ async def process_review_action( detail="At least one review must be provided", ) - # Batch fetch all requested reviews - reviews_map = await get_pending_reviews_by_node_exec_ids( + # Batch fetch all requested reviews (regardless of status for idempotent handling) + reviews_map = await get_reviews_by_node_exec_ids( list(all_request_node_ids), user_id ) - # Validate all reviews were found + # Validate all reviews were found (must exist, any status is OK for now) missing_ids = all_request_node_ids - set(reviews_map.keys()) if missing_ids: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, - detail=f"No pending review found for node execution(s): {', '.join(missing_ids)}", + detail=f"Review(s) not found: {', '.join(missing_ids)}", ) # Validate all reviews belong to the same execution diff --git a/autogpt_platform/backend/backend/api/features/store/content_handlers.py b/autogpt_platform/backend/backend/api/features/store/content_handlers.py index 1560db421c..cbbdcfbebf 100644 --- a/autogpt_platform/backend/backend/api/features/store/content_handlers.py +++ b/autogpt_platform/backend/backend/api/features/store/content_handlers.py @@ -188,6 +188,10 @@ class BlockHandler(ContentHandler): try: block_instance = block_cls() + # Skip disabled blocks - they shouldn't be indexed + if block_instance.disabled: + continue + # Build searchable text from block metadata parts = [] if hasattr(block_instance, "name") and block_instance.name: @@ -248,12 +252,19 @@ class BlockHandler(ContentHandler): from backend.data.block import get_blocks all_blocks = get_blocks() - total_blocks = len(all_blocks) + + # Filter out disabled blocks - they're not indexed + enabled_block_ids = [ + block_id + for block_id, block_cls in all_blocks.items() + if not block_cls().disabled + ] + total_blocks = len(enabled_block_ids) if total_blocks == 0: return {"total": 0, "with_embeddings": 0, "without_embeddings": 0} - block_ids = list(all_blocks.keys()) + 
block_ids = enabled_block_ids placeholders = ",".join([f"${i+1}" for i in range(len(block_ids))]) embedded_result = await query_raw_with_schema( diff --git a/autogpt_platform/backend/backend/api/features/store/content_handlers_test.py b/autogpt_platform/backend/backend/api/features/store/content_handlers_test.py index 28bc88e270..fee879fae0 100644 --- a/autogpt_platform/backend/backend/api/features/store/content_handlers_test.py +++ b/autogpt_platform/backend/backend/api/features/store/content_handlers_test.py @@ -81,6 +81,7 @@ async def test_block_handler_get_missing_items(mocker): mock_block_instance.name = "Calculator Block" mock_block_instance.description = "Performs calculations" mock_block_instance.categories = [MagicMock(value="MATH")] + mock_block_instance.disabled = False mock_block_instance.input_schema.model_json_schema.return_value = { "properties": {"expression": {"description": "Math expression to evaluate"}} } @@ -116,11 +117,18 @@ async def test_block_handler_get_stats(mocker): """Test BlockHandler returns correct stats.""" handler = BlockHandler() - # Mock get_blocks + # Mock get_blocks - each block class returns an instance with disabled=False + def make_mock_block_class(): + mock_class = MagicMock() + mock_instance = MagicMock() + mock_instance.disabled = False + mock_class.return_value = mock_instance + return mock_class + mock_blocks = { - "block-1": MagicMock(), - "block-2": MagicMock(), - "block-3": MagicMock(), + "block-1": make_mock_block_class(), + "block-2": make_mock_block_class(), + "block-3": make_mock_block_class(), } # Mock embedded count query (2 blocks have embeddings) @@ -309,6 +317,7 @@ async def test_block_handler_handles_missing_attributes(): mock_block_class = MagicMock() mock_block_instance = MagicMock() mock_block_instance.name = "Minimal Block" + mock_block_instance.disabled = False # No description, categories, or schema del mock_block_instance.description del mock_block_instance.categories @@ -342,6 +351,7 @@ async def test_block_handler_skips_failed_blocks(): good_instance.name = "Good Block" good_instance.description = "Works fine" good_instance.categories = [] + good_instance.disabled = False good_block.return_value = good_instance bad_block = MagicMock() diff --git a/autogpt_platform/backend/backend/api/features/v1.py b/autogpt_platform/backend/backend/api/features/v1.py index 3a5dd3ec12..62b532089c 100644 --- a/autogpt_platform/backend/backend/api/features/v1.py +++ b/autogpt_platform/backend/backend/api/features/v1.py @@ -265,9 +265,13 @@ async def get_onboarding_agents( "/onboarding/enabled", summary="Is onboarding enabled", tags=["onboarding", "public"], - dependencies=[Security(requires_user)], ) -async def is_onboarding_enabled() -> bool: +async def is_onboarding_enabled( + user_id: Annotated[str, Security(get_user_id)], +) -> bool: + # If chat is enabled for user, skip legacy onboarding + if await is_feature_enabled(Flag.CHAT, user_id, False): + return False return await onboarding_enabled() @@ -364,6 +368,8 @@ async def execute_graph_block( obj = get_block(block_id) if not obj: raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.") + if obj.disabled: + raise HTTPException(status_code=403, detail=f"Block #{block_id} is disabled.") user = await get_user_by_id(user_id) if not user: diff --git a/autogpt_platform/backend/backend/api/features/v1_test.py b/autogpt_platform/backend/backend/api/features/v1_test.py index a186d38810..d57ad49949 100644 --- a/autogpt_platform/backend/backend/api/features/v1_test.py +++ 
b/autogpt_platform/backend/backend/api/features/v1_test.py @@ -138,6 +138,7 @@ def test_execute_graph_block( """Test execute block endpoint""" # Mock block mock_block = Mock() + mock_block.disabled = False async def mock_execute(*args, **kwargs): yield "output1", {"data": "result1"} diff --git a/autogpt_platform/backend/backend/data/human_review.py b/autogpt_platform/backend/backend/data/human_review.py index c70eaa7b64..f198043a38 100644 --- a/autogpt_platform/backend/backend/data/human_review.py +++ b/autogpt_platform/backend/backend/data/human_review.py @@ -263,11 +263,14 @@ async def get_pending_review_by_node_exec_id( return PendingHumanReviewModel.from_db(review, node_id=node_id) -async def get_pending_reviews_by_node_exec_ids( +async def get_reviews_by_node_exec_ids( node_exec_ids: list[str], user_id: str ) -> dict[str, "PendingHumanReviewModel"]: """ - Get multiple pending reviews by their node execution IDs in a single batch query. + Get multiple reviews by their node execution IDs regardless of status. + + Unlike get_pending_reviews_by_node_exec_ids, this returns reviews in any status + (WAITING, APPROVED, REJECTED). Used for validation in idempotent operations. Args: node_exec_ids: List of node execution IDs to look up @@ -283,7 +286,6 @@ async def get_pending_reviews_by_node_exec_ids( where={ "nodeExecId": {"in": node_exec_ids}, "userId": user_id, - "status": ReviewStatus.WAITING, } ) @@ -407,38 +409,68 @@ async def process_all_reviews_for_execution( ) -> dict[str, PendingHumanReviewModel]: """Process all pending reviews for an execution with approve/reject decisions. + Handles race conditions gracefully: if a review was already processed with the + same decision by a concurrent request, it's treated as success rather than error. + Args: user_id: User ID for ownership validation review_decisions: Map of node_exec_id -> (status, reviewed_data, message) Returns: - Dict of node_exec_id -> updated review model + Dict of node_exec_id -> updated review model (includes already-processed reviews) """ if not review_decisions: return {} node_exec_ids = list(review_decisions.keys()) - # Get all reviews for validation - reviews = await PendingHumanReview.prisma().find_many( + # Get all reviews (both WAITING and already processed) for the user + all_reviews = await PendingHumanReview.prisma().find_many( where={ "nodeExecId": {"in": node_exec_ids}, "userId": user_id, - "status": ReviewStatus.WAITING, }, ) - # Validate all reviews can be processed - if len(reviews) != len(node_exec_ids): - missing_ids = set(node_exec_ids) - {review.nodeExecId for review in reviews} + # Separate into pending and already-processed reviews + reviews_to_process = [] + already_processed = [] + for review in all_reviews: + if review.status == ReviewStatus.WAITING: + reviews_to_process.append(review) + else: + already_processed.append(review) + + # Check for truly missing reviews (not found at all) + found_ids = {review.nodeExecId for review in all_reviews} + missing_ids = set(node_exec_ids) - found_ids + if missing_ids: raise ValueError( - f"Reviews not found, access denied, or not in WAITING status: {', '.join(missing_ids)}" + f"Reviews not found or access denied: {', '.join(missing_ids)}" ) - # Create parallel update tasks + # Validate already-processed reviews have compatible status (same decision) + # This handles race conditions where another request processed the same reviews + for review in already_processed: + requested_status = review_decisions[review.nodeExecId][0] + if review.status != requested_status: 
+ raise ValueError( + f"Review {review.nodeExecId} was already processed with status " + f"{review.status}, cannot change to {requested_status}" + ) + + # Log if we're handling a race condition (some reviews already processed) + if already_processed: + already_processed_ids = [r.nodeExecId for r in already_processed] + logger.info( + f"Race condition handled: {len(already_processed)} review(s) already " + f"processed by concurrent request: {already_processed_ids}" + ) + + # Create parallel update tasks for reviews that still need processing update_tasks = [] - for review in reviews: + for review in reviews_to_process: new_status, reviewed_data, message = review_decisions[review.nodeExecId] has_data_changes = reviewed_data is not None and reviewed_data != review.payload @@ -463,7 +495,7 @@ async def process_all_reviews_for_execution( update_tasks.append(task) # Execute all updates in parallel and get updated reviews - updated_reviews = await asyncio.gather(*update_tasks) + updated_reviews = await asyncio.gather(*update_tasks) if update_tasks else [] # Note: Execution resumption is now handled at the API layer after ALL reviews # for an execution are processed (both approved and rejected) @@ -472,8 +504,11 @@ async def process_all_reviews_for_execution( # Local import to avoid event loop conflicts in tests from backend.data.execution import get_node_execution + # Combine updated reviews with already-processed ones (for idempotent response) + all_result_reviews = list(updated_reviews) + already_processed + result = {} - for review in updated_reviews: + for review in all_result_reviews: node_exec = await get_node_execution(review.nodeExecId) node_id = node_exec.node_id if node_exec else review.nodeExecId result[review.nodeExecId] = PendingHumanReviewModel.from_db( diff --git a/autogpt_platform/backend/backend/data/onboarding.py b/autogpt_platform/backend/backend/data/onboarding.py index 6a842d1022..4af8e8dffd 100644 --- a/autogpt_platform/backend/backend/data/onboarding.py +++ b/autogpt_platform/backend/backend/data/onboarding.py @@ -41,6 +41,7 @@ FrontendOnboardingStep = Literal[ OnboardingStep.AGENT_NEW_RUN, OnboardingStep.AGENT_INPUT, OnboardingStep.CONGRATS, + OnboardingStep.VISIT_COPILOT, OnboardingStep.MARKETPLACE_VISIT, OnboardingStep.BUILDER_OPEN, ] @@ -122,6 +123,9 @@ async def update_user_onboarding(user_id: str, data: UserOnboardingUpdate): async def _reward_user(user_id: str, onboarding: UserOnboarding, step: OnboardingStep): reward = 0 match step: + # Welcome bonus for visiting copilot ($5 = 500 credits) + case OnboardingStep.VISIT_COPILOT: + reward = 500 # Reward user when they clicked New Run during onboarding # This is because they need credits before scheduling a run (next step) # This is seen as a reward for the GET_RESULTS step in the wallet diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index 2972fc07c2..a42a4d29b4 100644 --- a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -359,8 +359,8 @@ class Config(UpdateTrackingModel["Config"], BaseSettings): description="The port for the Agent Generator service", ) agentgenerator_timeout: int = Field( - default=120, - description="The timeout in seconds for Agent Generator service requests", + default=600, + description="The timeout in seconds for Agent Generator service requests (includes retries for rate limits)", ) enable_example_blocks: bool = Field( @@ -679,6 +679,12 @@ class 
Secrets(UpdateTrackingModel["Secrets"], BaseSettings): default="https://cloud.langfuse.com", description="Langfuse host URL" ) + # PostHog analytics + posthog_api_key: str = Field(default="", description="PostHog API key") + posthog_host: str = Field( + default="https://eu.i.posthog.com", description="PostHog host URL" + ) + # Add more secret fields as needed model_config = SettingsConfigDict( env_file=".env", diff --git a/autogpt_platform/backend/migrations/20260127211502_add_visit_copilot_onboarding_step/migration.sql b/autogpt_platform/backend/migrations/20260127211502_add_visit_copilot_onboarding_step/migration.sql new file mode 100644 index 0000000000..6a08d9231b --- /dev/null +++ b/autogpt_platform/backend/migrations/20260127211502_add_visit_copilot_onboarding_step/migration.sql @@ -0,0 +1,2 @@ +-- AlterEnum +ALTER TYPE "OnboardingStep" ADD VALUE 'VISIT_COPILOT'; diff --git a/autogpt_platform/backend/poetry.lock b/autogpt_platform/backend/poetry.lock index 2aa55ce5b6..91ac358ade 100644 --- a/autogpt_platform/backend/poetry.lock +++ b/autogpt_platform/backend/poetry.lock @@ -4204,14 +4204,14 @@ strenum = {version = ">=0.4.9,<0.5.0", markers = "python_version < \"3.11\""} [[package]] name = "posthog" -version = "6.1.1" +version = "7.6.0" description = "Integrate PostHog into any python application." optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["main"] files = [ - {file = "posthog-6.1.1-py3-none-any.whl", hash = "sha256:329fd3d06b4d54cec925f47235bd8e327c91403c2f9ec38f1deb849535934dba"}, - {file = "posthog-6.1.1.tar.gz", hash = "sha256:b453f54c4a2589da859fd575dd3bf86fcb40580727ec399535f268b1b9f318b8"}, + {file = "posthog-7.6.0-py3-none-any.whl", hash = "sha256:c4dd78cf77c4fecceb965f86066e5ac37886ef867d68ffe75a1db5d681d7d9ad"}, + {file = "posthog-7.6.0.tar.gz", hash = "sha256:941dfd278ee427c9b14640f09b35b5bb52a71bdf028d7dbb7307e1838fd3002e"}, ] [package.dependencies] @@ -4225,7 +4225,7 @@ typing-extensions = ">=4.2.0" [package.extras] dev = ["django-stubs", "lxml", "mypy", "mypy-baseline", "packaging", "pre-commit", "pydantic", "ruff", "setuptools", "tomli", "tomli_w", "twine", "types-mock", "types-python-dateutil", "types-requests", "types-setuptools", "types-six", "wheel"] langchain = ["langchain (>=0.2.0)"] -test = ["anthropic", "coverage", "django", "freezegun (==1.5.1)", "google-genai", "langchain-anthropic (>=0.3.15)", "langchain-community (>=0.3.25)", "langchain-core (>=0.3.65)", "langchain-openai (>=0.3.22)", "langgraph (>=0.4.8)", "mock (>=2.0.0)", "openai", "parameterized (>=0.8.1)", "pydantic", "pytest", "pytest-asyncio", "pytest-timeout"] +test = ["anthropic (>=0.72)", "coverage", "django", "freezegun (==1.5.1)", "google-genai", "langchain-anthropic (>=1.0)", "langchain-community (>=0.4)", "langchain-core (>=1.0)", "langchain-openai (>=1.0)", "langgraph (>=1.0)", "mock (>=2.0.0)", "openai (>=2.0)", "parameterized (>=0.8.1)", "pydantic", "pytest", "pytest-asyncio", "pytest-timeout"] [[package]] name = "postmarker" @@ -7512,4 +7512,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.1" python-versions = ">=3.10,<3.14" -content-hash = "18b92e09596298c82432e4d0a85cb6d80a40b4229bee0a0c15f0529fd6cb21a4" +content-hash = "ee5742dc1a9df50dfc06d4b26a1682cbb2b25cab6b79ce5625ec272f93e4f4bf" diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml index 1f489d2bc4..fe263e47c0 100644 --- a/autogpt_platform/backend/pyproject.toml +++ b/autogpt_platform/backend/pyproject.toml @@ -85,6 +85,7 @@ exa-py = 
"^1.14.20" croniter = "^6.0.0" stagehand = "^0.5.1" gravitas-md2gdocs = "^0.1.0" +posthog = "^7.6.0" [tool.poetry.group.dev.dependencies] aiohappyeyeballs = "^2.6.1" diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma index de94600820..2c52528e3f 100644 --- a/autogpt_platform/backend/schema.prisma +++ b/autogpt_platform/backend/schema.prisma @@ -81,6 +81,7 @@ enum OnboardingStep { AGENT_INPUT CONGRATS // First Wins + VISIT_COPILOT GET_RESULTS MARKETPLACE_VISIT MARKETPLACE_ADD_AGENT diff --git a/autogpt_platform/frontend/.env.default b/autogpt_platform/frontend/.env.default index 197a37e8bb..af250fb8bf 100644 --- a/autogpt_platform/frontend/.env.default +++ b/autogpt_platform/frontend/.env.default @@ -30,3 +30,7 @@ NEXT_PUBLIC_TURNSTILE=disabled # PR previews NEXT_PUBLIC_PREVIEW_STEALING_DEV= + +# PostHog Analytics +NEXT_PUBLIC_POSTHOG_KEY= +NEXT_PUBLIC_POSTHOG_HOST=https://eu.i.posthog.com diff --git a/autogpt_platform/frontend/package.json b/autogpt_platform/frontend/package.json index bc1e2d7443..f22a182d20 100644 --- a/autogpt_platform/frontend/package.json +++ b/autogpt_platform/frontend/package.json @@ -34,6 +34,7 @@ "@hookform/resolvers": "5.2.2", "@next/third-parties": "15.4.6", "@phosphor-icons/react": "2.1.10", + "@posthog/react": "1.7.0", "@radix-ui/react-accordion": "1.2.12", "@radix-ui/react-alert-dialog": "1.1.15", "@radix-ui/react-avatar": "1.1.10", @@ -91,6 +92,7 @@ "next-themes": "0.4.6", "nuqs": "2.7.2", "party-js": "2.2.0", + "posthog-js": "1.334.1", "react": "18.3.1", "react-currency-input-field": "4.0.3", "react-day-picker": "9.11.1", @@ -120,7 +122,6 @@ }, "devDependencies": { "@chromatic-com/storybook": "4.1.2", - "happy-dom": "20.3.4", "@opentelemetry/instrumentation": "0.209.0", "@playwright/test": "1.56.1", "@storybook/addon-a11y": "9.1.5", @@ -148,6 +149,7 @@ "eslint": "8.57.1", "eslint-config-next": "15.5.7", "eslint-plugin-storybook": "9.1.5", + "happy-dom": "20.3.4", "import-in-the-middle": "2.0.2", "msw": "2.11.6", "msw-storybook-addon": "2.0.6", diff --git a/autogpt_platform/frontend/pnpm-lock.yaml b/autogpt_platform/frontend/pnpm-lock.yaml index 8e83289f03..db891ccf3f 100644 --- a/autogpt_platform/frontend/pnpm-lock.yaml +++ b/autogpt_platform/frontend/pnpm-lock.yaml @@ -23,6 +23,9 @@ importers: '@phosphor-icons/react': specifier: 2.1.10 version: 2.1.10(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@posthog/react': + specifier: 1.7.0 + version: 1.7.0(@types/react@18.3.17)(posthog-js@1.334.1)(react@18.3.1) '@radix-ui/react-accordion': specifier: 1.2.12 version: 1.2.12(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -194,6 +197,9 @@ importers: party-js: specifier: 2.2.0 version: 2.2.0 + posthog-js: + specifier: 1.334.1 + version: 1.334.1 react: specifier: 18.3.1 version: 18.3.1 @@ -1794,6 +1800,10 @@ packages: '@open-draft/until@2.1.0': resolution: {integrity: sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg==} + '@opentelemetry/api-logs@0.208.0': + resolution: {integrity: sha512-CjruKY9V6NMssL/T1kAFgzosF1v9o6oeN+aX5JB/C/xPNtmgIJqcXHG7fA82Ou1zCpWGl4lROQUKwUNE1pMCyg==} + engines: {node: '>=8.0.0'} + '@opentelemetry/api-logs@0.209.0': resolution: {integrity: sha512-xomnUNi7TiAGtOgs0tb54LyrjRZLu9shJGGwkcN7NgtiPYOpNnKLkRJtzZvTjD/w6knSZH9sFZcUSUovYOPg6A==} engines: {node: '>=8.0.0'} @@ -1814,6 +1824,12 @@ packages: peerDependencies: '@opentelemetry/api': '>=1.0.0 <1.10.0' + 
'@opentelemetry/exporter-logs-otlp-http@0.208.0': + resolution: {integrity: sha512-jOv40Bs9jy9bZVLo/i8FwUiuCvbjWDI+ZW13wimJm4LjnlwJxGgB+N/VWOZUTpM+ah/awXeQqKdNlpLf2EjvYg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + '@opentelemetry/instrumentation-amqplib@0.55.0': resolution: {integrity: sha512-5ULoU8p+tWcQw5PDYZn8rySptGSLZHNX/7srqo2TioPnAAcvTy6sQFQXsNPrAnyRRtYGMetXVyZUy5OaX1+IfA==} engines: {node: ^18.19.0 || >=20.6.0} @@ -1952,6 +1968,18 @@ packages: peerDependencies: '@opentelemetry/api': ^1.3.0 + '@opentelemetry/otlp-exporter-base@0.208.0': + resolution: {integrity: sha512-gMd39gIfVb2OgxldxUtOwGJYSH8P1kVFFlJLuut32L6KgUC4gl1dMhn+YC2mGn0bDOiQYSk/uHOdSjuKp58vvA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/otlp-transformer@0.208.0': + resolution: {integrity: sha512-DCFPY8C6lAQHUNkzcNT9R+qYExvsk6C5Bto2pbNxgicpcSWbe2WHShLxkOxIdNcBiYPdVHv/e7vH7K6TI+C+fQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + '@opentelemetry/redis-common@0.38.2': resolution: {integrity: sha512-1BCcU93iwSRZvDAgwUxC/DV4T/406SkMfxGqu5ojc3AvNI+I9GhV7v0J1HljsczuuhcnFLYqD5VmwVXfCGHzxA==} engines: {node: ^18.19.0 || >=20.6.0} @@ -1962,6 +1990,18 @@ packages: peerDependencies: '@opentelemetry/api': '>=1.3.0 <1.10.0' + '@opentelemetry/sdk-logs@0.208.0': + resolution: {integrity: sha512-QlAyL1jRpOeaqx7/leG1vJMp84g0xKP6gJmfELBpnI4O/9xPX+Hu5m1POk9Kl+veNkyth5t19hRlN6tNY1sjbA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.4.0 <1.10.0' + + '@opentelemetry/sdk-metrics@2.2.0': + resolution: {integrity: sha512-G5KYP6+VJMZzpGipQw7Giif48h6SGQ2PFKEYCybeXJsOCB4fp8azqMAAzE5lnnHK3ZVwYQrgmFbsUJO/zOnwGw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.9.0 <1.10.0' + '@opentelemetry/sdk-trace-base@2.2.0': resolution: {integrity: sha512-xWQgL0Bmctsalg6PaXExmzdedSp3gyKV8mQBwK/j9VGdCDu2fmXIb2gAehBKbkXCpJ4HPkgv3QfoJWRT4dHWbw==} engines: {node: ^18.19.0 || >=20.6.0} @@ -2050,11 +2090,57 @@ packages: webpack-plugin-serve: optional: true + '@posthog/core@1.13.0': + resolution: {integrity: sha512-knjncrk7qRmssFRbGzBl1Tunt21GRpe0Wv+uVelyL0Rh7PdQUsgguulzXFTps8hA6wPwTU4kq85qnbAJ3eH6Wg==} + + '@posthog/react@1.7.0': + resolution: {integrity: sha512-pM7GL7z/rKjiIwosbRiQA3buhLI6vUo+wg+T/ZrVZC7O5bVU07TfgNZTcuOj8E9dx7vDbfNrc1kjDN7PKMM8ug==} + peerDependencies: + '@types/react': '>=16.8.0' + posthog-js: '>=1.257.2' + react: '>=16.8.0' + peerDependenciesMeta: + '@types/react': + optional: true + + '@posthog/types@1.334.1': + resolution: {integrity: sha512-ypFnwTO7qbV7icylLbujbamPdQXbJq0a61GUUBnJAeTbBw/qYPIss5IRYICcbCj0uunQrwD7/CGxVb5TOYKWgA==} + '@prisma/instrumentation@6.19.0': resolution: {integrity: sha512-QcuYy25pkXM8BJ37wVFBO7Zh34nyRV1GOb2n3lPkkbRYfl4hWl3PTcImP41P0KrzVXfa/45p6eVCos27x3exIg==} peerDependencies: '@opentelemetry/api': ^1.8 + '@protobufjs/aspromise@1.1.2': + resolution: {integrity: sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==} + + '@protobufjs/base64@1.1.2': + resolution: {integrity: sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==} + + '@protobufjs/codegen@2.0.4': + resolution: {integrity: sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==} + + '@protobufjs/eventemitter@1.1.0': + resolution: {integrity: 
sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==} + + '@protobufjs/fetch@1.1.0': + resolution: {integrity: sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==} + + '@protobufjs/float@1.0.2': + resolution: {integrity: sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==} + + '@protobufjs/inquire@1.1.0': + resolution: {integrity: sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==} + + '@protobufjs/path@1.1.2': + resolution: {integrity: sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==} + + '@protobufjs/pool@1.1.0': + resolution: {integrity: sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==} + + '@protobufjs/utf8@1.1.0': + resolution: {integrity: sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==} + '@radix-ui/number@1.1.1': resolution: {integrity: sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g==} @@ -3401,6 +3487,9 @@ packages: '@types/tedious@4.0.14': resolution: {integrity: sha512-KHPsfX/FoVbUGbyYvk1q9MMQHLPeRZhRJZdO45Q4YjvFkv4hMNghCWTvy7rdKessBsmtz4euWCWAB6/tVpI1Iw==} + '@types/trusted-types@2.0.7': + resolution: {integrity: sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==} + '@types/unist@2.0.11': resolution: {integrity: sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==} @@ -4278,6 +4367,9 @@ packages: core-js-pure@3.47.0: resolution: {integrity: sha512-BcxeDbzUrRnXGYIVAGFtcGQVNpFcUhVjr6W7F8XktvQW2iJP9e66GP6xdKotCRFlrxBvNIBrhwKteRXqMV86Nw==} + core-js@3.48.0: + resolution: {integrity: sha512-zpEHTy1fjTMZCKLHUZoVeylt9XrzaIN2rbPXEt0k+q7JE5CkCZdo6bNq55bn24a69CH7ErAVLKijxJja4fw+UQ==} + core-util-is@1.0.3: resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==} @@ -4569,6 +4661,9 @@ packages: resolution: {integrity: sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==} engines: {node: '>= 4'} + dompurify@3.3.1: + resolution: {integrity: sha512-qkdCKzLNtrgPFP1Vo+98FRzJnBRGe4ffyCea9IwHB1fyxPOeNTHpLKYGd4Uk9xvNoH0ZoOjwZxNptyMwqrId1Q==} + domutils@2.8.0: resolution: {integrity: sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==} @@ -4939,6 +5034,9 @@ packages: picomatch: optional: true + fflate@0.4.8: + resolution: {integrity: sha512-FJqqoDBR00Mdj9ppamLa/Y7vxm+PRmNWA67N846RvsoYVMKB4q3y/de5PA7gUmRMYK/8CMz2GDZQmCRN1wBcWA==} + file-entry-cache@6.0.1: resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==} engines: {node: ^10.12.0 || >=12.0.0} @@ -5745,6 +5843,9 @@ packages: resolution: {integrity: sha512-HgMmCqIJSAKqo68l0rS2AanEWfkxaZ5wNiEFb5ggm08lDs9Xl2KxBlX3PTcaD2chBM1gXAYf491/M2Rv8Jwayg==} engines: {node: '>= 0.6.0'} + long@5.3.2: + resolution: {integrity: sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==} + longest-streak@3.1.0: resolution: {integrity: sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==} @@ -6534,6 +6635,12 @@ packages: resolution: {integrity: 
sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==} engines: {node: '>=0.10.0'} + posthog-js@1.334.1: + resolution: {integrity: sha512-5cDzLICr2afnwX/cR9fwoLC0vN0Nb5gP5HiCigzHkgHdO+E3WsYefla3EFMQz7U4r01CBPZ+nZ9/srkzeACxtQ==} + + preact@10.28.2: + resolution: {integrity: sha512-lbteaWGzGHdlIuiJ0l2Jq454m6kcpI1zNje6d8MlGAFlYvP2GO4ibnat7P74Esfz4sPTdM6UxtTwh/d3pwM9JA==} + prelude-ls@1.2.1: resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} engines: {node: '>= 0.8.0'} @@ -6622,6 +6729,10 @@ packages: property-information@7.1.0: resolution: {integrity: sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==} + protobufjs@7.5.4: + resolution: {integrity: sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==} + engines: {node: '>=12.0.0'} + proxy-from-env@1.1.0: resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} @@ -6643,6 +6754,9 @@ packages: resolution: {integrity: sha512-4EK3+xJl8Ts67nLYNwqw/dsFVnCf+qR7RgXSK9jEEm9unao3njwMDdmsdvoKBKHzxd7tCYz5e5M+SnMjdtXGQQ==} engines: {node: '>=0.6'} + query-selector-shadow-dom@1.0.1: + resolution: {integrity: sha512-lT5yCqEBgfoMYpf3F2xQRK7zEr1rhIIZuceDK6+xRkJQ4NMbHTwXqk4NkwDwQMNqXgG9r9fyHnzwNVs6zV5KRw==} + querystring-es3@0.2.1: resolution: {integrity: sha512-773xhDQnZBMFobEiztv8LIl70ch5MSF/jUQVlhwFyBILqq96anmoctVIYz+ZRp0qbCKATTn6ev02M3r7Ga5vqA==} engines: {node: '>=0.4.x'} @@ -7821,6 +7935,9 @@ packages: web-namespaces@2.0.1: resolution: {integrity: sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==} + web-vitals@5.1.0: + resolution: {integrity: sha512-ArI3kx5jI0atlTtmV0fWU3fjpLmq/nD3Zr1iFFlJLaqa5wLBkUSzINwBPySCX/8jRyjlmy1Volw1kz1g9XE4Jg==} + webidl-conversions@3.0.1: resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==} @@ -9420,6 +9537,10 @@ snapshots: '@open-draft/until@2.1.0': {} + '@opentelemetry/api-logs@0.208.0': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs@0.209.0': dependencies: '@opentelemetry/api': 1.9.0 @@ -9435,6 +9556,15 @@ snapshots: '@opentelemetry/api': 1.9.0 '@opentelemetry/semantic-conventions': 1.38.0 + '@opentelemetry/exporter-logs-otlp-http@0.208.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.208.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-amqplib@0.55.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 @@ -9629,6 +9759,23 @@ snapshots: transitivePeerDependencies: - supports-color + '@opentelemetry/otlp-exporter-base@0.208.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.208.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/otlp-transformer@0.208.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.208.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + 
'@opentelemetry/sdk-logs': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0) + protobufjs: 7.5.4 + '@opentelemetry/redis-common@0.38.2': {} '@opentelemetry/resources@2.2.0(@opentelemetry/api@1.9.0)': @@ -9637,6 +9784,19 @@ snapshots: '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.38.0 + '@opentelemetry/sdk-logs@0.208.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.208.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-metrics@2.2.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 @@ -9801,6 +9961,19 @@ snapshots: type-fest: 4.41.0 webpack-hot-middleware: 2.26.1 + '@posthog/core@1.13.0': + dependencies: + cross-spawn: 7.0.6 + + '@posthog/react@1.7.0(@types/react@18.3.17)(posthog-js@1.334.1)(react@18.3.1)': + dependencies: + posthog-js: 1.334.1 + react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.17 + + '@posthog/types@1.334.1': {} + '@prisma/instrumentation@6.19.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 @@ -9808,6 +9981,29 @@ snapshots: transitivePeerDependencies: - supports-color + '@protobufjs/aspromise@1.1.2': {} + + '@protobufjs/base64@1.1.2': {} + + '@protobufjs/codegen@2.0.4': {} + + '@protobufjs/eventemitter@1.1.0': {} + + '@protobufjs/fetch@1.1.0': + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/inquire': 1.1.0 + + '@protobufjs/float@1.0.2': {} + + '@protobufjs/inquire@1.1.0': {} + + '@protobufjs/path@1.1.2': {} + + '@protobufjs/pool@1.1.0': {} + + '@protobufjs/utf8@1.1.0': {} + '@radix-ui/number@1.1.1': {} '@radix-ui/primitive@1.1.3': {} @@ -11426,6 +11622,9 @@ snapshots: dependencies: '@types/node': 24.10.0 + '@types/trusted-types@2.0.7': + optional: true + '@types/unist@2.0.11': {} '@types/unist@3.0.3': {} @@ -12327,6 +12526,8 @@ snapshots: core-js-pure@3.47.0: {} + core-js@3.48.0: {} + core-util-is@1.0.3: {} cosmiconfig@7.1.0: @@ -12636,6 +12837,10 @@ snapshots: dependencies: domelementtype: 2.3.0 + dompurify@3.3.1: + optionalDependencies: + '@types/trusted-types': 2.0.7 + domutils@2.8.0: dependencies: dom-serializer: 1.4.1 @@ -13205,6 +13410,8 @@ snapshots: optionalDependencies: picomatch: 4.0.3 + fflate@0.4.8: {} + file-entry-cache@6.0.1: dependencies: flat-cache: 3.2.0 @@ -14092,6 +14299,8 @@ snapshots: loglevel@1.9.2: {} + long@5.3.2: {} + longest-streak@3.1.0: {} loose-envify@1.4.0: @@ -15154,6 +15363,24 @@ snapshots: dependencies: xtend: 4.0.2 + posthog-js@1.334.1: + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.208.0 + '@opentelemetry/exporter-logs-otlp-http': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.208.0(@opentelemetry/api@1.9.0) + '@posthog/core': 1.13.0 + '@posthog/types': 1.334.1 + core-js: 3.48.0 + dompurify: 3.3.1 + fflate: 0.4.8 + preact: 10.28.2 + query-selector-shadow-dom: 1.0.1 + web-vitals: 5.1.0 + + preact@10.28.2: {} + prelude-ls@1.2.1: {} prettier-plugin-tailwindcss@0.7.1(prettier@3.6.2): @@ -15187,6 +15414,21 @@ snapshots: 
property-information@7.1.0: {} + protobufjs@7.5.4: + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/base64': 1.1.2 + '@protobufjs/codegen': 2.0.4 + '@protobufjs/eventemitter': 1.1.0 + '@protobufjs/fetch': 1.1.0 + '@protobufjs/float': 1.0.2 + '@protobufjs/inquire': 1.1.0 + '@protobufjs/path': 1.1.2 + '@protobufjs/pool': 1.1.0 + '@protobufjs/utf8': 1.1.0 + '@types/node': 24.10.0 + long: 5.3.2 + proxy-from-env@1.1.0: {} public-encrypt@4.0.3: @@ -15208,6 +15450,8 @@ snapshots: dependencies: side-channel: 1.1.0 + query-selector-shadow-dom@1.0.1: {} + querystring-es3@0.2.1: {} queue-microtask@1.2.3: {} @@ -16619,6 +16863,8 @@ snapshots: web-namespaces@2.0.1: {} + web-vitals@5.1.0: {} + webidl-conversions@3.0.1: {} webidl-conversions@8.0.1: diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx index cfea5d9452..8ec1ba8be3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx @@ -38,8 +38,12 @@ export const AgentOutputs = ({ flowID }: { flowID: string | null }) => { return outputNodes .map((node) => { - const executionResult = node.data.nodeExecutionResult; - const outputData = executionResult?.output_data?.output; + const executionResults = node.data.nodeExecutionResults || []; + const latestResult = + executionResults.length > 0 + ? executionResults[executionResults.length - 1] + : undefined; + const outputData = latestResult?.output_data?.output; const renderer = globalRegistry.getRenderer(outputData); diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/useRunInputDialog.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/useRunInputDialog.ts index 0eba6e8188..629d4662a9 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/useRunInputDialog.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/useRunInputDialog.ts @@ -153,6 +153,9 @@ export const useRunInputDialog = ({ Object.entries(credentialValues).filter(([_, cred]) => cred && cred.id), ); + useNodeStore.getState().clearAllNodeExecutionResults(); + useNodeStore.getState().cleanNodesStatuses(); + await executeGraph({ graphId: flowID ?? "", graphVersion: flowVersion || null, diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx index 6306582c3b..d4aa26480d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx @@ -34,7 +34,7 @@ export type CustomNodeData = { uiType: BlockUIType; block_id: string; status?: AgentExecutionStatus; - nodeExecutionResult?: NodeExecutionResult; + nodeExecutionResults?: NodeExecutionResult[]; staticOutput?: boolean; // TODO : We need better type safety for the following backend fields. 
costs: BlockCost[]; @@ -75,7 +75,11 @@ export const CustomNode: React.FC> = React.memo( (value) => value !== null && value !== undefined && value !== "", ); - const outputData = data.nodeExecutionResult?.output_data; + const latestResult = + data.nodeExecutionResults && data.nodeExecutionResults.length > 0 + ? data.nodeExecutionResults[data.nodeExecutionResults.length - 1] + : undefined; + const outputData = latestResult?.output_data; const hasOutputError = typeof outputData === "object" && outputData !== null && diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx index 17134ae299..c5df24e0e6 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx @@ -14,10 +14,15 @@ import { useNodeOutput } from "./useNodeOutput"; import { ViewMoreData } from "./components/ViewMoreData"; export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => { - const { outputData, copiedKey, handleCopy, executionResultId, inputData } = - useNodeOutput(nodeId); + const { + latestOutputData, + copiedKey, + handleCopy, + executionResultId, + latestInputData, + } = useNodeOutput(nodeId); - if (Object.keys(outputData).length === 0) { + if (Object.keys(latestOutputData).length === 0) { return null; } @@ -41,18 +46,19 @@ export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => {
Input - +
- {Object.entries(outputData) + {Object.entries(latestOutputData) .slice(0, 2) - .map(([key, value]) => ( -
-
- - Pin: - - - {beautifyString(key)} - -
-
- - Data: - -
- {value.map((item, index) => ( -
- -
- ))} + .map(([key, value]) => { + return ( +
+
+ + Pin: + + + {beautifyString(key)} + +
+
+ + Data: + +
+ {value.map((item, index) => ( +
+ +
+ ))} -
- - +
+ + +
-
- ))} + ); + })}
- - {Object.keys(outputData).length > 2 && ( - - )} + diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/NodeDataViewer.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/NodeDataViewer.tsx index 0858db8f0e..680b6bc44a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/NodeDataViewer.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/NodeDataViewer.tsx @@ -19,22 +19,51 @@ import { CopyIcon, DownloadIcon, } from "@phosphor-icons/react"; -import { FC } from "react"; +import React, { FC } from "react"; import { useNodeDataViewer } from "./useNodeDataViewer"; +import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore"; +import { useShallow } from "zustand/react/shallow"; +import { NodeDataType } from "../../helpers"; -interface NodeDataViewerProps { - data: any; +export interface NodeDataViewerProps { + data?: any; pinName: string; + nodeId?: string; execId?: string; isViewMoreData?: boolean; + dataType?: NodeDataType; } export const NodeDataViewer: FC = ({ data, pinName, + nodeId, execId = "N/A", isViewMoreData = false, + dataType = "output", }) => { + const executionResults = useNodeStore( + useShallow((state) => + nodeId ? state.getNodeExecutionResults(nodeId) : [], + ), + ); + const latestInputData = useNodeStore( + useShallow((state) => + nodeId ? state.getLatestNodeInputData(nodeId) : undefined, + ), + ); + const accumulatedOutputData = useNodeStore( + useShallow((state) => + nodeId ? state.getAccumulatedNodeOutputData(nodeId) : {}, + ), + ); + + const resolvedData = + data ?? + (dataType === "input" + ? (latestInputData ?? {}) + : (accumulatedOutputData[pinName] ?? [])); + const { outputItems, copyExecutionId, @@ -42,7 +71,20 @@ export const NodeDataViewer: FC = ({ handleDownloadItem, dataArray, copiedIndex, - } = useNodeDataViewer(data, pinName, execId); + groupedExecutions, + totalGroupedItems, + handleCopyGroupedItem, + handleDownloadGroupedItem, + copiedKey, + } = useNodeDataViewer( + resolvedData, + pinName, + execId, + executionResults, + dataType, + ); + + const shouldGroupExecutions = groupedExecutions.length > 0; return ( @@ -68,44 +110,141 @@ export const NodeDataViewer: FC = ({
- Full Output Preview + Full {dataType === "input" ? "Input" : "Output"} Preview
- {dataArray.length} item{dataArray.length !== 1 ? "s" : ""} total + {shouldGroupExecutions ? totalGroupedItems : dataArray.length}{" "} + item + {shouldGroupExecutions + ? totalGroupedItems !== 1 + ? "s" + : "" + : dataArray.length !== 1 + ? "s" + : ""}{" "} + total
-
- - Execution ID: - - - {execId} - - -
-
- Pin:{" "} - {beautifyString(pinName)} -
+ {shouldGroupExecutions ? ( +
+ Pin:{" "} + {beautifyString(pinName)} +
+ ) : ( + <> +
+ + Execution ID: + + + {execId} + + +
+
+ Pin:{" "} + + {beautifyString(pinName)} + +
+ + )}
- {dataArray.length > 0 ? ( + {shouldGroupExecutions ? ( +
+ {groupedExecutions.map((execution) => ( +
+
+ + Execution ID: + + + {execution.execId} + +
+
+ {execution.outputItems.length > 0 ? ( + execution.outputItems.map((item, index) => ( +
+
+ +
+ +
+ + +
+
+ )) + ) : ( +
+ No data available +
+ )} +
+
+ ))} +
+ ) : dataArray.length > 0 ? (
{outputItems.map((item, index) => (
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/useNodeDataViewer.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/useNodeDataViewer.ts index d3c555970c..818d1266c1 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/useNodeDataViewer.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/useNodeDataViewer.ts @@ -1,82 +1,70 @@ -import type { OutputMetadata } from "@/components/contextual/OutputRenderers"; -import { globalRegistry } from "@/components/contextual/OutputRenderers"; import { downloadOutputs } from "@/components/contextual/OutputRenderers/utils/download"; import { useToast } from "@/components/molecules/Toast/use-toast"; import { beautifyString } from "@/lib/utils"; -import React, { useMemo, useState } from "react"; +import { useState } from "react"; +import type { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult"; +import { + NodeDataType, + createOutputItems, + getExecutionData, + normalizeToArray, + type OutputItem, +} from "../../helpers"; + +export type GroupedExecution = { + execId: string; + outputItems: Array; +}; export const useNodeDataViewer = ( data: any, pinName: string, execId: string, + executionResults?: NodeExecutionResult[], + dataType?: NodeDataType, ) => { const { toast } = useToast(); const [copiedIndex, setCopiedIndex] = useState(null); + const [copiedKey, setCopiedKey] = useState(null); - // Normalize data to array format - const dataArray = useMemo(() => { - return Array.isArray(data) ? data : [data]; - }, [data]); + const dataArray = Array.isArray(data) ? data : [data]; - // Prepare items for the enhanced renderer system - const outputItems = useMemo(() => { - if (!dataArray) return []; - - const items: Array<{ - key: string; - label: string; - value: unknown; - metadata?: OutputMetadata; - renderer: any; - }> = []; - - dataArray.forEach((value, index) => { - const metadata: OutputMetadata = {}; - - // Extract metadata from the value if it's an object - if ( - typeof value === "object" && - value !== null && - !React.isValidElement(value) - ) { - const objValue = value as any; - if (objValue.type) metadata.type = objValue.type; - if (objValue.mimeType) metadata.mimeType = objValue.mimeType; - if (objValue.filename) metadata.filename = objValue.filename; - if (objValue.language) metadata.language = objValue.language; - } - - const renderer = globalRegistry.getRenderer(value, metadata); - if (renderer) { - items.push({ - key: `item-${index}`, + const outputItems = + !dataArray || dataArray.length === 0 + ? [] + : createOutputItems(dataArray).map((item, index) => ({ + ...item, label: index === 0 ? beautifyString(pinName) : "", - value, - metadata, - renderer, - }); - } else { - // Fallback to text renderer - const textRenderer = globalRegistry - .getAllRenderers() - .find((r) => r.name === "TextRenderer"); - if (textRenderer) { - items.push({ - key: `item-${index}`, - label: index === 0 ? beautifyString(pinName) : "", - value: - typeof value === "string" - ? 
value - : JSON.stringify(value, null, 2), - metadata, - renderer: textRenderer, - }); - } - } - }); + })); - return items; - }, [dataArray, pinName]); + const groupedExecutions = + !executionResults || executionResults.length === 0 + ? [] + : [...executionResults].reverse().map((result) => { + const rawData = getExecutionData( + result, + dataType || "output", + pinName, + ); + let dataArray: unknown[]; + if (dataType === "input") { + dataArray = + rawData !== undefined && rawData !== null ? [rawData] : []; + } else { + dataArray = normalizeToArray(rawData); + } + + const outputItems = createOutputItems(dataArray); + return { + execId: result.node_exec_id, + outputItems, + }; + }); + + const totalGroupedItems = groupedExecutions.reduce( + (total, execution) => total + execution.outputItems.length, + 0, + ); const copyExecutionId = () => { navigator.clipboard.writeText(execId).then(() => { @@ -122,6 +110,45 @@ export const useNodeDataViewer = ( ]); }; + const handleCopyGroupedItem = async ( + execId: string, + index: number, + item: OutputItem, + ) => { + const copyContent = item.renderer.getCopyContent(item.value, item.metadata); + + if (!copyContent) { + return; + } + + try { + let text: string; + if (typeof copyContent.data === "string") { + text = copyContent.data; + } else if (copyContent.fallbackText) { + text = copyContent.fallbackText; + } else { + return; + } + + await navigator.clipboard.writeText(text); + setCopiedKey(`${execId}-${index}`); + setTimeout(() => setCopiedKey(null), 2000); + } catch (error) { + console.error("Failed to copy:", error); + } + }; + + const handleDownloadGroupedItem = (item: OutputItem) => { + downloadOutputs([ + { + value: item.value, + metadata: item.metadata, + renderer: item.renderer, + }, + ]); + }; + return { outputItems, dataArray, @@ -129,5 +156,10 @@ export const useNodeDataViewer = ( handleCopyItem, handleDownloadItem, copiedIndex, + groupedExecutions, + totalGroupedItems, + handleCopyGroupedItem, + handleDownloadGroupedItem, + copiedKey, }; }; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/ViewMoreData.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/ViewMoreData.tsx index 7bf026fe43..74d0da06c2 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/ViewMoreData.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/ViewMoreData.tsx @@ -8,16 +8,28 @@ import { useState } from "react"; import { NodeDataViewer } from "./NodeDataViewer/NodeDataViewer"; import { useToast } from "@/components/molecules/Toast/use-toast"; import { CheckIcon, CopyIcon } from "@phosphor-icons/react"; +import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore"; +import { useShallow } from "zustand/react/shallow"; +import { + NodeDataType, + getExecutionEntries, + normalizeToArray, +} from "../helpers"; export const ViewMoreData = ({ - outputData, - execId, + nodeId, + dataType = "output", }: { - outputData: Record>; - execId?: string; + nodeId: string; + dataType?: NodeDataType; }) => { const [copiedKey, setCopiedKey] = useState(null); const { toast } = useToast(); + const executionResults = useNodeStore( + useShallow((state) => state.getNodeExecutionResults(nodeId)), + ); + + const reversedExecutionResults = 
[...executionResults].reverse(); const handleCopy = (key: string, value: any) => { const textToCopy = @@ -29,8 +41,8 @@ export const ViewMoreData = ({ setTimeout(() => setCopiedKey(null), 2000); }; - const copyExecutionId = () => { - navigator.clipboard.writeText(execId || "N/A").then(() => { + const copyExecutionId = (executionId: string) => { + navigator.clipboard.writeText(executionId || "N/A").then(() => { toast({ title: "Execution ID copied to clipboard!", duration: 2000, @@ -42,7 +54,7 @@ export const ViewMoreData = ({ -
-
- {Object.entries(outputData).map(([key, value]) => ( -
+ {reversedExecutionResults.map((result) => ( +
+ + Execution ID: + - Pin: - - - {beautifyString(key)} + {result.node_exec_id} +
-
- - Data: - -
- {value.map((item, index) => ( -
- -
- ))} -
- - -
-
+
+ {getExecutionEntries(result, dataType).map( + ([key, value]) => { + const normalizedValue = normalizeToArray(value); + return ( +
+
+ + Pin: + + + {beautifyString(key)} + +
+
+ + Data: + +
+ {normalizedValue.map((item, index) => ( +
+ +
+ ))} + +
+ + +
+
+
+
+ ); + }, + )}
))} diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/helpers.ts new file mode 100644 index 0000000000..c75cd83cac --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/helpers.ts @@ -0,0 +1,83 @@ +import type { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult"; +import type { OutputMetadata } from "@/components/contextual/OutputRenderers"; +import { globalRegistry } from "@/components/contextual/OutputRenderers"; +import React from "react"; + +export type NodeDataType = "input" | "output"; + +export type OutputItem = { + key: string; + value: unknown; + metadata?: OutputMetadata; + renderer: any; +}; + +export const normalizeToArray = (value: unknown) => { + if (value === undefined) return []; + return Array.isArray(value) ? value : [value]; +}; + +export const getExecutionData = ( + result: NodeExecutionResult, + dataType: NodeDataType, + pinName: string, +) => { + if (dataType === "input") { + return result.input_data; + } + + return result.output_data?.[pinName]; +}; + +export const createOutputItems = (dataArray: unknown[]): Array => { + const items: Array = []; + + dataArray.forEach((value, index) => { + const metadata: OutputMetadata = {}; + + if ( + typeof value === "object" && + value !== null && + !React.isValidElement(value) + ) { + const objValue = value as any; + if (objValue.type) metadata.type = objValue.type; + if (objValue.mimeType) metadata.mimeType = objValue.mimeType; + if (objValue.filename) metadata.filename = objValue.filename; + if (objValue.language) metadata.language = objValue.language; + } + + const renderer = globalRegistry.getRenderer(value, metadata); + if (renderer) { + items.push({ + key: `item-${index}`, + value, + metadata, + renderer, + }); + } else { + const textRenderer = globalRegistry + .getAllRenderers() + .find((r) => r.name === "TextRenderer"); + if (textRenderer) { + items.push({ + key: `item-${index}`, + value: + typeof value === "string" ? value : JSON.stringify(value, null, 2), + metadata, + renderer: textRenderer, + }); + } + } + }); + + return items; +}; + +export const getExecutionEntries = ( + result: NodeExecutionResult, + dataType: NodeDataType, +) => { + const data = dataType === "input" ? 
result.input_data : result.output_data; + return Object.entries(data || {}); +}; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx index cfc599c6e4..8ebf1dfaf3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx @@ -7,15 +7,18 @@ export const useNodeOutput = (nodeId: string) => { const [copiedKey, setCopiedKey] = useState(null); const { toast } = useToast(); - const nodeExecutionResult = useNodeStore( - useShallow((state) => state.getNodeExecutionResult(nodeId)), + const latestResult = useNodeStore( + useShallow((state) => state.getLatestNodeExecutionResult(nodeId)), ); - const inputData = nodeExecutionResult?.input_data; + const latestInputData = useNodeStore( + useShallow((state) => state.getLatestNodeInputData(nodeId)), + ); + + const latestOutputData: Record> = useNodeStore( + useShallow((state) => state.getLatestNodeOutputData(nodeId) || {}), + ); - const outputData: Record> = { - ...nodeExecutionResult?.output_data, - }; const handleCopy = async (key: string, value: any) => { try { const text = JSON.stringify(value, null, 2); @@ -35,11 +38,12 @@ export const useNodeOutput = (nodeId: string) => { }); } }; + return { - outputData, - inputData, + latestOutputData, + latestInputData, copiedKey, handleCopy, - executionResultId: nodeExecutionResult?.node_exec_id, + executionResultId: latestResult?.node_exec_id, }; }; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/SubAgentUpdate/useSubAgentUpdateState.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/SubAgentUpdate/useSubAgentUpdateState.ts index d4ba538172..143cd58509 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/SubAgentUpdate/useSubAgentUpdateState.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/SubAgentUpdate/useSubAgentUpdateState.ts @@ -1,10 +1,7 @@ import { useState, useCallback, useEffect } from "react"; import { useShallow } from "zustand/react/shallow"; import { useGraphStore } from "@/app/(platform)/build/stores/graphStore"; -import { - useNodeStore, - NodeResolutionData, -} from "@/app/(platform)/build/stores/nodeStore"; +import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore"; import { useEdgeStore } from "@/app/(platform)/build/stores/edgeStore"; import { useSubAgentUpdate, @@ -13,6 +10,7 @@ import { } from "@/app/(platform)/build/hooks/useSubAgentUpdate"; import { GraphInputSchema, GraphOutputSchema } from "@/lib/autogpt-server-api"; import { CustomNodeData } from "../../CustomNode"; +import { NodeResolutionData } from "@/app/(platform)/build/stores/types"; // Stable empty set to avoid creating new references in selectors const EMPTY_SET: Set = new Set(); diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/helpers.ts index 54ddf2a61d..50326a03e6 100644 --- 
a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/helpers.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/helpers.ts @@ -1,5 +1,5 @@ import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus"; -import { NodeResolutionData } from "@/app/(platform)/build/stores/nodeStore"; +import { NodeResolutionData } from "@/app/(platform)/build/stores/types"; import { RJSFSchema } from "@rjsf/utils"; export const nodeStyleBasedOnStatus: Record = { diff --git a/autogpt_platform/frontend/src/app/(platform)/build/stores/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/build/stores/helpers.ts new file mode 100644 index 0000000000..bcdfd4c313 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/build/stores/helpers.ts @@ -0,0 +1,16 @@ +export const accumulateExecutionData = ( + accumulated: Record, + data: Record | undefined, +) => { + if (!data) return { ...accumulated }; + const next = { ...accumulated }; + Object.entries(data).forEach(([key, values]) => { + const nextValues = Array.isArray(values) ? values : [values]; + if (next[key]) { + next[key] = [...next[key], ...nextValues]; + } else { + next[key] = [...nextValues]; + } + }); + return next; +}; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts b/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts index 5502a8780d..f7a52636f3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts @@ -10,6 +10,8 @@ import { import { Node } from "@/app/api/__generated__/models/node"; import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus"; import { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult"; +import { NodeExecutionResultInputData } from "@/app/api/__generated__/models/nodeExecutionResultInputData"; +import { NodeExecutionResultOutputData } from "@/app/api/__generated__/models/nodeExecutionResultOutputData"; import { useHistoryStore } from "./historyStore"; import { useEdgeStore } from "./edgeStore"; import { BlockUIType } from "../components/types"; @@ -18,31 +20,10 @@ import { ensurePathExists, parseHandleIdToPath, } from "@/components/renderers/InputRenderer/helpers"; -import { IncompatibilityInfo } from "../hooks/useSubAgentUpdate/types"; +import { accumulateExecutionData } from "./helpers"; +import { NodeResolutionData } from "./types"; -// Resolution mode data stored per node -export type NodeResolutionData = { - incompatibilities: IncompatibilityInfo; - // The NEW schema from the update (what we're updating TO) - pendingUpdate: { - input_schema: Record; - output_schema: Record; - }; - // The OLD schema before the update (what we're updating FROM) - // Needed to merge and show removed inputs during resolution - currentSchema: { - input_schema: Record; - output_schema: Record; - }; - // The full updated hardcoded values to apply when resolution completes - pendingHardcodedValues: Record; -}; - -// Minimum movement (in pixels) required before logging position change to history -// Prevents spamming history with small movements when clicking on inputs inside blocks const MINIMUM_MOVE_BEFORE_LOG = 50; - -// Track initial positions when drag starts (outside store to avoid re-renders) const dragStartPositions: Record = {}; let dragStartState: { nodes: CustomNode[]; edges: CustomEdge[] } | null = null; 
@@ -52,6 +33,15 @@ type NodeStore = { nodeCounter: number; setNodeCounter: (nodeCounter: number) => void; nodeAdvancedStates: Record; + + latestNodeInputData: Record; + latestNodeOutputData: Record< + string, + NodeExecutionResultOutputData | undefined + >; + accumulatedNodeInputData: Record>; + accumulatedNodeOutputData: Record>; + setNodes: (nodes: CustomNode[]) => void; onNodesChange: (changes: NodeChange[]) => void; addNode: (node: CustomNode) => void; @@ -72,12 +62,26 @@ type NodeStore = { updateNodeStatus: (nodeId: string, status: AgentExecutionStatus) => void; getNodeStatus: (nodeId: string) => AgentExecutionStatus | undefined; + cleanNodesStatuses: () => void; updateNodeExecutionResult: ( nodeId: string, result: NodeExecutionResult, ) => void; - getNodeExecutionResult: (nodeId: string) => NodeExecutionResult | undefined; + getNodeExecutionResults: (nodeId: string) => NodeExecutionResult[]; + getLatestNodeInputData: ( + nodeId: string, + ) => NodeExecutionResultInputData | undefined; + getLatestNodeOutputData: ( + nodeId: string, + ) => NodeExecutionResultOutputData | undefined; + getAccumulatedNodeInputData: (nodeId: string) => Record; + getAccumulatedNodeOutputData: (nodeId: string) => Record; + getLatestNodeExecutionResult: ( + nodeId: string, + ) => NodeExecutionResult | undefined; + clearAllNodeExecutionResults: () => void; + getNodeBlockUIType: (nodeId: string) => BlockUIType; hasWebhookNodes: () => boolean; @@ -122,6 +126,10 @@ export const useNodeStore = create((set, get) => ({ nodeCounter: 0, setNodeCounter: (nodeCounter) => set({ nodeCounter }), nodeAdvancedStates: {}, + latestNodeInputData: {}, + latestNodeOutputData: {}, + accumulatedNodeInputData: {}, + accumulatedNodeOutputData: {}, incrementNodeCounter: () => set((state) => ({ nodeCounter: state.nodeCounter + 1, @@ -317,17 +325,162 @@ export const useNodeStore = create((set, get) => ({ return get().nodes.find((n) => n.id === nodeId)?.data?.status; }, - updateNodeExecutionResult: (nodeId: string, result: NodeExecutionResult) => { + cleanNodesStatuses: () => { set((state) => ({ - nodes: state.nodes.map((n) => - n.id === nodeId - ? 
{ ...n, data: { ...n.data, nodeExecutionResult: result } } - : n, - ), + nodes: state.nodes.map((n) => ({ + ...n, + data: { ...n.data, status: undefined }, + })), })); }, - getNodeExecutionResult: (nodeId: string) => { - return get().nodes.find((n) => n.id === nodeId)?.data?.nodeExecutionResult; + + updateNodeExecutionResult: (nodeId: string, result: NodeExecutionResult) => { + set((state) => { + let latestNodeInputData = state.latestNodeInputData; + let latestNodeOutputData = state.latestNodeOutputData; + let accumulatedNodeInputData = state.accumulatedNodeInputData; + let accumulatedNodeOutputData = state.accumulatedNodeOutputData; + + const nodes = state.nodes.map((n) => { + if (n.id !== nodeId) return n; + + const existingResults = n.data.nodeExecutionResults || []; + const duplicateIndex = existingResults.findIndex( + (r) => r.node_exec_id === result.node_exec_id, + ); + + if (duplicateIndex !== -1) { + const oldResult = existingResults[duplicateIndex]; + const inputDataChanged = + JSON.stringify(oldResult.input_data) !== + JSON.stringify(result.input_data); + const outputDataChanged = + JSON.stringify(oldResult.output_data) !== + JSON.stringify(result.output_data); + + if (!inputDataChanged && !outputDataChanged) { + return n; + } + + const updatedResults = [...existingResults]; + updatedResults[duplicateIndex] = result; + + const recomputedAccumulatedInput = updatedResults.reduce( + (acc, r) => accumulateExecutionData(acc, r.input_data), + {} as Record, + ); + const recomputedAccumulatedOutput = updatedResults.reduce( + (acc, r) => accumulateExecutionData(acc, r.output_data), + {} as Record, + ); + + const mostRecentResult = updatedResults[updatedResults.length - 1]; + latestNodeInputData = { + ...latestNodeInputData, + [nodeId]: mostRecentResult.input_data, + }; + latestNodeOutputData = { + ...latestNodeOutputData, + [nodeId]: mostRecentResult.output_data, + }; + + accumulatedNodeInputData = { + ...accumulatedNodeInputData, + [nodeId]: recomputedAccumulatedInput, + }; + accumulatedNodeOutputData = { + ...accumulatedNodeOutputData, + [nodeId]: recomputedAccumulatedOutput, + }; + + return { + ...n, + data: { + ...n.data, + nodeExecutionResults: updatedResults, + }, + }; + } + + accumulatedNodeInputData = { + ...accumulatedNodeInputData, + [nodeId]: accumulateExecutionData( + accumulatedNodeInputData[nodeId] || {}, + result.input_data, + ), + }; + accumulatedNodeOutputData = { + ...accumulatedNodeOutputData, + [nodeId]: accumulateExecutionData( + accumulatedNodeOutputData[nodeId] || {}, + result.output_data, + ), + }; + + latestNodeInputData = { + ...latestNodeInputData, + [nodeId]: result.input_data, + }; + latestNodeOutputData = { + ...latestNodeOutputData, + [nodeId]: result.output_data, + }; + + return { + ...n, + data: { + ...n.data, + nodeExecutionResults: [...existingResults, result], + }, + }; + }); + + return { + nodes, + latestNodeInputData, + latestNodeOutputData, + accumulatedNodeInputData, + accumulatedNodeOutputData, + }; + }); + }, + getNodeExecutionResults: (nodeId: string) => { + return ( + get().nodes.find((n) => n.id === nodeId)?.data?.nodeExecutionResults || [] + ); + }, + getLatestNodeInputData: (nodeId: string) => { + return get().latestNodeInputData[nodeId]; + }, + getLatestNodeOutputData: (nodeId: string) => { + return get().latestNodeOutputData[nodeId]; + }, + getAccumulatedNodeInputData: (nodeId: string) => { + return get().accumulatedNodeInputData[nodeId] || {}; + }, + getAccumulatedNodeOutputData: (nodeId: string) => { + return 
get().accumulatedNodeOutputData[nodeId] || {}; + }, + getLatestNodeExecutionResult: (nodeId: string) => { + const results = + get().nodes.find((n) => n.id === nodeId)?.data?.nodeExecutionResults || + []; + return results.length > 0 ? results[results.length - 1] : undefined; + }, + clearAllNodeExecutionResults: () => { + set((state) => ({ + nodes: state.nodes.map((n) => ({ + ...n, + data: { + ...n.data, + nodeExecutionResults: [], + }, + })), + latestNodeInputData: {}, + latestNodeOutputData: {}, + accumulatedNodeInputData: {}, + accumulatedNodeOutputData: {}, + })); }, getNodeBlockUIType: (nodeId: string) => { return ( diff --git a/autogpt_platform/frontend/src/app/(platform)/build/stores/types.ts b/autogpt_platform/frontend/src/app/(platform)/build/stores/types.ts new file mode 100644 index 0000000000..f0ec7e6c1c --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/build/stores/types.ts @@ -0,0 +1,14 @@ +import { IncompatibilityInfo } from "../hooks/useSubAgentUpdate/types"; + +export type NodeResolutionData = { + incompatibilities: IncompatibilityInfo; + pendingUpdate: { + input_schema: Record; + output_schema: Record; + }; + currentSchema: { + input_schema: Record; + output_schema: Record; + }; + pendingHardcodedValues: Record; +}; diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/NewChatContext.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/NewChatContext.tsx deleted file mode 100644 index 0826637043..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/NewChatContext.tsx +++ /dev/null @@ -1,41 +0,0 @@ -"use client"; - -import { createContext, useContext, useRef, type ReactNode } from "react"; - -interface NewChatContextValue { - onNewChatClick: () => void; - setOnNewChatClick: (handler?: () => void) => void; - performNewChat?: () => void; - setPerformNewChat: (handler?: () => void) => void; -} - -const NewChatContext = createContext(null); - -export function NewChatProvider({ children }: { children: ReactNode }) { - const onNewChatRef = useRef<(() => void) | undefined>(); - const performNewChatRef = useRef<(() => void) | undefined>(); - const contextValueRef = useRef({ - onNewChatClick() { - onNewChatRef.current?.(); - }, - setOnNewChatClick(handler?: () => void) { - onNewChatRef.current = handler; - }, - performNewChat() { - performNewChatRef.current?.(); - }, - setPerformNewChat(handler?: () => void) { - performNewChatRef.current = handler; - }, - }); - - return ( - - {children} - - ); -} - -export function useNewChat() { - return useContext(NewChatContext); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx index 44e32024a8..3f695da5ed 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx @@ -1,12 +1,10 @@ "use client"; import { ChatLoader } from "@/components/contextual/Chat/components/ChatLoader/ChatLoader"; +import { Text } from "@/components/atoms/Text/Text"; import { NAVBAR_HEIGHT_PX } from "@/lib/constants"; import type { ReactNode } from "react"; -import { useEffect } from "react"; -import { useNewChat } from "../../NewChatContext"; import { DesktopSidebar } from "./components/DesktopSidebar/DesktopSidebar"; -import { LoadingState } from "./components/LoadingState/LoadingState"; import { MobileDrawer } from 
"./components/MobileDrawer/MobileDrawer"; import { MobileHeader } from "./components/MobileHeader/MobileHeader"; import { useCopilotShell } from "./useCopilotShell"; @@ -20,36 +18,21 @@ export function CopilotShell({ children }: Props) { isMobile, isDrawerOpen, isLoading, + isCreatingSession, isLoggedIn, hasActiveSession, sessions, currentSessionId, - handleSelectSession, handleOpenDrawer, handleCloseDrawer, handleDrawerOpenChange, - handleNewChat, + handleNewChatClick, + handleSessionClick, hasNextPage, isFetchingNextPage, fetchNextPage, - isReadyToShowContent, } = useCopilotShell(); - const newChatContext = useNewChat(); - const handleNewChatClickWrapper = - newChatContext?.onNewChatClick || handleNewChat; - - useEffect( - function registerNewChatHandler() { - if (!newChatContext) return; - newChatContext.setPerformNewChat(handleNewChat); - return function cleanup() { - newChatContext.setPerformNewChat(undefined); - }; - }, - [newChatContext, handleNewChat], - ); - if (!isLoggedIn) { return (
@@ -70,9 +53,9 @@ export function CopilotShell({ children }: Props) { isLoading={isLoading} hasNextPage={hasNextPage} isFetchingNextPage={isFetchingNextPage} - onSelectSession={handleSelectSession} + onSelectSession={handleSessionClick} onFetchNextPage={fetchNextPage} - onNewChat={handleNewChatClickWrapper} + onNewChat={handleNewChatClick} hasActiveSession={Boolean(hasActiveSession)} /> )} @@ -80,7 +63,18 @@ export function CopilotShell({ children }: Props) {
{isMobile && }
- {isReadyToShowContent ? children : } + {isCreatingSession ? ( +
+
+ + + Creating your chat... + +
+
+ ) : ( + children + )}
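(The JSX wrappers in this hunk were stripped during extraction; only the condition, ChatLoader, Text, and the "Creating your chat..." copy survive.) The branch is driven by the isCreatingSession flag in the new copilot-page-store: useCopilotPage toggles it around session creation further down in this diff, and CopilotShell simply renders the loader while it is true. A minimal sketch of that toggle, assuming it lives next to the copilot page so the relative import resolves; the helper name is hypothetical:

import { useCopilotStore } from "./copilot-page-store";

// Hypothetical helper: drive the shell's "Creating your chat..." state
// around any long-running setup action (useCopilotPage does this inline).
async function withCreatingState<T>(action: () => Promise<T>): Promise<T> {
  const { setIsCreatingSession } = useCopilotStore.getState();
  setIsCreatingSession(true);
  try {
    return await action();
  } finally {
    setIsCreatingSession(false);
  }
}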
@@ -92,9 +86,9 @@ export function CopilotShell({ children }: Props) { isLoading={isLoading} hasNextPage={hasNextPage} isFetchingNextPage={isFetchingNextPage} - onSelectSession={handleSelectSession} + onSelectSession={handleSessionClick} onFetchNextPage={fetchNextPage} - onNewChat={handleNewChatClickWrapper} + onNewChat={handleNewChatClick} onClose={handleCloseDrawer} onOpenChange={handleDrawerOpenChange} hasActiveSession={Boolean(hasActiveSession)} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/LoadingState/LoadingState.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/LoadingState/LoadingState.tsx deleted file mode 100644 index 21b1663916..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/LoadingState/LoadingState.tsx +++ /dev/null @@ -1,15 +0,0 @@ -import { Text } from "@/components/atoms/Text/Text"; -import { ChatLoader } from "@/components/contextual/Chat/components/ChatLoader/ChatLoader"; - -export function LoadingState() { - return ( -
-
- - - Loading your chats... - -
-
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/useMobileDrawer.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/useMobileDrawer.ts index c9504e49a9..2ef63a4422 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/useMobileDrawer.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/useMobileDrawer.ts @@ -3,17 +3,17 @@ import { useState } from "react"; export function useMobileDrawer() { const [isDrawerOpen, setIsDrawerOpen] = useState(false); - function handleOpenDrawer() { + const handleOpenDrawer = () => { setIsDrawerOpen(true); - } + }; - function handleCloseDrawer() { + const handleCloseDrawer = () => { setIsDrawerOpen(false); - } + }; - function handleDrawerOpenChange(open: boolean) { + const handleDrawerOpenChange = (open: boolean) => { setIsDrawerOpen(open); - } + }; return { isDrawerOpen, diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts index 8833a419c1..11ddd937af 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts @@ -1,7 +1,7 @@ import { useGetV2ListSessions } from "@/app/api/__generated__/endpoints/chat/chat"; import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; import { okData } from "@/app/api/helpers"; -import { useEffect, useMemo, useState } from "react"; +import { useEffect, useState } from "react"; const PAGE_SIZE = 50; @@ -11,9 +11,11 @@ export interface UseSessionsPaginationArgs { export function useSessionsPagination({ enabled }: UseSessionsPaginationArgs) { const [offset, setOffset] = useState(0); + const [accumulatedSessions, setAccumulatedSessions] = useState< SessionSummaryResponse[] >([]); + const [totalCount, setTotalCount] = useState(null); const { data, isLoading, isFetching, isError } = useGetV2ListSessions( @@ -43,17 +45,14 @@ export function useSessionsPagination({ enabled }: UseSessionsPaginationArgs) { } }, [data, offset, enabled]); - const hasNextPage = useMemo(() => { - if (totalCount === null) return false; - return accumulatedSessions.length < totalCount; - }, [accumulatedSessions.length, totalCount]); + const hasNextPage = + totalCount !== null && accumulatedSessions.length < totalCount; - const areAllSessionsLoaded = useMemo(() => { - if (totalCount === null) return false; - return ( - accumulatedSessions.length >= totalCount && !isFetching && !isLoading - ); - }, [accumulatedSessions.length, totalCount, isFetching, isLoading]); + const areAllSessionsLoaded = + totalCount !== null && + accumulatedSessions.length >= totalCount && + !isFetching && + !isLoading; useEffect(() => { if ( @@ -67,17 +66,17 @@ export function useSessionsPagination({ enabled }: UseSessionsPaginationArgs) { } }, [hasNextPage, isFetching, isLoading, isError, totalCount]); - function fetchNextPage() { + const fetchNextPage = () => { if (hasNextPage && !isFetching) { setOffset((prev) => prev + PAGE_SIZE); } - } + }; - function reset() { + const reset = () => { 
setOffset(0); setAccumulatedSessions([]); setTotalCount(null); - } + }; return { sessions: accumulatedSessions, diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts index bf4eb70ccb..ef0d414edf 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts @@ -2,9 +2,7 @@ import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessi import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; import { format, formatDistanceToNow, isToday } from "date-fns"; -export function convertSessionDetailToSummary( - session: SessionDetailResponse, -): SessionSummaryResponse { +export function convertSessionDetailToSummary(session: SessionDetailResponse) { return { id: session.id, created_at: session.created_at, @@ -13,17 +11,25 @@ export function convertSessionDetailToSummary( }; } -export function filterVisibleSessions( - sessions: SessionSummaryResponse[], -): SessionSummaryResponse[] { - return sessions.filter( - (session) => session.updated_at !== session.created_at, - ); +export function filterVisibleSessions(sessions: SessionSummaryResponse[]) { + const fiveMinutesAgo = Date.now() - 5 * 60 * 1000; + return sessions.filter((session) => { + const hasBeenUpdated = session.updated_at !== session.created_at; + + if (hasBeenUpdated) return true; + + const isRecentlyCreated = + new Date(session.created_at).getTime() > fiveMinutesAgo; + + return isRecentlyCreated; + }); } -export function getSessionTitle(session: SessionSummaryResponse): string { +export function getSessionTitle(session: SessionSummaryResponse) { if (session.title) return session.title; + const isNewSession = session.updated_at === session.created_at; + if (isNewSession) { const createdDate = new Date(session.created_at); if (isToday(createdDate)) { @@ -31,12 +37,11 @@ export function getSessionTitle(session: SessionSummaryResponse): string { } return format(createdDate, "MMM d, yyyy"); } + return "Untitled Chat"; } -export function getSessionUpdatedLabel( - session: SessionSummaryResponse, -): string { +export function getSessionUpdatedLabel(session: SessionSummaryResponse) { if (!session.updated_at) return ""; return formatDistanceToNow(new Date(session.updated_at), { addSuffix: true }); } @@ -45,8 +50,10 @@ export function mergeCurrentSessionIntoList( accumulatedSessions: SessionSummaryResponse[], currentSessionId: string | null, currentSessionData: SessionDetailResponse | null | undefined, -): SessionSummaryResponse[] { + recentlyCreatedSessions?: Map, +) { const filteredSessions: SessionSummaryResponse[] = []; + const addedIds = new Set(); if (accumulatedSessions.length > 0) { const visibleSessions = filterVisibleSessions(accumulatedSessions); @@ -61,105 +68,39 @@ export function mergeCurrentSessionIntoList( ); if (!isInVisible) { filteredSessions.push(currentInAll); + addedIds.add(currentInAll.id); } } } - filteredSessions.push(...visibleSessions); + for (const session of visibleSessions) { + if (!addedIds.has(session.id)) { + filteredSessions.push(session); + addedIds.add(session.id); + } + } } if (currentSessionId && currentSessionData) { - const isCurrentInList = filteredSessions.some( - (s) => s.id === currentSessionId, - ); - if (!isCurrentInList) { + if (!addedIds.has(currentSessionId)) { const 
summarySession = convertSessionDetailToSummary(currentSessionData); filteredSessions.unshift(summarySession); + addedIds.add(currentSessionId); + } + } + + if (recentlyCreatedSessions) { + for (const [sessionId, sessionData] of recentlyCreatedSessions) { + if (!addedIds.has(sessionId)) { + filteredSessions.unshift(sessionData); + addedIds.add(sessionId); + } } } return filteredSessions; } -export function getCurrentSessionId( - searchParams: URLSearchParams, -): string | null { +export function getCurrentSessionId(searchParams: URLSearchParams) { return searchParams.get("sessionId"); } - -export function shouldAutoSelectSession( - areAllSessionsLoaded: boolean, - hasAutoSelectedSession: boolean, - paramSessionId: string | null, - visibleSessions: SessionSummaryResponse[], - accumulatedSessions: SessionSummaryResponse[], - isLoading: boolean, - totalCount: number | null, -): { - shouldSelect: boolean; - sessionIdToSelect: string | null; - shouldCreate: boolean; -} { - if (!areAllSessionsLoaded || hasAutoSelectedSession) { - return { - shouldSelect: false, - sessionIdToSelect: null, - shouldCreate: false, - }; - } - - if (paramSessionId) { - return { - shouldSelect: false, - sessionIdToSelect: null, - shouldCreate: false, - }; - } - - if (visibleSessions.length > 0) { - return { - shouldSelect: true, - sessionIdToSelect: visibleSessions[0].id, - shouldCreate: false, - }; - } - - if (accumulatedSessions.length === 0 && !isLoading && totalCount === 0) { - return { shouldSelect: false, sessionIdToSelect: null, shouldCreate: true }; - } - - if (totalCount === 0) { - return { - shouldSelect: false, - sessionIdToSelect: null, - shouldCreate: false, - }; - } - - return { shouldSelect: false, sessionIdToSelect: null, shouldCreate: false }; -} - -export function checkReadyToShowContent( - areAllSessionsLoaded: boolean, - paramSessionId: string | null, - accumulatedSessions: SessionSummaryResponse[], - isCurrentSessionLoading: boolean, - currentSessionData: SessionDetailResponse | null | undefined, - hasAutoSelectedSession: boolean, -): boolean { - if (!areAllSessionsLoaded) return false; - - if (paramSessionId) { - const sessionFound = accumulatedSessions.some( - (s) => s.id === paramSessionId, - ); - return ( - sessionFound || - (!isCurrentSessionLoading && - currentSessionData !== undefined && - currentSessionData !== null) - ); - } - - return hasAutoSelectedSession; -} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts index cadd98da3e..74fd663ab2 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts @@ -1,26 +1,24 @@ "use client"; import { + getGetV2GetSessionQueryKey, getGetV2ListSessionsQueryKey, useGetV2GetSession, } from "@/app/api/__generated__/endpoints/chat/chat"; import { okData } from "@/app/api/helpers"; +import { useChatStore } from "@/components/contextual/Chat/chat-store"; import { useBreakpoint } from "@/lib/hooks/useBreakpoint"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { useQueryClient } from "@tanstack/react-query"; -import { usePathname, useRouter, useSearchParams } from "next/navigation"; -import { useEffect, useRef, useState } from "react"; +import { usePathname, useSearchParams } from "next/navigation"; +import { useRef } from "react"; 
+import { useCopilotStore } from "../../copilot-page-store"; +import { useCopilotSessionId } from "../../useCopilotSessionId"; import { useMobileDrawer } from "./components/MobileDrawer/useMobileDrawer"; -import { useSessionsPagination } from "./components/SessionsList/useSessionsPagination"; -import { - checkReadyToShowContent, - filterVisibleSessions, - getCurrentSessionId, - mergeCurrentSessionIntoList, -} from "./helpers"; +import { getCurrentSessionId } from "./helpers"; +import { useShellSessionList } from "./useShellSessionList"; export function useCopilotShell() { - const router = useRouter(); const pathname = usePathname(); const searchParams = useSearchParams(); const queryClient = useQueryClient(); @@ -29,6 +27,8 @@ export function useCopilotShell() { const isMobile = breakpoint === "base" || breakpoint === "sm" || breakpoint === "md"; + const { urlSessionId, setUrlSessionId } = useCopilotSessionId(); + const isOnHomepage = pathname === "/copilot"; const paramSessionId = searchParams.get("sessionId"); @@ -41,114 +41,113 @@ export function useCopilotShell() { const paginationEnabled = !isMobile || isDrawerOpen || !!paramSessionId; - const { - sessions: accumulatedSessions, - isLoading: isSessionsLoading, - isFetching: isSessionsFetching, - hasNextPage, - areAllSessionsLoaded, - fetchNextPage, - reset: resetPagination, - } = useSessionsPagination({ - enabled: paginationEnabled, - }); - const currentSessionId = getCurrentSessionId(searchParams); - const { data: currentSessionData, isLoading: isCurrentSessionLoading } = - useGetV2GetSession(currentSessionId || "", { + const { data: currentSessionData } = useGetV2GetSession( + currentSessionId || "", + { query: { enabled: !!currentSessionId, select: okData, }, - }); - - const [hasAutoSelectedSession, setHasAutoSelectedSession] = useState(false); - const hasAutoSelectedRef = useRef(false); - - // Mark as auto-selected when sessionId is in URL - useEffect(() => { - if (paramSessionId && !hasAutoSelectedRef.current) { - hasAutoSelectedRef.current = true; - setHasAutoSelectedSession(true); - } - }, [paramSessionId]); - - // On homepage without sessionId, mark as ready immediately - useEffect(() => { - if (isOnHomepage && !paramSessionId && !hasAutoSelectedRef.current) { - hasAutoSelectedRef.current = true; - setHasAutoSelectedSession(true); - } - }, [isOnHomepage, paramSessionId]); - - // Invalidate sessions list when navigating to homepage (to show newly created sessions) - useEffect(() => { - if (isOnHomepage && !paramSessionId) { - queryClient.invalidateQueries({ - queryKey: getGetV2ListSessionsQueryKey(), - }); - } - }, [isOnHomepage, paramSessionId, queryClient]); - - // Reset pagination when query becomes disabled - const prevPaginationEnabledRef = useRef(paginationEnabled); - useEffect(() => { - if (prevPaginationEnabledRef.current && !paginationEnabled) { - resetPagination(); - resetAutoSelect(); - } - prevPaginationEnabledRef.current = paginationEnabled; - }, [paginationEnabled, resetPagination]); - - const sessions = mergeCurrentSessionIntoList( - accumulatedSessions, - currentSessionId, - currentSessionData, + }, ); - const visibleSessions = filterVisibleSessions(sessions); + const { + sessions, + isLoading, + isSessionsFetching, + hasNextPage, + fetchNextPage, + resetPagination, + recentlyCreatedSessionsRef, + } = useShellSessionList({ + paginationEnabled, + currentSessionId, + currentSessionData, + isOnHomepage, + paramSessionId, + }); - const sidebarSelectedSessionId = - isOnHomepage && !paramSessionId ? 
null : currentSessionId; + const stopStream = useChatStore((s) => s.stopStream); + const onStreamComplete = useChatStore((s) => s.onStreamComplete); + const isStreaming = useCopilotStore((s) => s.isStreaming); + const isCreatingSession = useCopilotStore((s) => s.isCreatingSession); + const setIsSwitchingSession = useCopilotStore((s) => s.setIsSwitchingSession); + const openInterruptModal = useCopilotStore((s) => s.openInterruptModal); - const isReadyToShowContent = isOnHomepage - ? true - : checkReadyToShowContent( - areAllSessionsLoaded, - paramSessionId, - accumulatedSessions, - isCurrentSessionLoading, - currentSessionData, - hasAutoSelectedSession, - ); + const pendingActionRef = useRef<(() => void) | null>(null); - function handleSelectSession(sessionId: string) { - // Navigate using replaceState to avoid full page reload - window.history.replaceState(null, "", `/copilot?sessionId=${sessionId}`); - // Force a re-render by updating the URL through router - router.replace(`/copilot?sessionId=${sessionId}`); + async function stopCurrentStream() { + if (!currentSessionId) return; + + setIsSwitchingSession(true); + await new Promise((resolve) => { + const unsubscribe = onStreamComplete((completedId) => { + if (completedId === currentSessionId) { + clearTimeout(timeout); + unsubscribe(); + resolve(); + } + }); + const timeout = setTimeout(() => { + unsubscribe(); + resolve(); + }, 3000); + stopStream(currentSessionId); + }); + + queryClient.invalidateQueries({ + queryKey: getGetV2GetSessionQueryKey(currentSessionId), + }); + setIsSwitchingSession(false); + } + + function selectSession(sessionId: string) { + if (sessionId === currentSessionId) return; + if (recentlyCreatedSessionsRef.current.has(sessionId)) { + queryClient.invalidateQueries({ + queryKey: getGetV2GetSessionQueryKey(sessionId), + }); + } + setUrlSessionId(sessionId, { shallow: false }); if (isMobile) handleCloseDrawer(); } - function handleNewChat() { - resetAutoSelect(); + function startNewChat() { resetPagination(); - // Invalidate and refetch sessions list to ensure newly created sessions appear queryClient.invalidateQueries({ queryKey: getGetV2ListSessionsQueryKey(), }); - window.history.replaceState(null, "", "/copilot"); - router.replace("/copilot"); + setUrlSessionId(null, { shallow: false }); if (isMobile) handleCloseDrawer(); } - function resetAutoSelect() { - hasAutoSelectedRef.current = false; - setHasAutoSelectedSession(false); + function handleSessionClick(sessionId: string) { + if (sessionId === currentSessionId) return; + + if (isStreaming) { + pendingActionRef.current = async () => { + await stopCurrentStream(); + selectSession(sessionId); + }; + openInterruptModal(pendingActionRef.current); + } else { + selectSession(sessionId); + } } - const isLoading = isSessionsLoading && accumulatedSessions.length === 0; + function handleNewChatClick() { + if (isStreaming) { + pendingActionRef.current = async () => { + await stopCurrentStream(); + startNewChat(); + }; + openInterruptModal(pendingActionRef.current); + } else { + startNewChat(); + } + } return { isMobile, @@ -156,17 +155,17 @@ export function useCopilotShell() { isLoggedIn, hasActiveSession: Boolean(currentSessionId) && (!isOnHomepage || Boolean(paramSessionId)), - isLoading, - sessions: visibleSessions, - currentSessionId: sidebarSelectedSessionId, - handleSelectSession, + isLoading: isLoading || isCreatingSession, + isCreatingSession, + sessions, + currentSessionId: urlSessionId, handleOpenDrawer, handleCloseDrawer, handleDrawerOpenChange, - handleNewChat, 
+ handleNewChatClick, + handleSessionClick, hasNextPage, isFetchingNextPage: isSessionsFetching, fetchNextPage, - isReadyToShowContent, }; } diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useShellSessionList.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useShellSessionList.ts new file mode 100644 index 0000000000..fb39a11096 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useShellSessionList.ts @@ -0,0 +1,113 @@ +import { getGetV2ListSessionsQueryKey } from "@/app/api/__generated__/endpoints/chat/chat"; +import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; +import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; +import { useChatStore } from "@/components/contextual/Chat/chat-store"; +import { useQueryClient } from "@tanstack/react-query"; +import { useEffect, useMemo, useRef } from "react"; +import { useSessionsPagination } from "./components/SessionsList/useSessionsPagination"; +import { + convertSessionDetailToSummary, + filterVisibleSessions, + mergeCurrentSessionIntoList, +} from "./helpers"; + +interface UseShellSessionListArgs { + paginationEnabled: boolean; + currentSessionId: string | null; + currentSessionData: SessionDetailResponse | null | undefined; + isOnHomepage: boolean; + paramSessionId: string | null; +} + +export function useShellSessionList({ + paginationEnabled, + currentSessionId, + currentSessionData, + isOnHomepage, + paramSessionId, +}: UseShellSessionListArgs) { + const queryClient = useQueryClient(); + const onStreamComplete = useChatStore((s) => s.onStreamComplete); + + const { + sessions: accumulatedSessions, + isLoading: isSessionsLoading, + isFetching: isSessionsFetching, + hasNextPage, + fetchNextPage, + reset: resetPagination, + } = useSessionsPagination({ + enabled: paginationEnabled, + }); + + const recentlyCreatedSessionsRef = useRef< + Map + >(new Map()); + + useEffect(() => { + if (isOnHomepage && !paramSessionId) { + queryClient.invalidateQueries({ + queryKey: getGetV2ListSessionsQueryKey(), + }); + } + }, [isOnHomepage, paramSessionId, queryClient]); + + useEffect(() => { + if (currentSessionId && currentSessionData) { + const isNewSession = + currentSessionData.updated_at === currentSessionData.created_at; + const isNotInAccumulated = !accumulatedSessions.some( + (s) => s.id === currentSessionId, + ); + if (isNewSession || isNotInAccumulated) { + const summary = convertSessionDetailToSummary(currentSessionData); + recentlyCreatedSessionsRef.current.set(currentSessionId, summary); + } + } + }, [currentSessionId, currentSessionData, accumulatedSessions]); + + useEffect(() => { + for (const sessionId of recentlyCreatedSessionsRef.current.keys()) { + if (accumulatedSessions.some((s) => s.id === sessionId)) { + recentlyCreatedSessionsRef.current.delete(sessionId); + } + } + }, [accumulatedSessions]); + + useEffect(() => { + const unsubscribe = onStreamComplete(() => { + queryClient.invalidateQueries({ + queryKey: getGetV2ListSessionsQueryKey(), + }); + }); + return unsubscribe; + }, [onStreamComplete, queryClient]); + + const sessions = useMemo( + () => + mergeCurrentSessionIntoList( + accumulatedSessions, + currentSessionId, + currentSessionData, + recentlyCreatedSessionsRef.current, + ), + [accumulatedSessions, currentSessionId, currentSessionData], + ); + + const visibleSessions = useMemo( + () => filterVisibleSessions(sessions), + 
[sessions], + ); + + const isLoading = isSessionsLoading && accumulatedSessions.length === 0; + + return { + sessions: visibleSessions, + isLoading, + isSessionsFetching, + hasNextPage, + fetchNextPage, + resetPagination, + recentlyCreatedSessionsRef, + }; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts new file mode 100644 index 0000000000..9fc97a14e3 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts @@ -0,0 +1,56 @@ +"use client"; + +import { create } from "zustand"; + +interface CopilotStoreState { + isStreaming: boolean; + isSwitchingSession: boolean; + isCreatingSession: boolean; + isInterruptModalOpen: boolean; + pendingAction: (() => void) | null; +} + +interface CopilotStoreActions { + setIsStreaming: (isStreaming: boolean) => void; + setIsSwitchingSession: (isSwitchingSession: boolean) => void; + setIsCreatingSession: (isCreating: boolean) => void; + openInterruptModal: (onConfirm: () => void) => void; + confirmInterrupt: () => void; + cancelInterrupt: () => void; +} + +type CopilotStore = CopilotStoreState & CopilotStoreActions; + +export const useCopilotStore = create((set, get) => ({ + isStreaming: false, + isSwitchingSession: false, + isCreatingSession: false, + isInterruptModalOpen: false, + pendingAction: null, + + setIsStreaming(isStreaming) { + set({ isStreaming }); + }, + + setIsSwitchingSession(isSwitchingSession) { + set({ isSwitchingSession }); + }, + + setIsCreatingSession(isCreatingSession) { + set({ isCreatingSession }); + }, + + openInterruptModal(onConfirm) { + set({ isInterruptModalOpen: true, pendingAction: onConfirm }); + }, + + confirmInterrupt() { + const { pendingAction } = get(); + set({ isInterruptModalOpen: false, pendingAction: null }); + if (pendingAction) pendingAction(); + }, + + cancelInterrupt() { + set({ isInterruptModalOpen: false, pendingAction: null }); + }, +})); diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts index a5818f0a9f..692a5741f4 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts @@ -1,28 +1,5 @@ import type { User } from "@supabase/supabase-js"; -export type PageState = - | { type: "welcome" } - | { type: "newChat" } - | { type: "creating"; prompt: string } - | { type: "chat"; sessionId: string; initialPrompt?: string }; - -export function getInitialPromptFromState( - pageState: PageState, - storedInitialPrompt: string | undefined, -) { - if (storedInitialPrompt) return storedInitialPrompt; - if (pageState.type === "creating") return pageState.prompt; - if (pageState.type === "chat") return pageState.initialPrompt; -} - -export function shouldResetToWelcome(pageState: PageState) { - return ( - pageState.type !== "newChat" && - pageState.type !== "creating" && - pageState.type !== "welcome" - ); -} - export function getGreetingName(user?: User | null): string { if (!user) return "there"; const metadata = user.user_metadata as Record | undefined; diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx index 0f40de8f25..89cf72e2ba 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx @@ -1,11 +1,6 @@ import type 
{ ReactNode } from "react"; -import { NewChatProvider } from "./NewChatContext"; import { CopilotShell } from "./components/CopilotShell/CopilotShell"; export default function CopilotLayout({ children }: { children: ReactNode }) { - return ( - - {children} - - ); + return {children}; } diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx index 3bbafd087b..104b238895 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx @@ -1,22 +1,25 @@ "use client"; -import { Skeleton } from "@/components/__legacy__/ui/skeleton"; import { Button } from "@/components/atoms/Button/Button"; +import { Skeleton } from "@/components/atoms/Skeleton/Skeleton"; import { Text } from "@/components/atoms/Text/Text"; import { Chat } from "@/components/contextual/Chat/Chat"; import { ChatInput } from "@/components/contextual/Chat/components/ChatInput/ChatInput"; -import { ChatLoader } from "@/components/contextual/Chat/components/ChatLoader/ChatLoader"; import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { useCopilotStore } from "./copilot-page-store"; import { useCopilotPage } from "./useCopilotPage"; export default function CopilotPage() { const { state, handlers } = useCopilotPage(); + const isInterruptModalOpen = useCopilotStore((s) => s.isInterruptModalOpen); + const confirmInterrupt = useCopilotStore((s) => s.confirmInterrupt); + const cancelInterrupt = useCopilotStore((s) => s.cancelInterrupt); const { greetingName, quickActions, isLoading, - pageState, - isNewChatModalOpen, + hasSession, + initialPrompt, isReady, } = state; const { @@ -24,24 +27,16 @@ export default function CopilotPage() { startChatWithPrompt, handleSessionNotFound, handleStreamingChange, - handleCancelNewChat, - proceedWithNewChat, - handleNewChatModalOpen, } = handlers; - if (!isReady) { - return null; - } + if (!isReady) return null; - // Show Chat when we have an active session - if (pageState.type === "chat") { + if (hasSession) { return (
@@ -49,31 +44,33 @@ export default function CopilotPage() { title="Interrupt current chat?" styling={{ maxWidth: 300, width: "100%" }} controlled={{ - isOpen: isNewChatModalOpen, - set: handleNewChatModalOpen, + isOpen: isInterruptModalOpen, + set: (open) => { + if (!open) cancelInterrupt(); + }, }} - onClose={handleCancelNewChat} + onClose={cancelInterrupt} >
The current chat response will be interrupted. Are you sure you - want to start a new chat? + want to continue?
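The interrupt dialog above is no longer driven by local component state: useCopilotShell and useCopilotPage stash the pending action in the new copilot-page-store via openInterruptModal, and the dialog's confirm/cancel buttons (stripped from this hunk) presumably call confirmInterrupt / cancelInterrupt. A minimal sketch of the guard pattern, with the function name as an assumption; the store API comes from copilot-page-store.ts below:

import { useCopilotStore } from "./copilot-page-store";

// Hypothetical guard: run `action` immediately, or park it behind the
// interrupt dialog while a response is still streaming.
function runOrConfirmInterrupt(action: () => void) {
  const { isStreaming, openInterruptModal } = useCopilotStore.getState();
  if (isStreaming) {
    openInterruptModal(action); // confirmInterrupt() later invokes it
  } else {
    action();
  }
}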
@@ -83,34 +80,6 @@ export default function CopilotPage() { ); } - if (pageState.type === "newChat") { - return ( -
-
- - - Loading your chats... - -
-
- ); - } - - // Show loading state while creating session and sending first message - if (pageState.type === "creating") { - return ( -
-
- - - Loading your chats... - -
-
- ); - } - - // Show Welcome screen return (
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts index cb13137432..e4713cd24a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts @@ -1,86 +1,44 @@ -import { postV2CreateSession } from "@/app/api/__generated__/endpoints/chat/chat"; +import { + getGetV2ListSessionsQueryKey, + postV2CreateSession, +} from "@/app/api/__generated__/endpoints/chat/chat"; import { useToast } from "@/components/molecules/Toast/use-toast"; import { getHomepageRoute } from "@/lib/constants"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; +import { useOnboarding } from "@/providers/onboarding/onboarding-provider"; import { Flag, type FlagValues, useGetFlag, } from "@/services/feature-flags/use-get-flag"; +import { SessionKey, sessionStorage } from "@/services/storage/session-storage"; import * as Sentry from "@sentry/nextjs"; +import { useQueryClient } from "@tanstack/react-query"; import { useFlags } from "launchdarkly-react-client-sdk"; import { useRouter } from "next/navigation"; -import { useEffect, useReducer } from "react"; -import { useNewChat } from "./NewChatContext"; -import { getGreetingName, getQuickActions, type PageState } from "./helpers"; -import { useCopilotURLState } from "./useCopilotURLState"; - -type CopilotState = { - pageState: PageState; - isStreaming: boolean; - isNewChatModalOpen: boolean; - initialPrompts: Record; - previousSessionId: string | null; -}; - -type CopilotAction = - | { type: "setPageState"; pageState: PageState } - | { type: "setStreaming"; isStreaming: boolean } - | { type: "setNewChatModalOpen"; isOpen: boolean } - | { type: "setInitialPrompt"; sessionId: string; prompt: string } - | { type: "setPreviousSessionId"; sessionId: string | null }; - -function isSamePageState(next: PageState, current: PageState) { - if (next.type !== current.type) return false; - if (next.type === "creating" && current.type === "creating") { - return next.prompt === current.prompt; - } - if (next.type === "chat" && current.type === "chat") { - return ( - next.sessionId === current.sessionId && - next.initialPrompt === current.initialPrompt - ); - } - return true; -} - -function copilotReducer( - state: CopilotState, - action: CopilotAction, -): CopilotState { - if (action.type === "setPageState") { - if (isSamePageState(action.pageState, state.pageState)) return state; - return { ...state, pageState: action.pageState }; - } - if (action.type === "setStreaming") { - if (action.isStreaming === state.isStreaming) return state; - return { ...state, isStreaming: action.isStreaming }; - } - if (action.type === "setNewChatModalOpen") { - if (action.isOpen === state.isNewChatModalOpen) return state; - return { ...state, isNewChatModalOpen: action.isOpen }; - } - if (action.type === "setInitialPrompt") { - if (state.initialPrompts[action.sessionId] === action.prompt) return state; - return { - ...state, - initialPrompts: { - ...state.initialPrompts, - [action.sessionId]: action.prompt, - }, - }; - } - if (action.type === "setPreviousSessionId") { - if (state.previousSessionId === action.sessionId) return state; - return { ...state, previousSessionId: action.sessionId }; - } - return state; -} +import { useEffect } from "react"; +import { useCopilotStore } from "./copilot-page-store"; +import { getGreetingName, getQuickActions } from "./helpers"; +import { useCopilotSessionId 
} from "./useCopilotSessionId"; export function useCopilotPage() { const router = useRouter(); + const queryClient = useQueryClient(); const { user, isLoggedIn, isUserLoading } = useSupabase(); const { toast } = useToast(); + const { completeStep } = useOnboarding(); + + const { urlSessionId, setUrlSessionId } = useCopilotSessionId(); + const setIsStreaming = useCopilotStore((s) => s.setIsStreaming); + const isCreating = useCopilotStore((s) => s.isCreatingSession); + const setIsCreating = useCopilotStore((s) => s.setIsCreatingSession); + + // Complete VISIT_COPILOT onboarding step to grant $5 welcome bonus + useEffect(() => { + if (isLoggedIn) { + completeStep("VISIT_COPILOT"); + } + }, [completeStep, isLoggedIn]); const isChatEnabled = useGetFlag(Flag.CHAT); const flags = useFlags(); @@ -91,86 +49,27 @@ export function useCopilotPage() { const isFlagReady = !isLaunchDarklyConfigured || flags[Flag.CHAT] !== undefined; - const [state, dispatch] = useReducer(copilotReducer, { - pageState: { type: "welcome" }, - isStreaming: false, - isNewChatModalOpen: false, - initialPrompts: {}, - previousSessionId: null, - }); - - const newChatContext = useNewChat(); const greetingName = getGreetingName(user); const quickActions = getQuickActions(); - function setPageState(pageState: PageState) { - dispatch({ type: "setPageState", pageState }); - } + const hasSession = Boolean(urlSessionId); + const initialPrompt = urlSessionId + ? getInitialPrompt(urlSessionId) + : undefined; - function setInitialPrompt(sessionId: string, prompt: string) { - dispatch({ type: "setInitialPrompt", sessionId, prompt }); - } - - function setPreviousSessionId(sessionId: string | null) { - dispatch({ type: "setPreviousSessionId", sessionId }); - } - - const { setUrlSessionId } = useCopilotURLState({ - pageState: state.pageState, - initialPrompts: state.initialPrompts, - previousSessionId: state.previousSessionId, - setPageState, - setInitialPrompt, - setPreviousSessionId, - }); - - useEffect( - function registerNewChatHandler() { - if (!newChatContext) return; - newChatContext.setOnNewChatClick(handleNewChatClick); - return function cleanup() { - newChatContext.setOnNewChatClick(undefined); - }; - }, - [newChatContext, handleNewChatClick], - ); - - useEffect( - function transitionNewChatToWelcome() { - if (state.pageState.type === "newChat") { - function setWelcomeState() { - dispatch({ type: "setPageState", pageState: { type: "welcome" } }); - } - - const timer = setTimeout(setWelcomeState, 300); - - return function cleanup() { - clearTimeout(timer); - }; - } - }, - [state.pageState.type], - ); - - useEffect( - function ensureAccess() { - if (!isFlagReady) return; - if (isChatEnabled === false) { - router.replace(homepageRoute); - } - }, - [homepageRoute, isChatEnabled, isFlagReady, router], - ); + useEffect(() => { + if (!isFlagReady) return; + if (isChatEnabled === false) { + router.replace(homepageRoute); + } + }, [homepageRoute, isChatEnabled, isFlagReady, router]); async function startChatWithPrompt(prompt: string) { if (!prompt?.trim()) return; - if (state.pageState.type === "creating") return; + if (isCreating) return; const trimmedPrompt = prompt.trim(); - dispatch({ - type: "setPageState", - pageState: { type: "creating", prompt: trimmedPrompt }, - }); + setIsCreating(true); try { const sessionResponse = await postV2CreateSession({ @@ -182,23 +81,19 @@ export function useCopilotPage() { } const sessionId = sessionResponse.data.id; + setInitialPrompt(sessionId, trimmedPrompt); - dispatch({ - type: "setInitialPrompt", - 
sessionId, - prompt: trimmedPrompt, + await queryClient.invalidateQueries({ + queryKey: getGetV2ListSessionsQueryKey(), }); - await setUrlSessionId(sessionId, { shallow: false }); - dispatch({ - type: "setPageState", - pageState: { type: "chat", sessionId, initialPrompt: trimmedPrompt }, - }); + await setUrlSessionId(sessionId, { shallow: true }); } catch (error) { console.error("[CopilotPage] Failed to start chat:", error); toast({ title: "Failed to start chat", variant: "destructive" }); Sentry.captureException(error); - dispatch({ type: "setPageState", pageState: { type: "welcome" } }); + } finally { + setIsCreating(false); } } @@ -211,37 +106,7 @@ export function useCopilotPage() { } function handleStreamingChange(isStreamingValue: boolean) { - dispatch({ type: "setStreaming", isStreaming: isStreamingValue }); - } - - async function proceedWithNewChat() { - dispatch({ type: "setNewChatModalOpen", isOpen: false }); - if (newChatContext?.performNewChat) { - newChatContext.performNewChat(); - return; - } - try { - await setUrlSessionId(null, { shallow: false }); - } catch (error) { - console.error("[CopilotPage] Failed to clear session:", error); - } - router.replace("/copilot"); - } - - function handleCancelNewChat() { - dispatch({ type: "setNewChatModalOpen", isOpen: false }); - } - - function handleNewChatModalOpen(isOpen: boolean) { - dispatch({ type: "setNewChatModalOpen", isOpen }); - } - - function handleNewChatClick() { - if (state.isStreaming) { - dispatch({ type: "setNewChatModalOpen", isOpen: true }); - } else { - proceedWithNewChat(); - } + setIsStreaming(isStreamingValue); } return { @@ -249,8 +114,8 @@ export function useCopilotPage() { greetingName, quickActions, isLoading: isUserLoading, - pageState: state.pageState, - isNewChatModalOpen: state.isNewChatModalOpen, + hasSession, + initialPrompt, isReady: isFlagReady && isChatEnabled !== false && isLoggedIn, }, handlers: { @@ -258,9 +123,32 @@ export function useCopilotPage() { startChatWithPrompt, handleSessionNotFound, handleStreamingChange, - handleCancelNewChat, - proceedWithNewChat, - handleNewChatModalOpen, }, }; } + +function getInitialPrompt(sessionId: string): string | undefined { + try { + const prompts = JSON.parse( + sessionStorage.get(SessionKey.CHAT_INITIAL_PROMPTS) || "{}", + ); + return prompts[sessionId]; + } catch { + return undefined; + } +} + +function setInitialPrompt(sessionId: string, prompt: string): void { + try { + const prompts = JSON.parse( + sessionStorage.get(SessionKey.CHAT_INITIAL_PROMPTS) || "{}", + ); + prompts[sessionId] = prompt; + sessionStorage.set( + SessionKey.CHAT_INITIAL_PROMPTS, + JSON.stringify(prompts), + ); + } catch { + // Ignore storage errors + } +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotSessionId.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotSessionId.ts new file mode 100644 index 0000000000..87f9b7d3ae --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotSessionId.ts @@ -0,0 +1,10 @@ +import { parseAsString, useQueryState } from "nuqs"; + +export function useCopilotSessionId() { + const [urlSessionId, setUrlSessionId] = useQueryState( + "sessionId", + parseAsString, + ); + + return { urlSessionId, setUrlSessionId }; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotURLState.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotURLState.ts deleted file mode 100644 index 5e37e29a15..0000000000 --- 
a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotURLState.ts +++ /dev/null @@ -1,80 +0,0 @@ -import { parseAsString, useQueryState } from "nuqs"; -import { useLayoutEffect } from "react"; -import { - getInitialPromptFromState, - type PageState, - shouldResetToWelcome, -} from "./helpers"; - -interface UseCopilotUrlStateArgs { - pageState: PageState; - initialPrompts: Record; - previousSessionId: string | null; - setPageState: (pageState: PageState) => void; - setInitialPrompt: (sessionId: string, prompt: string) => void; - setPreviousSessionId: (sessionId: string | null) => void; -} - -export function useCopilotURLState({ - pageState, - initialPrompts, - previousSessionId, - setPageState, - setInitialPrompt, - setPreviousSessionId, -}: UseCopilotUrlStateArgs) { - const [urlSessionId, setUrlSessionId] = useQueryState( - "sessionId", - parseAsString, - ); - - function syncSessionFromUrl() { - if (urlSessionId) { - if (pageState.type === "chat" && pageState.sessionId === urlSessionId) { - setPreviousSessionId(urlSessionId); - return; - } - - const storedInitialPrompt = initialPrompts[urlSessionId]; - const currentInitialPrompt = getInitialPromptFromState( - pageState, - storedInitialPrompt, - ); - - if (currentInitialPrompt) { - setInitialPrompt(urlSessionId, currentInitialPrompt); - } - - setPageState({ - type: "chat", - sessionId: urlSessionId, - initialPrompt: currentInitialPrompt, - }); - setPreviousSessionId(urlSessionId); - return; - } - - const wasInChat = previousSessionId !== null && pageState.type === "chat"; - setPreviousSessionId(null); - if (wasInChat) { - setPageState({ type: "newChat" }); - return; - } - - if (shouldResetToWelcome(pageState)) { - setPageState({ type: "welcome" }); - } - } - - useLayoutEffect(syncSessionFromUrl, [ - urlSessionId, - pageState.type, - previousSessionId, - initialPrompts, - ]); - - return { - urlSessionId, - setUrlSessionId, - }; -} diff --git a/autogpt_platform/frontend/src/app/(platform)/layout.tsx b/autogpt_platform/frontend/src/app/(platform)/layout.tsx index f5e3f3b99b..048110f8b2 100644 --- a/autogpt_platform/frontend/src/app/(platform)/layout.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/layout.tsx @@ -1,10 +1,12 @@ import { Navbar } from "@/components/layout/Navbar/Navbar"; +import { NetworkStatusMonitor } from "@/services/network-status/NetworkStatusMonitor"; import { ReactNode } from "react"; import { AdminImpersonationBanner } from "./admin/components/AdminImpersonationBanner"; export default function PlatformLayout({ children }: { children: ReactNode }) { return (
+
{children}
diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index b4e2bc80bd..d1ecd91702 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -4594,6 +4594,7 @@ "AGENT_NEW_RUN", "AGENT_INPUT", "CONGRATS", + "VISIT_COPILOT", "MARKETPLACE_VISIT", "BUILDER_OPEN" ], @@ -8754,6 +8755,7 @@ "AGENT_NEW_RUN", "AGENT_INPUT", "CONGRATS", + "VISIT_COPILOT", "GET_RESULTS", "MARKETPLACE_VISIT", "MARKETPLACE_ADD_AGENT", diff --git a/autogpt_platform/frontend/src/app/providers.tsx b/autogpt_platform/frontend/src/app/providers.tsx index 8ea199abc8..267814e7c2 100644 --- a/autogpt_platform/frontend/src/app/providers.tsx +++ b/autogpt_platform/frontend/src/app/providers.tsx @@ -6,28 +6,40 @@ import { BackendAPIProvider } from "@/lib/autogpt-server-api/context"; import { getQueryClient } from "@/lib/react-query/queryClient"; import CredentialsProvider from "@/providers/agent-credentials/credentials-provider"; import OnboardingProvider from "@/providers/onboarding/onboarding-provider"; +import { + PostHogPageViewTracker, + PostHogProvider, + PostHogUserTracker, +} from "@/providers/posthog/posthog-provider"; import { LaunchDarklyProvider } from "@/services/feature-flags/feature-flag-provider"; import { QueryClientProvider } from "@tanstack/react-query"; import { ThemeProvider, ThemeProviderProps } from "next-themes"; import { NuqsAdapter } from "nuqs/adapters/next/app"; +import { Suspense } from "react"; export function Providers({ children, ...props }: ThemeProviderProps) { const queryClient = getQueryClient(); return ( - - - - - - - {children} - - - - - + + + + + + + + + + + + {children} + + + + + + ); diff --git a/autogpt_platform/frontend/src/components/atoms/Skeleton/Skeleton.tsx b/autogpt_platform/frontend/src/components/atoms/Skeleton/Skeleton.tsx new file mode 100644 index 0000000000..4789e281ce --- /dev/null +++ b/autogpt_platform/frontend/src/components/atoms/Skeleton/Skeleton.tsx @@ -0,0 +1,14 @@ +import { cn } from "@/lib/utils"; + +interface Props extends React.HTMLAttributes { + className?: string; +} + +export function Skeleton({ className, ...props }: Props) { + return ( +
+ ); +} diff --git a/autogpt_platform/frontend/src/components/atoms/Skeleton/skeleton.stories.tsx b/autogpt_platform/frontend/src/components/atoms/Skeleton/skeleton.stories.tsx index 04d87a6e0e..69bb7c3440 100644 --- a/autogpt_platform/frontend/src/components/atoms/Skeleton/skeleton.stories.tsx +++ b/autogpt_platform/frontend/src/components/atoms/Skeleton/skeleton.stories.tsx @@ -1,4 +1,4 @@ -import { Skeleton } from "@/components/__legacy__/ui/skeleton"; +import { Skeleton } from "./Skeleton"; import type { Meta, StoryObj } from "@storybook/nextjs"; const meta: Meta = { diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx index ba7584765d..ada8c26231 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx @@ -1,16 +1,17 @@ "use client"; +import { useCopilotSessionId } from "@/app/(platform)/copilot/useCopilotSessionId"; +import { useCopilotStore } from "@/app/(platform)/copilot/copilot-page-store"; +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; import { Text } from "@/components/atoms/Text/Text"; import { cn } from "@/lib/utils"; import { useEffect, useRef } from "react"; import { ChatContainer } from "./components/ChatContainer/ChatContainer"; import { ChatErrorState } from "./components/ChatErrorState/ChatErrorState"; -import { ChatLoader } from "./components/ChatLoader/ChatLoader"; import { useChat } from "./useChat"; export interface ChatProps { className?: string; - urlSessionId?: string | null; initialPrompt?: string; onSessionNotFound?: () => void; onStreamingChange?: (isStreaming: boolean) => void; @@ -18,12 +19,13 @@ export interface ChatProps { export function Chat({ className, - urlSessionId, initialPrompt, onSessionNotFound, onStreamingChange, }: ChatProps) { + const { urlSessionId } = useCopilotSessionId(); const hasHandledNotFoundRef = useRef(false); + const isSwitchingSession = useCopilotStore((s) => s.isSwitchingSession); const { messages, isLoading, @@ -33,49 +35,59 @@ export function Chat({ sessionId, createSession, showLoader, + startPollingForOperation, } = useChat({ urlSessionId }); - useEffect( - function handleMissingSession() { - if (!onSessionNotFound) return; - if (!urlSessionId) return; - if (!isSessionNotFound || isLoading || isCreating) return; - if (hasHandledNotFoundRef.current) return; - hasHandledNotFoundRef.current = true; - onSessionNotFound(); - }, - [onSessionNotFound, urlSessionId, isSessionNotFound, isLoading, isCreating], - ); + useEffect(() => { + if (!onSessionNotFound) return; + if (!urlSessionId) return; + if (!isSessionNotFound || isLoading || isCreating) return; + if (hasHandledNotFoundRef.current) return; + hasHandledNotFoundRef.current = true; + onSessionNotFound(); + }, [ + onSessionNotFound, + urlSessionId, + isSessionNotFound, + isLoading, + isCreating, + ]); + + const shouldShowLoader = + (showLoader && (isLoading || isCreating)) || isSwitchingSession; return (
{/* Main Content */}
{/* Loading State */} - {showLoader && (isLoading || isCreating) && ( + {shouldShowLoader && (
-
- +
+ - Loading your chats... + {isSwitchingSession + ? "Switching chat..." + : "Loading your chat..."}
)} {/* Error State */} - {error && !isLoading && ( + {error && !isLoading && !isSwitchingSession && ( )} {/* Session Content */} - {sessionId && !isLoading && !error && ( + {sessionId && !isLoading && !error && !isSwitchingSession && ( )}
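The session-switching hunks above (stopCurrentStream in useCopilotShell, the list refresh in useShellSessionList) all rely on the completion-callback API of the chat store defined next. A condensed sketch of that pattern — stop a session's stream and wait for the store to report completion, with a timeout fallback; the helper name is hypothetical and the 3-second timeout mirrors the value used in useCopilotShell:

import { useChatStore } from "@/components/contextual/Chat/chat-store";

// Stop the active stream for a session and resolve once the store fires its
// completion callback, or after a timeout if it never does.
async function stopStreamAndWait(sessionId: string, timeoutMs = 3000) {
  const { stopStream, onStreamComplete } = useChatStore.getState();
  await new Promise<void>((resolve) => {
    const unsubscribe = onStreamComplete((completedId) => {
      if (completedId !== sessionId) return;
      clearTimeout(timer);
      unsubscribe();
      resolve();
    });
    const timer = setTimeout(() => {
      unsubscribe();
      resolve();
    }, timeoutMs);
    stopStream(sessionId);
  });
}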
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/chat-store.ts b/autogpt_platform/frontend/src/components/contextual/Chat/chat-store.ts new file mode 100644 index 0000000000..8229630e5d --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/chat-store.ts @@ -0,0 +1,289 @@ +"use client"; + +import { create } from "zustand"; +import type { + ActiveStream, + StreamChunk, + StreamCompleteCallback, + StreamResult, + StreamStatus, +} from "./chat-types"; +import { executeStream } from "./stream-executor"; + +const COMPLETED_STREAM_TTL = 5 * 60 * 1000; // 5 minutes + +interface ChatStoreState { + activeStreams: Map; + completedStreams: Map; + activeSessions: Set; + streamCompleteCallbacks: Set; +} + +interface ChatStoreActions { + startStream: ( + sessionId: string, + message: string, + isUserMessage: boolean, + context?: { url: string; content: string }, + onChunk?: (chunk: StreamChunk) => void, + ) => Promise; + stopStream: (sessionId: string) => void; + subscribeToStream: ( + sessionId: string, + onChunk: (chunk: StreamChunk) => void, + skipReplay?: boolean, + ) => () => void; + getStreamStatus: (sessionId: string) => StreamStatus; + getCompletedStream: (sessionId: string) => StreamResult | undefined; + clearCompletedStream: (sessionId: string) => void; + isStreaming: (sessionId: string) => boolean; + registerActiveSession: (sessionId: string) => void; + unregisterActiveSession: (sessionId: string) => void; + isSessionActive: (sessionId: string) => boolean; + onStreamComplete: (callback: StreamCompleteCallback) => () => void; +} + +type ChatStore = ChatStoreState & ChatStoreActions; + +function notifyStreamComplete( + callbacks: Set, + sessionId: string, +) { + for (const callback of callbacks) { + try { + callback(sessionId); + } catch (err) { + console.warn("[ChatStore] Stream complete callback error:", err); + } + } +} + +function cleanupExpiredStreams( + completedStreams: Map, +): Map { + const now = Date.now(); + const cleaned = new Map(completedStreams); + for (const [sessionId, result] of cleaned) { + if (now - result.completedAt > COMPLETED_STREAM_TTL) { + cleaned.delete(sessionId); + } + } + return cleaned; +} + +export const useChatStore = create((set, get) => ({ + activeStreams: new Map(), + completedStreams: new Map(), + activeSessions: new Set(), + streamCompleteCallbacks: new Set(), + + startStream: async function startStream( + sessionId, + message, + isUserMessage, + context, + onChunk, + ) { + const state = get(); + const newActiveStreams = new Map(state.activeStreams); + let newCompletedStreams = new Map(state.completedStreams); + const callbacks = state.streamCompleteCallbacks; + + const existingStream = newActiveStreams.get(sessionId); + if (existingStream) { + existingStream.abortController.abort(); + const normalizedStatus = + existingStream.status === "streaming" + ? 
"completed" + : existingStream.status; + const result: StreamResult = { + sessionId, + status: normalizedStatus, + chunks: existingStream.chunks, + completedAt: Date.now(), + error: existingStream.error, + }; + newCompletedStreams.set(sessionId, result); + newActiveStreams.delete(sessionId); + newCompletedStreams = cleanupExpiredStreams(newCompletedStreams); + if (normalizedStatus === "completed" || normalizedStatus === "error") { + notifyStreamComplete(callbacks, sessionId); + } + } + + const abortController = new AbortController(); + const initialCallbacks = new Set<(chunk: StreamChunk) => void>(); + if (onChunk) initialCallbacks.add(onChunk); + + const stream: ActiveStream = { + sessionId, + abortController, + status: "streaming", + startedAt: Date.now(), + chunks: [], + onChunkCallbacks: initialCallbacks, + }; + + newActiveStreams.set(sessionId, stream); + set({ + activeStreams: newActiveStreams, + completedStreams: newCompletedStreams, + }); + + try { + await executeStream(stream, message, isUserMessage, context); + } finally { + if (onChunk) stream.onChunkCallbacks.delete(onChunk); + if (stream.status !== "streaming") { + const currentState = get(); + const finalActiveStreams = new Map(currentState.activeStreams); + let finalCompletedStreams = new Map(currentState.completedStreams); + + const storedStream = finalActiveStreams.get(sessionId); + if (storedStream === stream) { + const result: StreamResult = { + sessionId, + status: stream.status, + chunks: stream.chunks, + completedAt: Date.now(), + error: stream.error, + }; + finalCompletedStreams.set(sessionId, result); + finalActiveStreams.delete(sessionId); + finalCompletedStreams = cleanupExpiredStreams(finalCompletedStreams); + set({ + activeStreams: finalActiveStreams, + completedStreams: finalCompletedStreams, + }); + if (stream.status === "completed" || stream.status === "error") { + notifyStreamComplete( + currentState.streamCompleteCallbacks, + sessionId, + ); + } + } + } + } + }, + + stopStream: function stopStream(sessionId) { + const state = get(); + const stream = state.activeStreams.get(sessionId); + if (!stream) return; + + stream.abortController.abort(); + stream.status = "completed"; + + const newActiveStreams = new Map(state.activeStreams); + let newCompletedStreams = new Map(state.completedStreams); + + const result: StreamResult = { + sessionId, + status: stream.status, + chunks: stream.chunks, + completedAt: Date.now(), + error: stream.error, + }; + newCompletedStreams.set(sessionId, result); + newActiveStreams.delete(sessionId); + newCompletedStreams = cleanupExpiredStreams(newCompletedStreams); + + set({ + activeStreams: newActiveStreams, + completedStreams: newCompletedStreams, + }); + + notifyStreamComplete(state.streamCompleteCallbacks, sessionId); + }, + + subscribeToStream: function subscribeToStream( + sessionId, + onChunk, + skipReplay = false, + ) { + const state = get(); + const stream = state.activeStreams.get(sessionId); + + if (stream) { + if (!skipReplay) { + for (const chunk of stream.chunks) { + onChunk(chunk); + } + } + + stream.onChunkCallbacks.add(onChunk); + + return function unsubscribe() { + stream.onChunkCallbacks.delete(onChunk); + }; + } + + return function noop() {}; + }, + + getStreamStatus: function getStreamStatus(sessionId) { + const { activeStreams, completedStreams } = get(); + + const active = activeStreams.get(sessionId); + if (active) return active.status; + + const completed = completedStreams.get(sessionId); + if (completed) return completed.status; + + return "idle"; + }, + + 
getCompletedStream: function getCompletedStream(sessionId) { + return get().completedStreams.get(sessionId); + }, + + clearCompletedStream: function clearCompletedStream(sessionId) { + const state = get(); + if (!state.completedStreams.has(sessionId)) return; + + const newCompletedStreams = new Map(state.completedStreams); + newCompletedStreams.delete(sessionId); + set({ completedStreams: newCompletedStreams }); + }, + + isStreaming: function isStreaming(sessionId) { + const stream = get().activeStreams.get(sessionId); + return stream?.status === "streaming"; + }, + + registerActiveSession: function registerActiveSession(sessionId) { + const state = get(); + if (state.activeSessions.has(sessionId)) return; + + const newActiveSessions = new Set(state.activeSessions); + newActiveSessions.add(sessionId); + set({ activeSessions: newActiveSessions }); + }, + + unregisterActiveSession: function unregisterActiveSession(sessionId) { + const state = get(); + if (!state.activeSessions.has(sessionId)) return; + + const newActiveSessions = new Set(state.activeSessions); + newActiveSessions.delete(sessionId); + set({ activeSessions: newActiveSessions }); + }, + + isSessionActive: function isSessionActive(sessionId) { + return get().activeSessions.has(sessionId); + }, + + onStreamComplete: function onStreamComplete(callback) { + const state = get(); + const newCallbacks = new Set(state.streamCompleteCallbacks); + newCallbacks.add(callback); + set({ streamCompleteCallbacks: newCallbacks }); + + return function unsubscribe() { + const currentState = get(); + const cleanedCallbacks = new Set(currentState.streamCompleteCallbacks); + cleanedCallbacks.delete(callback); + set({ streamCompleteCallbacks: cleanedCallbacks }); + }; + }, +})); diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/chat-types.ts b/autogpt_platform/frontend/src/components/contextual/Chat/chat-types.ts new file mode 100644 index 0000000000..8c8aa7b704 --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/chat-types.ts @@ -0,0 +1,94 @@ +import type { ToolArguments, ToolResult } from "@/types/chat"; + +export type StreamStatus = "idle" | "streaming" | "completed" | "error"; + +export interface StreamChunk { + type: + | "text_chunk" + | "text_ended" + | "tool_call" + | "tool_call_start" + | "tool_response" + | "login_needed" + | "need_login" + | "credentials_needed" + | "error" + | "usage" + | "stream_end"; + timestamp?: string; + content?: string; + message?: string; + code?: string; + details?: Record; + tool_id?: string; + tool_name?: string; + arguments?: ToolArguments; + result?: ToolResult; + success?: boolean; + idx?: number; + session_id?: string; + agent_info?: { + graph_id: string; + name: string; + trigger_type: string; + }; + provider?: string; + provider_name?: string; + credential_type?: string; + scopes?: string[]; + title?: string; + [key: string]: unknown; +} + +export type VercelStreamChunk = + | { type: "start"; messageId: string } + | { type: "finish" } + | { type: "text-start"; id: string } + | { type: "text-delta"; id: string; delta: string } + | { type: "text-end"; id: string } + | { type: "tool-input-start"; toolCallId: string; toolName: string } + | { + type: "tool-input-available"; + toolCallId: string; + toolName: string; + input: Record; + } + | { + type: "tool-output-available"; + toolCallId: string; + toolName?: string; + output: unknown; + success?: boolean; + } + | { + type: "usage"; + promptTokens: number; + completionTokens: number; + totalTokens: number; + } + | { + 
type: "error"; + errorText: string; + code?: string; + details?: Record<string, unknown>; + }; + +export interface ActiveStream { + sessionId: string; + abortController: AbortController; + status: StreamStatus; + startedAt: number; + chunks: StreamChunk[]; + error?: Error; + onChunkCallbacks: Set<(chunk: StreamChunk) => void>; +} + +export interface StreamResult { + sessionId: string; + status: StreamStatus; + chunks: StreamChunk[]; + completedAt: number; + error?: Error; +} + +export type StreamCompleteCallback = (sessionId: string) => void; diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx index 17748f8dbc..dec221338a 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx @@ -4,6 +4,7 @@ import { Text } from "@/components/atoms/Text/Text"; import { Dialog } from "@/components/molecules/Dialog/Dialog"; import { useBreakpoint } from "@/lib/hooks/useBreakpoint"; import { cn } from "@/lib/utils"; +import { GlobeHemisphereEastIcon } from "@phosphor-icons/react"; import { useEffect } from "react"; import { ChatInput } from "../ChatInput/ChatInput"; import { MessageList } from "../MessageList/MessageList"; @@ -15,6 +16,7 @@ export interface ChatContainerProps { initialPrompt?: string; className?: string; onStreamingChange?: (isStreaming: boolean) => void; + onOperationStarted?: () => void; } export function ChatContainer({ @@ -23,6 +25,7 @@ export function ChatContainer({ initialPrompt, className, onStreamingChange, + onOperationStarted, }: ChatContainerProps) { const { messages, @@ -37,6 +40,7 @@ sessionId, initialMessages, initialPrompt, + onOperationStarted, }); useEffect(() => { @@ -55,24 +59,37 @@ export function ChatContainer({ )} > + + + Service unavailable + +
+ } controlled={{ isOpen: isRegionBlockedModalOpen, set: handleRegionModalOpenChange, }} onClose={handleRegionModalClose} + styling={{ maxWidth: 550, width: "100%", minWidth: "auto" }} > -
+
- This model is not available in your region. Please connect via VPN - and try again. + The AutoGPT AI model is not available in your region or your + connection is blocking it. Please try again with a different + connection. -
+
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts index 791cf046d5..82e9b05e88 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts @@ -1,5 +1,5 @@ import { toast } from "sonner"; -import { StreamChunk } from "../../useChatStream"; +import type { StreamChunk } from "../../chat-types"; import type { HandlerDependencies } from "./handlers"; import { handleError, diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts index 96198a0386..f3cac01f96 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts @@ -22,6 +22,7 @@ export interface HandlerDependencies { setIsStreamingInitiated: Dispatch>; setIsRegionBlockedModalOpen: Dispatch>; sessionId: string; + onOperationStarted?: () => void; } export function isRegionBlockedError(chunk: StreamChunk): boolean { @@ -48,6 +49,15 @@ export function handleTextEnded( const completedText = deps.streamingChunksRef.current.join(""); if (completedText.trim()) { deps.setMessages((prev) => { + // Check if this exact message already exists to prevent duplicates + const exists = prev.some( + (msg) => + msg.type === "message" && + msg.role === "assistant" && + msg.content === completedText, + ); + if (exists) return prev; + const assistantMessage: ChatMessageData = { type: "message", role: "assistant", @@ -154,6 +164,11 @@ export function handleToolResponse( } return; } + // Trigger polling when operation_started is received + if (responseMessage.type === "operation_started") { + deps.onOperationStarted?.(); + } + deps.setMessages((prev) => { const toolCallIndex = prev.findIndex( (msg) => msg.type === "tool_call" && msg.toolId === chunk.tool_id, @@ -203,13 +218,24 @@ export function handleStreamEnd( ]); } if (completedContent.trim()) { - const assistantMessage: ChatMessageData = { - type: "message", - role: "assistant", - content: completedContent, - timestamp: new Date(), - }; - deps.setMessages((prev) => [...prev, assistantMessage]); + deps.setMessages((prev) => { + // Check if this exact message already exists to prevent duplicates + const exists = prev.some( + (msg) => + msg.type === "message" && + msg.role === "assistant" && + msg.content === completedContent, + ); + if (exists) return prev; + + const assistantMessage: ChatMessageData = { + type: "message", + role: "assistant", + content: completedContent, + timestamp: new Date(), + }; + return [...prev, assistantMessage]; + }); } deps.setStreamingChunks([]); deps.streamingChunksRef.current = []; diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts index 9d51003a93..e744c9bc34 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts @@ -1,7 +1,118 @@ 
+import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; import { SessionKey, sessionStorage } from "@/services/storage/session-storage"; import type { ToolResult } from "@/types/chat"; import type { ChatMessageData } from "../ChatMessage/useChatMessage"; +export function processInitialMessages( + initialMessages: SessionDetailResponse["messages"], +): ChatMessageData[] { + const processedMessages: ChatMessageData[] = []; + const toolCallMap = new Map(); + + for (const msg of initialMessages) { + if (!isValidMessage(msg)) { + console.warn("Invalid message structure from backend:", msg); + continue; + } + + let content = String(msg.content || ""); + const role = String(msg.role || "assistant").toLowerCase(); + const toolCalls = msg.tool_calls; + const timestamp = msg.timestamp + ? new Date(msg.timestamp as string) + : undefined; + + if (role === "user") { + content = removePageContext(content); + if (!content.trim()) continue; + processedMessages.push({ + type: "message", + role: "user", + content, + timestamp, + }); + continue; + } + + if (role === "assistant") { + content = content + .replace(/[\s\S]*?<\/thinking>/gi, "") + .replace(/[\s\S]*?<\/internal_reasoning>/gi, "") + .trim(); + + if (toolCalls && isToolCallArray(toolCalls) && toolCalls.length > 0) { + for (const toolCall of toolCalls) { + const toolName = toolCall.function.name; + const toolId = toolCall.id; + toolCallMap.set(toolId, toolName); + + try { + const args = JSON.parse(toolCall.function.arguments || "{}"); + processedMessages.push({ + type: "tool_call", + toolId, + toolName, + arguments: args, + timestamp, + }); + } catch (err) { + console.warn("Failed to parse tool call arguments:", err); + processedMessages.push({ + type: "tool_call", + toolId, + toolName, + arguments: {}, + timestamp, + }); + } + } + if (content.trim()) { + processedMessages.push({ + type: "message", + role: "assistant", + content, + timestamp, + }); + } + } else if (content.trim()) { + processedMessages.push({ + type: "message", + role: "assistant", + content, + timestamp, + }); + } + continue; + } + + if (role === "tool") { + const toolCallId = (msg.tool_call_id as string) || ""; + const toolName = toolCallMap.get(toolCallId) || "unknown"; + const toolResponse = parseToolResponse( + content, + toolCallId, + toolName, + timestamp, + ); + if (toolResponse) { + processedMessages.push(toolResponse); + } + continue; + } + + if (content.trim()) { + processedMessages.push({ + type: "message", + role: role as "user" | "assistant" | "system", + content, + timestamp, + }); + } + } + + return processedMessages; +} + export function hasSentInitialPrompt(sessionId: string): boolean { try { const sent = JSON.parse( @@ -193,6 +304,7 @@ export function parseToolResponse( if (isAgentArray(agentsData)) { return { type: "agent_carousel", + toolId, toolName: "agent_carousel", agents: agentsData, totalCount: parsedResult.total_count as number | undefined, @@ -205,6 +317,7 @@ export function parseToolResponse( if (responseType === "execution_started") { return { type: "execution_started", + toolId, toolName: "execution_started", executionId: (parsedResult.execution_id as string) || "", agentName: (parsedResult.graph_name as string) || undefined, @@ -213,6 +326,58 @@ export function parseToolResponse( timestamp: timestamp || new Date(), }; } + if (responseType === "clarification_needed") { + return { + type: "clarification_needed", + toolName, + questions: + (parsedResult.questions as Array<{ + question: string; + keyword: 
string; + example?: string; + }>) || [], + message: + (parsedResult.message as string) || + "I need more information to proceed.", + sessionId: (parsedResult.session_id as string) || "", + timestamp: timestamp || new Date(), + }; + } + if (responseType === "operation_started") { + return { + type: "operation_started", + toolName: (parsedResult.tool_name as string) || toolName, + toolId, + operationId: (parsedResult.operation_id as string) || "", + message: + (parsedResult.message as string) || + "Operation started. You can close this tab.", + timestamp: timestamp || new Date(), + }; + } + if (responseType === "operation_pending") { + return { + type: "operation_pending", + toolName: (parsedResult.tool_name as string) || toolName, + toolId, + operationId: (parsedResult.operation_id as string) || "", + message: + (parsedResult.message as string) || + "Operation in progress. Please wait...", + timestamp: timestamp || new Date(), + }; + } + if (responseType === "operation_in_progress") { + return { + type: "operation_in_progress", + toolName: (parsedResult.tool_name as string) || toolName, + toolCallId: (parsedResult.tool_call_id as string) || toolId, + message: + (parsedResult.message as string) || + "Operation already in progress. Please wait...", + timestamp: timestamp || new Date(), + }; + } if (responseType === "need_login") { return { type: "login_needed", diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts index 42dd04670d..46f384d055 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts @@ -1,5 +1,6 @@ import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; -import { useCallback, useEffect, useMemo, useRef, useState } from "react"; +import { useEffect, useMemo, useRef, useState } from "react"; +import { useChatStore } from "../../chat-store"; import { toast } from "sonner"; import { useChatStream } from "../../useChatStream"; import { usePageContext } from "../../usePageContext"; @@ -9,23 +10,44 @@ import { createUserMessage, filterAuthMessages, hasSentInitialPrompt, - isToolCallArray, - isValidMessage, markInitialPromptSent, - parseToolResponse, - removePageContext, + processInitialMessages, } from "./helpers"; +// Helper to generate deduplication key for a message +function getMessageKey(msg: ChatMessageData): string { + if (msg.type === "message") { + // Don't include timestamp - dedupe by role + content only + // This handles the case where local and server timestamps differ + // Server messages are authoritative, so duplicates from local state are filtered + return `msg:${msg.role}:${msg.content}`; + } else if (msg.type === "tool_call") { + return `toolcall:${msg.toolId}`; + } else if (msg.type === "tool_response") { + return `toolresponse:${(msg as any).toolId}`; + } else if ( + msg.type === "operation_started" || + msg.type === "operation_pending" || + msg.type === "operation_in_progress" + ) { + return `op:${(msg as any).toolId || (msg as any).operationId || (msg as any).toolCallId || ""}:${msg.toolName}`; + } else { + return `${msg.type}:${JSON.stringify(msg).slice(0, 100)}`; + } +} + interface Args { sessionId: string | null; initialMessages: SessionDetailResponse["messages"]; initialPrompt?: string; + 
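/** Invoked when the stream reports an `operation_started` tool response, so the host can begin polling the session for the final result (typically wired to startPollingForOperation from useChatSession). */ +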
onOperationStarted?: () => void; } export function useChatContainer({ sessionId, initialMessages, initialPrompt, + onOperationStarted, }: Args) { const [messages, setMessages] = useState([]); const [streamingChunks, setStreamingChunks] = useState([]); @@ -41,11 +63,18 @@ export function useChatContainer({ sendMessage: sendStreamMessage, stopStreaming, } = useChatStream(); + const activeStreams = useChatStore((s) => s.activeStreams); + const subscribeToStream = useChatStore((s) => s.subscribeToStream); const isStreaming = isStreamingInitiated || hasTextChunks; - useEffect(() => { - if (sessionId !== previousSessionIdRef.current) { - stopStreaming(previousSessionIdRef.current ?? undefined, true); + useEffect( + function handleSessionChange() { + if (sessionId === previousSessionIdRef.current) return; + + const prevSession = previousSessionIdRef.current; + if (prevSession) { + stopStreaming(prevSession); + } previousSessionIdRef.current = sessionId; setMessages([]); setStreamingChunks([]); @@ -53,138 +82,11 @@ export function useChatContainer({ setHasTextChunks(false); setIsStreamingInitiated(false); hasResponseRef.current = false; - } - }, [sessionId, stopStreaming]); - const allMessages = useMemo(() => { - const processedInitialMessages: ChatMessageData[] = []; - const toolCallMap = new Map(); + if (!sessionId) return; - for (const msg of initialMessages) { - if (!isValidMessage(msg)) { - console.warn("Invalid message structure from backend:", msg); - continue; - } - - let content = String(msg.content || ""); - const role = String(msg.role || "assistant").toLowerCase(); - const toolCalls = msg.tool_calls; - const timestamp = msg.timestamp - ? new Date(msg.timestamp as string) - : undefined; - - if (role === "user") { - content = removePageContext(content); - if (!content.trim()) continue; - processedInitialMessages.push({ - type: "message", - role: "user", - content, - timestamp, - }); - continue; - } - - if (role === "assistant") { - content = content - .replace(/[\s\S]*?<\/thinking>/gi, "") - .trim(); - - if (toolCalls && isToolCallArray(toolCalls) && toolCalls.length > 0) { - for (const toolCall of toolCalls) { - const toolName = toolCall.function.name; - const toolId = toolCall.id; - toolCallMap.set(toolId, toolName); - - try { - const args = JSON.parse(toolCall.function.arguments || "{}"); - processedInitialMessages.push({ - type: "tool_call", - toolId, - toolName, - arguments: args, - timestamp, - }); - } catch (err) { - console.warn("Failed to parse tool call arguments:", err); - processedInitialMessages.push({ - type: "tool_call", - toolId, - toolName, - arguments: {}, - timestamp, - }); - } - } - if (content.trim()) { - processedInitialMessages.push({ - type: "message", - role: "assistant", - content, - timestamp, - }); - } - } else if (content.trim()) { - processedInitialMessages.push({ - type: "message", - role: "assistant", - content, - timestamp, - }); - } - continue; - } - - if (role === "tool") { - const toolCallId = (msg.tool_call_id as string) || ""; - const toolName = toolCallMap.get(toolCallId) || "unknown"; - const toolResponse = parseToolResponse( - content, - toolCallId, - toolName, - timestamp, - ); - if (toolResponse) { - processedInitialMessages.push(toolResponse); - } - continue; - } - - if (content.trim()) { - processedInitialMessages.push({ - type: "message", - role: role as "user" | "assistant" | "system", - content, - timestamp, - }); - } - } - - return [...processedInitialMessages, ...messages]; - }, [initialMessages, messages]); - - const sendMessage = 
useCallback( - async function sendMessage( - content: string, - isUserMessage: boolean = true, - context?: { url: string; content: string }, - ) { - if (!sessionId) { - console.error("[useChatContainer] Cannot send message: no session ID"); - return; - } - setIsRegionBlockedModalOpen(false); - if (isUserMessage) { - const userMessage = createUserMessage(content); - setMessages((prev) => [...filterAuthMessages(prev), userMessage]); - } else { - setMessages((prev) => filterAuthMessages(prev)); - } - setStreamingChunks([]); - streamingChunksRef.current = []; - setHasTextChunks(false); - setIsStreamingInitiated(true); - hasResponseRef.current = false; + const activeStream = activeStreams.get(sessionId); + if (!activeStream || activeStream.status !== "streaming") return; const dispatcher = createStreamEventDispatcher({ setHasTextChunks, @@ -195,44 +97,170 @@ export function useChatContainer({ setIsRegionBlockedModalOpen, sessionId, setIsStreamingInitiated, + onOperationStarted, }); - try { - await sendStreamMessage( - sessionId, - content, - dispatcher, - isUserMessage, - context, - ); - } catch (err) { - console.error("[useChatContainer] Failed to send message:", err); - setIsStreamingInitiated(false); - - // Don't show error toast for AbortError (expected during cleanup) - if (err instanceof Error && err.name === "AbortError") return; - - const errorMessage = - err instanceof Error ? err.message : "Failed to send message"; - toast.error("Failed to send message", { - description: errorMessage, - }); - } + setIsStreamingInitiated(true); + const skipReplay = initialMessages.length > 0; + return subscribeToStream(sessionId, dispatcher, skipReplay); }, - [sessionId, sendStreamMessage], + [ + sessionId, + stopStreaming, + activeStreams, + subscribeToStream, + onOperationStarted, + ], ); - const handleStopStreaming = useCallback(() => { + // Collect toolIds from completed tool results in initialMessages + // Used to filter out operation messages when their results arrive + const completedToolIds = useMemo(() => { + const processedInitial = processInitialMessages(initialMessages); + const ids = new Set(); + for (const msg of processedInitial) { + if ( + msg.type === "tool_response" || + msg.type === "agent_carousel" || + msg.type === "execution_started" + ) { + const toolId = (msg as any).toolId; + if (toolId) { + ids.add(toolId); + } + } + } + return ids; + }, [initialMessages]); + + // Clean up local operation messages when their completed results arrive from polling + // This effect runs when completedToolIds changes (i.e., when polling brings new results) + useEffect( + function cleanupCompletedOperations() { + if (completedToolIds.size === 0) return; + + setMessages((prev) => { + const filtered = prev.filter((msg) => { + if ( + msg.type === "operation_started" || + msg.type === "operation_pending" || + msg.type === "operation_in_progress" + ) { + const toolId = (msg as any).toolId || (msg as any).toolCallId; + if (toolId && completedToolIds.has(toolId)) { + return false; // Remove - operation completed + } + } + return true; + }); + // Only update state if something was actually filtered + return filtered.length === prev.length ? 
prev : filtered; + }); + }, + [completedToolIds], + ); + + // Combine initial messages from backend with local streaming messages, + // Server messages maintain correct order; only append truly new local messages + const allMessages = useMemo(() => { + const processedInitial = processInitialMessages(initialMessages); + + // Build a set of keys from server messages for deduplication + const serverKeys = new Set(); + for (const msg of processedInitial) { + serverKeys.add(getMessageKey(msg)); + } + + // Filter local messages: remove duplicates and completed operation messages + const newLocalMessages = messages.filter((msg) => { + // Remove operation messages for completed tools + if ( + msg.type === "operation_started" || + msg.type === "operation_pending" || + msg.type === "operation_in_progress" + ) { + const toolId = (msg as any).toolId || (msg as any).toolCallId; + if (toolId && completedToolIds.has(toolId)) { + return false; + } + } + // Remove messages that already exist in server data + const key = getMessageKey(msg); + return !serverKeys.has(key); + }); + + // Server messages first (correct order), then new local messages + return [...processedInitial, ...newLocalMessages]; + }, [initialMessages, messages, completedToolIds]); + + async function sendMessage( + content: string, + isUserMessage: boolean = true, + context?: { url: string; content: string }, + ) { + if (!sessionId) { + console.error("[useChatContainer] Cannot send message: no session ID"); + return; + } + setIsRegionBlockedModalOpen(false); + if (isUserMessage) { + const userMessage = createUserMessage(content); + setMessages((prev) => [...filterAuthMessages(prev), userMessage]); + } else { + setMessages((prev) => filterAuthMessages(prev)); + } + setStreamingChunks([]); + streamingChunksRef.current = []; + setHasTextChunks(false); + setIsStreamingInitiated(true); + hasResponseRef.current = false; + + const dispatcher = createStreamEventDispatcher({ + setHasTextChunks, + setStreamingChunks, + streamingChunksRef, + hasResponseRef, + setMessages, + setIsRegionBlockedModalOpen, + sessionId, + setIsStreamingInitiated, + onOperationStarted, + }); + + try { + await sendStreamMessage( + sessionId, + content, + dispatcher, + isUserMessage, + context, + ); + } catch (err) { + console.error("[useChatContainer] Failed to send message:", err); + setIsStreamingInitiated(false); + + if (err instanceof Error && err.name === "AbortError") return; + + const errorMessage = + err instanceof Error ? 
err.message : "Failed to send message"; + toast.error("Failed to send message", { + description: errorMessage, + }); + } + } + + function handleStopStreaming() { stopStreaming(); setStreamingChunks([]); streamingChunksRef.current = []; setHasTextChunks(false); setIsStreamingInitiated(false); - }, [stopStreaming]); + } const { capturePageContext } = usePageContext(); + const sendMessageRef = useRef(sendMessage); + sendMessageRef.current = sendMessage; - // Send initial prompt if provided (for new sessions from homepage) useEffect( function handleInitialPrompt() { if (!initialPrompt || !sessionId) return; @@ -241,15 +269,9 @@ export function useChatContainer({ markInitialPromptSent(sessionId); const context = capturePageContext(); - sendMessage(initialPrompt, true, context); + sendMessageRef.current(initialPrompt, true, context); }, - [ - initialPrompt, - sessionId, - initialMessages.length, - sendMessage, - capturePageContext, - ], + [initialPrompt, sessionId, initialMessages.length, capturePageContext], ); async function sendMessageWithContext( diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx index 8cdecf0bf4..c45e8dc250 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx @@ -21,7 +21,7 @@ export function ChatInput({ className, }: Props) { const inputId = "chat-input"; - const { value, setValue, handleKeyDown, handleSend, hasMultipleLines } = + const { value, handleKeyDown, handleSubmit, handleChange, hasMultipleLines } = useChatInput({ onSend, disabled: disabled || isStreaming, @@ -29,15 +29,6 @@ export function ChatInput({ inputId, }); - function handleSubmit(e: React.FormEvent) { - e.preventDefault(); - handleSend(); - } - - function handleChange(e: React.ChangeEvent) { - setValue(e.target.value); - } - return (
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useChatInput.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useChatInput.ts index 93d764b026..6fa8e7252b 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useChatInput.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useChatInput.ts @@ -1,4 +1,10 @@ -import { KeyboardEvent, useCallback, useEffect, useState } from "react"; +import { + ChangeEvent, + FormEvent, + KeyboardEvent, + useEffect, + useState, +} from "react"; interface UseChatInputArgs { onSend: (message: string) => void; @@ -16,6 +22,23 @@ export function useChatInput({ const [value, setValue] = useState(""); const [hasMultipleLines, setHasMultipleLines] = useState(false); + useEffect( + function focusOnMount() { + const textarea = document.getElementById(inputId) as HTMLTextAreaElement; + if (textarea) textarea.focus(); + }, + [inputId], + ); + + useEffect( + function focusWhenEnabled() { + if (disabled) return; + const textarea = document.getElementById(inputId) as HTMLTextAreaElement; + if (textarea) textarea.focus(); + }, + [disabled, inputId], + ); + useEffect(() => { const textarea = document.getElementById(inputId) as HTMLTextAreaElement; const wrapper = document.getElementById( @@ -77,7 +100,7 @@ export function useChatInput({ } }, [value, maxRows, inputId]); - const handleSend = useCallback(() => { + const handleSend = () => { if (disabled || !value.trim()) return; onSend(value.trim()); setValue(""); @@ -93,23 +116,31 @@ export function useChatInput({ wrapper.style.height = ""; wrapper.style.maxHeight = ""; } - }, [value, onSend, disabled, inputId]); + }; - const handleKeyDown = useCallback( - (event: KeyboardEvent) => { - if (event.key === "Enter" && !event.shiftKey) { - event.preventDefault(); - handleSend(); - } - }, - [handleSend], - ); + function handleKeyDown(event: KeyboardEvent) { + if (event.key === "Enter" && !event.shiftKey) { + event.preventDefault(); + handleSend(); + } + } + + function handleSubmit(e: FormEvent) { + e.preventDefault(); + handleSend(); + } + + function handleChange(e: ChangeEvent) { + setValue(e.target.value); + } return { value, setValue, handleKeyDown, handleSend, + handleSubmit, + handleChange, hasMultipleLines, }; } diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx index a2827ce611..c922d0da76 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx @@ -14,7 +14,9 @@ import { AgentCarouselMessage } from "../AgentCarouselMessage/AgentCarouselMessa import { AIChatBubble } from "../AIChatBubble/AIChatBubble"; import { AuthPromptWidget } from "../AuthPromptWidget/AuthPromptWidget"; import { ChatCredentialsSetup } from "../ChatCredentialsSetup/ChatCredentialsSetup"; +import { ClarificationQuestionsWidget } from "../ClarificationQuestionsWidget/ClarificationQuestionsWidget"; import { ExecutionStartedMessage } from "../ExecutionStartedMessage/ExecutionStartedMessage"; +import { PendingOperationWidget } from "../PendingOperationWidget/PendingOperationWidget"; import { MarkdownContent } from "../MarkdownContent/MarkdownContent"; import { NoResultsMessage } from 
"../NoResultsMessage/NoResultsMessage"; import { ToolCallMessage } from "../ToolCallMessage/ToolCallMessage"; @@ -69,6 +71,10 @@ export function ChatMessage({ isToolResponse, isLoginNeeded, isCredentialsNeeded, + isClarificationNeeded, + isOperationStarted, + isOperationPending, + isOperationInProgress, } = useChatMessage(message); const displayContent = getDisplayContent(message, isUser); @@ -96,6 +102,18 @@ export function ChatMessage({ } } + function handleClarificationAnswers(answers: Record) { + if (onSendMessage) { + const contextMessage = Object.entries(answers) + .map(([keyword, answer]) => `${keyword}: ${answer}`) + .join("\n"); + + onSendMessage( + `I have the answers to your questions:\n\n${contextMessage}\n\nPlease proceed with creating the agent.`, + ); + } + } + const handleCopy = useCallback( async function handleCopy() { if (message.type !== "message") return; @@ -112,10 +130,6 @@ export function ChatMessage({ [displayContent, message], ); - function isLongResponse(content: string): boolean { - return content.split("\n").length > 5; - } - const handleTryAgain = useCallback(() => { if (message.type !== "message" || !onSendMessage) return; onSendMessage(message.content, message.role === "user"); @@ -141,6 +155,17 @@ export function ChatMessage({ ); } + if (isClarificationNeeded && message.type === "clarification_needed") { + return ( + + ); + } + // Render login needed messages if (isLoginNeeded && message.type === "login_needed") { // If user is already logged in, show success message instead of auth prompt @@ -269,6 +294,42 @@ export function ChatMessage({ ); } + // Render operation_started messages (long-running background operations) + if (isOperationStarted && message.type === "operation_started") { + return ( + + ); + } + + // Render operation_pending messages (operations in progress when refreshing) + if (isOperationPending && message.type === "operation_pending") { + return ( + + ); + } + + // Render operation_in_progress messages (duplicate request while operation running) + if (isOperationInProgress && message.type === "operation_in_progress") { + return ( + + ); + } + // Render tool response messages (but skip agent_output if it's being rendered inside assistant message) if (isToolResponse && message.type === "tool_response") { return ( @@ -333,7 +394,7 @@ export function ChatMessage({ )} - {!isUser && isFinalMessage && isLongResponse(displayContent) && ( + {!isUser && isFinalMessage && !isStreaming && ( + {onCancel && ( + + )} +
+ +
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx index 3e6bf91ad2..15b10e5715 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx @@ -1,7 +1,5 @@ -import { AIChatBubble } from "../../../AIChatBubble/AIChatBubble"; import type { ChatMessageData } from "../../../ChatMessage/useChatMessage"; -import { MarkdownContent } from "../../../MarkdownContent/MarkdownContent"; -import { formatToolResponse } from "../../../ToolResponseMessage/helpers"; +import { ToolResponseMessage } from "../../../ToolResponseMessage/ToolResponseMessage"; import { shouldSkipAgentOutput } from "../../helpers"; export interface LastToolResponseProps { @@ -15,16 +13,15 @@ export function LastToolResponse({ }: LastToolResponseProps) { if (message.type !== "tool_response") return null; - // Skip if this is an agent_output that should be rendered inside assistant message if (shouldSkipAgentOutput(message, prevMessage)) return null; - const formattedText = formatToolResponse(message.result, message.toolName); - return (
- - - +
); } diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/PendingOperationWidget/PendingOperationWidget.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/PendingOperationWidget/PendingOperationWidget.tsx new file mode 100644 index 0000000000..6cfea7f327 --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/PendingOperationWidget/PendingOperationWidget.tsx @@ -0,0 +1,109 @@ +"use client"; + +import { Card } from "@/components/atoms/Card/Card"; +import { Text } from "@/components/atoms/Text/Text"; +import { cn } from "@/lib/utils"; +import { CircleNotch, CheckCircle, XCircle } from "@phosphor-icons/react"; + +type OperationStatus = + | "pending" + | "started" + | "in_progress" + | "completed" + | "error"; + +interface Props { + status: OperationStatus; + message: string; + toolName?: string; + className?: string; +} + +function getOperationTitle(toolName?: string): string { + if (!toolName) return "Operation"; + // Convert tool name to human-readable format + // e.g., "create_agent" -> "Creating Agent", "edit_agent" -> "Editing Agent" + if (toolName === "create_agent") return "Creating Agent"; + if (toolName === "edit_agent") return "Editing Agent"; + // Default: capitalize and format tool name + return toolName + .split("_") + .map((word) => word.charAt(0).toUpperCase() + word.slice(1)) + .join(" "); +} + +export function PendingOperationWidget({ + status, + message, + toolName, + className, +}: Props) { + const isPending = + status === "pending" || status === "started" || status === "in_progress"; + const isCompleted = status === "completed"; + const isError = status === "error"; + + const operationTitle = getOperationTitle(toolName); + + return ( +
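{/* Status icon (CircleNotch spinner while pending, CheckCircle when completed, XCircle on error), the derived operation title, the status message, and contextual hints. */} +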
+
+
+
+ {isPending && ( + + )} + {isCompleted && ( + + )} + {isError && ( + + )} +
+
+ +
+ +
+ + {isPending && operationTitle} + {isCompleted && `${operationTitle} Complete`} + {isError && `${operationTitle} Failed`} + + + {message} + +
+ + {isPending && ( + + Check your library in a few minutes. + + )} + + {toolName && ( + + Tool: {toolName} + + )} +
+
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx index 1ba10dd248..27da02beb8 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx @@ -1,7 +1,14 @@ +import { Text } from "@/components/atoms/Text/Text"; +import { cn } from "@/lib/utils"; import type { ToolResult } from "@/types/chat"; +import { WarningCircleIcon } from "@phosphor-icons/react"; import { AIChatBubble } from "../AIChatBubble/AIChatBubble"; import { MarkdownContent } from "../MarkdownContent/MarkdownContent"; -import { formatToolResponse } from "./helpers"; +import { + formatToolResponse, + getErrorMessage, + isErrorResponse, +} from "./helpers"; export interface ToolResponseMessageProps { toolId?: string; @@ -18,6 +25,24 @@ export function ToolResponseMessage({ success: _success, className, }: ToolResponseMessageProps) { + if (isErrorResponse(result)) { + const errorMessage = getErrorMessage(result); + return ( + +
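{/* Error branch: show the extracted error message alongside a WarningCircleIcon instead of the formatted tool output. */} +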
+ + + {errorMessage} + +
+
+ ); + } + const formattedText = formatToolResponse(result, toolName); return ( diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts index cf2bca95f7..400f32936e 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts @@ -1,3 +1,42 @@ +function stripInternalReasoning(content: string): string { + return content + .replace(/[\s\S]*?<\/internal_reasoning>/gi, "") + .replace(/[\s\S]*?<\/thinking>/gi, "") + .replace(/\n{3,}/g, "\n\n") + .trim(); +} + +export function isErrorResponse(result: unknown): boolean { + if (typeof result === "string") { + const lower = result.toLowerCase(); + return ( + lower.startsWith("error:") || + lower.includes("not found") || + lower.includes("does not exist") || + lower.includes("failed to") || + lower.includes("unable to") + ); + } + if (typeof result === "object" && result !== null) { + const response = result as Record; + return response.type === "error" || response.error !== undefined; + } + return false; +} + +export function getErrorMessage(result: unknown): string { + if (typeof result === "string") { + return stripInternalReasoning(result.replace(/^error:\s*/i, "")); + } + if (typeof result === "object" && result !== null) { + const response = result as Record; + if (response.error) return stripInternalReasoning(String(response.error)); + if (response.message) + return stripInternalReasoning(String(response.message)); + } + return "An error occurred"; +} + function getToolCompletionPhrase(toolName: string): string { const toolCompletionPhrases: Record = { add_understanding: "Updated your business information", @@ -28,10 +67,10 @@ export function formatToolResponse(result: unknown, toolName: string): string { const parsed = JSON.parse(trimmed); return formatToolResponse(parsed, toolName); } catch { - return trimmed; + return stripInternalReasoning(trimmed); } } - return result; + return stripInternalReasoning(result); } if (typeof result !== "object" || result === null) { diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/UserChatBubble/UserChatBubble.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/UserChatBubble/UserChatBubble.tsx index 46459ff894..39a6cb36ad 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/UserChatBubble/UserChatBubble.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/UserChatBubble/UserChatBubble.tsx @@ -10,7 +10,7 @@ export function UserChatBubble({ children, className }: UserChatBubbleProps) { return (
{ + const { sessionId, abortController } = stream; + + try { + const url = `/api/chat/sessions/${sessionId}/stream`; + const body = JSON.stringify({ + message, + is_user_message: isUserMessage, + context: context || null, + }); + + const response = await fetch(url, { + method: "POST", + headers: { + "Content-Type": "application/json", + Accept: "text/event-stream", + }, + body, + signal: abortController.signal, + }); + + if (!response.ok) { + const errorText = await response.text(); + throw new Error(errorText || `HTTP ${response.status}`); + } + + if (!response.body) { + throw new Error("Response body is null"); + } + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + + while (true) { + const { done, value } = await reader.read(); + + if (done) { + notifySubscribers(stream, { type: "stream_end" }); + stream.status = "completed"; + return; + } + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop() || ""; + + for (const line of lines) { + const data = parseSSELine(line); + if (data !== null) { + if (data === "[DONE]") { + notifySubscribers(stream, { type: "stream_end" }); + stream.status = "completed"; + return; + } + + try { + const rawChunk = JSON.parse(data) as + | StreamChunk + | VercelStreamChunk; + const chunk = normalizeStreamChunk(rawChunk); + if (!chunk) continue; + + notifySubscribers(stream, chunk); + + if (chunk.type === "stream_end") { + stream.status = "completed"; + return; + } + + if (chunk.type === "error") { + stream.status = "error"; + stream.error = new Error( + chunk.message || chunk.content || "Stream error", + ); + return; + } + } catch (err) { + console.warn("[StreamExecutor] Failed to parse SSE chunk:", err); + } + } + } + } + } catch (err) { + if (err instanceof Error && err.name === "AbortError") { + notifySubscribers(stream, { type: "stream_end" }); + stream.status = "completed"; + return; + } + + if (retryCount < MAX_RETRIES) { + const retryDelay = INITIAL_RETRY_DELAY * Math.pow(2, retryCount); + console.log( + `[StreamExecutor] Retrying in ${retryDelay}ms (attempt ${retryCount + 1}/${MAX_RETRIES})`, + ); + await new Promise((resolve) => setTimeout(resolve, retryDelay)); + return executeStream( + stream, + message, + isUserMessage, + context, + retryCount + 1, + ); + } + + stream.status = "error"; + stream.error = err instanceof Error ? 
err : new Error("Stream failed"); + notifySubscribers(stream, { + type: "error", + message: stream.error.message, + }); + } +} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/stream-utils.ts b/autogpt_platform/frontend/src/components/contextual/Chat/stream-utils.ts new file mode 100644 index 0000000000..4100926e79 --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/stream-utils.ts @@ -0,0 +1,84 @@ +import type { ToolArguments, ToolResult } from "@/types/chat"; +import type { StreamChunk, VercelStreamChunk } from "./chat-types"; + +const LEGACY_STREAM_TYPES = new Set([ + "text_chunk", + "text_ended", + "tool_call", + "tool_call_start", + "tool_response", + "login_needed", + "need_login", + "credentials_needed", + "error", + "usage", + "stream_end", +]); + +export function isLegacyStreamChunk( + chunk: StreamChunk | VercelStreamChunk, +): chunk is StreamChunk { + return LEGACY_STREAM_TYPES.has(chunk.type as StreamChunk["type"]); +} + +export function normalizeStreamChunk( + chunk: StreamChunk | VercelStreamChunk, +): StreamChunk | null { + if (isLegacyStreamChunk(chunk)) return chunk; + + switch (chunk.type) { + case "text-delta": + return { type: "text_chunk", content: chunk.delta }; + case "text-end": + return { type: "text_ended" }; + case "tool-input-available": + return { + type: "tool_call_start", + tool_id: chunk.toolCallId, + tool_name: chunk.toolName, + arguments: chunk.input as ToolArguments, + }; + case "tool-output-available": + return { + type: "tool_response", + tool_id: chunk.toolCallId, + tool_name: chunk.toolName, + result: chunk.output as ToolResult, + success: chunk.success ?? true, + }; + case "usage": + return { + type: "usage", + promptTokens: chunk.promptTokens, + completionTokens: chunk.completionTokens, + totalTokens: chunk.totalTokens, + }; + case "error": + return { + type: "error", + message: chunk.errorText, + code: chunk.code, + details: chunk.details, + }; + case "finish": + return { type: "stream_end" }; + case "start": + case "text-start": + return null; + case "tool-input-start": + return { + type: "tool_call_start", + tool_id: chunk.toolCallId, + tool_name: chunk.toolName, + arguments: {}, + }; + } +} + +export const MAX_RETRIES = 3; +export const INITIAL_RETRY_DELAY = 1000; + +export function parseSSELine(line: string): string | null { + if (line.startsWith("data: ")) return line.slice(6); + return null; +} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts b/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts index cf629a287c..124301abc4 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts @@ -2,7 +2,6 @@ import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { useEffect, useRef, useState } from "react"; -import { toast } from "sonner"; import { useChatSession } from "./useChatSession"; import { useChatStream } from "./useChatStream"; @@ -27,6 +26,7 @@ export function useChat({ urlSessionId }: UseChatArgs = {}) { claimSession, clearSession: clearSessionBase, loadSession, + startPollingForOperation, } = useChatSession({ urlSessionId, autoCreate: false, @@ -67,38 +67,16 @@ export function useChat({ urlSessionId }: UseChatArgs = {}) { ], ); - useEffect(() => { - if (isLoading || isCreating) { - const timer = setTimeout(() => { - setShowLoader(true); - }, 300); - return () => clearTimeout(timer); - } else { + useEffect( + function showLoaderWithDelay() 
{ + if (isLoading || isCreating) { + const timer = setTimeout(() => setShowLoader(true), 300); + return () => clearTimeout(timer); + } setShowLoader(false); - } - }, [isLoading, isCreating]); - - useEffect(function monitorNetworkStatus() { - function handleOnline() { - toast.success("Connection restored", { - description: "You're back online", - }); - } - - function handleOffline() { - toast.error("You're offline", { - description: "Check your internet connection", - }); - } - - window.addEventListener("online", handleOnline); - window.addEventListener("offline", handleOffline); - - return () => { - window.removeEventListener("online", handleOnline); - window.removeEventListener("offline", handleOffline); - }; - }, []); + }, + [isLoading, isCreating], + ); function clearSession() { clearSessionBase(); @@ -117,5 +95,6 @@ export function useChat({ urlSessionId }: UseChatArgs = {}) { loadSession, sessionId: sessionIdFromHook, showLoader, + startPollingForOperation, }; } diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/useChatDrawer.ts b/autogpt_platform/frontend/src/components/contextual/Chat/useChatDrawer.ts deleted file mode 100644 index 62e1a5a569..0000000000 --- a/autogpt_platform/frontend/src/components/contextual/Chat/useChatDrawer.ts +++ /dev/null @@ -1,17 +0,0 @@ -"use client"; - -import { create } from "zustand"; - -interface ChatDrawerState { - isOpen: boolean; - open: () => void; - close: () => void; - toggle: () => void; -} - -export const useChatDrawer = create((set) => ({ - isOpen: false, - open: () => set({ isOpen: true }), - close: () => set({ isOpen: false }), - toggle: () => set((state) => ({ isOpen: !state.isOpen })), -})); diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts b/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts index 553e348f79..936a49936c 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts @@ -1,6 +1,7 @@ import { getGetV2GetSessionQueryKey, getGetV2GetSessionQueryOptions, + getGetV2ListSessionsQueryKey, postV2CreateSession, useGetV2GetSession, usePatchV2SessionAssignUser, @@ -58,6 +59,7 @@ export function useChatSession({ query: { enabled: !!sessionId, select: okData, + staleTime: 0, retry: shouldRetrySessionLoad, retryDelay: getSessionRetryDelay, }, @@ -101,6 +103,125 @@ export function useChatSession({ } }, [createError, loadError]); + // Track if we should be polling (set by external callers when they receive operation_started via SSE) + const [forcePolling, setForcePolling] = useState(false); + // Track if we've seen server acknowledge the pending operation (to avoid clearing forcePolling prematurely) + const hasSeenServerPendingRef = useRef(false); + + // Check if there are any pending operations in the messages + // Must check all operation types: operation_pending, operation_started, operation_in_progress + const hasPendingOperationsFromServer = useMemo(() => { + if (!messages || messages.length === 0) return false; + const pendingTypes = new Set([ + "operation_pending", + "operation_in_progress", + "operation_started", + ]); + return messages.some((msg) => { + if (msg.role !== "tool" || !msg.content) return false; + try { + const content = + typeof msg.content === "string" + ? 
JSON.parse(msg.content) + : msg.content; + return pendingTypes.has(content?.type); + } catch { + return false; + } + }); + }, [messages]); + + // Track when server has acknowledged the pending operation + useEffect(() => { + if (hasPendingOperationsFromServer) { + hasSeenServerPendingRef.current = true; + } + }, [hasPendingOperationsFromServer]); + + // Combined: poll if server has pending ops OR if we received operation_started via SSE + const hasPendingOperations = hasPendingOperationsFromServer || forcePolling; + + // Clear forcePolling only after server has acknowledged AND completed the operation + useEffect(() => { + if ( + forcePolling && + !hasPendingOperationsFromServer && + hasSeenServerPendingRef.current + ) { + // Server acknowledged the operation and it's now complete + setForcePolling(false); + hasSeenServerPendingRef.current = false; + } + }, [forcePolling, hasPendingOperationsFromServer]); + + // Function to trigger polling (called when operation_started is received via SSE) + function startPollingForOperation() { + setForcePolling(true); + hasSeenServerPendingRef.current = false; // Reset for new operation + } + + // Refresh sessions list when a pending operation completes + // (hasPendingOperations transitions from true to false) + const prevHasPendingOperationsRef = useRef(hasPendingOperations); + useEffect( + function refreshSessionsListOnOperationComplete() { + const wasHasPending = prevHasPendingOperationsRef.current; + prevHasPendingOperationsRef.current = hasPendingOperations; + + // Only invalidate when transitioning from pending to not pending + if (wasHasPending && !hasPendingOperations && sessionId) { + queryClient.invalidateQueries({ + queryKey: getGetV2ListSessionsQueryKey(), + }); + } + }, + [hasPendingOperations, sessionId, queryClient], + ); + + // Poll for updates when there are pending operations + // Backoff: 2s, 4s, 6s, 8s, 10s, ... up to 30s max + const pollAttemptRef = useRef(0); + const hasPendingOperationsRef = useRef(hasPendingOperations); + hasPendingOperationsRef.current = hasPendingOperations; + + useEffect( + function pollForPendingOperations() { + if (!sessionId || !hasPendingOperations) { + pollAttemptRef.current = 0; + return; + } + + let cancelled = false; + let timeoutId: ReturnType | null = null; + + function schedule() { + // 2s, 4s, 6s, 8s, 10s, ... 
30s (max) + const delay = Math.min((pollAttemptRef.current + 1) * 2000, 30000); + timeoutId = setTimeout(async () => { + if (cancelled) return; + pollAttemptRef.current += 1; + try { + await refetch(); + } catch (err) { + console.error("[useChatSession] Poll failed:", err); + } finally { + if (!cancelled && hasPendingOperationsRef.current) { + schedule(); + } + } + }, delay); + } + + schedule(); + + return () => { + cancelled = true; + if (timeoutId) clearTimeout(timeoutId); + }; + }, + [sessionId, hasPendingOperations, refetch], + ); + async function createSession() { try { setError(null); @@ -227,11 +348,13 @@ export function useChatSession({ isCreating, error, isSessionNotFound: isNotFoundError(loadError), + hasPendingOperations, createSession, loadSession, refreshSession, claimSession, clearSession, + startPollingForOperation, }; } diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/useChatStream.ts b/autogpt_platform/frontend/src/components/contextual/Chat/useChatStream.ts index 903c19cd30..5a9f637457 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/useChatStream.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/useChatStream.ts @@ -1,543 +1,110 @@ -import type { ToolArguments, ToolResult } from "@/types/chat"; -import { useCallback, useEffect, useRef, useState } from "react"; +"use client"; + +import { useEffect, useRef, useState } from "react"; import { toast } from "sonner"; +import { useChatStore } from "./chat-store"; +import type { StreamChunk } from "./chat-types"; -const MAX_RETRIES = 3; -const INITIAL_RETRY_DELAY = 1000; - -export interface StreamChunk { - type: - | "text_chunk" - | "text_ended" - | "tool_call" - | "tool_call_start" - | "tool_response" - | "login_needed" - | "need_login" - | "credentials_needed" - | "error" - | "usage" - | "stream_end"; - timestamp?: string; - content?: string; - message?: string; - code?: string; - details?: Record; - tool_id?: string; - tool_name?: string; - arguments?: ToolArguments; - result?: ToolResult; - success?: boolean; - idx?: number; - session_id?: string; - agent_info?: { - graph_id: string; - name: string; - trigger_type: string; - }; - provider?: string; - provider_name?: string; - credential_type?: string; - scopes?: string[]; - title?: string; - [key: string]: unknown; -} - -type VercelStreamChunk = - | { type: "start"; messageId: string } - | { type: "finish" } - | { type: "text-start"; id: string } - | { type: "text-delta"; id: string; delta: string } - | { type: "text-end"; id: string } - | { type: "tool-input-start"; toolCallId: string; toolName: string } - | { - type: "tool-input-available"; - toolCallId: string; - toolName: string; - input: ToolArguments; - } - | { - type: "tool-output-available"; - toolCallId: string; - toolName?: string; - output: ToolResult; - success?: boolean; - } - | { - type: "usage"; - promptTokens: number; - completionTokens: number; - totalTokens: number; - } - | { - type: "error"; - errorText: string; - code?: string; - details?: Record; - }; - -const LEGACY_STREAM_TYPES = new Set([ - "text_chunk", - "text_ended", - "tool_call", - "tool_call_start", - "tool_response", - "login_needed", - "need_login", - "credentials_needed", - "error", - "usage", - "stream_end", -]); - -function isLegacyStreamChunk( - chunk: StreamChunk | VercelStreamChunk, -): chunk is StreamChunk { - return LEGACY_STREAM_TYPES.has(chunk.type as StreamChunk["type"]); -} - -function normalizeStreamChunk( - chunk: StreamChunk | VercelStreamChunk, -): StreamChunk | null { - 
-  if (isLegacyStreamChunk(chunk)) {
-    return chunk;
-  }
-  switch (chunk.type) {
-    case "text-delta":
-      return { type: "text_chunk", content: chunk.delta };
-    case "text-end":
-      return { type: "text_ended" };
-    case "tool-input-available":
-      return {
-        type: "tool_call_start",
-        tool_id: chunk.toolCallId,
-        tool_name: chunk.toolName,
-        arguments: chunk.input,
-      };
-    case "tool-output-available":
-      return {
-        type: "tool_response",
-        tool_id: chunk.toolCallId,
-        tool_name: chunk.toolName,
-        result: chunk.output,
-        success: chunk.success ?? true,
-      };
-    case "usage":
-      return {
-        type: "usage",
-        promptTokens: chunk.promptTokens,
-        completionTokens: chunk.completionTokens,
-        totalTokens: chunk.totalTokens,
-      };
-    case "error":
-      return {
-        type: "error",
-        message: chunk.errorText,
-        code: chunk.code,
-        details: chunk.details,
-      };
-    case "finish":
-      return { type: "stream_end" };
-    case "start":
-    case "text-start":
-      return null;
-    case "tool-input-start":
-      const toolInputStart = chunk as Extract<
-        VercelStreamChunk,
-        { type: "tool-input-start" }
-      >;
-      return {
-        type: "tool_call_start",
-        tool_id: toolInputStart.toolCallId,
-        tool_name: toolInputStart.toolName,
-        arguments: {},
-      };
-  }
-}
+export type { StreamChunk } from "./chat-types";
 
 export function useChatStream() {
   const [isStreaming, setIsStreaming] = useState(false);
   const [error, setError] = useState(null);
-  const retryCountRef = useRef(0);
-  const retryTimeoutRef = useRef(null);
-  const abortControllerRef = useRef(null);
   const currentSessionIdRef = useRef(null);
-  const requestStartTimeRef = useRef(null);
-
-  const stopStreaming = useCallback(
-    (sessionId?: string, force: boolean = false) => {
-      console.log("[useChatStream] stopStreaming called", {
-        hasAbortController: !!abortControllerRef.current,
-        isAborted: abortControllerRef.current?.signal.aborted,
-        currentSessionId: currentSessionIdRef.current,
-        requestedSessionId: sessionId,
-        requestStartTime: requestStartTimeRef.current,
-        timeSinceStart: requestStartTimeRef.current
-          ? Date.now() - requestStartTimeRef.current
-          : null,
-        force,
-        stack: new Error().stack,
-      });
-
-      if (
-        sessionId &&
-        currentSessionIdRef.current &&
-        currentSessionIdRef.current !== sessionId
-      ) {
-        console.log(
-          "[useChatStream] Session changed, aborting previous stream",
-          {
-            oldSessionId: currentSessionIdRef.current,
-            newSessionId: sessionId,
-          },
-        );
-      }
-
-      const controller = abortControllerRef.current;
-      if (controller) {
-        const timeSinceStart = requestStartTimeRef.current
-          ? Date.now() - requestStartTimeRef.current
-          : null;
-
-        if (!force && timeSinceStart !== null && timeSinceStart < 100) {
-          console.log(
-            "[useChatStream] Request just started (<100ms), skipping abort to prevent race condition",
-            {
-              timeSinceStart,
-            },
-          );
-          return;
-        }
-
-        try {
-          const signal = controller.signal;
-
-          if (
-            signal &&
-            typeof signal.aborted === "boolean" &&
-            !signal.aborted
-          ) {
-            console.log("[useChatStream] Aborting stream");
-            controller.abort();
-          } else {
-            console.log(
-              "[useChatStream] Stream already aborted or signal invalid",
-            );
-          }
-        } catch (error) {
-          if (error instanceof Error && error.name === "AbortError") {
-            console.log(
-              "[useChatStream] AbortError caught (expected during cleanup)",
-            );
-          } else {
-            console.warn("[useChatStream] Error aborting stream:", error);
-          }
-        } finally {
-          abortControllerRef.current = null;
-          requestStartTimeRef.current = null;
-        }
-      }
-      if (retryTimeoutRef.current) {
-        clearTimeout(retryTimeoutRef.current);
-        retryTimeoutRef.current = null;
-      }
-      setIsStreaming(false);
-    },
-    [],
+  const onChunkCallbackRef = useRef<((chunk: StreamChunk) => void) | null>(
+    null,
   );
+  const stopStream = useChatStore((s) => s.stopStream);
+  const unregisterActiveSession = useChatStore(
+    (s) => s.unregisterActiveSession,
+  );
+  const isSessionActive = useChatStore((s) => s.isSessionActive);
+  const onStreamComplete = useChatStore((s) => s.onStreamComplete);
+  const getCompletedStream = useChatStore((s) => s.getCompletedStream);
+  const registerActiveSession = useChatStore((s) => s.registerActiveSession);
+  const startStream = useChatStore((s) => s.startStream);
+  const getStreamStatus = useChatStore((s) => s.getStreamStatus);
+
+  function stopStreaming(sessionId?: string) {
+    const targetSession = sessionId || currentSessionIdRef.current;
+    if (targetSession) {
+      stopStream(targetSession);
+      unregisterActiveSession(targetSession);
+    }
+    setIsStreaming(false);
+  }
+
   useEffect(() => {
-    console.log("[useChatStream] Component mounted");
-    return () => {
-      const sessionIdAtUnmount = currentSessionIdRef.current;
-      console.log(
-        "[useChatStream] Component unmounting, calling stopStreaming",
-        {
-          sessionIdAtUnmount,
-        },
-      );
-      stopStreaming(undefined, false);
+    return function cleanup() {
+      const sessionId = currentSessionIdRef.current;
+      if (sessionId && !isSessionActive(sessionId)) {
+        stopStream(sessionId);
+      }
       currentSessionIdRef.current = null;
+      onChunkCallbackRef.current = null;
     };
-  }, [stopStreaming]);
+  }, []);
 
-  const sendMessage = useCallback(
-    async (
-      sessionId: string,
-      message: string,
-      onChunk: (chunk: StreamChunk) => void,
-      isUserMessage: boolean = true,
-      context?: { url: string; content: string },
-      isRetry: boolean = false,
-    ) => {
-      console.log("[useChatStream] sendMessage called", {
-        sessionId,
-        message: message.substring(0, 50),
-        isUserMessage,
-        isRetry,
-        stack: new Error().stack,
-      });
+  useEffect(() => {
+    const unsubscribe = onStreamComplete(
+      function handleStreamComplete(completedSessionId) {
+        if (completedSessionId !== currentSessionIdRef.current) return;
 
-      const previousSessionId = currentSessionIdRef.current;
-      stopStreaming(sessionId, true);
-      currentSessionIdRef.current = sessionId;
-
-      const abortController = new AbortController();
-      abortControllerRef.current = abortController;
-      requestStartTimeRef.current = Date.now();
-      console.log("[useChatStream] Created new AbortController", {
-        sessionId,
-        previousSessionId,
-        requestStartTime: requestStartTimeRef.current,
-      });
-
-      if (abortController.signal.aborted) {
-        console.warn(
-          "[useChatStream] AbortController was aborted before request started",
-        );
-        requestStartTimeRef.current = null;
-        return Promise.reject(new Error("Request aborted"));
-      }
-
-      if (!isRetry) {
-        retryCountRef.current = 0;
-      }
-      setIsStreaming(true);
-      setError(null);
-
-      try {
-        const url = `/api/chat/sessions/${sessionId}/stream`;
-        const body = JSON.stringify({
-          message,
-          is_user_message: isUserMessage,
-          context: context || null,
-        });
-
-        const response = await fetch(url, {
-          method: "POST",
-          headers: {
-            "Content-Type": "application/json",
-            Accept: "text/event-stream",
-          },
-          body,
-          signal: abortController.signal,
-        });
-
-        console.info("[useChatStream] Stream response", {
-          sessionId,
-          status: response.status,
-          ok: response.ok,
-          contentType: response.headers.get("content-type"),
-        });
-
-        if (!response.ok) {
-          const errorText = await response.text();
-          console.warn("[useChatStream] Stream response error", {
-            sessionId,
-            status: response.status,
-            errorText,
-          });
-          throw new Error(errorText || `HTTP ${response.status}`);
-        }
-
-        if (!response.body) {
-          console.warn("[useChatStream] Response body is null", { sessionId });
-          throw new Error("Response body is null");
-        }
-
-        const reader = response.body.getReader();
-        const decoder = new TextDecoder();
-        let buffer = "";
-        let receivedChunkCount = 0;
-        let firstChunkAt: number | null = null;
-        let loggedLineCount = 0;
-
-        return new Promise((resolve, reject) => {
-          let didDispatchStreamEnd = false;
-
-          function dispatchStreamEnd() {
-            if (didDispatchStreamEnd) return;
-            didDispatchStreamEnd = true;
-            onChunk({ type: "stream_end" });
-          }
-
-          const cleanup = () => {
-            reader.cancel().catch(() => {
-              // Ignore cancel errors
-            });
-          };
-
-          async function readStream() {
-            try {
-              while (true) {
-                const { done, value } = await reader.read();
-
-                if (done) {
-                  cleanup();
-                  console.info("[useChatStream] Stream closed", {
-                    sessionId,
-                    receivedChunkCount,
-                    timeSinceStart: requestStartTimeRef.current
-                      ? Date.now() - requestStartTimeRef.current
-                      : null,
-                  });
-                  dispatchStreamEnd();
-                  retryCountRef.current = 0;
-                  stopStreaming();
-                  resolve();
-                  return;
-                }
-
-                buffer += decoder.decode(value, { stream: true });
-                const lines = buffer.split("\n");
-                buffer = lines.pop() || "";
-
-                for (const line of lines) {
-                  if (line.startsWith("data: ")) {
-                    const data = line.slice(6);
-                    if (loggedLineCount < 3) {
-                      console.info("[useChatStream] Raw stream line", {
-                        sessionId,
-                        data:
-                          data.length > 300 ? `${data.slice(0, 300)}...` : data,
-                      });
-                      loggedLineCount += 1;
-                    }
-                    if (data === "[DONE]") {
-                      cleanup();
-                      console.info("[useChatStream] Stream done marker", {
-                        sessionId,
-                        receivedChunkCount,
-                        timeSinceStart: requestStartTimeRef.current
-                          ? Date.now() - requestStartTimeRef.current
-                          : null,
-                      });
-                      dispatchStreamEnd();
-                      retryCountRef.current = 0;
-                      stopStreaming();
-                      resolve();
-                      return;
-                    }
-
-                    try {
-                      const rawChunk = JSON.parse(data) as
-                        | StreamChunk
-                        | VercelStreamChunk;
-                      const chunk = normalizeStreamChunk(rawChunk);
-                      if (!chunk) {
-                        continue;
-                      }
-
-                      if (!firstChunkAt) {
-                        firstChunkAt = Date.now();
-                        console.info("[useChatStream] First stream chunk", {
-                          sessionId,
-                          chunkType: chunk.type,
-                          timeSinceStart: requestStartTimeRef.current
-                            ? firstChunkAt - requestStartTimeRef.current
-                            : null,
-                        });
-                      }
-                      receivedChunkCount += 1;
-
-                      // Call the chunk handler
-                      onChunk(chunk);
-
-                      // Handle stream lifecycle
-                      if (chunk.type === "stream_end") {
-                        didDispatchStreamEnd = true;
-                        cleanup();
-                        console.info("[useChatStream] Stream end chunk", {
-                          sessionId,
-                          receivedChunkCount,
-                          timeSinceStart: requestStartTimeRef.current
-                            ? Date.now() - requestStartTimeRef.current
-                            : null,
-                        });
-                        retryCountRef.current = 0;
-                        stopStreaming();
-                        resolve();
-                        return;
-                      } else if (chunk.type === "error") {
-                        cleanup();
-                        reject(
-                          new Error(
-                            chunk.message || chunk.content || "Stream error",
-                          ),
-                        );
-                        return;
-                      }
-                    } catch (err) {
-                      // Skip invalid JSON lines
-                      console.warn("Failed to parse SSE chunk:", err, data);
-                    }
-                  }
-                }
-              }
-            } catch (err) {
-              if (err instanceof Error && err.name === "AbortError") {
-                cleanup();
-                dispatchStreamEnd();
-                stopStreaming();
-                resolve();
-                return;
-              }
-
-              const streamError =
-                err instanceof Error ? err : new Error("Failed to read stream");
-
-              if (retryCountRef.current < MAX_RETRIES) {
-                retryCountRef.current += 1;
-                const retryDelay =
-                  INITIAL_RETRY_DELAY * Math.pow(2, retryCountRef.current - 1);
-
-                toast.info("Connection interrupted", {
-                  description: `Retrying in ${retryDelay / 1000} seconds...`,
-                });
-
-                retryTimeoutRef.current = setTimeout(() => {
-                  sendMessage(
-                    sessionId,
-                    message,
-                    onChunk,
-                    isUserMessage,
-                    context,
-                    true,
-                  ).catch((_err) => {
-                    // Retry failed
-                  });
-                }, retryDelay);
-              } else {
-                setError(streamError);
-                toast.error("Connection Failed", {
-                  description:
-                    "Unable to connect to chat service. Please try again.",
-                });
-                cleanup();
-                dispatchStreamEnd();
-                retryCountRef.current = 0;
-                stopStreaming();
-                reject(streamError);
-              }
-            }
-          }
-
-          readStream();
-        });
-      } catch (err) {
-        if (err instanceof Error && err.name === "AbortError") {
-          setIsStreaming(false);
-          return Promise.resolve();
-        }
-        const streamError =
-          err instanceof Error ? err : new Error("Failed to start stream");
-        setError(streamError);
         setIsStreaming(false);
-        throw streamError;
+        const completed = getCompletedStream(completedSessionId);
+        if (completed?.error) {
+          setError(completed.error);
+        }
+        unregisterActiveSession(completedSessionId);
+      },
+    );
+
+    return unsubscribe;
+  }, []);
+
+  async function sendMessage(
+    sessionId: string,
+    message: string,
+    onChunk: (chunk: StreamChunk) => void,
+    isUserMessage: boolean = true,
+    context?: { url: string; content: string },
+  ) {
+    const previousSessionId = currentSessionIdRef.current;
+    if (previousSessionId && previousSessionId !== sessionId) {
+      stopStreaming(previousSessionId);
+    }
+
+    currentSessionIdRef.current = sessionId;
+    onChunkCallbackRef.current = onChunk;
+    setIsStreaming(true);
+    setError(null);
+
+    registerActiveSession(sessionId);
+
+    try {
+      await startStream(sessionId, message, isUserMessage, context, onChunk);
+
+      const status = getStreamStatus(sessionId);
+      if (status === "error") {
+        const completed = getCompletedStream(sessionId);
+        if (completed?.error) {
+          setError(completed.error);
+          toast.error("Connection Failed", {
+            description: "Unable to connect to chat service. Please try again.",
+          });
+          throw completed.error;
+        }
       }
-    },
-    [stopStreaming],
-  );
+    } catch (err) {
+      const streamError =
+        err instanceof Error ? err : new Error("Failed to start stream");
+      setError(streamError);
+      throw streamError;
+    } finally {
+      setIsStreaming(false);
+    }
+  }
 
   return {
     isStreaming,
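Reviewer note: the hook above now delegates the SSE request to the shared chat store, so a minimal consumer sketch may help when reviewing. It assumes the hook file is use-chat-stream.ts, that the hook still returns { isStreaming, error, sendMessage, stopStreaming }, and that text chunks carry a content field as in the removed normalizer; the ChatPanel component is purely hypothetical.

    "use client";

    import { useState } from "react";
    import { useChatStream } from "./use-chat-stream";
    import type { StreamChunk } from "./chat-types";

    // Hypothetical consumer: accumulate one assistant reply in local state.
    export function ChatPanel({ sessionId }: { sessionId: string }) {
      const { isStreaming, sendMessage } = useChatStream();
      const [reply, setReply] = useState("");

      async function handleSend(text: string) {
        setReply("");
        // The store owns the network request; the hook only registers the
        // session and forwards chunks, so a remount no longer aborts an
        // in-flight run for an active session.
        await sendMessage(sessionId, text, (chunk: StreamChunk) => {
          if (chunk.type === "text_chunk") setReply((prev) => prev + chunk.content);
        });
      }

      void handleSend; // wiring to an input/form is omitted in this sketch
      return <p aria-busy={isStreaming}>{reply}</p>;
    }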
diff --git a/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx b/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx
index 0a3c7de6c8..4a25c84f92 100644
--- a/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx
+++ b/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx
@@ -255,13 +255,18 @@ export function Wallet() {
     (notification: WebSocketNotification) => {
       if (
         notification.type !== "onboarding" ||
-        notification.event !== "step_completed" ||
-        !walletRef.current
+        notification.event !== "step_completed"
       ) {
         return;
       }
 
-      // Only trigger confetti for tasks that are in groups
+      // Always refresh credits when any onboarding step completes
+      fetchCredits();
+
+      // Only trigger confetti for tasks that are in displayed groups
+      if (!walletRef.current) {
+        return;
+      }
       const taskIds = groups
         .flatMap((group) => group.tasks)
         .map((task) => task.id);
@@ -274,7 +279,6 @@ export function Wallet() {
         return;
       }
 
-      fetchCredits();
       party.confetti(walletRef.current, {
         count: 30,
         spread: 120,
@@ -284,7 +288,7 @@ export function Wallet() {
         modules: [fadeOut],
       });
     },
-    [fetchCredits, fadeOut],
+    [fetchCredits, fadeOut, groups],
   );
 
   // WebSocket setup for onboarding notifications
diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
index 82c03bc9f1..2d583d2062 100644
--- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
+++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
@@ -1003,6 +1003,7 @@ export type OnboardingStep =
   | "AGENT_INPUT"
   | "CONGRATS"
   // First Wins
+  | "VISIT_COPILOT"
   | "GET_RESULTS"
   | "MARKETPLACE_VISIT"
   | "MARKETPLACE_ADD_AGENT"
diff --git a/autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx b/autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx
new file mode 100644
index 0000000000..674f6c55eb
--- /dev/null
+++ b/autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx
@@ -0,0 +1,72 @@
+"use client";
+
+import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
+import { environment } from "@/services/environment";
+import { PostHogProvider as PHProvider } from "@posthog/react";
+import { usePathname, useSearchParams } from "next/navigation";
+import posthog from "posthog-js";
+import { ReactNode, useEffect, useRef } from "react";
+
+export function PostHogProvider({ children }: { children: ReactNode }) {
+  const isPostHogEnabled = environment.isPostHogEnabled();
+  const postHogCredentials = environment.getPostHogCredentials();
+
+  useEffect(() => {
+    if (postHogCredentials.key) {
+      posthog.init(postHogCredentials.key, {
+        api_host: postHogCredentials.host,
+        defaults: "2025-11-30",
+        capture_pageview: false,
+        capture_pageleave: true,
+        autocapture: true,
+      });
+    }
+  }, []);
+
+  if (!isPostHogEnabled) return <>{children}</>;
+
+  return <PHProvider client={posthog}>{children}</PHProvider>;
+}
+
+export function PostHogUserTracker() {
+  const { user, isUserLoading } = useSupabase();
+  const previousUserIdRef = useRef<string | null>(null);
+  const isPostHogEnabled = environment.isPostHogEnabled();
+
+  useEffect(() => {
+    if (isUserLoading || !isPostHogEnabled) return;
+
+    if (user) {
+      if (previousUserIdRef.current !== user.id) {
+        posthog.identify(user.id, {
+          email: user.email,
+          ...(user.user_metadata?.name && { name: user.user_metadata.name }),
+        });
+        previousUserIdRef.current = user.id;
+      }
+    } else if (previousUserIdRef.current !== null) {
+      posthog.reset();
+      previousUserIdRef.current = null;
+    }
+  }, [user, isUserLoading, isPostHogEnabled]);
+
+  return null;
+}
+
+export function PostHogPageViewTracker() {
+  const pathname = usePathname();
+  const searchParams = useSearchParams();
+  const isPostHogEnabled = environment.isPostHogEnabled();
+
+  useEffect(() => {
+    if (pathname && isPostHogEnabled) {
+      let url = window.origin + pathname;
+      if (searchParams && searchParams.toString()) {
+        url = url + `?${searchParams.toString()}`;
+      }
+      posthog.capture("$pageview", { $current_url: url });
+    }
+  }, [pathname, searchParams, isPostHogEnabled]);
+
+  return null;
+}
diff --git a/autogpt_platform/frontend/src/services/environment/index.ts b/autogpt_platform/frontend/src/services/environment/index.ts
index cdd5b421b5..f19bc417e3 100644
--- a/autogpt_platform/frontend/src/services/environment/index.ts
+++ b/autogpt_platform/frontend/src/services/environment/index.ts
@@ -76,6 +76,13 @@ function getPreviewStealingDev() {
   return branch;
 }
 
+function getPostHogCredentials() {
+  return {
+    key: process.env.NEXT_PUBLIC_POSTHOG_KEY,
+    host: process.env.NEXT_PUBLIC_POSTHOG_HOST,
+  };
+}
+
 function isProductionBuild() {
   return process.env.NODE_ENV === "production";
 }
@@ -116,6 +123,13 @@ function areFeatureFlagsEnabled() {
   return process.env.NEXT_PUBLIC_LAUNCHDARKLY_ENABLED === "enabled";
 }
 
+function isPostHogEnabled() {
+  const inCloud = isCloud();
+  const key = process.env.NEXT_PUBLIC_POSTHOG_KEY;
+  const host = process.env.NEXT_PUBLIC_POSTHOG_HOST;
+  return inCloud && key && host;
+}
+
 export const environment = {
   // Generic
   getEnvironmentStr,
@@ -128,6 +142,7 @@ export const environment = {
   getSupabaseUrl,
   getSupabaseAnonKey,
   getPreviewStealingDev,
+  getPostHogCredentials,
   // Assertions
   isServerSide,
   isClientSide,
@@ -138,5 +153,6 @@ export const environment = {
   isCloud,
   isLocal,
   isVercelPreview,
+  isPostHogEnabled,
   areFeatureFlagsEnabled,
 };
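A possible wiring sketch for the new provider and trackers, assuming they get mounted from the root layout (the exact providers file in this repo is not shown in the diff). PostHog only activates when isPostHogEnabled() is true, i.e. in cloud builds with both NEXT_PUBLIC_POSTHOG_KEY and NEXT_PUBLIC_POSTHOG_HOST set; the Suspense boundary is needed because PostHogPageViewTracker calls useSearchParams().

    // app/layout.tsx (hypothetical placement)
    import { ReactNode, Suspense } from "react";
    import {
      PostHogProvider,
      PostHogPageViewTracker,
      PostHogUserTracker,
    } from "@/providers/posthog/posthog-provider";

    export default function RootLayout({ children }: { children: ReactNode }) {
      return (
        <html lang="en">
          <body>
            <PostHogProvider>
              {/* Both trackers render nothing; they only run effects. */}
              <PostHogUserTracker />
              <Suspense fallback={null}>
                <PostHogPageViewTracker />
              </Suspense>
              {children}
            </PostHogProvider>
          </body>
        </html>
      );
    }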
diff --git a/autogpt_platform/frontend/src/services/network-status/NetworkStatusMonitor.tsx b/autogpt_platform/frontend/src/services/network-status/NetworkStatusMonitor.tsx
new file mode 100644
index 0000000000..7552bbf78c
--- /dev/null
+++ b/autogpt_platform/frontend/src/services/network-status/NetworkStatusMonitor.tsx
@@ -0,0 +1,8 @@
+"use client";
+
+import { useNetworkStatus } from "./useNetworkStatus";
+
+export function NetworkStatusMonitor() {
+  useNetworkStatus();
+  return null;
+}
diff --git a/autogpt_platform/frontend/src/services/network-status/useNetworkStatus.ts b/autogpt_platform/frontend/src/services/network-status/useNetworkStatus.ts
new file mode 100644
index 0000000000..472a6e0e90
--- /dev/null
+++ b/autogpt_platform/frontend/src/services/network-status/useNetworkStatus.ts
@@ -0,0 +1,28 @@
+"use client";
+
+import { useEffect } from "react";
+import { toast } from "sonner";
+
+export function useNetworkStatus() {
+  useEffect(function monitorNetworkStatus() {
+    function handleOnline() {
+      toast.success("Connection restored", {
+        description: "You're back online",
+      });
+    }
+
+    function handleOffline() {
+      toast.error("You're offline", {
+        description: "Check your internet connection",
+      });
+    }
+
+    window.addEventListener("online", handleOnline);
+    window.addEventListener("offline", handleOffline);
+
+    return function cleanup() {
+      window.removeEventListener("online", handleOnline);
+      window.removeEventListener("offline", handleOffline);
+    };
+  }, []);
+}
diff --git a/autogpt_platform/frontend/src/services/storage/session-storage.ts b/autogpt_platform/frontend/src/services/storage/session-storage.ts
index 8404da571c..1be82c98fb 100644
--- a/autogpt_platform/frontend/src/services/storage/session-storage.ts
+++ b/autogpt_platform/frontend/src/services/storage/session-storage.ts
@@ -3,6 +3,7 @@ import { environment } from "../environment";
 
 export enum SessionKey {
   CHAT_SENT_INITIAL_PROMPTS = "chat_sent_initial_prompts",
+  CHAT_INITIAL_PROMPTS = "chat_initial_prompts",
 }
 
 function get(key: SessionKey) {
diff --git a/autogpt_platform/frontend/src/tests/pages/login.page.ts b/autogpt_platform/frontend/src/tests/pages/login.page.ts
index 9082cc6219..adcb8d908b 100644
--- a/autogpt_platform/frontend/src/tests/pages/login.page.ts
+++ b/autogpt_platform/frontend/src/tests/pages/login.page.ts
@@ -37,9 +37,13 @@ export class LoginPage {
     this.page.on("load", (page) => console.log(`ℹ️ Now at URL: ${page.url()}`));
 
     // Start waiting for navigation before clicking
+    // Wait for redirect to marketplace, onboarding, library, or copilot (new landing pages)
    const leaveLoginPage = this.page
      .waitForURL(
-        (url) => /^\/(marketplace|onboarding(\/.*)?)?$/.test(url.pathname),
+        (url: URL) =>
+          /^\/(marketplace|onboarding(\/.*)?|library|copilot)?$/.test(
+            url.pathname,
+          ),
        { timeout: 10_000 },
      )
      .catch((reason) => {
diff --git a/autogpt_platform/frontend/src/tests/utils/signup.ts b/autogpt_platform/frontend/src/tests/utils/signup.ts
index 7c8fdbe01b..192a9129b9 100644
--- a/autogpt_platform/frontend/src/tests/utils/signup.ts
+++ b/autogpt_platform/frontend/src/tests/utils/signup.ts
@@ -36,14 +36,16 @@ export async function signupTestUser(
   const signupButton = getButton("Sign up");
   await signupButton.click();
 
-  // Wait for successful signup - could redirect to onboarding or marketplace
+  // Wait for successful signup - could redirect to various pages depending on onboarding state
   try {
-    // Wait for either onboarding or marketplace redirect
-    await Promise.race([
-      page.waitForURL(/\/onboarding/, { timeout: 15000 }),
-      page.waitForURL(/\/marketplace/, { timeout: 15000 }),
-    ]);
+    // Wait for redirect to onboarding, marketplace, copilot, or library
+    // Use a single waitForURL with a callback to avoid Promise.race race conditions
+    await page.waitForURL(
+      (url: URL) =>
+        /\/(onboarding|marketplace|copilot|library)/.test(url.pathname),
+      { timeout: 15000 },
+    );
   } catch (error) {
     console.error(
       "❌ Timeout waiting for redirect, current URL:",
@@ -54,14 +56,19 @@ export async function signupTestUser(
 
   const currentUrl = page.url();
 
-  // Handle onboarding or marketplace redirect
+  // Handle onboarding redirect if needed
   if (currentUrl.includes("/onboarding") && ignoreOnboarding) {
     await page.goto("http://localhost:3000/marketplace");
     await page.waitForLoadState("domcontentloaded", { timeout: 10000 });
   }
 
-  // Verify we're on the expected final page
-  if (ignoreOnboarding || currentUrl.includes("/marketplace")) {
+  // Verify we're on an expected final page and user is authenticated
+  if (currentUrl.includes("/copilot") || currentUrl.includes("/library")) {
+    // For copilot/library landing pages, just verify user is authenticated
+    await page
+      .getByTestId("profile-popout-menu-trigger")
+      .waitFor({ state: "visible", timeout: 10000 });
+  } else if (ignoreOnboarding || currentUrl.includes("/marketplace")) {
     // Verify we're on marketplace
     await page
       .getByText(
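For reference, a small standalone check of the two redirect predicates used in the tests above; the expected matches are inferred from the regexes themselves, not from additional test fixtures.

    // Strict login predicate: only the root or a top-level landing route
    // (plus any /onboarding sub-path) counts as having left the login page.
    const loginRedirect = /^\/(marketplace|onboarding(\/.*)?|library|copilot)?$/;
    // Looser signup predicate: any pathname containing a landing segment.
    const signupRedirect = /\/(onboarding|marketplace|copilot|library)/;

    console.assert(loginRedirect.test("/"));
    console.assert(loginRedirect.test("/copilot"));
    console.assert(loginRedirect.test("/onboarding/step-1"));
    console.assert(!loginRedirect.test("/marketplace/agent/123")); // extra segments rejected
    console.assert(signupRedirect.test("/marketplace/agent/123")); // substring match suffices
    console.assert(!signupRedirect.test("/login"));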
diff --git a/backend/blocks/video/__init__.py b/backend/blocks/video/__init__.py
deleted file mode 100644
index fd95ef9a58..0000000000
--- a/backend/blocks/video/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Video editing blocks