From bc75d70e7d4e5603d362987ed4e24682d71a43c6 Mon Sep 17 00:00:00 2001
From: Swifty
Date: Mon, 19 Jan 2026 21:56:51 +0100
Subject: [PATCH 1/7] refactor(backend): Improve Langfuse tracing with v3 SDK patterns and @observe decorators (#11803)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This PR improves the Langfuse tracing implementation in the chat feature by adopting the v3 SDK patterns, resulting in cleaner code and better observability.

### Changes 🏗️

- **Simplified Langfuse client usage**: Replace manual client initialization with `langfuse.get_client()` global singleton
- **Use v3 context managers**: Switch to `start_as_current_observation()` and `propagate_attributes()` for automatic trace propagation
- **Auto-instrument OpenAI calls**: Use `langfuse.openai` wrapper for automatic LLM call tracing instead of manual generation tracking
- **Add `@observe` decorators**: All chat tools now have `@observe(as_type="tool")` decorators for automatic tool execution tracing:
  - `add_understanding`
  - `view_agent_output` (renamed from `agent_output`)
  - `create_agent`
  - `edit_agent`
  - `find_agent`
  - `find_block`
  - `find_library_agent`
  - `get_doc_page`
  - `run_agent`
  - `run_block`
  - `search_docs`
- **Remove manual trace lifecycle**: Eliminated the verbose `finally` block that manually ended traces/generations
- **Rename tool**: `agent_output` → `view_agent_output` for clarity

### Checklist 📋

#### For code changes:

- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
  - [x] Verified chat feature works with Langfuse tracing enabled
  - [x] Confirmed traces appear correctly in Langfuse dashboard with tool spans
  - [x] Tested tool execution flows show up as nested observations

#### For configuration changes:

- [x] `.env.default` is updated or already compatible with my changes
- [x] `docker-compose.yml` is updated or already compatible with my changes
- [x] I have included a list of my configuration changes in the PR description (under **Changes**)

No configuration changes required - uses existing Langfuse environment variables.
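For orientation, the shape these changes converge on looks roughly like the sketch below. It only illustrates the v3 calls that appear in this diff (`get_client()`, `start_as_current_observation()`, `propagate_attributes()`, `@observe`, and the `langfuse.openai` wrapper); the tool name, model name, and IDs are made up for the example, and it assumes the usual `LANGFUSE_*` and `OPENAI_API_KEY` environment variables are set. It is not the actual service code.

```python
# Minimal sketch of the Langfuse v3 patterns adopted in this PR (illustrative only).
# Assumes LANGFUSE_PUBLIC_KEY, LANGFUSE_SECRET_KEY (and optionally LANGFUSE_HOST)
# plus OPENAI_API_KEY are set in the environment.
import asyncio

from langfuse import get_client, observe, propagate_attributes
from langfuse.openai import openai  # auto-instruments OpenAI calls as generations

langfuse = get_client()  # global singleton instead of manual Langfuse(...) construction
client = openai.AsyncOpenAI()  # traced transparently by the langfuse.openai wrapper


@observe(as_type="tool", name="example_tool")  # tool runs appear as nested observations
async def example_tool(query: str) -> str:
    # Hypothetical tool body; the real tools live in chat/tools/*.py.
    return f"result for {query!r}"


async def handle_request(session_id: str, user_id: str, message: str) -> str:
    # Root span for the request; the context managers replace the old manual
    # trace/generation lifecycle and the verbose `finally` cleanup block.
    with langfuse.start_as_current_observation(
        as_type="span", name="user-copilot-request", input=message
    ) as span:
        # propagate_attributes attaches session/user/tags to everything nested inside.
        with propagate_attributes(
            session_id=session_id, user_id=user_id, tags=["copilot"]
        ):
            tool_result = await example_tool(message)
            completion = await client.chat.completions.create(
                model="gpt-4o-mini",  # hypothetical model choice
                messages=[{"role": "user", "content": f"{message}\n\n{tool_result}"}],
            )
            output = completion.choices[0].message.content or ""
            span.update(output=output)        # span-level output
            span.update_trace(output=output)  # trace-level output
            return output


if __name__ == "__main__":
    print(asyncio.run(handle_request("session-123", "user-456", "hello")))
```

The `@observe(as_type="tool")` decorators on the tools' `_execute` methods are what make each tool call show up as its own nested observation under the `user-copilot-request` span.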
--- .../backend/api/features/chat/service.py | 576 +++++++----------- .../api/features/chat/tools/__init__.py | 2 +- .../features/chat/tools/add_understanding.py | 3 + .../api/features/chat/tools/agent_output.py | 4 +- .../api/features/chat/tools/create_agent.py | 3 + .../api/features/chat/tools/edit_agent.py | 3 + .../api/features/chat/tools/find_agent.py | 3 + .../api/features/chat/tools/find_block.py | 2 + .../features/chat/tools/find_library_agent.py | 3 + .../api/features/chat/tools/get_doc_page.py | 3 + .../api/features/chat/tools/run_agent.py | 2 + .../api/features/chat/tools/run_block.py | 3 + .../api/features/chat/tools/search_docs.py | 2 + .../backend/backend/data/understanding.py | 2 + 14 files changed, 266 insertions(+), 345 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index 7b41b040ba..93634c47e3 100644 --- a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -4,14 +4,9 @@ from collections.abc import AsyncGenerator from typing import Any import orjson -from langfuse import Langfuse -from openai import ( - APIConnectionError, - APIError, - APIStatusError, - AsyncOpenAI, - RateLimitError, -) +from langfuse import get_client, propagate_attributes +from langfuse.openai import openai # type: ignore +from openai import APIConnectionError, APIError, APIStatusError, RateLimitError from openai.types.chat import ChatCompletionChunk, ChatCompletionToolParam from backend.data.understanding import ( @@ -21,7 +16,6 @@ from backend.data.understanding import ( from backend.util.exceptions import NotFoundError from backend.util.settings import Settings -from . import db as chat_db from .config import ChatConfig from .model import ( ChatMessage, @@ -50,10 +44,10 @@ logger = logging.getLogger(__name__) config = ChatConfig() settings = Settings() -client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url) +client = openai.AsyncOpenAI(api_key=config.api_key, base_url=config.base_url) -# Langfuse client (lazy initialization) -_langfuse_client: Langfuse | None = None + +langfuse = get_client() class LangfuseNotConfiguredError(Exception): @@ -69,65 +63,6 @@ def _is_langfuse_configured() -> bool: ) -def _get_langfuse_client() -> Langfuse: - """Get or create the Langfuse client for prompt management and tracing.""" - global _langfuse_client - if _langfuse_client is None: - if not _is_langfuse_configured(): - raise LangfuseNotConfiguredError( - "Langfuse is not configured. The chat feature requires Langfuse for prompt management. " - "Please set the LANGFUSE_PUBLIC_KEY and LANGFUSE_SECRET_KEY environment variables." - ) - _langfuse_client = Langfuse( - public_key=settings.secrets.langfuse_public_key, - secret_key=settings.secrets.langfuse_secret_key, - host=settings.secrets.langfuse_host or "https://cloud.langfuse.com", - ) - return _langfuse_client - - -def _get_environment() -> str: - """Get the current environment name for Langfuse tagging.""" - return settings.config.app_env.value - - -def _get_langfuse_prompt() -> str: - """Fetch the latest production prompt from Langfuse. - - Returns: - The compiled prompt text from Langfuse. - - Raises: - Exception: If Langfuse is unavailable or prompt fetch fails. 
- """ - try: - langfuse = _get_langfuse_client() - # cache_ttl_seconds=0 disables SDK caching to always get the latest prompt - prompt = langfuse.get_prompt(config.langfuse_prompt_name, cache_ttl_seconds=0) - compiled = prompt.compile() - logger.info( - f"Fetched prompt '{config.langfuse_prompt_name}' from Langfuse " - f"(version: {prompt.version})" - ) - return compiled - except Exception as e: - logger.error(f"Failed to fetch prompt from Langfuse: {e}") - raise - - -async def _is_first_session(user_id: str) -> bool: - """Check if this is the user's first chat session. - - Returns True if the user has 1 or fewer sessions (meaning this is their first). - """ - try: - session_count = await chat_db.get_user_session_count(user_id) - return session_count <= 1 - except Exception as e: - logger.warning(f"Failed to check session count for user {user_id}: {e}") - return False # Default to non-onboarding if we can't check - - async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]: """Build the full system prompt including business understanding if available. @@ -139,8 +74,6 @@ async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]: Tuple of (compiled prompt string, Langfuse prompt object for tracing) """ - langfuse = _get_langfuse_client() - # cache_ttl_seconds=0 disables SDK caching to always get the latest prompt prompt = langfuse.get_prompt(config.langfuse_prompt_name, cache_ttl_seconds=0) @@ -158,7 +91,7 @@ async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]: context = "This is the first time you are meeting the user. Greet them and introduce them to the platform" compiled = prompt.compile(users_information=context) - return compiled, prompt + return compiled, understanding async def _generate_session_title(message: str) -> str | None: @@ -217,6 +150,7 @@ async def assign_user_to_session( async def stream_chat_completion( session_id: str, message: str | None = None, + tool_call_response: str | None = None, is_user_message: bool = True, user_id: str | None = None, retry_count: int = 0, @@ -256,11 +190,6 @@ async def stream_chat_completion( yield StreamFinish() return - # Langfuse observations will be created after session is loaded (need messages for input) - # Initialize to None so finally block can safely check and end them - trace = None - generation = None - # Only fetch from Redis if session not provided (initial call) if session is None: session = await get_chat_session(session_id, user_id) @@ -336,297 +265,259 @@ async def stream_chat_completion( asyncio.create_task(_update_title()) # Build system prompt with business understanding - system_prompt, langfuse_prompt = await _build_system_prompt(user_id) - - # Build input messages including system prompt for complete Langfuse logging - trace_input_messages = [{"role": "system", "content": system_prompt}] + [ - m.model_dump() for m in session.messages - ] + system_prompt, understanding = await _build_system_prompt(user_id) # Create Langfuse trace for this LLM call (each call gets its own trace, grouped by session_id) # Using v3 SDK: start_observation creates a root span, update_trace sets trace-level attributes - try: - langfuse = _get_langfuse_client() - env = _get_environment() - trace = langfuse.start_observation( - name="chat_completion", - input={"messages": trace_input_messages}, - metadata={ - "environment": env, - "model": config.model, - "message_count": len(session.messages), - "prompt_name": langfuse_prompt.name if langfuse_prompt else None, - "prompt_version": 
langfuse_prompt.version if langfuse_prompt else None, - }, - ) - # Set trace-level attributes (session_id, user_id, tags) - trace.update_trace( + input = message + if not message and tool_call_response: + input = tool_call_response + + langfuse = get_client() + with langfuse.start_as_current_observation( + as_type="span", + name="user-copilot-request", + input=input, + ) as span: + with propagate_attributes( session_id=session_id, user_id=user_id, - tags=[env, "copilot"], - ) - except Exception as e: - logger.warning(f"Failed to create Langfuse trace: {e}") + tags=["copilot"], + metadata={ + "users_information": format_understanding_for_prompt(understanding)[ + :200 + ] # langfuse only accepts upto to 200 chars + }, + ): - # Initialize variables that will be used in finally block (must be defined before try) - assistant_response = ChatMessage( - role="assistant", - content="", - ) - accumulated_tool_calls: list[dict[str, Any]] = [] - - # Wrap main logic in try/finally to ensure Langfuse observations are always ended - try: - has_yielded_end = False - has_yielded_error = False - has_done_tool_call = False - has_received_text = False - text_streaming_ended = False - tool_response_messages: list[ChatMessage] = [] - should_retry = False - - # Generate unique IDs for AI SDK protocol - import uuid as uuid_module - - message_id = str(uuid_module.uuid4()) - text_block_id = str(uuid_module.uuid4()) - - # Yield message start - yield StreamStart(messageId=message_id) - - # Create Langfuse generation for each LLM call, linked to the prompt - # Using v3 SDK: start_observation with as_type="generation" - generation = ( - trace.start_observation( - as_type="generation", - name="llm_call", - model=config.model, - input={"messages": trace_input_messages}, - prompt=langfuse_prompt, + # Initialize variables that will be used in finally block (must be defined before try) + assistant_response = ChatMessage( + role="assistant", + content="", ) - if trace - else None - ) + accumulated_tool_calls: list[dict[str, Any]] = [] - try: - async for chunk in _stream_chat_chunks( - session=session, - tools=tools, - system_prompt=system_prompt, - text_block_id=text_block_id, - ): + # Wrap main logic in try/finally to ensure Langfuse observations are always ended + has_yielded_end = False + has_yielded_error = False + has_done_tool_call = False + has_received_text = False + text_streaming_ended = False + tool_response_messages: list[ChatMessage] = [] + should_retry = False - if isinstance(chunk, StreamTextStart): - # Emit text-start before first text delta - if not has_received_text: + # Generate unique IDs for AI SDK protocol + import uuid as uuid_module + + message_id = str(uuid_module.uuid4()) + text_block_id = str(uuid_module.uuid4()) + + # Yield message start + yield StreamStart(messageId=message_id) + + try: + async for chunk in _stream_chat_chunks( + session=session, + tools=tools, + system_prompt=system_prompt, + text_block_id=text_block_id, + ): + + if isinstance(chunk, StreamTextStart): + # Emit text-start before first text delta + if not has_received_text: + yield chunk + elif isinstance(chunk, StreamTextDelta): + delta = chunk.delta or "" + assert assistant_response.content is not None + assistant_response.content += delta + has_received_text = True yield chunk - elif isinstance(chunk, StreamTextDelta): - delta = chunk.delta or "" - assert assistant_response.content is not None - assistant_response.content += delta - has_received_text = True - yield chunk - elif isinstance(chunk, StreamTextEnd): - # Emit 
text-end after text completes - if has_received_text and not text_streaming_ended: - text_streaming_ended = True - yield chunk - elif isinstance(chunk, StreamToolInputStart): - # Emit text-end before first tool call, but only if we've received text - if has_received_text and not text_streaming_ended: - yield StreamTextEnd(id=text_block_id) - text_streaming_ended = True - yield chunk - elif isinstance(chunk, StreamToolInputAvailable): - # Accumulate tool calls in OpenAI format - accumulated_tool_calls.append( - { - "id": chunk.toolCallId, - "type": "function", - "function": { - "name": chunk.toolName, - "arguments": orjson.dumps(chunk.input).decode("utf-8"), - }, - } - ) - elif isinstance(chunk, StreamToolOutputAvailable): - result_content = ( - chunk.output - if isinstance(chunk.output, str) - else orjson.dumps(chunk.output).decode("utf-8") - ) - tool_response_messages.append( - ChatMessage( - role="tool", - content=result_content, - tool_call_id=chunk.toolCallId, - ) - ) - has_done_tool_call = True - # Track if any tool execution failed - if not chunk.success: - logger.warning( - f"Tool {chunk.toolName} (ID: {chunk.toolCallId}) execution failed" - ) - yield chunk - elif isinstance(chunk, StreamFinish): - if not has_done_tool_call: - # Emit text-end before finish if we received text but haven't closed it + elif isinstance(chunk, StreamTextEnd): + # Emit text-end after text completes + if has_received_text and not text_streaming_ended: + text_streaming_ended = True + if assistant_response.content: + logger.warn( + f"StreamTextEnd: Attempting to set output {assistant_response.content}" + ) + span.update_trace(output=assistant_response.content) + span.update(output=assistant_response.content) + yield chunk + elif isinstance(chunk, StreamToolInputStart): + # Emit text-end before first tool call, but only if we've received text if has_received_text and not text_streaming_ended: yield StreamTextEnd(id=text_block_id) text_streaming_ended = True - has_yielded_end = True yield chunk - elif isinstance(chunk, StreamError): - has_yielded_error = True - elif isinstance(chunk, StreamUsage): - session.usage.append( - Usage( - prompt_tokens=chunk.promptTokens, - completion_tokens=chunk.completionTokens, - total_tokens=chunk.totalTokens, + elif isinstance(chunk, StreamToolInputAvailable): + # Accumulate tool calls in OpenAI format + accumulated_tool_calls.append( + { + "id": chunk.toolCallId, + "type": "function", + "function": { + "name": chunk.toolName, + "arguments": orjson.dumps(chunk.input).decode( + "utf-8" + ), + }, + } ) + elif isinstance(chunk, StreamToolOutputAvailable): + result_content = ( + chunk.output + if isinstance(chunk.output, str) + else orjson.dumps(chunk.output).decode("utf-8") + ) + tool_response_messages.append( + ChatMessage( + role="tool", + content=result_content, + tool_call_id=chunk.toolCallId, + ) + ) + has_done_tool_call = True + # Track if any tool execution failed + if not chunk.success: + logger.warning( + f"Tool {chunk.toolName} (ID: {chunk.toolCallId}) execution failed" + ) + yield chunk + elif isinstance(chunk, StreamFinish): + if not has_done_tool_call: + # Emit text-end before finish if we received text but haven't closed it + if has_received_text and not text_streaming_ended: + yield StreamTextEnd(id=text_block_id) + text_streaming_ended = True + has_yielded_end = True + yield chunk + elif isinstance(chunk, StreamError): + has_yielded_error = True + elif isinstance(chunk, StreamUsage): + session.usage.append( + Usage( + prompt_tokens=chunk.promptTokens, + 
completion_tokens=chunk.completionTokens, + total_tokens=chunk.totalTokens, + ) + ) + else: + logger.error( + f"Unknown chunk type: {type(chunk)}", exc_info=True + ) + if assistant_response.content: + langfuse.update_current_trace(output=assistant_response.content) + langfuse.update_current_span(output=assistant_response.content) + elif tool_response_messages: + langfuse.update_current_trace(output=str(tool_response_messages)) + langfuse.update_current_span(output=str(tool_response_messages)) + + except Exception as e: + logger.error(f"Error during stream: {e!s}", exc_info=True) + + # Check if this is a retryable error (JSON parsing, incomplete tool calls, etc.) + is_retryable = isinstance( + e, (orjson.JSONDecodeError, KeyError, TypeError) + ) + + if is_retryable and retry_count < config.max_retries: + logger.info( + f"Retryable error encountered. Attempt {retry_count + 1}/{config.max_retries}" ) + should_retry = True else: - logger.error(f"Unknown chunk type: {type(chunk)}", exc_info=True) - except Exception as e: - logger.error(f"Error during stream: {e!s}", exc_info=True) + # Non-retryable error or max retries exceeded + # Save any partial progress before reporting error + messages_to_save: list[ChatMessage] = [] - # Check if this is a retryable error (JSON parsing, incomplete tool calls, etc.) - is_retryable = isinstance(e, (orjson.JSONDecodeError, KeyError, TypeError)) + # Add assistant message if it has content or tool calls + if accumulated_tool_calls: + assistant_response.tool_calls = accumulated_tool_calls + if assistant_response.content or assistant_response.tool_calls: + messages_to_save.append(assistant_response) - if is_retryable and retry_count < config.max_retries: + # Add tool response messages after assistant message + messages_to_save.extend(tool_response_messages) + + session.messages.extend(messages_to_save) + await upsert_chat_session(session) + + if not has_yielded_error: + error_message = str(e) + if not is_retryable: + error_message = f"Non-retryable error: {error_message}" + elif retry_count >= config.max_retries: + error_message = f"Max retries ({config.max_retries}) exceeded: {error_message}" + + error_response = StreamError(errorText=error_message) + yield error_response + if not has_yielded_end: + yield StreamFinish() + return + + # Handle retry outside of exception handler to avoid nesting + if should_retry and retry_count < config.max_retries: logger.info( - f"Retryable error encountered. 
Attempt {retry_count + 1}/{config.max_retries}" + f"Retrying stream_chat_completion for session {session_id}, attempt {retry_count + 1}" ) - should_retry = True - else: - # Non-retryable error or max retries exceeded - # Save any partial progress before reporting error - messages_to_save: list[ChatMessage] = [] + async for chunk in stream_chat_completion( + session_id=session.session_id, + user_id=user_id, + retry_count=retry_count + 1, + session=session, + context=context, + ): + yield chunk + return # Exit after retry to avoid double-saving in finally block - # Add assistant message if it has content or tool calls - if accumulated_tool_calls: - assistant_response.tool_calls = accumulated_tool_calls - if assistant_response.content or assistant_response.tool_calls: - messages_to_save.append(assistant_response) - - # Add tool response messages after assistant message - messages_to_save.extend(tool_response_messages) - - session.messages.extend(messages_to_save) - await upsert_chat_session(session) - - if not has_yielded_error: - error_message = str(e) - if not is_retryable: - error_message = f"Non-retryable error: {error_message}" - elif retry_count >= config.max_retries: - error_message = f"Max retries ({config.max_retries}) exceeded: {error_message}" - - error_response = StreamError(errorText=error_message) - yield error_response - if not has_yielded_end: - yield StreamFinish() - return - - # Handle retry outside of exception handler to avoid nesting - if should_retry and retry_count < config.max_retries: + # Normal completion path - save session and handle tool call continuation logger.info( - f"Retrying stream_chat_completion for session {session_id}, attempt {retry_count + 1}" - ) - async for chunk in stream_chat_completion( - session_id=session.session_id, - user_id=user_id, - retry_count=retry_count + 1, - session=session, - context=context, - ): - yield chunk - return # Exit after retry to avoid double-saving in finally block - - # Normal completion path - save session and handle tool call continuation - logger.info( - f"Normal completion path: session={session.session_id}, " - f"current message_count={len(session.messages)}" - ) - - # Build the messages list in the correct order - messages_to_save: list[ChatMessage] = [] - - # Add assistant message with tool_calls if any - if accumulated_tool_calls: - assistant_response.tool_calls = accumulated_tool_calls - logger.info( - f"Added {len(accumulated_tool_calls)} tool calls to assistant message" - ) - if assistant_response.content or assistant_response.tool_calls: - messages_to_save.append(assistant_response) - logger.info( - f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}" + f"Normal completion path: session={session.session_id}, " + f"current message_count={len(session.messages)}" ) - # Add tool response messages after assistant message - messages_to_save.extend(tool_response_messages) - logger.info( - f"Saving {len(tool_response_messages)} tool response messages, " - f"total_to_save={len(messages_to_save)}" - ) + # Build the messages list in the correct order + messages_to_save: list[ChatMessage] = [] - session.messages.extend(messages_to_save) - logger.info( - f"Extended session messages, new message_count={len(session.messages)}" - ) - await upsert_chat_session(session) - - # If we did a tool call, stream the chat completion again to get the next response - if has_done_tool_call: - logger.info( - "Tool call executed, streaming chat 
completion again to get assistant response" - ) - async for chunk in stream_chat_completion( - session_id=session.session_id, - user_id=user_id, - session=session, # Pass session object to avoid Redis refetch - context=context, - ): - yield chunk - - finally: - # Always end Langfuse observations to prevent resource leaks - # Guard against None and catch errors to avoid masking original exceptions - if generation is not None: - try: - latest_usage = session.usage[-1] if session.usage else None - generation.update( - model=config.model, - output={ - "content": assistant_response.content, - "tool_calls": accumulated_tool_calls or None, - }, - usage_details=( - { - "input": latest_usage.prompt_tokens, - "output": latest_usage.completion_tokens, - "total": latest_usage.total_tokens, - } - if latest_usage - else None - ), + # Add assistant message with tool_calls if any + if accumulated_tool_calls: + assistant_response.tool_calls = accumulated_tool_calls + logger.info( + f"Added {len(accumulated_tool_calls)} tool calls to assistant message" + ) + if assistant_response.content or assistant_response.tool_calls: + messages_to_save.append(assistant_response) + logger.info( + f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}" ) - generation.end() - except Exception as e: - logger.warning(f"Failed to end Langfuse generation: {e}") - if trace is not None: - try: - if accumulated_tool_calls: - trace.update_trace(output={"tool_calls": accumulated_tool_calls}) - else: - trace.update_trace(output={"response": assistant_response.content}) - trace.end() - except Exception as e: - logger.warning(f"Failed to end Langfuse trace: {e}") + # Add tool response messages after assistant message + messages_to_save.extend(tool_response_messages) + logger.info( + f"Saving {len(tool_response_messages)} tool response messages, " + f"total_to_save={len(messages_to_save)}" + ) + + session.messages.extend(messages_to_save) + logger.info( + f"Extended session messages, new message_count={len(session.messages)}" + ) + await upsert_chat_session(session) + + # If we did a tool call, stream the chat completion again to get the next response + if has_done_tool_call: + logger.info( + "Tool call executed, streaming chat completion again to get assistant response" + ) + async for chunk in stream_chat_completion( + session_id=session.session_id, + user_id=user_id, + session=session, # Pass session object to avoid Redis refetch + context=context, + tool_call_response=str(tool_response_messages), + ): + yield chunk # Retry configuration for OpenAI API calls @@ -900,5 +791,4 @@ async def _yield_tool_call( session=session, ) - logger.info(f"Yielding Tool execution response: {tool_execution_response}") yield tool_execution_response diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py b/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py index fc0fdf9064..82ce5cfd6f 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py @@ -30,7 +30,7 @@ TOOL_REGISTRY: dict[str, BaseTool] = { "find_library_agent": FindLibraryAgentTool(), "run_agent": RunAgentTool(), "run_block": RunBlockTool(), - "agent_output": AgentOutputTool(), + "view_agent_output": AgentOutputTool(), "search_docs": SearchDocsTool(), "get_doc_page": GetDocPageTool(), } diff --git 
a/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py b/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py index fe3d5e8984..bd93f0e2a6 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py @@ -3,6 +3,8 @@ import logging from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from backend.data.understanding import ( BusinessUnderstandingInput, @@ -59,6 +61,7 @@ and automations for the user's specific needs.""" """Requires authentication to store user-specific data.""" return True + @observe(as_type="tool", name="add_understanding") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py index d81a11362b..00c6d8499b 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py @@ -5,6 +5,7 @@ import re from datetime import datetime, timedelta, timezone from typing import Any +from langfuse import observe from pydantic import BaseModel, field_validator from backend.api.features.chat.model import ChatSession @@ -103,7 +104,7 @@ class AgentOutputTool(BaseTool): @property def name(self) -> str: - return "agent_output" + return "view_agent_output" @property def description(self) -> str: @@ -328,6 +329,7 @@ class AgentOutputTool(BaseTool): total_executions=len(available_executions) if available_executions else 1, ) + @observe(as_type="tool", name="view_agent_output") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py index c8168f473d..26c980c6c5 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py @@ -3,6 +3,8 @@ import logging from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from .agent_generator import ( @@ -78,6 +80,7 @@ class CreateAgentTool(BaseTool): "required": ["description"], } + @observe(as_type="tool", name="create_agent") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py index 5aaa166036..a50a89c5c7 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py @@ -3,6 +3,8 @@ import logging from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from .agent_generator import ( @@ -85,6 +87,7 @@ class EditAgentTool(BaseTool): "required": ["agent_id", "changes"], } + @observe(as_type="tool", name="edit_agent") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py index 477522757d..f231ef4484 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py +++ 
b/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py @@ -2,6 +2,8 @@ from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from .agent_search import search_agents @@ -35,6 +37,7 @@ class FindAgentTool(BaseTool): "required": ["query"], } + @observe(as_type="tool", name="find_agent") async def _execute( self, user_id: str | None, session: ChatSession, **kwargs ) -> ToolResponseBase: diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py index a5e66f0a1c..fc20fdfc4a 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py @@ -1,6 +1,7 @@ import logging from typing import Any +from langfuse import observe from prisma.enums import ContentType from backend.api.features.chat.model import ChatSession @@ -55,6 +56,7 @@ class FindBlockTool(BaseTool): def requires_auth(self) -> bool: return True + @observe(as_type="tool", name="find_block") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py index 108fba75ae..d9b5edfa9b 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py @@ -2,6 +2,8 @@ from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from .agent_search import search_agents @@ -41,6 +43,7 @@ class FindLibraryAgentTool(BaseTool): def requires_auth(self) -> bool: return True + @observe(as_type="tool", name="find_library_agent") async def _execute( self, user_id: str | None, session: ChatSession, **kwargs ) -> ToolResponseBase: diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py b/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py index 7040cd7db5..b2fdcccfcd 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py @@ -4,6 +4,8 @@ import logging from pathlib import Path from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from backend.api.features.chat.tools.base import BaseTool from backend.api.features.chat.tools.models import ( @@ -71,6 +73,7 @@ class GetDocPageTool(BaseTool): url_path = path.rsplit(".", 1)[0] if "." 
in path else path return f"{DOCS_BASE_URL}/{url_path}" + @observe(as_type="tool", name="get_doc_page") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py index 1f0a836543..4d93a3af30 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py @@ -3,6 +3,7 @@ import logging from typing import Any +from langfuse import observe from pydantic import BaseModel, Field, field_validator from backend.api.features.chat.config import ChatConfig @@ -154,6 +155,7 @@ class RunAgentTool(BaseTool): """All operations require authentication.""" return True + @observe(as_type="tool", name="run_agent") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py index 48cbcb5e5c..02f493df71 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py @@ -4,6 +4,8 @@ import logging from collections import defaultdict from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from backend.data.block import get_block from backend.data.execution import ExecutionContext @@ -127,6 +129,7 @@ class RunBlockTool(BaseTool): return matched_credentials, missing_credentials + @observe(as_type="tool", name="run_block") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py b/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py index edb0c0de1e..4903230b40 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py @@ -3,6 +3,7 @@ import logging from typing import Any +from langfuse import observe from prisma.enums import ContentType from backend.api.features.chat.model import ChatSession @@ -87,6 +88,7 @@ class SearchDocsTool(BaseTool): url_path = path.rsplit(".", 1)[0] if "." 
in path else path return f"{DOCS_BASE_URL}/{url_path}" + @observe(as_type="tool", name="search_docs") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/data/understanding.py b/autogpt_platform/backend/backend/data/understanding.py index eb63d719ca..c604e046b6 100644 --- a/autogpt_platform/backend/backend/data/understanding.py +++ b/autogpt_platform/backend/backend/data/understanding.py @@ -328,6 +328,8 @@ async def clear_business_understanding(user_id: str) -> bool: def format_understanding_for_prompt(understanding: BusinessUnderstanding) -> str: """Format business understanding as text for system prompt injection.""" + if not understanding: + return "" sections = [] # User info section From 7756e2d12d548a7c7030b2254bebbcb2896430ca Mon Sep 17 00:00:00 2001 From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com> Date: Tue, 20 Jan 2026 17:50:25 +0530 Subject: [PATCH 2/7] refactor(frontend): refactor credentials input with unified CredentialsGroupedView component (#11801) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes ๐Ÿ—๏ธ - Refactored the credentials input handling in the RunInputDialog to use the shared CredentialsGroupedView component - Moved CredentialsGroupedView from agent library to a shared component location for reuse - Fixed source name handling in edge creation to properly handle tool source names - Improved node output UI by replacing custom expand/collapse with Accordion component - Fixed timing of hardcoded values synchronization with handle IDs to ensure proper loading - Enabled NEW_FLOW_EDITOR and BUILDER_VIEW_SWITCH feature flags by default ### Checklist ๐Ÿ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Verified credentials input works in both agent run dialog and builder run dialog - [x] Confirmed node output accordion works correctly - [x] Tested flow editor with tools to ensure source name handling works properly - [x] Verified hardcoded values sync correctly with handle IDs #### For configuration changes: - [x] `.env.default` is updated or already compatible with my changes - [x] `docker-compose.yml` is updated or already compatible with my changes - [x] I have included a list of my configuration changes in the PR description (under **Changes**) --- .../RunInputDialog/RunInputDialog.tsx | 27 +-- .../RunInputDialog/useRunInputDialog.ts | 48 ++-- .../components/FlowEditor/Flow/useFlow.ts | 16 +- .../components/NodeOutput/NodeOutput.tsx | 226 +++++++++--------- .../components/NodeOutput/useNodeOutput.tsx | 12 +- .../app/(platform)/build/components/helper.ts | 8 +- .../ModalRunSection/ModalRunSection.tsx | 7 +- .../CredentialsGroupedView.tsx | 32 ++- .../CredentialsGroupedView}/helpers.ts | 2 +- 9 files changed, 194 insertions(+), 184 deletions(-) rename autogpt_platform/frontend/src/{app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal => components/contextual/CredentialsInput}/components/CredentialsGroupedView/CredentialsGroupedView.tsx (86%) rename autogpt_platform/frontend/src/{app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components => components/contextual/CredentialsInput/components/CredentialsGroupedView}/helpers.ts (98%) diff --git 
a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/RunInputDialog.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/RunInputDialog.tsx index df944da4f9..3fe72649b6 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/RunInputDialog.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/RunInputDialog.tsx @@ -10,6 +10,7 @@ import { useRunInputDialog } from "./useRunInputDialog"; import { CronSchedulerDialog } from "../CronSchedulerDialog/CronSchedulerDialog"; import { useTutorialStore } from "@/app/(platform)/build/stores/tutorialStore"; import { useEffect } from "react"; +import { CredentialsGroupedView } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView"; export const RunInputDialog = ({ isOpen, @@ -23,19 +24,17 @@ export const RunInputDialog = ({ const hasInputs = useGraphStore((state) => state.hasInputs); const hasCredentials = useGraphStore((state) => state.hasCredentials); const inputSchema = useGraphStore((state) => state.inputSchema); - const credentialsSchema = useGraphStore( - (state) => state.credentialsInputSchema, - ); const { - credentialsUiSchema, + credentialFields, + requiredCredentials, handleManualRun, handleInputChange, openCronSchedulerDialog, setOpenCronSchedulerDialog, inputValues, credentialValues, - handleCredentialChange, + handleCredentialFieldChange, isExecutingGraph, } = useRunInputDialog({ setIsOpen }); @@ -67,7 +66,7 @@ export const RunInputDialog = ({
{/* Credentials Section */} - {hasCredentials() && ( + {hasCredentials() && credentialFields.length > 0 && (
@@ -75,16 +74,12 @@ export const RunInputDialog = ({
- handleCredentialChange(v.formData)} - uiSchema={credentialsUiSchema} - initialValues={{}} - formContext={{ - showHandles: false, - size: "large", - showOptionalToggle: false, - }} +
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/useRunInputDialog.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/useRunInputDialog.ts index 358fd3ae7e..0eba6e8188 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/useRunInputDialog.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/useRunInputDialog.ts @@ -7,12 +7,11 @@ import { GraphExecutionMeta, } from "@/lib/autogpt-server-api"; import { parseAsInteger, parseAsString, useQueryStates } from "nuqs"; -import { useMemo, useState } from "react"; -import { uiSchema } from "../../../FlowEditor/nodes/uiSchema"; -import { isCredentialFieldSchema } from "@/components/renderers/InputRenderer/custom/CredentialField/helpers"; +import { useCallback, useMemo, useState } from "react"; import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore"; import { useToast } from "@/components/molecules/Toast/use-toast"; import { useReactFlow } from "@xyflow/react"; +import type { CredentialField } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers"; export const useRunInputDialog = ({ setIsOpen, @@ -120,27 +119,32 @@ export const useRunInputDialog = ({ }, }); - // We are rendering the credentials field differently compared to other fields. - // In the node, we have the field name as "credential" - so our library catches it and renders it differently. - // But here we have a different name, something like `Firecrawl credentials`, so here we are telling the library that this field is a credential field type. + // Convert credentials schema to credential fields array for CredentialsGroupedView + const credentialFields: CredentialField[] = useMemo(() => { + if (!credentialsSchema?.properties) return []; + return Object.entries(credentialsSchema.properties); + }, [credentialsSchema]); - const credentialsUiSchema = useMemo(() => { - const dynamicUiSchema: any = { ...uiSchema }; + // Get required credentials as a Set + const requiredCredentials = useMemo(() => { + return new Set(credentialsSchema?.required || []); + }, [credentialsSchema]); - if (credentialsSchema?.properties) { - Object.keys(credentialsSchema.properties).forEach((fieldName) => { - const fieldSchema = credentialsSchema.properties[fieldName]; - if (isCredentialFieldSchema(fieldSchema)) { - dynamicUiSchema[fieldName] = { - ...dynamicUiSchema[fieldName], - "ui:field": "custom/credential_field", - }; + // Handler for individual credential changes + const handleCredentialFieldChange = useCallback( + (key: string, value?: CredentialsMetaInput) => { + setCredentialValues((prev) => { + if (value) { + return { ...prev, [key]: value }; + } else { + const next = { ...prev }; + delete next[key]; + return next; } }); - } - - return dynamicUiSchema; - }, [credentialsSchema]); + }, + [], + ); const handleManualRun = async () => { // Filter out incomplete credentials (those without a valid id) @@ -173,12 +177,14 @@ export const useRunInputDialog = ({ }; return { - credentialsUiSchema, + credentialFields, + requiredCredentials, inputValues, credentialValues, isExecutingGraph, handleInputChange, handleCredentialChange, + handleCredentialFieldChange, handleManualRun, openCronSchedulerDialog, setOpenCronSchedulerDialog, diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts 
b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts index 694c1be81b..f5533848d2 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts @@ -139,14 +139,6 @@ export const useFlow = () => { useNodeStore.getState().setNodes([]); useNodeStore.getState().clearResolutionState(); addNodes(customNodes); - - // Sync hardcoded values with handle IDs. - // If a keyโ€“value field has a key without a value, the backend omits it from hardcoded values. - // But if a handleId exists for that key, it causes inconsistency. - // This ensures hardcoded values stay in sync with handle IDs. - customNodes.forEach((node) => { - useNodeStore.getState().syncHardcodedValuesWithHandleIds(node.id); - }); } }, [customNodes, addNodes]); @@ -158,6 +150,14 @@ export const useFlow = () => { } }, [graph?.links, addLinks]); + useEffect(() => { + if (customNodes.length > 0 && graph?.links) { + customNodes.forEach((node) => { + useNodeStore.getState().syncHardcodedValuesWithHandleIds(node.id); + }); + } + }, [customNodes, graph?.links]); + // update node execution status in nodes useEffect(() => { if ( diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx index 7189ab9ca7..17134ae299 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx @@ -1,22 +1,21 @@ import { Button } from "@/components/atoms/Button/Button"; import { Text } from "@/components/atoms/Text/Text"; +import { + Accordion, + AccordionContent, + AccordionItem, + AccordionTrigger, +} from "@/components/molecules/Accordion/Accordion"; import { beautifyString, cn } from "@/lib/utils"; -import { CaretDownIcon, CopyIcon, CheckIcon } from "@phosphor-icons/react"; +import { CopyIcon, CheckIcon } from "@phosphor-icons/react"; import { NodeDataViewer } from "./components/NodeDataViewer/NodeDataViewer"; import { ContentRenderer } from "./components/ContentRenderer"; import { useNodeOutput } from "./useNodeOutput"; import { ViewMoreData } from "./components/ViewMoreData"; export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => { - const { - outputData, - isExpanded, - setIsExpanded, - copiedKey, - handleCopy, - executionResultId, - inputData, - } = useNodeOutput(nodeId); + const { outputData, copiedKey, handleCopy, executionResultId, inputData } = + useNodeOutput(nodeId); if (Object.keys(outputData).length === 0) { return null; @@ -25,122 +24,117 @@ export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => { return (
-
- - Node Output - - -
+ + + + + Node Output + + + +
+
+ Input - {isExpanded && ( - <> -
-
- Input + - - -
- - +
+ + +
-
- {Object.entries(outputData) - .slice(0, 2) - .map(([key, value]) => ( -
-
- - Pin: - - - {beautifyString(key)} - -
-
- - Data: - -
- {value.map((item, index) => ( -
- + {Object.entries(outputData) + .slice(0, 2) + .map(([key, value]) => ( +
+
+ + Pin: + + + {beautifyString(key)} + +
+
+ + Data: + +
+ {value.map((item, index) => ( +
+ +
+ ))} + +
+ +
- ))} - -
- -
-
- ))} -
+ ))} +
- {Object.keys(outputData).length > 2 && ( - - )} - - )} + {Object.keys(outputData).length > 2 && ( + + )} + + +
); }; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx index ba8559a66c..cfc599c6e4 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx @@ -4,7 +4,6 @@ import { useShallow } from "zustand/react/shallow"; import { useState } from "react"; export const useNodeOutput = (nodeId: string) => { - const [isExpanded, setIsExpanded] = useState(true); const [copiedKey, setCopiedKey] = useState(null); const { toast } = useToast(); @@ -37,13 +36,10 @@ export const useNodeOutput = (nodeId: string) => { } }; return { - outputData: outputData, - inputData: inputData, - isExpanded: isExpanded, - setIsExpanded: setIsExpanded, - copiedKey: copiedKey, - setCopiedKey: setCopiedKey, - handleCopy: handleCopy, + outputData, + inputData, + copiedKey, + handleCopy, executionResultId: nodeExecutionResult?.node_exec_id, }; }; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/helper.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/helper.ts index 7b3c5b1d01..00c151d35b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/helper.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/helper.ts @@ -61,12 +61,18 @@ export const convertNodesPlusBlockInfoIntoCustomNodes = ( return customNode; }; +const isToolSourceName = (sourceName: string): boolean => + sourceName.startsWith("tools_^_"); + +const cleanupSourceName = (sourceName: string): string => + isToolSourceName(sourceName) ? "tools" : sourceName; + export const linkToCustomEdge = (link: Link): CustomEdge => ({ id: link.id ?? 
"", type: "custom" as const, source: link.source_id, target: link.sink_id, - sourceHandle: link.source_name, + sourceHandle: cleanupSourceName(link.source_name), targetHandle: link.sink_name, data: { isStatic: link.is_static, diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx index 7660de7c15..b3e0c17d74 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx @@ -1,9 +1,9 @@ import { Input } from "@/components/atoms/Input/Input"; +import { CredentialsGroupedView } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView"; import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; import { useMemo } from "react"; import { RunAgentInputs } from "../../../RunAgentInputs/RunAgentInputs"; import { useRunAgentModalContext } from "../../context"; -import { CredentialsGroupedView } from "../CredentialsGroupedView/CredentialsGroupedView"; import { ModalSection } from "../ModalSection/ModalSection"; import { WebhookTriggerBanner } from "../WebhookTriggerBanner/WebhookTriggerBanner"; @@ -19,6 +19,8 @@ export function ModalRunSection() { setInputValue, agentInputFields, agentCredentialsInputFields, + inputCredentials, + setInputCredentialsValue, } = useRunAgentModalContext(); const inputFields = Object.entries(agentInputFields || {}); @@ -102,6 +104,9 @@ export function ModalRunSection() { ) : null} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/CredentialsGroupedView/CredentialsGroupedView.tsx b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView.tsx similarity index 86% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/CredentialsGroupedView/CredentialsGroupedView.tsx rename to autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView.tsx index 2ae159e739..135a960431 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/CredentialsGroupedView/CredentialsGroupedView.tsx +++ b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView.tsx @@ -5,30 +5,37 @@ import { AccordionItem, AccordionTrigger, } from "@/components/molecules/Accordion/Accordion"; +import { + CredentialsMetaInput, + CredentialsType, +} from "@/lib/autogpt-server-api/types"; import { CredentialsProvidersContext } from "@/providers/agent-credentials/credentials-provider"; -import { SlidersHorizontal } from "@phosphor-icons/react"; +import { SlidersHorizontalIcon } from "@phosphor-icons/react"; import { useContext, useEffect, 
useMemo, useRef } from "react"; -import { useRunAgentModalContext } from "../../context"; import { areSystemCredentialProvidersLoading, CredentialField, findSavedCredentialByProviderAndType, hasMissingRequiredSystemCredentials, splitCredentialFieldsBySystem, -} from "../helpers"; +} from "./helpers"; type Props = { credentialFields: CredentialField[]; requiredCredentials: Set; + inputCredentials: Record; + inputValues: Record; + onCredentialChange: (key: string, value?: CredentialsMetaInput) => void; }; export function CredentialsGroupedView({ credentialFields, requiredCredentials, + inputCredentials, + inputValues, + onCredentialChange, }: Props) { const allProviders = useContext(CredentialsProvidersContext); - const { inputCredentials, setInputCredentialsValue, inputValues } = - useRunAgentModalContext(); const { userCredentialFields, systemCredentialFields } = useMemo( () => @@ -87,11 +94,11 @@ export function CredentialsGroupedView({ ); if (savedCredential) { - setInputCredentialsValue(key, { + onCredentialChange(key, { id: savedCredential.id, provider: savedCredential.provider, - type: savedCredential.type, - title: (savedCredential as { title?: string }).title, + type: savedCredential.type as CredentialsType, + title: savedCredential.title, }); } } @@ -103,7 +110,7 @@ export function CredentialsGroupedView({ systemCredentialFields, requiredCredentials, inputCredentials, - setInputCredentialsValue, + onCredentialChange, isLoadingProviders, ]); @@ -123,7 +130,7 @@ export function CredentialsGroupedView({ } selectedCredentials={selectedCred} onSelectCredentials={(value) => { - setInputCredentialsValue(key, value); + onCredentialChange(key, value); }} siblingInputs={inputValues} isOptional={!requiredCredentials.has(key)} @@ -143,7 +150,8 @@ export function CredentialsGroupedView({
- System credentials + System + credentials {hasMissingSystemCredentials && ( (missing) )} @@ -163,7 +171,7 @@ export function CredentialsGroupedView({ } selectedCredentials={selectedCred} onSelectCredentials={(value) => { - setInputCredentialsValue(key, value); + onCredentialChange(key, value); }} siblingInputs={inputValues} isOptional={!requiredCredentials.has(key)} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/helpers.ts b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers.ts similarity index 98% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/helpers.ts rename to autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers.ts index 72f0fcb451..519ef302c1 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/helpers.ts +++ b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers.ts @@ -1,5 +1,5 @@ import { CredentialsProvidersContextType } from "@/providers/agent-credentials/credentials-provider"; -import { getSystemCredentials } from "../../../../../../../../../../../components/contextual/CredentialsInput/helpers"; +import { getSystemCredentials } from "../../helpers"; export type CredentialField = [string, any]; From c20ca47bb0cdbb5a2e2c2760d2b0258f219aad2e Mon Sep 17 00:00:00 2001 From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com> Date: Tue, 20 Jan 2026 21:20:23 +0530 Subject: [PATCH 3/7] feat(frontend): enhance RunGraph and RunInputDialog components with loading states and improved UI (#11808) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes ๐Ÿ—๏ธ - Enhanced UI for the Run Graph button with improved loading states and animations - Added color-coded edges in the flow editor based on output data types - Improved the layout of the Run Input Dialog with a two-column grid design - Refined the styling of flow editor controls with consistent icon sizes and colors - Updated tutorial icons with better color and size customization - Fixed credential field display to show provider name with "credential" suffix - Optimized draft saving by excluding node position changes to prevent excessive saves when dragging nodes ### Checklist ๐Ÿ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Verified that the Run Graph button shows proper loading states - [x] Confirmed that edges display correct colors based on data types - [x] Tested the Run Input Dialog layout with various input configurations - [x] Checked that flow editor controls display consistently - [x] Verified that tutorial icons render properly - [x] Confirmed credential fields show proper provider names - [x] Tested that dragging nodes doesn't trigger unnecessary draft saves --- .../components/RunGraph/RunGraph.tsx | 44 +++++++-- .../RunInputDialog/RunInputDialog.tsx | 94 ++++++++++--------- .../Flow/components/CustomControl.tsx | 14 +-- .../FlowEditor/edges/CustomEdge.tsx | 8 +- .../FlowEditor/edges/useCustomEdge.ts | 12 ++- .../components/FlowEditor/nodes/helpers.ts | 35 
+++++++ .../components/FlowEditor/tutorial/icons.ts | 33 ++++++- .../components/FlowEditor/tutorial/index.ts | 3 + .../LibraryUploadAgentDialog.tsx | 3 +- .../components/CredentialFieldTitle.tsx | 11 ++- .../frontend/src/lib/dexie/draft-utils.ts | 2 +- 11 files changed, 183 insertions(+), 76 deletions(-) diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx index f381ccb93b..57890b1f17 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx @@ -5,10 +5,11 @@ import { TooltipContent, TooltipTrigger, } from "@/components/atoms/Tooltip/BaseTooltip"; -import { PlayIcon, StopIcon } from "@phosphor-icons/react"; +import { CircleNotchIcon, PlayIcon, StopIcon } from "@phosphor-icons/react"; import { useShallow } from "zustand/react/shallow"; import { RunInputDialog } from "../RunInputDialog/RunInputDialog"; import { useRunGraph } from "./useRunGraph"; +import { cn } from "@/lib/utils"; export const RunGraph = ({ flowID }: { flowID: string | null }) => { const { @@ -24,6 +25,31 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => { useShallow((state) => state.isGraphRunning), ); + const isLoading = isExecutingGraph || isTerminatingGraph || isSaving; + + // Determine which icon to show with proper animation + const renderIcon = () => { + const iconClass = cn( + "size-4 transition-transform duration-200 ease-out", + !isLoading && "group-hover:scale-110", + ); + + if (isLoading) { + return ( + + ); + } + + if (isGraphRunning) { + return ; + } + + return ; + }; + return ( <> @@ -33,18 +59,18 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => { variant={isGraphRunning ? "destructive" : "primary"} data-id={isGraphRunning ? "stop-graph-button" : "run-graph-button"} onClick={isGraphRunning ? handleStopGraph : handleRunGraph} - disabled={!flowID || isExecutingGraph || isTerminatingGraph} - loading={isExecutingGraph || isTerminatingGraph || isSaving} + disabled={!flowID || isLoading} + className="group" > - {!isGraphRunning ? ( - - ) : ( - - )} + {renderIcon()} - {isGraphRunning ? "Stop agent" : "Run agent"} + {isLoading + ? "Processing..." + : isGraphRunning + ? "Stop agent" + : "Run agent"} -
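The change list above notes that draft saving now ignores node position changes, so dragging nodes around the canvas no longer triggers a save on every movement. A minimal sketch of that idea, assuming the editor surfaces `NodeChange` events from `@xyflow/react`; the helper name and exact filter are illustrative, not the code in `draft-utils.ts`:

```typescript
// Illustrative sketch only: skip draft persistence for position-only changes,
// as described in the PR notes above. Helper name and filter are assumptions.
import type { NodeChange } from "@xyflow/react";

export function shouldPersistDraft(changes: NodeChange[]): boolean {
  // Dragging emits a stream of "position" changes; only persist the draft when
  // something other than a node's position or selection actually changed.
  return changes.some(
    (change) => change.type !== "position" && change.type !== "select",
  );
}
```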
- {/* Credentials Section */} - {hasCredentials() && credentialFields.length > 0 && ( -
-
- - Credentials - +
+
+ {/* Credentials Section */} + {hasCredentials() && credentialFields.length > 0 && ( +
+
+ + Credentials + +
+
+ +
-
- -
-
- )} + )} - {/* Inputs Section */} - {hasInputs() && ( -
-
- - Inputs - + {/* Inputs Section */} + {hasInputs() && ( +
+
+ + Inputs + +
+
+ handleInputChange(v.formData)} + uiSchema={uiSchema} + initialValues={{}} + formContext={{ + showHandles: false, + size: "large", + }} + /> +
-
- handleInputChange(v.formData)} - uiSchema={uiSchema} - initialValues={{}} - formContext={{ - showHandles: false, - size: "large", - }} - /> -
-
- )} + )} +
- {/* Action Button */}
{purpose === "run" && ( + + +
+
+ {label}: {isEnabled ? "ON" : "OFF"} +
+
+ {isEnabled ? tooltipEnabled : tooltipDisabled} +
+
+
+ + ); +} + export function FloatingSafeModeToggle({ graph, className, fullWidth = false, }: Props) { const { - currentSafeMode, + currentHITLSafeMode, + showHITLToggle, + isHITLStateUndetermined, + handleHITLToggle, + currentSensitiveActionSafeMode, + showSensitiveActionToggle, + handleSensitiveActionToggle, isPending, shouldShowToggle, - isStateUndetermined, - handleToggle, } = useAgentSafeMode(graph); - if (!shouldShowToggle || isStateUndetermined || isPending) { + if (!shouldShowToggle || isPending) { + return null; + } + + const showHITL = showHITLToggle && !isHITLStateUndetermined; + const showSensitive = showSensitiveActionToggle; + + if (!showHITL && !showSensitive) { return null; } return ( -
- - - - - -
-
- Safe Mode: {currentSafeMode! ? "ON" : "OFF"} -
-
- {currentSafeMode! - ? "Human in the loop blocks require manual review" - : "Human in the loop blocks proceed automatically"} -
-
-
-
+
+ {showHITL && ( + + )} + {showSensitive && ( + + )}
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentSettingsModal/AgentSettingsModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentSettingsModal/AgentSettingsModal.tsx index 7886f7adaf..de912c5fc3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentSettingsModal/AgentSettingsModal.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentSettingsModal/AgentSettingsModal.tsx @@ -31,10 +31,18 @@ export function AgentSettingsModal({ } } - const { currentSafeMode, isPending, hasHITLBlocks, handleToggle } = - useAgentSafeMode(agent); + const { + currentHITLSafeMode, + showHITLToggle, + handleHITLToggle, + currentSensitiveActionSafeMode, + showSensitiveActionToggle, + handleSensitiveActionToggle, + isPending, + shouldShowToggle, + } = useAgentSafeMode(agent); - if (!hasHITLBlocks) return null; + if (!shouldShowToggle) return null; return (
-
-
-
- Require human approval - - The agent will pause and wait for your review before - continuing - + {showHITLToggle && ( +
+
+
+ + Human-in-the-loop approval + + + The agent will pause at human-in-the-loop blocks and wait + for your review before continuing + +
+
-
-
+ )} + {showSensitiveActionToggle && ( +
+
+
+ + Sensitive action approval + + + The agent will pause at sensitive action blocks and wait for + your review before continuing + +
+ +
+
+ )}
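The two switches above write to independent fields of the graph's settings; the defaults match the `GraphSettings` schema added to `openapi.json` later in this series. A rough sketch of the shapes involved, with the type transcribed from that schema and the update payloads mirroring what `useAgentSafeMode` sends (the constant names are illustrative):

```typescript
// Transcribed from the GraphSettings schema in openapi.json; the payloads
// mirror the PATCH bodies sent by useAgentSafeMode. Constant names are illustrative.
type GraphSettings = {
  human_in_the_loop_safe_mode: boolean; // default: true
  sensitive_action_safe_mode: boolean; // default: false
};

// Each toggle patches only its own field:
const hitlUpdate: Partial<GraphSettings> = {
  human_in_the_loop_safe_mode: false,
};
const sensitiveActionUpdate: Partial<GraphSettings> = {
  sensitive_action_safe_mode: true,
};
```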
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SafeModeToggle.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SafeModeToggle.tsx index 9ba37d8d17..dc0258c768 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SafeModeToggle.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SafeModeToggle.tsx @@ -5,48 +5,112 @@ import { Graph } from "@/lib/autogpt-server-api/types"; import { cn } from "@/lib/utils"; import { ShieldCheckIcon, ShieldIcon } from "@phosphor-icons/react"; import { useAgentSafeMode } from "@/hooks/useAgentSafeMode"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/atoms/Tooltip/BaseTooltip"; interface Props { graph: GraphModel | LibraryAgent | Graph; className?: string; - fullWidth?: boolean; } -export function SafeModeToggle({ graph }: Props) { +interface SafeModeIconButtonProps { + isEnabled: boolean; + label: string; + tooltipEnabled: string; + tooltipDisabled: string; + onToggle: () => void; + isPending: boolean; +} + +function SafeModeIconButton({ + isEnabled, + label, + tooltipEnabled, + tooltipDisabled, + onToggle, + isPending, +}: SafeModeIconButtonProps) { + return ( + + + + + +
+
+ {label}: {isEnabled ? "ON" : "OFF"} +
+
+ {isEnabled ? tooltipEnabled : tooltipDisabled} +
+
+
+
+ ); +} + +export function SafeModeToggle({ graph, className }: Props) { const { - currentSafeMode, + currentHITLSafeMode, + showHITLToggle, + isHITLStateUndetermined, + handleHITLToggle, + currentSensitiveActionSafeMode, + showSensitiveActionToggle, + handleSensitiveActionToggle, isPending, shouldShowToggle, - isStateUndetermined, - handleToggle, } = useAgentSafeMode(graph); - if (!shouldShowToggle || isStateUndetermined) { + if (!shouldShowToggle || isHITLStateUndetermined) { + return null; + } + + const showHITL = showHITLToggle && !isHITLStateUndetermined; + const showSensitive = showSensitiveActionToggle; + + if (!showHITL && !showSensitive) { return null; } return ( - + {showSensitive && ( + + )} +
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedSettingsView/SelectedSettingsView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedSettingsView/SelectedSettingsView.tsx index 57d7055e1c..530d24529f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedSettingsView/SelectedSettingsView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedSettingsView/SelectedSettingsView.tsx @@ -13,8 +13,16 @@ interface Props { } export function SelectedSettingsView({ agent, onClearSelectedRun }: Props) { - const { currentSafeMode, isPending, hasHITLBlocks, handleToggle } = - useAgentSafeMode(agent); + const { + currentHITLSafeMode, + showHITLToggle, + handleHITLToggle, + currentSensitiveActionSafeMode, + showSensitiveActionToggle, + handleSensitiveActionToggle, + isPending, + shouldShowToggle, + } = useAgentSafeMode(agent); return ( @@ -34,24 +42,51 @@ export function SelectedSettingsView({ agent, onClearSelectedRun }: Props) {
- {hasHITLBlocks ? ( -
-
-
- Require human approval - - The agent will pause and wait for your review before - continuing - + {shouldShowToggle ? ( + <> + {showHITLToggle && ( +
+
+
+ + Human-in-the-loop approval + + + The agent will pause at human-in-the-loop blocks and + wait for your review before continuing + +
+ +
- -
-
+ )} + {showSensitiveActionToggle && ( +
+
+
+ + Sensitive action approval + + + The agent will pause at sensitive action blocks and wait + for your review before continuing + +
+ +
+
+ )} + ) : (
diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index fc4e737651..5cd60fcb35 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -6383,6 +6383,11 @@ "title": "Has Human In The Loop", "readOnly": true }, + "has_sensitive_action": { + "type": "boolean", + "title": "Has Sensitive Action", + "readOnly": true + }, "trigger_setup_info": { "anyOf": [ { "$ref": "#/components/schemas/GraphTriggerInfo" }, @@ -6399,6 +6404,7 @@ "output_schema", "has_external_trigger", "has_human_in_the_loop", + "has_sensitive_action", "trigger_setup_info" ], "title": "BaseGraph" @@ -7629,6 +7635,11 @@ "title": "Has Human In The Loop", "readOnly": true }, + "has_sensitive_action": { + "type": "boolean", + "title": "Has Sensitive Action", + "readOnly": true + }, "trigger_setup_info": { "anyOf": [ { "$ref": "#/components/schemas/GraphTriggerInfo" }, @@ -7652,6 +7663,7 @@ "output_schema", "has_external_trigger", "has_human_in_the_loop", + "has_sensitive_action", "trigger_setup_info", "credentials_input_schema" ], @@ -7730,6 +7742,11 @@ "title": "Has Human In The Loop", "readOnly": true }, + "has_sensitive_action": { + "type": "boolean", + "title": "Has Sensitive Action", + "readOnly": true + }, "trigger_setup_info": { "anyOf": [ { "$ref": "#/components/schemas/GraphTriggerInfo" }, @@ -7754,6 +7771,7 @@ "output_schema", "has_external_trigger", "has_human_in_the_loop", + "has_sensitive_action", "trigger_setup_info", "credentials_input_schema" ], @@ -7762,8 +7780,14 @@ "GraphSettings": { "properties": { "human_in_the_loop_safe_mode": { - "anyOf": [{ "type": "boolean" }, { "type": "null" }], - "title": "Human In The Loop Safe Mode" + "type": "boolean", + "title": "Human In The Loop Safe Mode", + "default": true + }, + "sensitive_action_safe_mode": { + "type": "boolean", + "title": "Sensitive Action Safe Mode", + "default": false } }, "type": "object", @@ -7921,6 +7945,16 @@ "title": "Has External Trigger", "description": "Whether the agent has an external trigger (e.g. 
webhook) node" }, + "has_human_in_the_loop": { + "type": "boolean", + "title": "Has Human In The Loop", + "description": "Whether the agent has human-in-the-loop blocks" + }, + "has_sensitive_action": { + "type": "boolean", + "title": "Has Sensitive Action", + "description": "Whether the agent has sensitive action blocks" + }, "trigger_setup_info": { "anyOf": [ { "$ref": "#/components/schemas/GraphTriggerInfo" }, @@ -7967,6 +8001,8 @@ "output_schema", "credentials_input_schema", "has_external_trigger", + "has_human_in_the_loop", + "has_sensitive_action", "new_output", "can_access_graph", "is_latest_version", diff --git a/autogpt_platform/frontend/src/hooks/useAgentSafeMode.ts b/autogpt_platform/frontend/src/hooks/useAgentSafeMode.ts index 07a2b33674..8e5560ce8f 100644 --- a/autogpt_platform/frontend/src/hooks/useAgentSafeMode.ts +++ b/autogpt_platform/frontend/src/hooks/useAgentSafeMode.ts @@ -20,11 +20,15 @@ function hasHITLBlocks(graph: GraphModel | LibraryAgent | Graph): boolean { if ("has_human_in_the_loop" in graph) { return !!graph.has_human_in_the_loop; } + return false; +} - if (isLibraryAgent(graph)) { - return graph.settings?.human_in_the_loop_safe_mode !== null; +function hasSensitiveActionBlocks( + graph: GraphModel | LibraryAgent | Graph, +): boolean { + if ("has_sensitive_action" in graph) { + return !!graph.has_sensitive_action; } - return false; } @@ -40,7 +44,9 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) { const graphId = getGraphId(graph); const isAgent = isLibraryAgent(graph); - const shouldShowToggle = hasHITLBlocks(graph); + const showHITLToggle = hasHITLBlocks(graph); + const showSensitiveActionToggle = hasSensitiveActionBlocks(graph); + const shouldShowToggle = showHITLToggle || showSensitiveActionToggle; const { mutateAsync: updateGraphSettings, isPending } = usePatchV1UpdateGraphSettings(); @@ -56,27 +62,37 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) { }, ); - const [localSafeMode, setLocalSafeMode] = useState(null); + const [localHITLSafeMode, setLocalHITLSafeMode] = useState(true); + const [localSensitiveActionSafeMode, setLocalSensitiveActionSafeMode] = + useState(false); + const [isLocalStateLoaded, setIsLocalStateLoaded] = useState(false); useEffect(() => { if (!isAgent && libraryAgent) { - const backendValue = libraryAgent.settings?.human_in_the_loop_safe_mode; - if (backendValue !== undefined) { - setLocalSafeMode(backendValue); - } + setLocalHITLSafeMode( + libraryAgent.settings?.human_in_the_loop_safe_mode ?? true, + ); + setLocalSensitiveActionSafeMode( + libraryAgent.settings?.sensitive_action_safe_mode ?? false, + ); + setIsLocalStateLoaded(true); } }, [isAgent, libraryAgent]); - const currentSafeMode = isAgent - ? graph.settings?.human_in_the_loop_safe_mode - : localSafeMode; + const currentHITLSafeMode = isAgent + ? (graph.settings?.human_in_the_loop_safe_mode ?? true) + : localHITLSafeMode; - const isStateUndetermined = isAgent - ? graph.settings?.human_in_the_loop_safe_mode == null - : isLoading || localSafeMode === null; + const currentSensitiveActionSafeMode = isAgent + ? (graph.settings?.sensitive_action_safe_mode ?? false) + : localSensitiveActionSafeMode; - const handleToggle = useCallback(async () => { - const newSafeMode = !currentSafeMode; + const isHITLStateUndetermined = isAgent + ? 
false + : isLoading || !isLocalStateLoaded; + + const handleHITLToggle = useCallback(async () => { + const newSafeMode = !currentHITLSafeMode; try { await updateGraphSettings({ @@ -85,7 +101,7 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) { }); if (!isAgent) { - setLocalSafeMode(newSafeMode); + setLocalHITLSafeMode(newSafeMode); } if (isAgent) { @@ -101,37 +117,62 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) { queryClient.invalidateQueries({ queryKey: ["v2", "executions"] }); toast({ - title: `Safe mode ${newSafeMode ? "enabled" : "disabled"}`, + title: `HITL safe mode ${newSafeMode ? "enabled" : "disabled"}`, description: newSafeMode ? "Human-in-the-loop blocks will require manual review" : "Human-in-the-loop blocks will proceed automatically", duration: 2000, }); } catch (error) { - const isNotFoundError = - error instanceof Error && - (error.message.includes("404") || error.message.includes("not found")); - - if (!isAgent && isNotFoundError) { - toast({ - title: "Safe mode not available", - description: - "To configure safe mode, please save this graph to your library first.", - variant: "destructive", - }); - } else { - toast({ - title: "Failed to update safe mode", - description: - error instanceof Error - ? error.message - : "An unexpected error occurred.", - variant: "destructive", - }); - } + handleToggleError(error, isAgent, toast); } }, [ - currentSafeMode, + currentHITLSafeMode, + graphId, + isAgent, + graph.id, + updateGraphSettings, + queryClient, + toast, + ]); + + const handleSensitiveActionToggle = useCallback(async () => { + const newSafeMode = !currentSensitiveActionSafeMode; + + try { + await updateGraphSettings({ + graphId, + data: { sensitive_action_safe_mode: newSafeMode }, + }); + + if (!isAgent) { + setLocalSensitiveActionSafeMode(newSafeMode); + } + + if (isAgent) { + queryClient.invalidateQueries({ + queryKey: getGetV2GetLibraryAgentQueryOptions(graph.id.toString()) + .queryKey, + }); + } + + queryClient.invalidateQueries({ + queryKey: ["v1", "graphs", graphId, "executions"], + }); + queryClient.invalidateQueries({ queryKey: ["v2", "executions"] }); + + toast({ + title: `Sensitive action safe mode ${newSafeMode ? "enabled" : "disabled"}`, + description: newSafeMode + ? 
"Sensitive action blocks will require manual review" + : "Sensitive action blocks will proceed automatically", + duration: 2000, + }); + } catch (error) { + handleToggleError(error, isAgent, toast); + } + }, [ + currentSensitiveActionSafeMode, graphId, isAgent, graph.id, @@ -141,11 +182,53 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) { ]); return { - currentSafeMode, + // HITL safe mode + currentHITLSafeMode, + showHITLToggle, + isHITLStateUndetermined, + handleHITLToggle, + + // Sensitive action safe mode + currentSensitiveActionSafeMode, + showSensitiveActionToggle, + handleSensitiveActionToggle, + + // General isPending, shouldShowToggle, - isStateUndetermined, - handleToggle, - hasHITLBlocks: shouldShowToggle, + + // Backwards compatibility + currentSafeMode: currentHITLSafeMode, + isStateUndetermined: isHITLStateUndetermined, + handleToggle: handleHITLToggle, + hasHITLBlocks: showHITLToggle, }; } + +function handleToggleError( + error: unknown, + isAgent: boolean, + toast: ReturnType["toast"], +) { + const isNotFoundError = + error instanceof Error && + (error.message.includes("404") || error.message.includes("not found")); + + if (!isAgent && isNotFoundError) { + toast({ + title: "Safe mode not available", + description: + "To configure safe mode, please save this graph to your library first.", + variant: "destructive", + }); + } else { + toast({ + title: "Failed to update safe mode", + description: + error instanceof Error + ? error.message + : "An unexpected error occurred.", + variant: "destructive", + }); + } +}