diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index 7b41b040ba..93634c47e3 100644 --- a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -4,14 +4,9 @@ from collections.abc import AsyncGenerator from typing import Any import orjson -from langfuse import Langfuse -from openai import ( - APIConnectionError, - APIError, - APIStatusError, - AsyncOpenAI, - RateLimitError, -) +from langfuse import get_client, propagate_attributes +from langfuse.openai import openai # type: ignore +from openai import APIConnectionError, APIError, APIStatusError, RateLimitError from openai.types.chat import ChatCompletionChunk, ChatCompletionToolParam from backend.data.understanding import ( @@ -21,7 +16,6 @@ from backend.data.understanding import ( from backend.util.exceptions import NotFoundError from backend.util.settings import Settings -from . import db as chat_db from .config import ChatConfig from .model import ( ChatMessage, @@ -50,10 +44,10 @@ logger = logging.getLogger(__name__) config = ChatConfig() settings = Settings() -client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url) +client = openai.AsyncOpenAI(api_key=config.api_key, base_url=config.base_url) -# Langfuse client (lazy initialization) -_langfuse_client: Langfuse | None = None + +langfuse = get_client() class LangfuseNotConfiguredError(Exception): @@ -69,65 +63,6 @@ def _is_langfuse_configured() -> bool: ) -def _get_langfuse_client() -> Langfuse: - """Get or create the Langfuse client for prompt management and tracing.""" - global _langfuse_client - if _langfuse_client is None: - if not _is_langfuse_configured(): - raise LangfuseNotConfiguredError( - "Langfuse is not configured. The chat feature requires Langfuse for prompt management. " - "Please set the LANGFUSE_PUBLIC_KEY and LANGFUSE_SECRET_KEY environment variables." - ) - _langfuse_client = Langfuse( - public_key=settings.secrets.langfuse_public_key, - secret_key=settings.secrets.langfuse_secret_key, - host=settings.secrets.langfuse_host or "https://cloud.langfuse.com", - ) - return _langfuse_client - - -def _get_environment() -> str: - """Get the current environment name for Langfuse tagging.""" - return settings.config.app_env.value - - -def _get_langfuse_prompt() -> str: - """Fetch the latest production prompt from Langfuse. - - Returns: - The compiled prompt text from Langfuse. - - Raises: - Exception: If Langfuse is unavailable or prompt fetch fails. - """ - try: - langfuse = _get_langfuse_client() - # cache_ttl_seconds=0 disables SDK caching to always get the latest prompt - prompt = langfuse.get_prompt(config.langfuse_prompt_name, cache_ttl_seconds=0) - compiled = prompt.compile() - logger.info( - f"Fetched prompt '{config.langfuse_prompt_name}' from Langfuse " - f"(version: {prompt.version})" - ) - return compiled - except Exception as e: - logger.error(f"Failed to fetch prompt from Langfuse: {e}") - raise - - -async def _is_first_session(user_id: str) -> bool: - """Check if this is the user's first chat session. - - Returns True if the user has 1 or fewer sessions (meaning this is their first). 
- """ - try: - session_count = await chat_db.get_user_session_count(user_id) - return session_count <= 1 - except Exception as e: - logger.warning(f"Failed to check session count for user {user_id}: {e}") - return False # Default to non-onboarding if we can't check - - async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]: """Build the full system prompt including business understanding if available. @@ -139,8 +74,6 @@ async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]: Tuple of (compiled prompt string, Langfuse prompt object for tracing) """ - langfuse = _get_langfuse_client() - # cache_ttl_seconds=0 disables SDK caching to always get the latest prompt prompt = langfuse.get_prompt(config.langfuse_prompt_name, cache_ttl_seconds=0) @@ -158,7 +91,7 @@ async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]: context = "This is the first time you are meeting the user. Greet them and introduce them to the platform" compiled = prompt.compile(users_information=context) - return compiled, prompt + return compiled, understanding async def _generate_session_title(message: str) -> str | None: @@ -217,6 +150,7 @@ async def assign_user_to_session( async def stream_chat_completion( session_id: str, message: str | None = None, + tool_call_response: str | None = None, is_user_message: bool = True, user_id: str | None = None, retry_count: int = 0, @@ -256,11 +190,6 @@ async def stream_chat_completion( yield StreamFinish() return - # Langfuse observations will be created after session is loaded (need messages for input) - # Initialize to None so finally block can safely check and end them - trace = None - generation = None - # Only fetch from Redis if session not provided (initial call) if session is None: session = await get_chat_session(session_id, user_id) @@ -336,297 +265,259 @@ async def stream_chat_completion( asyncio.create_task(_update_title()) # Build system prompt with business understanding - system_prompt, langfuse_prompt = await _build_system_prompt(user_id) - - # Build input messages including system prompt for complete Langfuse logging - trace_input_messages = [{"role": "system", "content": system_prompt}] + [ - m.model_dump() for m in session.messages - ] + system_prompt, understanding = await _build_system_prompt(user_id) # Create Langfuse trace for this LLM call (each call gets its own trace, grouped by session_id) # Using v3 SDK: start_observation creates a root span, update_trace sets trace-level attributes - try: - langfuse = _get_langfuse_client() - env = _get_environment() - trace = langfuse.start_observation( - name="chat_completion", - input={"messages": trace_input_messages}, - metadata={ - "environment": env, - "model": config.model, - "message_count": len(session.messages), - "prompt_name": langfuse_prompt.name if langfuse_prompt else None, - "prompt_version": langfuse_prompt.version if langfuse_prompt else None, - }, - ) - # Set trace-level attributes (session_id, user_id, tags) - trace.update_trace( + input = message + if not message and tool_call_response: + input = tool_call_response + + langfuse = get_client() + with langfuse.start_as_current_observation( + as_type="span", + name="user-copilot-request", + input=input, + ) as span: + with propagate_attributes( session_id=session_id, user_id=user_id, - tags=[env, "copilot"], - ) - except Exception as e: - logger.warning(f"Failed to create Langfuse trace: {e}") + tags=["copilot"], + metadata={ + "users_information": format_understanding_for_prompt(understanding)[ + :200 + ] # 
Langfuse only accepts up to 200 chars
+            },
+        ):
 
-    # Initialize variables that will be used in finally block (must be defined before try)
-    assistant_response = ChatMessage(
-        role="assistant",
-        content="",
-    )
-    accumulated_tool_calls: list[dict[str, Any]] = []
-
-    # Wrap main logic in try/finally to ensure Langfuse observations are always ended
-    try:
-        has_yielded_end = False
-        has_yielded_error = False
-        has_done_tool_call = False
-        has_received_text = False
-        text_streaming_ended = False
-        tool_response_messages: list[ChatMessage] = []
-        should_retry = False
-
-        # Generate unique IDs for AI SDK protocol
-        import uuid as uuid_module
-
-        message_id = str(uuid_module.uuid4())
-        text_block_id = str(uuid_module.uuid4())
-
-        # Yield message start
-        yield StreamStart(messageId=message_id)
-
-        # Create Langfuse generation for each LLM call, linked to the prompt
-        # Using v3 SDK: start_observation with as_type="generation"
-        generation = (
-            trace.start_observation(
-                as_type="generation",
-                name="llm_call",
-                model=config.model,
-                input={"messages": trace_input_messages},
-                prompt=langfuse_prompt,
+            # Initialize variables that will be used in finally block (must be defined before try)
+            assistant_response = ChatMessage(
+                role="assistant",
+                content="",
             )
-            if trace
-            else None
-        )
+            accumulated_tool_calls: list[dict[str, Any]] = []
 
-        try:
-            async for chunk in _stream_chat_chunks(
-                session=session,
-                tools=tools,
-                system_prompt=system_prompt,
-                text_block_id=text_block_id,
-            ):
+            # Wrap main logic in try/finally to ensure Langfuse observations are always ended
+            has_yielded_end = False
+            has_yielded_error = False
+            has_done_tool_call = False
+            has_received_text = False
+            text_streaming_ended = False
+            tool_response_messages: list[ChatMessage] = []
+            should_retry = False
 
-                if isinstance(chunk, StreamTextStart):
-                    # Emit text-start before first text delta
-                    if not has_received_text:
+            # Generate unique IDs for AI SDK protocol
+            import uuid as uuid_module
+
+            message_id = str(uuid_module.uuid4())
+            text_block_id = str(uuid_module.uuid4())
+
+            # Yield message start
+            yield StreamStart(messageId=message_id)
+
+            try:
+                async for chunk in _stream_chat_chunks(
+                    session=session,
+                    tools=tools,
+                    system_prompt=system_prompt,
+                    text_block_id=text_block_id,
+                ):
+
+                    if isinstance(chunk, StreamTextStart):
+                        # Emit text-start before first text delta
+                        if not has_received_text:
+                            yield chunk
+                    elif isinstance(chunk, StreamTextDelta):
+                        delta = chunk.delta or ""
+                        assert assistant_response.content is not None
+                        assistant_response.content += delta
+                        has_received_text = True
                         yield chunk
-                elif isinstance(chunk, StreamTextDelta):
-                    delta = chunk.delta or ""
-                    assert assistant_response.content is not None
-                    assistant_response.content += delta
-                    has_received_text = True
-                    yield chunk
-                elif isinstance(chunk, StreamTextEnd):
-                    # Emit text-end after text completes
-                    if has_received_text and not text_streaming_ended:
-                        text_streaming_ended = True
-                        yield chunk
-                elif isinstance(chunk, StreamToolInputStart):
-                    # Emit text-end before first tool call, but only if we've received text
-                    if has_received_text and not text_streaming_ended:
-                        yield StreamTextEnd(id=text_block_id)
-                        text_streaming_ended = True
-                    yield chunk
-                elif isinstance(chunk, StreamToolInputAvailable):
-                    # Accumulate tool calls in OpenAI format
-                    accumulated_tool_calls.append(
-                        {
-                            "id": chunk.toolCallId,
-                            "type": "function",
-                            "function": {
-                                "name": chunk.toolName,
-                                "arguments": orjson.dumps(chunk.input).decode("utf-8"),
-                            },
-                        }
-                    )
-                elif 
isinstance(chunk, StreamToolOutputAvailable): - result_content = ( - chunk.output - if isinstance(chunk.output, str) - else orjson.dumps(chunk.output).decode("utf-8") - ) - tool_response_messages.append( - ChatMessage( - role="tool", - content=result_content, - tool_call_id=chunk.toolCallId, - ) - ) - has_done_tool_call = True - # Track if any tool execution failed - if not chunk.success: - logger.warning( - f"Tool {chunk.toolName} (ID: {chunk.toolCallId}) execution failed" - ) - yield chunk - elif isinstance(chunk, StreamFinish): - if not has_done_tool_call: - # Emit text-end before finish if we received text but haven't closed it + elif isinstance(chunk, StreamTextEnd): + # Emit text-end after text completes + if has_received_text and not text_streaming_ended: + text_streaming_ended = True + if assistant_response.content: + logger.warn( + f"StreamTextEnd: Attempting to set output {assistant_response.content}" + ) + span.update_trace(output=assistant_response.content) + span.update(output=assistant_response.content) + yield chunk + elif isinstance(chunk, StreamToolInputStart): + # Emit text-end before first tool call, but only if we've received text if has_received_text and not text_streaming_ended: yield StreamTextEnd(id=text_block_id) text_streaming_ended = True - has_yielded_end = True yield chunk - elif isinstance(chunk, StreamError): - has_yielded_error = True - elif isinstance(chunk, StreamUsage): - session.usage.append( - Usage( - prompt_tokens=chunk.promptTokens, - completion_tokens=chunk.completionTokens, - total_tokens=chunk.totalTokens, + elif isinstance(chunk, StreamToolInputAvailable): + # Accumulate tool calls in OpenAI format + accumulated_tool_calls.append( + { + "id": chunk.toolCallId, + "type": "function", + "function": { + "name": chunk.toolName, + "arguments": orjson.dumps(chunk.input).decode( + "utf-8" + ), + }, + } ) + elif isinstance(chunk, StreamToolOutputAvailable): + result_content = ( + chunk.output + if isinstance(chunk.output, str) + else orjson.dumps(chunk.output).decode("utf-8") + ) + tool_response_messages.append( + ChatMessage( + role="tool", + content=result_content, + tool_call_id=chunk.toolCallId, + ) + ) + has_done_tool_call = True + # Track if any tool execution failed + if not chunk.success: + logger.warning( + f"Tool {chunk.toolName} (ID: {chunk.toolCallId}) execution failed" + ) + yield chunk + elif isinstance(chunk, StreamFinish): + if not has_done_tool_call: + # Emit text-end before finish if we received text but haven't closed it + if has_received_text and not text_streaming_ended: + yield StreamTextEnd(id=text_block_id) + text_streaming_ended = True + has_yielded_end = True + yield chunk + elif isinstance(chunk, StreamError): + has_yielded_error = True + elif isinstance(chunk, StreamUsage): + session.usage.append( + Usage( + prompt_tokens=chunk.promptTokens, + completion_tokens=chunk.completionTokens, + total_tokens=chunk.totalTokens, + ) + ) + else: + logger.error( + f"Unknown chunk type: {type(chunk)}", exc_info=True + ) + if assistant_response.content: + langfuse.update_current_trace(output=assistant_response.content) + langfuse.update_current_span(output=assistant_response.content) + elif tool_response_messages: + langfuse.update_current_trace(output=str(tool_response_messages)) + langfuse.update_current_span(output=str(tool_response_messages)) + + except Exception as e: + logger.error(f"Error during stream: {e!s}", exc_info=True) + + # Check if this is a retryable error (JSON parsing, incomplete tool calls, etc.) 
+ is_retryable = isinstance( + e, (orjson.JSONDecodeError, KeyError, TypeError) + ) + + if is_retryable and retry_count < config.max_retries: + logger.info( + f"Retryable error encountered. Attempt {retry_count + 1}/{config.max_retries}" ) + should_retry = True else: - logger.error(f"Unknown chunk type: {type(chunk)}", exc_info=True) - except Exception as e: - logger.error(f"Error during stream: {e!s}", exc_info=True) + # Non-retryable error or max retries exceeded + # Save any partial progress before reporting error + messages_to_save: list[ChatMessage] = [] - # Check if this is a retryable error (JSON parsing, incomplete tool calls, etc.) - is_retryable = isinstance(e, (orjson.JSONDecodeError, KeyError, TypeError)) + # Add assistant message if it has content or tool calls + if accumulated_tool_calls: + assistant_response.tool_calls = accumulated_tool_calls + if assistant_response.content or assistant_response.tool_calls: + messages_to_save.append(assistant_response) - if is_retryable and retry_count < config.max_retries: + # Add tool response messages after assistant message + messages_to_save.extend(tool_response_messages) + + session.messages.extend(messages_to_save) + await upsert_chat_session(session) + + if not has_yielded_error: + error_message = str(e) + if not is_retryable: + error_message = f"Non-retryable error: {error_message}" + elif retry_count >= config.max_retries: + error_message = f"Max retries ({config.max_retries}) exceeded: {error_message}" + + error_response = StreamError(errorText=error_message) + yield error_response + if not has_yielded_end: + yield StreamFinish() + return + + # Handle retry outside of exception handler to avoid nesting + if should_retry and retry_count < config.max_retries: logger.info( - f"Retryable error encountered. 
Attempt {retry_count + 1}/{config.max_retries}" + f"Retrying stream_chat_completion for session {session_id}, attempt {retry_count + 1}" ) - should_retry = True - else: - # Non-retryable error or max retries exceeded - # Save any partial progress before reporting error - messages_to_save: list[ChatMessage] = [] + async for chunk in stream_chat_completion( + session_id=session.session_id, + user_id=user_id, + retry_count=retry_count + 1, + session=session, + context=context, + ): + yield chunk + return # Exit after retry to avoid double-saving in finally block - # Add assistant message if it has content or tool calls - if accumulated_tool_calls: - assistant_response.tool_calls = accumulated_tool_calls - if assistant_response.content or assistant_response.tool_calls: - messages_to_save.append(assistant_response) - - # Add tool response messages after assistant message - messages_to_save.extend(tool_response_messages) - - session.messages.extend(messages_to_save) - await upsert_chat_session(session) - - if not has_yielded_error: - error_message = str(e) - if not is_retryable: - error_message = f"Non-retryable error: {error_message}" - elif retry_count >= config.max_retries: - error_message = f"Max retries ({config.max_retries}) exceeded: {error_message}" - - error_response = StreamError(errorText=error_message) - yield error_response - if not has_yielded_end: - yield StreamFinish() - return - - # Handle retry outside of exception handler to avoid nesting - if should_retry and retry_count < config.max_retries: + # Normal completion path - save session and handle tool call continuation logger.info( - f"Retrying stream_chat_completion for session {session_id}, attempt {retry_count + 1}" - ) - async for chunk in stream_chat_completion( - session_id=session.session_id, - user_id=user_id, - retry_count=retry_count + 1, - session=session, - context=context, - ): - yield chunk - return # Exit after retry to avoid double-saving in finally block - - # Normal completion path - save session and handle tool call continuation - logger.info( - f"Normal completion path: session={session.session_id}, " - f"current message_count={len(session.messages)}" - ) - - # Build the messages list in the correct order - messages_to_save: list[ChatMessage] = [] - - # Add assistant message with tool_calls if any - if accumulated_tool_calls: - assistant_response.tool_calls = accumulated_tool_calls - logger.info( - f"Added {len(accumulated_tool_calls)} tool calls to assistant message" - ) - if assistant_response.content or assistant_response.tool_calls: - messages_to_save.append(assistant_response) - logger.info( - f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}" + f"Normal completion path: session={session.session_id}, " + f"current message_count={len(session.messages)}" ) - # Add tool response messages after assistant message - messages_to_save.extend(tool_response_messages) - logger.info( - f"Saving {len(tool_response_messages)} tool response messages, " - f"total_to_save={len(messages_to_save)}" - ) + # Build the messages list in the correct order + messages_to_save: list[ChatMessage] = [] - session.messages.extend(messages_to_save) - logger.info( - f"Extended session messages, new message_count={len(session.messages)}" - ) - await upsert_chat_session(session) - - # If we did a tool call, stream the chat completion again to get the next response - if has_done_tool_call: - logger.info( - "Tool call executed, streaming chat 
completion again to get assistant response" - ) - async for chunk in stream_chat_completion( - session_id=session.session_id, - user_id=user_id, - session=session, # Pass session object to avoid Redis refetch - context=context, - ): - yield chunk - - finally: - # Always end Langfuse observations to prevent resource leaks - # Guard against None and catch errors to avoid masking original exceptions - if generation is not None: - try: - latest_usage = session.usage[-1] if session.usage else None - generation.update( - model=config.model, - output={ - "content": assistant_response.content, - "tool_calls": accumulated_tool_calls or None, - }, - usage_details=( - { - "input": latest_usage.prompt_tokens, - "output": latest_usage.completion_tokens, - "total": latest_usage.total_tokens, - } - if latest_usage - else None - ), + # Add assistant message with tool_calls if any + if accumulated_tool_calls: + assistant_response.tool_calls = accumulated_tool_calls + logger.info( + f"Added {len(accumulated_tool_calls)} tool calls to assistant message" + ) + if assistant_response.content or assistant_response.tool_calls: + messages_to_save.append(assistant_response) + logger.info( + f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}" ) - generation.end() - except Exception as e: - logger.warning(f"Failed to end Langfuse generation: {e}") - if trace is not None: - try: - if accumulated_tool_calls: - trace.update_trace(output={"tool_calls": accumulated_tool_calls}) - else: - trace.update_trace(output={"response": assistant_response.content}) - trace.end() - except Exception as e: - logger.warning(f"Failed to end Langfuse trace: {e}") + # Add tool response messages after assistant message + messages_to_save.extend(tool_response_messages) + logger.info( + f"Saving {len(tool_response_messages)} tool response messages, " + f"total_to_save={len(messages_to_save)}" + ) + + session.messages.extend(messages_to_save) + logger.info( + f"Extended session messages, new message_count={len(session.messages)}" + ) + await upsert_chat_session(session) + + # If we did a tool call, stream the chat completion again to get the next response + if has_done_tool_call: + logger.info( + "Tool call executed, streaming chat completion again to get assistant response" + ) + async for chunk in stream_chat_completion( + session_id=session.session_id, + user_id=user_id, + session=session, # Pass session object to avoid Redis refetch + context=context, + tool_call_response=str(tool_response_messages), + ): + yield chunk # Retry configuration for OpenAI API calls @@ -900,5 +791,4 @@ async def _yield_tool_call( session=session, ) - logger.info(f"Yielding Tool execution response: {tool_execution_response}") yield tool_execution_response diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py b/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py index fc0fdf9064..82ce5cfd6f 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py @@ -30,7 +30,7 @@ TOOL_REGISTRY: dict[str, BaseTool] = { "find_library_agent": FindLibraryAgentTool(), "run_agent": RunAgentTool(), "run_block": RunBlockTool(), - "agent_output": AgentOutputTool(), + "view_agent_output": AgentOutputTool(), "search_docs": SearchDocsTool(), "get_doc_page": GetDocPageTool(), } diff --git 
a/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py b/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py index fe3d5e8984..bd93f0e2a6 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py @@ -3,6 +3,8 @@ import logging from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from backend.data.understanding import ( BusinessUnderstandingInput, @@ -59,6 +61,7 @@ and automations for the user's specific needs.""" """Requires authentication to store user-specific data.""" return True + @observe(as_type="tool", name="add_understanding") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py index aa3cc1555d..0f94135a41 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py @@ -218,6 +218,7 @@ async def save_agent_to_library( library_agents = await library_db.create_library_agent( graph=created_graph, user_id=user_id, + sensitive_action_safe_mode=True, create_library_agents_for_sub_graphs=False, ) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py index d81a11362b..00c6d8499b 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py @@ -5,6 +5,7 @@ import re from datetime import datetime, timedelta, timezone from typing import Any +from langfuse import observe from pydantic import BaseModel, field_validator from backend.api.features.chat.model import ChatSession @@ -103,7 +104,7 @@ class AgentOutputTool(BaseTool): @property def name(self) -> str: - return "agent_output" + return "view_agent_output" @property def description(self) -> str: @@ -328,6 +329,7 @@ class AgentOutputTool(BaseTool): total_executions=len(available_executions) if available_executions else 1, ) + @observe(as_type="tool", name="view_agent_output") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py index c8168f473d..26c980c6c5 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py @@ -3,6 +3,8 @@ import logging from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from .agent_generator import ( @@ -78,6 +80,7 @@ class CreateAgentTool(BaseTool): "required": ["description"], } + @observe(as_type="tool", name="create_agent") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py index 5aaa166036..a50a89c5c7 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py @@ -3,6 +3,8 @@ import logging from typing import Any +from langfuse import 
observe + from backend.api.features.chat.model import ChatSession from .agent_generator import ( @@ -85,6 +87,7 @@ class EditAgentTool(BaseTool): "required": ["agent_id", "changes"], } + @observe(as_type="tool", name="edit_agent") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py index 477522757d..f231ef4484 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py @@ -2,6 +2,8 @@ from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from .agent_search import search_agents @@ -35,6 +37,7 @@ class FindAgentTool(BaseTool): "required": ["query"], } + @observe(as_type="tool", name="find_agent") async def _execute( self, user_id: str | None, session: ChatSession, **kwargs ) -> ToolResponseBase: diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py index a5e66f0a1c..fc20fdfc4a 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py @@ -1,6 +1,7 @@ import logging from typing import Any +from langfuse import observe from prisma.enums import ContentType from backend.api.features.chat.model import ChatSession @@ -55,6 +56,7 @@ class FindBlockTool(BaseTool): def requires_auth(self) -> bool: return True + @observe(as_type="tool", name="find_block") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py index 108fba75ae..d9b5edfa9b 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py @@ -2,6 +2,8 @@ from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from .agent_search import search_agents @@ -41,6 +43,7 @@ class FindLibraryAgentTool(BaseTool): def requires_auth(self) -> bool: return True + @observe(as_type="tool", name="find_library_agent") async def _execute( self, user_id: str | None, session: ChatSession, **kwargs ) -> ToolResponseBase: diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py b/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py index 7040cd7db5..b2fdcccfcd 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py @@ -4,6 +4,8 @@ import logging from pathlib import Path from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from backend.api.features.chat.tools.base import BaseTool from backend.api.features.chat.tools.models import ( @@ -71,6 +73,7 @@ class GetDocPageTool(BaseTool): url_path = path.rsplit(".", 1)[0] if "." 
in path else path return f"{DOCS_BASE_URL}/{url_path}" + @observe(as_type="tool", name="get_doc_page") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py index 1f0a836543..b212c11e8a 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py @@ -3,6 +3,7 @@ import logging from typing import Any +from langfuse import observe from pydantic import BaseModel, Field, field_validator from backend.api.features.chat.config import ChatConfig @@ -32,7 +33,7 @@ from .models import ( UserReadiness, ) from .utils import ( - check_user_has_required_credentials, + build_missing_credentials_from_graph, extract_credentials_from_schema, fetch_graph_from_store_slug, get_or_create_library_agent, @@ -154,6 +155,7 @@ class RunAgentTool(BaseTool): """All operations require authentication.""" return True + @observe(as_type="tool", name="run_agent") async def _execute( self, user_id: str | None, @@ -235,15 +237,13 @@ class RunAgentTool(BaseTool): # Return credentials needed response with input data info # The UI handles credential setup automatically, so the message # focuses on asking about input data - credentials = extract_credentials_from_schema( - graph.credentials_input_schema + requirements_creds_dict = build_missing_credentials_from_graph( + graph, None ) - missing_creds_check = await check_user_has_required_credentials( - user_id, credentials + missing_credentials_dict = build_missing_credentials_from_graph( + graph, graph_credentials ) - missing_credentials_dict = { - c.id: c.model_dump() for c in missing_creds_check - } + requirements_creds_list = list(requirements_creds_dict.values()) return SetupRequirementsResponse( message=self._build_inputs_message(graph, MSG_WHAT_VALUES_TO_USE), @@ -257,7 +257,7 @@ class RunAgentTool(BaseTool): ready_to_run=False, ), requirements={ - "credentials": [c.model_dump() for c in credentials], + "credentials": requirements_creds_list, "inputs": self._get_inputs_list(graph.input_schema), "execution_modes": self._get_execution_modes(graph), }, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py index 48cbcb5e5c..c29cc92556 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py @@ -4,6 +4,8 @@ import logging from collections import defaultdict from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from backend.data.block import get_block from backend.data.execution import ExecutionContext @@ -20,6 +22,7 @@ from .models import ( ToolResponseBase, UserReadiness, ) +from .utils import build_missing_credentials_from_field_info logger = logging.getLogger(__name__) @@ -127,6 +130,7 @@ class RunBlockTool(BaseTool): return matched_credentials, missing_credentials + @observe(as_type="tool", name="run_block") async def _execute( self, user_id: str | None, @@ -186,7 +190,11 @@ class RunBlockTool(BaseTool): if missing_credentials: # Return setup requirements response with missing credentials - missing_creds_dict = {c.id: c.model_dump() for c in missing_credentials} + credentials_fields_info = block.input_schema.get_credentials_fields_info() + missing_creds_dict = 
build_missing_credentials_from_field_info( + credentials_fields_info, set(matched_credentials.keys()) + ) + missing_creds_list = list(missing_creds_dict.values()) return SetupRequirementsResponse( message=( @@ -203,7 +211,7 @@ class RunBlockTool(BaseTool): ready_to_run=False, ), requirements={ - "credentials": [c.model_dump() for c in missing_credentials], + "credentials": missing_creds_list, "inputs": self._get_inputs_list(block), "execution_modes": ["immediate"], }, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py b/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py index edb0c0de1e..4903230b40 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py @@ -3,6 +3,7 @@ import logging from typing import Any +from langfuse import observe from prisma.enums import ContentType from backend.api.features.chat.model import ChatSession @@ -87,6 +88,7 @@ class SearchDocsTool(BaseTool): url_path = path.rsplit(".", 1)[0] if "." in path else path return f"{DOCS_BASE_URL}/{url_path}" + @observe(as_type="tool", name="search_docs") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py index 19e092c312..a2ac91dc65 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py @@ -8,7 +8,7 @@ from backend.api.features.library import model as library_model from backend.api.features.store import db as store_db from backend.data import graph as graph_db from backend.data.graph import GraphModel -from backend.data.model import CredentialsMetaInput +from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.util.exceptions import NotFoundError @@ -89,6 +89,59 @@ def extract_credentials_from_schema( return credentials +def _serialize_missing_credential( + field_key: str, field_info: CredentialsFieldInfo +) -> dict[str, Any]: + """ + Convert credential field info into a serializable dict that preserves all supported + credential types (e.g., api_key + oauth2) so the UI can offer multiple options. + """ + supported_types = sorted(field_info.supported_types) + provider = next(iter(field_info.provider), "unknown") + scopes = sorted(field_info.required_scopes or []) + + return { + "id": field_key, + "title": field_key.replace("_", " ").title(), + "provider": provider, + "provider_name": provider.replace("_", " ").title(), + "type": supported_types[0] if supported_types else "api_key", + "types": supported_types, + "scopes": scopes, + } + + +def build_missing_credentials_from_graph( + graph: GraphModel, matched_credentials: dict[str, CredentialsMetaInput] | None +) -> dict[str, Any]: + """ + Build a missing_credentials mapping from a graph's aggregated credentials inputs, + preserving all supported credential types for each field. 
+ """ + matched_keys = set(matched_credentials.keys()) if matched_credentials else set() + aggregated_fields = graph.aggregate_credentials_inputs() + + return { + field_key: _serialize_missing_credential(field_key, field_info) + for field_key, (field_info, _node_fields) in aggregated_fields.items() + if field_key not in matched_keys + } + + +def build_missing_credentials_from_field_info( + credential_fields: dict[str, CredentialsFieldInfo], + matched_keys: set[str], +) -> dict[str, Any]: + """ + Build missing_credentials mapping from a simple credentials field info dictionary. + """ + return { + field_key: _serialize_missing_credential(field_key, field_info) + for field_key, field_info in credential_fields.items() + if field_key not in matched_keys + } + + def extract_credentials_as_dict( credentials_input_schema: dict[str, Any] | None, ) -> dict[str, CredentialsMetaInput]: diff --git a/autogpt_platform/backend/backend/api/features/library/db.py b/autogpt_platform/backend/backend/api/features/library/db.py index 1c17e7b36c..0c775802db 100644 --- a/autogpt_platform/backend/backend/api/features/library/db.py +++ b/autogpt_platform/backend/backend/api/features/library/db.py @@ -401,27 +401,11 @@ async def add_generated_agent_image( ) -def _initialize_graph_settings(graph: graph_db.GraphModel) -> GraphSettings: - """ - Initialize GraphSettings based on graph content. - - Args: - graph: The graph to analyze - - Returns: - GraphSettings with appropriate human_in_the_loop_safe_mode value - """ - if graph.has_human_in_the_loop: - # Graph has HITL blocks - set safe mode to True by default - return GraphSettings(human_in_the_loop_safe_mode=True) - else: - # Graph has no HITL blocks - keep None - return GraphSettings(human_in_the_loop_safe_mode=None) - - async def create_library_agent( graph: graph_db.GraphModel, user_id: str, + hitl_safe_mode: bool = True, + sensitive_action_safe_mode: bool = False, create_library_agents_for_sub_graphs: bool = True, ) -> list[library_model.LibraryAgent]: """ @@ -430,6 +414,8 @@ async def create_library_agent( Args: agent: The agent/Graph to add to the library. user_id: The user to whom the agent will be added. + hitl_safe_mode: Whether HITL blocks require manual review (default True). + sensitive_action_safe_mode: Whether sensitive action blocks require review. create_library_agents_for_sub_graphs: If True, creates LibraryAgent records for sub-graphs as well. Returns: @@ -465,7 +451,11 @@ async def create_library_agent( } }, settings=SafeJson( - _initialize_graph_settings(graph_entry).model_dump() + GraphSettings.from_graph( + graph_entry, + hitl_safe_mode=hitl_safe_mode, + sensitive_action_safe_mode=sensitive_action_safe_mode, + ).model_dump() ), ), include=library_agent_include( @@ -627,33 +617,6 @@ async def update_library_agent( raise DatabaseError("Failed to update library agent") from e -async def update_library_agent_settings( - user_id: str, - agent_id: str, - settings: GraphSettings, -) -> library_model.LibraryAgent: - """ - Updates the settings for a specific LibraryAgent. - - Args: - user_id: The owner of the LibraryAgent. - agent_id: The ID of the LibraryAgent to update. - settings: New GraphSettings to apply. - - Returns: - The updated LibraryAgent. - - Raises: - NotFoundError: If the specified LibraryAgent does not exist. - DatabaseError: If there's an error in the update operation. 
- """ - return await update_library_agent( - library_agent_id=agent_id, - user_id=user_id, - settings=settings, - ) - - async def delete_library_agent( library_agent_id: str, user_id: str, soft_delete: bool = True ) -> None: @@ -838,7 +801,7 @@ async def add_store_agent_to_library( "isCreatedByUser": False, "useGraphIsActiveVersion": False, "settings": SafeJson( - _initialize_graph_settings(graph_model).model_dump() + GraphSettings.from_graph(graph_model).model_dump() ), }, include=library_agent_include( @@ -1228,8 +1191,15 @@ async def fork_library_agent( ) new_graph = await on_graph_activate(new_graph, user_id=user_id) - # Create a library agent for the new graph - return (await create_library_agent(new_graph, user_id))[0] + # Create a library agent for the new graph, preserving safe mode settings + return ( + await create_library_agent( + new_graph, + user_id, + hitl_safe_mode=original_agent.settings.human_in_the_loop_safe_mode, + sensitive_action_safe_mode=original_agent.settings.sensitive_action_safe_mode, + ) + )[0] except prisma.errors.PrismaError as e: logger.error(f"Database error cloning library agent: {e}") raise DatabaseError("Failed to fork library agent") from e diff --git a/autogpt_platform/backend/backend/api/features/library/model.py b/autogpt_platform/backend/backend/api/features/library/model.py index 56fad7bfd3..14d7c7be81 100644 --- a/autogpt_platform/backend/backend/api/features/library/model.py +++ b/autogpt_platform/backend/backend/api/features/library/model.py @@ -73,6 +73,12 @@ class LibraryAgent(pydantic.BaseModel): has_external_trigger: bool = pydantic.Field( description="Whether the agent has an external trigger (e.g. webhook) node" ) + has_human_in_the_loop: bool = pydantic.Field( + description="Whether the agent has human-in-the-loop blocks" + ) + has_sensitive_action: bool = pydantic.Field( + description="Whether the agent has sensitive action blocks" + ) trigger_setup_info: Optional[GraphTriggerInfo] = None # Indicates whether there's a new output (based on recent runs) @@ -180,6 +186,8 @@ class LibraryAgent(pydantic.BaseModel): graph.credentials_input_schema if sub_graphs is not None else None ), has_external_trigger=graph.has_external_trigger, + has_human_in_the_loop=graph.has_human_in_the_loop, + has_sensitive_action=graph.has_sensitive_action, trigger_setup_info=graph.trigger_setup_info, new_output=new_output, can_access_graph=can_access_graph, diff --git a/autogpt_platform/backend/backend/api/features/library/routes_test.py b/autogpt_platform/backend/backend/api/features/library/routes_test.py index 0f05240a7f..ca604af760 100644 --- a/autogpt_platform/backend/backend/api/features/library/routes_test.py +++ b/autogpt_platform/backend/backend/api/features/library/routes_test.py @@ -52,6 +52,8 @@ async def test_get_library_agents_success( output_schema={"type": "object", "properties": {}}, credentials_input_schema={"type": "object", "properties": {}}, has_external_trigger=False, + has_human_in_the_loop=False, + has_sensitive_action=False, status=library_model.LibraryAgentStatus.COMPLETED, recommended_schedule_cron=None, new_output=False, @@ -75,6 +77,8 @@ async def test_get_library_agents_success( output_schema={"type": "object", "properties": {}}, credentials_input_schema={"type": "object", "properties": {}}, has_external_trigger=False, + has_human_in_the_loop=False, + has_sensitive_action=False, status=library_model.LibraryAgentStatus.COMPLETED, recommended_schedule_cron=None, new_output=False, @@ -150,6 +154,8 @@ async def 
test_get_favorite_library_agents_success( output_schema={"type": "object", "properties": {}}, credentials_input_schema={"type": "object", "properties": {}}, has_external_trigger=False, + has_human_in_the_loop=False, + has_sensitive_action=False, status=library_model.LibraryAgentStatus.COMPLETED, recommended_schedule_cron=None, new_output=False, @@ -218,6 +224,8 @@ def test_add_agent_to_library_success( output_schema={"type": "object", "properties": {}}, credentials_input_schema={"type": "object", "properties": {}}, has_external_trigger=False, + has_human_in_the_loop=False, + has_sensitive_action=False, status=library_model.LibraryAgentStatus.COMPLETED, new_output=False, can_access_graph=True, diff --git a/autogpt_platform/backend/backend/api/features/store/embeddings.py b/autogpt_platform/backend/backend/api/features/store/embeddings.py index a421d8cd93..68b0d6aea0 100644 --- a/autogpt_platform/backend/backend/api/features/store/embeddings.py +++ b/autogpt_platform/backend/backend/api/features/store/embeddings.py @@ -154,15 +154,16 @@ async def store_content_embedding( # Upsert the embedding # WHERE clause in DO UPDATE prevents PostgreSQL 15 bug with NULLS NOT DISTINCT + # Use {pgvector_schema}.vector for explicit pgvector type qualification await execute_raw_with_schema( """ INSERT INTO {schema_prefix}"UnifiedContentEmbedding" ( "id", "contentType", "contentId", "userId", "embedding", "searchableText", "metadata", "createdAt", "updatedAt" ) - VALUES (gen_random_uuid()::text, $1::{schema_prefix}"ContentType", $2, $3, $4::vector, $5, $6::jsonb, NOW(), NOW()) + VALUES (gen_random_uuid()::text, $1::{schema_prefix}"ContentType", $2, $3, $4::{pgvector_schema}.vector, $5, $6::jsonb, NOW(), NOW()) ON CONFLICT ("contentType", "contentId", "userId") DO UPDATE SET - "embedding" = $4::vector, + "embedding" = $4::{pgvector_schema}.vector, "searchableText" = $5, "metadata" = $6::jsonb, "updatedAt" = NOW() @@ -177,7 +178,6 @@ async def store_content_embedding( searchable_text, metadata_json, client=client, - set_public_search_path=True, ) logger.info(f"Stored embedding for {content_type}:{content_id}") @@ -236,7 +236,6 @@ async def get_content_embedding( content_type, content_id, user_id, - set_public_search_path=True, ) if result and len(result) > 0: @@ -871,31 +870,46 @@ async def semantic_search( # Add content type parameters and build placeholders dynamically content_type_start_idx = len(params) + 1 content_type_placeholders = ", ".join( - f'${content_type_start_idx + i}::{{{{schema_prefix}}}}"ContentType"' + "$" + str(content_type_start_idx + i) + '::{schema_prefix}"ContentType"' for i in range(len(content_types)) ) params.extend([ct.value for ct in content_types]) - sql = f""" + # Build min_similarity param index before appending + min_similarity_idx = len(params) + 1 + params.append(min_similarity) + + # Use regular string (not f-string) for template to preserve {schema_prefix} and {schema} placeholders + # Use OPERATOR({pgvector_schema}.<=>) for explicit operator schema qualification + sql = ( + """ SELECT "contentId" as content_id, "contentType" as content_type, "searchableText" as searchable_text, metadata, - 1 - (embedding <=> '{embedding_str}'::vector) as similarity - FROM {{{{schema_prefix}}}}"UnifiedContentEmbedding" - WHERE "contentType" IN ({content_type_placeholders}) - {user_filter} - AND 1 - (embedding <=> '{embedding_str}'::vector) >= ${len(params) + 1} + 1 - (embedding OPERATOR({pgvector_schema}.<=>) '""" + + embedding_str + + """'::{pgvector_schema}.vector) as similarity + FROM 
{schema_prefix}"UnifiedContentEmbedding" + WHERE "contentType" IN (""" + + content_type_placeholders + + """) + """ + + user_filter + + """ + AND 1 - (embedding OPERATOR({pgvector_schema}.<=>) '""" + + embedding_str + + """'::{pgvector_schema}.vector) >= $""" + + str(min_similarity_idx) + + """ ORDER BY similarity DESC LIMIT $1 """ - params.append(min_similarity) + ) try: - results = await query_raw_with_schema( - sql, *params, set_public_search_path=True - ) + results = await query_raw_with_schema(sql, *params) return [ { "content_id": row["content_id"], @@ -922,31 +936,41 @@ async def semantic_search( # Add content type parameters and build placeholders dynamically content_type_start_idx = len(params_lexical) + 1 content_type_placeholders_lexical = ", ".join( - f'${content_type_start_idx + i}::{{{{schema_prefix}}}}"ContentType"' + "$" + str(content_type_start_idx + i) + '::{schema_prefix}"ContentType"' for i in range(len(content_types)) ) params_lexical.extend([ct.value for ct in content_types]) - sql_lexical = f""" + # Build query param index before appending + query_param_idx = len(params_lexical) + 1 + params_lexical.append(f"%{query}%") + + # Use regular string (not f-string) for template to preserve {schema_prefix} placeholders + sql_lexical = ( + """ SELECT "contentId" as content_id, "contentType" as content_type, "searchableText" as searchable_text, metadata, 0.0 as similarity - FROM {{{{schema_prefix}}}}"UnifiedContentEmbedding" - WHERE "contentType" IN ({content_type_placeholders_lexical}) - {user_filter} - AND "searchableText" ILIKE ${len(params_lexical) + 1} + FROM {schema_prefix}"UnifiedContentEmbedding" + WHERE "contentType" IN (""" + + content_type_placeholders_lexical + + """) + """ + + user_filter + + """ + AND "searchableText" ILIKE $""" + + str(query_param_idx) + + """ ORDER BY "updatedAt" DESC LIMIT $1 """ - params_lexical.append(f"%{query}%") + ) try: - results = await query_raw_with_schema( - sql_lexical, *params_lexical, set_public_search_path=True - ) + results = await query_raw_with_schema(sql_lexical, *params_lexical) return [ { "content_id": row["content_id"], diff --git a/autogpt_platform/backend/backend/api/features/store/embeddings_test.py b/autogpt_platform/backend/backend/api/features/store/embeddings_test.py index a17e393472..8cb471379b 100644 --- a/autogpt_platform/backend/backend/api/features/store/embeddings_test.py +++ b/autogpt_platform/backend/backend/api/features/store/embeddings_test.py @@ -155,18 +155,14 @@ async def test_store_embedding_success(mocker): ) assert result is True - # execute_raw is called twice: once for SET search_path, once for INSERT - assert mock_client.execute_raw.call_count == 2 + # execute_raw is called once for INSERT (no separate SET search_path needed) + assert mock_client.execute_raw.call_count == 1 - # First call: SET search_path - first_call_args = mock_client.execute_raw.call_args_list[0][0] - assert "SET search_path" in first_call_args[0] - - # Second call: INSERT query with the actual data - second_call_args = mock_client.execute_raw.call_args_list[1][0] - assert "test-version-id" in second_call_args - assert "[0.1,0.2,0.3]" in second_call_args - assert None in second_call_args # userId should be None for store agents + # Verify the INSERT query with the actual data + call_args = mock_client.execute_raw.call_args_list[0][0] + assert "test-version-id" in call_args + assert "[0.1,0.2,0.3]" in call_args + assert None in call_args # userId should be None for store agents @pytest.mark.asyncio(loop_scope="session") diff 
--git a/autogpt_platform/backend/backend/api/features/store/hybrid_search.py b/autogpt_platform/backend/backend/api/features/store/hybrid_search.py index f9c831f93b..2f2beb80ff 100644 --- a/autogpt_platform/backend/backend/api/features/store/hybrid_search.py +++ b/autogpt_platform/backend/backend/api/features/store/hybrid_search.py @@ -12,7 +12,7 @@ from dataclasses import dataclass from typing import Any, Literal from prisma.enums import ContentType -from rank_bm25 import BM25Okapi +from rank_bm25 import BM25Okapi # type: ignore[import-untyped] from backend.api.features.store.embeddings import ( EMBEDDING_DIM, @@ -295,7 +295,7 @@ async def unified_hybrid_search( FROM {{schema_prefix}}"UnifiedContentEmbedding" uce WHERE uce."contentType" = ANY({content_types_param}::{{schema_prefix}}"ContentType"[]) {user_filter} - ORDER BY uce.embedding <=> {embedding_param}::vector + ORDER BY uce.embedding OPERATOR({{pgvector_schema}}.<=>) {embedding_param}::{{pgvector_schema}}.vector LIMIT 200 ) ), @@ -307,7 +307,7 @@ async def unified_hybrid_search( uce.metadata, uce."updatedAt" as updated_at, -- Semantic score: cosine similarity (1 - distance) - COALESCE(1 - (uce.embedding <=> {embedding_param}::vector), 0) as semantic_score, + COALESCE(1 - (uce.embedding OPERATOR({{pgvector_schema}}.<=>) {embedding_param}::{{pgvector_schema}}.vector), 0) as semantic_score, -- Lexical score: ts_rank_cd COALESCE(ts_rank_cd(uce.search, plainto_tsquery('english', {query_param})), 0) as lexical_raw, -- Category match from metadata @@ -363,9 +363,7 @@ async def unified_hybrid_search( LIMIT {limit_param} OFFSET {offset_param} """ - results = await query_raw_with_schema( - sql_query, *params, set_public_search_path=True - ) + results = await query_raw_with_schema(sql_query, *params) total = results[0]["total_count"] if results else 0 # Apply BM25 reranking @@ -585,7 +583,7 @@ async def hybrid_search( WHERE uce."contentType" = 'STORE_AGENT'::{{schema_prefix}}"ContentType" AND uce."userId" IS NULL AND {where_clause} - ORDER BY uce.embedding <=> {embedding_param}::vector + ORDER BY uce.embedding OPERATOR({{pgvector_schema}}.<=>) {embedding_param}::{{pgvector_schema}}.vector LIMIT 200 ) uce ), @@ -607,7 +605,7 @@ async def hybrid_search( -- Searchable text for BM25 reranking COALESCE(sa.agent_name, '') || ' ' || COALESCE(sa.sub_heading, '') || ' ' || COALESCE(sa.description, '') as searchable_text, -- Semantic score - COALESCE(1 - (uce.embedding <=> {embedding_param}::vector), 0) as semantic_score, + COALESCE(1 - (uce.embedding OPERATOR({{pgvector_schema}}.<=>) {embedding_param}::{{pgvector_schema}}.vector), 0) as semantic_score, -- Lexical score (raw, will normalize) COALESCE(ts_rank_cd(uce.search, plainto_tsquery('english', {query_param})), 0) as lexical_raw, -- Category match @@ -688,9 +686,7 @@ async def hybrid_search( LIMIT {limit_param} OFFSET {offset_param} """ - results = await query_raw_with_schema( - sql_query, *params, set_public_search_path=True - ) + results = await query_raw_with_schema(sql_query, *params) total = results[0]["total_count"] if results else 0 diff --git a/autogpt_platform/backend/backend/api/features/v1.py b/autogpt_platform/backend/backend/api/features/v1.py index 661e8ff7f2..3a5dd3ec12 100644 --- a/autogpt_platform/backend/backend/api/features/v1.py +++ b/autogpt_platform/backend/backend/api/features/v1.py @@ -761,10 +761,8 @@ async def create_new_graph( graph.reassign_ids(user_id=user_id, reassign_graph_id=True) graph.validate_graph(for_run=False) - # The return value of the create graph & library 
function is intentionally not used here, - # as the graph already valid and no sub-graphs are returned back. await graph_db.create_graph(graph, user_id=user_id) - await library_db.create_library_agent(graph, user_id=user_id) + await library_db.create_library_agent(graph, user_id) activated_graph = await on_graph_activate(graph, user_id=user_id) if create_graph.source == "builder": @@ -888,21 +886,19 @@ async def set_graph_active_version( async def _update_library_agent_version_and_settings( user_id: str, agent_graph: graph_db.GraphModel ) -> library_model.LibraryAgent: - # Keep the library agent up to date with the new active version library = await library_db.update_agent_version_in_library( user_id, agent_graph.id, agent_graph.version ) - # If the graph has HITL node, initialize the setting if it's not already set. - if ( - agent_graph.has_human_in_the_loop - and library.settings.human_in_the_loop_safe_mode is None - ): - await library_db.update_library_agent_settings( + updated_settings = GraphSettings.from_graph( + graph=agent_graph, + hitl_safe_mode=library.settings.human_in_the_loop_safe_mode, + sensitive_action_safe_mode=library.settings.sensitive_action_safe_mode, + ) + if updated_settings != library.settings: + library = await library_db.update_library_agent( + library_agent_id=library.id, user_id=user_id, - agent_id=library.id, - settings=library.settings.model_copy( - update={"human_in_the_loop_safe_mode": True} - ), + settings=updated_settings, ) return library @@ -919,21 +915,18 @@ async def update_graph_settings( user_id: Annotated[str, Security(get_user_id)], ) -> GraphSettings: """Update graph settings for the user's library agent.""" - # Get the library agent for this graph library_agent = await library_db.get_library_agent_by_graph_id( graph_id=graph_id, user_id=user_id ) if not library_agent: raise HTTPException(404, f"Graph #{graph_id} not found in user's library") - # Update the library agent settings - updated_agent = await library_db.update_library_agent_settings( + updated_agent = await library_db.update_library_agent( + library_agent_id=library_agent.id, user_id=user_id, - agent_id=library_agent.id, settings=settings, ) - # Return the updated settings return GraphSettings.model_validate(updated_agent.settings) diff --git a/autogpt_platform/backend/backend/blocks/data_manipulation.py b/autogpt_platform/backend/backend/blocks/data_manipulation.py index 5864a517ed..1014236b8c 100644 --- a/autogpt_platform/backend/backend/blocks/data_manipulation.py +++ b/autogpt_platform/backend/backend/blocks/data_manipulation.py @@ -680,3 +680,58 @@ class ListIsEmptyBlock(Block): async def run(self, input_data: Input, **kwargs) -> BlockOutput: yield "is_empty", len(input_data.list) == 0 + + +class ConcatenateListsBlock(Block): + class Input(BlockSchemaInput): + lists: List[List[Any]] = SchemaField( + description="A list of lists to concatenate together. All lists will be combined in order into a single list.", + placeholder="e.g., [[1, 2], [3, 4], [5, 6]]", + ) + + class Output(BlockSchemaOutput): + concatenated_list: List[Any] = SchemaField( + description="The concatenated list containing all elements from all input lists in order." + ) + error: str = SchemaField( + description="Error message if concatenation failed due to invalid input types." + ) + + def __init__(self): + super().__init__( + id="3cf9298b-5817-4141-9d80-7c2cc5199c8e", + description="Concatenates multiple lists into a single list. 
All elements from all input lists are combined in order.", + categories={BlockCategory.BASIC}, + input_schema=ConcatenateListsBlock.Input, + output_schema=ConcatenateListsBlock.Output, + test_input=[ + {"lists": [[1, 2, 3], [4, 5, 6]]}, + {"lists": [["a", "b"], ["c"], ["d", "e", "f"]]}, + {"lists": [[1, 2], []]}, + {"lists": []}, + ], + test_output=[ + ("concatenated_list", [1, 2, 3, 4, 5, 6]), + ("concatenated_list", ["a", "b", "c", "d", "e", "f"]), + ("concatenated_list", [1, 2]), + ("concatenated_list", []), + ], + ) + + async def run(self, input_data: Input, **kwargs) -> BlockOutput: + concatenated = [] + for idx, lst in enumerate(input_data.lists): + if lst is None: + # Skip None values to avoid errors + continue + if not isinstance(lst, list): + # Type validation: each item must be a list + # Strings are iterable and would cause extend() to iterate character-by-character + # Non-iterable types would raise TypeError + yield "error", ( + f"Invalid input at index {idx}: expected a list, got {type(lst).__name__}. " + f"All items in 'lists' must be lists (e.g., [[1, 2], [3, 4]])." + ) + return + concatenated.extend(lst) + yield "concatenated_list", concatenated diff --git a/autogpt_platform/backend/backend/blocks/helpers/review.py b/autogpt_platform/backend/backend/blocks/helpers/review.py index f35397e6aa..80c28cfd14 100644 --- a/autogpt_platform/backend/backend/blocks/helpers/review.py +++ b/autogpt_platform/backend/backend/blocks/helpers/review.py @@ -84,7 +84,7 @@ class HITLReviewHelper: Exception: If review creation or status update fails """ # Skip review if safe mode is disabled - return auto-approved result - if not execution_context.safe_mode: + if not execution_context.human_in_the_loop_safe_mode: logger.info( f"Block {block_name} skipping review for node {node_exec_id} - safe mode disabled" ) diff --git a/autogpt_platform/backend/backend/blocks/human_in_the_loop.py b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py index 1e338816c8..b6106843bd 100644 --- a/autogpt_platform/backend/backend/blocks/human_in_the_loop.py +++ b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py @@ -104,7 +104,7 @@ class HumanInTheLoopBlock(Block): execution_context: ExecutionContext, **_kwargs, ) -> BlockOutput: - if not execution_context.safe_mode: + if not execution_context.human_in_the_loop_safe_mode: logger.info( f"HITL block skipping review for node {node_exec_id} - safe mode disabled" ) diff --git a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py index 8266d433ad..0f9da7e10b 100644 --- a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py +++ b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py @@ -242,7 +242,7 @@ async def test_smart_decision_maker_tracks_llm_stats(): outputs = {} # Create execution context - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests @@ -343,7 +343,7 @@ async def test_smart_decision_maker_parameter_validation(): # Create execution context - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests @@ -409,7 +409,7 @@ async def test_smart_decision_maker_parameter_validation(): # Create execution context - mock_execution_context = 
ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests @@ -471,7 +471,7 @@ async def test_smart_decision_maker_parameter_validation(): outputs = {} # Create execution context - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests @@ -535,7 +535,7 @@ async def test_smart_decision_maker_parameter_validation(): outputs = {} # Create execution context - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests @@ -658,7 +658,7 @@ async def test_smart_decision_maker_raw_response_conversion(): outputs = {} # Create execution context - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests @@ -730,7 +730,7 @@ async def test_smart_decision_maker_raw_response_conversion(): outputs = {} # Create execution context - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests @@ -786,7 +786,7 @@ async def test_smart_decision_maker_raw_response_conversion(): outputs = {} # Create execution context - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests @@ -905,7 +905,7 @@ async def test_smart_decision_maker_agent_mode(): # Create a mock execution context mock_execution_context = ExecutionContext( - safe_mode=False, + human_in_the_loop_safe_mode=False, ) # Create a mock execution processor for agent mode tests @@ -1027,7 +1027,7 @@ async def test_smart_decision_maker_traditional_mode_default(): # Create execution context - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests diff --git a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py index af89a83f86..0427b13466 100644 --- a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py +++ b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py @@ -386,7 +386,7 @@ async def test_output_yielding_with_dynamic_fields(): outputs = {} from backend.data.execution import ExecutionContext - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) mock_execution_processor = MagicMock() async for output_name, output_value in block.run( @@ -609,7 +609,9 @@ async def test_validation_errors_dont_pollute_conversation(): outputs = {} from backend.data.execution import ExecutionContext - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext( + human_in_the_loop_safe_mode=False + ) # Create a proper mock execution processor for agent mode from collections import defaultdict diff --git a/autogpt_platform/backend/backend/data/block.py b/autogpt_platform/backend/backend/data/block.py index 
bfd4a35ec2..4bfa3892e2 100644 --- a/autogpt_platform/backend/backend/data/block.py +++ b/autogpt_platform/backend/backend/data/block.py @@ -474,7 +474,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]): self.block_type = block_type self.webhook_config = webhook_config self.execution_stats: NodeExecutionStats = NodeExecutionStats() - self.requires_human_review: bool = False + self.is_sensitive_action: bool = False if self.webhook_config: if isinstance(self.webhook_config, BlockWebhookConfig): @@ -637,8 +637,9 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]): - should_pause: True if execution should be paused for review - input_data_to_use: The input data to use (may be modified by reviewer) """ - # Skip review if not required or safe mode is disabled - if not self.requires_human_review or not execution_context.safe_mode: + if not ( + self.is_sensitive_action and execution_context.sensitive_action_safe_mode + ): return False, input_data from backend.blocks.helpers.review import HITLReviewHelper diff --git a/autogpt_platform/backend/backend/data/db.py b/autogpt_platform/backend/backend/data/db.py index ab39881ed5..bdba5eca02 100644 --- a/autogpt_platform/backend/backend/data/db.py +++ b/autogpt_platform/backend/backend/data/db.py @@ -38,20 +38,6 @@ POOL_TIMEOUT = os.getenv("DB_POOL_TIMEOUT") if POOL_TIMEOUT: DATABASE_URL = add_param(DATABASE_URL, "pool_timeout", POOL_TIMEOUT) -# Add public schema to search_path for pgvector type access -# The vector extension is in public schema, but search_path is determined by schema parameter -# Extract the schema from DATABASE_URL or default to 'public' (matching get_database_schema()) -parsed_url = urlparse(DATABASE_URL) -url_params = dict(parse_qsl(parsed_url.query)) -db_schema = url_params.get("schema", "public") -# Build search_path, avoiding duplicates if db_schema is already 'public' -search_path_schemas = list( - dict.fromkeys([db_schema, "public"]) -) # Preserves order, removes duplicates -search_path = ",".join(search_path_schemas) -# This allows using ::vector without schema qualification -DATABASE_URL = add_param(DATABASE_URL, "options", f"-c search_path={search_path}") - HTTP_TIMEOUT = int(POOL_TIMEOUT) if POOL_TIMEOUT else None prisma = Prisma( @@ -127,38 +113,48 @@ async def _raw_with_schema( *args, execute: bool = False, client: Prisma | None = None, - set_public_search_path: bool = False, ) -> list[dict] | int: """Internal: Execute raw SQL with proper schema handling. Use query_raw_with_schema() or execute_raw_with_schema() instead. + Supports placeholders: + - {schema_prefix}: Table/type prefix (e.g., "platform".) + - {schema}: Raw schema name for application tables (e.g., platform) + - {pgvector_schema}: Schema where pgvector is installed (defaults to "public") + Args: - query_template: SQL query with {schema_prefix} placeholder + query_template: SQL query with {schema_prefix}, {schema}, and/or {pgvector_schema} placeholders *args: Query parameters execute: If False, executes SELECT query. If True, executes INSERT/UPDATE/DELETE. client: Optional Prisma client for transactions (only used when execute=True). - set_public_search_path: If True, sets search_path to include public schema. - Needed for pgvector types and other public schema objects. 
Returns: - list[dict] if execute=False (query results) - int if execute=True (number of affected rows) + + Example with vector type: + await execute_raw_with_schema( + 'INSERT INTO {schema_prefix}"Embedding" (vec) VALUES ($1::{pgvector_schema}.vector)', + embedding_data + ) """ schema = get_database_schema() schema_prefix = f'"{schema}".' if schema != "public" else "" - formatted_query = query_template.format(schema_prefix=schema_prefix) + # pgvector extension is typically installed in "public" schema + # On Supabase it may be in "extensions" but "public" is the common default + pgvector_schema = "public" + + formatted_query = query_template.format( + schema_prefix=schema_prefix, + schema=schema, + pgvector_schema=pgvector_schema, + ) import prisma as prisma_module db_client = client if client else prisma_module.get_client() - # Set search_path to include public schema if requested - # Prisma doesn't support the 'options' connection parameter, so we set it per-session - # This is idempotent and safe to call multiple times - if set_public_search_path: - await db_client.execute_raw(f"SET search_path = {schema}, public") # type: ignore - if execute: result = await db_client.execute_raw(formatted_query, *args) # type: ignore else: @@ -167,16 +163,12 @@ async def _raw_with_schema( return result -async def query_raw_with_schema( - query_template: str, *args, set_public_search_path: bool = False -) -> list[dict]: +async def query_raw_with_schema(query_template: str, *args) -> list[dict]: """Execute raw SQL SELECT query with proper schema handling. Args: - query_template: SQL query with {schema_prefix} placeholder + query_template: SQL query with {schema_prefix} and/or {schema} placeholders *args: Query parameters - set_public_search_path: If True, sets search_path to include public schema. - Needed for pgvector types and other public schema objects. Returns: List of result rows as dictionaries @@ -187,23 +179,20 @@ async def query_raw_with_schema( user_id ) """ - return await _raw_with_schema(query_template, *args, execute=False, set_public_search_path=set_public_search_path) # type: ignore + return await _raw_with_schema(query_template, *args, execute=False) # type: ignore async def execute_raw_with_schema( query_template: str, *args, client: Prisma | None = None, - set_public_search_path: bool = False, ) -> int: """Execute raw SQL command (INSERT/UPDATE/DELETE) with proper schema handling. Args: - query_template: SQL query with {schema_prefix} placeholder + query_template: SQL query with {schema_prefix} and/or {schema} placeholders *args: Query parameters client: Optional Prisma client for transactions - set_public_search_path: If True, sets search_path to include public schema. - Needed for pgvector types and other public schema objects. 
Returns: Number of affected rows @@ -215,7 +204,7 @@ async def execute_raw_with_schema( client=tx # Optional transaction client ) """ - return await _raw_with_schema(query_template, *args, execute=True, client=client, set_public_search_path=set_public_search_path) # type: ignore + return await _raw_with_schema(query_template, *args, execute=True, client=client) # type: ignore class BaseDbModel(BaseModel): diff --git a/autogpt_platform/backend/backend/data/execution.py b/autogpt_platform/backend/backend/data/execution.py index a6797032fd..cee0b82137 100644 --- a/autogpt_platform/backend/backend/data/execution.py +++ b/autogpt_platform/backend/backend/data/execution.py @@ -81,7 +81,8 @@ class ExecutionContext(BaseModel): This includes information needed by blocks, sub-graphs, and execution management. """ - safe_mode: bool = True + human_in_the_loop_safe_mode: bool = True + sensitive_action_safe_mode: bool = False user_timezone: str = "UTC" root_execution_id: Optional[str] = None parent_execution_id: Optional[str] = None diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index e9be80892c..d868c0ff3b 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -62,7 +62,23 @@ logger = logging.getLogger(__name__) class GraphSettings(BaseModel): - human_in_the_loop_safe_mode: bool | None = None + human_in_the_loop_safe_mode: bool = True + sensitive_action_safe_mode: bool = False + + @classmethod + def from_graph( + cls, + graph: "GraphModel", + hitl_safe_mode: bool | None = None, + sensitive_action_safe_mode: bool = False, + ) -> "GraphSettings": + # Default to True if not explicitly set + if hitl_safe_mode is None: + hitl_safe_mode = True + return cls( + human_in_the_loop_safe_mode=hitl_safe_mode, + sensitive_action_safe_mode=sensitive_action_safe_mode, + ) class Link(BaseDbModel): @@ -244,10 +260,14 @@ class BaseGraph(BaseDbModel): return any( node.block_id for node in self.nodes - if ( - node.block.block_type == BlockType.HUMAN_IN_THE_LOOP - or node.block.requires_human_review - ) + if node.block.block_type == BlockType.HUMAN_IN_THE_LOOP + ) + + @computed_field + @property + def has_sensitive_action(self) -> bool: + return any( + node.block_id for node in self.nodes if node.block.is_sensitive_action ) @property diff --git a/autogpt_platform/backend/backend/data/understanding.py b/autogpt_platform/backend/backend/data/understanding.py index eb63d719ca..c604e046b6 100644 --- a/autogpt_platform/backend/backend/data/understanding.py +++ b/autogpt_platform/backend/backend/data/understanding.py @@ -328,6 +328,8 @@ async def clear_business_understanding(user_id: str) -> bool: def format_understanding_for_prompt(understanding: BusinessUnderstanding) -> str: """Format business understanding as text for system prompt injection.""" + if not understanding: + return "" sections = [] # User info section diff --git a/autogpt_platform/backend/backend/executor/scheduler.py b/autogpt_platform/backend/backend/executor/scheduler.py index c2aa81b10a..44b77fc018 100644 --- a/autogpt_platform/backend/backend/executor/scheduler.py +++ b/autogpt_platform/backend/backend/executor/scheduler.py @@ -309,7 +309,7 @@ def ensure_embeddings_coverage(): # Process in batches until no more missing embeddings while True: - result = db_client.backfill_missing_embeddings(batch_size=10) + result = db_client.backfill_missing_embeddings(batch_size=100) total_processed += result["processed"] total_success += 
result["success"] diff --git a/autogpt_platform/backend/backend/executor/utils.py b/autogpt_platform/backend/backend/executor/utils.py index 25f0389e99..7771c3751c 100644 --- a/autogpt_platform/backend/backend/executor/utils.py +++ b/autogpt_platform/backend/backend/executor/utils.py @@ -873,11 +873,8 @@ async def add_graph_execution( settings = await gdb.get_graph_settings(user_id=user_id, graph_id=graph_id) execution_context = ExecutionContext( - safe_mode=( - settings.human_in_the_loop_safe_mode - if settings.human_in_the_loop_safe_mode is not None - else True - ), + human_in_the_loop_safe_mode=settings.human_in_the_loop_safe_mode, + sensitive_action_safe_mode=settings.sensitive_action_safe_mode, user_timezone=( user.timezone if user.timezone != USER_TIMEZONE_NOT_SET else "UTC" ), diff --git a/autogpt_platform/backend/backend/executor/utils_test.py b/autogpt_platform/backend/backend/executor/utils_test.py index 0e652f9627..e6e8fcbf60 100644 --- a/autogpt_platform/backend/backend/executor/utils_test.py +++ b/autogpt_platform/backend/backend/executor/utils_test.py @@ -386,6 +386,7 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture): mock_user.timezone = "UTC" mock_settings = mocker.MagicMock() mock_settings.human_in_the_loop_safe_mode = True + mock_settings.sensitive_action_safe_mode = False mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user) mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings) @@ -651,6 +652,7 @@ async def test_add_graph_execution_with_nodes_to_skip(mocker: MockerFixture): mock_user.timezone = "UTC" mock_settings = mocker.MagicMock() mock_settings.human_in_the_loop_safe_mode = True + mock_settings.sensitive_action_safe_mode = False mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user) mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings) diff --git a/autogpt_platform/backend/snapshots/grph_single b/autogpt_platform/backend/snapshots/grph_single index 7ce8695e6b..1811a57ec8 100644 --- a/autogpt_platform/backend/snapshots/grph_single +++ b/autogpt_platform/backend/snapshots/grph_single @@ -11,6 +11,7 @@ "forked_from_version": null, "has_external_trigger": false, "has_human_in_the_loop": false, + "has_sensitive_action": false, "id": "graph-123", "input_schema": { "properties": {}, diff --git a/autogpt_platform/backend/snapshots/grphs_all b/autogpt_platform/backend/snapshots/grphs_all index f69b45a6de..0b314d96f9 100644 --- a/autogpt_platform/backend/snapshots/grphs_all +++ b/autogpt_platform/backend/snapshots/grphs_all @@ -11,6 +11,7 @@ "forked_from_version": null, "has_external_trigger": false, "has_human_in_the_loop": false, + "has_sensitive_action": false, "id": "graph-123", "input_schema": { "properties": {}, diff --git a/autogpt_platform/backend/snapshots/lib_agts_search b/autogpt_platform/backend/snapshots/lib_agts_search index c8e3cc73a6..67c307b09e 100644 --- a/autogpt_platform/backend/snapshots/lib_agts_search +++ b/autogpt_platform/backend/snapshots/lib_agts_search @@ -27,6 +27,8 @@ "properties": {} }, "has_external_trigger": false, + "has_human_in_the_loop": false, + "has_sensitive_action": false, "trigger_setup_info": null, "new_output": false, "can_access_graph": true, @@ -34,7 +36,8 @@ "is_favorite": false, "recommended_schedule_cron": null, "settings": { - "human_in_the_loop_safe_mode": null + "human_in_the_loop_safe_mode": true, + "sensitive_action_safe_mode": false }, "marketplace_listing": null }, @@ -65,6 +68,8 @@ "properties": {} }, "has_external_trigger": 
false, + "has_human_in_the_loop": false, + "has_sensitive_action": false, "trigger_setup_info": null, "new_output": false, "can_access_graph": false, @@ -72,7 +77,8 @@ "is_favorite": false, "recommended_schedule_cron": null, "settings": { - "human_in_the_loop_safe_mode": null + "human_in_the_loop_safe_mode": true, + "sensitive_action_safe_mode": false }, "marketplace_listing": null } diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx index f381ccb93b..57890b1f17 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx @@ -5,10 +5,11 @@ import { TooltipContent, TooltipTrigger, } from "@/components/atoms/Tooltip/BaseTooltip"; -import { PlayIcon, StopIcon } from "@phosphor-icons/react"; +import { CircleNotchIcon, PlayIcon, StopIcon } from "@phosphor-icons/react"; import { useShallow } from "zustand/react/shallow"; import { RunInputDialog } from "../RunInputDialog/RunInputDialog"; import { useRunGraph } from "./useRunGraph"; +import { cn } from "@/lib/utils"; export const RunGraph = ({ flowID }: { flowID: string | null }) => { const { @@ -24,6 +25,31 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => { useShallow((state) => state.isGraphRunning), ); + const isLoading = isExecutingGraph || isTerminatingGraph || isSaving; + + // Determine which icon to show with proper animation + const renderIcon = () => { + const iconClass = cn( + "size-4 transition-transform duration-200 ease-out", + !isLoading && "group-hover:scale-110", + ); + + if (isLoading) { + return ( + + ); + } + + if (isGraphRunning) { + return ; + } + + return ; + }; + return ( <> @@ -33,18 +59,18 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => { variant={isGraphRunning ? "destructive" : "primary"} data-id={isGraphRunning ? "stop-graph-button" : "run-graph-button"} onClick={isGraphRunning ? handleStopGraph : handleRunGraph} - disabled={!flowID || isExecutingGraph || isTerminatingGraph} - loading={isExecutingGraph || isTerminatingGraph || isSaving} + disabled={!flowID || isLoading} + className="group" > - {!isGraphRunning ? ( - - ) : ( - - )} + {renderIcon()} - {isGraphRunning ? "Stop agent" : "Run agent"} + {isLoading + ? "Processing..." + : isGraphRunning + ? "Stop agent" + : "Run agent"} state.hasInputs); const hasCredentials = useGraphStore((state) => state.hasCredentials); const inputSchema = useGraphStore((state) => state.inputSchema); - const credentialsSchema = useGraphStore( - (state) => state.credentialsInputSchema, - ); const { - credentialsUiSchema, + credentialFields, + requiredCredentials, handleManualRun, handleInputChange, openCronSchedulerDialog, setOpenCronSchedulerDialog, inputValues, credentialValues, - handleCredentialChange, + handleCredentialFieldChange, isExecutingGraph, } = useRunInputDialog({ setIsOpen }); @@ -62,67 +61,67 @@ export const RunInputDialog = ({ isOpen, set: setIsOpen, }} - styling={{ maxWidth: "600px", minWidth: "600px" }} + styling={{ maxWidth: "700px", minWidth: "700px" }} > -
- {/* Credentials Section */} - {hasCredentials() && ( -
-
- - Credentials - +
+
+ {/* Credentials Section */} + {hasCredentials() && credentialFields.length > 0 && ( +
+
+ + Credentials + +
+
+ +
-
- handleCredentialChange(v.formData)} - uiSchema={credentialsUiSchema} - initialValues={{}} - formContext={{ - showHandles: false, - size: "large", - showOptionalToggle: false, - }} - /> -
-
- )} + )} - {/* Inputs Section */} - {hasInputs() && ( -
-
- - Inputs - + {/* Inputs Section */} + {hasInputs() && ( +
+
+ + Inputs + +
+
+ handleInputChange(v.formData)} + uiSchema={uiSchema} + initialValues={{}} + formContext={{ + showHandles: false, + size: "large", + }} + /> +
-
- handleInputChange(v.formData)} - uiSchema={uiSchema} - initialValues={{}} - formContext={{ - showHandles: false, - size: "large", - }} - /> -
-
- )} + )} +
- {/* Action Button */}
{purpose === "run" && ( + + +
+
+ {label}: {isEnabled ? "ON" : "OFF"} +
+
+ {isEnabled ? tooltipEnabled : tooltipDisabled} +
+
+
+ + ); +} + export function FloatingSafeModeToggle({ graph, className, fullWidth = false, }: Props) { const { - currentSafeMode, + currentHITLSafeMode, + showHITLToggle, + isHITLStateUndetermined, + handleHITLToggle, + currentSensitiveActionSafeMode, + showSensitiveActionToggle, + handleSensitiveActionToggle, isPending, shouldShowToggle, - isStateUndetermined, - handleToggle, } = useAgentSafeMode(graph); - if (!shouldShowToggle || isStateUndetermined || isPending) { + if (!shouldShowToggle || isPending) { + return null; + } + + const showHITL = showHITLToggle && !isHITLStateUndetermined; + const showSensitive = showSensitiveActionToggle; + + if (!showHITL && !showSensitive) { return null; } return ( -
- - - - - -
-
- Safe Mode: {currentSafeMode! ? "ON" : "OFF"} -
-
- {currentSafeMode! - ? "Human in the loop blocks require manual review" - : "Human in the loop blocks proceed automatically"} -
-
-
-
+
+ {showHITL && ( + + )} + {showSensitive && ( + + )}
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/components/CustomControl.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/components/CustomControl.tsx index 50e2034f75..7b723d73b3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/components/CustomControl.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/components/CustomControl.tsx @@ -53,14 +53,14 @@ export const CustomControls = memo( const controls = [ { id: "zoom-in-button", - icon: , + icon: , label: "Zoom In", onClick: () => zoomIn(), className: "h-10 w-10 border-none", }, { id: "zoom-out-button", - icon: , + icon: , label: "Zoom Out", onClick: () => zoomOut(), className: "h-10 w-10 border-none", @@ -68,9 +68,9 @@ export const CustomControls = memo( { id: "tutorial-button", icon: isTutorialLoading ? ( - + ) : ( - + ), label: isTutorialLoading ? "Loading Tutorial..." : "Start Tutorial", onClick: handleTutorialClick, @@ -79,7 +79,7 @@ export const CustomControls = memo( }, { id: "fit-view-button", - icon: , + icon: , label: "Fit View", onClick: () => fitView({ padding: 0.2, duration: 800, maxZoom: 1 }), className: "h-10 w-10 border-none", @@ -87,9 +87,9 @@ export const CustomControls = memo( { id: "lock-button", icon: !isLocked ? ( - + ) : ( - + ), label: "Toggle Lock", onClick: () => setIsLocked(!isLocked), diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts index 694c1be81b..f5533848d2 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts @@ -139,14 +139,6 @@ export const useFlow = () => { useNodeStore.getState().setNodes([]); useNodeStore.getState().clearResolutionState(); addNodes(customNodes); - - // Sync hardcoded values with handle IDs. - // If a key–value field has a key without a value, the backend omits it from hardcoded values. - // But if a handleId exists for that key, it causes inconsistency. - // This ensures hardcoded values stay in sync with handle IDs. 
- customNodes.forEach((node) => { - useNodeStore.getState().syncHardcodedValuesWithHandleIds(node.id); - }); } }, [customNodes, addNodes]); @@ -158,6 +150,14 @@ export const useFlow = () => { } }, [graph?.links, addLinks]); + useEffect(() => { + if (customNodes.length > 0 && graph?.links) { + customNodes.forEach((node) => { + useNodeStore.getState().syncHardcodedValuesWithHandleIds(node.id); + }); + } + }, [customNodes, graph?.links]); + // update node execution status in nodes useEffect(() => { if ( diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/edges/CustomEdge.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/edges/CustomEdge.tsx index eb221b5d34..3b6425a7c6 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/edges/CustomEdge.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/edges/CustomEdge.tsx @@ -19,6 +19,8 @@ export type CustomEdgeData = { beadUp?: number; beadDown?: number; beadData?: Map; + edgeColorClass?: string; + edgeHexColor?: string; }; export type CustomEdge = XYEdge; @@ -36,7 +38,6 @@ const CustomEdge = ({ selected, }: EdgeProps) => { const removeConnection = useEdgeStore((state) => state.removeEdge); - // Subscribe to the brokenEdgeIDs map and check if this edge is broken across any node const isBroken = useNodeStore((state) => state.isEdgeBroken(id)); const [isHovered, setIsHovered] = useState(false); @@ -52,6 +53,7 @@ const CustomEdge = ({ const isStatic = data?.isStatic ?? false; const beadUp = data?.beadUp ?? 0; const beadDown = data?.beadDown ?? 0; + const edgeColorClass = data?.edgeColorClass; const handleRemoveEdge = () => { removeConnection(id); @@ -70,7 +72,9 @@ const CustomEdge = ({ ? "!stroke-red-500 !stroke-[2px] [stroke-dasharray:4]" : selected ? "stroke-zinc-800" - : "stroke-zinc-500/50 hover:stroke-zinc-500", + : edgeColorClass + ? 
cn(edgeColorClass, "opacity-70 hover:opacity-100") + : "stroke-zinc-500/50 hover:stroke-zinc-500", )} /> { const edges = useEdgeStore((s) => s.edges); @@ -34,8 +35,13 @@ export const useCustomEdge = () => { if (exists) return; const nodes = useNodeStore.getState().nodes; - const isStatic = nodes.find((n) => n.id === conn.source)?.data - ?.staticOutput; + const sourceNode = nodes.find((n) => n.id === conn.source); + const isStatic = sourceNode?.data?.staticOutput; + + const { colorClass, hexColor } = getEdgeColorFromOutputType( + sourceNode?.data?.outputSchema, + conn.sourceHandle, + ); addEdge({ source: conn.source, @@ -44,6 +50,8 @@ export const useCustomEdge = () => { targetHandle: conn.targetHandle, data: { isStatic, + edgeColorClass: colorClass, + edgeHexColor: hexColor, }, }); }, diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx index 7189ab9ca7..17134ae299 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx @@ -1,22 +1,21 @@ import { Button } from "@/components/atoms/Button/Button"; import { Text } from "@/components/atoms/Text/Text"; +import { + Accordion, + AccordionContent, + AccordionItem, + AccordionTrigger, +} from "@/components/molecules/Accordion/Accordion"; import { beautifyString, cn } from "@/lib/utils"; -import { CaretDownIcon, CopyIcon, CheckIcon } from "@phosphor-icons/react"; +import { CopyIcon, CheckIcon } from "@phosphor-icons/react"; import { NodeDataViewer } from "./components/NodeDataViewer/NodeDataViewer"; import { ContentRenderer } from "./components/ContentRenderer"; import { useNodeOutput } from "./useNodeOutput"; import { ViewMoreData } from "./components/ViewMoreData"; export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => { - const { - outputData, - isExpanded, - setIsExpanded, - copiedKey, - handleCopy, - executionResultId, - inputData, - } = useNodeOutput(nodeId); + const { outputData, copiedKey, handleCopy, executionResultId, inputData } = + useNodeOutput(nodeId); if (Object.keys(outputData).length === 0) { return null; @@ -25,122 +24,117 @@ export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => { return (
-
- - Node Output - - -
+ + + + + Node Output + + + +
+
+ Input - {isExpanded && ( - <> -
-
- Input + - - -
- - +
+ + +
-
- {Object.entries(outputData) - .slice(0, 2) - .map(([key, value]) => ( -
-
- - Pin: - - - {beautifyString(key)} - -
-
- - Data: - -
- {value.map((item, index) => ( -
- + {Object.entries(outputData) + .slice(0, 2) + .map(([key, value]) => ( +
+
+ + Pin: + + + {beautifyString(key)} + +
+
+ + Data: + +
+ {value.map((item, index) => ( +
+ +
+ ))} + +
+ +
- ))} - -
- -
-
- ))} -
+ ))} +
- {Object.keys(outputData).length > 2 && ( - - )} - - )} + {Object.keys(outputData).length > 2 && ( + + )} + + +
); }; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx index ba8559a66c..cfc599c6e4 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx @@ -4,7 +4,6 @@ import { useShallow } from "zustand/react/shallow"; import { useState } from "react"; export const useNodeOutput = (nodeId: string) => { - const [isExpanded, setIsExpanded] = useState(true); const [copiedKey, setCopiedKey] = useState(null); const { toast } = useToast(); @@ -37,13 +36,10 @@ export const useNodeOutput = (nodeId: string) => { } }; return { - outputData: outputData, - inputData: inputData, - isExpanded: isExpanded, - setIsExpanded: setIsExpanded, - copiedKey: copiedKey, - setCopiedKey: setCopiedKey, - handleCopy: handleCopy, + outputData, + inputData, + copiedKey, + handleCopy, executionResultId: nodeExecutionResult?.node_exec_id, }; }; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/helpers.ts index 46032a67ea..48f4fc19c7 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/helpers.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/helpers.ts @@ -187,3 +187,38 @@ export const getTypeDisplayInfo = (schema: any) => { hexColor, }; }; + +export function getEdgeColorFromOutputType( + outputSchema: RJSFSchema | undefined, + sourceHandle: string, +): { colorClass: string; hexColor: string } { + const defaultColor = { + colorClass: "stroke-zinc-500/50", + hexColor: "#6b7280", + }; + + if (!outputSchema?.properties) return defaultColor; + + const properties = outputSchema.properties as Record; + const handleParts = sourceHandle.split("_#_"); + let currentSchema: Record = properties; + + for (let i = 0; i < handleParts.length; i++) { + const part = handleParts[i]; + const fieldSchema = currentSchema[part] as Record; + if (!fieldSchema) return defaultColor; + + if (i === handleParts.length - 1) { + const { hexColor, colorClass } = getTypeDisplayInfo(fieldSchema); + return { colorClass: colorClass.replace("!text-", "stroke-"), hexColor }; + } + + if (fieldSchema.properties) { + currentSchema = fieldSchema.properties as Record; + } else { + return defaultColor; + } + } + + return defaultColor; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/tutorial/icons.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/tutorial/icons.ts index 19e133ef7d..2c7a22b423 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/tutorial/icons.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/tutorial/icons.ts @@ -1,7 +1,32 @@ -// These are SVG Phosphor icons +type IconOptions = { + size?: number; + color?: string; +}; + +const DEFAULT_SIZE = 16; +const DEFAULT_COLOR = "#52525b"; // zinc-600 + +const iconPaths = { + ClickIcon: 
`M88,24V16a8,8,0,0,1,16,0v8a8,8,0,0,1-16,0ZM16,104h8a8,8,0,0,0,0-16H16a8,8,0,0,0,0,16ZM124.42,39.16a8,8,0,0,0,10.74-3.58l8-16a8,8,0,0,0-14.31-7.16l-8,16A8,8,0,0,0,124.42,39.16Zm-96,81.69-16,8a8,8,0,0,0,7.16,14.31l16-8a8,8,0,1,0-7.16-14.31ZM219.31,184a16,16,0,0,1,0,22.63l-12.68,12.68a16,16,0,0,1-22.63,0L132.7,168,115,214.09c0,.1-.08.21-.13.32a15.83,15.83,0,0,1-14.6,9.59l-.79,0a15.83,15.83,0,0,1-14.41-11L32.8,52.92A16,16,0,0,1,52.92,32.8L213,85.07a16,16,0,0,1,1.41,29.8l-.32.13L168,132.69ZM208,195.31,156.69,144h0a16,16,0,0,1,4.93-26l.32-.14,45.95-17.64L48,48l52.2,159.86,17.65-46c0-.11.08-.22.13-.33a16,16,0,0,1,11.69-9.34,16.72,16.72,0,0,1,3-.28,16,16,0,0,1,11.3,4.69L195.31,208Z`, + Keyboard: `M224,48H32A16,16,0,0,0,16,64V192a16,16,0,0,0,16,16H224a16,16,0,0,0,16-16V64A16,16,0,0,0,224,48Zm0,144H32V64H224V192Zm-16-64a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,128Zm0-32a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,96ZM72,160a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16h8A8,8,0,0,1,72,160Zm96,0a8,8,0,0,1-8,8H96a8,8,0,0,1,0-16h64A8,8,0,0,1,168,160Zm40,0a8,8,0,0,1-8,8h-8a8,8,0,0,1,0-16h8A8,8,0,0,1,208,160Z`, + Drag: `M188,80a27.79,27.79,0,0,0-13.36,3.4,28,28,0,0,0-46.64-11A28,28,0,0,0,80,92v20H68a28,28,0,0,0-28,28v12a88,88,0,0,0,176,0V108A28,28,0,0,0,188,80Zm12,72a72,72,0,0,1-144,0V140a12,12,0,0,1,12-12H80v24a8,8,0,0,0,16,0V92a12,12,0,0,1,24,0v28a8,8,0,0,0,16,0V92a12,12,0,0,1,24,0v28a8,8,0,0,0,16,0V108a12,12,0,0,1,24,0Z`, +}; + +function createIcon(path: string, options: IconOptions = {}): string { + const size = options.size ?? DEFAULT_SIZE; + const color = options.color ?? DEFAULT_COLOR; + return ``; +} export const ICONS = { - ClickIcon: ``, - Keyboard: ``, - Drag: ``, + ClickIcon: createIcon(iconPaths.ClickIcon), + Keyboard: createIcon(iconPaths.Keyboard), + Drag: createIcon(iconPaths.Drag), }; + +export function getIcon( + name: keyof typeof iconPaths, + options?: IconOptions, +): string { + return createIcon(iconPaths[name], options); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/tutorial/index.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/tutorial/index.ts index fac08ec145..49f505054b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/tutorial/index.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/tutorial/index.ts @@ -11,6 +11,7 @@ import { } from "./helpers"; import { useNodeStore } from "../../../stores/nodeStore"; import { useEdgeStore } from "../../../stores/edgeStore"; +import { useTutorialStore } from "../../../stores/tutorialStore"; let isTutorialLoading = false; let tutorialLoadingCallback: ((loading: boolean) => void) | null = null; @@ -60,12 +61,14 @@ export const startTutorial = async () => { handleTutorialComplete(); removeTutorialStyles(); clearPrefetchedBlocks(); + useTutorialStore.getState().setIsTutorialRunning(false); }); tour.on("cancel", () => { handleTutorialCancel(tour); removeTutorialStyles(); clearPrefetchedBlocks(); + useTutorialStore.getState().setIsTutorialRunning(false); }); for (const step of tour.steps) { diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/helper.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/helper.ts index 7b3c5b1d01..00c151d35b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/helper.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/helper.ts @@ -61,12 +61,18 @@ export const convertNodesPlusBlockInfoIntoCustomNodes = 
( return customNode; }; +const isToolSourceName = (sourceName: string): boolean => + sourceName.startsWith("tools_^_"); + +const cleanupSourceName = (sourceName: string): string => + isToolSourceName(sourceName) ? "tools" : sourceName; + export const linkToCustomEdge = (link: Link): CustomEdge => ({ id: link.id ?? "", type: "custom" as const, source: link.source_id, target: link.sink_id, - sourceHandle: link.source_name, + sourceHandle: cleanupSourceName(link.source_name), targetHandle: link.sink_name, data: { isStatic: link.is_static, diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/helpers.ts index cd05563369..ab7dbd275d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/helpers.ts +++ b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/helpers.ts @@ -267,23 +267,34 @@ export function extractCredentialsNeeded( | undefined; if (missingCreds && Object.keys(missingCreds).length > 0) { const agentName = (setupInfo?.agent_name as string) || "this block"; - const credentials = Object.values(missingCreds).map((credInfo) => ({ - provider: (credInfo.provider as string) || "unknown", - providerName: - (credInfo.provider_name as string) || - (credInfo.provider as string) || - "Unknown Provider", - credentialType: + const credentials = Object.values(missingCreds).map((credInfo) => { + // Normalize to array at boundary - prefer 'types' array, fall back to single 'type' + const typesArray = credInfo.types as + | Array<"api_key" | "oauth2" | "user_password" | "host_scoped"> + | undefined; + const singleType = (credInfo.type as | "api_key" | "oauth2" | "user_password" - | "host_scoped") || "api_key", - title: - (credInfo.title as string) || - `${(credInfo.provider_name as string) || (credInfo.provider as string)} credentials`, - scopes: credInfo.scopes as string[] | undefined, - })); + | "host_scoped" + | undefined) || "api_key"; + const credentialTypes = + typesArray && typesArray.length > 0 ? typesArray : [singleType]; + + return { + provider: (credInfo.provider as string) || "unknown", + providerName: + (credInfo.provider_name as string) || + (credInfo.provider as string) || + "Unknown Provider", + credentialTypes, + title: + (credInfo.title as string) || + `${(credInfo.provider_name as string) || (credInfo.provider as string)} credentials`, + scopes: credInfo.scopes as string[] | undefined, + }; + }); return { type: "credentials_needed", toolName, @@ -358,11 +369,14 @@ export function extractInputsNeeded( credentials.forEach((cred) => { const id = cred.id as string; if (id) { + const credentialTypes = Array.isArray(cred.types) + ? 
cred.types + : [(cred.type as string) || "api_key"]; credentialsSchema[id] = { type: "object", properties: {}, credentials_provider: [cred.provider as string], - credentials_types: [(cred.type as string) || "api_key"], + credentials_types: credentialTypes, credentials_scopes: cred.scopes as string[] | undefined, }; } diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx index 4b9da57286..f0dfadd1f7 100644 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx @@ -9,7 +9,9 @@ import { useChatCredentialsSetup } from "./useChatCredentialsSetup"; export interface CredentialInfo { provider: string; providerName: string; - credentialType: "api_key" | "oauth2" | "user_password" | "host_scoped"; + credentialTypes: Array< + "api_key" | "oauth2" | "user_password" | "host_scoped" + >; title: string; scopes?: string[]; } @@ -30,7 +32,7 @@ function createSchemaFromCredentialInfo( type: "object", properties: {}, credentials_provider: [credential.provider], - credentials_types: [credential.credentialType], + credentials_types: credential.credentialTypes, credentials_scopes: credential.scopes, discriminator: undefined, discriminator_mapping: undefined, diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatMessage/useChatMessage.ts b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatMessage/useChatMessage.ts index 9a597d4b26..5ee61bc554 100644 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatMessage/useChatMessage.ts +++ b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatMessage/useChatMessage.ts @@ -41,7 +41,9 @@ export type ChatMessageData = credentials: Array<{ provider: string; providerName: string; - credentialType: "api_key" | "oauth2" | "user_password" | "host_scoped"; + credentialTypes: Array< + "api_key" | "oauth2" | "user_password" | "host_scoped" + >; title: string; scopes?: string[]; }>; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentSettingsModal/AgentSettingsModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentSettingsModal/AgentSettingsModal.tsx index 7886f7adaf..de912c5fc3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentSettingsModal/AgentSettingsModal.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentSettingsModal/AgentSettingsModal.tsx @@ -31,10 +31,18 @@ export function AgentSettingsModal({ } } - const { currentSafeMode, isPending, hasHITLBlocks, handleToggle } = - useAgentSafeMode(agent); + const { + currentHITLSafeMode, + showHITLToggle, + handleHITLToggle, + currentSensitiveActionSafeMode, + showSensitiveActionToggle, + handleSensitiveActionToggle, + isPending, + shouldShowToggle, + } = useAgentSafeMode(agent); - if (!hasHITLBlocks) return null; + if (!shouldShowToggle) return null; return (
-
-
-
- Require human approval - - The agent will pause and wait for your review before - continuing - + {showHITLToggle && ( +
+
+
+ + Human-in-the-loop approval + + + The agent will pause at human-in-the-loop blocks and wait + for your review before continuing + +
+
-
-
+ )} + {showSensitiveActionToggle && ( +
+
+
+ + Sensitive action approval + + + The agent will pause at sensitive action blocks and wait for + your review before continuing + +
+ +
+
+ )}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx index 7660de7c15..b3e0c17d74 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx @@ -1,9 +1,9 @@ import { Input } from "@/components/atoms/Input/Input"; +import { CredentialsGroupedView } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView"; import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; import { useMemo } from "react"; import { RunAgentInputs } from "../../../RunAgentInputs/RunAgentInputs"; import { useRunAgentModalContext } from "../../context"; -import { CredentialsGroupedView } from "../CredentialsGroupedView/CredentialsGroupedView"; import { ModalSection } from "../ModalSection/ModalSection"; import { WebhookTriggerBanner } from "../WebhookTriggerBanner/WebhookTriggerBanner"; @@ -19,6 +19,8 @@ export function ModalRunSection() { setInputValue, agentInputFields, agentCredentialsInputFields, + inputCredentials, + setInputCredentialsValue, } = useRunAgentModalContext(); const inputFields = Object.entries(agentInputFields || {}); @@ -102,6 +104,9 @@ export function ModalRunSection() { ) : null} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SafeModeToggle.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SafeModeToggle.tsx index 9ba37d8d17..dc0258c768 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SafeModeToggle.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SafeModeToggle.tsx @@ -5,48 +5,112 @@ import { Graph } from "@/lib/autogpt-server-api/types"; import { cn } from "@/lib/utils"; import { ShieldCheckIcon, ShieldIcon } from "@phosphor-icons/react"; import { useAgentSafeMode } from "@/hooks/useAgentSafeMode"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/atoms/Tooltip/BaseTooltip"; interface Props { graph: GraphModel | LibraryAgent | Graph; className?: string; - fullWidth?: boolean; } -export function SafeModeToggle({ graph }: Props) { +interface SafeModeIconButtonProps { + isEnabled: boolean; + label: string; + tooltipEnabled: string; + tooltipDisabled: string; + onToggle: () => void; + isPending: boolean; +} + +function SafeModeIconButton({ + isEnabled, + label, + tooltipEnabled, + tooltipDisabled, + onToggle, + isPending, +}: SafeModeIconButtonProps) { + return ( + + + + + +
+
+ {label}: {isEnabled ? "ON" : "OFF"} +
+
+ {isEnabled ? tooltipEnabled : tooltipDisabled} +
+
+
+
+ ); +} + +export function SafeModeToggle({ graph, className }: Props) { const { - currentSafeMode, + currentHITLSafeMode, + showHITLToggle, + isHITLStateUndetermined, + handleHITLToggle, + currentSensitiveActionSafeMode, + showSensitiveActionToggle, + handleSensitiveActionToggle, isPending, shouldShowToggle, - isStateUndetermined, - handleToggle, } = useAgentSafeMode(graph); - if (!shouldShowToggle || isStateUndetermined) { + if (!shouldShowToggle || isHITLStateUndetermined) { + return null; + } + + const showHITL = showHITLToggle && !isHITLStateUndetermined; + const showSensitive = showSensitiveActionToggle; + + if (!showHITL && !showSensitive) { return null; } return ( - + {showSensitive && ( + + )} +
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedSettingsView/SelectedSettingsView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedSettingsView/SelectedSettingsView.tsx index 57d7055e1c..530d24529f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedSettingsView/SelectedSettingsView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedSettingsView/SelectedSettingsView.tsx @@ -13,8 +13,16 @@ interface Props { } export function SelectedSettingsView({ agent, onClearSelectedRun }: Props) { - const { currentSafeMode, isPending, hasHITLBlocks, handleToggle } = - useAgentSafeMode(agent); + const { + currentHITLSafeMode, + showHITLToggle, + handleHITLToggle, + currentSensitiveActionSafeMode, + showSensitiveActionToggle, + handleSensitiveActionToggle, + isPending, + shouldShowToggle, + } = useAgentSafeMode(agent); return ( @@ -34,24 +42,51 @@ export function SelectedSettingsView({ agent, onClearSelectedRun }: Props) {
- {hasHITLBlocks ? ( -
-
-
- Require human approval - - The agent will pause and wait for your review before - continuing - + {shouldShowToggle ? ( + <> + {showHITLToggle && ( +
+
+
+ + Human-in-the-loop approval + + + The agent will pause at human-in-the-loop blocks and + wait for your review before continuing + +
+ +
- -
-
+ )} + {showSensitiveActionToggle && ( +
+
+
+ + Sensitive action approval + + + The agent will pause at sensitive action blocks and wait + for your review before continuing + +
+ +
+
+ )} + ) : (
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryUploadAgentDialog/LibraryUploadAgentDialog.tsx b/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryUploadAgentDialog/LibraryUploadAgentDialog.tsx index 1a6999721e..436be6f15a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryUploadAgentDialog/LibraryUploadAgentDialog.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryUploadAgentDialog/LibraryUploadAgentDialog.tsx @@ -2,6 +2,7 @@ import { Button } from "@/components/atoms/Button/Button"; import { FileInput } from "@/components/atoms/FileInput/FileInput"; import { Input } from "@/components/atoms/Input/Input"; +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; import { Dialog } from "@/components/molecules/Dialog/Dialog"; import { Form, @@ -120,7 +121,7 @@ export default function LibraryUploadAgentDialog() { > {isUploading ? (
-
+ Uploading...
) : ( diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index fc4e737651..5cd60fcb35 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -6383,6 +6383,11 @@ "title": "Has Human In The Loop", "readOnly": true }, + "has_sensitive_action": { + "type": "boolean", + "title": "Has Sensitive Action", + "readOnly": true + }, "trigger_setup_info": { "anyOf": [ { "$ref": "#/components/schemas/GraphTriggerInfo" }, @@ -6399,6 +6404,7 @@ "output_schema", "has_external_trigger", "has_human_in_the_loop", + "has_sensitive_action", "trigger_setup_info" ], "title": "BaseGraph" @@ -7629,6 +7635,11 @@ "title": "Has Human In The Loop", "readOnly": true }, + "has_sensitive_action": { + "type": "boolean", + "title": "Has Sensitive Action", + "readOnly": true + }, "trigger_setup_info": { "anyOf": [ { "$ref": "#/components/schemas/GraphTriggerInfo" }, @@ -7652,6 +7663,7 @@ "output_schema", "has_external_trigger", "has_human_in_the_loop", + "has_sensitive_action", "trigger_setup_info", "credentials_input_schema" ], @@ -7730,6 +7742,11 @@ "title": "Has Human In The Loop", "readOnly": true }, + "has_sensitive_action": { + "type": "boolean", + "title": "Has Sensitive Action", + "readOnly": true + }, "trigger_setup_info": { "anyOf": [ { "$ref": "#/components/schemas/GraphTriggerInfo" }, @@ -7754,6 +7771,7 @@ "output_schema", "has_external_trigger", "has_human_in_the_loop", + "has_sensitive_action", "trigger_setup_info", "credentials_input_schema" ], @@ -7762,8 +7780,14 @@ "GraphSettings": { "properties": { "human_in_the_loop_safe_mode": { - "anyOf": [{ "type": "boolean" }, { "type": "null" }], - "title": "Human In The Loop Safe Mode" + "type": "boolean", + "title": "Human In The Loop Safe Mode", + "default": true + }, + "sensitive_action_safe_mode": { + "type": "boolean", + "title": "Sensitive Action Safe Mode", + "default": false } }, "type": "object", @@ -7921,6 +7945,16 @@ "title": "Has External Trigger", "description": "Whether the agent has an external trigger (e.g. 
webhook) node" }, + "has_human_in_the_loop": { + "type": "boolean", + "title": "Has Human In The Loop", + "description": "Whether the agent has human-in-the-loop blocks" + }, + "has_sensitive_action": { + "type": "boolean", + "title": "Has Sensitive Action", + "description": "Whether the agent has sensitive action blocks" + }, "trigger_setup_info": { "anyOf": [ { "$ref": "#/components/schemas/GraphTriggerInfo" }, @@ -7967,6 +8001,8 @@ "output_schema", "credentials_input_schema", "has_external_trigger", + "has_human_in_the_loop", + "has_sensitive_action", "new_output", "can_access_graph", "is_latest_version", diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/CredentialsGroupedView/CredentialsGroupedView.tsx b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView.tsx similarity index 86% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/CredentialsGroupedView/CredentialsGroupedView.tsx rename to autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView.tsx index 2ae159e739..135a960431 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/CredentialsGroupedView/CredentialsGroupedView.tsx +++ b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView.tsx @@ -5,30 +5,37 @@ import { AccordionItem, AccordionTrigger, } from "@/components/molecules/Accordion/Accordion"; +import { + CredentialsMetaInput, + CredentialsType, +} from "@/lib/autogpt-server-api/types"; import { CredentialsProvidersContext } from "@/providers/agent-credentials/credentials-provider"; -import { SlidersHorizontal } from "@phosphor-icons/react"; +import { SlidersHorizontalIcon } from "@phosphor-icons/react"; import { useContext, useEffect, useMemo, useRef } from "react"; -import { useRunAgentModalContext } from "../../context"; import { areSystemCredentialProvidersLoading, CredentialField, findSavedCredentialByProviderAndType, hasMissingRequiredSystemCredentials, splitCredentialFieldsBySystem, -} from "../helpers"; +} from "./helpers"; type Props = { credentialFields: CredentialField[]; requiredCredentials: Set; + inputCredentials: Record; + inputValues: Record; + onCredentialChange: (key: string, value?: CredentialsMetaInput) => void; }; export function CredentialsGroupedView({ credentialFields, requiredCredentials, + inputCredentials, + inputValues, + onCredentialChange, }: Props) { const allProviders = useContext(CredentialsProvidersContext); - const { inputCredentials, setInputCredentialsValue, inputValues } = - useRunAgentModalContext(); const { userCredentialFields, systemCredentialFields } = useMemo( () => @@ -87,11 +94,11 @@ export function CredentialsGroupedView({ ); if (savedCredential) { - setInputCredentialsValue(key, { + onCredentialChange(key, { id: savedCredential.id, provider: savedCredential.provider, - type: savedCredential.type, - title: (savedCredential as { title?: string }).title, + type: savedCredential.type as CredentialsType, + title: savedCredential.title, }); } } @@ -103,7 +110,7 @@ export function CredentialsGroupedView({ systemCredentialFields, requiredCredentials, inputCredentials, - 
setInputCredentialsValue, + onCredentialChange, isLoadingProviders, ]); @@ -123,7 +130,7 @@ export function CredentialsGroupedView({ } selectedCredentials={selectedCred} onSelectCredentials={(value) => { - setInputCredentialsValue(key, value); + onCredentialChange(key, value); }} siblingInputs={inputValues} isOptional={!requiredCredentials.has(key)} @@ -143,7 +150,8 @@ export function CredentialsGroupedView({
- System credentials + System + credentials {hasMissingSystemCredentials && ( (missing) )} @@ -163,7 +171,7 @@ export function CredentialsGroupedView({ } selectedCredentials={selectedCred} onSelectCredentials={(value) => { - setInputCredentialsValue(key, value); + onCredentialChange(key, value); }} siblingInputs={inputValues} isOptional={!requiredCredentials.has(key)} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/helpers.ts b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers.ts similarity index 98% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/helpers.ts rename to autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers.ts index 72f0fcb451..519ef302c1 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/helpers.ts +++ b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers.ts @@ -1,5 +1,5 @@ import { CredentialsProvidersContextType } from "@/providers/agent-credentials/credentials-provider"; -import { getSystemCredentials } from "../../../../../../../../../../../components/contextual/CredentialsInput/helpers"; +import { getSystemCredentials } from "../../helpers"; export type CredentialField = [string, any]; diff --git a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/CredentialField/components/CredentialFieldTitle.tsx b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/CredentialField/components/CredentialFieldTitle.tsx index 347f4e089a..b2f71b14ed 100644 --- a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/CredentialField/components/CredentialFieldTitle.tsx +++ b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/CredentialField/components/CredentialFieldTitle.tsx @@ -35,12 +35,13 @@ export const CredentialFieldTitle = (props: { uiOptions, ); - const credentialProvider = toDisplayName( - getCredentialProviderFromSchema( - useNodeStore.getState().getHardCodedValues(nodeId), - schema as BlockIOCredentialsSubSchema, - ) ?? "", + const provider = getCredentialProviderFromSchema( + useNodeStore.getState().getHardCodedValues(nodeId), + schema as BlockIOCredentialsSubSchema, ); + const credentialProvider = provider + ? 
`${toDisplayName(provider)} credential` + : "credential"; const updatedUiSchema = updateUiOption(uiSchema, { showHandles: false, diff --git a/autogpt_platform/frontend/src/hooks/useAgentSafeMode.ts b/autogpt_platform/frontend/src/hooks/useAgentSafeMode.ts index 07a2b33674..8e5560ce8f 100644 --- a/autogpt_platform/frontend/src/hooks/useAgentSafeMode.ts +++ b/autogpt_platform/frontend/src/hooks/useAgentSafeMode.ts @@ -20,11 +20,15 @@ function hasHITLBlocks(graph: GraphModel | LibraryAgent | Graph): boolean { if ("has_human_in_the_loop" in graph) { return !!graph.has_human_in_the_loop; } + return false; +} - if (isLibraryAgent(graph)) { - return graph.settings?.human_in_the_loop_safe_mode !== null; +function hasSensitiveActionBlocks( + graph: GraphModel | LibraryAgent | Graph, +): boolean { + if ("has_sensitive_action" in graph) { + return !!graph.has_sensitive_action; } - return false; } @@ -40,7 +44,9 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) { const graphId = getGraphId(graph); const isAgent = isLibraryAgent(graph); - const shouldShowToggle = hasHITLBlocks(graph); + const showHITLToggle = hasHITLBlocks(graph); + const showSensitiveActionToggle = hasSensitiveActionBlocks(graph); + const shouldShowToggle = showHITLToggle || showSensitiveActionToggle; const { mutateAsync: updateGraphSettings, isPending } = usePatchV1UpdateGraphSettings(); @@ -56,27 +62,37 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) { }, ); - const [localSafeMode, setLocalSafeMode] = useState(null); + const [localHITLSafeMode, setLocalHITLSafeMode] = useState(true); + const [localSensitiveActionSafeMode, setLocalSensitiveActionSafeMode] = + useState(false); + const [isLocalStateLoaded, setIsLocalStateLoaded] = useState(false); useEffect(() => { if (!isAgent && libraryAgent) { - const backendValue = libraryAgent.settings?.human_in_the_loop_safe_mode; - if (backendValue !== undefined) { - setLocalSafeMode(backendValue); - } + setLocalHITLSafeMode( + libraryAgent.settings?.human_in_the_loop_safe_mode ?? true, + ); + setLocalSensitiveActionSafeMode( + libraryAgent.settings?.sensitive_action_safe_mode ?? false, + ); + setIsLocalStateLoaded(true); } }, [isAgent, libraryAgent]); - const currentSafeMode = isAgent - ? graph.settings?.human_in_the_loop_safe_mode - : localSafeMode; + const currentHITLSafeMode = isAgent + ? (graph.settings?.human_in_the_loop_safe_mode ?? true) + : localHITLSafeMode; - const isStateUndetermined = isAgent - ? graph.settings?.human_in_the_loop_safe_mode == null - : isLoading || localSafeMode === null; + const currentSensitiveActionSafeMode = isAgent + ? (graph.settings?.sensitive_action_safe_mode ?? false) + : localSensitiveActionSafeMode; - const handleToggle = useCallback(async () => { - const newSafeMode = !currentSafeMode; + const isHITLStateUndetermined = isAgent + ? false + : isLoading || !isLocalStateLoaded; + + const handleHITLToggle = useCallback(async () => { + const newSafeMode = !currentHITLSafeMode; try { await updateGraphSettings({ @@ -85,7 +101,7 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) { }); if (!isAgent) { - setLocalSafeMode(newSafeMode); + setLocalHITLSafeMode(newSafeMode); } if (isAgent) { @@ -101,37 +117,62 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) { queryClient.invalidateQueries({ queryKey: ["v2", "executions"] }); toast({ - title: `Safe mode ${newSafeMode ? "enabled" : "disabled"}`, + title: `HITL safe mode ${newSafeMode ? 
"enabled" : "disabled"}`, description: newSafeMode ? "Human-in-the-loop blocks will require manual review" : "Human-in-the-loop blocks will proceed automatically", duration: 2000, }); } catch (error) { - const isNotFoundError = - error instanceof Error && - (error.message.includes("404") || error.message.includes("not found")); - - if (!isAgent && isNotFoundError) { - toast({ - title: "Safe mode not available", - description: - "To configure safe mode, please save this graph to your library first.", - variant: "destructive", - }); - } else { - toast({ - title: "Failed to update safe mode", - description: - error instanceof Error - ? error.message - : "An unexpected error occurred.", - variant: "destructive", - }); - } + handleToggleError(error, isAgent, toast); } }, [ - currentSafeMode, + currentHITLSafeMode, + graphId, + isAgent, + graph.id, + updateGraphSettings, + queryClient, + toast, + ]); + + const handleSensitiveActionToggle = useCallback(async () => { + const newSafeMode = !currentSensitiveActionSafeMode; + + try { + await updateGraphSettings({ + graphId, + data: { sensitive_action_safe_mode: newSafeMode }, + }); + + if (!isAgent) { + setLocalSensitiveActionSafeMode(newSafeMode); + } + + if (isAgent) { + queryClient.invalidateQueries({ + queryKey: getGetV2GetLibraryAgentQueryOptions(graph.id.toString()) + .queryKey, + }); + } + + queryClient.invalidateQueries({ + queryKey: ["v1", "graphs", graphId, "executions"], + }); + queryClient.invalidateQueries({ queryKey: ["v2", "executions"] }); + + toast({ + title: `Sensitive action safe mode ${newSafeMode ? "enabled" : "disabled"}`, + description: newSafeMode + ? "Sensitive action blocks will require manual review" + : "Sensitive action blocks will proceed automatically", + duration: 2000, + }); + } catch (error) { + handleToggleError(error, isAgent, toast); + } + }, [ + currentSensitiveActionSafeMode, graphId, isAgent, graph.id, @@ -141,11 +182,53 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) { ]); return { - currentSafeMode, + // HITL safe mode + currentHITLSafeMode, + showHITLToggle, + isHITLStateUndetermined, + handleHITLToggle, + + // Sensitive action safe mode + currentSensitiveActionSafeMode, + showSensitiveActionToggle, + handleSensitiveActionToggle, + + // General isPending, shouldShowToggle, - isStateUndetermined, - handleToggle, - hasHITLBlocks: shouldShowToggle, + + // Backwards compatibility + currentSafeMode: currentHITLSafeMode, + isStateUndetermined: isHITLStateUndetermined, + handleToggle: handleHITLToggle, + hasHITLBlocks: showHITLToggle, }; } + +function handleToggleError( + error: unknown, + isAgent: boolean, + toast: ReturnType["toast"], +) { + const isNotFoundError = + error instanceof Error && + (error.message.includes("404") || error.message.includes("not found")); + + if (!isAgent && isNotFoundError) { + toast({ + title: "Safe mode not available", + description: + "To configure safe mode, please save this graph to your library first.", + variant: "destructive", + }); + } else { + toast({ + title: "Failed to update safe mode", + description: + error instanceof Error + ? 
error.message + : "An unexpected error occurred.", + variant: "destructive", + }); + } +} diff --git a/autogpt_platform/frontend/src/lib/dexie/draft-utils.ts b/autogpt_platform/frontend/src/lib/dexie/draft-utils.ts index 03232ede30..2a212a3f35 100644 --- a/autogpt_platform/frontend/src/lib/dexie/draft-utils.ts +++ b/autogpt_platform/frontend/src/lib/dexie/draft-utils.ts @@ -5,7 +5,7 @@ import isEqual from "lodash/isEqual"; export function cleanNode(node: CustomNode) { return { id: node.id, - position: node.position, + // Note: position is intentionally excluded to prevent draft saves when dragging nodes data: { hardcodedValues: node.data.hardcodedValues, title: node.data.title, diff --git a/docs/CLAUDE.md b/docs/CLAUDE.md new file mode 100644 index 0000000000..67cb1fc4f3 --- /dev/null +++ b/docs/CLAUDE.md @@ -0,0 +1,44 @@ +# Documentation Guidelines + +## Block Documentation Manual Sections + +When updating manual sections (``) in block documentation files (e.g., `docs/integrations/basic.md`), follow these formats: + +### How It Works Section + +Provide a technical explanation of how the block functions: +- Describe the processing logic in 1-2 paragraphs +- Mention any validation, error handling, or edge cases +- Use code examples with backticks when helpful (e.g., `[[1, 2], [3, 4]]` becomes `[1, 2, 3, 4]`) + +Example: +```markdown + +The block iterates through each list in the input and extends a result list with all elements from each one. It processes lists in order, so `[[1, 2], [3, 4]]` becomes `[1, 2, 3, 4]`. + +The block includes validation to ensure each item is actually a list. If a non-list value is encountered, the block outputs an error message instead of proceeding. + +``` + +### Use Case Section + +Provide 3 practical use cases in this format: +- **Bold Heading**: Short one-sentence description + +Example: +```markdown + +**Paginated API Merging**: Combine results from multiple API pages into a single list for batch processing or display. + +**Parallel Task Aggregation**: Merge outputs from parallel workflow branches that each produce a list of results. + +**Multi-Source Data Collection**: Combine data collected from different sources (like multiple RSS feeds or API endpoints) into one unified list. 
+ +``` + +### Style Guidelines + +- Keep descriptions concise and action-oriented +- Focus on practical, real-world scenarios +- Use consistent terminology with other blocks +- Avoid overly technical jargon unless necessary diff --git a/docs/integrations/README.md b/docs/integrations/README.md index f954ac530f..e444757a49 100644 --- a/docs/integrations/README.md +++ b/docs/integrations/README.md @@ -31,6 +31,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim | [Agent Time Input](basic.md#agent-time-input) | Block for time input | | [Agent Toggle Input](basic.md#agent-toggle-input) | Block for boolean toggle input | | [Block Installation](basic.md#block-installation) | Given a code string, this block allows the verification and installation of a block code into the system | +| [Concatenate Lists](basic.md#concatenate-lists) | Concatenates multiple lists into a single list | | [Dictionary Is Empty](basic.md#dictionary-is-empty) | Checks if a dictionary is empty | | [File Store](basic.md#file-store) | Stores the input file in the temporary directory | | [Find In Dictionary](basic.md#find-in-dictionary) | A block that looks up a value in a dictionary, list, or object by key or index and returns the corresponding value | diff --git a/docs/integrations/basic.md b/docs/integrations/basic.md index 367299ffa7..f92d19002f 100644 --- a/docs/integrations/basic.md +++ b/docs/integrations/basic.md @@ -634,6 +634,42 @@ This enables extensibility by allowing custom blocks to be added without modifyi --- +## Concatenate Lists + +### What it is +Concatenates multiple lists into a single list. All elements from all input lists are combined in order. + +### How it works + +The block iterates through each list in the input and extends a result list with all elements from each one. It processes lists in order, so `[[1, 2], [3, 4]]` becomes `[1, 2, 3, 4]`. + +The block includes validation to ensure each item is actually a list. If a non-list value (like a string or number) is encountered, the block outputs an error message instead of proceeding. None values are skipped automatically. + + +### Inputs + +| Input | Description | Type | Required | +|-------|-------------|------|----------| +| lists | A list of lists to concatenate together. All lists will be combined in order into a single list. | List[List[Any]] | Yes | + +### Outputs + +| Output | Description | Type | +|--------|-------------|------| +| error | Error message if concatenation failed due to invalid input types. | str | +| concatenated_list | The concatenated list containing all elements from all input lists in order. | List[Any] | + +### Possible use case + +**Paginated API Merging**: Combine results from multiple API pages into a single list for batch processing or display. + +**Parallel Task Aggregation**: Merge outputs from parallel workflow branches that each produce a list of results. + +**Multi-Source Data Collection**: Combine data collected from different sources (like multiple RSS feeds or API endpoints) into one unified list. + + +--- + ## Dictionary Is Empty ### What it is
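For reference, here is a minimal Python sketch of the Concatenate Lists behavior described in the `docs/integrations/basic.md` section added above ("How it works"): it extends a result list in input order, skips `None` values, and emits an error for non-list items. This is an illustration only, not the actual `ConcatenateListsBlock` implementation — the function name, signature, and error wording are assumptions; the real block exposes `error` and `concatenated_list` as separate block outputs rather than a return tuple.

```python
from typing import Any


def concatenate_lists(
    lists: list[list[Any] | None],
) -> tuple[str | None, list[Any] | None]:
    """Illustrative sketch of the documented semantics (not the real block).

    Returns (error, concatenated_list); exactly one of the two is None.
    """
    result: list[Any] = []
    for item in lists:
        if item is None:
            # None values are skipped automatically.
            continue
        if not isinstance(item, list):
            # A non-list value produces an error output instead of proceeding.
            return f"Expected a list, got {type(item).__name__}", None
        # Lists are processed in order, so [[1, 2], [3, 4]] becomes [1, 2, 3, 4].
        result.extend(item)
    return None, result


# Example (assumed usage): concatenate_lists([[1, 2], None, [3, 4]])
# -> (None, [1, 2, 3, 4])
```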