diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml index fb7a55055e..499bb03170 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -128,7 +128,7 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} exitOnceUploaded: true - test: + e2e_test: runs-on: big-boi needs: setup strategy: @@ -258,3 +258,39 @@ jobs: - name: Print Final Docker Compose logs if: always() run: docker compose -f ../docker-compose.yml logs + + integration_test: + runs-on: ubuntu-latest + needs: setup + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: "22.18.0" + + - name: Enable corepack + run: corepack enable + + - name: Restore dependencies cache + uses: actions/cache@v4 + with: + path: ~/.pnpm-store + key: ${{ needs.setup.outputs.cache-key }} + restore-keys: | + ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }} + ${{ runner.os }}-pnpm- + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Generate API client + run: pnpm generate:api + + - name: Run integration tests + run: pnpm test:unit diff --git a/AGENTS.md b/AGENTS.md index d31bc92f8c..cd176f8a2d 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -16,6 +16,32 @@ See `docs/content/platform/getting-started.md` for setup instructions. - Format Python code with `poetry run format`. - Format frontend code using `pnpm format`. + +## Frontend guidelines + +See `/frontend/CONTRIBUTING.md` for complete patterns. Quick reference: + +1. **Pages**: Create in `src/app/(platform)/feature-name/page.tsx` + - Add `usePageName.ts` hook for logic + - Put sub-components in local `components/` folder +2. **Components**: Structure as `ComponentName/ComponentName.tsx` + `useComponentName.ts` + `helpers.ts` + - Use design system components from `src/components/` (atoms, molecules, organisms) + - Never use `src/components/__legacy__/*` +3. **Data fetching**: Use generated API hooks from `@/app/api/__generated__/endpoints/` + - Regenerate with `pnpm generate:api` + - Pattern: `use{Method}{Version}{OperationName}` +4. **Styling**: Tailwind CSS only, use design tokens, Phosphor Icons only +5. **Testing**: Add Storybook stories for new components, Playwright for E2E +6. **Code conventions**: Function declarations (not arrow functions) for components/handlers +- Component props should be `interface Props { ... }` (not exported) unless the interface needs to be used outside the component +- Separate render logic from business logic (component.tsx + useComponent.ts + helpers.ts) +- Colocate state where possible and avoid creating large components; use sub-components (a local `components/` folder next to the parent component) when sensible +- Avoid large hooks; abstract logic into `helpers.ts` files when sensible +- Use function declarations for components, arrow functions only for callbacks +- No barrel files or `index.ts` re-exports +- Do not use `useCallback` or `useMemo` unless strictly needed +- Avoid comments unless the code is very complex + ## Testing - Backend: `poetry run test` (runs pytest with a docker based postgres + prisma).
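To make the component convention in the guidelines above concrete, here is a minimal illustrative sketch of the `ComponentName/ComponentName.tsx` + `useComponentName.ts` split they describe. `WidgetPanel` and its hook are hypothetical names for illustration only, not part of this diff:

```tsx
// WidgetPanel/useWidgetPanel.ts — business logic lives in the hook
import { useState } from "react";

export function useWidgetPanel() {
  const [expanded, setExpanded] = useState(false);

  // Handlers are plain function declarations, per the conventions above
  function toggle() {
    setExpanded((prev) => !prev); // arrow function is fine as a callback
  }

  return { expanded, toggle };
}

// WidgetPanel/WidgetPanel.tsx — render logic only
interface Props {
  title: string; // Props interface stays unexported
}

export function WidgetPanel({ title }: Props) {
  const { expanded, toggle } = useWidgetPanel();

  return (
    <section className="rounded-md border p-4">
      <button onClick={toggle}>{title}</button>
      {expanded && <p>Panel content</p>}
    </section>
  );
}
```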
diff --git a/autogpt_platform/CLAUDE.md b/autogpt_platform/CLAUDE.md index df1f3314aa..2c76e7db80 100644 --- a/autogpt_platform/CLAUDE.md +++ b/autogpt_platform/CLAUDE.md @@ -201,7 +201,7 @@ If you get any pushback or hit complex block conditions check the new_blocks gui 3. Write tests alongside the route file 4. Run `poetry run test` to verify -**Frontend feature development:** +### Frontend guidelines See `/frontend/CONTRIBUTING.md` for complete patterns. Quick reference: @@ -217,6 +217,14 @@ See `/frontend/CONTRIBUTING.md` for complete patterns. Quick reference: 4. **Styling**: Tailwind CSS only, use design tokens, Phosphor Icons only 5. **Testing**: Add Storybook stories for new components, Playwright for E2E 6. **Code conventions**: Function declarations (not arrow functions) for components/handlers +- Component props should be `interface Props { ... }` (not exported) unless the interface needs to be used outside the component +- Separate render logic from business logic (component.tsx + useComponent.ts + helpers.ts) +- Colocate state where possible and avoid creating large components; use sub-components (a local `components/` folder next to the parent component) when sensible +- Avoid large hooks; abstract logic into `helpers.ts` files when sensible +- Use function declarations for components, arrow functions only for callbacks +- No barrel files or `index.ts` re-exports +- Do not use `useCallback` or `useMemo` unless strictly needed +- Avoid comments unless the code is very complex ### Security Implementation diff --git a/autogpt_platform/backend/.env.default b/autogpt_platform/backend/.env.default index 49689d7ad6..b393f13017 100644 --- a/autogpt_platform/backend/.env.default +++ b/autogpt_platform/backend/.env.default @@ -178,5 +178,10 @@ AYRSHARE_JWT_KEY= SMARTLEAD_API_KEY= ZEROBOUNCE_API_KEY= +# PostHog Analytics +# Get API key from https://posthog.com - Project Settings > Project API Key +POSTHOG_API_KEY= +POSTHOG_HOST=https://eu.i.posthog.com + # Other Services AUTOMOD_API_KEY= diff --git a/autogpt_platform/backend/backend/api/external/v1/routes.py b/autogpt_platform/backend/backend/api/external/v1/routes.py index 58e15dc6a3..00933c1899 100644 --- a/autogpt_platform/backend/backend/api/external/v1/routes.py +++ b/autogpt_platform/backend/backend/api/external/v1/routes.py @@ -86,6 +86,8 @@ async def execute_graph_block( obj = backend.data.block.get_block(block_id) if not obj: raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.") + if obj.disabled: + raise HTTPException(status_code=403, detail=f"Block #{block_id} is disabled.") output = defaultdict(list) async for name, data in obj.execute(data): diff --git a/autogpt_platform/backend/backend/api/features/chat/model.py b/autogpt_platform/backend/backend/api/features/chat/model.py index ec4cf1fc8b..75bda11127 100644 --- a/autogpt_platform/backend/backend/api/features/chat/model.py +++ b/autogpt_platform/backend/backend/api/features/chat/model.py @@ -290,6 +290,11 @@ async def _cache_session(session: ChatSession) -> None: await async_redis.setex(redis_key, config.session_ttl, session.model_dump_json()) +async def cache_chat_session(session: ChatSession) -> None: + """Cache a chat session without persisting to the database.""" + await _cache_session(session) + + async def _get_session_from_db(session_id: str) -> ChatSession | None: """Get a chat session from the database.""" prisma_session = await chat_db.get_chat_session(session_id) diff --git
a/autogpt_platform/backend/backend/api/features/chat/routes.py b/autogpt_platform/backend/backend/api/features/chat/routes.py index 58b017ad5e..cab51543b1 100644 --- a/autogpt_platform/backend/backend/api/features/chat/routes.py +++ b/autogpt_platform/backend/backend/api/features/chat/routes.py @@ -172,12 +172,12 @@ async def get_session( user_id: The optional authenticated user ID, or None for anonymous access. Returns: - SessionDetailResponse: Details for the requested session; raises NotFoundError if not found. + SessionDetailResponse: Details for the requested session. Raises NotFoundError if the session does not exist. """ session = await get_chat_session(session_id, user_id) if not session: - raise NotFoundError(f"Session {session_id} not found") + raise NotFoundError(f"Session {session_id} not found.") messages = [message.model_dump() for message in session.messages] logger.info( @@ -222,6 +222,8 @@ async def stream_chat_post( session = await _validate_and_get_session(session_id, user_id) async def event_generator() -> AsyncGenerator[str, None]: + chunk_count = 0 + first_chunk_type: str | None = None async for chunk in chat_service.stream_chat_completion( session_id, request.message, @@ -230,7 +232,26 @@ async def stream_chat_post( session=session, # Pass pre-fetched session to avoid double-fetch context=request.context, ): + if chunk_count < 3: + logger.info( + "Chat stream chunk", + extra={ + "session_id": session_id, + "chunk_type": str(chunk.type), + }, + ) + if not first_chunk_type: + first_chunk_type = str(chunk.type) + chunk_count += 1 yield chunk.to_sse() + logger.info( + "Chat stream completed", + extra={ + "session_id": session_id, + "chunk_count": chunk_count, + "first_chunk_type": first_chunk_type, + }, + ) # AI SDK protocol termination yield "data: [DONE]\n\n" @@ -275,6 +296,8 @@ async def stream_chat_get( session = await _validate_and_get_session(session_id, user_id) async def event_generator() -> AsyncGenerator[str, None]: + chunk_count = 0 + first_chunk_type: str | None = None async for chunk in chat_service.stream_chat_completion( session_id, message, @@ -282,7 +305,26 @@ async def stream_chat_get( user_id=user_id, session=session, # Pass pre-fetched session to avoid double-fetch ): + if chunk_count < 3: + logger.info( + "Chat stream chunk", + extra={ + "session_id": session_id, + "chunk_type": str(chunk.type), + }, + ) + if not first_chunk_type: + first_chunk_type = str(chunk.type) + chunk_count += 1 yield chunk.to_sse() + logger.info( + "Chat stream completed", + extra={ + "session_id": session_id, + "chunk_count": chunk_count, + "first_chunk_type": first_chunk_type, + }, + ) # AI SDK protocol termination yield "data: [DONE]\n\n" diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index 7b41b040ba..e10343fff6 100644 --- a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -1,15 +1,18 @@ import asyncio import logging +import time +from asyncio import CancelledError from collections.abc import AsyncGenerator from typing import Any import orjson -from langfuse import Langfuse +from langfuse import get_client, propagate_attributes +from langfuse.openai import openai # type: ignore from openai import ( APIConnectionError, APIError, APIStatusError, - AsyncOpenAI, + PermissionDeniedError, RateLimitError, ) from openai.types.chat import ChatCompletionChunk, ChatCompletionToolParam @@ -21,12 +24,12 @@ from backend.data.understanding
import ( from backend.util.exceptions import NotFoundError from backend.util.settings import Settings -from . import db as chat_db from .config import ChatConfig from .model import ( ChatMessage, ChatSession, Usage, + cache_chat_session, get_chat_session, update_session_title, upsert_chat_session, @@ -45,15 +48,16 @@ from .response_model import ( StreamUsage, ) from .tools import execute_tool, tools +from .tracking import track_user_message logger = logging.getLogger(__name__) config = ChatConfig() settings = Settings() -client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url) +client = openai.AsyncOpenAI(api_key=config.api_key, base_url=config.base_url) -# Langfuse client (lazy initialization) -_langfuse_client: Langfuse | None = None + +langfuse = get_client() class LangfuseNotConfiguredError(Exception): @@ -69,65 +73,6 @@ def _is_langfuse_configured() -> bool: ) -def _get_langfuse_client() -> Langfuse: - """Get or create the Langfuse client for prompt management and tracing.""" - global _langfuse_client - if _langfuse_client is None: - if not _is_langfuse_configured(): - raise LangfuseNotConfiguredError( - "Langfuse is not configured. The chat feature requires Langfuse for prompt management. " - "Please set the LANGFUSE_PUBLIC_KEY and LANGFUSE_SECRET_KEY environment variables." - ) - _langfuse_client = Langfuse( - public_key=settings.secrets.langfuse_public_key, - secret_key=settings.secrets.langfuse_secret_key, - host=settings.secrets.langfuse_host or "https://cloud.langfuse.com", - ) - return _langfuse_client - - -def _get_environment() -> str: - """Get the current environment name for Langfuse tagging.""" - return settings.config.app_env.value - - -def _get_langfuse_prompt() -> str: - """Fetch the latest production prompt from Langfuse. - - Returns: - The compiled prompt text from Langfuse. - - Raises: - Exception: If Langfuse is unavailable or prompt fetch fails. - """ - try: - langfuse = _get_langfuse_client() - # cache_ttl_seconds=0 disables SDK caching to always get the latest prompt - prompt = langfuse.get_prompt(config.langfuse_prompt_name, cache_ttl_seconds=0) - compiled = prompt.compile() - logger.info( - f"Fetched prompt '{config.langfuse_prompt_name}' from Langfuse " - f"(version: {prompt.version})" - ) - return compiled - except Exception as e: - logger.error(f"Failed to fetch prompt from Langfuse: {e}") - raise - - -async def _is_first_session(user_id: str) -> bool: - """Check if this is the user's first chat session. - - Returns True if the user has 1 or fewer sessions (meaning this is their first). - """ - try: - session_count = await chat_db.get_user_session_count(user_id) - return session_count <= 1 - except Exception as e: - logger.warning(f"Failed to check session count for user {user_id}: {e}") - return False # Default to non-onboarding if we can't check - - async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]: """Build the full system prompt including business understanding if available. @@ -139,8 +84,6 @@ async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]: - Tuple of (compiled prompt string, Langfuse prompt object for tracing) + Tuple of (compiled prompt string, business understanding for trace metadata) """ - langfuse = _get_langfuse_client() - # cache_ttl_seconds=0 disables SDK caching to always get the latest prompt prompt = langfuse.get_prompt(config.langfuse_prompt_name, cache_ttl_seconds=0) @@ -158,19 +101,36 @@ async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]: context = "This is the first time you are meeting the user.
Greet them and introduce them to the platform" compiled = prompt.compile(users_information=context) - return compiled, prompt + return compiled, understanding -async def _generate_session_title(message: str) -> str | None: +async def _generate_session_title( + message: str, + user_id: str | None = None, + session_id: str | None = None, +) -> str | None: """Generate a concise title for a chat session based on the first message. Args: message: The first user message in the session + user_id: User ID for OpenRouter tracing (optional) + session_id: Session ID for OpenRouter tracing (optional) Returns: A short title (3-6 words) or None if generation fails """ try: + # Build extra_body for OpenRouter tracing and PostHog analytics + extra_body: dict[str, Any] = {} + if user_id: + extra_body["user"] = user_id[:128] # OpenRouter limit + extra_body["posthogDistinctId"] = user_id + if session_id: + extra_body["session_id"] = session_id[:128] # OpenRouter limit + extra_body["posthogProperties"] = { + "environment": settings.config.app_env.value, + } + response = await client.chat.completions.create( model=config.title_model, messages=[ @@ -185,6 +145,7 @@ async def _generate_session_title(message: str) -> str | None: {"role": "user", "content": message[:500]}, # Limit input length ], max_tokens=20, + extra_body=extra_body, ) title = response.choices[0].message.content if title: @@ -217,6 +178,7 @@ async def assign_user_to_session( async def stream_chat_completion( session_id: str, message: str | None = None, + tool_call_response: str | None = None, is_user_message: bool = True, user_id: str | None = None, retry_count: int = 0, @@ -256,11 +218,6 @@ async def stream_chat_completion( yield StreamFinish() return - # Langfuse observations will be created after session is loaded (need messages for input) - # Initialize to None so finally block can safely check and end them - trace = None - generation = None - # Only fetch from Redis if session not provided (initial call) if session is None: session = await get_chat_session(session_id, user_id) @@ -280,18 +237,9 @@ async def stream_chat_completion( ) if message: - # Build message content with context if provided - message_content = message - if context and context.get("url") and context.get("content"): - context_text = f"Page URL: {context['url']}\n\nPage Content:\n{context['content']}\n\n---\n\nUser Message: {message}" - message_content = context_text - logger.info( - f"Including page context: URL={context['url']}, content_length={len(context['content'])}" - ) - session.messages.append( ChatMessage( - role="user" if is_user_message else "assistant", content=message_content + role="user" if is_user_message else "assistant", content=message ) ) logger.info( @@ -299,6 +247,14 @@ async def stream_chat_completion( f"new message_count={len(session.messages)}" ) + # Track user message in PostHog + if is_user_message: + track_user_message( + user_id=user_id, + session_id=session_id, + message_length=len(message), + ) + logger.info( f"Upserting session: {session.session_id} with user id {session.user_id}, " f"message_count={len(session.messages)}" @@ -318,10 +274,15 @@ async def stream_chat_completion( # stale data issues when the main flow modifies the session captured_session_id = session_id captured_message = message + captured_user_id = user_id async def _update_title(): try: - title = await _generate_session_title(captured_message) + title = await _generate_session_title( + captured_message, + user_id=captured_user_id, + session_id=captured_session_id, + ) if 
title: # Use dedicated title update function that doesn't # touch messages, avoiding race conditions @@ -336,297 +297,349 @@ async def stream_chat_completion( asyncio.create_task(_update_title()) # Build system prompt with business understanding - system_prompt, langfuse_prompt = await _build_system_prompt(user_id) - - # Build input messages including system prompt for complete Langfuse logging - trace_input_messages = [{"role": "system", "content": system_prompt}] + [ - m.model_dump() for m in session.messages - ] + system_prompt, understanding = await _build_system_prompt(user_id) # Create Langfuse trace for this LLM call (each call gets its own trace, grouped by session_id) # Using v3 SDK: start_observation creates a root span, update_trace sets trace-level attributes - try: - langfuse = _get_langfuse_client() - env = _get_environment() - trace = langfuse.start_observation( - name="chat_completion", - input={"messages": trace_input_messages}, - metadata={ - "environment": env, - "model": config.model, - "message_count": len(session.messages), - "prompt_name": langfuse_prompt.name if langfuse_prompt else None, - "prompt_version": langfuse_prompt.version if langfuse_prompt else None, - }, - ) - # Set trace-level attributes (session_id, user_id, tags) - trace.update_trace( + trace_input = message + if not message and tool_call_response: + trace_input = tool_call_response + + langfuse = get_client() + with langfuse.start_as_current_observation( + as_type="span", + name="user-copilot-request", + input=trace_input, + ) as span: + with propagate_attributes( session_id=session_id, user_id=user_id, - tags=[env, "copilot"], - ) - except Exception as e: - logger.warning(f"Failed to create Langfuse trace: {e}") + tags=["copilot"], + metadata={ + "users_information": format_understanding_for_prompt(understanding)[ + :200 + ] # Langfuse only accepts up to 200 chars + }, + ): - # Initialize variables that will be used in finally block (must be defined before try) - assistant_response = ChatMessage( - role="assistant", - content="", - ) - accumulated_tool_calls: list[dict[str, Any]] = [] - - # Wrap main logic in try/finally to ensure Langfuse observations are always ended - try: - has_yielded_end = False - has_yielded_error = False - has_done_tool_call = False - has_received_text = False - text_streaming_ended = False - tool_response_messages: list[ChatMessage] = [] - should_retry = False - - # Generate unique IDs for AI SDK protocol - import uuid as uuid_module - - message_id = str(uuid_module.uuid4()) - text_block_id = str(uuid_module.uuid4()) - - # Yield message start - yield StreamStart(messageId=message_id) - - # Create Langfuse generation for each LLM call, linked to the prompt - # Using v3 SDK: start_observation with as_type="generation" - generation = ( - trace.start_observation( - as_type="generation", - name="llm_call", - model=config.model, - input={"messages": trace_input_messages}, - prompt=langfuse_prompt, + + # Initialize variables used by the error and completion paths (must be defined before try) + assistant_response = ChatMessage( + role="assistant", + content="", ) - if trace - else None - ) + accumulated_tool_calls: list[dict[str, Any]] = [] + has_saved_assistant_message = False + has_appended_streaming_message = False + last_cache_time = 0.0 + last_cache_content_len = 0 - try: - async for chunk in _stream_chat_chunks( - session=session, - tools=tools, - system_prompt=system_prompt, - text_block_id=text_block_id, - ): + # Flags tracking stream state across the main loop and its error handling +
has_yielded_end = False + has_yielded_error = False + has_done_tool_call = False + has_received_text = False + text_streaming_ended = False + tool_response_messages: list[ChatMessage] = [] + should_retry = False - if isinstance(chunk, StreamTextStart): - # Emit text-start before first text delta - if not has_received_text: + # Generate unique IDs for AI SDK protocol + import uuid as uuid_module + + message_id = str(uuid_module.uuid4()) + text_block_id = str(uuid_module.uuid4()) + + # Yield message start + yield StreamStart(messageId=message_id) + + try: + async for chunk in _stream_chat_chunks( + session=session, + tools=tools, + system_prompt=system_prompt, + text_block_id=text_block_id, + ): + + if isinstance(chunk, StreamTextStart): + # Emit text-start before first text delta + if not has_received_text: + yield chunk + elif isinstance(chunk, StreamTextDelta): + delta = chunk.delta or "" + assert assistant_response.content is not None + assistant_response.content += delta + has_received_text = True + if not has_appended_streaming_message: + session.messages.append(assistant_response) + has_appended_streaming_message = True + current_time = time.monotonic() + content_len = len(assistant_response.content) + if ( + current_time - last_cache_time >= 1.0 + and content_len > last_cache_content_len + ): + try: + await cache_chat_session(session) + except Exception as e: + logger.warning( + f"Failed to cache partial session {session.session_id}: {e}" + ) + last_cache_time = current_time + last_cache_content_len = content_len yield chunk - elif isinstance(chunk, StreamTextDelta): - delta = chunk.delta or "" - assert assistant_response.content is not None - assistant_response.content += delta - has_received_text = True - yield chunk - elif isinstance(chunk, StreamTextEnd): - # Emit text-end after text completes - if has_received_text and not text_streaming_ended: - text_streaming_ended = True - yield chunk - elif isinstance(chunk, StreamToolInputStart): - # Emit text-end before first tool call, but only if we've received text - if has_received_text and not text_streaming_ended: - yield StreamTextEnd(id=text_block_id) - text_streaming_ended = True - yield chunk - elif isinstance(chunk, StreamToolInputAvailable): - # Accumulate tool calls in OpenAI format - accumulated_tool_calls.append( - { - "id": chunk.toolCallId, - "type": "function", - "function": { - "name": chunk.toolName, - "arguments": orjson.dumps(chunk.input).decode("utf-8"), - }, - } - ) - elif isinstance(chunk, StreamToolOutputAvailable): - result_content = ( - chunk.output - if isinstance(chunk.output, str) - else orjson.dumps(chunk.output).decode("utf-8") - ) - tool_response_messages.append( - ChatMessage( - role="tool", - content=result_content, - tool_call_id=chunk.toolCallId, - ) - ) - has_done_tool_call = True - # Track if any tool execution failed - if not chunk.success: - logger.warning( - f"Tool {chunk.toolName} (ID: {chunk.toolCallId}) execution failed" - ) - yield chunk - elif isinstance(chunk, StreamFinish): - if not has_done_tool_call: - # Emit text-end before finish if we received text but haven't closed it + elif isinstance(chunk, StreamTextEnd): + # Emit text-end after text completes + if has_received_text and not text_streaming_ended: + text_streaming_ended = True + if assistant_response.content: + logger.warning( + f"StreamTextEnd: setting trace output ({len(assistant_response.content)} chars)" + ) + span.update_trace(output=assistant_response.content) + span.update(output=assistant_response.content) + yield chunk + elif
isinstance(chunk, StreamToolInputStart): + # Emit text-end before first tool call, but only if we've received text if has_received_text and not text_streaming_ended: yield StreamTextEnd(id=text_block_id) text_streaming_ended = True - has_yielded_end = True yield chunk - elif isinstance(chunk, StreamError): - has_yielded_error = True - elif isinstance(chunk, StreamUsage): - session.usage.append( - Usage( - prompt_tokens=chunk.promptTokens, - completion_tokens=chunk.completionTokens, - total_tokens=chunk.totalTokens, + elif isinstance(chunk, StreamToolInputAvailable): + # Accumulate tool calls in OpenAI format + accumulated_tool_calls.append( + { + "id": chunk.toolCallId, + "type": "function", + "function": { + "name": chunk.toolName, + "arguments": orjson.dumps(chunk.input).decode( + "utf-8" + ), + }, + } ) - ) - else: - logger.error(f"Unknown chunk type: {type(chunk)}", exc_info=True) - except Exception as e: - logger.error(f"Error during stream: {e!s}", exc_info=True) + elif isinstance(chunk, StreamToolOutputAvailable): + result_content = ( + chunk.output + if isinstance(chunk.output, str) + else orjson.dumps(chunk.output).decode("utf-8") + ) + tool_response_messages.append( + ChatMessage( + role="tool", + content=result_content, + tool_call_id=chunk.toolCallId, + ) + ) + has_done_tool_call = True + # Track if any tool execution failed + if not chunk.success: + logger.warning( + f"Tool {chunk.toolName} (ID: {chunk.toolCallId}) execution failed" + ) + yield chunk + elif isinstance(chunk, StreamFinish): + if not has_done_tool_call: + # Emit text-end before finish if we received text but haven't closed it + if has_received_text and not text_streaming_ended: + yield StreamTextEnd(id=text_block_id) + text_streaming_ended = True - # Check if this is a retryable error (JSON parsing, incomplete tool calls, etc.) - is_retryable = isinstance(e, (orjson.JSONDecodeError, KeyError, TypeError)) + # Save assistant message before yielding finish to ensure it's persisted + # even if client disconnects immediately after receiving StreamFinish + if not has_saved_assistant_message: + messages_to_save_early: list[ChatMessage] = [] + if accumulated_tool_calls: + assistant_response.tool_calls = ( + accumulated_tool_calls + ) + if not has_appended_streaming_message and ( + assistant_response.content + or assistant_response.tool_calls + ): + messages_to_save_early.append(assistant_response) + messages_to_save_early.extend(tool_response_messages) - if is_retryable and retry_count < config.max_retries: - logger.info( - f"Retryable error encountered. 
Attempt {retry_count + 1}/{config.max_retries}" + if messages_to_save_early: + session.messages.extend(messages_to_save_early) + logger.info( + f"Saving assistant message before StreamFinish: " + f"content_len={len(assistant_response.content or '')}, " + f"tool_calls={len(assistant_response.tool_calls or [])}, " + f"tool_responses={len(tool_response_messages)}" + ) + if ( + messages_to_save_early + or has_appended_streaming_message + ): + await upsert_chat_session(session) + has_saved_assistant_message = True + + has_yielded_end = True + yield chunk + elif isinstance(chunk, StreamError): + has_yielded_error = True + yield chunk + elif isinstance(chunk, StreamUsage): + session.usage.append( + Usage( + prompt_tokens=chunk.promptTokens, + completion_tokens=chunk.completionTokens, + total_tokens=chunk.totalTokens, + ) + ) + else: + logger.error( + f"Unknown chunk type: {type(chunk)}", exc_info=True + ) + if assistant_response.content: + langfuse.update_current_trace(output=assistant_response.content) + langfuse.update_current_span(output=assistant_response.content) + elif tool_response_messages: + langfuse.update_current_trace(output=str(tool_response_messages)) + langfuse.update_current_span(output=str(tool_response_messages)) + + except CancelledError: + if not has_saved_assistant_message: + if accumulated_tool_calls: + assistant_response.tool_calls = accumulated_tool_calls + if assistant_response.content: + assistant_response.content = ( + f"{assistant_response.content}\n\n[interrupted]" + ) + else: + assistant_response.content = "[interrupted]" + if not has_appended_streaming_message: + session.messages.append(assistant_response) + if tool_response_messages: + session.messages.extend(tool_response_messages) + try: + await upsert_chat_session(session) + except Exception as e: + logger.warning( + f"Failed to save interrupted session {session.session_id}: {e}" + ) + raise + except Exception as e: + logger.error(f"Error during stream: {e!s}", exc_info=True) + + # Check if this is a retryable error (JSON parsing, incomplete tool calls, etc.) + is_retryable = isinstance( + e, (orjson.JSONDecodeError, KeyError, TypeError) ) - should_retry = True - else: - # Non-retryable error or max retries exceeded - # Save any partial progress before reporting error + + if is_retryable and retry_count < config.max_retries: + logger.info( + f"Retryable error encountered. 
Attempt {retry_count + 1}/{config.max_retries}" + ) + should_retry = True + else: + # Non-retryable error or max retries exceeded + # Save any partial progress before reporting error + messages_to_save: list[ChatMessage] = [] + + # Add assistant message if it has content or tool calls + if accumulated_tool_calls: + assistant_response.tool_calls = accumulated_tool_calls + if not has_appended_streaming_message and ( + assistant_response.content or assistant_response.tool_calls + ): + messages_to_save.append(assistant_response) + + # Add tool response messages after assistant message + messages_to_save.extend(tool_response_messages) + + if not has_saved_assistant_message: + if messages_to_save: + session.messages.extend(messages_to_save) + if messages_to_save or has_appended_streaming_message: + await upsert_chat_session(session) + + if not has_yielded_error: + error_message = str(e) + if not is_retryable: + error_message = f"Non-retryable error: {error_message}" + elif retry_count >= config.max_retries: + error_message = f"Max retries ({config.max_retries}) exceeded: {error_message}" + + error_response = StreamError(errorText=error_message) + yield error_response + if not has_yielded_end: + yield StreamFinish() + return + + # Handle retry outside of exception handler to avoid nesting + if should_retry and retry_count < config.max_retries: + logger.info( + f"Retrying stream_chat_completion for session {session_id}, attempt {retry_count + 1}" + ) + async for chunk in stream_chat_completion( + session_id=session.session_id, + user_id=user_id, + retry_count=retry_count + 1, + session=session, + context=context, + ): + yield chunk + return # Exit after retry to avoid double-saving in finally block + + # Normal completion path - save session and handle tool call continuation + # Only save if we haven't already saved when StreamFinish was received + if not has_saved_assistant_message: + logger.info( + f"Normal completion path: session={session.session_id}, " + f"current message_count={len(session.messages)}" + ) + + # Build the messages list in the correct order messages_to_save: list[ChatMessage] = [] - # Add assistant message if it has content or tool calls + # Add assistant message with tool_calls if any if accumulated_tool_calls: assistant_response.tool_calls = accumulated_tool_calls - if assistant_response.content or assistant_response.tool_calls: + logger.info( + f"Added {len(accumulated_tool_calls)} tool calls to assistant message" + ) + if not has_appended_streaming_message and ( + assistant_response.content or assistant_response.tool_calls + ): messages_to_save.append(assistant_response) + logger.info( + f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}" + ) # Add tool response messages after assistant message messages_to_save.extend(tool_response_messages) - - session.messages.extend(messages_to_save) - await upsert_chat_session(session) - - if not has_yielded_error: - error_message = str(e) - if not is_retryable: - error_message = f"Non-retryable error: {error_message}" - elif retry_count >= config.max_retries: - error_message = f"Max retries ({config.max_retries}) exceeded: {error_message}" - - error_response = StreamError(errorText=error_message) - yield error_response - if not has_yielded_end: - yield StreamFinish() - return - - # Handle retry outside of exception handler to avoid nesting - if should_retry and retry_count < config.max_retries: - logger.info( - f"Retrying stream_chat_completion for 
session {session_id}, attempt {retry_count + 1}" - ) - async for chunk in stream_chat_completion( - session_id=session.session_id, - user_id=user_id, - retry_count=retry_count + 1, - session=session, - context=context, - ): - yield chunk - return # Exit after retry to avoid double-saving in finally block - - # Normal completion path - save session and handle tool call continuation - logger.info( - f"Normal completion path: session={session.session_id}, " - f"current message_count={len(session.messages)}" - ) - - # Build the messages list in the correct order - messages_to_save: list[ChatMessage] = [] - - # Add assistant message with tool_calls if any - if accumulated_tool_calls: - assistant_response.tool_calls = accumulated_tool_calls - logger.info( - f"Added {len(accumulated_tool_calls)} tool calls to assistant message" - ) - if assistant_response.content or assistant_response.tool_calls: - messages_to_save.append(assistant_response) - logger.info( - f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}" - ) - - # Add tool response messages after assistant message - messages_to_save.extend(tool_response_messages) - logger.info( - f"Saving {len(tool_response_messages)} tool response messages, " - f"total_to_save={len(messages_to_save)}" - ) - - session.messages.extend(messages_to_save) - logger.info( - f"Extended session messages, new message_count={len(session.messages)}" - ) - await upsert_chat_session(session) - - # If we did a tool call, stream the chat completion again to get the next response - if has_done_tool_call: - logger.info( - "Tool call executed, streaming chat completion again to get assistant response" - ) - async for chunk in stream_chat_completion( - session_id=session.session_id, - user_id=user_id, - session=session, # Pass session object to avoid Redis refetch - context=context, - ): - yield chunk - - finally: - # Always end Langfuse observations to prevent resource leaks - # Guard against None and catch errors to avoid masking original exceptions - if generation is not None: - try: - latest_usage = session.usage[-1] if session.usage else None - generation.update( - model=config.model, - output={ - "content": assistant_response.content, - "tool_calls": accumulated_tool_calls or None, - }, - usage_details=( - { - "input": latest_usage.prompt_tokens, - "output": latest_usage.completion_tokens, - "total": latest_usage.total_tokens, - } - if latest_usage - else None - ), + logger.info( + f"Saving {len(tool_response_messages)} tool response messages, " + f"total_to_save={len(messages_to_save)}" ) - generation.end() - except Exception as e: - logger.warning(f"Failed to end Langfuse generation: {e}") - if trace is not None: - try: - if accumulated_tool_calls: - trace.update_trace(output={"tool_calls": accumulated_tool_calls}) - else: - trace.update_trace(output={"response": assistant_response.content}) - trace.end() - except Exception as e: - logger.warning(f"Failed to end Langfuse trace: {e}") + if messages_to_save: + session.messages.extend(messages_to_save) + logger.info( + f"Extended session messages, new message_count={len(session.messages)}" + ) + if messages_to_save or has_appended_streaming_message: + await upsert_chat_session(session) + else: + logger.info( + "Assistant message already saved when StreamFinish was received, " + "skipping duplicate save" + ) + + # If we did a tool call, stream the chat completion again to get the next response + if has_done_tool_call: + logger.info( + 
"Tool call executed, streaming chat completion again to get assistant response" + ) + async for chunk in stream_chat_completion( + session_id=session.session_id, + user_id=user_id, + session=session, # Pass session object to avoid Redis refetch + context=context, + tool_call_response=str(tool_response_messages), + ): + yield chunk # Retry configuration for OpenAI API calls @@ -654,6 +667,12 @@ def _is_retryable_error(error: Exception) -> bool: return False +def _is_region_blocked_error(error: Exception) -> bool: + if isinstance(error, PermissionDeniedError): + return "not available in your region" in str(error).lower() + return "not available in your region" in str(error).lower() + + async def _stream_chat_chunks( session: ChatSession, tools: list[ChatCompletionToolParam], @@ -702,6 +721,20 @@ async def _stream_chat_chunks( f"{f' (retry {retry_count}/{MAX_RETRIES})' if retry_count > 0 else ''}" ) + # Build extra_body for OpenRouter tracing and PostHog analytics + extra_body: dict[str, Any] = { + "posthogProperties": { + "environment": settings.config.app_env.value, + }, + } + if session.user_id: + extra_body["user"] = session.user_id[:128] # OpenRouter limit + extra_body["posthogDistinctId"] = session.user_id + if session.session_id: + extra_body["session_id"] = session.session_id[ + :128 + ] # OpenRouter limit + # Create the stream with proper types stream = await client.chat.completions.create( model=model, @@ -710,6 +743,7 @@ async def _stream_chat_chunks( tool_choice="auto", stream=True, stream_options={"include_usage": True}, + extra_body=extra_body, ) # Variables to accumulate tool calls @@ -846,7 +880,18 @@ async def _stream_chat_chunks( f"Error in stream (not retrying): {e!s}", exc_info=True, ) - error_response = StreamError(errorText=str(e)) + error_code = None + error_text = str(e) + if _is_region_blocked_error(e): + error_code = "MODEL_NOT_AVAILABLE_REGION" + error_text = ( + "This model is not available in your region. " + "Please connect via VPN and try again." 
+ ) + error_response = StreamError( + errorText=error_text, + code=error_code, + ) yield error_response yield StreamFinish() return @@ -900,5 +945,4 @@ async def _yield_tool_call( session=session, ) - logger.info(f"Yielding Tool execution response: {tool_execution_response}") yield tool_execution_response diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py b/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py index fc0fdf9064..284273f3b9 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py @@ -1,8 +1,10 @@ +import logging from typing import TYPE_CHECKING, Any from openai.types.chat import ChatCompletionToolParam from backend.api.features.chat.model import ChatSession +from backend.api.features.chat.tracking import track_tool_called from .add_understanding import AddUnderstandingTool from .agent_output import AgentOutputTool @@ -20,6 +22,8 @@ from .search_docs import SearchDocsTool if TYPE_CHECKING: from backend.api.features.chat.response_model import StreamToolOutputAvailable +logger = logging.getLogger(__name__) + # Single source of truth for all tools TOOL_REGISTRY: dict[str, BaseTool] = { "add_understanding": AddUnderstandingTool(), @@ -30,7 +34,7 @@ TOOL_REGISTRY: dict[str, BaseTool] = { "find_library_agent": FindLibraryAgentTool(), "run_agent": RunAgentTool(), "run_block": RunBlockTool(), - "agent_output": AgentOutputTool(), + "view_agent_output": AgentOutputTool(), "search_docs": SearchDocsTool(), "get_doc_page": GetDocPageTool(), } @@ -56,4 +60,17 @@ async def execute_tool( tool = TOOL_REGISTRY.get(tool_name) if not tool: raise ValueError(f"Tool {tool_name} not found") + + # Track tool call in PostHog + logger.info( + f"Tracking tool call: tool={tool_name}, user={user_id}, " + f"session={session.session_id}, call_id={tool_call_id}" + ) + track_tool_called( + user_id=user_id, + session_id=session.session_id, + tool_name=tool_name, + tool_call_id=tool_call_id, + ) + return await tool.execute(user_id, session, tool_call_id, **parameters) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py b/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py index fe3d5e8984..bd93f0e2a6 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/add_understanding.py @@ -3,6 +3,8 @@ import logging from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from backend.data.understanding import ( BusinessUnderstandingInput, @@ -59,6 +61,7 @@ and automations for the user's specific needs.""" """Requires authentication to store user-specific data.""" return True + @observe(as_type="tool", name="add_understanding") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/__init__.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/__init__.py index d4df2564a8..392f642c41 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/__init__.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/__init__.py @@ -1,29 +1,28 @@ """Agent generator package - Creates agents from natural language.""" from .core import ( - apply_agent_patch, + AgentGeneratorNotConfiguredError, decompose_goal, generate_agent, 
generate_agent_patch, get_agent_as_json, + json_to_graph, save_agent_to_library, ) -from .fixer import apply_all_fixes -from .utils import get_blocks_info -from .validator import validate_agent +from .service import health_check as check_external_service_health +from .service import is_external_service_configured __all__ = [ # Core functions "decompose_goal", "generate_agent", "generate_agent_patch", - "apply_agent_patch", "save_agent_to_library", "get_agent_as_json", - # Fixer - "apply_all_fixes", - # Validator - "validate_agent", - # Utils - "get_blocks_info", + "json_to_graph", + # Exceptions + "AgentGeneratorNotConfiguredError", + # Service + "is_external_service_configured", + "check_external_service_health", ] diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/client.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/client.py deleted file mode 100644 index 4450fa9d75..0000000000 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/client.py +++ /dev/null @@ -1,25 +0,0 @@ -"""OpenRouter client configuration for agent generation.""" - -import os - -from openai import AsyncOpenAI - -# Configuration - use OPEN_ROUTER_API_KEY for consistency with chat/config.py -OPENROUTER_API_KEY = os.getenv("OPEN_ROUTER_API_KEY") -AGENT_GENERATOR_MODEL = os.getenv("AGENT_GENERATOR_MODEL", "anthropic/claude-opus-4.5") - -# OpenRouter client (OpenAI-compatible API) -_client: AsyncOpenAI | None = None - - -def get_client() -> AsyncOpenAI: - """Get or create the OpenRouter client.""" - global _client - if _client is None: - if not OPENROUTER_API_KEY: - raise ValueError("OPENROUTER_API_KEY environment variable is required") - _client = AsyncOpenAI( - base_url="https://openrouter.ai/api/v1", - api_key=OPENROUTER_API_KEY, - ) - return _client diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py index aa3cc1555d..fc15587110 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py @@ -1,7 +1,5 @@ """Core agent generation functions.""" -import copy -import json import logging import uuid from typing import Any @@ -9,13 +7,35 @@ from typing import Any from backend.api.features.library import db as library_db from backend.data.graph import Graph, Link, Node, create_graph -from .client import AGENT_GENERATOR_MODEL, get_client -from .prompts import DECOMPOSITION_PROMPT, GENERATION_PROMPT, PATCH_PROMPT -from .utils import get_block_summaries, parse_json_from_llm +from .service import ( + decompose_goal_external, + generate_agent_external, + generate_agent_patch_external, + is_external_service_configured, +) logger = logging.getLogger(__name__) +class AgentGeneratorNotConfiguredError(Exception): + """Raised when the external Agent Generator service is not configured.""" + + pass + + +def _check_service_configured() -> None: + """Check if the external Agent Generator service is configured. + + Raises: + AgentGeneratorNotConfiguredError: If the service is not configured. + """ + if not is_external_service_configured(): + raise AgentGeneratorNotConfiguredError( + "Agent Generator service is not configured. " + "Set AGENTGENERATOR_HOST environment variable to enable agent generation." 
+ ) + + async def decompose_goal(description: str, context: str = "") -> dict[str, Any] | None: """Break down a goal into steps or return clarifying questions. @@ -28,40 +48,13 @@ async def decompose_goal(description: str, context: str = "") -> dict[str, Any] - {"type": "clarifying_questions", "questions": [...]} - {"type": "instructions", "steps": [...]} Or None on error + + Raises: + AgentGeneratorNotConfiguredError: If the external service is not configured. """ - client = get_client() - prompt = DECOMPOSITION_PROMPT.format(block_summaries=get_block_summaries()) - - full_description = description - if context: - full_description = f"{description}\n\nAdditional context:\n{context}" - - try: - response = await client.chat.completions.create( - model=AGENT_GENERATOR_MODEL, - messages=[ - {"role": "system", "content": prompt}, - {"role": "user", "content": full_description}, - ], - temperature=0, - ) - - content = response.choices[0].message.content - if content is None: - logger.error("LLM returned empty content for decomposition") - return None - - result = parse_json_from_llm(content) - - if result is None: - logger.error(f"Failed to parse decomposition response: {content[:200]}") - return None - - return result - - except Exception as e: - logger.error(f"Error decomposing goal: {e}") - return None + _check_service_configured() + logger.info("Calling external Agent Generator service for decompose_goal") + return await decompose_goal_external(description, context) async def generate_agent(instructions: dict[str, Any]) -> dict[str, Any] | None: @@ -72,31 +65,14 @@ async def generate_agent(instructions: dict[str, Any]) -> dict[str, Any] | None: Returns: Agent JSON dict or None on error + + Raises: + AgentGeneratorNotConfiguredError: If the external service is not configured. """ - client = get_client() - prompt = GENERATION_PROMPT.format(block_summaries=get_block_summaries()) - - try: - response = await client.chat.completions.create( - model=AGENT_GENERATOR_MODEL, - messages=[ - {"role": "system", "content": prompt}, - {"role": "user", "content": json.dumps(instructions, indent=2)}, - ], - temperature=0, - ) - - content = response.choices[0].message.content - if content is None: - logger.error("LLM returned empty content for agent generation") - return None - - result = parse_json_from_llm(content) - - if result is None: - logger.error(f"Failed to parse agent JSON: {content[:200]}") - return None - + _check_service_configured() + logger.info("Calling external Agent Generator service for generate_agent") + result = await generate_agent_external(instructions) + if result: # Ensure required fields if "id" not in result: result["id"] = str(uuid.uuid4()) @@ -104,12 +80,7 @@ async def generate_agent(instructions: dict[str, Any]) -> dict[str, Any] | None: result["version"] = 1 if "is_active" not in result: result["is_active"] = True - - return result - - except Exception as e: - logger.error(f"Error generating agent: {e}") - return None + return result def json_to_graph(agent_json: dict[str, Any]) -> Graph: @@ -218,6 +189,7 @@ async def save_agent_to_library( library_agents = await library_db.create_library_agent( graph=created_graph, user_id=user_id, + sensitive_action_safe_mode=True, create_library_agents_for_sub_graphs=False, ) @@ -283,108 +255,23 @@ async def get_agent_as_json( async def generate_agent_patch( update_request: str, current_agent: dict[str, Any] ) -> dict[str, Any] | None: - """Generate a patch to update an existing agent. + """Update an existing agent using natural language. 
+ + The external Agent Generator service handles: + - Generating the patch + - Applying the patch + - Fixing and validating the result Args: update_request: Natural language description of changes current_agent: Current agent JSON Returns: - Patch dict or clarifying questions, or None on error + Updated agent JSON, clarifying questions dict, or None on error + + Raises: + AgentGeneratorNotConfiguredError: If the external service is not configured. """ - client = get_client() - prompt = PATCH_PROMPT.format( - current_agent=json.dumps(current_agent, indent=2), - block_summaries=get_block_summaries(), - ) - - try: - response = await client.chat.completions.create( - model=AGENT_GENERATOR_MODEL, - messages=[ - {"role": "system", "content": prompt}, - {"role": "user", "content": update_request}, - ], - temperature=0, - ) - - content = response.choices[0].message.content - if content is None: - logger.error("LLM returned empty content for patch generation") - return None - - return parse_json_from_llm(content) - - except Exception as e: - logger.error(f"Error generating patch: {e}") - return None - - -def apply_agent_patch( - current_agent: dict[str, Any], patch: dict[str, Any] -) -> dict[str, Any]: - """Apply a patch to an existing agent. - - Args: - current_agent: Current agent JSON - patch: Patch dict with operations - - Returns: - Updated agent JSON - """ - agent = copy.deepcopy(current_agent) - patches = patch.get("patches", []) - - for p in patches: - patch_type = p.get("type") - - if patch_type == "modify": - node_id = p.get("node_id") - changes = p.get("changes", {}) - - for node in agent.get("nodes", []): - if node["id"] == node_id: - _deep_update(node, changes) - logger.debug(f"Modified node {node_id}") - break - - elif patch_type == "add": - new_nodes = p.get("new_nodes", []) - new_links = p.get("new_links", []) - - agent["nodes"] = agent.get("nodes", []) + new_nodes - agent["links"] = agent.get("links", []) + new_links - logger.debug(f"Added {len(new_nodes)} nodes, {len(new_links)} links") - - elif patch_type == "remove": - node_ids_to_remove = set(p.get("node_ids", [])) - link_ids_to_remove = set(p.get("link_ids", [])) - - # Remove nodes - agent["nodes"] = [ - n for n in agent.get("nodes", []) if n["id"] not in node_ids_to_remove - ] - - # Remove links (both explicit and those referencing removed nodes) - agent["links"] = [ - link - for link in agent.get("links", []) - if link["id"] not in link_ids_to_remove - and link["source_id"] not in node_ids_to_remove - and link["sink_id"] not in node_ids_to_remove - ] - - logger.debug( - f"Removed {len(node_ids_to_remove)} nodes, {len(link_ids_to_remove)} links" - ) - - return agent - - -def _deep_update(target: dict, source: dict) -> None: - """Recursively update a dict with another dict.""" - for key, value in source.items(): - if key in target and isinstance(target[key], dict) and isinstance(value, dict): - _deep_update(target[key], value) - else: - target[key] = value + _check_service_configured() + logger.info("Calling external Agent Generator service for generate_agent_patch") + return await generate_agent_patch_external(update_request, current_agent) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/fixer.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/fixer.py deleted file mode 100644 index 1e25e0cbed..0000000000 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/fixer.py +++ /dev/null @@ -1,606 +0,0 @@ -"""Agent fixer - Fixes common LLM 
generation errors.""" - -import logging -import re -import uuid -from typing import Any - -from .utils import ( - ADDTODICTIONARY_BLOCK_ID, - ADDTOLIST_BLOCK_ID, - CODE_EXECUTION_BLOCK_ID, - CONDITION_BLOCK_ID, - CREATEDICT_BLOCK_ID, - CREATELIST_BLOCK_ID, - DATA_SAMPLING_BLOCK_ID, - DOUBLE_CURLY_BRACES_BLOCK_IDS, - GET_CURRENT_DATE_BLOCK_ID, - STORE_VALUE_BLOCK_ID, - UNIVERSAL_TYPE_CONVERTER_BLOCK_ID, - get_blocks_info, - is_valid_uuid, -) - -logger = logging.getLogger(__name__) - - -def fix_agent_ids(agent: dict[str, Any]) -> dict[str, Any]: - """Fix invalid UUIDs in agent and link IDs.""" - # Fix agent ID - if not is_valid_uuid(agent.get("id", "")): - agent["id"] = str(uuid.uuid4()) - logger.debug(f"Fixed agent ID: {agent['id']}") - - # Fix node IDs - id_mapping = {} # Old ID -> New ID - for node in agent.get("nodes", []): - if not is_valid_uuid(node.get("id", "")): - old_id = node.get("id", "") - new_id = str(uuid.uuid4()) - id_mapping[old_id] = new_id - node["id"] = new_id - logger.debug(f"Fixed node ID: {old_id} -> {new_id}") - - # Fix link IDs and update references - for link in agent.get("links", []): - if not is_valid_uuid(link.get("id", "")): - link["id"] = str(uuid.uuid4()) - logger.debug(f"Fixed link ID: {link['id']}") - - # Update source/sink IDs if they were remapped - if link.get("source_id") in id_mapping: - link["source_id"] = id_mapping[link["source_id"]] - if link.get("sink_id") in id_mapping: - link["sink_id"] = id_mapping[link["sink_id"]] - - return agent - - -def fix_double_curly_braces(agent: dict[str, Any]) -> dict[str, Any]: - """Fix single curly braces to double in template blocks.""" - for node in agent.get("nodes", []): - if node.get("block_id") not in DOUBLE_CURLY_BRACES_BLOCK_IDS: - continue - - input_data = node.get("input_default", {}) - for key in ("prompt", "format"): - if key in input_data and isinstance(input_data[key], str): - original = input_data[key] - # Fix simple variable references: {var} -> {{var}} - fixed = re.sub( - r"(? 
dict[str, Any]: - """Add StoreValueBlock before ConditionBlock if needed for value2.""" - nodes = agent.get("nodes", []) - links = agent.get("links", []) - - # Find all ConditionBlock nodes - condition_node_ids = { - node["id"] for node in nodes if node.get("block_id") == CONDITION_BLOCK_ID - } - - if not condition_node_ids: - return agent - - new_nodes = [] - new_links = [] - processed_conditions = set() - - for link in links: - sink_id = link.get("sink_id") - sink_name = link.get("sink_name") - - # Check if this link goes to a ConditionBlock's value2 - if sink_id in condition_node_ids and sink_name == "value2": - source_node = next( - (n for n in nodes if n["id"] == link.get("source_id")), None - ) - - # Skip if source is already a StoreValueBlock - if source_node and source_node.get("block_id") == STORE_VALUE_BLOCK_ID: - continue - - # Skip if we already processed this condition - if sink_id in processed_conditions: - continue - - processed_conditions.add(sink_id) - - # Create StoreValueBlock - store_node_id = str(uuid.uuid4()) - store_node = { - "id": store_node_id, - "block_id": STORE_VALUE_BLOCK_ID, - "input_default": {"data": None}, - "metadata": {"position": {"x": 0, "y": -100}}, - } - new_nodes.append(store_node) - - # Create link: original source -> StoreValueBlock - new_links.append( - { - "id": str(uuid.uuid4()), - "source_id": link["source_id"], - "source_name": link["source_name"], - "sink_id": store_node_id, - "sink_name": "input", - "is_static": False, - } - ) - - # Update original link: StoreValueBlock -> ConditionBlock - link["source_id"] = store_node_id - link["source_name"] = "output" - - logger.debug(f"Added StoreValueBlock before ConditionBlock {sink_id}") - - if new_nodes: - agent["nodes"] = nodes + new_nodes - - return agent - - -def fix_addtolist_blocks(agent: dict[str, Any]) -> dict[str, Any]: - """Fix AddToList blocks by adding prerequisite empty AddToList block. - - When an AddToList block is found: - 1. Checks if there's a CreateListBlock before it - 2. Removes CreateListBlock if linked directly to AddToList - 3. Adds an empty AddToList block before the original - 4. 
Ensures the original has a self-referencing link - """ - nodes = agent.get("nodes", []) - links = agent.get("links", []) - new_nodes = [] - original_addtolist_ids = set() - nodes_to_remove = set() - links_to_remove = [] - - # First pass: identify CreateListBlock nodes to remove - for link in links: - source_node = next( - (n for n in nodes if n.get("id") == link.get("source_id")), None - ) - sink_node = next((n for n in nodes if n.get("id") == link.get("sink_id")), None) - - if ( - source_node - and sink_node - and source_node.get("block_id") == CREATELIST_BLOCK_ID - and sink_node.get("block_id") == ADDTOLIST_BLOCK_ID - ): - nodes_to_remove.add(source_node.get("id")) - links_to_remove.append(link) - logger.debug(f"Removing CreateListBlock {source_node.get('id')}") - - # Second pass: process AddToList blocks - filtered_nodes = [] - for node in nodes: - if node.get("id") in nodes_to_remove: - continue - - if node.get("block_id") == ADDTOLIST_BLOCK_ID: - original_addtolist_ids.add(node.get("id")) - node_id = node.get("id") - pos = node.get("metadata", {}).get("position", {"x": 0, "y": 0}) - - # Check if already has prerequisite - has_prereq = any( - link.get("sink_id") == node_id - and link.get("sink_name") == "list" - and link.get("source_name") == "updated_list" - for link in links - ) - - if not has_prereq: - # Remove links to "list" input (except self-reference) - for link in links: - if ( - link.get("sink_id") == node_id - and link.get("sink_name") == "list" - and link.get("source_id") != node_id - and link not in links_to_remove - ): - links_to_remove.append(link) - - # Create prerequisite AddToList block - prereq_id = str(uuid.uuid4()) - prereq_node = { - "id": prereq_id, - "block_id": ADDTOLIST_BLOCK_ID, - "input_default": {"list": [], "entry": None, "entries": []}, - "metadata": { - "position": {"x": pos.get("x", 0) - 800, "y": pos.get("y", 0)} - }, - } - new_nodes.append(prereq_node) - - # Link prerequisite to original - links.append( - { - "id": str(uuid.uuid4()), - "source_id": prereq_id, - "source_name": "updated_list", - "sink_id": node_id, - "sink_name": "list", - "is_static": False, - } - ) - logger.debug(f"Added prerequisite AddToList block for {node_id}") - - filtered_nodes.append(node) - - # Remove marked links - filtered_links = [link for link in links if link not in links_to_remove] - - # Add self-referencing links for original AddToList blocks - for node in filtered_nodes + new_nodes: - if ( - node.get("block_id") == ADDTOLIST_BLOCK_ID - and node.get("id") in original_addtolist_ids - ): - node_id = node.get("id") - has_self_ref = any( - link["source_id"] == node_id - and link["sink_id"] == node_id - and link["source_name"] == "updated_list" - and link["sink_name"] == "list" - for link in filtered_links - ) - if not has_self_ref: - filtered_links.append( - { - "id": str(uuid.uuid4()), - "source_id": node_id, - "source_name": "updated_list", - "sink_id": node_id, - "sink_name": "list", - "is_static": False, - } - ) - logger.debug(f"Added self-reference for AddToList {node_id}") - - agent["nodes"] = filtered_nodes + new_nodes - agent["links"] = filtered_links - return agent - - -def fix_addtodictionary_blocks(agent: dict[str, Any]) -> dict[str, Any]: - """Fix AddToDictionary blocks by removing empty CreateDictionary nodes.""" - nodes = agent.get("nodes", []) - links = agent.get("links", []) - nodes_to_remove = set() - links_to_remove = [] - - for link in links: - source_node = next( - (n for n in nodes if n.get("id") == link.get("source_id")), None - ) - sink_node = next((n 
for n in nodes if n.get("id") == link.get("sink_id")), None) - - if ( - source_node - and sink_node - and source_node.get("block_id") == CREATEDICT_BLOCK_ID - and sink_node.get("block_id") == ADDTODICTIONARY_BLOCK_ID - ): - nodes_to_remove.add(source_node.get("id")) - links_to_remove.append(link) - logger.debug(f"Removing CreateDictionary {source_node.get('id')}") - - agent["nodes"] = [n for n in nodes if n.get("id") not in nodes_to_remove] - agent["links"] = [link for link in links if link not in links_to_remove] - return agent - - -def fix_code_execution_output(agent: dict[str, Any]) -> dict[str, Any]: - """Fix CodeExecutionBlock output: change 'response' to 'stdout_logs'.""" - nodes = agent.get("nodes", []) - links = agent.get("links", []) - - for link in links: - source_node = next( - (n for n in nodes if n.get("id") == link.get("source_id")), None - ) - if ( - source_node - and source_node.get("block_id") == CODE_EXECUTION_BLOCK_ID - and link.get("source_name") == "response" - ): - link["source_name"] = "stdout_logs" - logger.debug("Fixed CodeExecutionBlock output: response -> stdout_logs") - - return agent - - -def fix_data_sampling_sample_size(agent: dict[str, Any]) -> dict[str, Any]: - """Fix DataSamplingBlock by setting sample_size to 1 as default.""" - nodes = agent.get("nodes", []) - links = agent.get("links", []) - links_to_remove = [] - - for node in nodes: - if node.get("block_id") == DATA_SAMPLING_BLOCK_ID: - node_id = node.get("id") - input_default = node.get("input_default", {}) - - # Remove links to sample_size - for link in links: - if ( - link.get("sink_id") == node_id - and link.get("sink_name") == "sample_size" - ): - links_to_remove.append(link) - - # Set default - input_default["sample_size"] = 1 - node["input_default"] = input_default - logger.debug(f"Fixed DataSamplingBlock {node_id} sample_size to 1") - - if links_to_remove: - agent["links"] = [link for link in links if link not in links_to_remove] - - return agent - - -def fix_node_x_coordinates(agent: dict[str, Any]) -> dict[str, Any]: - """Fix node x-coordinates to ensure 800+ unit spacing between linked nodes.""" - nodes = agent.get("nodes", []) - links = agent.get("links", []) - node_lookup = {n.get("id"): n for n in nodes} - - for link in links: - source_id = link.get("source_id") - sink_id = link.get("sink_id") - - source_node = node_lookup.get(source_id) - sink_node = node_lookup.get(sink_id) - - if not source_node or not sink_node: - continue - - source_pos = source_node.get("metadata", {}).get("position", {}) - sink_pos = sink_node.get("metadata", {}).get("position", {}) - - source_x = source_pos.get("x", 0) - sink_x = sink_pos.get("x", 0) - - if abs(sink_x - source_x) < 800: - new_x = source_x + 800 - if "metadata" not in sink_node: - sink_node["metadata"] = {} - if "position" not in sink_node["metadata"]: - sink_node["metadata"]["position"] = {} - sink_node["metadata"]["position"]["x"] = new_x - logger.debug(f"Fixed node {sink_id} x: {sink_x} -> {new_x}") - - return agent - - -def fix_getcurrentdate_offset(agent: dict[str, Any]) -> dict[str, Any]: - """Fix GetCurrentDateBlock offset to ensure it's positive.""" - for node in agent.get("nodes", []): - if node.get("block_id") == GET_CURRENT_DATE_BLOCK_ID: - input_default = node.get("input_default", {}) - if "offset" in input_default: - offset = input_default["offset"] - if isinstance(offset, (int, float)) and offset < 0: - input_default["offset"] = abs(offset) - logger.debug(f"Fixed offset: {offset} -> {abs(offset)}") - - return agent - - -def 
fix_ai_model_parameter( - agent: dict[str, Any], - blocks_info: list[dict[str, Any]], - default_model: str = "gpt-4o", -) -> dict[str, Any]: - """Add default model parameter to AI blocks if missing.""" - block_map = {b.get("id"): b for b in blocks_info} - - for node in agent.get("nodes", []): - block_id = node.get("block_id") - block = block_map.get(block_id) - - if not block: - continue - - # Check if block has AI category - categories = block.get("categories", []) - is_ai_block = any( - cat.get("category") == "AI" for cat in categories if isinstance(cat, dict) - ) - - if is_ai_block: - input_default = node.get("input_default", {}) - if "model" not in input_default: - input_default["model"] = default_model - node["input_default"] = input_default - logger.debug( - f"Added model '{default_model}' to AI block {node.get('id')}" - ) - - return agent - - -def fix_link_static_properties( - agent: dict[str, Any], blocks_info: list[dict[str, Any]] -) -> dict[str, Any]: - """Fix is_static property based on source block's staticOutput.""" - block_map = {b.get("id"): b for b in blocks_info} - node_lookup = {n.get("id"): n for n in agent.get("nodes", [])} - - for link in agent.get("links", []): - source_node = node_lookup.get(link.get("source_id")) - if not source_node: - continue - - source_block = block_map.get(source_node.get("block_id")) - if not source_block: - continue - - static_output = source_block.get("staticOutput", False) - if link.get("is_static") != static_output: - link["is_static"] = static_output - logger.debug(f"Fixed link {link.get('id')} is_static to {static_output}") - - return agent - - -def fix_data_type_mismatch( - agent: dict[str, Any], blocks_info: list[dict[str, Any]] -) -> dict[str, Any]: - """Fix data type mismatches by inserting UniversalTypeConverterBlock.""" - nodes = agent.get("nodes", []) - links = agent.get("links", []) - block_map = {b.get("id"): b for b in blocks_info} - node_lookup = {n.get("id"): n for n in nodes} - - def get_property_type(schema: dict, name: str) -> str | None: - if "_#_" in name: - parent, child = name.split("_#_", 1) - parent_schema = schema.get(parent, {}) - if "properties" in parent_schema: - return parent_schema["properties"].get(child, {}).get("type") - return None - return schema.get(name, {}).get("type") - - def are_types_compatible(src: str, sink: str) -> bool: - if {src, sink} <= {"integer", "number"}: - return True - return src == sink - - type_mapping = { - "string": "string", - "text": "string", - "integer": "number", - "number": "number", - "float": "number", - "boolean": "boolean", - "bool": "boolean", - "array": "list", - "list": "list", - "object": "dictionary", - "dict": "dictionary", - "dictionary": "dictionary", - } - - new_links = [] - nodes_to_add = [] - - for link in links: - source_node = node_lookup.get(link.get("source_id")) - sink_node = node_lookup.get(link.get("sink_id")) - - if not source_node or not sink_node: - new_links.append(link) - continue - - source_block = block_map.get(source_node.get("block_id")) - sink_block = block_map.get(sink_node.get("block_id")) - - if not source_block or not sink_block: - new_links.append(link) - continue - - source_outputs = source_block.get("outputSchema", {}).get("properties", {}) - sink_inputs = sink_block.get("inputSchema", {}).get("properties", {}) - - source_type = get_property_type(source_outputs, link.get("source_name", "")) - sink_type = get_property_type(sink_inputs, link.get("sink_name", "")) - - if ( - source_type - and sink_type - and not 
are_types_compatible(source_type, sink_type) - ): - # Insert type converter - converter_id = str(uuid.uuid4()) - target_type = type_mapping.get(sink_type, sink_type) - - converter_node = { - "id": converter_id, - "block_id": UNIVERSAL_TYPE_CONVERTER_BLOCK_ID, - "input_default": {"type": target_type}, - "metadata": {"position": {"x": 0, "y": 100}}, - } - nodes_to_add.append(converter_node) - - # source -> converter - new_links.append( - { - "id": str(uuid.uuid4()), - "source_id": link["source_id"], - "source_name": link["source_name"], - "sink_id": converter_id, - "sink_name": "value", - "is_static": False, - } - ) - - # converter -> sink - new_links.append( - { - "id": str(uuid.uuid4()), - "source_id": converter_id, - "source_name": "value", - "sink_id": link["sink_id"], - "sink_name": link["sink_name"], - "is_static": False, - } - ) - - logger.debug(f"Inserted type converter: {source_type} -> {target_type}") - else: - new_links.append(link) - - if nodes_to_add: - agent["nodes"] = nodes + nodes_to_add - agent["links"] = new_links - - return agent - - -def apply_all_fixes( - agent: dict[str, Any], blocks_info: list[dict[str, Any]] | None = None -) -> dict[str, Any]: - """Apply all fixes to an agent JSON. - - Args: - agent: Agent JSON dict - blocks_info: Optional list of block info dicts for advanced fixes - - Returns: - Fixed agent JSON - """ - # Basic fixes (no block info needed) - agent = fix_agent_ids(agent) - agent = fix_double_curly_braces(agent) - agent = fix_storevalue_before_condition(agent) - agent = fix_addtolist_blocks(agent) - agent = fix_addtodictionary_blocks(agent) - agent = fix_code_execution_output(agent) - agent = fix_data_sampling_sample_size(agent) - agent = fix_node_x_coordinates(agent) - agent = fix_getcurrentdate_offset(agent) - - # Advanced fixes (require block info) - if blocks_info is None: - blocks_info = get_blocks_info() - - agent = fix_ai_model_parameter(agent, blocks_info) - agent = fix_link_static_properties(agent, blocks_info) - agent = fix_data_type_mismatch(agent, blocks_info) - - return agent diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/prompts.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/prompts.py deleted file mode 100644 index 228bba8c8a..0000000000 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/prompts.py +++ /dev/null @@ -1,225 +0,0 @@ -"""Prompt templates for agent generation.""" - -DECOMPOSITION_PROMPT = """ -You are an expert AutoGPT Workflow Decomposer. Your task is to analyze a user's high-level goal and break it down into a clear, step-by-step plan using the available blocks. - -Each step should represent a distinct, automatable action suitable for execution by an AI automation system. 
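For reference, the core rewiring step of the deleted `fix_data_type_mismatch` above reduces to the sketch below. The converter block ID, the `value` pin names, and the default position are copied from the diff; the helper name `splice_converter` is ours.

```python
import uuid

UNIVERSAL_TYPE_CONVERTER_BLOCK_ID = "95d1b990-ce13-4d88-9737-ba5c2070c97b"

def splice_converter(link: dict, target_type: str) -> tuple[dict, list[dict]]:
    """Return the converter node and the two links that replace `link`."""
    converter_id = str(uuid.uuid4())
    node = {
        "id": converter_id,
        "block_id": UNIVERSAL_TYPE_CONVERTER_BLOCK_ID,
        "input_default": {"type": target_type},
        "metadata": {"position": {"x": 0, "y": 100}},
    }
    links = [
        # source -> converter
        {"id": str(uuid.uuid4()), "source_id": link["source_id"],
         "source_name": link["source_name"], "sink_id": converter_id,
         "sink_name": "value", "is_static": False},
        # converter -> sink
        {"id": str(uuid.uuid4()), "source_id": converter_id,
         "source_name": "value", "sink_id": link["sink_id"],
         "sink_name": link["sink_name"], "is_static": False},
    ]
    return node, links
```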
- ---- - -FIRST: Analyze the user's goal and determine: -1) Design-time configuration (fixed settings that won't change per run) -2) Runtime inputs (values the agent's end-user will provide each time it runs) - -For anything that can vary per run (email addresses, names, dates, search terms, etc.): -- DO NOT ask for the actual value -- Instead, define it as an Agent Input with a clear name, type, and description - -Only ask clarifying questions about design-time config that affects how you build the workflow: -- Which external service to use (e.g., "Gmail vs Outlook", "Notion vs Google Docs") -- Required formats or structures (e.g., "CSV, JSON, or PDF output?") -- Business rules that must be hard-coded - -IMPORTANT CLARIFICATIONS POLICY: -- Ask no more than five essential questions -- Do not ask for concrete values that can be provided at runtime as Agent Inputs -- Do not ask for API keys or credentials; the platform handles those directly -- If there is enough information to infer reasonable defaults, prefer to propose defaults - ---- - -GUIDELINES: -1. List each step as a numbered item -2. Describe the action clearly and specify inputs/outputs -3. Ensure steps are in logical, sequential order -4. Mention block names naturally (e.g., "Use GetWeatherByLocationBlock to...") -5. Help the user reach their goal efficiently - ---- - -RULES: -1. OUTPUT FORMAT: Only output either clarifying questions OR step-by-step instructions, not both -2. USE ONLY THE BLOCKS PROVIDED -3. ALL required_input fields must be provided -4. Data types of linked properties must match -5. Write expert-level prompts for AI-related blocks - ---- - -CRITICAL BLOCK RESTRICTIONS: -1. AddToListBlock: Outputs updated list EVERY addition, not after all additions -2. SendEmailBlock: Draft the email for user review; set SMTP config based on email type -3. ConditionBlock: value2 is reference, value1 is contrast -4. CodeExecutionBlock: DO NOT USE - use AI blocks instead -5. ReadCsvBlock: Only use the 'rows' output, not 'row' - ---- - -OUTPUT FORMAT: - -If more information is needed: -```json -{{ - "type": "clarifying_questions", - "questions": [ - {{ - "question": "Which email provider should be used? (Gmail, Outlook, custom SMTP)", - "keyword": "email_provider", - "example": "Gmail" - }} - ] -}} -``` - -If ready to proceed: -```json -{{ - "type": "instructions", - "steps": [ - {{ - "step_number": 1, - "block_name": "AgentShortTextInputBlock", - "description": "Get the URL of the content to analyze.", - "inputs": [{{"name": "name", "value": "URL"}}], - "outputs": [{{"name": "result", "description": "The URL entered by user"}}] - }} - ] -}} -``` - ---- - -AVAILABLE BLOCKS: -{block_summaries} -""" - -GENERATION_PROMPT = """ -You are an expert AI workflow builder. Generate a valid agent JSON from the given instructions. - ---- - -NODES: -Each node must include: -- `id`: Unique UUID v4 (e.g. `a8f5b1e2-c3d4-4e5f-8a9b-0c1d2e3f4a5b`) -- `block_id`: The block identifier (must match an Allowed Block) -- `input_default`: Dict of inputs (can be empty if no static inputs needed) -- `metadata`: Must contain: - - `position`: {{"x": number, "y": number}} - adjacent nodes should differ by 800+ in X - - `customized_name`: Clear name describing this block's purpose in the workflow - ---- - -LINKS: -Each link connects a source node's output to a sink node's input: -- `id`: MUST be UUID v4 (NOT "link-1", "link-2", etc.) 
-- `source_id`: ID of the source node -- `source_name`: Output field name from the source block -- `sink_id`: ID of the sink node -- `sink_name`: Input field name on the sink block -- `is_static`: true only if source block has static_output: true - -CRITICAL: All IDs must be valid UUID v4 format! - ---- - -AGENT (GRAPH): -Wrap nodes and links in: -- `id`: UUID of the agent -- `name`: Short, generic name (avoid specific company names, URLs) -- `description`: Short, generic description -- `nodes`: List of all nodes -- `links`: List of all links -- `version`: 1 -- `is_active`: true - ---- - -TIPS: -- All required_input fields must be provided via input_default or a valid link -- Ensure consistent source_id and sink_id references -- Avoid dangling links -- Input/output pins must match block schemas -- Do not invent unknown block_ids - ---- - -ALLOWED BLOCKS: -{block_summaries} - ---- - -Generate the complete agent JSON. Output ONLY valid JSON, no explanation. -""" - -PATCH_PROMPT = """ -You are an expert at modifying AutoGPT agent workflows. Given the current agent and a modification request, generate a JSON patch to update the agent. - -CURRENT AGENT: -{current_agent} - -AVAILABLE BLOCKS: -{block_summaries} - ---- - -PATCH FORMAT: -Return a JSON object with the following structure: - -```json -{{ - "type": "patch", - "intent": "Brief description of what the patch does", - "patches": [ - {{ - "type": "modify", - "node_id": "uuid-of-node-to-modify", - "changes": {{ - "input_default": {{"field": "new_value"}}, - "metadata": {{"customized_name": "New Name"}} - }} - }}, - {{ - "type": "add", - "new_nodes": [ - {{ - "id": "new-uuid", - "block_id": "block-uuid", - "input_default": {{}}, - "metadata": {{"position": {{"x": 0, "y": 0}}, "customized_name": "Name"}} - }} - ], - "new_links": [ - {{ - "id": "link-uuid", - "source_id": "source-node-id", - "source_name": "output_field", - "sink_id": "sink-node-id", - "sink_name": "input_field" - }} - ] - }}, - {{ - "type": "remove", - "node_ids": ["uuid-of-node-to-remove"], - "link_ids": ["uuid-of-link-to-remove"] - }} - ] -}} -``` - -If you need more information, return: -```json -{{ - "type": "clarifying_questions", - "questions": [ - {{ - "question": "What specific change do you want?", - "keyword": "change_type", - "example": "Add error handling" - }} - ] -}} -``` - -Generate the minimal patch needed. Output ONLY valid JSON. -""" diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py new file mode 100644 index 0000000000..a4d2f1af15 --- /dev/null +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py @@ -0,0 +1,269 @@ +"""External Agent Generator service client. + +This module provides a client for communicating with the external Agent Generator +microservice. When AGENTGENERATOR_HOST is configured, the agent generation functions +will delegate to the external service instead of using the built-in LLM-based implementation. 
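A minimal usage sketch for the new client module (not part of the diff), assuming `AGENTGENERATOR_HOST` and `AGENTGENERATOR_PORT` are set so the client base URL resolves to `http://host:port`:

```python
import asyncio

from backend.api.features.chat.tools.agent_generator.service import (
    decompose_goal_external,
    is_external_service_configured,
)

async def main() -> None:
    if not is_external_service_configured():
        raise RuntimeError("AGENTGENERATOR_HOST is not configured")
    # Returns clarifying questions, instructions, or None on error (see below).
    plan = await decompose_goal_external("Email me a daily weather summary")
    if plan and plan["type"] == "instructions":
        print(f"{len(plan['steps'])} steps planned")

asyncio.run(main())
```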
+""" + +import logging +from typing import Any + +import httpx + +from backend.util.settings import Settings + +logger = logging.getLogger(__name__) + +_client: httpx.AsyncClient | None = None +_settings: Settings | None = None + + +def _get_settings() -> Settings: + """Get or create settings singleton.""" + global _settings + if _settings is None: + _settings = Settings() + return _settings + + +def is_external_service_configured() -> bool: + """Check if external Agent Generator service is configured.""" + settings = _get_settings() + return bool(settings.config.agentgenerator_host) + + +def _get_base_url() -> str: + """Get the base URL for the external service.""" + settings = _get_settings() + host = settings.config.agentgenerator_host + port = settings.config.agentgenerator_port + return f"http://{host}:{port}" + + +def _get_client() -> httpx.AsyncClient: + """Get or create the HTTP client for the external service.""" + global _client + if _client is None: + settings = _get_settings() + _client = httpx.AsyncClient( + base_url=_get_base_url(), + timeout=httpx.Timeout(settings.config.agentgenerator_timeout), + ) + return _client + + +async def decompose_goal_external( + description: str, context: str = "" +) -> dict[str, Any] | None: + """Call the external service to decompose a goal. + + Args: + description: Natural language goal description + context: Additional context (e.g., answers to previous questions) + + Returns: + Dict with either: + - {"type": "clarifying_questions", "questions": [...]} + - {"type": "instructions", "steps": [...]} + - {"type": "unachievable_goal", ...} + - {"type": "vague_goal", ...} + Or None on error + """ + client = _get_client() + + # Build the request payload + payload: dict[str, Any] = {"description": description} + if context: + # The external service uses user_instruction for additional context + payload["user_instruction"] = context + + try: + response = await client.post("/api/decompose-description", json=payload) + response.raise_for_status() + data = response.json() + + if not data.get("success"): + logger.error(f"External service returned error: {data.get('error')}") + return None + + # Map the response to the expected format + response_type = data.get("type") + if response_type == "instructions": + return {"type": "instructions", "steps": data.get("steps", [])} + elif response_type == "clarifying_questions": + return { + "type": "clarifying_questions", + "questions": data.get("questions", []), + } + elif response_type == "unachievable_goal": + return { + "type": "unachievable_goal", + "reason": data.get("reason"), + "suggested_goal": data.get("suggested_goal"), + } + elif response_type == "vague_goal": + return { + "type": "vague_goal", + "suggested_goal": data.get("suggested_goal"), + } + else: + logger.error( + f"Unknown response type from external service: {response_type}" + ) + return None + + except httpx.HTTPStatusError as e: + logger.error(f"HTTP error calling external agent generator: {e}") + return None + except httpx.RequestError as e: + logger.error(f"Request error calling external agent generator: {e}") + return None + except Exception as e: + logger.error(f"Unexpected error calling external agent generator: {e}") + return None + + +async def generate_agent_external( + instructions: dict[str, Any] +) -> dict[str, Any] | None: + """Call the external service to generate an agent from instructions. 
+ + Args: + instructions: Structured instructions from decompose_goal + + Returns: + Agent JSON dict or None on error + """ + client = _get_client() + + try: + response = await client.post( + "/api/generate-agent", json={"instructions": instructions} + ) + response.raise_for_status() + data = response.json() + + if not data.get("success"): + logger.error(f"External service returned error: {data.get('error')}") + return None + + return data.get("agent_json") + + except httpx.HTTPStatusError as e: + logger.error(f"HTTP error calling external agent generator: {e}") + return None + except httpx.RequestError as e: + logger.error(f"Request error calling external agent generator: {e}") + return None + except Exception as e: + logger.error(f"Unexpected error calling external agent generator: {e}") + return None + + +async def generate_agent_patch_external( + update_request: str, current_agent: dict[str, Any] +) -> dict[str, Any] | None: + """Call the external service to generate a patch for an existing agent. + + Args: + update_request: Natural language description of changes + current_agent: Current agent JSON + + Returns: + Updated agent JSON, clarifying questions dict, or None on error + """ + client = _get_client() + + try: + response = await client.post( + "/api/update-agent", + json={ + "update_request": update_request, + "current_agent_json": current_agent, + }, + ) + response.raise_for_status() + data = response.json() + + if not data.get("success"): + logger.error(f"External service returned error: {data.get('error')}") + return None + + # Check if it's clarifying questions + if data.get("type") == "clarifying_questions": + return { + "type": "clarifying_questions", + "questions": data.get("questions", []), + } + + # Otherwise return the updated agent JSON + return data.get("agent_json") + + except httpx.HTTPStatusError as e: + logger.error(f"HTTP error calling external agent generator: {e}") + return None + except httpx.RequestError as e: + logger.error(f"Request error calling external agent generator: {e}") + return None + except Exception as e: + logger.error(f"Unexpected error calling external agent generator: {e}") + return None + + +async def get_blocks_external() -> list[dict[str, Any]] | None: + """Get available blocks from the external service. + + Returns: + List of block info dicts or None on error + """ + client = _get_client() + + try: + response = await client.get("/api/blocks") + response.raise_for_status() + data = response.json() + + if not data.get("success"): + logger.error("External service returned error getting blocks") + return None + + return data.get("blocks", []) + + except httpx.HTTPStatusError as e: + logger.error(f"HTTP error getting blocks from external service: {e}") + return None + except httpx.RequestError as e: + logger.error(f"Request error getting blocks from external service: {e}") + return None + except Exception as e: + logger.error(f"Unexpected error getting blocks from external service: {e}") + return None + + +async def health_check() -> bool: + """Check if the external service is healthy. 
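Note that `generate_agent_patch_external` has two non-`None` success shapes, so callers must branch on `type`; `ask_user` and `preview_agent` below are hypothetical placeholders:

```python
async def apply_edit(current_agent: dict) -> None:
    result = await generate_agent_patch_external("Add a retry step", current_agent)
    if result is None:
        return  # transport or service error, already logged by the client
    if result.get("type") == "clarifying_questions":
        ask_user(result["questions"])  # hypothetical UI hand-off
    else:
        preview_agent(result)  # hypothetical; result is the updated agent JSON
```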
+ + Returns: + True if healthy, False otherwise + """ + if not is_external_service_configured(): + return False + + client = _get_client() + + try: + response = await client.get("/health") + response.raise_for_status() + data = response.json() + return data.get("status") == "healthy" and data.get("blocks_loaded", False) + except Exception as e: + logger.warning(f"External agent generator health check failed: {e}") + return False + + +async def close_client() -> None: + """Close the HTTP client.""" + global _client + if _client is not None: + await _client.aclose() + _client = None diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/utils.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/utils.py deleted file mode 100644 index 9c3c866c7f..0000000000 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/utils.py +++ /dev/null @@ -1,213 +0,0 @@ -"""Utilities for agent generation.""" - -import json -import re -from typing import Any - -from backend.data.block import get_blocks - -# UUID validation regex -UUID_REGEX = re.compile( - r"^[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}$" -) - -# Block IDs for various fixes -STORE_VALUE_BLOCK_ID = "1ff065e9-88e8-4358-9d82-8dc91f622ba9" -CONDITION_BLOCK_ID = "715696a0-e1da-45c8-b209-c2fa9c3b0be6" -ADDTOLIST_BLOCK_ID = "aeb08fc1-2fc1-4141-bc8e-f758f183a822" -ADDTODICTIONARY_BLOCK_ID = "31d1064e-7446-4693-a7d4-65e5ca1180d1" -CREATELIST_BLOCK_ID = "a912d5c7-6e00-4542-b2a9-8034136930e4" -CREATEDICT_BLOCK_ID = "b924ddf4-de4f-4b56-9a85-358930dcbc91" -CODE_EXECUTION_BLOCK_ID = "0b02b072-abe7-11ef-8372-fb5d162dd712" -DATA_SAMPLING_BLOCK_ID = "4a448883-71fa-49cf-91cf-70d793bd7d87" -UNIVERSAL_TYPE_CONVERTER_BLOCK_ID = "95d1b990-ce13-4d88-9737-ba5c2070c97b" -GET_CURRENT_DATE_BLOCK_ID = "b29c1b50-5d0e-4d9f-8f9d-1b0e6fcbf0b1" - -DOUBLE_CURLY_BRACES_BLOCK_IDS = [ - "44f6c8ad-d75c-4ae1-8209-aad1c0326928", # FillTextTemplateBlock - "6ab085e2-20b3-4055-bc3e-08036e01eca6", - "90f8c45e-e983-4644-aa0b-b4ebe2f531bc", - "363ae599-353e-4804-937e-b2ee3cef3da4", # AgentOutputBlock - "3b191d9f-356f-482d-8238-ba04b6d18381", - "db7d8f02-2f44-4c55-ab7a-eae0941f0c30", - "3a7c4b8d-6e2f-4a5d-b9c1-f8d23c5a9b0e", - "ed1ae7a0-b770-4089-b520-1f0005fad19a", - "a892b8d9-3e4e-4e9c-9c1e-75f8efcf1bfa", - "b29c1b50-5d0e-4d9f-8f9d-1b0e6fcbf0b1", - "716a67b3-6760-42e7-86dc-18645c6e00fc", - "530cf046-2ce0-4854-ae2c-659db17c7a46", - "ed55ac19-356e-4243-a6cb-bc599e9b716f", - "1f292d4a-41a4-4977-9684-7c8d560b9f91", # LLM blocks - "32a87eab-381e-4dd4-bdb8-4c47151be35a", -] - - -def is_valid_uuid(value: str) -> bool: - """Check if a string is a valid UUID v4.""" - return isinstance(value, str) and UUID_REGEX.match(value) is not None - - -def _compact_schema(schema: dict) -> dict[str, str]: - """Extract compact type info from a JSON schema properties dict. - - Returns a dict of {field_name: type_string} for essential info only. 
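For reference, the deleted `_compact_schema` (its body continues below) flattens JSON-schema properties into `{field: type}` strings; a hand-checked example of its behavior:

```python
schema = {
    "properties": {
        "query": {"type": "string"},
        "limit": {"anyOf": [{"type": "integer"}, {"type": "null"}]},
        "tags": {"type": "array", "items": {"type": "string"}},
        "_internal": {"type": "object"},  # skipped: leading underscore
    }
}
# _compact_schema(schema) ->
# {"query": "string", "limit": "integer|null", "tags": "array[string]"}
```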
- """ - props = schema.get("properties", {}) - result = {} - - for name, prop in props.items(): - # Skip internal/complex fields - if name.startswith("_"): - continue - - # Get type string - type_str = prop.get("type", "any") - - # Handle anyOf/oneOf (optional types) - if "anyOf" in prop: - types = [t.get("type", "?") for t in prop["anyOf"] if t.get("type")] - type_str = "|".join(types) if types else "any" - elif "allOf" in prop: - type_str = "object" - - # Add array item type if present - if type_str == "array" and "items" in prop: - items = prop["items"] - if isinstance(items, dict): - item_type = items.get("type", "any") - type_str = f"array[{item_type}]" - - result[name] = type_str - - return result - - -def get_block_summaries(include_schemas: bool = True) -> str: - """Generate compact block summaries for prompts. - - Args: - include_schemas: Whether to include input/output type info - - Returns: - Formatted string of block summaries (compact format) - """ - blocks = get_blocks() - summaries = [] - - for block_id, block_cls in blocks.items(): - block = block_cls() - name = block.name - desc = getattr(block, "description", "") or "" - - # Truncate description - if len(desc) > 150: - desc = desc[:147] + "..." - - if not include_schemas: - summaries.append(f"- {name} (id: {block_id}): {desc}") - else: - # Compact format with type info only - inputs = {} - outputs = {} - required = [] - - if hasattr(block, "input_schema"): - try: - schema = block.input_schema.jsonschema() - inputs = _compact_schema(schema) - required = schema.get("required", []) - except Exception: - pass - - if hasattr(block, "output_schema"): - try: - schema = block.output_schema.jsonschema() - outputs = _compact_schema(schema) - except Exception: - pass - - # Build compact line format - # Format: NAME (id): desc | in: {field:type, ...} [required] | out: {field:type} - in_str = ", ".join(f"{k}:{v}" for k, v in inputs.items()) - out_str = ", ".join(f"{k}:{v}" for k, v in outputs.items()) - req_str = f" req=[{','.join(required)}]" if required else "" - - static = " [static]" if getattr(block, "static_output", False) else "" - - line = f"- {name} (id: {block_id}): {desc}" - if in_str: - line += f"\n in: {{{in_str}}}{req_str}" - if out_str: - line += f"\n out: {{{out_str}}}{static}" - - summaries.append(line) - - return "\n".join(summaries) - - -def get_blocks_info() -> list[dict[str, Any]]: - """Get block information with schemas for validation and fixing.""" - blocks = get_blocks() - blocks_info = [] - for block_id, block_cls in blocks.items(): - block = block_cls() - blocks_info.append( - { - "id": block_id, - "name": block.name, - "description": getattr(block, "description", ""), - "categories": getattr(block, "categories", []), - "staticOutput": getattr(block, "static_output", False), - "inputSchema": ( - block.input_schema.jsonschema() - if hasattr(block, "input_schema") - else {} - ), - "outputSchema": ( - block.output_schema.jsonschema() - if hasattr(block, "output_schema") - else {} - ), - } - ) - return blocks_info - - -def parse_json_from_llm(text: str) -> dict[str, Any] | None: - """Extract JSON from LLM response (handles markdown code blocks).""" - if not text: - return None - - # Try fenced code block - match = re.search(r"```(?:json)?\s*([\s\S]*?)```", text, re.IGNORECASE) - if match: - try: - return json.loads(match.group(1).strip()) - except json.JSONDecodeError: - pass - - # Try raw text - try: - return json.loads(text.strip()) - except json.JSONDecodeError: - pass - - # Try finding {...} span - start = 
text.find("{") - end = text.rfind("}") - if start != -1 and end > start: - try: - return json.loads(text[start : end + 1]) - except json.JSONDecodeError: - pass - - # Try finding [...] span - start = text.find("[") - end = text.rfind("]") - if start != -1 and end > start: - try: - return json.loads(text[start : end + 1]) - except json.JSONDecodeError: - pass - - return None diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/validator.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/validator.py deleted file mode 100644 index c913e92bfd..0000000000 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/validator.py +++ /dev/null @@ -1,279 +0,0 @@ -"""Agent validator - Validates agent structure and connections.""" - -import logging -import re -from typing import Any - -from .utils import get_blocks_info - -logger = logging.getLogger(__name__) - - -class AgentValidator: - """Validator for AutoGPT agents with detailed error reporting.""" - - def __init__(self): - self.errors: list[str] = [] - - def add_error(self, error: str) -> None: - """Add an error message.""" - self.errors.append(error) - - def validate_block_existence( - self, agent: dict[str, Any], blocks_info: list[dict[str, Any]] - ) -> bool: - """Validate all block IDs exist in the blocks library.""" - valid = True - valid_block_ids = {b.get("id") for b in blocks_info if b.get("id")} - - for node in agent.get("nodes", []): - block_id = node.get("block_id") - node_id = node.get("id") - - if not block_id: - self.add_error(f"Node '{node_id}' is missing 'block_id' field.") - valid = False - continue - - if block_id not in valid_block_ids: - self.add_error( - f"Node '{node_id}' references block_id '{block_id}' which does not exist." - ) - valid = False - - return valid - - def validate_link_node_references(self, agent: dict[str, Any]) -> bool: - """Validate all node IDs referenced in links exist.""" - valid = True - valid_node_ids = {n.get("id") for n in agent.get("nodes", []) if n.get("id")} - - for link in agent.get("links", []): - link_id = link.get("id", "Unknown") - source_id = link.get("source_id") - sink_id = link.get("sink_id") - - if not source_id: - self.add_error(f"Link '{link_id}' is missing 'source_id'.") - valid = False - elif source_id not in valid_node_ids: - self.add_error( - f"Link '{link_id}' references non-existent source_id '{source_id}'." - ) - valid = False - - if not sink_id: - self.add_error(f"Link '{link_id}' is missing 'sink_id'.") - valid = False - elif sink_id not in valid_node_ids: - self.add_error( - f"Link '{link_id}' references non-existent sink_id '{sink_id}'." 
- ) - valid = False - - return valid - - def validate_required_inputs( - self, agent: dict[str, Any], blocks_info: list[dict[str, Any]] - ) -> bool: - """Validate required inputs are provided.""" - valid = True - block_map = {b.get("id"): b for b in blocks_info} - - for node in agent.get("nodes", []): - block_id = node.get("block_id") - block = block_map.get(block_id) - - if not block: - continue - - required_inputs = block.get("inputSchema", {}).get("required", []) - input_defaults = node.get("input_default", {}) - node_id = node.get("id") - - # Get linked inputs - linked_inputs = { - link["sink_name"] - for link in agent.get("links", []) - if link.get("sink_id") == node_id - } - - for req_input in required_inputs: - if ( - req_input not in input_defaults - and req_input not in linked_inputs - and req_input != "credentials" - ): - block_name = block.get("name", "Unknown Block") - self.add_error( - f"Node '{node_id}' ({block_name}) is missing required input '{req_input}'." - ) - valid = False - - return valid - - def validate_data_type_compatibility( - self, agent: dict[str, Any], blocks_info: list[dict[str, Any]] - ) -> bool: - """Validate linked data types are compatible.""" - valid = True - block_map = {b.get("id"): b for b in blocks_info} - node_lookup = {n.get("id"): n for n in agent.get("nodes", [])} - - def get_type(schema: dict, name: str) -> str | None: - if "_#_" in name: - parent, child = name.split("_#_", 1) - parent_schema = schema.get(parent, {}) - if "properties" in parent_schema: - return parent_schema["properties"].get(child, {}).get("type") - return None - return schema.get(name, {}).get("type") - - def are_compatible(src: str, sink: str) -> bool: - if {src, sink} <= {"integer", "number"}: - return True - return src == sink - - for link in agent.get("links", []): - source_node = node_lookup.get(link.get("source_id")) - sink_node = node_lookup.get(link.get("sink_id")) - - if not source_node or not sink_node: - continue - - source_block = block_map.get(source_node.get("block_id")) - sink_block = block_map.get(sink_node.get("block_id")) - - if not source_block or not sink_block: - continue - - source_outputs = source_block.get("outputSchema", {}).get("properties", {}) - sink_inputs = sink_block.get("inputSchema", {}).get("properties", {}) - - source_type = get_type(source_outputs, link.get("source_name", "")) - sink_type = get_type(sink_inputs, link.get("sink_name", "")) - - if source_type and sink_type and not are_compatible(source_type, sink_type): - self.add_error( - f"Type mismatch: {source_block.get('name')} output '{link['source_name']}' " - f"({source_type}) -> {sink_block.get('name')} input '{link['sink_name']}' ({sink_type})." 
- ) - valid = False - - return valid - - def validate_nested_sink_links( - self, agent: dict[str, Any], blocks_info: list[dict[str, Any]] - ) -> bool: - """Validate nested sink links (with _#_ notation).""" - valid = True - block_map = {b.get("id"): b for b in blocks_info} - node_lookup = {n.get("id"): n for n in agent.get("nodes", [])} - - for link in agent.get("links", []): - sink_name = link.get("sink_name", "") - - if "_#_" in sink_name: - parent, child = sink_name.split("_#_", 1) - - sink_node = node_lookup.get(link.get("sink_id")) - if not sink_node: - continue - - block = block_map.get(sink_node.get("block_id")) - if not block: - continue - - input_props = block.get("inputSchema", {}).get("properties", {}) - parent_schema = input_props.get(parent) - - if not parent_schema: - self.add_error( - f"Invalid nested link '{sink_name}': parent '{parent}' not found." - ) - valid = False - continue - - if not parent_schema.get("additionalProperties"): - if not ( - isinstance(parent_schema, dict) - and "properties" in parent_schema - and child in parent_schema.get("properties", {}) - ): - self.add_error( - f"Invalid nested link '{sink_name}': child '{child}' not found in '{parent}'." - ) - valid = False - - return valid - - def validate_prompt_spaces(self, agent: dict[str, Any]) -> bool: - """Validate prompts don't have spaces in template variables.""" - valid = True - - for node in agent.get("nodes", []): - input_default = node.get("input_default", {}) - prompt = input_default.get("prompt", "") - - if not isinstance(prompt, str): - continue - - # Find {{...}} with spaces - matches = re.finditer(r"\{\{([^}]+)\}\}", prompt) - for match in matches: - content = match.group(1) - if " " in content: - self.add_error( - f"Node '{node.get('id')}' has spaces in template variable: " - f"'{{{{{content}}}}}' should be '{{{{{content.replace(' ', '_')}}}}}'." - ) - valid = False - - return valid - - def validate( - self, agent: dict[str, Any], blocks_info: list[dict[str, Any]] | None = None - ) -> tuple[bool, str | None]: - """Run all validations. - - Returns: - Tuple of (is_valid, error_message) - """ - self.errors = [] - - if blocks_info is None: - blocks_info = get_blocks_info() - - checks = [ - self.validate_block_existence(agent, blocks_info), - self.validate_link_node_references(agent), - self.validate_required_inputs(agent, blocks_info), - self.validate_data_type_compatibility(agent, blocks_info), - self.validate_nested_sink_links(agent, blocks_info), - self.validate_prompt_spaces(agent), - ] - - all_passed = all(checks) - - if all_passed: - logger.info("Agent validation successful") - return True, None - - error_message = "Agent validation failed:\n" - for i, error in enumerate(self.errors, 1): - error_message += f"{i}. {error}\n" - - logger.warning(f"Agent validation failed with {len(self.errors)} errors") - return False, error_message - - -def validate_agent( - agent: dict[str, Any], blocks_info: list[dict[str, Any]] | None = None -) -> tuple[bool, str | None]: - """Convenience function to validate an agent. 
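A usage sketch for the deleted validator, kept for reference since `create_agent.py` and `edit_agent.py` below drop their retry-on-validation loops: the convenience wrapper returns an `(is_valid, error_message)` tuple, where `error_message` is a numbered list of failures.

```python
import logging

logger = logging.getLogger(__name__)

def check(agent_json: dict) -> bool:
    is_valid, errors = validate_agent(agent_json)  # deleted helper, above
    if not is_valid:
        logger.warning("Rejecting generated agent:\n%s", errors)
    return is_valid
```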
- - Returns: - Tuple of (is_valid, error_message) - """ - validator = AgentValidator() - return validator.validate(agent, blocks_info) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py index d81a11362b..00c6d8499b 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_output.py @@ -5,6 +5,7 @@ import re from datetime import datetime, timedelta, timezone from typing import Any +from langfuse import observe from pydantic import BaseModel, field_validator from backend.api.features.chat.model import ChatSession @@ -103,7 +104,7 @@ class AgentOutputTool(BaseTool): @property def name(self) -> str: - return "agent_output" + return "view_agent_output" @property def description(self) -> str: @@ -328,6 +329,7 @@ class AgentOutputTool(BaseTool): total_executions=len(available_executions) if available_executions else 1, ) + @observe(as_type="tool", name="view_agent_output") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py index c8168f473d..5a3c44fb94 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py @@ -3,15 +3,15 @@ import logging from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from .agent_generator import ( - apply_all_fixes, + AgentGeneratorNotConfiguredError, decompose_goal, generate_agent, - get_blocks_info, save_agent_to_library, - validate_agent, ) from .base import BaseTool from .models import ( @@ -25,9 +25,6 @@ from .models import ( logger = logging.getLogger(__name__) -# Maximum retries for agent generation with validation feedback -MAX_GENERATION_RETRIES = 2 - class CreateAgentTool(BaseTool): """Tool for creating agents from natural language descriptions.""" @@ -78,6 +75,7 @@ class CreateAgentTool(BaseTool): "required": ["description"], } + @observe(as_type="tool", name="create_agent") async def _execute( self, user_id: str | None, @@ -88,9 +86,8 @@ class CreateAgentTool(BaseTool): Flow: 1. Decompose the description into steps (may return clarifying questions) - 2. Generate agent JSON from the steps - 3. Apply fixes to correct common LLM errors - 4. Preview or save based on the save parameter + 2. Generate agent JSON (external service handles fixing and validation) + 3. Preview or save based on the save parameter """ description = kwargs.get("description", "").strip() context = kwargs.get("context", "") @@ -107,11 +104,13 @@ class CreateAgentTool(BaseTool): # Step 1: Decompose goal into steps try: decomposition_result = await decompose_goal(description, context) - except ValueError as e: - # Handle missing API key or configuration errors + except AgentGeneratorNotConfiguredError: return ErrorResponse( - message=f"Agent generation is not configured: {str(e)}", - error="configuration_error", + message=( + "Agent generation is not available. " + "The Agent Generator service is not configured." 
+ ), + error="service_not_configured", session_id=session_id, ) @@ -168,72 +167,32 @@ class CreateAgentTool(BaseTool): session_id=session_id, ) - # Step 2: Generate agent JSON with retry on validation failure - blocks_info = get_blocks_info() - agent_json = None - validation_errors = None - - for attempt in range(MAX_GENERATION_RETRIES + 1): - # Generate agent (include validation errors from previous attempt) - if attempt == 0: - agent_json = await generate_agent(decomposition_result) - else: - # Retry with validation error feedback - logger.info( - f"Retry {attempt}/{MAX_GENERATION_RETRIES} with validation feedback" - ) - retry_instructions = { - **decomposition_result, - "previous_errors": validation_errors, - "retry_instructions": ( - "The previous generation had validation errors. " - "Please fix these issues in the new generation:\n" - f"{validation_errors}" - ), - } - agent_json = await generate_agent(retry_instructions) - - if agent_json is None: - if attempt == MAX_GENERATION_RETRIES: - return ErrorResponse( - message="Failed to generate the agent. Please try again.", - error="Generation failed", - session_id=session_id, - ) - continue - - # Step 3: Apply fixes to correct common errors - agent_json = apply_all_fixes(agent_json, blocks_info) - - # Step 4: Validate the agent - is_valid, validation_errors = validate_agent(agent_json, blocks_info) - - if is_valid: - logger.info(f"Agent generated successfully on attempt {attempt + 1}") - break - - logger.warning( - f"Validation failed on attempt {attempt + 1}: {validation_errors}" + # Step 2: Generate agent JSON (external service handles fixing and validation) + try: + agent_json = await generate_agent(decomposition_result) + except AgentGeneratorNotConfiguredError: + return ErrorResponse( + message=( + "Agent generation is not available. " + "The Agent Generator service is not configured." + ), + error="service_not_configured", + session_id=session_id, ) - if attempt == MAX_GENERATION_RETRIES: - # Return error with validation details - return ErrorResponse( - message=( - f"Generated agent has validation errors after {MAX_GENERATION_RETRIES + 1} attempts. " - f"Please try rephrasing your request or simplify the workflow." - ), - error="validation_failed", - details={"validation_errors": validation_errors}, - session_id=session_id, - ) + if agent_json is None: + return ErrorResponse( + message="Failed to generate the agent. 
Please try again.", + error="Generation failed", + session_id=session_id, + ) agent_name = agent_json.get("name", "Generated Agent") agent_description = agent_json.get("description", "") node_count = len(agent_json.get("nodes", [])) link_count = len(agent_json.get("links", [])) - # Step 4: Preview or save + # Step 3: Preview or save if not save: return AgentPreviewResponse( message=( diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py index 5aaa166036..777c39a254 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py @@ -3,16 +3,15 @@ import logging from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from .agent_generator import ( - apply_agent_patch, - apply_all_fixes, + AgentGeneratorNotConfiguredError, generate_agent_patch, get_agent_as_json, - get_blocks_info, save_agent_to_library, - validate_agent, ) from .base import BaseTool from .models import ( @@ -26,9 +25,6 @@ from .models import ( logger = logging.getLogger(__name__) -# Maximum retries for patch generation with validation feedback -MAX_GENERATION_RETRIES = 2 - class EditAgentTool(BaseTool): """Tool for editing existing agents using natural language.""" @@ -41,7 +37,7 @@ class EditAgentTool(BaseTool): def description(self) -> str: return ( "Edit an existing agent from the user's library using natural language. " - "Generates a patch to update the agent while preserving unchanged parts." + "Generates updates to the agent while preserving unchanged parts." ) @property @@ -85,6 +81,7 @@ class EditAgentTool(BaseTool): "required": ["agent_id", "changes"], } + @observe(as_type="tool", name="edit_agent") async def _execute( self, user_id: str | None, @@ -95,9 +92,8 @@ class EditAgentTool(BaseTool): Flow: 1. Fetch the current agent - 2. Generate a patch based on the requested changes - 3. Apply the patch to create an updated agent - 4. Preview or save based on the save parameter + 2. Generate updated agent (external service handles fixing and validation) + 3. Preview or save based on the save parameter """ agent_id = kwargs.get("agent_id", "").strip() changes = kwargs.get("changes", "").strip() @@ -134,121 +130,58 @@ class EditAgentTool(BaseTool): if context: update_request = f"{changes}\n\nAdditional context:\n{context}" - # Step 2: Generate patch with retry on validation failure - blocks_info = get_blocks_info() - updated_agent = None - validation_errors = None - intent = "Applied requested changes" - - for attempt in range(MAX_GENERATION_RETRIES + 1): - # Generate patch (include validation errors from previous attempt) - try: - if attempt == 0: - patch_result = await generate_agent_patch( - update_request, current_agent - ) - else: - # Retry with validation error feedback - logger.info( - f"Retry {attempt}/{MAX_GENERATION_RETRIES} with validation feedback" - ) - retry_request = ( - f"{update_request}\n\n" - f"IMPORTANT: The previous edit had validation errors. 
" - f"Please fix these issues:\n{validation_errors}" - ) - patch_result = await generate_agent_patch( - retry_request, current_agent - ) - except ValueError as e: - # Handle missing API key or configuration errors - return ErrorResponse( - message=f"Agent generation is not configured: {str(e)}", - error="configuration_error", - session_id=session_id, - ) - - if patch_result is None: - if attempt == MAX_GENERATION_RETRIES: - return ErrorResponse( - message="Failed to generate changes. Please try rephrasing.", - error="Patch generation failed", - session_id=session_id, - ) - continue - - # Check if LLM returned clarifying questions - if patch_result.get("type") == "clarifying_questions": - questions = patch_result.get("questions", []) - return ClarificationNeededResponse( - message=( - "I need some more information about the changes. " - "Please answer the following questions:" - ), - questions=[ - ClarifyingQuestion( - question=q.get("question", ""), - keyword=q.get("keyword", ""), - example=q.get("example"), - ) - for q in questions - ], - session_id=session_id, - ) - - # Step 3: Apply patch and fixes - try: - updated_agent = apply_agent_patch(current_agent, patch_result) - updated_agent = apply_all_fixes(updated_agent, blocks_info) - except Exception as e: - if attempt == MAX_GENERATION_RETRIES: - return ErrorResponse( - message=f"Failed to apply changes: {str(e)}", - error="patch_apply_failed", - details={"exception": str(e)}, - session_id=session_id, - ) - validation_errors = str(e) - continue - - # Step 4: Validate the updated agent - is_valid, validation_errors = validate_agent(updated_agent, blocks_info) - - if is_valid: - logger.info(f"Agent edited successfully on attempt {attempt + 1}") - intent = patch_result.get("intent", "Applied requested changes") - break - - logger.warning( - f"Validation failed on attempt {attempt + 1}: {validation_errors}" + # Step 2: Generate updated agent (external service handles fixing and validation) + try: + result = await generate_agent_patch(update_request, current_agent) + except AgentGeneratorNotConfiguredError: + return ErrorResponse( + message=( + "Agent editing is not available. " + "The Agent Generator service is not configured." + ), + error="service_not_configured", + session_id=session_id, ) - if attempt == MAX_GENERATION_RETRIES: - # Return error with validation details - return ErrorResponse( - message=( - f"Updated agent has validation errors after " - f"{MAX_GENERATION_RETRIES + 1} attempts. " - f"Please try rephrasing your request or simplify the changes." - ), - error="validation_failed", - details={"validation_errors": validation_errors}, - session_id=session_id, - ) + if result is None: + return ErrorResponse( + message="Failed to generate changes. Please try rephrasing.", + error="Update generation failed", + session_id=session_id, + ) - # At this point, updated_agent is guaranteed to be set (we return on all failure paths) - assert updated_agent is not None + # Check if LLM returned clarifying questions + if result.get("type") == "clarifying_questions": + questions = result.get("questions", []) + return ClarificationNeededResponse( + message=( + "I need some more information about the changes. 
" + "Please answer the following questions:" + ), + questions=[ + ClarifyingQuestion( + question=q.get("question", ""), + keyword=q.get("keyword", ""), + example=q.get("example"), + ) + for q in questions + ], + session_id=session_id, + ) + + # Result is the updated agent JSON + updated_agent = result agent_name = updated_agent.get("name", "Updated Agent") agent_description = updated_agent.get("description", "") node_count = len(updated_agent.get("nodes", [])) link_count = len(updated_agent.get("links", [])) - # Step 5: Preview or save + # Step 3: Preview or save if not save: return AgentPreviewResponse( message=( - f"I've updated the agent. Changes: {intent}. " + f"I've updated the agent. " f"The agent now has {node_count} blocks. " f"Review it and call edit_agent with save=true to save the changes." ), @@ -274,10 +207,7 @@ class EditAgentTool(BaseTool): ) return AgentSavedResponse( - message=( - f"Updated agent '{created_graph.name}' has been saved to your library! " - f"Changes: {intent}" - ), + message=f"Updated agent '{created_graph.name}' has been saved to your library!", agent_id=created_graph.id, agent_name=created_graph.name, library_agent_id=library_agent.id, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py index 477522757d..f231ef4484 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_agent.py @@ -2,6 +2,8 @@ from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from .agent_search import search_agents @@ -35,6 +37,7 @@ class FindAgentTool(BaseTool): "required": ["query"], } + @observe(as_type="tool", name="find_agent") async def _execute( self, user_id: str | None, session: ChatSession, **kwargs ) -> ToolResponseBase: diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py index a5e66f0a1c..fc20fdfc4a 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py @@ -1,6 +1,7 @@ import logging from typing import Any +from langfuse import observe from prisma.enums import ContentType from backend.api.features.chat.model import ChatSession @@ -55,6 +56,7 @@ class FindBlockTool(BaseTool): def requires_auth(self) -> bool: return True + @observe(as_type="tool", name="find_block") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py index 108fba75ae..d9b5edfa9b 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_library_agent.py @@ -2,6 +2,8 @@ from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from .agent_search import search_agents @@ -41,6 +43,7 @@ class FindLibraryAgentTool(BaseTool): def requires_auth(self) -> bool: return True + @observe(as_type="tool", name="find_library_agent") async def _execute( self, user_id: str | None, session: ChatSession, **kwargs ) -> ToolResponseBase: diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py 
b/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py index 7040cd7db5..b2fdcccfcd 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/get_doc_page.py @@ -4,6 +4,8 @@ import logging from pathlib import Path from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from backend.api.features.chat.tools.base import BaseTool from backend.api.features.chat.tools.models import ( @@ -71,6 +73,7 @@ class GetDocPageTool(BaseTool): url_path = path.rsplit(".", 1)[0] if "." in path else path return f"{DOCS_BASE_URL}/{url_path}" + @observe(as_type="tool", name="get_doc_page") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py index 1f0a836543..88d432a797 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py @@ -3,10 +3,15 @@ import logging from typing import Any +from langfuse import observe from pydantic import BaseModel, Field, field_validator from backend.api.features.chat.config import ChatConfig from backend.api.features.chat.model import ChatSession +from backend.api.features.chat.tracking import ( + track_agent_run_success, + track_agent_scheduled, +) from backend.api.features.library import db as library_db from backend.data.graph import GraphModel from backend.data.model import CredentialsMetaInput @@ -32,7 +37,7 @@ from .models import ( UserReadiness, ) from .utils import ( - check_user_has_required_credentials, + build_missing_credentials_from_graph, extract_credentials_from_schema, fetch_graph_from_store_slug, get_or_create_library_agent, @@ -154,6 +159,7 @@ class RunAgentTool(BaseTool): """All operations require authentication.""" return True + @observe(as_type="tool", name="run_agent") async def _execute( self, user_id: str | None, @@ -235,15 +241,13 @@ class RunAgentTool(BaseTool): # Return credentials needed response with input data info # The UI handles credential setup automatically, so the message # focuses on asking about input data - credentials = extract_credentials_from_schema( - graph.credentials_input_schema + requirements_creds_dict = build_missing_credentials_from_graph( + graph, None ) - missing_creds_check = await check_user_has_required_credentials( - user_id, credentials + missing_credentials_dict = build_missing_credentials_from_graph( + graph, graph_credentials ) - missing_credentials_dict = { - c.id: c.model_dump() for c in missing_creds_check - } + requirements_creds_list = list(requirements_creds_dict.values()) return SetupRequirementsResponse( message=self._build_inputs_message(graph, MSG_WHAT_VALUES_TO_USE), @@ -257,7 +261,7 @@ class RunAgentTool(BaseTool): ready_to_run=False, ), requirements={ - "credentials": [c.model_dump() for c in credentials], + "credentials": requirements_creds_list, "inputs": self._get_inputs_list(graph.input_schema), "execution_modes": self._get_execution_modes(graph), }, @@ -453,6 +457,16 @@ class RunAgentTool(BaseTool): session.successful_agent_runs.get(library_agent.graph_id, 0) + 1 ) + # Track in PostHog + track_agent_run_success( + user_id=user_id, + session_id=session_id, + graph_id=library_agent.graph_id, + graph_name=library_agent.name, + execution_id=execution.id, + library_agent_id=library_agent.id, + ) + 
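The PostHog helpers imported above live in `chat/tracking.py`, which this diff does not show; the signatures below are inferred from the call sites in this hunk and should be treated as assumptions:

```python
# Assumed signatures, reconstructed from the call sites only.
def track_agent_run_success(
    *, user_id: str, session_id: str, graph_id: str, graph_name: str,
    execution_id: str, library_agent_id: str,
) -> None: ...

def track_agent_scheduled(
    *, user_id: str, session_id: str, graph_id: str, graph_name: str,
    schedule_id: str, schedule_name: str, cron: str, library_agent_id: str,
) -> None: ...
```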
library_agent_link = f"/library/agents/{library_agent.id}" return ExecutionStartedResponse( message=( @@ -534,6 +548,18 @@ class RunAgentTool(BaseTool): session.successful_agent_schedules.get(library_agent.graph_id, 0) + 1 ) + # Track in PostHog + track_agent_scheduled( + user_id=user_id, + session_id=session_id, + graph_id=library_agent.graph_id, + graph_name=library_agent.name, + schedule_id=result.id, + schedule_name=schedule_name, + cron=cron, + library_agent_id=library_agent.id, + ) + library_agent_link = f"/library/agents/{library_agent.id}" return ExecutionStartedResponse( message=( diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent_test.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent_test.py index 9e10304429..404df2adb6 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent_test.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent_test.py @@ -29,7 +29,7 @@ def mock_embedding_functions(): yield -@pytest.mark.asyncio(scope="session") +@pytest.mark.asyncio(loop_scope="session") async def test_run_agent(setup_test_data): """Test that the run_agent tool successfully executes an approved agent""" # Use test data from fixture @@ -70,7 +70,7 @@ async def test_run_agent(setup_test_data): assert result_data["graph_name"] == "Test Agent" -@pytest.mark.asyncio(scope="session") +@pytest.mark.asyncio(loop_scope="session") async def test_run_agent_missing_inputs(setup_test_data): """Test that the run_agent tool returns error when inputs are missing""" # Use test data from fixture @@ -106,7 +106,7 @@ async def test_run_agent_missing_inputs(setup_test_data): assert "message" in result_data -@pytest.mark.asyncio(scope="session") +@pytest.mark.asyncio(loop_scope="session") async def test_run_agent_invalid_agent_id(setup_test_data): """Test that the run_agent tool returns error for invalid agent ID""" # Use test data from fixture @@ -141,7 +141,7 @@ async def test_run_agent_invalid_agent_id(setup_test_data): ) -@pytest.mark.asyncio(scope="session") +@pytest.mark.asyncio(loop_scope="session") async def test_run_agent_with_llm_credentials(setup_llm_test_data): """Test that run_agent works with an agent requiring LLM credentials""" # Use test data from fixture @@ -185,7 +185,7 @@ async def test_run_agent_with_llm_credentials(setup_llm_test_data): assert result_data["graph_name"] == "LLM Test Agent" -@pytest.mark.asyncio(scope="session") +@pytest.mark.asyncio(loop_scope="session") async def test_run_agent_shows_available_inputs_when_none_provided(setup_test_data): """Test that run_agent returns available inputs when called without inputs or use_defaults.""" user = setup_test_data["user"] @@ -219,7 +219,7 @@ async def test_run_agent_shows_available_inputs_when_none_provided(setup_test_da assert "inputs" in result_data["message"].lower() -@pytest.mark.asyncio(scope="session") +@pytest.mark.asyncio(loop_scope="session") async def test_run_agent_with_use_defaults(setup_test_data): """Test that run_agent executes successfully with use_defaults=True.""" user = setup_test_data["user"] @@ -251,7 +251,7 @@ async def test_run_agent_with_use_defaults(setup_test_data): assert result_data["graph_id"] == graph.id -@pytest.mark.asyncio(scope="session") +@pytest.mark.asyncio(loop_scope="session") async def test_run_agent_missing_credentials(setup_firecrawl_test_data): """Test that run_agent returns setup_requirements when credentials are missing.""" user = setup_firecrawl_test_data["user"] @@ -285,7 +285,7 @@ async def 
test_run_agent_missing_credentials(setup_firecrawl_test_data): assert len(setup_info["user_readiness"]["missing_credentials"]) > 0 -@pytest.mark.asyncio(scope="session") +@pytest.mark.asyncio(loop_scope="session") async def test_run_agent_invalid_slug_format(setup_test_data): """Test that run_agent returns error for invalid slug format (no slash).""" user = setup_test_data["user"] @@ -313,7 +313,7 @@ async def test_run_agent_invalid_slug_format(setup_test_data): assert "username/agent-name" in result_data["message"] -@pytest.mark.asyncio(scope="session") +@pytest.mark.asyncio(loop_scope="session") async def test_run_agent_unauthenticated(): """Test that run_agent returns need_login for unauthenticated users.""" tool = RunAgentTool() @@ -340,7 +340,7 @@ async def test_run_agent_unauthenticated(): assert "sign in" in result_data["message"].lower() -@pytest.mark.asyncio(scope="session") +@pytest.mark.asyncio(loop_scope="session") async def test_run_agent_schedule_without_cron(setup_test_data): """Test that run_agent returns error when scheduling without cron expression.""" user = setup_test_data["user"] @@ -372,7 +372,7 @@ async def test_run_agent_schedule_without_cron(setup_test_data): assert "cron" in result_data["message"].lower() -@pytest.mark.asyncio(scope="session") +@pytest.mark.asyncio(loop_scope="session") async def test_run_agent_schedule_without_name(setup_test_data): """Test that run_agent returns error when scheduling without schedule_name.""" user = setup_test_data["user"] diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py index 48cbcb5e5c..0d233fcfec 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py @@ -4,6 +4,8 @@ import logging from collections import defaultdict from typing import Any +from langfuse import observe + from backend.api.features.chat.model import ChatSession from backend.data.block import get_block from backend.data.execution import ExecutionContext @@ -20,6 +22,7 @@ from .models import ( ToolResponseBase, UserReadiness, ) +from .utils import build_missing_credentials_from_field_info logger = logging.getLogger(__name__) @@ -127,6 +130,7 @@ class RunBlockTool(BaseTool): return matched_credentials, missing_credentials + @observe(as_type="tool", name="run_block") async def _execute( self, user_id: str | None, @@ -175,6 +179,11 @@ class RunBlockTool(BaseTool): message=f"Block '{block_id}' not found", session_id=session_id, ) + if block.disabled: + return ErrorResponse( + message=f"Block '{block_id}' is disabled", + session_id=session_id, + ) logger.info(f"Executing block {block.name} ({block_id}) for user {user_id}") @@ -186,7 +195,11 @@ class RunBlockTool(BaseTool): if missing_credentials: # Return setup requirements response with missing credentials - missing_creds_dict = {c.id: c.model_dump() for c in missing_credentials} + credentials_fields_info = block.input_schema.get_credentials_fields_info() + missing_creds_dict = build_missing_credentials_from_field_info( + credentials_fields_info, set(matched_credentials.keys()) + ) + missing_creds_list = list(missing_creds_dict.values()) return SetupRequirementsResponse( message=( @@ -203,7 +216,7 @@ class RunBlockTool(BaseTool): ready_to_run=False, ), requirements={ - "credentials": [c.model_dump() for c in missing_credentials], + "credentials": missing_creds_list, "inputs": self._get_inputs_list(block), 
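+ # Chat-invoked blocks always run inline, so "immediate" is the only mode offered here; scheduled execution is only available via run_agent.py.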
"execution_modes": ["immediate"], }, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py b/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py index edb0c0de1e..4903230b40 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/search_docs.py @@ -3,6 +3,7 @@ import logging from typing import Any +from langfuse import observe from prisma.enums import ContentType from backend.api.features.chat.model import ChatSession @@ -87,6 +88,7 @@ class SearchDocsTool(BaseTool): url_path = path.rsplit(".", 1)[0] if "." in path else path return f"{DOCS_BASE_URL}/{url_path}" + @observe(as_type="tool", name="search_docs") async def _execute( self, user_id: str | None, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py index 19e092c312..a2ac91dc65 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py @@ -8,7 +8,7 @@ from backend.api.features.library import model as library_model from backend.api.features.store import db as store_db from backend.data import graph as graph_db from backend.data.graph import GraphModel -from backend.data.model import CredentialsMetaInput +from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.util.exceptions import NotFoundError @@ -89,6 +89,59 @@ def extract_credentials_from_schema( return credentials +def _serialize_missing_credential( + field_key: str, field_info: CredentialsFieldInfo +) -> dict[str, Any]: + """ + Convert credential field info into a serializable dict that preserves all supported + credential types (e.g., api_key + oauth2) so the UI can offer multiple options. + """ + supported_types = sorted(field_info.supported_types) + provider = next(iter(field_info.provider), "unknown") + scopes = sorted(field_info.required_scopes or []) + + return { + "id": field_key, + "title": field_key.replace("_", " ").title(), + "provider": provider, + "provider_name": provider.replace("_", " ").title(), + "type": supported_types[0] if supported_types else "api_key", + "types": supported_types, + "scopes": scopes, + } + + +def build_missing_credentials_from_graph( + graph: GraphModel, matched_credentials: dict[str, CredentialsMetaInput] | None +) -> dict[str, Any]: + """ + Build a missing_credentials mapping from a graph's aggregated credentials inputs, + preserving all supported credential types for each field. + """ + matched_keys = set(matched_credentials.keys()) if matched_credentials else set() + aggregated_fields = graph.aggregate_credentials_inputs() + + return { + field_key: _serialize_missing_credential(field_key, field_info) + for field_key, (field_info, _node_fields) in aggregated_fields.items() + if field_key not in matched_keys + } + + +def build_missing_credentials_from_field_info( + credential_fields: dict[str, CredentialsFieldInfo], + matched_keys: set[str], +) -> dict[str, Any]: + """ + Build missing_credentials mapping from a simple credentials field info dictionary. 
+ """ + return { + field_key: _serialize_missing_credential(field_key, field_info) + for field_key, field_info in credential_fields.items() + if field_key not in matched_keys + } + + def extract_credentials_as_dict( credentials_input_schema: dict[str, Any] | None, ) -> dict[str, CredentialsMetaInput]: diff --git a/autogpt_platform/backend/backend/api/features/chat/tracking.py b/autogpt_platform/backend/backend/api/features/chat/tracking.py new file mode 100644 index 0000000000..b2c0fd032f --- /dev/null +++ b/autogpt_platform/backend/backend/api/features/chat/tracking.py @@ -0,0 +1,250 @@ +"""PostHog analytics tracking for the chat system.""" + +import atexit +import logging +from typing import Any + +from posthog import Posthog + +from backend.util.settings import Settings + +logger = logging.getLogger(__name__) +settings = Settings() + +# PostHog client instance (lazily initialized) +_posthog_client: Posthog | None = None + + +def _shutdown_posthog() -> None: + """Flush and shutdown PostHog client on process exit.""" + if _posthog_client is not None: + _posthog_client.flush() + _posthog_client.shutdown() + + +atexit.register(_shutdown_posthog) + + +def _get_posthog_client() -> Posthog | None: + """Get or create the PostHog client instance.""" + global _posthog_client + if _posthog_client is not None: + return _posthog_client + + if not settings.secrets.posthog_api_key: + logger.debug("PostHog API key not configured, analytics disabled") + return None + + _posthog_client = Posthog( + settings.secrets.posthog_api_key, + host=settings.secrets.posthog_host, + ) + logger.info( + f"PostHog client initialized with host: {settings.secrets.posthog_host}" + ) + return _posthog_client + + +def _get_base_properties() -> dict[str, Any]: + """Get base properties included in all events.""" + return { + "environment": settings.config.app_env.value, + "source": "chat_copilot", + } + + +def track_user_message( + user_id: str | None, + session_id: str, + message_length: int, +) -> None: + """Track when a user sends a message in chat. + + Args: + user_id: The user's ID (or None for anonymous) + session_id: The chat session ID + message_length: Length of the user's message + """ + client = _get_posthog_client() + if not client: + return + + try: + properties = { + **_get_base_properties(), + "session_id": session_id, + "message_length": message_length, + } + client.capture( + distinct_id=user_id or f"anonymous_{session_id}", + event="copilot_message_sent", + properties=properties, + ) + except Exception as e: + logger.warning(f"Failed to track user message: {e}") + + +def track_tool_called( + user_id: str | None, + session_id: str, + tool_name: str, + tool_call_id: str, +) -> None: + """Track when a tool is called in chat. 
+ + Args: + user_id: The user's ID (or None for anonymous) + session_id: The chat session ID + tool_name: Name of the tool being called + tool_call_id: Unique ID of the tool call + """ + client = _get_posthog_client() + if not client: + logger.info("PostHog client not available for tool tracking") + return + + try: + properties = { + **_get_base_properties(), + "session_id": session_id, + "tool_name": tool_name, + "tool_call_id": tool_call_id, + } + distinct_id = user_id or f"anonymous_{session_id}" + logger.info( + f"Sending copilot_tool_called event to PostHog: distinct_id={distinct_id}, " + f"tool_name={tool_name}" + ) + client.capture( + distinct_id=distinct_id, + event="copilot_tool_called", + properties=properties, + ) + except Exception as e: + logger.warning(f"Failed to track tool call: {e}") + + +def track_agent_run_success( + user_id: str, + session_id: str, + graph_id: str, + graph_name: str, + execution_id: str, + library_agent_id: str, +) -> None: + """Track when an agent is successfully run. + + Args: + user_id: The user's ID + session_id: The chat session ID + graph_id: ID of the agent graph + graph_name: Name of the agent + execution_id: ID of the execution + library_agent_id: ID of the library agent + """ + client = _get_posthog_client() + if not client: + return + + try: + properties = { + **_get_base_properties(), + "session_id": session_id, + "graph_id": graph_id, + "graph_name": graph_name, + "execution_id": execution_id, + "library_agent_id": library_agent_id, + } + client.capture( + distinct_id=user_id, + event="copilot_agent_run_success", + properties=properties, + ) + except Exception as e: + logger.warning(f"Failed to track agent run: {e}") + + +def track_agent_scheduled( + user_id: str, + session_id: str, + graph_id: str, + graph_name: str, + schedule_id: str, + schedule_name: str, + cron: str, + library_agent_id: str, +) -> None: + """Track when an agent is successfully scheduled. + + Args: + user_id: The user's ID + session_id: The chat session ID + graph_id: ID of the agent graph + graph_name: Name of the agent + schedule_id: ID of the schedule + schedule_name: Name of the schedule + cron: Cron expression for the schedule + library_agent_id: ID of the library agent + """ + client = _get_posthog_client() + if not client: + return + + try: + properties = { + **_get_base_properties(), + "session_id": session_id, + "graph_id": graph_id, + "graph_name": graph_name, + "schedule_id": schedule_id, + "schedule_name": schedule_name, + "cron": cron, + "library_agent_id": library_agent_id, + } + client.capture( + distinct_id=user_id, + event="copilot_agent_scheduled", + properties=properties, + ) + except Exception as e: + logger.warning(f"Failed to track agent schedule: {e}") + + +def track_trigger_setup( + user_id: str, + session_id: str, + graph_id: str, + graph_name: str, + trigger_type: str, + library_agent_id: str, +) -> None: + """Track when a trigger is set up for an agent. 
+ + Args: + user_id: The user's ID + session_id: The chat session ID + graph_id: ID of the agent graph + graph_name: Name of the agent + trigger_type: Type of trigger (e.g., 'webhook') + library_agent_id: ID of the library agent + """ + client = _get_posthog_client() + if not client: + return + + try: + properties = { + **_get_base_properties(), + "session_id": session_id, + "graph_id": graph_id, + "graph_name": graph_name, + "trigger_type": trigger_type, + "library_agent_id": library_agent_id, + } + client.capture( + distinct_id=user_id, + event="copilot_trigger_setup", + properties=properties, + ) + except Exception as e: + logger.warning(f"Failed to track trigger setup: {e}") diff --git a/autogpt_platform/backend/backend/api/features/executions/review/model.py b/autogpt_platform/backend/backend/api/features/executions/review/model.py index 74f72fe1ff..bad8b8d304 100644 --- a/autogpt_platform/backend/backend/api/features/executions/review/model.py +++ b/autogpt_platform/backend/backend/api/features/executions/review/model.py @@ -23,6 +23,7 @@ class PendingHumanReviewModel(BaseModel): id: Unique identifier for the review record user_id: ID of the user who must perform the review node_exec_id: ID of the node execution that created this review + node_id: ID of the node definition (for grouping reviews from same node) graph_exec_id: ID of the graph execution containing the node graph_id: ID of the graph template being executed graph_version: Version number of the graph template @@ -37,6 +38,10 @@ class PendingHumanReviewModel(BaseModel): """ node_exec_id: str = Field(description="Node execution ID (primary key)") + node_id: str = Field( + description="Node definition ID (for grouping)", + default="", # Temporary default for test compatibility + ) user_id: str = Field(description="User ID associated with the review") graph_exec_id: str = Field(description="Graph execution ID") graph_id: str = Field(description="Graph ID") @@ -66,7 +71,9 @@ class PendingHumanReviewModel(BaseModel): ) @classmethod - def from_db(cls, review: "PendingHumanReview") -> "PendingHumanReviewModel": + def from_db( + cls, review: "PendingHumanReview", node_id: str + ) -> "PendingHumanReviewModel": """ Convert a database model to a response model. @@ -74,9 +81,14 @@ class PendingHumanReviewModel(BaseModel): payload, instructions, and editable flag. Handles invalid data gracefully by using safe defaults. + + Args: + review: Database review object + node_id: Node definition ID (fetched from NodeExecution) """ return cls( node_exec_id=review.nodeExecId, + node_id=node_id, user_id=review.userId, graph_exec_id=review.graphExecId, graph_id=review.graphId, @@ -107,6 +119,13 @@ class ReviewItem(BaseModel): reviewed_data: SafeJsonData | None = Field( None, description="Optional edited data (ignored if approved=False)" ) + auto_approve_future: bool = Field( + default=False, + description=( + "If true and this review is approved, future executions of this same " + "block (node) will be automatically approved. This only affects approved reviews." + ), + ) @field_validator("reviewed_data") @classmethod @@ -174,6 +193,9 @@ class ReviewRequest(BaseModel): This request must include ALL pending reviews for a graph execution. Each review will be either approved (with optional data modifications) or rejected (data ignored). The execution will resume only after ALL reviews are processed. 
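+ + Example body (IDs are illustrative): + {"reviews": [{"node_exec_id": "node-exec-1", "approved": true, "message": "Looks good"}]}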
+ + Each review item can individually specify whether to auto-approve future executions + of the same block via the `auto_approve_future` field on ReviewItem. """ reviews: List[ReviewItem] = Field( diff --git a/autogpt_platform/backend/backend/api/features/executions/review/review_routes_test.py b/autogpt_platform/backend/backend/api/features/executions/review/review_routes_test.py index c4eba0befc..d0c24f2cf8 100644 --- a/autogpt_platform/backend/backend/api/features/executions/review/review_routes_test.py +++ b/autogpt_platform/backend/backend/api/features/executions/review/review_routes_test.py @@ -1,35 +1,43 @@ import datetime +from typing import AsyncGenerator -import fastapi -import fastapi.testclient +import httpx import pytest +import pytest_asyncio import pytest_mock from prisma.enums import ReviewStatus from pytest_snapshot.plugin import Snapshot -from backend.api.rest_api import handle_internal_http_error +from backend.api.rest_api import app +from backend.data.execution import ( + ExecutionContext, + ExecutionStatus, + NodeExecutionResult, +) +from backend.data.graph import GraphSettings from .model import PendingHumanReviewModel -from .routes import router # Using a fixed timestamp for reproducible tests FIXED_NOW = datetime.datetime(2023, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc) -app = fastapi.FastAPI() -app.include_router(router, prefix="/api/review") -app.add_exception_handler(ValueError, handle_internal_http_error(400)) -client = fastapi.testclient.TestClient(app) - - -@pytest.fixture(autouse=True) -def setup_app_auth(mock_jwt_user): - """Setup auth overrides for all tests in this module""" +@pytest_asyncio.fixture(loop_scope="session") +async def client(server, mock_jwt_user) -> AsyncGenerator[httpx.AsyncClient, None]: + """Create async HTTP client with auth overrides""" from autogpt_libs.auth.jwt_utils import get_jwt_payload + # Override get_jwt_payload dependency to return our test user app.dependency_overrides[get_jwt_payload] = mock_jwt_user["get_jwt_payload"] - yield - app.dependency_overrides.clear() + + async with httpx.AsyncClient( + transport=httpx.ASGITransport(app=app), + base_url="http://test", + ) as http_client: + yield http_client + + # Clean up overrides + app.dependency_overrides.pop(get_jwt_payload, None) @pytest.fixture @@ -37,6 +45,7 @@ def sample_pending_review(test_user_id: str) -> PendingHumanReviewModel: """Create a sample pending review for testing""" return PendingHumanReviewModel( node_exec_id="test_node_123", + node_id="test_node_def_456", user_id=test_user_id, graph_exec_id="test_graph_exec_456", graph_id="test_graph_789", @@ -54,7 +63,9 @@ def sample_pending_review(test_user_id: str) -> PendingHumanReviewModel: ) -def test_get_pending_reviews_empty( +@pytest.mark.asyncio(loop_scope="session") +async def test_get_pending_reviews_empty( + client: httpx.AsyncClient, mocker: pytest_mock.MockerFixture, snapshot: Snapshot, test_user_id: str, @@ -65,14 +76,16 @@ def test_get_pending_reviews_empty( ) mock_get_reviews.return_value = [] - response = client.get("/api/review/pending") + response = await client.get("/api/review/pending") assert response.status_code == 200 assert response.json() == [] mock_get_reviews.assert_called_once_with(test_user_id, 1, 25) -def test_get_pending_reviews_with_data( +@pytest.mark.asyncio(loop_scope="session") +async def test_get_pending_reviews_with_data( + client: httpx.AsyncClient, mocker: pytest_mock.MockerFixture, sample_pending_review: PendingHumanReviewModel, snapshot: Snapshot, @@ -84,7 +97,7 @@ def 
test_get_pending_reviews_with_data( ) mock_get_reviews.return_value = [sample_pending_review] - response = client.get("/api/review/pending?page=2&page_size=10") + response = await client.get("/api/review/pending?page=2&page_size=10") assert response.status_code == 200 data = response.json() @@ -94,7 +107,9 @@ def test_get_pending_reviews_with_data( mock_get_reviews.assert_called_once_with(test_user_id, 2, 10) -def test_get_pending_reviews_for_execution_success( +@pytest.mark.asyncio(loop_scope="session") +async def test_get_pending_reviews_for_execution_success( + client: httpx.AsyncClient, mocker: pytest_mock.MockerFixture, sample_pending_review: PendingHumanReviewModel, snapshot: Snapshot, @@ -114,7 +129,7 @@ def test_get_pending_reviews_for_execution_success( ) mock_get_reviews.return_value = [sample_pending_review] - response = client.get("/api/review/execution/test_graph_exec_456") + response = await client.get("/api/review/execution/test_graph_exec_456") assert response.status_code == 200 data = response.json() @@ -122,7 +137,9 @@ def test_get_pending_reviews_for_execution_success( assert data[0]["graph_exec_id"] == "test_graph_exec_456" -def test_get_pending_reviews_for_execution_not_available( +@pytest.mark.asyncio(loop_scope="session") +async def test_get_pending_reviews_for_execution_not_available( + client: httpx.AsyncClient, mocker: pytest_mock.MockerFixture, ) -> None: """Test access denied when user doesn't own the execution""" @@ -131,13 +148,15 @@ def test_get_pending_reviews_for_execution_not_available( ) mock_get_graph_execution.return_value = None - response = client.get("/api/review/execution/test_graph_exec_456") + response = await client.get("/api/review/execution/test_graph_exec_456") assert response.status_code == 404 assert "not found" in response.json()["detail"] -def test_process_review_action_approve_success( +@pytest.mark.asyncio(loop_scope="session") +async def test_process_review_action_approve_success( + client: httpx.AsyncClient, mocker: pytest_mock.MockerFixture, sample_pending_review: PendingHumanReviewModel, test_user_id: str, @@ -145,6 +164,12 @@ def test_process_review_action_approve_success( """Test successful review approval""" # Mock the route functions + # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + mock_get_reviews_for_user = mocker.patch( + "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + ) + mock_get_reviews_for_user.return_value = {"test_node_123": sample_pending_review} + mock_get_reviews_for_execution = mocker.patch( "backend.api.features.executions.review.routes.get_pending_reviews_for_execution" ) @@ -173,6 +198,14 @@ def test_process_review_action_approve_success( ) mock_process_all_reviews.return_value = {"test_node_123": approved_review} + # Mock get_graph_execution_meta to return execution in REVIEW status + mock_get_graph_exec = mocker.patch( + "backend.api.features.executions.review.routes.get_graph_execution_meta" + ) + mock_graph_exec_meta = mocker.Mock() + mock_graph_exec_meta.status = ExecutionStatus.REVIEW + mock_get_graph_exec.return_value = mock_graph_exec_meta + mock_has_pending = mocker.patch( "backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec" ) @@ -191,7 +224,7 @@ def test_process_review_action_approve_success( ] } - response = client.post("/api/review/action", json=request_data) + response = await client.post("/api/review/action", json=request_data) assert response.status_code == 200 data = response.json() @@ -201,7 
+234,9 @@ def test_process_review_action_approve_success( assert data["error"] is None -def test_process_review_action_reject_success( +@pytest.mark.asyncio(loop_scope="session") +async def test_process_review_action_reject_success( + client: httpx.AsyncClient, mocker: pytest_mock.MockerFixture, sample_pending_review: PendingHumanReviewModel, test_user_id: str, @@ -209,6 +244,20 @@ def test_process_review_action_reject_success( """Test successful review rejection""" # Mock the route functions + # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + mock_get_reviews_for_user = mocker.patch( + "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + ) + mock_get_reviews_for_user.return_value = {"test_node_123": sample_pending_review} + + # Mock get_graph_execution_meta to return execution in REVIEW status + mock_get_graph_exec = mocker.patch( + "backend.api.features.executions.review.routes.get_graph_execution_meta" + ) + mock_graph_exec_meta = mocker.Mock() + mock_graph_exec_meta.status = ExecutionStatus.REVIEW + mock_get_graph_exec.return_value = mock_graph_exec_meta + mock_get_reviews_for_execution = mocker.patch( "backend.api.features.executions.review.routes.get_pending_reviews_for_execution" ) @@ -251,7 +300,7 @@ def test_process_review_action_reject_success( ] } - response = client.post("/api/review/action", json=request_data) + response = await client.post("/api/review/action", json=request_data) assert response.status_code == 200 data = response.json() @@ -261,7 +310,9 @@ def test_process_review_action_reject_success( assert data["error"] is None -def test_process_review_action_mixed_success( +@pytest.mark.asyncio(loop_scope="session") +async def test_process_review_action_mixed_success( + client: httpx.AsyncClient, mocker: pytest_mock.MockerFixture, sample_pending_review: PendingHumanReviewModel, test_user_id: str, @@ -288,6 +339,15 @@ def test_process_review_action_mixed_success( # Mock the route functions + # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + mock_get_reviews_for_user = mocker.patch( + "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + ) + mock_get_reviews_for_user.return_value = { + "test_node_123": sample_pending_review, + "test_node_456": second_review, + } + mock_get_reviews_for_execution = mocker.patch( "backend.api.features.executions.review.routes.get_pending_reviews_for_execution" ) @@ -337,6 +397,14 @@ def test_process_review_action_mixed_success( "test_node_456": rejected_review, } + # Mock get_graph_execution_meta to return execution in REVIEW status + mock_get_graph_exec = mocker.patch( + "backend.api.features.executions.review.routes.get_graph_execution_meta" + ) + mock_graph_exec_meta = mocker.Mock() + mock_graph_exec_meta.status = ExecutionStatus.REVIEW + mock_get_graph_exec.return_value = mock_graph_exec_meta + mock_has_pending = mocker.patch( "backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec" ) @@ -358,7 +426,7 @@ def test_process_review_action_mixed_success( ] } - response = client.post("/api/review/action", json=request_data) + response = await client.post("/api/review/action", json=request_data) assert response.status_code == 200 data = response.json() @@ -368,14 +436,16 @@ def test_process_review_action_mixed_success( assert data["error"] is None -def test_process_review_action_empty_request( +@pytest.mark.asyncio(loop_scope="session") +async def test_process_review_action_empty_request( + 
client: httpx.AsyncClient, mocker: pytest_mock.MockerFixture, test_user_id: str, ) -> None: """Test error when no reviews provided""" request_data = {"reviews": []} - response = client.post("/api/review/action", json=request_data) + response = await client.post("/api/review/action", json=request_data) assert response.status_code == 422 response_data = response.json() @@ -385,11 +455,29 @@ def test_process_review_action_empty_request( assert "At least one review must be provided" in response_data["detail"][0]["msg"] -def test_process_review_action_review_not_found( +@pytest.mark.asyncio(loop_scope="session") +async def test_process_review_action_review_not_found( + client: httpx.AsyncClient, mocker: pytest_mock.MockerFixture, + sample_pending_review: PendingHumanReviewModel, test_user_id: str, ) -> None: """Test error when review is not found""" + # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + mock_get_reviews_for_user = mocker.patch( + "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + ) + # Return empty dict to simulate review not found + mock_get_reviews_for_user.return_value = {} + + # Mock get_graph_execution_meta to return execution in REVIEW status + mock_get_graph_exec = mocker.patch( + "backend.api.features.executions.review.routes.get_graph_execution_meta" + ) + mock_graph_exec_meta = mocker.Mock() + mock_graph_exec_meta.status = ExecutionStatus.REVIEW + mock_get_graph_exec.return_value = mock_graph_exec_meta + # Mock the functions that extract graph execution ID from the request mock_get_reviews_for_execution = mocker.patch( "backend.api.features.executions.review.routes.get_pending_reviews_for_execution" @@ -415,18 +503,34 @@ def test_process_review_action_review_not_found( ] } - response = client.post("/api/review/action", json=request_data) + response = await client.post("/api/review/action", json=request_data) - assert response.status_code == 400 - assert "Reviews not found" in response.json()["detail"] + assert response.status_code == 404 + assert "No pending review found" in response.json()["detail"] -def test_process_review_action_partial_failure( +@pytest.mark.asyncio(loop_scope="session") +async def test_process_review_action_partial_failure( + client: httpx.AsyncClient, mocker: pytest_mock.MockerFixture, sample_pending_review: PendingHumanReviewModel, test_user_id: str, ) -> None: """Test handling of partial failures in review processing""" + # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + mock_get_reviews_for_user = mocker.patch( + "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + ) + mock_get_reviews_for_user.return_value = {"test_node_123": sample_pending_review} + + # Mock get_graph_execution_meta to return execution in REVIEW status + mock_get_graph_exec = mocker.patch( + "backend.api.features.executions.review.routes.get_graph_execution_meta" + ) + mock_graph_exec_meta = mocker.Mock() + mock_graph_exec_meta.status = ExecutionStatus.REVIEW + mock_get_graph_exec.return_value = mock_graph_exec_meta + # Mock the route functions mock_get_reviews_for_execution = mocker.patch( "backend.api.features.executions.review.routes.get_pending_reviews_for_execution" @@ -449,31 +553,34 @@ def test_process_review_action_partial_failure( ] } - response = client.post("/api/review/action", json=request_data) + response = await client.post("/api/review/action", json=request_data) assert response.status_code == 400 assert "Some reviews failed 
validation" in response.json()["detail"] -def test_process_review_action_invalid_node_exec_id( +@pytest.mark.asyncio(loop_scope="session") +async def test_process_review_action_invalid_node_exec_id( + client: httpx.AsyncClient, mocker: pytest_mock.MockerFixture, sample_pending_review: PendingHumanReviewModel, test_user_id: str, ) -> None: """Test failure when trying to process review with invalid node execution ID""" - # Mock the route functions - mock_get_reviews_for_execution = mocker.patch( - "backend.api.features.executions.review.routes.get_pending_reviews_for_execution" + # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + mock_get_reviews_for_user = mocker.patch( + "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" ) - mock_get_reviews_for_execution.return_value = [sample_pending_review] + # Return empty dict to simulate review not found + mock_get_reviews_for_user.return_value = {} - # Mock validation failure - this should return 400, not 500 - mock_process_all_reviews = mocker.patch( - "backend.api.features.executions.review.routes.process_all_reviews_for_execution" - ) - mock_process_all_reviews.side_effect = ValueError( - "Invalid node execution ID format" + # Mock get_graph_execution_meta to return execution in REVIEW status + mock_get_graph_exec = mocker.patch( + "backend.api.features.executions.review.routes.get_graph_execution_meta" ) + mock_graph_exec_meta = mocker.Mock() + mock_graph_exec_meta.status = ExecutionStatus.REVIEW + mock_get_graph_exec.return_value = mock_graph_exec_meta request_data = { "reviews": [ @@ -485,8 +592,638 @@ def test_process_review_action_invalid_node_exec_id( ] } - response = client.post("/api/review/action", json=request_data) + response = await client.post("/api/review/action", json=request_data) - # Should be a 400 Bad Request, not 500 Internal Server Error - assert response.status_code == 400 - assert "Invalid node execution ID format" in response.json()["detail"] + # Returns 404 when review is not found + assert response.status_code == 404 + assert "No pending review found" in response.json()["detail"] + + +@pytest.mark.asyncio(loop_scope="session") +async def test_process_review_action_auto_approve_creates_auto_approval_records( + client: httpx.AsyncClient, + mocker: pytest_mock.MockerFixture, + sample_pending_review: PendingHumanReviewModel, + test_user_id: str, +) -> None: + """Test that auto_approve_future_actions flag creates auto-approval records""" + # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + mock_get_reviews_for_user = mocker.patch( + "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + ) + mock_get_reviews_for_user.return_value = {"test_node_123": sample_pending_review} + + # Mock process_all_reviews + mock_process_all_reviews = mocker.patch( + "backend.api.features.executions.review.routes.process_all_reviews_for_execution" + ) + approved_review = PendingHumanReviewModel( + node_exec_id="test_node_123", + user_id=test_user_id, + graph_exec_id="test_graph_exec_456", + graph_id="test_graph_789", + graph_version=1, + payload={"data": "test payload"}, + instructions="Please review", + editable=True, + status=ReviewStatus.APPROVED, + review_message="Approved", + was_edited=False, + processed=False, + created_at=FIXED_NOW, + updated_at=FIXED_NOW, + reviewed_at=FIXED_NOW, + ) + mock_process_all_reviews.return_value = {"test_node_123": approved_review} + + # Mock get_node_executions to return node_id mapping 
+ mock_get_node_executions = mocker.patch( + "backend.data.execution.get_node_executions" + ) + mock_node_exec = mocker.Mock(spec=NodeExecutionResult) + mock_node_exec.node_exec_id = "test_node_123" + mock_node_exec.node_id = "test_node_def_456" + mock_get_node_executions.return_value = [mock_node_exec] + + # Mock create_auto_approval_record + mock_create_auto_approval = mocker.patch( + "backend.api.features.executions.review.routes.create_auto_approval_record" + ) + + # Mock get_graph_execution_meta to return execution in REVIEW status + mock_get_graph_exec = mocker.patch( + "backend.api.features.executions.review.routes.get_graph_execution_meta" + ) + mock_graph_exec_meta = mocker.Mock() + mock_graph_exec_meta.status = ExecutionStatus.REVIEW + mock_get_graph_exec.return_value = mock_graph_exec_meta + + # Mock has_pending_reviews_for_graph_exec + mock_has_pending = mocker.patch( + "backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec" + ) + mock_has_pending.return_value = False + + # Mock get_graph_settings to return custom settings + mock_get_settings = mocker.patch( + "backend.api.features.executions.review.routes.get_graph_settings" + ) + mock_get_settings.return_value = GraphSettings( + human_in_the_loop_safe_mode=True, + sensitive_action_safe_mode=True, + ) + + # Mock get_user_by_id to prevent database access + mock_get_user = mocker.patch( + "backend.api.features.executions.review.routes.get_user_by_id" + ) + mock_user = mocker.Mock() + mock_user.timezone = "UTC" + mock_get_user.return_value = mock_user + + # Mock add_graph_execution + mock_add_execution = mocker.patch( + "backend.api.features.executions.review.routes.add_graph_execution" + ) + + request_data = { + "reviews": [ + { + "node_exec_id": "test_node_123", + "approved": True, + "message": "Approved", + "auto_approve_future": True, + } + ], + } + + response = await client.post("/api/review/action", json=request_data) + + assert response.status_code == 200 + + # Verify process_all_reviews_for_execution was called (without auto_approve param) + mock_process_all_reviews.assert_called_once() + + # Verify create_auto_approval_record was called for the approved review + mock_create_auto_approval.assert_called_once_with( + user_id=test_user_id, + graph_exec_id="test_graph_exec_456", + graph_id="test_graph_789", + graph_version=1, + node_id="test_node_def_456", + payload={"data": "test payload"}, + ) + + # Verify get_graph_settings was called with correct parameters + mock_get_settings.assert_called_once_with( + user_id=test_user_id, graph_id="test_graph_789" + ) + + # Verify add_graph_execution was called with proper ExecutionContext + mock_add_execution.assert_called_once() + call_kwargs = mock_add_execution.call_args.kwargs + execution_context = call_kwargs["execution_context"] + + assert isinstance(execution_context, ExecutionContext) + assert execution_context.human_in_the_loop_safe_mode is True + assert execution_context.sensitive_action_safe_mode is True + + +@pytest.mark.asyncio(loop_scope="session") +async def test_process_review_action_without_auto_approve_still_loads_settings( + client: httpx.AsyncClient, + mocker: pytest_mock.MockerFixture, + sample_pending_review: PendingHumanReviewModel, + test_user_id: str, +) -> None: + """Test that execution context is created with settings even without auto-approve""" + # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + mock_get_reviews_for_user = mocker.patch( + 
"backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + ) + mock_get_reviews_for_user.return_value = {"test_node_123": sample_pending_review} + + # Mock process_all_reviews + mock_process_all_reviews = mocker.patch( + "backend.api.features.executions.review.routes.process_all_reviews_for_execution" + ) + approved_review = PendingHumanReviewModel( + node_exec_id="test_node_123", + user_id=test_user_id, + graph_exec_id="test_graph_exec_456", + graph_id="test_graph_789", + graph_version=1, + payload={"data": "test payload"}, + instructions="Please review", + editable=True, + status=ReviewStatus.APPROVED, + review_message="Approved", + was_edited=False, + processed=False, + created_at=FIXED_NOW, + updated_at=FIXED_NOW, + reviewed_at=FIXED_NOW, + ) + mock_process_all_reviews.return_value = {"test_node_123": approved_review} + + # Mock create_auto_approval_record - should NOT be called when auto_approve is False + mock_create_auto_approval = mocker.patch( + "backend.api.features.executions.review.routes.create_auto_approval_record" + ) + + # Mock get_graph_execution_meta to return execution in REVIEW status + mock_get_graph_exec = mocker.patch( + "backend.api.features.executions.review.routes.get_graph_execution_meta" + ) + mock_graph_exec_meta = mocker.Mock() + mock_graph_exec_meta.status = ExecutionStatus.REVIEW + mock_get_graph_exec.return_value = mock_graph_exec_meta + + # Mock has_pending_reviews_for_graph_exec + mock_has_pending = mocker.patch( + "backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec" + ) + mock_has_pending.return_value = False + + # Mock get_graph_settings with sensitive_action_safe_mode enabled + mock_get_settings = mocker.patch( + "backend.api.features.executions.review.routes.get_graph_settings" + ) + mock_get_settings.return_value = GraphSettings( + human_in_the_loop_safe_mode=False, + sensitive_action_safe_mode=True, + ) + + # Mock get_user_by_id to prevent database access + mock_get_user = mocker.patch( + "backend.api.features.executions.review.routes.get_user_by_id" + ) + mock_user = mocker.Mock() + mock_user.timezone = "UTC" + mock_get_user.return_value = mock_user + + # Mock add_graph_execution + mock_add_execution = mocker.patch( + "backend.api.features.executions.review.routes.add_graph_execution" + ) + + # Request WITHOUT auto_approve_future (defaults to False) + request_data = { + "reviews": [ + { + "node_exec_id": "test_node_123", + "approved": True, + "message": "Approved", + # auto_approve_future defaults to False + } + ], + } + + response = await client.post("/api/review/action", json=request_data) + + assert response.status_code == 200 + + # Verify process_all_reviews_for_execution was called + mock_process_all_reviews.assert_called_once() + + # Verify create_auto_approval_record was NOT called (auto_approve_future=False) + mock_create_auto_approval.assert_not_called() + + # Verify settings were loaded + mock_get_settings.assert_called_once() + + # Verify ExecutionContext has proper settings + mock_add_execution.assert_called_once() + call_kwargs = mock_add_execution.call_args.kwargs + execution_context = call_kwargs["execution_context"] + + assert isinstance(execution_context, ExecutionContext) + assert execution_context.human_in_the_loop_safe_mode is False + assert execution_context.sensitive_action_safe_mode is True + + +@pytest.mark.asyncio(loop_scope="session") +async def test_process_review_action_auto_approve_only_applies_to_approved_reviews( + client: httpx.AsyncClient, + mocker: 
pytest_mock.MockerFixture, + test_user_id: str, +) -> None: + """Test that auto_approve record is created only for approved reviews""" + # Create two reviews - one approved, one rejected + approved_review = PendingHumanReviewModel( + node_exec_id="node_exec_approved", + user_id=test_user_id, + graph_exec_id="test_graph_exec_456", + graph_id="test_graph_789", + graph_version=1, + payload={"data": "approved"}, + instructions="Review", + editable=True, + status=ReviewStatus.APPROVED, + review_message=None, + was_edited=False, + processed=False, + created_at=FIXED_NOW, + updated_at=FIXED_NOW, + reviewed_at=FIXED_NOW, + ) + rejected_review = PendingHumanReviewModel( + node_exec_id="node_exec_rejected", + user_id=test_user_id, + graph_exec_id="test_graph_exec_456", + graph_id="test_graph_789", + graph_version=1, + payload={"data": "rejected"}, + instructions="Review", + editable=True, + status=ReviewStatus.REJECTED, + review_message="Rejected", + was_edited=False, + processed=False, + created_at=FIXED_NOW, + updated_at=FIXED_NOW, + reviewed_at=FIXED_NOW, + ) + + # Mock get_pending_reviews_by_node_exec_ids (called to find the graph_exec_id) + mock_get_reviews_for_user = mocker.patch( + "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + ) + # Need to return both reviews in WAITING state (before processing) + approved_review_waiting = PendingHumanReviewModel( + node_exec_id="node_exec_approved", + user_id=test_user_id, + graph_exec_id="test_graph_exec_456", + graph_id="test_graph_789", + graph_version=1, + payload={"data": "approved"}, + instructions="Review", + editable=True, + status=ReviewStatus.WAITING, + review_message=None, + was_edited=False, + processed=False, + created_at=FIXED_NOW, + ) + rejected_review_waiting = PendingHumanReviewModel( + node_exec_id="node_exec_rejected", + user_id=test_user_id, + graph_exec_id="test_graph_exec_456", + graph_id="test_graph_789", + graph_version=1, + payload={"data": "rejected"}, + instructions="Review", + editable=True, + status=ReviewStatus.WAITING, + review_message=None, + was_edited=False, + processed=False, + created_at=FIXED_NOW, + ) + mock_get_reviews_for_user.return_value = { + "node_exec_approved": approved_review_waiting, + "node_exec_rejected": rejected_review_waiting, + } + + # Mock process_all_reviews + mock_process_all_reviews = mocker.patch( + "backend.api.features.executions.review.routes.process_all_reviews_for_execution" + ) + mock_process_all_reviews.return_value = { + "node_exec_approved": approved_review, + "node_exec_rejected": rejected_review, + } + + # Mock get_node_executions to return node_id mapping + mock_get_node_executions = mocker.patch( + "backend.data.execution.get_node_executions" + ) + mock_node_exec = mocker.Mock(spec=NodeExecutionResult) + mock_node_exec.node_exec_id = "node_exec_approved" + mock_node_exec.node_id = "test_node_def_approved" + mock_get_node_executions.return_value = [mock_node_exec] + + # Mock create_auto_approval_record + mock_create_auto_approval = mocker.patch( + "backend.api.features.executions.review.routes.create_auto_approval_record" + ) + + # Mock get_graph_execution_meta to return execution in REVIEW status + mock_get_graph_exec = mocker.patch( + "backend.api.features.executions.review.routes.get_graph_execution_meta" + ) + mock_graph_exec_meta = mocker.Mock() + mock_graph_exec_meta.status = ExecutionStatus.REVIEW + mock_get_graph_exec.return_value = mock_graph_exec_meta + + # Mock has_pending_reviews_for_graph_exec + mock_has_pending = mocker.patch( + 
"backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec" + ) + mock_has_pending.return_value = False + + # Mock get_graph_settings + mock_get_settings = mocker.patch( + "backend.api.features.executions.review.routes.get_graph_settings" + ) + mock_get_settings.return_value = GraphSettings() + + # Mock get_user_by_id to prevent database access + mock_get_user = mocker.patch( + "backend.api.features.executions.review.routes.get_user_by_id" + ) + mock_user = mocker.Mock() + mock_user.timezone = "UTC" + mock_get_user.return_value = mock_user + + # Mock add_graph_execution + mock_add_execution = mocker.patch( + "backend.api.features.executions.review.routes.add_graph_execution" + ) + + request_data = { + "reviews": [ + { + "node_exec_id": "node_exec_approved", + "approved": True, + "auto_approve_future": True, + }, + { + "node_exec_id": "node_exec_rejected", + "approved": False, + "auto_approve_future": True, # Should be ignored since rejected + }, + ], + } + + response = await client.post("/api/review/action", json=request_data) + + assert response.status_code == 200 + + # Verify process_all_reviews_for_execution was called + mock_process_all_reviews.assert_called_once() + + # Verify create_auto_approval_record was called ONLY for the approved review + # (not for the rejected one) + mock_create_auto_approval.assert_called_once_with( + user_id=test_user_id, + graph_exec_id="test_graph_exec_456", + graph_id="test_graph_789", + graph_version=1, + node_id="test_node_def_approved", + payload={"data": "approved"}, + ) + + # Verify get_node_executions was called to batch-fetch node data + mock_get_node_executions.assert_called_once() + + # Verify ExecutionContext was created (auto-approval is now DB-based) + call_kwargs = mock_add_execution.call_args.kwargs + execution_context = call_kwargs["execution_context"] + assert isinstance(execution_context, ExecutionContext) + + +@pytest.mark.asyncio(loop_scope="session") +async def test_process_review_action_per_review_auto_approve_granularity( + client: httpx.AsyncClient, + mocker: pytest_mock.MockerFixture, + sample_pending_review: PendingHumanReviewModel, + test_user_id: str, +) -> None: + """Test that auto-approval can be set per-review (granular control)""" + # Mock get_pending_reviews_by_node_exec_ids - return different reviews based on node_exec_id + mock_get_reviews_for_user = mocker.patch( + "backend.api.features.executions.review.routes.get_pending_reviews_by_node_exec_ids" + ) + + # Create a mapping of node_exec_id to review + review_map = { + "node_1_auto": PendingHumanReviewModel( + node_exec_id="node_1_auto", + user_id=test_user_id, + graph_exec_id="test_graph_exec", + graph_id="test_graph", + graph_version=1, + payload={"data": "node1"}, + instructions="Review 1", + editable=True, + status=ReviewStatus.WAITING, + review_message=None, + was_edited=False, + processed=False, + created_at=FIXED_NOW, + ), + "node_2_manual": PendingHumanReviewModel( + node_exec_id="node_2_manual", + user_id=test_user_id, + graph_exec_id="test_graph_exec", + graph_id="test_graph", + graph_version=1, + payload={"data": "node2"}, + instructions="Review 2", + editable=True, + status=ReviewStatus.WAITING, + review_message=None, + was_edited=False, + processed=False, + created_at=FIXED_NOW, + ), + "node_3_auto": PendingHumanReviewModel( + node_exec_id="node_3_auto", + user_id=test_user_id, + graph_exec_id="test_graph_exec", + graph_id="test_graph", + graph_version=1, + payload={"data": "node3"}, + instructions="Review 3", + editable=True, + 
status=ReviewStatus.WAITING, + review_message=None, + was_edited=False, + processed=False, + created_at=FIXED_NOW, + ), + } + + # Return the review map dict (batch function returns all requested reviews) + mock_get_reviews_for_user.return_value = review_map + + # Mock process_all_reviews - return 3 approved reviews + mock_process_all_reviews = mocker.patch( + "backend.api.features.executions.review.routes.process_all_reviews_for_execution" + ) + mock_process_all_reviews.return_value = { + "node_1_auto": PendingHumanReviewModel( + node_exec_id="node_1_auto", + user_id=test_user_id, + graph_exec_id="test_graph_exec", + graph_id="test_graph", + graph_version=1, + payload={"data": "node1"}, + instructions="Review 1", + editable=True, + status=ReviewStatus.APPROVED, + review_message=None, + was_edited=False, + processed=False, + created_at=FIXED_NOW, + updated_at=FIXED_NOW, + reviewed_at=FIXED_NOW, + ), + "node_2_manual": PendingHumanReviewModel( + node_exec_id="node_2_manual", + user_id=test_user_id, + graph_exec_id="test_graph_exec", + graph_id="test_graph", + graph_version=1, + payload={"data": "node2"}, + instructions="Review 2", + editable=True, + status=ReviewStatus.APPROVED, + review_message=None, + was_edited=False, + processed=False, + created_at=FIXED_NOW, + updated_at=FIXED_NOW, + reviewed_at=FIXED_NOW, + ), + "node_3_auto": PendingHumanReviewModel( + node_exec_id="node_3_auto", + user_id=test_user_id, + graph_exec_id="test_graph_exec", + graph_id="test_graph", + graph_version=1, + payload={"data": "node3"}, + instructions="Review 3", + editable=True, + status=ReviewStatus.APPROVED, + review_message=None, + was_edited=False, + processed=False, + created_at=FIXED_NOW, + updated_at=FIXED_NOW, + reviewed_at=FIXED_NOW, + ), + } + + # Mock get_node_executions to return batch node data + mock_get_node_executions = mocker.patch( + "backend.data.execution.get_node_executions" + ) + # Create mock node executions for each review + mock_node_execs = [] + for node_exec_id in ["node_1_auto", "node_2_manual", "node_3_auto"]: + mock_node = mocker.Mock(spec=NodeExecutionResult) + mock_node.node_exec_id = node_exec_id + mock_node.node_id = f"node_def_{node_exec_id}" + mock_node_execs.append(mock_node) + mock_get_node_executions.return_value = mock_node_execs + + # Mock create_auto_approval_record + mock_create_auto_approval = mocker.patch( + "backend.api.features.executions.review.routes.create_auto_approval_record" + ) + + # Mock get_graph_execution_meta + mock_get_graph_exec = mocker.patch( + "backend.api.features.executions.review.routes.get_graph_execution_meta" + ) + mock_graph_exec_meta = mocker.Mock() + mock_graph_exec_meta.status = ExecutionStatus.REVIEW + mock_get_graph_exec.return_value = mock_graph_exec_meta + + # Mock has_pending_reviews_for_graph_exec + mock_has_pending = mocker.patch( + "backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec" + ) + mock_has_pending.return_value = False + + # Mock settings and execution + mock_get_settings = mocker.patch( + "backend.api.features.executions.review.routes.get_graph_settings" + ) + mock_get_settings.return_value = GraphSettings( + human_in_the_loop_safe_mode=False, sensitive_action_safe_mode=False + ) + + mocker.patch("backend.api.features.executions.review.routes.add_graph_execution") + mocker.patch("backend.api.features.executions.review.routes.get_user_by_id") + + # Request with granular auto-approval: + # - node_1_auto: auto_approve_future=True + # - node_2_manual: auto_approve_future=False (explicit) + # - 
node_3_auto: auto_approve_future=True + request_data = { + "reviews": [ + { + "node_exec_id": "node_1_auto", + "approved": True, + "auto_approve_future": True, + }, + { + "node_exec_id": "node_2_manual", + "approved": True, + "auto_approve_future": False, # Don't auto-approve this one + }, + { + "node_exec_id": "node_3_auto", + "approved": True, + "auto_approve_future": True, + }, + ], + } + + response = await client.post("/api/review/action", json=request_data) + + assert response.status_code == 200 + + # Verify create_auto_approval_record was called ONLY for reviews with auto_approve_future=True + assert mock_create_auto_approval.call_count == 2 + + # Check that it was called for node_1 and node_3, but NOT node_2 + call_args_list = [call.kwargs for call in mock_create_auto_approval.call_args_list] + node_ids_with_auto_approval = [args["node_id"] for args in call_args_list] + + assert "node_def_node_1_auto" in node_ids_with_auto_approval + assert "node_def_node_3_auto" in node_ids_with_auto_approval + assert "node_def_node_2_manual" not in node_ids_with_auto_approval diff --git a/autogpt_platform/backend/backend/api/features/executions/review/routes.py b/autogpt_platform/backend/backend/api/features/executions/review/routes.py index 88646046da..a10071e9cb 100644 --- a/autogpt_platform/backend/backend/api/features/executions/review/routes.py +++ b/autogpt_platform/backend/backend/api/features/executions/review/routes.py @@ -1,17 +1,27 @@ +import asyncio import logging -from typing import List +from typing import Any, List import autogpt_libs.auth as autogpt_auth_lib from fastapi import APIRouter, HTTPException, Query, Security, status from prisma.enums import ReviewStatus -from backend.data.execution import get_graph_execution_meta +from backend.data.execution import ( + ExecutionContext, + ExecutionStatus, + get_graph_execution_meta, +) +from backend.data.graph import get_graph_settings from backend.data.human_review import ( + create_auto_approval_record, + get_pending_reviews_by_node_exec_ids, get_pending_reviews_for_execution, get_pending_reviews_for_user, has_pending_reviews_for_graph_exec, process_all_reviews_for_execution, ) +from backend.data.model import USER_TIMEZONE_NOT_SET +from backend.data.user import get_user_by_id from backend.executor.utils import add_graph_execution from .model import PendingHumanReviewModel, ReviewRequest, ReviewResponse @@ -127,17 +137,70 @@ async def process_review_action( detail="At least one review must be provided", ) - # Build review decisions map + # Batch fetch all requested reviews + reviews_map = await get_pending_reviews_by_node_exec_ids( + list(all_request_node_ids), user_id + ) + + # Validate all reviews were found + missing_ids = all_request_node_ids - set(reviews_map.keys()) + if missing_ids: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"No pending review found for node execution(s): {', '.join(missing_ids)}", + ) + + # Validate all reviews belong to the same execution + graph_exec_ids = {review.graph_exec_id for review in reviews_map.values()} + if len(graph_exec_ids) > 1: + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail="All reviews in a single request must belong to the same execution.", + ) + + graph_exec_id = next(iter(graph_exec_ids)) + + # Validate execution status before processing reviews + graph_exec_meta = await get_graph_execution_meta( + user_id=user_id, execution_id=graph_exec_id + ) + + if not graph_exec_meta: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, 
+            detail=f"Graph execution #{graph_exec_id} not found",
+        )
+
+    # Only allow processing reviews if execution is paused for review
+    # or incomplete (partial execution with some reviews already processed)
+    if graph_exec_meta.status not in (
+        ExecutionStatus.REVIEW,
+        ExecutionStatus.INCOMPLETE,
+    ):
+        raise HTTPException(
+            status_code=status.HTTP_409_CONFLICT,
+            detail=f"Cannot process reviews while execution status is {graph_exec_meta.status}. "
+            f"Reviews can only be processed while the execution is paused for review "
+            f"(REVIEW) or partially executed (INCOMPLETE).",
+        )
+
+    # Build review decisions map and track which reviews requested auto-approval
+    # Auto-approved reviews use original data (no modifications allowed)
     review_decisions = {}
+    auto_approve_requests = {}  # Map node_exec_id -> auto_approve_future flag
+
     for review in request.reviews:
         review_status = (
             ReviewStatus.APPROVED if review.approved else ReviewStatus.REJECTED
         )
+        # If this review requested auto-approval, don't allow data modifications
+        reviewed_data = None if review.auto_approve_future else review.reviewed_data
         review_decisions[review.node_exec_id] = (
             review_status,
-            review.reviewed_data,
+            reviewed_data,
             review.message,
         )
+        auto_approve_requests[review.node_exec_id] = review.auto_approve_future

     # Process all reviews
     updated_reviews = await process_all_reviews_for_execution(
@@ -145,6 +208,87 @@ async def process_review_action(
         review_decisions=review_decisions,
     )

+    # Create auto-approval records for approved reviews that requested it
+    # Deduplicate by node_id to avoid race conditions when multiple reviews
+    # for the same node are processed in parallel
+    async def create_auto_approval_for_node(
+        node_id: str, review_result
+    ) -> tuple[str, bool]:
+        """
+        Create auto-approval record for a node.
+        Returns (node_id, success) tuple for tracking failures.
+        """
+        try:
+            await create_auto_approval_record(
+                user_id=user_id,
+                graph_exec_id=review_result.graph_exec_id,
+                graph_id=review_result.graph_id,
+                graph_version=review_result.graph_version,
+                node_id=node_id,
+                payload=review_result.payload,
+            )
+            return (node_id, True)
+        except Exception as e:
+            logger.error(
+                f"Failed to create auto-approval record for node {node_id}",
+                exc_info=e,
+            )
+            return (node_id, False)
+
+    # Collect node_exec_ids that need auto-approval
+    node_exec_ids_needing_auto_approval = [
+        node_exec_id
+        for node_exec_id, review_result in updated_reviews.items()
+        if review_result.status == ReviewStatus.APPROVED
+        and auto_approve_requests.get(node_exec_id, False)
+    ]
+
+    # Batch-fetch node executions to get node_ids
+    nodes_needing_auto_approval: dict[str, Any] = {}
+    if node_exec_ids_needing_auto_approval:
+        from backend.data.execution import get_node_executions
+
+        node_execs = await get_node_executions(
+            graph_exec_id=graph_exec_id, include_exec_data=False
+        )
+        node_exec_map = {node_exec.node_exec_id: node_exec for node_exec in node_execs}
+
+        for node_exec_id in node_exec_ids_needing_auto_approval:
+            node_exec = node_exec_map.get(node_exec_id)
+            if node_exec:
+                review_result = updated_reviews[node_exec_id]
+                # Use the first approved review for this node (deduplicate by node_id)
+                if node_exec.node_id not in nodes_needing_auto_approval:
+                    nodes_needing_auto_approval[node_exec.node_id] = review_result
+            else:
+                logger.error(
+                    f"Failed to create auto-approval record for {node_exec_id}: "
+                    f"Node execution not found. This may indicate a race condition "
+                    f"or data inconsistency."
+ ) + + # Execute all auto-approval creations in parallel (deduplicated by node_id) + auto_approval_results = await asyncio.gather( + *[ + create_auto_approval_for_node(node_id, review_result) + for node_id, review_result in nodes_needing_auto_approval.items() + ], + return_exceptions=True, + ) + + # Count auto-approval failures + auto_approval_failed_count = 0 + for result in auto_approval_results: + if isinstance(result, Exception): + # Unexpected exception during auto-approval creation + auto_approval_failed_count += 1 + logger.error( + f"Unexpected exception during auto-approval creation: {result}" + ) + elif isinstance(result, tuple) and len(result) == 2 and not result[1]: + # Auto-approval creation failed (returned False) + auto_approval_failed_count += 1 + # Count results approved_count = sum( 1 @@ -157,30 +301,53 @@ async def process_review_action( if review.status == ReviewStatus.REJECTED ) - # Resume execution if we processed some reviews + # Resume execution only if ALL pending reviews for this execution have been processed if updated_reviews: - # Get graph execution ID from any processed review - first_review = next(iter(updated_reviews.values())) - graph_exec_id = first_review.graph_exec_id - - # Check if any pending reviews remain for this execution still_has_pending = await has_pending_reviews_for_graph_exec(graph_exec_id) if not still_has_pending: - # Resume execution + # Get the graph_id from any processed review + first_review = next(iter(updated_reviews.values())) + try: + # Fetch user and settings to build complete execution context + user = await get_user_by_id(user_id) + settings = await get_graph_settings( + user_id=user_id, graph_id=first_review.graph_id + ) + + # Preserve user's timezone preference when resuming execution + user_timezone = ( + user.timezone if user.timezone != USER_TIMEZONE_NOT_SET else "UTC" + ) + + execution_context = ExecutionContext( + human_in_the_loop_safe_mode=settings.human_in_the_loop_safe_mode, + sensitive_action_safe_mode=settings.sensitive_action_safe_mode, + user_timezone=user_timezone, + ) + await add_graph_execution( graph_id=first_review.graph_id, user_id=user_id, graph_exec_id=graph_exec_id, + execution_context=execution_context, ) logger.info(f"Resumed execution {graph_exec_id}") except Exception as e: logger.error(f"Failed to resume execution {graph_exec_id}: {str(e)}") + # Build error message if auto-approvals failed + error_message = None + if auto_approval_failed_count > 0: + error_message = ( + f"{auto_approval_failed_count} auto-approval setting(s) could not be saved. " + f"You may need to manually approve these reviews in future executions." + ) + return ReviewResponse( approved_count=approved_count, rejected_count=rejected_count, - failed_count=0, - error=None, + failed_count=auto_approval_failed_count, + error=error_message, ) diff --git a/autogpt_platform/backend/backend/api/features/library/db.py b/autogpt_platform/backend/backend/api/features/library/db.py index 1c17e7b36c..18d535d896 100644 --- a/autogpt_platform/backend/backend/api/features/library/db.py +++ b/autogpt_platform/backend/backend/api/features/library/db.py @@ -401,27 +401,11 @@ async def add_generated_agent_image( ) -def _initialize_graph_settings(graph: graph_db.GraphModel) -> GraphSettings: - """ - Initialize GraphSettings based on graph content. 
- - Args: - graph: The graph to analyze - - Returns: - GraphSettings with appropriate human_in_the_loop_safe_mode value - """ - if graph.has_human_in_the_loop: - # Graph has HITL blocks - set safe mode to True by default - return GraphSettings(human_in_the_loop_safe_mode=True) - else: - # Graph has no HITL blocks - keep None - return GraphSettings(human_in_the_loop_safe_mode=None) - - async def create_library_agent( graph: graph_db.GraphModel, user_id: str, + hitl_safe_mode: bool = True, + sensitive_action_safe_mode: bool = False, create_library_agents_for_sub_graphs: bool = True, ) -> list[library_model.LibraryAgent]: """ @@ -430,6 +414,8 @@ async def create_library_agent( Args: agent: The agent/Graph to add to the library. user_id: The user to whom the agent will be added. + hitl_safe_mode: Whether HITL blocks require manual review (default True). + sensitive_action_safe_mode: Whether sensitive action blocks require review. create_library_agents_for_sub_graphs: If True, creates LibraryAgent records for sub-graphs as well. Returns: @@ -465,7 +451,11 @@ async def create_library_agent( } }, settings=SafeJson( - _initialize_graph_settings(graph_entry).model_dump() + GraphSettings.from_graph( + graph_entry, + hitl_safe_mode=hitl_safe_mode, + sensitive_action_safe_mode=sensitive_action_safe_mode, + ).model_dump() ), ), include=library_agent_include( @@ -593,7 +583,13 @@ async def update_library_agent( ) update_fields["isDeleted"] = is_deleted if settings is not None: - update_fields["settings"] = SafeJson(settings.model_dump()) + existing_agent = await get_library_agent(id=library_agent_id, user_id=user_id) + current_settings_dict = ( + existing_agent.settings.model_dump() if existing_agent.settings else {} + ) + new_settings = settings.model_dump(exclude_unset=True) + merged_settings = {**current_settings_dict, **new_settings} + update_fields["settings"] = SafeJson(merged_settings) try: # If graph_version is provided, update to that specific version @@ -627,33 +623,6 @@ async def update_library_agent( raise DatabaseError("Failed to update library agent") from e -async def update_library_agent_settings( - user_id: str, - agent_id: str, - settings: GraphSettings, -) -> library_model.LibraryAgent: - """ - Updates the settings for a specific LibraryAgent. - - Args: - user_id: The owner of the LibraryAgent. - agent_id: The ID of the LibraryAgent to update. - settings: New GraphSettings to apply. - - Returns: - The updated LibraryAgent. - - Raises: - NotFoundError: If the specified LibraryAgent does not exist. - DatabaseError: If there's an error in the update operation. 
- """ - return await update_library_agent( - library_agent_id=agent_id, - user_id=user_id, - settings=settings, - ) - - async def delete_library_agent( library_agent_id: str, user_id: str, soft_delete: bool = True ) -> None: @@ -838,7 +807,7 @@ async def add_store_agent_to_library( "isCreatedByUser": False, "useGraphIsActiveVersion": False, "settings": SafeJson( - _initialize_graph_settings(graph_model).model_dump() + GraphSettings.from_graph(graph_model).model_dump() ), }, include=library_agent_include( @@ -1228,8 +1197,15 @@ async def fork_library_agent( ) new_graph = await on_graph_activate(new_graph, user_id=user_id) - # Create a library agent for the new graph - return (await create_library_agent(new_graph, user_id))[0] + # Create a library agent for the new graph, preserving safe mode settings + return ( + await create_library_agent( + new_graph, + user_id, + hitl_safe_mode=original_agent.settings.human_in_the_loop_safe_mode, + sensitive_action_safe_mode=original_agent.settings.sensitive_action_safe_mode, + ) + )[0] except prisma.errors.PrismaError as e: logger.error(f"Database error cloning library agent: {e}") raise DatabaseError("Failed to fork library agent") from e diff --git a/autogpt_platform/backend/backend/api/features/library/model.py b/autogpt_platform/backend/backend/api/features/library/model.py index 56fad7bfd3..14d7c7be81 100644 --- a/autogpt_platform/backend/backend/api/features/library/model.py +++ b/autogpt_platform/backend/backend/api/features/library/model.py @@ -73,6 +73,12 @@ class LibraryAgent(pydantic.BaseModel): has_external_trigger: bool = pydantic.Field( description="Whether the agent has an external trigger (e.g. webhook) node" ) + has_human_in_the_loop: bool = pydantic.Field( + description="Whether the agent has human-in-the-loop blocks" + ) + has_sensitive_action: bool = pydantic.Field( + description="Whether the agent has sensitive action blocks" + ) trigger_setup_info: Optional[GraphTriggerInfo] = None # Indicates whether there's a new output (based on recent runs) @@ -180,6 +186,8 @@ class LibraryAgent(pydantic.BaseModel): graph.credentials_input_schema if sub_graphs is not None else None ), has_external_trigger=graph.has_external_trigger, + has_human_in_the_loop=graph.has_human_in_the_loop, + has_sensitive_action=graph.has_sensitive_action, trigger_setup_info=graph.trigger_setup_info, new_output=new_output, can_access_graph=can_access_graph, diff --git a/autogpt_platform/backend/backend/api/features/library/routes_test.py b/autogpt_platform/backend/backend/api/features/library/routes_test.py index 0f05240a7f..ca604af760 100644 --- a/autogpt_platform/backend/backend/api/features/library/routes_test.py +++ b/autogpt_platform/backend/backend/api/features/library/routes_test.py @@ -52,6 +52,8 @@ async def test_get_library_agents_success( output_schema={"type": "object", "properties": {}}, credentials_input_schema={"type": "object", "properties": {}}, has_external_trigger=False, + has_human_in_the_loop=False, + has_sensitive_action=False, status=library_model.LibraryAgentStatus.COMPLETED, recommended_schedule_cron=None, new_output=False, @@ -75,6 +77,8 @@ async def test_get_library_agents_success( output_schema={"type": "object", "properties": {}}, credentials_input_schema={"type": "object", "properties": {}}, has_external_trigger=False, + has_human_in_the_loop=False, + has_sensitive_action=False, status=library_model.LibraryAgentStatus.COMPLETED, recommended_schedule_cron=None, new_output=False, @@ -150,6 +154,8 @@ async def 
test_get_favorite_library_agents_success( output_schema={"type": "object", "properties": {}}, credentials_input_schema={"type": "object", "properties": {}}, has_external_trigger=False, + has_human_in_the_loop=False, + has_sensitive_action=False, status=library_model.LibraryAgentStatus.COMPLETED, recommended_schedule_cron=None, new_output=False, @@ -218,6 +224,8 @@ def test_add_agent_to_library_success( output_schema={"type": "object", "properties": {}}, credentials_input_schema={"type": "object", "properties": {}}, has_external_trigger=False, + has_human_in_the_loop=False, + has_sensitive_action=False, status=library_model.LibraryAgentStatus.COMPLETED, new_output=False, can_access_graph=True, diff --git a/autogpt_platform/backend/backend/api/features/oauth_test.py b/autogpt_platform/backend/backend/api/features/oauth_test.py index 5f6b85a88a..5fd35f82e7 100644 --- a/autogpt_platform/backend/backend/api/features/oauth_test.py +++ b/autogpt_platform/backend/backend/api/features/oauth_test.py @@ -20,6 +20,7 @@ from typing import AsyncGenerator import httpx import pytest +import pytest_asyncio from autogpt_libs.api_key.keysmith import APIKeySmith from prisma.enums import APIKeyPermission from prisma.models import OAuthAccessToken as PrismaOAuthAccessToken @@ -38,13 +39,13 @@ keysmith = APIKeySmith() # ============================================================================ -@pytest.fixture +@pytest.fixture(scope="session") def test_user_id() -> str: """Test user ID for OAuth tests.""" return str(uuid.uuid4()) -@pytest.fixture +@pytest_asyncio.fixture(scope="session", loop_scope="session") async def test_user(server, test_user_id: str): """Create a test user in the database.""" await PrismaUser.prisma().create( @@ -67,7 +68,7 @@ async def test_user(server, test_user_id: str): await PrismaUser.prisma().delete(where={"id": test_user_id}) -@pytest.fixture +@pytest_asyncio.fixture async def test_oauth_app(test_user: str): """Create a test OAuth application in the database.""" app_id = str(uuid.uuid4()) @@ -122,7 +123,7 @@ def pkce_credentials() -> tuple[str, str]: return generate_pkce() -@pytest.fixture +@pytest_asyncio.fixture async def client(server, test_user: str) -> AsyncGenerator[httpx.AsyncClient, None]: """ Create an async HTTP client that talks directly to the FastAPI app. 
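
The fixture changes in this file follow one pattern: synchronous fixtures stay on `@pytest.fixture`, while async fixtures must be declared with `@pytest_asyncio.fixture` so they run on the managed event loop, with `loop_scope="session"` keeping setup, tests, and teardown on the same loop. A minimal sketch of that pattern (the fixture names and the in-memory store are illustrative, not from this diff):

```python
import uuid

import pytest
import pytest_asyncio

_fake_db: dict[str, dict] = {}  # stand-in for the real database


@pytest.fixture(scope="session")
def user_id() -> str:
    # Sync fixtures keep using plain pytest.fixture at any scope.
    return str(uuid.uuid4())


@pytest_asyncio.fixture(scope="session", loop_scope="session")
async def user(user_id: str):
    # Async fixtures must use pytest_asyncio.fixture; loop_scope="session"
    # keeps setup, all tests, and teardown on one session-wide event loop.
    _fake_db[user_id] = {"id": user_id}
    yield _fake_db[user_id]
    del _fake_db[user_id]


@pytest.mark.asyncio(loop_scope="session")
async def test_user_exists(user):
    assert user["id"] in _fake_db
```
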
@@ -287,7 +288,7 @@ async def test_authorize_invalid_client_returns_error( assert query_params["error"][0] == "invalid_client" -@pytest.fixture +@pytest_asyncio.fixture async def inactive_oauth_app(test_user: str): """Create an inactive test OAuth application in the database.""" app_id = str(uuid.uuid4()) @@ -1004,7 +1005,7 @@ async def test_token_refresh_revoked( assert "revoked" in response.json()["detail"].lower() -@pytest.fixture +@pytest_asyncio.fixture async def other_oauth_app(test_user: str): """Create a second OAuth application for cross-app tests.""" app_id = str(uuid.uuid4()) diff --git a/autogpt_platform/backend/backend/api/features/store/db.py b/autogpt_platform/backend/backend/api/features/store/db.py index e6aa3853f6..956fdfa7da 100644 --- a/autogpt_platform/backend/backend/api/features/store/db.py +++ b/autogpt_platform/backend/backend/api/features/store/db.py @@ -1552,7 +1552,7 @@ async def review_store_submission( # Generate embedding for approved listing (blocking - admin operation) # Inside transaction: if embedding fails, entire transaction rolls back - embedding_success = await ensure_embedding( + await ensure_embedding( version_id=store_listing_version_id, name=store_listing_version.name, description=store_listing_version.description, @@ -1560,12 +1560,6 @@ async def review_store_submission( categories=store_listing_version.categories or [], tx=tx, ) - if not embedding_success: - raise ValueError( - f"Failed to generate embedding for listing {store_listing_version_id}. " - "This is likely due to OpenAI API being unavailable. " - "Please try again later or contact support if the issue persists." - ) await prisma.models.StoreListing.prisma(tx).update( where={"id": store_listing_version.StoreListing.id}, diff --git a/autogpt_platform/backend/backend/api/features/store/embeddings.py b/autogpt_platform/backend/backend/api/features/store/embeddings.py index a421d8cd93..79a9a4e219 100644 --- a/autogpt_platform/backend/backend/api/features/store/embeddings.py +++ b/autogpt_platform/backend/backend/api/features/store/embeddings.py @@ -21,7 +21,6 @@ from backend.util.json import dumps logger = logging.getLogger(__name__) - # OpenAI embedding model configuration EMBEDDING_MODEL = "text-embedding-3-small" # Embedding dimension for the model above @@ -63,49 +62,42 @@ def build_searchable_text( return " ".join(parts) -async def generate_embedding(text: str) -> list[float] | None: +async def generate_embedding(text: str) -> list[float]: """ Generate embedding for text using OpenAI API. - Returns None if embedding generation fails. - Fail-fast: no retries to maintain consistency with approval flow. + Raises exceptions on failure - caller should handle. 
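+
+    Illustrative usage (the input text is hypothetical):
+
+        embedding = await generate_embedding("agent that summarizes RSS feeds")
+        assert len(embedding) == EMBEDDING_DIM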
""" - try: - client = get_openai_client() - if not client: - logger.error("openai_internal_api_key not set, cannot generate embedding") - return None + client = get_openai_client() + if not client: + raise RuntimeError("openai_internal_api_key not set, cannot generate embedding") - # Truncate text to token limit using tiktoken - # Character-based truncation is insufficient because token ratios vary by content type - enc = encoding_for_model(EMBEDDING_MODEL) - tokens = enc.encode(text) - if len(tokens) > EMBEDDING_MAX_TOKENS: - tokens = tokens[:EMBEDDING_MAX_TOKENS] - truncated_text = enc.decode(tokens) - logger.info( - f"Truncated text from {len(enc.encode(text))} to {len(tokens)} tokens" - ) - else: - truncated_text = text - - start_time = time.time() - response = await client.embeddings.create( - model=EMBEDDING_MODEL, - input=truncated_text, - ) - latency_ms = (time.time() - start_time) * 1000 - - embedding = response.data[0].embedding + # Truncate text to token limit using tiktoken + # Character-based truncation is insufficient because token ratios vary by content type + enc = encoding_for_model(EMBEDDING_MODEL) + tokens = enc.encode(text) + if len(tokens) > EMBEDDING_MAX_TOKENS: + tokens = tokens[:EMBEDDING_MAX_TOKENS] + truncated_text = enc.decode(tokens) logger.info( - f"Generated embedding: {len(embedding)} dims, " - f"{len(tokens)} tokens, {latency_ms:.0f}ms" + f"Truncated text from {len(enc.encode(text))} to {len(tokens)} tokens" ) - return embedding + else: + truncated_text = text - except Exception as e: - logger.error(f"Failed to generate embedding: {e}") - return None + start_time = time.time() + response = await client.embeddings.create( + model=EMBEDDING_MODEL, + input=truncated_text, + ) + latency_ms = (time.time() - start_time) * 1000 + + embedding = response.data[0].embedding + logger.info( + f"Generated embedding: {len(embedding)} dims, " + f"{len(tokens)} tokens, {latency_ms:.0f}ms" + ) + return embedding async def store_embedding( @@ -144,48 +136,45 @@ async def store_content_embedding( New function for unified content embedding storage. Uses raw SQL since Prisma doesn't natively support pgvector. + + Raises exceptions on failure - caller should handle. 
""" - try: - client = tx if tx else prisma.get_client() + client = tx if tx else prisma.get_client() - # Convert embedding to PostgreSQL vector format - embedding_str = embedding_to_vector_string(embedding) - metadata_json = dumps(metadata or {}) + # Convert embedding to PostgreSQL vector format + embedding_str = embedding_to_vector_string(embedding) + metadata_json = dumps(metadata or {}) - # Upsert the embedding - # WHERE clause in DO UPDATE prevents PostgreSQL 15 bug with NULLS NOT DISTINCT - await execute_raw_with_schema( - """ - INSERT INTO {schema_prefix}"UnifiedContentEmbedding" ( - "id", "contentType", "contentId", "userId", "embedding", "searchableText", "metadata", "createdAt", "updatedAt" - ) - VALUES (gen_random_uuid()::text, $1::{schema_prefix}"ContentType", $2, $3, $4::vector, $5, $6::jsonb, NOW(), NOW()) - ON CONFLICT ("contentType", "contentId", "userId") - DO UPDATE SET - "embedding" = $4::vector, - "searchableText" = $5, - "metadata" = $6::jsonb, - "updatedAt" = NOW() - WHERE {schema_prefix}"UnifiedContentEmbedding"."contentType" = $1::{schema_prefix}"ContentType" - AND {schema_prefix}"UnifiedContentEmbedding"."contentId" = $2 - AND ({schema_prefix}"UnifiedContentEmbedding"."userId" = $3 OR ($3 IS NULL AND {schema_prefix}"UnifiedContentEmbedding"."userId" IS NULL)) - """, - content_type, - content_id, - user_id, - embedding_str, - searchable_text, - metadata_json, - client=client, - set_public_search_path=True, + # Upsert the embedding + # WHERE clause in DO UPDATE prevents PostgreSQL 15 bug with NULLS NOT DISTINCT + # Use unqualified ::vector - pgvector is in search_path on all environments + await execute_raw_with_schema( + """ + INSERT INTO {schema_prefix}"UnifiedContentEmbedding" ( + "id", "contentType", "contentId", "userId", "embedding", "searchableText", "metadata", "createdAt", "updatedAt" ) + VALUES (gen_random_uuid()::text, $1::{schema_prefix}"ContentType", $2, $3, $4::vector, $5, $6::jsonb, NOW(), NOW()) + ON CONFLICT ("contentType", "contentId", "userId") + DO UPDATE SET + "embedding" = $4::vector, + "searchableText" = $5, + "metadata" = $6::jsonb, + "updatedAt" = NOW() + WHERE {schema_prefix}"UnifiedContentEmbedding"."contentType" = $1::{schema_prefix}"ContentType" + AND {schema_prefix}"UnifiedContentEmbedding"."contentId" = $2 + AND ({schema_prefix}"UnifiedContentEmbedding"."userId" = $3 OR ($3 IS NULL AND {schema_prefix}"UnifiedContentEmbedding"."userId" IS NULL)) + """, + content_type, + content_id, + user_id, + embedding_str, + searchable_text, + metadata_json, + client=client, + ) - logger.info(f"Stored embedding for {content_type}:{content_id}") - return True - - except Exception as e: - logger.error(f"Failed to store embedding for {content_type}:{content_id}: {e}") - return False + logger.info(f"Stored embedding for {content_type}:{content_id}") + return True async def get_embedding(version_id: str) -> dict[str, Any] | None: @@ -217,35 +206,31 @@ async def get_content_embedding( New function for unified content embedding retrieval. Returns dict with contentType, contentId, embedding, timestamps or None if not found. + + Raises exceptions on failure - caller should handle. 
""" - try: - result = await query_raw_with_schema( - """ - SELECT - "contentType", - "contentId", - "userId", - "embedding"::text as "embedding", - "searchableText", - "metadata", - "createdAt", - "updatedAt" - FROM {schema_prefix}"UnifiedContentEmbedding" - WHERE "contentType" = $1::{schema_prefix}"ContentType" AND "contentId" = $2 AND ("userId" = $3 OR ($3 IS NULL AND "userId" IS NULL)) - """, - content_type, - content_id, - user_id, - set_public_search_path=True, - ) + result = await query_raw_with_schema( + """ + SELECT + "contentType", + "contentId", + "userId", + "embedding"::text as "embedding", + "searchableText", + "metadata", + "createdAt", + "updatedAt" + FROM {schema_prefix}"UnifiedContentEmbedding" + WHERE "contentType" = $1::{schema_prefix}"ContentType" AND "contentId" = $2 AND ("userId" = $3 OR ($3 IS NULL AND "userId" IS NULL)) + """, + content_type, + content_id, + user_id, + ) - if result and len(result) > 0: - return result[0] - return None - - except Exception as e: - logger.error(f"Failed to get embedding for {content_type}:{content_id}: {e}") - return None + if result and len(result) > 0: + return result[0] + return None async def ensure_embedding( @@ -273,46 +258,38 @@ async def ensure_embedding( tx: Optional transaction client Returns: - True if embedding exists/was created, False on failure + True if embedding exists/was created + + Raises exceptions on failure - caller should handle. """ - try: - # Check if embedding already exists - if not force: - existing = await get_embedding(version_id) - if existing and existing.get("embedding"): - logger.debug(f"Embedding for version {version_id} already exists") - return True + # Check if embedding already exists + if not force: + existing = await get_embedding(version_id) + if existing and existing.get("embedding"): + logger.debug(f"Embedding for version {version_id} already exists") + return True - # Build searchable text for embedding - searchable_text = build_searchable_text( - name, description, sub_heading, categories - ) + # Build searchable text for embedding + searchable_text = build_searchable_text(name, description, sub_heading, categories) - # Generate new embedding - embedding = await generate_embedding(searchable_text) - if embedding is None: - logger.warning(f"Could not generate embedding for version {version_id}") - return False + # Generate new embedding + embedding = await generate_embedding(searchable_text) - # Store the embedding with metadata using new function - metadata = { - "name": name, - "subHeading": sub_heading, - "categories": categories, - } - return await store_content_embedding( - content_type=ContentType.STORE_AGENT, - content_id=version_id, - embedding=embedding, - searchable_text=searchable_text, - metadata=metadata, - user_id=None, # Store agents are public - tx=tx, - ) - - except Exception as e: - logger.error(f"Failed to ensure embedding for version {version_id}: {e}") - return False + # Store the embedding with metadata using new function + metadata = { + "name": name, + "subHeading": sub_heading, + "categories": categories, + } + return await store_content_embedding( + content_type=ContentType.STORE_AGENT, + content_id=version_id, + embedding=embedding, + searchable_text=searchable_text, + metadata=metadata, + user_id=None, # Store agents are public + tx=tx, + ) async def delete_embedding(version_id: str) -> bool: @@ -522,6 +499,24 @@ async def backfill_all_content_types(batch_size: int = 10) -> dict[str, Any]: success = sum(1 for result in results if result is True) failed = 
len(results) - success + # Aggregate unique errors to avoid Sentry spam + if failed > 0: + # Group errors by type and message + error_summary: dict[str, int] = {} + for result in results: + if isinstance(result, Exception): + error_key = f"{type(result).__name__}: {str(result)}" + error_summary[error_key] = error_summary.get(error_key, 0) + 1 + + # Log aggregated error summary + error_details = ", ".join( + f"{error} ({count}x)" for error, count in error_summary.items() + ) + logger.error( + f"{content_type.value}: {failed}/{len(results)} embeddings failed. " + f"Errors: {error_details}" + ) + results_by_type[content_type.value] = { "processed": len(missing_items), "success": success, @@ -558,11 +553,12 @@ async def backfill_all_content_types(batch_size: int = 10) -> dict[str, Any]: } -async def embed_query(query: str) -> list[float] | None: +async def embed_query(query: str) -> list[float]: """ Generate embedding for a search query. Same as generate_embedding but with clearer intent. + Raises exceptions on failure - caller should handle. """ return await generate_embedding(query) @@ -595,40 +591,30 @@ async def ensure_content_embedding( tx: Optional transaction client Returns: - True if embedding exists/was created, False on failure + True if embedding exists/was created + + Raises exceptions on failure - caller should handle. """ - try: - # Check if embedding already exists - if not force: - existing = await get_content_embedding(content_type, content_id, user_id) - if existing and existing.get("embedding"): - logger.debug( - f"Embedding for {content_type}:{content_id} already exists" - ) - return True + # Check if embedding already exists + if not force: + existing = await get_content_embedding(content_type, content_id, user_id) + if existing and existing.get("embedding"): + logger.debug(f"Embedding for {content_type}:{content_id} already exists") + return True - # Generate new embedding - embedding = await generate_embedding(searchable_text) - if embedding is None: - logger.warning( - f"Could not generate embedding for {content_type}:{content_id}" - ) - return False + # Generate new embedding + embedding = await generate_embedding(searchable_text) - # Store the embedding - return await store_content_embedding( - content_type=content_type, - content_id=content_id, - embedding=embedding, - searchable_text=searchable_text, - metadata=metadata or {}, - user_id=user_id, - tx=tx, - ) - - except Exception as e: - logger.error(f"Failed to ensure embedding for {content_type}:{content_id}: {e}") - return False + # Store the embedding + return await store_content_embedding( + content_type=content_type, + content_id=content_id, + embedding=embedding, + searchable_text=searchable_text, + metadata=metadata or {}, + user_id=user_id, + tx=tx, + ) async def cleanup_orphaned_embeddings() -> dict[str, Any]: @@ -855,9 +841,8 @@ async def semantic_search( limit = 100 # Generate query embedding - query_embedding = await embed_query(query) - - if query_embedding is not None: + try: + query_embedding = await embed_query(query) # Semantic search with embeddings embedding_str = embedding_to_vector_string(query_embedding) @@ -871,47 +856,58 @@ async def semantic_search( # Add content type parameters and build placeholders dynamically content_type_start_idx = len(params) + 1 content_type_placeholders = ", ".join( - f'${content_type_start_idx + i}::{{{{schema_prefix}}}}"ContentType"' + "$" + str(content_type_start_idx + i) + '::{schema_prefix}"ContentType"' for i in range(len(content_types)) ) 
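+        # Each $N placeholder index is derived from len(params) before the
+        # matching value is appended, keeping the SQL text and the positional
+        # argument list in sync.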
params.extend([ct.value for ct in content_types]) - sql = f""" + # Build min_similarity param index before appending + min_similarity_idx = len(params) + 1 + params.append(min_similarity) + + # Use unqualified ::vector and <=> operator - pgvector is in search_path on all environments + sql = ( + """ SELECT "contentId" as content_id, "contentType" as content_type, "searchableText" as searchable_text, metadata, - 1 - (embedding <=> '{embedding_str}'::vector) as similarity - FROM {{{{schema_prefix}}}}"UnifiedContentEmbedding" - WHERE "contentType" IN ({content_type_placeholders}) - {user_filter} - AND 1 - (embedding <=> '{embedding_str}'::vector) >= ${len(params) + 1} + 1 - (embedding <=> '""" + + embedding_str + + """'::vector) as similarity + FROM {schema_prefix}"UnifiedContentEmbedding" + WHERE "contentType" IN (""" + + content_type_placeholders + + """) + """ + + user_filter + + """ + AND 1 - (embedding <=> '""" + + embedding_str + + """'::vector) >= $""" + + str(min_similarity_idx) + + """ ORDER BY similarity DESC LIMIT $1 """ - params.append(min_similarity) + ) - try: - results = await query_raw_with_schema( - sql, *params, set_public_search_path=True - ) - return [ - { - "content_id": row["content_id"], - "content_type": row["content_type"], - "searchable_text": row["searchable_text"], - "metadata": row["metadata"], - "similarity": float(row["similarity"]), - } - for row in results - ] - except Exception as e: - logger.error(f"Semantic search failed: {e}") - # Fall through to lexical search below + results = await query_raw_with_schema(sql, *params) + return [ + { + "content_id": row["content_id"], + "content_type": row["content_type"], + "searchable_text": row["searchable_text"], + "metadata": row["metadata"], + "similarity": float(row["similarity"]), + } + for row in results + ] + except Exception as e: + logger.warning(f"Semantic search failed, falling back to lexical search: {e}") # Fallback to lexical search if embeddings unavailable - logger.warning("Falling back to lexical search (embeddings unavailable)") params_lexical: list[Any] = [limit] user_filter = "" @@ -922,31 +918,41 @@ async def semantic_search( # Add content type parameters and build placeholders dynamically content_type_start_idx = len(params_lexical) + 1 content_type_placeholders_lexical = ", ".join( - f'${content_type_start_idx + i}::{{{{schema_prefix}}}}"ContentType"' + "$" + str(content_type_start_idx + i) + '::{schema_prefix}"ContentType"' for i in range(len(content_types)) ) params_lexical.extend([ct.value for ct in content_types]) - sql_lexical = f""" + # Build query param index before appending + query_param_idx = len(params_lexical) + 1 + params_lexical.append(f"%{query}%") + + # Use regular string (not f-string) for template to preserve {schema_prefix} placeholders + sql_lexical = ( + """ SELECT "contentId" as content_id, "contentType" as content_type, "searchableText" as searchable_text, metadata, 0.0 as similarity - FROM {{{{schema_prefix}}}}"UnifiedContentEmbedding" - WHERE "contentType" IN ({content_type_placeholders_lexical}) - {user_filter} - AND "searchableText" ILIKE ${len(params_lexical) + 1} + FROM {schema_prefix}"UnifiedContentEmbedding" + WHERE "contentType" IN (""" + + content_type_placeholders_lexical + + """) + """ + + user_filter + + """ + AND "searchableText" ILIKE $""" + + str(query_param_idx) + + """ ORDER BY "updatedAt" DESC LIMIT $1 """ - params_lexical.append(f"%{query}%") + ) try: - results = await query_raw_with_schema( - sql_lexical, *params_lexical, set_public_search_path=True - ) 
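+        # Lexical fallback rows carry similarity 0.0 and are ordered by
+        # "updatedAt", so callers can tell them apart from semantic hits.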
+ results = await query_raw_with_schema(sql_lexical, *params_lexical) return [ { "content_id": row["content_id"], diff --git a/autogpt_platform/backend/backend/api/features/store/embeddings_schema_test.py b/autogpt_platform/backend/backend/api/features/store/embeddings_schema_test.py index 7ba200fda0..5aa13b4d23 100644 --- a/autogpt_platform/backend/backend/api/features/store/embeddings_schema_test.py +++ b/autogpt_platform/backend/backend/api/features/store/embeddings_schema_test.py @@ -298,17 +298,16 @@ async def test_schema_handling_error_cases(): mock_client.execute_raw.side_effect = Exception("Database error") mock_get_client.return_value = mock_client - result = await embeddings.store_content_embedding( - content_type=ContentType.STORE_AGENT, - content_id="test-id", - embedding=[0.1] * EMBEDDING_DIM, - searchable_text="test", - metadata=None, - user_id=None, - ) - - # Should return False on error, not raise - assert result is False + # Should raise exception on error + with pytest.raises(Exception, match="Database error"): + await embeddings.store_content_embedding( + content_type=ContentType.STORE_AGENT, + content_id="test-id", + embedding=[0.1] * EMBEDDING_DIM, + searchable_text="test", + metadata=None, + user_id=None, + ) if __name__ == "__main__": diff --git a/autogpt_platform/backend/backend/api/features/store/embeddings_test.py b/autogpt_platform/backend/backend/api/features/store/embeddings_test.py index a17e393472..0d5e5ce4a2 100644 --- a/autogpt_platform/backend/backend/api/features/store/embeddings_test.py +++ b/autogpt_platform/backend/backend/api/features/store/embeddings_test.py @@ -80,9 +80,8 @@ async def test_generate_embedding_no_api_key(): ) as mock_get_client: mock_get_client.return_value = None - result = await embeddings.generate_embedding("test text") - - assert result is None + with pytest.raises(RuntimeError, match="openai_internal_api_key not set"): + await embeddings.generate_embedding("test text") @pytest.mark.asyncio(loop_scope="session") @@ -97,9 +96,8 @@ async def test_generate_embedding_api_error(): ) as mock_get_client: mock_get_client.return_value = mock_client - result = await embeddings.generate_embedding("test text") - - assert result is None + with pytest.raises(Exception, match="API Error"): + await embeddings.generate_embedding("test text") @pytest.mark.asyncio(loop_scope="session") @@ -155,18 +153,14 @@ async def test_store_embedding_success(mocker): ) assert result is True - # execute_raw is called twice: once for SET search_path, once for INSERT - assert mock_client.execute_raw.call_count == 2 + # execute_raw is called once for INSERT (no separate SET search_path needed) + assert mock_client.execute_raw.call_count == 1 - # First call: SET search_path - first_call_args = mock_client.execute_raw.call_args_list[0][0] - assert "SET search_path" in first_call_args[0] - - # Second call: INSERT query with the actual data - second_call_args = mock_client.execute_raw.call_args_list[1][0] - assert "test-version-id" in second_call_args - assert "[0.1,0.2,0.3]" in second_call_args - assert None in second_call_args # userId should be None for store agents + # Verify the INSERT query with the actual data + call_args = mock_client.execute_raw.call_args_list[0][0] + assert "test-version-id" in call_args + assert "[0.1,0.2,0.3]" in call_args + assert None in call_args # userId should be None for store agents @pytest.mark.asyncio(loop_scope="session") @@ -177,11 +171,10 @@ async def test_store_embedding_database_error(mocker): embedding = [0.1, 0.2, 0.3] - 
result = await embeddings.store_embedding( - version_id="test-version-id", embedding=embedding, tx=mock_client - ) - - assert result is False + with pytest.raises(Exception, match="Database error"): + await embeddings.store_embedding( + version_id="test-version-id", embedding=embedding, tx=mock_client + ) @pytest.mark.asyncio(loop_scope="session") @@ -281,17 +274,16 @@ async def test_ensure_embedding_create_new(mock_get, mock_store, mock_generate): async def test_ensure_embedding_generation_fails(mock_get, mock_generate): """Test ensure_embedding when generation fails.""" mock_get.return_value = None - mock_generate.return_value = None + mock_generate.side_effect = Exception("Generation failed") - result = await embeddings.ensure_embedding( - version_id="test-id", - name="Test", - description="Test description", - sub_heading="Test heading", - categories=["test"], - ) - - assert result is False + with pytest.raises(Exception, match="Generation failed"): + await embeddings.ensure_embedding( + version_id="test-id", + name="Test", + description="Test description", + sub_heading="Test heading", + categories=["test"], + ) @pytest.mark.asyncio(loop_scope="session") diff --git a/autogpt_platform/backend/backend/api/features/store/hybrid_search.py b/autogpt_platform/backend/backend/api/features/store/hybrid_search.py index f9c831f93b..8b0884bb24 100644 --- a/autogpt_platform/backend/backend/api/features/store/hybrid_search.py +++ b/autogpt_platform/backend/backend/api/features/store/hybrid_search.py @@ -12,7 +12,7 @@ from dataclasses import dataclass from typing import Any, Literal from prisma.enums import ContentType -from rank_bm25 import BM25Okapi +from rank_bm25 import BM25Okapi # type: ignore[import-untyped] from backend.api.features.store.embeddings import ( EMBEDDING_DIM, @@ -186,13 +186,12 @@ async def unified_hybrid_search( offset = (page - 1) * page_size - # Generate query embedding - query_embedding = await embed_query(query) - - # Graceful degradation if embedding unavailable - if query_embedding is None or not query_embedding: + # Generate query embedding with graceful degradation + try: + query_embedding = await embed_query(query) + except Exception as e: logger.warning( - "Failed to generate query embedding - falling back to lexical-only search. " + f"Failed to generate query embedding - falling back to lexical-only search: {e}. " "Check that openai_internal_api_key is configured and OpenAI API is accessible." ) query_embedding = [0.0] * EMBEDDING_DIM @@ -363,9 +362,7 @@ async def unified_hybrid_search( LIMIT {limit_param} OFFSET {offset_param} """ - results = await query_raw_with_schema( - sql_query, *params, set_public_search_path=True - ) + results = await query_raw_with_schema(sql_query, *params) total = results[0]["total_count"] if results else 0 # Apply BM25 reranking @@ -466,13 +463,12 @@ async def hybrid_search( offset = (page - 1) * page_size - # Generate query embedding - query_embedding = await embed_query(query) - - # Graceful degradation - if query_embedding is None or not query_embedding: + # Generate query embedding with graceful degradation + try: + query_embedding = await embed_query(query) + except Exception as e: logger.warning( - "Failed to generate query embedding - falling back to lexical-only search." 
+ f"Failed to generate query embedding - falling back to lexical-only search: {e}" ) query_embedding = [0.0] * EMBEDDING_DIM total_non_semantic = ( @@ -688,9 +684,7 @@ async def hybrid_search( LIMIT {limit_param} OFFSET {offset_param} """ - results = await query_raw_with_schema( - sql_query, *params, set_public_search_path=True - ) + results = await query_raw_with_schema(sql_query, *params) total = results[0]["total_count"] if results else 0 diff --git a/autogpt_platform/backend/backend/api/features/store/hybrid_search_test.py b/autogpt_platform/backend/backend/api/features/store/hybrid_search_test.py index 7f942927a5..58989fbb41 100644 --- a/autogpt_platform/backend/backend/api/features/store/hybrid_search_test.py +++ b/autogpt_platform/backend/backend/api/features/store/hybrid_search_test.py @@ -172,8 +172,8 @@ async def test_hybrid_search_without_embeddings(): with patch( "backend.api.features.store.hybrid_search.query_raw_with_schema" ) as mock_query: - # Simulate embedding failure - mock_embed.return_value = None + # Simulate embedding failure by raising exception + mock_embed.side_effect = Exception("Embedding generation failed") mock_query.return_value = mock_results # Should NOT raise - graceful degradation @@ -613,7 +613,9 @@ async def test_unified_hybrid_search_graceful_degradation(): "backend.api.features.store.hybrid_search.embed_query" ) as mock_embed: mock_query.return_value = mock_results - mock_embed.return_value = None # Embedding failure + mock_embed.side_effect = Exception( + "Embedding generation failed" + ) # Embedding failure # Should NOT raise - graceful degradation results, total = await unified_hybrid_search( diff --git a/autogpt_platform/backend/backend/api/features/v1.py b/autogpt_platform/backend/backend/api/features/v1.py index 661e8ff7f2..51789f9e2b 100644 --- a/autogpt_platform/backend/backend/api/features/v1.py +++ b/autogpt_platform/backend/backend/api/features/v1.py @@ -364,6 +364,8 @@ async def execute_graph_block( obj = get_block(block_id) if not obj: raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.") + if obj.disabled: + raise HTTPException(status_code=403, detail=f"Block #{block_id} is disabled.") user = await get_user_by_id(user_id) if not user: @@ -761,10 +763,8 @@ async def create_new_graph( graph.reassign_ids(user_id=user_id, reassign_graph_id=True) graph.validate_graph(for_run=False) - # The return value of the create graph & library function is intentionally not used here, - # as the graph already valid and no sub-graphs are returned back. await graph_db.create_graph(graph, user_id=user_id) - await library_db.create_library_agent(graph, user_id=user_id) + await library_db.create_library_agent(graph, user_id) activated_graph = await on_graph_activate(graph, user_id=user_id) if create_graph.source == "builder": @@ -888,21 +888,19 @@ async def set_graph_active_version( async def _update_library_agent_version_and_settings( user_id: str, agent_graph: graph_db.GraphModel ) -> library_model.LibraryAgent: - # Keep the library agent up to date with the new active version library = await library_db.update_agent_version_in_library( user_id, agent_graph.id, agent_graph.version ) - # If the graph has HITL node, initialize the setting if it's not already set. 
- if ( - agent_graph.has_human_in_the_loop - and library.settings.human_in_the_loop_safe_mode is None - ): - await library_db.update_library_agent_settings( + updated_settings = GraphSettings.from_graph( + graph=agent_graph, + hitl_safe_mode=library.settings.human_in_the_loop_safe_mode, + sensitive_action_safe_mode=library.settings.sensitive_action_safe_mode, + ) + if updated_settings != library.settings: + library = await library_db.update_library_agent( + library_agent_id=library.id, user_id=user_id, - agent_id=library.id, - settings=library.settings.model_copy( - update={"human_in_the_loop_safe_mode": True} - ), + settings=updated_settings, ) return library @@ -919,21 +917,18 @@ async def update_graph_settings( user_id: Annotated[str, Security(get_user_id)], ) -> GraphSettings: """Update graph settings for the user's library agent.""" - # Get the library agent for this graph library_agent = await library_db.get_library_agent_by_graph_id( graph_id=graph_id, user_id=user_id ) if not library_agent: raise HTTPException(404, f"Graph #{graph_id} not found in user's library") - # Update the library agent settings - updated_agent = await library_db.update_library_agent_settings( + updated_agent = await library_db.update_library_agent( + library_agent_id=library_agent.id, user_id=user_id, - agent_id=library_agent.id, settings=settings, ) - # Return the updated settings return GraphSettings.model_validate(updated_agent.settings) diff --git a/autogpt_platform/backend/backend/api/features/v1_test.py b/autogpt_platform/backend/backend/api/features/v1_test.py index a186d38810..d57ad49949 100644 --- a/autogpt_platform/backend/backend/api/features/v1_test.py +++ b/autogpt_platform/backend/backend/api/features/v1_test.py @@ -138,6 +138,7 @@ def test_execute_graph_block( """Test execute block endpoint""" # Mock block mock_block = Mock() + mock_block.disabled = False async def mock_execute(*args, **kwargs): yield "output1", {"data": "result1"} diff --git a/autogpt_platform/backend/backend/blocks/basic.py b/autogpt_platform/backend/backend/blocks/basic.py index 4d452f3b34..a9c77e2b93 100644 --- a/autogpt_platform/backend/backend/blocks/basic.py +++ b/autogpt_platform/backend/backend/blocks/basic.py @@ -116,6 +116,7 @@ class PrintToConsoleBlock(Block): input_schema=PrintToConsoleBlock.Input, output_schema=PrintToConsoleBlock.Output, test_input={"text": "Hello, World!"}, + is_sensitive_action=True, test_output=[ ("output", "Hello, World!"), ("status", "printed"), diff --git a/autogpt_platform/backend/backend/blocks/claude_code.py b/autogpt_platform/backend/backend/blocks/claude_code.py new file mode 100644 index 0000000000..4ef44603b2 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/claude_code.py @@ -0,0 +1,659 @@ +import json +import shlex +import uuid +from typing import Literal, Optional + +from e2b import AsyncSandbox as BaseAsyncSandbox +from pydantic import BaseModel, SecretStr + +from backend.data.block import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + SchemaField, +) +from backend.integrations.providers import ProviderName + + +class ClaudeCodeExecutionError(Exception): + """Exception raised when Claude Code execution fails. + + Carries the sandbox_id so it can be returned to the user for cleanup + when dispose_sandbox=False. 
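+
+    Illustrative handling (sketch):
+
+        try:
+            ...  # run the block's sandbox workflow
+        except ClaudeCodeExecutionError as e:
+            if e.sandbox_id:
+                # The sandbox was preserved; reconnect to it or kill it.
+                print(f"sandbox {e.sandbox_id} is still running")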
+ """ + + def __init__(self, message: str, sandbox_id: str = ""): + super().__init__(message) + self.sandbox_id = sandbox_id + + +# Test credentials for E2B +TEST_E2B_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="e2b", + api_key=SecretStr("mock-e2b-api-key"), + title="Mock E2B API key", + expires_at=None, +) +TEST_E2B_CREDENTIALS_INPUT = { + "provider": TEST_E2B_CREDENTIALS.provider, + "id": TEST_E2B_CREDENTIALS.id, + "type": TEST_E2B_CREDENTIALS.type, + "title": TEST_E2B_CREDENTIALS.title, +} + +# Test credentials for Anthropic +TEST_ANTHROPIC_CREDENTIALS = APIKeyCredentials( + id="2e568a2b-b2ea-475a-8564-9a676bf31c56", + provider="anthropic", + api_key=SecretStr("mock-anthropic-api-key"), + title="Mock Anthropic API key", + expires_at=None, +) +TEST_ANTHROPIC_CREDENTIALS_INPUT = { + "provider": TEST_ANTHROPIC_CREDENTIALS.provider, + "id": TEST_ANTHROPIC_CREDENTIALS.id, + "type": TEST_ANTHROPIC_CREDENTIALS.type, + "title": TEST_ANTHROPIC_CREDENTIALS.title, +} + + +class ClaudeCodeBlock(Block): + """ + Execute tasks using Claude Code (Anthropic's AI coding assistant) in an E2B sandbox. + + Claude Code can create files, install tools, run commands, and perform complex + coding tasks autonomously within a secure sandbox environment. + """ + + # Use base template - we'll install Claude Code ourselves for latest version + DEFAULT_TEMPLATE = "base" + + class Input(BlockSchemaInput): + e2b_credentials: CredentialsMetaInput[ + Literal[ProviderName.E2B], Literal["api_key"] + ] = CredentialsField( + description=( + "API key for the E2B platform to create the sandbox. " + "Get one on the [e2b website](https://e2b.dev/docs)" + ), + ) + + anthropic_credentials: CredentialsMetaInput[ + Literal[ProviderName.ANTHROPIC], Literal["api_key"] + ] = CredentialsField( + description=( + "API key for Anthropic to power Claude Code. " + "Get one at [Anthropic's website](https://console.anthropic.com)" + ), + ) + + prompt: str = SchemaField( + description=( + "The task or instruction for Claude Code to execute. " + "Claude Code can create files, install packages, run commands, " + "and perform complex coding tasks." + ), + placeholder="Create a hello world index.html file", + default="", + advanced=False, + ) + + timeout: int = SchemaField( + description=( + "Sandbox timeout in seconds. Claude Code tasks can take " + "a while, so set this appropriately for your task complexity. " + "Note: This only applies when creating a new sandbox. " + "When reconnecting to an existing sandbox via sandbox_id, " + "the original timeout is retained." + ), + default=300, # 5 minutes default + advanced=True, + ) + + setup_commands: list[str] = SchemaField( + description=( + "Optional shell commands to run before executing Claude Code. " + "Useful for installing dependencies or setting up the environment." + ), + default_factory=list, + advanced=True, + ) + + working_directory: str = SchemaField( + description="Working directory for Claude Code to operate in.", + default="/home/user", + advanced=True, + ) + + # Session/continuation support + session_id: str = SchemaField( + description=( + "Session ID to resume a previous conversation. " + "Leave empty for a new conversation. " + "Use the session_id from a previous run to continue that conversation." + ), + default="", + advanced=True, + ) + + sandbox_id: str = SchemaField( + description=( + "Sandbox ID to reconnect to an existing sandbox. " + "Required when resuming a session (along with session_id). 
" + "Use the sandbox_id from a previous run where dispose_sandbox was False." + ), + default="", + advanced=True, + ) + + conversation_history: str = SchemaField( + description=( + "Previous conversation history to continue from. " + "Use this to restore context on a fresh sandbox if the previous one timed out. " + "Pass the conversation_history output from a previous run." + ), + default="", + advanced=True, + ) + + dispose_sandbox: bool = SchemaField( + description=( + "Whether to dispose of the sandbox immediately after execution. " + "Set to False if you want to continue the conversation later " + "(you'll need both sandbox_id and session_id from the output)." + ), + default=True, + advanced=True, + ) + + class FileOutput(BaseModel): + """A file extracted from the sandbox.""" + + path: str + relative_path: str # Path relative to working directory (for GitHub, etc.) + name: str + content: str + + class Output(BlockSchemaOutput): + response: str = SchemaField( + description="The output/response from Claude Code execution" + ) + files: list["ClaudeCodeBlock.FileOutput"] = SchemaField( + description=( + "List of text files created/modified by Claude Code during this execution. " + "Each file has 'path', 'relative_path', 'name', and 'content' fields." + ) + ) + conversation_history: str = SchemaField( + description=( + "Full conversation history including this turn. " + "Pass this to conversation_history input to continue on a fresh sandbox " + "if the previous sandbox timed out." + ) + ) + session_id: str = SchemaField( + description=( + "Session ID for this conversation. " + "Pass this back along with sandbox_id to continue the conversation." + ) + ) + sandbox_id: Optional[str] = SchemaField( + description=( + "ID of the sandbox instance. " + "Pass this back along with session_id to continue the conversation. " + "This is None if dispose_sandbox was True (sandbox was disposed)." + ), + default=None, + ) + error: str = SchemaField(description="Error message if execution failed") + + def __init__(self): + super().__init__( + id="4e34f4a5-9b89-4326-ba77-2dd6750b7194", + description=( + "Execute tasks using Claude Code in an E2B sandbox. " + "Claude Code can create files, install tools, run commands, " + "and perform complex coding tasks autonomously." 
+ ), + categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.AI}, + input_schema=ClaudeCodeBlock.Input, + output_schema=ClaudeCodeBlock.Output, + test_credentials={ + "e2b_credentials": TEST_E2B_CREDENTIALS, + "anthropic_credentials": TEST_ANTHROPIC_CREDENTIALS, + }, + test_input={ + "e2b_credentials": TEST_E2B_CREDENTIALS_INPUT, + "anthropic_credentials": TEST_ANTHROPIC_CREDENTIALS_INPUT, + "prompt": "Create a hello world HTML file", + "timeout": 300, + "setup_commands": [], + "working_directory": "/home/user", + "session_id": "", + "sandbox_id": "", + "conversation_history": "", + "dispose_sandbox": True, + }, + test_output=[ + ("response", "Created index.html with hello world content"), + ( + "files", + [ + { + "path": "/home/user/index.html", + "relative_path": "index.html", + "name": "index.html", + "content": "Hello World", + } + ], + ), + ( + "conversation_history", + "User: Create a hello world HTML file\n" + "Claude: Created index.html with hello world content", + ), + ("session_id", str), + ("sandbox_id", None), # None because dispose_sandbox=True in test_input + ], + test_mock={ + "execute_claude_code": lambda *args, **kwargs: ( + "Created index.html with hello world content", # response + [ + ClaudeCodeBlock.FileOutput( + path="/home/user/index.html", + relative_path="index.html", + name="index.html", + content="Hello World", + ) + ], # files + "User: Create a hello world HTML file\n" + "Claude: Created index.html with hello world content", # conversation_history + "test-session-id", # session_id + "sandbox_id", # sandbox_id + ), + }, + ) + + async def execute_claude_code( + self, + e2b_api_key: str, + anthropic_api_key: str, + prompt: str, + timeout: int, + setup_commands: list[str], + working_directory: str, + session_id: str, + existing_sandbox_id: str, + conversation_history: str, + dispose_sandbox: bool, + ) -> tuple[str, list["ClaudeCodeBlock.FileOutput"], str, str, str]: + """ + Execute Claude Code in an E2B sandbox. + + Returns: + Tuple of (response, files, conversation_history, session_id, sandbox_id) + """ + + # Validate that sandbox_id is provided when resuming a session + if session_id and not existing_sandbox_id: + raise ValueError( + "sandbox_id is required when resuming a session with session_id. " + "The session state is stored in the original sandbox. " + "If the sandbox has timed out, use conversation_history instead " + "to restore context on a fresh sandbox." 
+            )
+
+        sandbox = None
+        sandbox_id = ""
+
+        try:
+            # Either reconnect to existing sandbox or create a new one
+            if existing_sandbox_id:
+                # Reconnect to existing sandbox for conversation continuation
+                sandbox = await BaseAsyncSandbox.connect(
+                    sandbox_id=existing_sandbox_id,
+                    api_key=e2b_api_key,
+                )
+            else:
+                # Create new sandbox
+                sandbox = await BaseAsyncSandbox.create(
+                    template=self.DEFAULT_TEMPLATE,
+                    api_key=e2b_api_key,
+                    timeout=timeout,
+                    envs={"ANTHROPIC_API_KEY": anthropic_api_key},
+                )
+
+            # Capture sandbox_id immediately after creation/connection so it is
+            # available for error recovery if dispose_sandbox=False, even when
+            # the install or setup commands below fail
+            sandbox_id = sandbox.sandbox_id
+
+            # Install Claude Code from npm (ensures we get the latest version)
+            install_result = await sandbox.commands.run(
+                "npm install -g @anthropic-ai/claude-code@latest",
+                timeout=120,  # 2 min timeout for install
+            )
+            if install_result.exit_code != 0:
+                raise Exception(
+                    f"Failed to install Claude Code: {install_result.stderr}"
+                )
+
+            # Run any user-provided setup commands
+            for cmd in setup_commands:
+                setup_result = await sandbox.commands.run(cmd)
+                if setup_result.exit_code != 0:
+                    raise Exception(
+                        f"Setup command failed: {cmd}\n"
+                        f"Exit code: {setup_result.exit_code}\n"
+                        f"Stdout: {setup_result.stdout}\n"
+                        f"Stderr: {setup_result.stderr}"
+                    )
+
+            # Generate or use provided session ID
+            current_session_id = session_id if session_id else str(uuid.uuid4())
+
+            # Build base Claude flags
+            base_flags = "-p --dangerously-skip-permissions --output-format json"
+
+            # Add conversation history context if provided (for fresh sandbox continuation)
+            history_flag = ""
+            if conversation_history and not session_id:
+                # Inject previous conversation as context via system prompt
+                # Use consistent escaping via _escape_prompt helper
+                escaped_history = self._escape_prompt(
+                    f"Previous conversation context: {conversation_history}"
+                )
+                history_flag = f" --append-system-prompt {escaped_history}"
+
+            # Build Claude command based on whether we're resuming or starting new
+            # Use shlex.quote for working_directory and session IDs to prevent injection
+            safe_working_dir = shlex.quote(working_directory)
+            if session_id:
+                # Resuming existing session (sandbox still alive)
+                safe_session_id = shlex.quote(session_id)
+                claude_command = (
+                    f"cd {safe_working_dir} && "
+                    f"echo {self._escape_prompt(prompt)} | "
+                    f"claude --resume {safe_session_id} {base_flags}"
+                )
+            else:
+                # New session with specific ID
+                safe_current_session_id = shlex.quote(current_session_id)
+                claude_command = (
+                    f"cd {safe_working_dir} && "
+                    f"echo {self._escape_prompt(prompt)} | "
+                    f"claude --session-id {safe_current_session_id} {base_flags}{history_flag}"
+                )
+
+            # Capture a timestamp (1 second in the past, to avoid racing file
+            # creation) so _extract_files can filter to files from this run
+            timestamp_result = await sandbox.commands.run(
+                "date -u -d '1 second ago' +%Y-%m-%dT%H:%M:%S"
+            )
+            if timestamp_result.exit_code != 0:
+                raise RuntimeError(
+                    f"Failed to capture timestamp: {timestamp_result.stderr}"
+                )
+            start_timestamp = (
+                timestamp_result.stdout.strip() if timestamp_result.stdout else None
+            )
+
+            result = await sandbox.commands.run(
+                claude_command,
+                timeout=0,  # No command timeout - let sandbox timeout handle it
+            )
+
+            # Check for command failure
+            if result.exit_code != 0:
+                error_msg = result.stderr or result.stdout or "Unknown error"
+                raise Exception(
+                    f"Claude Code command failed with exit code {result.exit_code}:\n"
+ f"{error_msg}" + ) + + raw_output = result.stdout or "" + + # Parse JSON output to extract response and build conversation history + response = "" + new_conversation_history = conversation_history or "" + + try: + # The JSON output contains the result + output_data = json.loads(raw_output) + response = output_data.get("result", raw_output) + + # Build conversation history entry + turn_entry = f"User: {prompt}\nClaude: {response}" + if new_conversation_history: + new_conversation_history = ( + f"{new_conversation_history}\n\n{turn_entry}" + ) + else: + new_conversation_history = turn_entry + + except json.JSONDecodeError: + # If not valid JSON, use raw output + response = raw_output + turn_entry = f"User: {prompt}\nClaude: {response}" + if new_conversation_history: + new_conversation_history = ( + f"{new_conversation_history}\n\n{turn_entry}" + ) + else: + new_conversation_history = turn_entry + + # Extract files created/modified during this run + files = await self._extract_files( + sandbox, working_directory, start_timestamp + ) + + return ( + response, + files, + new_conversation_history, + current_session_id, + sandbox_id, + ) + + except Exception as e: + # Wrap exception with sandbox_id so caller can access/cleanup + # the preserved sandbox when dispose_sandbox=False + raise ClaudeCodeExecutionError(str(e), sandbox_id) from e + + finally: + if dispose_sandbox and sandbox: + await sandbox.kill() + + async def _extract_files( + self, + sandbox: BaseAsyncSandbox, + working_directory: str, + since_timestamp: str | None = None, + ) -> list["ClaudeCodeBlock.FileOutput"]: + """ + Extract text files created/modified during this Claude Code execution. + + Args: + sandbox: The E2B sandbox instance + working_directory: Directory to search for files + since_timestamp: ISO timestamp - only return files modified after this time + + Returns: + List of FileOutput objects with path, relative_path, name, and content + """ + files: list[ClaudeCodeBlock.FileOutput] = [] + + # Text file extensions we can safely read as text + text_extensions = { + ".txt", + ".md", + ".html", + ".htm", + ".css", + ".js", + ".ts", + ".jsx", + ".tsx", + ".json", + ".xml", + ".yaml", + ".yml", + ".toml", + ".ini", + ".cfg", + ".conf", + ".py", + ".rb", + ".php", + ".java", + ".c", + ".cpp", + ".h", + ".hpp", + ".cs", + ".go", + ".rs", + ".swift", + ".kt", + ".scala", + ".sh", + ".bash", + ".zsh", + ".sql", + ".graphql", + ".env", + ".gitignore", + ".dockerfile", + "Dockerfile", + ".vue", + ".svelte", + ".astro", + ".mdx", + ".rst", + ".tex", + ".csv", + ".log", + } + + try: + # List files recursively using find command + # Exclude node_modules and .git directories, but allow hidden files + # like .env and .gitignore (they're filtered by text_extensions later) + # Filter by timestamp to only get files created/modified during this run + safe_working_dir = shlex.quote(working_directory) + timestamp_filter = "" + if since_timestamp: + timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} " + find_result = await sandbox.commands.run( + f"find {safe_working_dir} -type f " + f"{timestamp_filter}" + f"-not -path '*/node_modules/*' " + f"-not -path '*/.git/*' " + f"2>/dev/null" + ) + + if find_result.stdout: + for file_path in find_result.stdout.strip().split("\n"): + if not file_path: + continue + + # Check if it's a text file we can read + is_text = any( + file_path.endswith(ext) for ext in text_extensions + ) or file_path.endswith("Dockerfile") + + if is_text: + try: + content = await sandbox.files.read(file_path) + # 
Handle bytes or string + if isinstance(content, bytes): + content = content.decode("utf-8", errors="replace") + + # Extract filename from path + file_name = file_path.split("/")[-1] + + # Calculate relative path by stripping working directory + relative_path = file_path + if file_path.startswith(working_directory): + relative_path = file_path[len(working_directory) :] + # Remove leading slash if present + if relative_path.startswith("/"): + relative_path = relative_path[1:] + + files.append( + ClaudeCodeBlock.FileOutput( + path=file_path, + relative_path=relative_path, + name=file_name, + content=content, + ) + ) + except Exception: + # Skip files that can't be read + pass + + except Exception: + # If file extraction fails, return empty results + pass + + return files + + def _escape_prompt(self, prompt: str) -> str: + """Escape the prompt for safe shell execution.""" + # Use single quotes and escape any single quotes in the prompt + escaped = prompt.replace("'", "'\"'\"'") + return f"'{escaped}'" + + async def run( + self, + input_data: Input, + *, + e2b_credentials: APIKeyCredentials, + anthropic_credentials: APIKeyCredentials, + **kwargs, + ) -> BlockOutput: + try: + ( + response, + files, + conversation_history, + session_id, + sandbox_id, + ) = await self.execute_claude_code( + e2b_api_key=e2b_credentials.api_key.get_secret_value(), + anthropic_api_key=anthropic_credentials.api_key.get_secret_value(), + prompt=input_data.prompt, + timeout=input_data.timeout, + setup_commands=input_data.setup_commands, + working_directory=input_data.working_directory, + session_id=input_data.session_id, + existing_sandbox_id=input_data.sandbox_id, + conversation_history=input_data.conversation_history, + dispose_sandbox=input_data.dispose_sandbox, + ) + + yield "response", response + # Always yield files (empty list if none) to match Output schema + yield "files", [f.model_dump() for f in files] + # Always yield conversation_history so user can restore context on fresh sandbox + yield "conversation_history", conversation_history + # Always yield session_id so user can continue conversation + yield "session_id", session_id + # Always yield sandbox_id (None if disposed) to match Output schema + yield "sandbox_id", sandbox_id if not input_data.dispose_sandbox else None + + except ClaudeCodeExecutionError as e: + yield "error", str(e) + # If sandbox was preserved (dispose_sandbox=False), yield sandbox_id + # so user can reconnect to or clean up the orphaned sandbox + if not input_data.dispose_sandbox and e.sandbox_id: + yield "sandbox_id", e.sandbox_id + except Exception as e: + yield "error", str(e) diff --git a/autogpt_platform/backend/backend/blocks/data_manipulation.py b/autogpt_platform/backend/backend/blocks/data_manipulation.py index 5864a517ed..1014236b8c 100644 --- a/autogpt_platform/backend/backend/blocks/data_manipulation.py +++ b/autogpt_platform/backend/backend/blocks/data_manipulation.py @@ -680,3 +680,58 @@ class ListIsEmptyBlock(Block): async def run(self, input_data: Input, **kwargs) -> BlockOutput: yield "is_empty", len(input_data.list) == 0 + + +class ConcatenateListsBlock(Block): + class Input(BlockSchemaInput): + lists: List[List[Any]] = SchemaField( + description="A list of lists to concatenate together. All lists will be combined in order into a single list.", + placeholder="e.g., [[1, 2], [3, 4], [5, 6]]", + ) + + class Output(BlockSchemaOutput): + concatenated_list: List[Any] = SchemaField( + description="The concatenated list containing all elements from all input lists in order." 
+ ) + error: str = SchemaField( + description="Error message if concatenation failed due to invalid input types." + ) + + def __init__(self): + super().__init__( + id="3cf9298b-5817-4141-9d80-7c2cc5199c8e", + description="Concatenates multiple lists into a single list. All elements from all input lists are combined in order.", + categories={BlockCategory.BASIC}, + input_schema=ConcatenateListsBlock.Input, + output_schema=ConcatenateListsBlock.Output, + test_input=[ + {"lists": [[1, 2, 3], [4, 5, 6]]}, + {"lists": [["a", "b"], ["c"], ["d", "e", "f"]]}, + {"lists": [[1, 2], []]}, + {"lists": []}, + ], + test_output=[ + ("concatenated_list", [1, 2, 3, 4, 5, 6]), + ("concatenated_list", ["a", "b", "c", "d", "e", "f"]), + ("concatenated_list", [1, 2]), + ("concatenated_list", []), + ], + ) + + async def run(self, input_data: Input, **kwargs) -> BlockOutput: + concatenated = [] + for idx, lst in enumerate(input_data.lists): + if lst is None: + # Skip None values to avoid errors + continue + if not isinstance(lst, list): + # Type validation: each item must be a list + # Strings are iterable and would cause extend() to iterate character-by-character + # Non-iterable types would raise TypeError + yield "error", ( + f"Invalid input at index {idx}: expected a list, got {type(lst).__name__}. " + f"All items in 'lists' must be lists (e.g., [[1, 2], [3, 4]])." + ) + return + concatenated.extend(lst) + yield "concatenated_list", concatenated diff --git a/autogpt_platform/backend/backend/blocks/helpers/review.py b/autogpt_platform/backend/backend/blocks/helpers/review.py index f35397e6aa..4bd85e424b 100644 --- a/autogpt_platform/backend/backend/blocks/helpers/review.py +++ b/autogpt_platform/backend/backend/blocks/helpers/review.py @@ -9,7 +9,7 @@ from typing import Any, Optional from prisma.enums import ReviewStatus from pydantic import BaseModel -from backend.data.execution import ExecutionContext, ExecutionStatus +from backend.data.execution import ExecutionStatus from backend.data.human_review import ReviewResult from backend.executor.manager import async_update_node_execution_status from backend.util.clients import get_database_manager_async_client @@ -28,6 +28,11 @@ class ReviewDecision(BaseModel): class HITLReviewHelper: """Helper class for Human-In-The-Loop review operations.""" + @staticmethod + async def check_approval(**kwargs) -> Optional[ReviewResult]: + """Check if there's an existing approval for this node execution.""" + return await get_database_manager_async_client().check_approval(**kwargs) + @staticmethod async def get_or_create_human_review(**kwargs) -> Optional[ReviewResult]: """Create or retrieve a human review from the database.""" @@ -55,11 +60,11 @@ class HITLReviewHelper: async def _handle_review_request( input_data: Any, user_id: str, + node_id: str, node_exec_id: str, graph_exec_id: str, graph_id: str, graph_version: int, - execution_context: ExecutionContext, block_name: str = "Block", editable: bool = False, ) -> Optional[ReviewResult]: @@ -69,11 +74,11 @@ class HITLReviewHelper: Args: input_data: The input data to be reviewed user_id: ID of the user requesting the review + node_id: ID of the node in the graph definition node_exec_id: ID of the node execution graph_exec_id: ID of the graph execution graph_id: ID of the graph graph_version: Version of the graph - execution_context: Current execution context block_name: Name of the block requesting review editable: Whether the reviewer can edit the data @@ -83,15 +88,41 @@ class HITLReviewHelper: Raises: Exception: If review 
creation or status update fails """ - # Skip review if safe mode is disabled - return auto-approved result - if not execution_context.safe_mode: + # Note: Safe mode checks (human_in_the_loop_safe_mode, sensitive_action_safe_mode) + # are handled by the caller: + # - HITL blocks check human_in_the_loop_safe_mode in their run() method + # - Sensitive action blocks check sensitive_action_safe_mode in is_block_exec_need_review() + # This function only handles checking for existing approvals. + + # Check if this node has already been approved (normal or auto-approval) + if approval_result := await HITLReviewHelper.check_approval( + node_exec_id=node_exec_id, + graph_exec_id=graph_exec_id, + node_id=node_id, + user_id=user_id, + input_data=input_data, + ): logger.info( - f"Block {block_name} skipping review for node {node_exec_id} - safe mode disabled" + f"Block {block_name} skipping review for node {node_exec_id} - " + f"found existing approval" + ) + # Return a new ReviewResult with the current node_exec_id but approved status + # For auto-approvals, always use current input_data + # For normal approvals, use approval_result.data unless it's None + is_auto_approval = approval_result.node_exec_id != node_exec_id + approved_data = ( + input_data + if is_auto_approval + else ( + approval_result.data + if approval_result.data is not None + else input_data + ) ) return ReviewResult( - data=input_data, + data=approved_data, status=ReviewStatus.APPROVED, - message="Auto-approved (safe mode disabled)", + message=approval_result.message, processed=True, node_exec_id=node_exec_id, ) @@ -103,7 +134,7 @@ class HITLReviewHelper: graph_id=graph_id, graph_version=graph_version, input_data=input_data, - message=f"Review required for {block_name} execution", + message=block_name, # Use block_name directly as the message editable=editable, ) @@ -129,11 +160,11 @@ class HITLReviewHelper: async def handle_review_decision( input_data: Any, user_id: str, + node_id: str, node_exec_id: str, graph_exec_id: str, graph_id: str, graph_version: int, - execution_context: ExecutionContext, block_name: str = "Block", editable: bool = False, ) -> Optional[ReviewDecision]: @@ -143,11 +174,11 @@ class HITLReviewHelper: Args: input_data: The input data to be reviewed user_id: ID of the user requesting the review + node_id: ID of the node in the graph definition node_exec_id: ID of the node execution graph_exec_id: ID of the graph execution graph_id: ID of the graph graph_version: Version of the graph - execution_context: Current execution context block_name: Name of the block requesting review editable: Whether the reviewer can edit the data @@ -158,11 +189,11 @@ class HITLReviewHelper: review_result = await HITLReviewHelper._handle_review_request( input_data=input_data, user_id=user_id, + node_id=node_id, node_exec_id=node_exec_id, graph_exec_id=graph_exec_id, graph_id=graph_id, graph_version=graph_version, - execution_context=execution_context, block_name=block_name, editable=editable, ) diff --git a/autogpt_platform/backend/backend/blocks/human_in_the_loop.py b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py index 1e338816c8..568ac4b33f 100644 --- a/autogpt_platform/backend/backend/blocks/human_in_the_loop.py +++ b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py @@ -97,6 +97,7 @@ class HumanInTheLoopBlock(Block): input_data: Input, *, user_id: str, + node_id: str, node_exec_id: str, graph_exec_id: str, graph_id: str, @@ -104,7 +105,7 @@ class HumanInTheLoopBlock(Block): execution_context: 
ExecutionContext, **_kwargs, ) -> BlockOutput: - if not execution_context.safe_mode: + if not execution_context.human_in_the_loop_safe_mode: logger.info( f"HITL block skipping review for node {node_exec_id} - safe mode disabled" ) @@ -115,12 +116,12 @@ class HumanInTheLoopBlock(Block): decision = await self.handle_review_decision( input_data=input_data.data, user_id=user_id, + node_id=node_id, node_exec_id=node_exec_id, graph_exec_id=graph_exec_id, graph_id=graph_id, graph_version=graph_version, - execution_context=execution_context, - block_name=self.name, + block_name=input_data.name, # Use user-provided name instead of block type editable=input_data.editable, ) diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index 85e8fc36df..fdcd7f3568 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -79,6 +79,10 @@ class ModelMetadata(NamedTuple): provider: str context_window: int max_output_tokens: int | None + display_name: str + provider_name: str + creator_name: str + price_tier: Literal[1, 2, 3] class LlmModelMeta(EnumMeta): @@ -171,6 +175,26 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta): V0_1_5_LG = "v0-1.5-lg" V0_1_0_MD = "v0-1.0-md" + @classmethod + def __get_pydantic_json_schema__(cls, schema, handler): + json_schema = handler(schema) + llm_model_metadata = {} + for model in cls: + model_name = model.value + metadata = model.metadata + llm_model_metadata[model_name] = { + "creator": metadata.creator_name, + "creator_name": metadata.creator_name, + "title": metadata.display_name, + "provider": metadata.provider, + "provider_name": metadata.provider_name, + "name": model_name, + "price_tier": metadata.price_tier, + } + json_schema["llm_model"] = True + json_schema["llm_model_metadata"] = llm_model_metadata + return json_schema + @property def metadata(self) -> ModelMetadata: return MODEL_METADATA[self] @@ -190,119 +214,291 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta): MODEL_METADATA = { # https://platform.openai.com/docs/models - LlmModel.O3: ModelMetadata("openai", 200000, 100000), - LlmModel.O3_MINI: ModelMetadata("openai", 200000, 100000), # o3-mini-2025-01-31 - LlmModel.O1: ModelMetadata("openai", 200000, 100000), # o1-2024-12-17 - LlmModel.O1_MINI: ModelMetadata("openai", 128000, 65536), # o1-mini-2024-09-12 + LlmModel.O3: ModelMetadata("openai", 200000, 100000, "O3", "OpenAI", "OpenAI", 2), + LlmModel.O3_MINI: ModelMetadata( + "openai", 200000, 100000, "O3 Mini", "OpenAI", "OpenAI", 1 + ), # o3-mini-2025-01-31 + LlmModel.O1: ModelMetadata( + "openai", 200000, 100000, "O1", "OpenAI", "OpenAI", 3 + ), # o1-2024-12-17 + LlmModel.O1_MINI: ModelMetadata( + "openai", 128000, 65536, "O1 Mini", "OpenAI", "OpenAI", 2 + ), # o1-mini-2024-09-12 # GPT-5 models - LlmModel.GPT5_2: ModelMetadata("openai", 400000, 128000), - LlmModel.GPT5_1: ModelMetadata("openai", 400000, 128000), - LlmModel.GPT5: ModelMetadata("openai", 400000, 128000), - LlmModel.GPT5_MINI: ModelMetadata("openai", 400000, 128000), - LlmModel.GPT5_NANO: ModelMetadata("openai", 400000, 128000), - LlmModel.GPT5_CHAT: ModelMetadata("openai", 400000, 16384), - LlmModel.GPT41: ModelMetadata("openai", 1047576, 32768), - LlmModel.GPT41_MINI: ModelMetadata("openai", 1047576, 32768), + LlmModel.GPT5_2: ModelMetadata( + "openai", 400000, 128000, "GPT-5.2", "OpenAI", "OpenAI", 3 + ), + LlmModel.GPT5_1: ModelMetadata( + "openai", 400000, 128000, "GPT-5.1", "OpenAI", "OpenAI", 2 + ), + LlmModel.GPT5: 
ModelMetadata( + "openai", 400000, 128000, "GPT-5", "OpenAI", "OpenAI", 1 + ), + LlmModel.GPT5_MINI: ModelMetadata( + "openai", 400000, 128000, "GPT-5 Mini", "OpenAI", "OpenAI", 1 + ), + LlmModel.GPT5_NANO: ModelMetadata( + "openai", 400000, 128000, "GPT-5 Nano", "OpenAI", "OpenAI", 1 + ), + LlmModel.GPT5_CHAT: ModelMetadata( + "openai", 400000, 16384, "GPT-5 Chat Latest", "OpenAI", "OpenAI", 2 + ), + LlmModel.GPT41: ModelMetadata( + "openai", 1047576, 32768, "GPT-4.1", "OpenAI", "OpenAI", 1 + ), + LlmModel.GPT41_MINI: ModelMetadata( + "openai", 1047576, 32768, "GPT-4.1 Mini", "OpenAI", "OpenAI", 1 + ), LlmModel.GPT4O_MINI: ModelMetadata( - "openai", 128000, 16384 + "openai", 128000, 16384, "GPT-4o Mini", "OpenAI", "OpenAI", 1 ), # gpt-4o-mini-2024-07-18 - LlmModel.GPT4O: ModelMetadata("openai", 128000, 16384), # gpt-4o-2024-08-06 + LlmModel.GPT4O: ModelMetadata( + "openai", 128000, 16384, "GPT-4o", "OpenAI", "OpenAI", 2 + ), # gpt-4o-2024-08-06 LlmModel.GPT4_TURBO: ModelMetadata( - "openai", 128000, 4096 + "openai", 128000, 4096, "GPT-4 Turbo", "OpenAI", "OpenAI", 3 ), # gpt-4-turbo-2024-04-09 - LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, 4096), # gpt-3.5-turbo-0125 + LlmModel.GPT3_5_TURBO: ModelMetadata( + "openai", 16385, 4096, "GPT-3.5 Turbo", "OpenAI", "OpenAI", 1 + ), # gpt-3.5-turbo-0125 # https://docs.anthropic.com/en/docs/about-claude/models LlmModel.CLAUDE_4_1_OPUS: ModelMetadata( - "anthropic", 200000, 32000 + "anthropic", 200000, 32000, "Claude Opus 4.1", "Anthropic", "Anthropic", 3 ), # claude-opus-4-1-20250805 LlmModel.CLAUDE_4_OPUS: ModelMetadata( - "anthropic", 200000, 32000 + "anthropic", 200000, 32000, "Claude Opus 4", "Anthropic", "Anthropic", 3 ), # claude-4-opus-20250514 LlmModel.CLAUDE_4_SONNET: ModelMetadata( - "anthropic", 200000, 64000 + "anthropic", 200000, 64000, "Claude Sonnet 4", "Anthropic", "Anthropic", 2 ), # claude-4-sonnet-20250514 LlmModel.CLAUDE_4_5_OPUS: ModelMetadata( - "anthropic", 200000, 64000 + "anthropic", 200000, 64000, "Claude Opus 4.5", "Anthropic", "Anthropic", 3 ), # claude-opus-4-5-20251101 LlmModel.CLAUDE_4_5_SONNET: ModelMetadata( - "anthropic", 200000, 64000 + "anthropic", 200000, 64000, "Claude Sonnet 4.5", "Anthropic", "Anthropic", 3 ), # claude-sonnet-4-5-20250929 LlmModel.CLAUDE_4_5_HAIKU: ModelMetadata( - "anthropic", 200000, 64000 + "anthropic", 200000, 64000, "Claude Haiku 4.5", "Anthropic", "Anthropic", 2 ), # claude-haiku-4-5-20251001 LlmModel.CLAUDE_3_7_SONNET: ModelMetadata( - "anthropic", 200000, 64000 + "anthropic", 200000, 64000, "Claude 3.7 Sonnet", "Anthropic", "Anthropic", 2 ), # claude-3-7-sonnet-20250219 LlmModel.CLAUDE_3_HAIKU: ModelMetadata( - "anthropic", 200000, 4096 + "anthropic", 200000, 4096, "Claude 3 Haiku", "Anthropic", "Anthropic", 1 ), # claude-3-haiku-20240307 # https://docs.aimlapi.com/api-overview/model-database/text-models - LlmModel.AIML_API_QWEN2_5_72B: ModelMetadata("aiml_api", 32000, 8000), - LlmModel.AIML_API_LLAMA3_1_70B: ModelMetadata("aiml_api", 128000, 40000), - LlmModel.AIML_API_LLAMA3_3_70B: ModelMetadata("aiml_api", 128000, None), - LlmModel.AIML_API_META_LLAMA_3_1_70B: ModelMetadata("aiml_api", 131000, 2000), - LlmModel.AIML_API_LLAMA_3_2_3B: ModelMetadata("aiml_api", 128000, None), - # https://console.groq.com/docs/models - LlmModel.LLAMA3_3_70B: ModelMetadata("groq", 128000, 32768), - LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 128000, 8192), - # https://ollama.com/library - LlmModel.OLLAMA_LLAMA3_3: ModelMetadata("ollama", 8192, None), - LlmModel.OLLAMA_LLAMA3_2: 
ModelMetadata("ollama", 8192, None), - LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, None), - LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, None), - LlmModel.OLLAMA_DOLPHIN: ModelMetadata("ollama", 32768, None), - # https://openrouter.ai/models - LlmModel.GEMINI_2_5_PRO: ModelMetadata("open_router", 1050000, 8192), - LlmModel.GEMINI_3_PRO_PREVIEW: ModelMetadata("open_router", 1048576, 65535), - LlmModel.GEMINI_2_5_FLASH: ModelMetadata("open_router", 1048576, 65535), - LlmModel.GEMINI_2_0_FLASH: ModelMetadata("open_router", 1048576, 8192), - LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: ModelMetadata( - "open_router", 1048576, 65535 + LlmModel.AIML_API_QWEN2_5_72B: ModelMetadata( + "aiml_api", 32000, 8000, "Qwen 2.5 72B Instruct Turbo", "AI/ML", "Qwen", 1 + ), + LlmModel.AIML_API_LLAMA3_1_70B: ModelMetadata( + "aiml_api", + 128000, + 40000, + "Llama 3.1 Nemotron 70B Instruct", + "AI/ML", + "Nvidia", + 1, + ), + LlmModel.AIML_API_LLAMA3_3_70B: ModelMetadata( + "aiml_api", 128000, None, "Llama 3.3 70B Instruct Turbo", "AI/ML", "Meta", 1 + ), + LlmModel.AIML_API_META_LLAMA_3_1_70B: ModelMetadata( + "aiml_api", 131000, 2000, "Llama 3.1 70B Instruct Turbo", "AI/ML", "Meta", 1 + ), + LlmModel.AIML_API_LLAMA_3_2_3B: ModelMetadata( + "aiml_api", 128000, None, "Llama 3.2 3B Instruct Turbo", "AI/ML", "Meta", 1 + ), + # https://console.groq.com/docs/models + LlmModel.LLAMA3_3_70B: ModelMetadata( + "groq", 128000, 32768, "Llama 3.3 70B Versatile", "Groq", "Meta", 1 + ), + LlmModel.LLAMA3_1_8B: ModelMetadata( + "groq", 128000, 8192, "Llama 3.1 8B Instant", "Groq", "Meta", 1 + ), + # https://ollama.com/library + LlmModel.OLLAMA_LLAMA3_3: ModelMetadata( + "ollama", 8192, None, "Llama 3.3", "Ollama", "Meta", 1 + ), + LlmModel.OLLAMA_LLAMA3_2: ModelMetadata( + "ollama", 8192, None, "Llama 3.2", "Ollama", "Meta", 1 + ), + LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata( + "ollama", 8192, None, "Llama 3", "Ollama", "Meta", 1 + ), + LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata( + "ollama", 8192, None, "Llama 3.1 405B", "Ollama", "Meta", 1 + ), + LlmModel.OLLAMA_DOLPHIN: ModelMetadata( + "ollama", 32768, None, "Dolphin Mistral Latest", "Ollama", "Mistral AI", 1 + ), + # https://openrouter.ai/models + LlmModel.GEMINI_2_5_PRO: ModelMetadata( + "open_router", + 1050000, + 8192, + "Gemini 2.5 Pro Preview 03.25", + "OpenRouter", + "Google", + 2, + ), + LlmModel.GEMINI_3_PRO_PREVIEW: ModelMetadata( + "open_router", 1048576, 65535, "Gemini 3 Pro Preview", "OpenRouter", "Google", 2 + ), + LlmModel.GEMINI_2_5_FLASH: ModelMetadata( + "open_router", 1048576, 65535, "Gemini 2.5 Flash", "OpenRouter", "Google", 1 + ), + LlmModel.GEMINI_2_0_FLASH: ModelMetadata( + "open_router", 1048576, 8192, "Gemini 2.0 Flash 001", "OpenRouter", "Google", 1 + ), + LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: ModelMetadata( + "open_router", + 1048576, + 65535, + "Gemini 2.5 Flash Lite Preview 06.17", + "OpenRouter", + "Google", + 1, + ), + LlmModel.GEMINI_2_0_FLASH_LITE: ModelMetadata( + "open_router", + 1048576, + 8192, + "Gemini 2.0 Flash Lite 001", + "OpenRouter", + "Google", + 1, + ), + LlmModel.MISTRAL_NEMO: ModelMetadata( + "open_router", 128000, 4096, "Mistral Nemo", "OpenRouter", "Mistral AI", 1 + ), + LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata( + "open_router", 128000, 4096, "Command R 08.2024", "OpenRouter", "Cohere", 1 + ), + LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata( + "open_router", 128000, 4096, "Command R Plus 08.2024", "OpenRouter", "Cohere", 2 + ), + LlmModel.DEEPSEEK_CHAT: ModelMetadata( + 
"open_router", 64000, 2048, "DeepSeek Chat", "OpenRouter", "DeepSeek", 1 + ), + LlmModel.DEEPSEEK_R1_0528: ModelMetadata( + "open_router", 163840, 163840, "DeepSeek R1 0528", "OpenRouter", "DeepSeek", 1 + ), + LlmModel.PERPLEXITY_SONAR: ModelMetadata( + "open_router", 127000, 8000, "Sonar", "OpenRouter", "Perplexity", 1 + ), + LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata( + "open_router", 200000, 8000, "Sonar Pro", "OpenRouter", "Perplexity", 2 ), - LlmModel.GEMINI_2_0_FLASH_LITE: ModelMetadata("open_router", 1048576, 8192), - LlmModel.MISTRAL_NEMO: ModelMetadata("open_router", 128000, 4096), - LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata("open_router", 128000, 4096), - LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata("open_router", 128000, 4096), - LlmModel.DEEPSEEK_CHAT: ModelMetadata("open_router", 64000, 2048), - LlmModel.DEEPSEEK_R1_0528: ModelMetadata("open_router", 163840, 163840), - LlmModel.PERPLEXITY_SONAR: ModelMetadata("open_router", 127000, 8000), - LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata("open_router", 200000, 8000), LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: ModelMetadata( "open_router", 128000, 16000, + "Sonar Deep Research", + "OpenRouter", + "Perplexity", + 3, ), LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: ModelMetadata( - "open_router", 131000, 4096 + "open_router", + 131000, + 4096, + "Hermes 3 Llama 3.1 405B", + "OpenRouter", + "Nous Research", + 1, ), LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: ModelMetadata( - "open_router", 12288, 12288 + "open_router", + 12288, + 12288, + "Hermes 3 Llama 3.1 70B", + "OpenRouter", + "Nous Research", + 1, + ), + LlmModel.OPENAI_GPT_OSS_120B: ModelMetadata( + "open_router", 131072, 131072, "GPT-OSS 120B", "OpenRouter", "OpenAI", 1 + ), + LlmModel.OPENAI_GPT_OSS_20B: ModelMetadata( + "open_router", 131072, 32768, "GPT-OSS 20B", "OpenRouter", "OpenAI", 1 + ), + LlmModel.AMAZON_NOVA_LITE_V1: ModelMetadata( + "open_router", 300000, 5120, "Nova Lite V1", "OpenRouter", "Amazon", 1 + ), + LlmModel.AMAZON_NOVA_MICRO_V1: ModelMetadata( + "open_router", 128000, 5120, "Nova Micro V1", "OpenRouter", "Amazon", 1 + ), + LlmModel.AMAZON_NOVA_PRO_V1: ModelMetadata( + "open_router", 300000, 5120, "Nova Pro V1", "OpenRouter", "Amazon", 1 + ), + LlmModel.MICROSOFT_WIZARDLM_2_8X22B: ModelMetadata( + "open_router", 65536, 4096, "WizardLM 2 8x22B", "OpenRouter", "Microsoft", 1 + ), + LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata( + "open_router", 4096, 4096, "MythoMax L2 13B", "OpenRouter", "Gryphe", 1 + ), + LlmModel.META_LLAMA_4_SCOUT: ModelMetadata( + "open_router", 131072, 131072, "Llama 4 Scout", "OpenRouter", "Meta", 1 + ), + LlmModel.META_LLAMA_4_MAVERICK: ModelMetadata( + "open_router", 1048576, 1000000, "Llama 4 Maverick", "OpenRouter", "Meta", 1 + ), + LlmModel.GROK_4: ModelMetadata( + "open_router", 256000, 256000, "Grok 4", "OpenRouter", "xAI", 3 + ), + LlmModel.GROK_4_FAST: ModelMetadata( + "open_router", 2000000, 30000, "Grok 4 Fast", "OpenRouter", "xAI", 1 + ), + LlmModel.GROK_4_1_FAST: ModelMetadata( + "open_router", 2000000, 30000, "Grok 4.1 Fast", "OpenRouter", "xAI", 1 + ), + LlmModel.GROK_CODE_FAST_1: ModelMetadata( + "open_router", 256000, 10000, "Grok Code Fast 1", "OpenRouter", "xAI", 1 + ), + LlmModel.KIMI_K2: ModelMetadata( + "open_router", 131000, 131000, "Kimi K2", "OpenRouter", "Moonshot AI", 1 + ), + LlmModel.QWEN3_235B_A22B_THINKING: ModelMetadata( + "open_router", + 262144, + 262144, + "Qwen 3 235B A22B Thinking 2507", + "OpenRouter", + "Qwen", + 1, + ), + LlmModel.QWEN3_CODER: ModelMetadata( + 
"open_router", 262144, 262144, "Qwen 3 Coder", "OpenRouter", "Qwen", 3 ), - LlmModel.OPENAI_GPT_OSS_120B: ModelMetadata("open_router", 131072, 131072), - LlmModel.OPENAI_GPT_OSS_20B: ModelMetadata("open_router", 131072, 32768), - LlmModel.AMAZON_NOVA_LITE_V1: ModelMetadata("open_router", 300000, 5120), - LlmModel.AMAZON_NOVA_MICRO_V1: ModelMetadata("open_router", 128000, 5120), - LlmModel.AMAZON_NOVA_PRO_V1: ModelMetadata("open_router", 300000, 5120), - LlmModel.MICROSOFT_WIZARDLM_2_8X22B: ModelMetadata("open_router", 65536, 4096), - LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata("open_router", 4096, 4096), - LlmModel.META_LLAMA_4_SCOUT: ModelMetadata("open_router", 131072, 131072), - LlmModel.META_LLAMA_4_MAVERICK: ModelMetadata("open_router", 1048576, 1000000), - LlmModel.GROK_4: ModelMetadata("open_router", 256000, 256000), - LlmModel.GROK_4_FAST: ModelMetadata("open_router", 2000000, 30000), - LlmModel.GROK_4_1_FAST: ModelMetadata("open_router", 2000000, 30000), - LlmModel.GROK_CODE_FAST_1: ModelMetadata("open_router", 256000, 10000), - LlmModel.KIMI_K2: ModelMetadata("open_router", 131000, 131000), - LlmModel.QWEN3_235B_A22B_THINKING: ModelMetadata("open_router", 262144, 262144), - LlmModel.QWEN3_CODER: ModelMetadata("open_router", 262144, 262144), # Llama API models - LlmModel.LLAMA_API_LLAMA_4_SCOUT: ModelMetadata("llama_api", 128000, 4028), - LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata("llama_api", 128000, 4028), - LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata("llama_api", 128000, 4028), - LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata("llama_api", 128000, 4028), + LlmModel.LLAMA_API_LLAMA_4_SCOUT: ModelMetadata( + "llama_api", + 128000, + 4028, + "Llama 4 Scout 17B 16E Instruct FP8", + "Llama API", + "Meta", + 1, + ), + LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata( + "llama_api", + 128000, + 4028, + "Llama 4 Maverick 17B 128E Instruct FP8", + "Llama API", + "Meta", + 1, + ), + LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata( + "llama_api", 128000, 4028, "Llama 3.3 8B Instruct", "Llama API", "Meta", 1 + ), + LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata( + "llama_api", 128000, 4028, "Llama 3.3 70B Instruct", "Llama API", "Meta", 1 + ), # v0 by Vercel models - LlmModel.V0_1_5_MD: ModelMetadata("v0", 128000, 64000), - LlmModel.V0_1_5_LG: ModelMetadata("v0", 512000, 64000), - LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000), + LlmModel.V0_1_5_MD: ModelMetadata("v0", 128000, 64000, "v0 1.5 MD", "V0", "V0", 1), + LlmModel.V0_1_5_LG: ModelMetadata("v0", 512000, 64000, "v0 1.5 LG", "V0", "V0", 1), + LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000, "v0 1.0 MD", "V0", "V0", 1), } DEFAULT_LLM_MODEL = LlmModel.GPT5_2 diff --git a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py index 8266d433ad..0f9da7e10b 100644 --- a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py +++ b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py @@ -242,7 +242,7 @@ async def test_smart_decision_maker_tracks_llm_stats(): outputs = {} # Create execution context - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests @@ -343,7 +343,7 @@ async def test_smart_decision_maker_parameter_validation(): # Create execution context - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = 
ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests @@ -409,7 +409,7 @@ async def test_smart_decision_maker_parameter_validation(): # Create execution context - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests @@ -471,7 +471,7 @@ async def test_smart_decision_maker_parameter_validation(): outputs = {} # Create execution context - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests @@ -535,7 +535,7 @@ async def test_smart_decision_maker_parameter_validation(): outputs = {} # Create execution context - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests @@ -658,7 +658,7 @@ async def test_smart_decision_maker_raw_response_conversion(): outputs = {} # Create execution context - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests @@ -730,7 +730,7 @@ async def test_smart_decision_maker_raw_response_conversion(): outputs = {} # Create execution context - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests @@ -786,7 +786,7 @@ async def test_smart_decision_maker_raw_response_conversion(): outputs = {} # Create execution context - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests @@ -905,7 +905,7 @@ async def test_smart_decision_maker_agent_mode(): # Create a mock execution context mock_execution_context = ExecutionContext( - safe_mode=False, + human_in_the_loop_safe_mode=False, ) # Create a mock execution processor for agent mode tests @@ -1027,7 +1027,7 @@ async def test_smart_decision_maker_traditional_mode_default(): # Create execution context - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) # Create a mock execution processor for tests diff --git a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py index af89a83f86..0427b13466 100644 --- a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py +++ b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py @@ -386,7 +386,7 @@ async def test_output_yielding_with_dynamic_fields(): outputs = {} from backend.data.execution import ExecutionContext - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False) mock_execution_processor = MagicMock() async for output_name, output_value in block.run( @@ -609,7 +609,9 @@ async def test_validation_errors_dont_pollute_conversation(): outputs = {} from backend.data.execution import ExecutionContext - mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_context = ExecutionContext( + 
human_in_the_loop_safe_mode=False + ) # Create a proper mock execution processor for agent mode from collections import defaultdict diff --git a/autogpt_platform/backend/backend/conftest.py b/autogpt_platform/backend/backend/conftest.py index b0b7f0cc67..57481e4b85 100644 --- a/autogpt_platform/backend/backend/conftest.py +++ b/autogpt_platform/backend/backend/conftest.py @@ -1,7 +1,7 @@ import logging import os -import pytest +import pytest_asyncio from dotenv import load_dotenv from backend.util.logging import configure_logging @@ -19,7 +19,7 @@ if not os.getenv("PRISMA_DEBUG"): prisma_logger.setLevel(logging.INFO) -@pytest.fixture(scope="session") +@pytest_asyncio.fixture(scope="session", loop_scope="session") async def server(): from backend.util.test import SpinTestServer @@ -27,7 +27,7 @@ async def server(): yield server -@pytest.fixture(scope="session", autouse=True) +@pytest_asyncio.fixture(scope="session", loop_scope="session", autouse=True) async def graph_cleanup(server): created_graph_ids = [] original_create_graph = server.agent_server.test_create_graph diff --git a/autogpt_platform/backend/backend/data/block.py b/autogpt_platform/backend/backend/data/block.py index bfd4a35ec2..8d9ecfff4c 100644 --- a/autogpt_platform/backend/backend/data/block.py +++ b/autogpt_platform/backend/backend/data/block.py @@ -441,6 +441,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]): static_output: bool = False, block_type: BlockType = BlockType.STANDARD, webhook_config: Optional[BlockWebhookConfig | BlockManualWebhookConfig] = None, + is_sensitive_action: bool = False, ): """ Initialize the block with the given schema. @@ -473,8 +474,8 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]): self.static_output = static_output self.block_type = block_type self.webhook_config = webhook_config + self.is_sensitive_action = is_sensitive_action self.execution_stats: NodeExecutionStats = NodeExecutionStats() - self.requires_human_review: bool = False if self.webhook_config: if isinstance(self.webhook_config, BlockWebhookConfig): @@ -622,6 +623,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]): input_data: BlockInput, *, user_id: str, + node_id: str, node_exec_id: str, graph_exec_id: str, graph_id: str, @@ -637,8 +639,9 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]): - should_pause: True if execution should be paused for review - input_data_to_use: The input data to use (may be modified by reviewer) """ - # Skip review if not required or safe mode is disabled - if not self.requires_human_review or not execution_context.safe_mode: + if not ( + self.is_sensitive_action and execution_context.sensitive_action_safe_mode + ): return False, input_data from backend.blocks.helpers.review import HITLReviewHelper @@ -647,11 +650,11 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]): decision = await HITLReviewHelper.handle_review_decision( input_data=input_data, user_id=user_id, + node_id=node_id, node_exec_id=node_exec_id, graph_exec_id=graph_exec_id, graph_id=graph_id, graph_version=graph_version, - execution_context=execution_context, block_name=self.name, editable=True, ) diff --git a/autogpt_platform/backend/backend/data/block_cost_config.py b/autogpt_platform/backend/backend/data/block_cost_config.py index 7f8ee97d52..1b54ae0942 100644 --- a/autogpt_platform/backend/backend/data/block_cost_config.py +++ b/autogpt_platform/backend/backend/data/block_cost_config.py @@ -99,10 +99,15 @@ 
MODEL_COST: dict[LlmModel, int] = { LlmModel.OPENAI_GPT_OSS_20B: 1, LlmModel.GEMINI_2_5_PRO: 4, LlmModel.GEMINI_3_PRO_PREVIEW: 5, + LlmModel.GEMINI_2_5_FLASH: 1, + LlmModel.GEMINI_2_0_FLASH: 1, + LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: 1, + LlmModel.GEMINI_2_0_FLASH_LITE: 1, LlmModel.MISTRAL_NEMO: 1, LlmModel.COHERE_COMMAND_R_08_2024: 1, LlmModel.COHERE_COMMAND_R_PLUS_08_2024: 3, LlmModel.DEEPSEEK_CHAT: 2, + LlmModel.DEEPSEEK_R1_0528: 1, LlmModel.PERPLEXITY_SONAR: 1, LlmModel.PERPLEXITY_SONAR_PRO: 5, LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: 10, @@ -126,11 +131,6 @@ MODEL_COST: dict[LlmModel, int] = { LlmModel.KIMI_K2: 1, LlmModel.QWEN3_235B_A22B_THINKING: 1, LlmModel.QWEN3_CODER: 9, - LlmModel.GEMINI_2_5_FLASH: 1, - LlmModel.GEMINI_2_0_FLASH: 1, - LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: 1, - LlmModel.GEMINI_2_0_FLASH_LITE: 1, - LlmModel.DEEPSEEK_R1_0528: 1, # v0 by Vercel models LlmModel.V0_1_5_MD: 1, LlmModel.V0_1_5_LG: 2, diff --git a/autogpt_platform/backend/backend/data/db.py b/autogpt_platform/backend/backend/data/db.py index ab39881ed5..48f420aec7 100644 --- a/autogpt_platform/backend/backend/data/db.py +++ b/autogpt_platform/backend/backend/data/db.py @@ -38,20 +38,6 @@ POOL_TIMEOUT = os.getenv("DB_POOL_TIMEOUT") if POOL_TIMEOUT: DATABASE_URL = add_param(DATABASE_URL, "pool_timeout", POOL_TIMEOUT) -# Add public schema to search_path for pgvector type access -# The vector extension is in public schema, but search_path is determined by schema parameter -# Extract the schema from DATABASE_URL or default to 'public' (matching get_database_schema()) -parsed_url = urlparse(DATABASE_URL) -url_params = dict(parse_qsl(parsed_url.query)) -db_schema = url_params.get("schema", "public") -# Build search_path, avoiding duplicates if db_schema is already 'public' -search_path_schemas = list( - dict.fromkeys([db_schema, "public"]) -) # Preserves order, removes duplicates -search_path = ",".join(search_path_schemas) -# This allows using ::vector without schema qualification -DATABASE_URL = add_param(DATABASE_URL, "options", f"-c search_path={search_path}") - HTTP_TIMEOUT = int(POOL_TIMEOUT) if POOL_TIMEOUT else None prisma = Prisma( @@ -127,38 +113,48 @@ async def _raw_with_schema( *args, execute: bool = False, client: Prisma | None = None, - set_public_search_path: bool = False, ) -> list[dict] | int: """Internal: Execute raw SQL with proper schema handling. Use query_raw_with_schema() or execute_raw_with_schema() instead. + Supports placeholders: + - {schema_prefix}: Table/type prefix (e.g., "platform".) + - {schema}: Raw schema name for application tables (e.g., platform) + + Note on pgvector types: + Use unqualified ::vector and <=> operator in queries. PostgreSQL resolves + these via search_path, which includes the schema where pgvector is installed + on all environments (local, CI, dev). + Args: - query_template: SQL query with {schema_prefix} placeholder + query_template: SQL query with {schema_prefix} and/or {schema} placeholders *args: Query parameters execute: If False, executes SELECT query. If True, executes INSERT/UPDATE/DELETE. client: Optional Prisma client for transactions (only used when execute=True). - set_public_search_path: If True, sets search_path to include public schema. - Needed for pgvector types and other public schema objects. 
Returns: - list[dict] if execute=False (query results) - int if execute=True (number of affected rows) + + Example with vector type: + await execute_raw_with_schema( + 'INSERT INTO {schema_prefix}"Embedding" (vec) VALUES ($1::vector)', + embedding_data + ) """ schema = get_database_schema() schema_prefix = f'"{schema}".' if schema != "public" else "" - formatted_query = query_template.format(schema_prefix=schema_prefix) + + formatted_query = query_template.format( + schema_prefix=schema_prefix, + schema=schema, + ) import prisma as prisma_module db_client = client if client else prisma_module.get_client() - # Set search_path to include public schema if requested - # Prisma doesn't support the 'options' connection parameter, so we set it per-session - # This is idempotent and safe to call multiple times - if set_public_search_path: - await db_client.execute_raw(f"SET search_path = {schema}, public") # type: ignore - if execute: result = await db_client.execute_raw(formatted_query, *args) # type: ignore else: @@ -167,16 +163,12 @@ async def _raw_with_schema( return result -async def query_raw_with_schema( - query_template: str, *args, set_public_search_path: bool = False -) -> list[dict]: +async def query_raw_with_schema(query_template: str, *args) -> list[dict]: """Execute raw SQL SELECT query with proper schema handling. Args: - query_template: SQL query with {schema_prefix} placeholder + query_template: SQL query with {schema_prefix} and/or {schema} placeholders *args: Query parameters - set_public_search_path: If True, sets search_path to include public schema. - Needed for pgvector types and other public schema objects. Returns: List of result rows as dictionaries @@ -187,23 +179,20 @@ async def query_raw_with_schema( user_id ) """ - return await _raw_with_schema(query_template, *args, execute=False, set_public_search_path=set_public_search_path) # type: ignore + return await _raw_with_schema(query_template, *args, execute=False) # type: ignore async def execute_raw_with_schema( query_template: str, *args, client: Prisma | None = None, - set_public_search_path: bool = False, ) -> int: """Execute raw SQL command (INSERT/UPDATE/DELETE) with proper schema handling. Args: - query_template: SQL query with {schema_prefix} placeholder + query_template: SQL query with {schema_prefix} and/or {schema} placeholders *args: Query parameters client: Optional Prisma client for transactions - set_public_search_path: If True, sets search_path to include public schema. - Needed for pgvector types and other public schema objects. Returns: Number of affected rows @@ -215,7 +204,7 @@ async def execute_raw_with_schema( client=tx # Optional transaction client ) """ - return await _raw_with_schema(query_template, *args, execute=True, client=client, set_public_search_path=set_public_search_path) # type: ignore + return await _raw_with_schema(query_template, *args, execute=True, client=client) # type: ignore class BaseDbModel(BaseModel): diff --git a/autogpt_platform/backend/backend/data/event_bus.py b/autogpt_platform/backend/backend/data/event_bus.py index 48f9df87fa..d8a1c5b729 100644 --- a/autogpt_platform/backend/backend/data/event_bus.py +++ b/autogpt_platform/backend/backend/data/event_bus.py @@ -103,8 +103,18 @@ class RedisEventBus(BaseRedisEventBus[M], ABC): return redis.get_redis() def publish_event(self, event: M, channel_key: str): - message, full_channel_name = self._serialize_message(event, channel_key) - self.connection.publish(full_channel_name, message) + """ + Publish an event to Redis. 
Gracefully handles connection failures
+        by logging the error instead of raising exceptions.
+        """
+        try:
+            message, full_channel_name = self._serialize_message(event, channel_key)
+            self.connection.publish(full_channel_name, message)
+        except Exception:
+            logger.exception(
+                f"Failed to publish event to Redis channel {channel_key}. "
+                "Event bus operation will continue without Redis connectivity."
+            )
 
     def listen_events(self, channel_key: str) -> Generator[M, None, None]:
         pubsub, full_channel_name = self._get_pubsub_channel(
@@ -128,9 +138,19 @@ class AsyncRedisEventBus(BaseRedisEventBus[M], ABC):
         return await redis.get_redis_async()
 
     async def publish_event(self, event: M, channel_key: str):
-        message, full_channel_name = self._serialize_message(event, channel_key)
-        connection = await self.connection
-        await connection.publish(full_channel_name, message)
+        """
+        Publish an event to Redis. Gracefully handles connection failures
+        by logging the error instead of raising exceptions.
+        """
+        try:
+            message, full_channel_name = self._serialize_message(event, channel_key)
+            connection = await self.connection
+            await connection.publish(full_channel_name, message)
+        except Exception:
+            logger.exception(
+                f"Failed to publish event to Redis channel {channel_key}. "
+                "Event bus operation will continue without Redis connectivity."
+            )
 
     async def listen_events(self, channel_key: str) -> AsyncGenerator[M, None]:
         pubsub, full_channel_name = self._get_pubsub_channel(
diff --git a/autogpt_platform/backend/backend/data/event_bus_test.py b/autogpt_platform/backend/backend/data/event_bus_test.py
new file mode 100644
index 0000000000..93ecc0ba2a
--- /dev/null
+++ b/autogpt_platform/backend/backend/data/event_bus_test.py
@@ -0,0 +1,56 @@
+"""
+Tests for event_bus graceful degradation when Redis is unavailable.
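+
+Covers two paths: a failed Redis connection is logged and swallowed by
+publish_event, and a healthy connection forwards the publish call as usual.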
+""" + +from unittest.mock import AsyncMock, patch + +import pytest +from pydantic import BaseModel + +from backend.data.event_bus import AsyncRedisEventBus + + +class TestEvent(BaseModel): + """Test event model.""" + + message: str + + +class TestNotificationBus(AsyncRedisEventBus[TestEvent]): + """Test implementation of AsyncRedisEventBus.""" + + Model = TestEvent + + @property + def event_bus_name(self) -> str: + return "test_event_bus" + + +@pytest.mark.asyncio +async def test_publish_event_handles_connection_failure_gracefully(): + """Test that publish_event logs exception instead of raising when Redis is unavailable.""" + bus = TestNotificationBus() + event = TestEvent(message="test message") + + # Mock get_redis_async to raise connection error + with patch( + "backend.data.event_bus.redis.get_redis_async", + side_effect=ConnectionError("Authentication required."), + ): + # Should not raise exception + await bus.publish_event(event, "test_channel") + + +@pytest.mark.asyncio +async def test_publish_event_works_with_redis_available(): + """Test that publish_event works normally when Redis is available.""" + bus = TestNotificationBus() + event = TestEvent(message="test message") + + # Mock successful Redis connection + mock_redis = AsyncMock() + mock_redis.publish = AsyncMock() + + with patch("backend.data.event_bus.redis.get_redis_async", return_value=mock_redis): + await bus.publish_event(event, "test_channel") + mock_redis.publish.assert_called_once() diff --git a/autogpt_platform/backend/backend/data/execution.py b/autogpt_platform/backend/backend/data/execution.py index a6797032fd..3c1fd25c51 100644 --- a/autogpt_platform/backend/backend/data/execution.py +++ b/autogpt_platform/backend/backend/data/execution.py @@ -81,7 +81,10 @@ class ExecutionContext(BaseModel): This includes information needed by blocks, sub-graphs, and execution management. """ - safe_mode: bool = True + model_config = {"extra": "ignore"} + + human_in_the_loop_safe_mode: bool = True + sensitive_action_safe_mode: bool = False user_timezone: str = "UTC" root_execution_id: Optional[str] = None parent_execution_id: Optional[str] = None diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index e9be80892c..c1f38f81d5 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -3,7 +3,7 @@ import logging import uuid from collections import defaultdict from datetime import datetime, timezone -from typing import TYPE_CHECKING, Any, Literal, Optional, cast +from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, cast from prisma.enums import SubmissionStatus from prisma.models import ( @@ -20,7 +20,7 @@ from prisma.types import ( AgentNodeLinkCreateInput, StoreListingVersionWhereInput, ) -from pydantic import BaseModel, Field, create_model +from pydantic import BaseModel, BeforeValidator, Field, create_model from pydantic.fields import computed_field from backend.blocks.agent import AgentExecutorBlock @@ -62,7 +62,31 @@ logger = logging.getLogger(__name__) class GraphSettings(BaseModel): - human_in_the_loop_safe_mode: bool | None = None + # Use Annotated with BeforeValidator to coerce None to default values. + # This handles cases where the database has null values for these fields. 
+ model_config = {"extra": "ignore"} + + human_in_the_loop_safe_mode: Annotated[ + bool, BeforeValidator(lambda v: v if v is not None else True) + ] = True + sensitive_action_safe_mode: Annotated[ + bool, BeforeValidator(lambda v: v if v is not None else False) + ] = False + + @classmethod + def from_graph( + cls, + graph: "GraphModel", + hitl_safe_mode: bool | None = None, + sensitive_action_safe_mode: bool = False, + ) -> "GraphSettings": + # Default to True if not explicitly set + if hitl_safe_mode is None: + hitl_safe_mode = True + return cls( + human_in_the_loop_safe_mode=hitl_safe_mode, + sensitive_action_safe_mode=sensitive_action_safe_mode, + ) class Link(BaseDbModel): @@ -244,10 +268,14 @@ class BaseGraph(BaseDbModel): return any( node.block_id for node in self.nodes - if ( - node.block.block_type == BlockType.HUMAN_IN_THE_LOOP - or node.block.requires_human_review - ) + if node.block.block_type == BlockType.HUMAN_IN_THE_LOOP + ) + + @computed_field + @property + def has_sensitive_action(self) -> bool: + return any( + node.block_id for node in self.nodes if node.block.is_sensitive_action ) @property diff --git a/autogpt_platform/backend/backend/data/human_review.py b/autogpt_platform/backend/backend/data/human_review.py index de7a30759e..c70eaa7b64 100644 --- a/autogpt_platform/backend/backend/data/human_review.py +++ b/autogpt_platform/backend/backend/data/human_review.py @@ -6,10 +6,10 @@ Handles all database operations for pending human reviews. import asyncio import logging from datetime import datetime, timezone -from typing import Optional +from typing import TYPE_CHECKING, Optional from prisma.enums import ReviewStatus -from prisma.models import PendingHumanReview +from prisma.models import AgentNodeExecution, PendingHumanReview from prisma.types import PendingHumanReviewUpdateInput from pydantic import BaseModel @@ -17,8 +17,12 @@ from backend.api.features.executions.review.model import ( PendingHumanReviewModel, SafeJsonData, ) +from backend.data.execution import get_graph_execution_meta from backend.util.json import SafeJson +if TYPE_CHECKING: + pass + logger = logging.getLogger(__name__) @@ -32,6 +36,125 @@ class ReviewResult(BaseModel): node_exec_id: str +def get_auto_approve_key(graph_exec_id: str, node_id: str) -> str: + """Generate the special nodeExecId key for auto-approval records.""" + return f"auto_approve_{graph_exec_id}_{node_id}" + + +async def check_approval( + node_exec_id: str, + graph_exec_id: str, + node_id: str, + user_id: str, + input_data: SafeJsonData | None = None, +) -> Optional[ReviewResult]: + """ + Check if there's an existing approval for this node execution. + + Checks both: + 1. Normal approval by node_exec_id (previous run of the same node execution) + 2. 
Auto-approval by special key pattern "auto_approve_{graph_exec_id}_{node_id}" + + Args: + node_exec_id: ID of the node execution + graph_exec_id: ID of the graph execution + node_id: ID of the node definition (not execution) + user_id: ID of the user (for data isolation) + input_data: Current input data (used for auto-approvals to avoid stale data) + + Returns: + ReviewResult if approval found (either normal or auto), None otherwise + """ + auto_approve_key = get_auto_approve_key(graph_exec_id, node_id) + + # Check for either normal approval or auto-approval in a single query + existing_review = await PendingHumanReview.prisma().find_first( + where={ + "OR": [ + {"nodeExecId": node_exec_id}, + {"nodeExecId": auto_approve_key}, + ], + "status": ReviewStatus.APPROVED, + "userId": user_id, + }, + ) + + if existing_review: + is_auto_approval = existing_review.nodeExecId == auto_approve_key + logger.info( + f"Found {'auto-' if is_auto_approval else ''}approval for node {node_id} " + f"(exec: {node_exec_id}) in execution {graph_exec_id}" + ) + # For auto-approvals, use current input_data to avoid replaying stale payload + # For normal approvals, use the stored payload (which may have been edited) + return ReviewResult( + data=( + input_data + if is_auto_approval and input_data is not None + else existing_review.payload + ), + status=ReviewStatus.APPROVED, + message=( + "Auto-approved (user approved all future actions for this node)" + if is_auto_approval + else existing_review.reviewMessage or "" + ), + processed=True, + node_exec_id=existing_review.nodeExecId, + ) + + return None + + +async def create_auto_approval_record( + user_id: str, + graph_exec_id: str, + graph_id: str, + graph_version: int, + node_id: str, + payload: SafeJsonData, +) -> None: + """ + Create an auto-approval record for a node in this execution. + + This is stored as a PendingHumanReview with a special nodeExecId pattern + and status=APPROVED, so future executions of the same node can skip review. + + Raises: + ValueError: If the graph execution doesn't belong to the user + """ + # Validate that the graph execution belongs to this user (defense in depth) + graph_exec = await get_graph_execution_meta( + user_id=user_id, execution_id=graph_exec_id + ) + if not graph_exec: + raise ValueError( + f"Graph execution {graph_exec_id} not found or doesn't belong to user {user_id}" + ) + + auto_approve_key = get_auto_approve_key(graph_exec_id, node_id) + + await PendingHumanReview.prisma().upsert( + where={"nodeExecId": auto_approve_key}, + data={ + "create": { + "nodeExecId": auto_approve_key, + "userId": user_id, + "graphExecId": graph_exec_id, + "graphId": graph_id, + "graphVersion": graph_version, + "payload": SafeJson(payload), + "instructions": "Auto-approval record", + "editable": False, + "status": ReviewStatus.APPROVED, + "processed": True, + "reviewedAt": datetime.now(timezone.utc), + }, + "update": {}, # Already exists, no update needed + }, + ) + + async def get_or_create_human_review( user_id: str, node_exec_id: str, @@ -108,6 +231,87 @@ async def get_or_create_human_review( ) +async def get_pending_review_by_node_exec_id( + node_exec_id: str, user_id: str +) -> Optional["PendingHumanReviewModel"]: + """ + Get a pending review by its node execution ID. 
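+
+    Only reviews still in WAITING status are matched; approved or rejected
+    reviews return None.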
+ + Args: + node_exec_id: The node execution ID to look up + user_id: User ID for authorization (only returns if review belongs to this user) + + Returns: + The pending review if found and belongs to user, None otherwise + """ + review = await PendingHumanReview.prisma().find_first( + where={ + "nodeExecId": node_exec_id, + "userId": user_id, + "status": ReviewStatus.WAITING, + } + ) + + if not review: + return None + + # Local import to avoid event loop conflicts in tests + from backend.data.execution import get_node_execution + + node_exec = await get_node_execution(review.nodeExecId) + node_id = node_exec.node_id if node_exec else review.nodeExecId + return PendingHumanReviewModel.from_db(review, node_id=node_id) + + +async def get_pending_reviews_by_node_exec_ids( + node_exec_ids: list[str], user_id: str +) -> dict[str, "PendingHumanReviewModel"]: + """ + Get multiple pending reviews by their node execution IDs in a single batch query. + + Args: + node_exec_ids: List of node execution IDs to look up + user_id: User ID for authorization (only returns reviews belonging to this user) + + Returns: + Dictionary mapping node_exec_id -> PendingHumanReviewModel for found reviews + """ + if not node_exec_ids: + return {} + + reviews = await PendingHumanReview.prisma().find_many( + where={ + "nodeExecId": {"in": node_exec_ids}, + "userId": user_id, + "status": ReviewStatus.WAITING, + } + ) + + if not reviews: + return {} + + # Batch fetch all node executions to avoid N+1 queries + node_exec_ids_to_fetch = [review.nodeExecId for review in reviews] + node_execs = await AgentNodeExecution.prisma().find_many( + where={"id": {"in": node_exec_ids_to_fetch}}, + include={"Node": True}, + ) + + # Create mapping from node_exec_id to node_id + node_exec_id_to_node_id = { + node_exec.id: node_exec.agentNodeId for node_exec in node_execs + } + + result = {} + for review in reviews: + node_id = node_exec_id_to_node_id.get(review.nodeExecId, review.nodeExecId) + result[review.nodeExecId] = PendingHumanReviewModel.from_db( + review, node_id=node_id + ) + + return result + + async def has_pending_reviews_for_graph_exec(graph_exec_id: str) -> bool: """ Check if a graph execution has any pending reviews. 
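The approval check above is two-tier: an APPROVED row stored under the node execution's own ID wins and replays its stored payload (which the reviewer may have edited), while a row stored under the synthetic `auto_approve_{graph_exec_id}_{node_id}` key approves every future run of that node within the same execution and deliberately returns the *current* input instead of the stored one. A minimal sketch of that lookup order, with a plain dict standing in for the Prisma table (all names here are illustrative, not the real API):

```python
from dataclasses import dataclass, field
from typing import Any, Optional


def get_auto_approve_key(graph_exec_id: str, node_id: str) -> str:
    # Same key pattern as the diff: one APPROVED record per
    # (graph execution, node definition) covers all future runs of the node.
    return f"auto_approve_{graph_exec_id}_{node_id}"


@dataclass
class InMemoryReviewStore:
    # Stand-in for PendingHumanReview rows, keyed by nodeExecId.
    approved_payloads: dict[str, Any] = field(default_factory=dict)

    def check_approval(
        self,
        node_exec_id: str,
        graph_exec_id: str,
        node_id: str,
        input_data: Optional[Any] = None,
    ) -> Optional[Any]:
        # 1) Exact approval for this node execution: replay the stored
        #    payload, since the reviewer may have edited it.
        if node_exec_id in self.approved_payloads:
            return self.approved_payloads[node_exec_id]
        # 2) Auto-approval for this node within this graph execution:
        #    use the *current* input to avoid replaying a stale payload.
        if get_auto_approve_key(graph_exec_id, node_id) in self.approved_payloads:
            return input_data
        return None


store = InMemoryReviewStore()
store.approved_payloads[get_auto_approve_key("exec-1", "node-a")] = {"stale": True}
# The auto-approval path returns the fresh input, not the stored payload:
assert store.check_approval("ne-2", "exec-1", "node-a", {"fresh": True}) == {
    "fresh": True
}
```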
@@ -137,8 +341,11 @@ async def get_pending_reviews_for_user( page_size: Number of reviews per page Returns: - List of pending review models + List of pending review models with node_id included """ + # Local import to avoid event loop conflicts in tests + from backend.data.execution import get_node_execution + # Calculate offset for pagination offset = (page - 1) * page_size @@ -149,7 +356,14 @@ async def get_pending_reviews_for_user( take=page_size, ) - return [PendingHumanReviewModel.from_db(review) for review in reviews] + # Fetch node_id for each review from NodeExecution + result = [] + for review in reviews: + node_exec = await get_node_execution(review.nodeExecId) + node_id = node_exec.node_id if node_exec else review.nodeExecId + result.append(PendingHumanReviewModel.from_db(review, node_id=node_id)) + + return result async def get_pending_reviews_for_execution( @@ -163,8 +377,11 @@ async def get_pending_reviews_for_execution( user_id: User ID for security validation Returns: - List of pending review models + List of pending review models with node_id included """ + # Local import to avoid event loop conflicts in tests + from backend.data.execution import get_node_execution + reviews = await PendingHumanReview.prisma().find_many( where={ "userId": user_id, @@ -174,7 +391,14 @@ async def get_pending_reviews_for_execution( order={"createdAt": "asc"}, ) - return [PendingHumanReviewModel.from_db(review) for review in reviews] + # Fetch node_id for each review from NodeExecution + result = [] + for review in reviews: + node_exec = await get_node_execution(review.nodeExecId) + node_id = node_exec.node_id if node_exec else review.nodeExecId + result.append(PendingHumanReviewModel.from_db(review, node_id=node_id)) + + return result async def process_all_reviews_for_execution( @@ -244,11 +468,19 @@ async def process_all_reviews_for_execution( # Note: Execution resumption is now handled at the API layer after ALL reviews # for an execution are processed (both approved and rejected) - # Return as dict for easy access - return { - review.nodeExecId: PendingHumanReviewModel.from_db(review) - for review in updated_reviews - } + # Fetch node_id for each review and return as dict for easy access + # Local import to avoid event loop conflicts in tests + from backend.data.execution import get_node_execution + + result = {} + for review in updated_reviews: + node_exec = await get_node_execution(review.nodeExecId) + node_id = node_exec.node_id if node_exec else review.nodeExecId + result[review.nodeExecId] = PendingHumanReviewModel.from_db( + review, node_id=node_id + ) + + return result async def update_review_processed_status(node_exec_id: str, processed: bool) -> None: @@ -256,3 +488,44 @@ async def update_review_processed_status(node_exec_id: str, processed: bool) -> await PendingHumanReview.prisma().update( where={"nodeExecId": node_exec_id}, data={"processed": processed} ) + + +async def cancel_pending_reviews_for_execution(graph_exec_id: str, user_id: str) -> int: + """ + Cancel all pending reviews for a graph execution (e.g., when execution is stopped). + + Marks all WAITING reviews as REJECTED with a message indicating the execution was stopped. 
+ + Args: + graph_exec_id: The graph execution ID + user_id: User ID who owns the execution (for security validation) + + Returns: + Number of reviews cancelled + + Raises: + ValueError: If the graph execution doesn't belong to the user + """ + # Validate user ownership before cancelling reviews + graph_exec = await get_graph_execution_meta( + user_id=user_id, execution_id=graph_exec_id + ) + if not graph_exec: + raise ValueError( + f"Graph execution {graph_exec_id} not found or doesn't belong to user {user_id}" + ) + + result = await PendingHumanReview.prisma().update_many( + where={ + "graphExecId": graph_exec_id, + "userId": user_id, + "status": ReviewStatus.WAITING, + }, + data={ + "status": ReviewStatus.REJECTED, + "reviewMessage": "Execution was stopped by user", + "processed": True, + "reviewedAt": datetime.now(timezone.utc), + }, + ) + return result diff --git a/autogpt_platform/backend/backend/data/human_review_test.py b/autogpt_platform/backend/backend/data/human_review_test.py index c349fdde46..baa5c0c0c4 100644 --- a/autogpt_platform/backend/backend/data/human_review_test.py +++ b/autogpt_platform/backend/backend/data/human_review_test.py @@ -36,7 +36,7 @@ def sample_db_review(): return mock_review -@pytest.mark.asyncio +@pytest.mark.asyncio(loop_scope="function") async def test_get_or_create_human_review_new( mocker: pytest_mock.MockFixture, sample_db_review, @@ -46,8 +46,8 @@ async def test_get_or_create_human_review_new( sample_db_review.status = ReviewStatus.WAITING sample_db_review.processed = False - mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") - mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review) + mock_prisma = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") + mock_prisma.return_value.upsert = AsyncMock(return_value=sample_db_review) result = await get_or_create_human_review( user_id="test-user-123", @@ -64,7 +64,7 @@ async def test_get_or_create_human_review_new( assert result is None -@pytest.mark.asyncio +@pytest.mark.asyncio(loop_scope="function") async def test_get_or_create_human_review_approved( mocker: pytest_mock.MockFixture, sample_db_review, @@ -75,8 +75,8 @@ async def test_get_or_create_human_review_approved( sample_db_review.processed = False sample_db_review.reviewMessage = "Looks good" - mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") - mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review) + mock_prisma = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") + mock_prisma.return_value.upsert = AsyncMock(return_value=sample_db_review) result = await get_or_create_human_review( user_id="test-user-123", @@ -96,7 +96,7 @@ async def test_get_or_create_human_review_approved( assert result.message == "Looks good" -@pytest.mark.asyncio +@pytest.mark.asyncio(loop_scope="function") async def test_has_pending_reviews_for_graph_exec_true( mocker: pytest_mock.MockFixture, ): @@ -109,7 +109,7 @@ async def test_has_pending_reviews_for_graph_exec_true( assert result is True -@pytest.mark.asyncio +@pytest.mark.asyncio(loop_scope="function") async def test_has_pending_reviews_for_graph_exec_false( mocker: pytest_mock.MockFixture, ): @@ -122,7 +122,7 @@ async def test_has_pending_reviews_for_graph_exec_false( assert result is False -@pytest.mark.asyncio +@pytest.mark.asyncio(loop_scope="function") async def test_get_pending_reviews_for_user( mocker: pytest_mock.MockFixture, sample_db_review, @@ -131,10 +131,19 @@ async def 
test_get_pending_reviews_for_user( mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review]) + # Mock get_node_execution to return node with node_id (async function) + mock_node_exec = Mock() + mock_node_exec.node_id = "test_node_def_789" + mocker.patch( + "backend.data.execution.get_node_execution", + new=AsyncMock(return_value=mock_node_exec), + ) + result = await get_pending_reviews_for_user("test_user", page=2, page_size=10) assert len(result) == 1 assert result[0].node_exec_id == "test_node_123" + assert result[0].node_id == "test_node_def_789" # Verify pagination parameters call_args = mock_find_many.return_value.find_many.call_args @@ -142,7 +151,7 @@ async def test_get_pending_reviews_for_user( assert call_args.kwargs["take"] == 10 -@pytest.mark.asyncio +@pytest.mark.asyncio(loop_scope="function") async def test_get_pending_reviews_for_execution( mocker: pytest_mock.MockFixture, sample_db_review, @@ -151,12 +160,21 @@ async def test_get_pending_reviews_for_execution( mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review]) + # Mock get_node_execution to return node with node_id (async function) + mock_node_exec = Mock() + mock_node_exec.node_id = "test_node_def_789" + mocker.patch( + "backend.data.execution.get_node_execution", + new=AsyncMock(return_value=mock_node_exec), + ) + result = await get_pending_reviews_for_execution( "test_graph_exec_456", "test-user-123" ) assert len(result) == 1 assert result[0].graph_exec_id == "test_graph_exec_456" + assert result[0].node_id == "test_node_def_789" # Verify it filters by execution and user call_args = mock_find_many.return_value.find_many.call_args @@ -166,7 +184,7 @@ async def test_get_pending_reviews_for_execution( assert where_clause["status"] == ReviewStatus.WAITING -@pytest.mark.asyncio +@pytest.mark.asyncio(loop_scope="function") async def test_process_all_reviews_for_execution_success( mocker: pytest_mock.MockFixture, sample_db_review, @@ -201,6 +219,14 @@ async def test_process_all_reviews_for_execution_success( new=AsyncMock(return_value=[updated_review]), ) + # Mock get_node_execution to return node with node_id (async function) + mock_node_exec = Mock() + mock_node_exec.node_id = "test_node_def_789" + mocker.patch( + "backend.data.execution.get_node_execution", + new=AsyncMock(return_value=mock_node_exec), + ) + result = await process_all_reviews_for_execution( user_id="test-user-123", review_decisions={ @@ -211,9 +237,10 @@ async def test_process_all_reviews_for_execution_success( assert len(result) == 1 assert "test_node_123" in result assert result["test_node_123"].status == ReviewStatus.APPROVED + assert result["test_node_123"].node_id == "test_node_def_789" -@pytest.mark.asyncio +@pytest.mark.asyncio(loop_scope="function") async def test_process_all_reviews_for_execution_validation_errors( mocker: pytest_mock.MockFixture, ): @@ -233,7 +260,7 @@ async def test_process_all_reviews_for_execution_validation_errors( ) -@pytest.mark.asyncio +@pytest.mark.asyncio(loop_scope="function") async def test_process_all_reviews_edit_permission_error( mocker: pytest_mock.MockFixture, sample_db_review, @@ -259,7 +286,7 @@ async def test_process_all_reviews_edit_permission_error( ) -@pytest.mark.asyncio +@pytest.mark.asyncio(loop_scope="function") async def test_process_all_reviews_mixed_approval_rejection( mocker: 
pytest_mock.MockFixture, sample_db_review, @@ -329,6 +356,14 @@ async def test_process_all_reviews_mixed_approval_rejection( new=AsyncMock(return_value=[approved_review, rejected_review]), ) + # Mock get_node_execution to return node with node_id (async function) + mock_node_exec = Mock() + mock_node_exec.node_id = "test_node_def_789" + mocker.patch( + "backend.data.execution.get_node_execution", + new=AsyncMock(return_value=mock_node_exec), + ) + result = await process_all_reviews_for_execution( user_id="test-user-123", review_decisions={ @@ -340,3 +375,5 @@ async def test_process_all_reviews_mixed_approval_rejection( assert len(result) == 2 assert "test_node_123" in result assert "test_node_456" in result + assert result["test_node_123"].node_id == "test_node_def_789" + assert result["test_node_456"].node_id == "test_node_def_789" diff --git a/autogpt_platform/backend/backend/data/understanding.py b/autogpt_platform/backend/backend/data/understanding.py index eb63d719ca..c604e046b6 100644 --- a/autogpt_platform/backend/backend/data/understanding.py +++ b/autogpt_platform/backend/backend/data/understanding.py @@ -328,6 +328,8 @@ async def clear_business_understanding(user_id: str) -> bool: def format_understanding_for_prompt(understanding: BusinessUnderstanding) -> str: """Format business understanding as text for system prompt injection.""" + if not understanding: + return "" sections = [] # User info section diff --git a/autogpt_platform/backend/backend/executor/database.py b/autogpt_platform/backend/backend/executor/database.py index ac381bbd67..ae7474fc1d 100644 --- a/autogpt_platform/backend/backend/executor/database.py +++ b/autogpt_platform/backend/backend/executor/database.py @@ -50,6 +50,8 @@ from backend.data.graph import ( validate_graph_execution_permissions, ) from backend.data.human_review import ( + cancel_pending_reviews_for_execution, + check_approval, get_or_create_human_review, has_pending_reviews_for_graph_exec, update_review_processed_status, @@ -190,6 +192,8 @@ class DatabaseManager(AppService): get_user_notification_preference = _(get_user_notification_preference) # Human In The Loop + cancel_pending_reviews_for_execution = _(cancel_pending_reviews_for_execution) + check_approval = _(check_approval) get_or_create_human_review = _(get_or_create_human_review) has_pending_reviews_for_graph_exec = _(has_pending_reviews_for_graph_exec) update_review_processed_status = _(update_review_processed_status) @@ -313,6 +317,8 @@ class DatabaseManagerAsyncClient(AppServiceClient): set_execution_kv_data = d.set_execution_kv_data # Human In The Loop + cancel_pending_reviews_for_execution = d.cancel_pending_reviews_for_execution + check_approval = d.check_approval get_or_create_human_review = d.get_or_create_human_review update_review_processed_status = d.update_review_processed_status diff --git a/autogpt_platform/backend/backend/executor/scheduler.py b/autogpt_platform/backend/backend/executor/scheduler.py index c2aa81b10a..44b77fc018 100644 --- a/autogpt_platform/backend/backend/executor/scheduler.py +++ b/autogpt_platform/backend/backend/executor/scheduler.py @@ -309,7 +309,7 @@ def ensure_embeddings_coverage(): # Process in batches until no more missing embeddings while True: - result = db_client.backfill_missing_embeddings(batch_size=10) + result = db_client.backfill_missing_embeddings(batch_size=100) total_processed += result["processed"] total_success += result["success"] diff --git a/autogpt_platform/backend/backend/executor/utils.py 
b/autogpt_platform/backend/backend/executor/utils.py index 25f0389e99..f35bebb125 100644 --- a/autogpt_platform/backend/backend/executor/utils.py +++ b/autogpt_platform/backend/backend/executor/utils.py @@ -10,6 +10,7 @@ from pydantic import BaseModel, JsonValue, ValidationError from backend.data import execution as execution_db from backend.data import graph as graph_db +from backend.data import human_review as human_review_db from backend.data import onboarding as onboarding_db from backend.data import user as user_db from backend.data.block import ( @@ -749,9 +750,27 @@ async def stop_graph_execution( if graph_exec.status in [ ExecutionStatus.QUEUED, ExecutionStatus.INCOMPLETE, + ExecutionStatus.REVIEW, ]: - # If the graph is still on the queue, we can prevent them from being executed - # by setting the status to TERMINATED. + # If the graph is queued/incomplete/paused for review, terminate immediately + # No need to wait for executor since it's not actively running + + # If graph is in REVIEW status, clean up pending reviews before terminating + if graph_exec.status == ExecutionStatus.REVIEW: + # Use human_review_db if Prisma connected, else database manager + review_db = ( + human_review_db + if prisma.is_connected() + else get_database_manager_async_client() + ) + # Mark all pending reviews as rejected/cancelled + cancelled_count = await review_db.cancel_pending_reviews_for_execution( + graph_exec_id, user_id + ) + logger.info( + f"Cancelled {cancelled_count} pending review(s) for stopped execution {graph_exec_id}" + ) + graph_exec.status = ExecutionStatus.TERMINATED await asyncio.gather( @@ -873,11 +892,8 @@ async def add_graph_execution( settings = await gdb.get_graph_settings(user_id=user_id, graph_id=graph_id) execution_context = ExecutionContext( - safe_mode=( - settings.human_in_the_loop_safe_mode - if settings.human_in_the_loop_safe_mode is not None - else True - ), + human_in_the_loop_safe_mode=settings.human_in_the_loop_safe_mode, + sensitive_action_safe_mode=settings.sensitive_action_safe_mode, user_timezone=( user.timezone if user.timezone != USER_TIMEZONE_NOT_SET else "UTC" ), @@ -890,9 +906,28 @@ async def add_graph_execution( nodes_to_skip=nodes_to_skip, execution_context=execution_context, ) - logger.info(f"Publishing execution {graph_exec.id} to execution queue") + logger.info(f"Queueing execution {graph_exec.id}") + + # Update execution status to QUEUED BEFORE publishing to prevent race condition + # where two concurrent requests could both publish the same execution + updated_exec = await edb.update_graph_execution_stats( + graph_exec_id=graph_exec.id, + status=ExecutionStatus.QUEUED, + ) + + # Verify the status update succeeded (prevents duplicate queueing in race conditions) + # If another request already updated the status, this execution will not be QUEUED + if not updated_exec or updated_exec.status != ExecutionStatus.QUEUED: + logger.warning( + f"Skipping queue publish for execution {graph_exec.id} - " + f"status update failed or execution already queued by another request" + ) + return graph_exec + + graph_exec.status = ExecutionStatus.QUEUED # Publish to execution queue for executor to pick up + # This happens AFTER status update to ensure only one request publishes exec_queue = await get_async_execution_queue() await exec_queue.publish_message( routing_key=GRAPH_EXECUTION_ROUTING_KEY, @@ -900,13 +935,6 @@ async def add_graph_execution( exchange=GRAPH_EXECUTION_EXCHANGE, ) logger.info(f"Published execution {graph_exec.id} to RabbitMQ queue") - - # Update 
execution status to QUEUED - graph_exec.status = ExecutionStatus.QUEUED - await edb.update_graph_execution_stats( - graph_exec_id=graph_exec.id, - status=graph_exec.status, - ) except BaseException as e: err = str(e) or type(e).__name__ if not graph_exec: diff --git a/autogpt_platform/backend/backend/executor/utils_test.py b/autogpt_platform/backend/backend/executor/utils_test.py index 0e652f9627..4761a18c63 100644 --- a/autogpt_platform/backend/backend/executor/utils_test.py +++ b/autogpt_platform/backend/backend/executor/utils_test.py @@ -4,6 +4,7 @@ import pytest from pytest_mock import MockerFixture from backend.data.dynamic_fields import merge_execution_input, parse_execution_output +from backend.data.execution import ExecutionStatus from backend.util.mock import MockObject @@ -346,6 +347,7 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture): mock_graph_exec = mocker.MagicMock(spec=GraphExecutionWithNodes) mock_graph_exec.id = "execution-id-123" mock_graph_exec.node_executions = [] # Add this to avoid AttributeError + mock_graph_exec.status = ExecutionStatus.QUEUED # Required for race condition check mock_graph_exec.to_graph_execution_entry.return_value = mocker.MagicMock() # Mock the queue and event bus @@ -386,6 +388,7 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture): mock_user.timezone = "UTC" mock_settings = mocker.MagicMock() mock_settings.human_in_the_loop_safe_mode = True + mock_settings.sensitive_action_safe_mode = False mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user) mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings) @@ -610,6 +613,7 @@ async def test_add_graph_execution_with_nodes_to_skip(mocker: MockerFixture): mock_graph_exec = mocker.MagicMock(spec=GraphExecutionWithNodes) mock_graph_exec.id = "execution-id-123" mock_graph_exec.node_executions = [] + mock_graph_exec.status = ExecutionStatus.QUEUED # Required for race condition check # Track what's passed to to_graph_execution_entry captured_kwargs = {} @@ -651,6 +655,7 @@ async def test_add_graph_execution_with_nodes_to_skip(mocker: MockerFixture): mock_user.timezone = "UTC" mock_settings = mocker.MagicMock() mock_settings.human_in_the_loop_safe_mode = True + mock_settings.sensitive_action_safe_mode = False mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user) mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings) @@ -668,3 +673,232 @@ async def test_add_graph_execution_with_nodes_to_skip(mocker: MockerFixture): # Verify nodes_to_skip was passed to to_graph_execution_entry assert "nodes_to_skip" in captured_kwargs assert captured_kwargs["nodes_to_skip"] == nodes_to_skip + + +@pytest.mark.asyncio +async def test_stop_graph_execution_in_review_status_cancels_pending_reviews( + mocker: MockerFixture, +): + """Test that stopping an execution in REVIEW status cancels pending reviews.""" + from backend.data.execution import ExecutionStatus, GraphExecutionMeta + from backend.executor.utils import stop_graph_execution + + user_id = "test-user" + graph_exec_id = "test-exec-123" + + # Mock graph execution in REVIEW status + mock_graph_exec = mocker.MagicMock(spec=GraphExecutionMeta) + mock_graph_exec.id = graph_exec_id + mock_graph_exec.status = ExecutionStatus.REVIEW + + # Mock dependencies + mock_get_queue = mocker.patch("backend.executor.utils.get_async_execution_queue") + mock_queue_client = mocker.AsyncMock() + mock_get_queue.return_value = mock_queue_client + + mock_prisma = 
mocker.patch("backend.executor.utils.prisma") + mock_prisma.is_connected.return_value = True + + mock_human_review_db = mocker.patch("backend.executor.utils.human_review_db") + mock_human_review_db.cancel_pending_reviews_for_execution = mocker.AsyncMock( + return_value=2 # 2 reviews cancelled + ) + + mock_execution_db = mocker.patch("backend.executor.utils.execution_db") + mock_execution_db.get_graph_execution_meta = mocker.AsyncMock( + return_value=mock_graph_exec + ) + mock_execution_db.update_graph_execution_stats = mocker.AsyncMock() + + mock_get_event_bus = mocker.patch( + "backend.executor.utils.get_async_execution_event_bus" + ) + mock_event_bus = mocker.MagicMock() + mock_event_bus.publish = mocker.AsyncMock() + mock_get_event_bus.return_value = mock_event_bus + + mock_get_child_executions = mocker.patch( + "backend.executor.utils._get_child_executions" + ) + mock_get_child_executions.return_value = [] # No children + + # Call stop_graph_execution with timeout to allow status check + await stop_graph_execution( + user_id=user_id, + graph_exec_id=graph_exec_id, + wait_timeout=1.0, # Wait to allow status check + cascade=True, + ) + + # Verify pending reviews were cancelled + mock_human_review_db.cancel_pending_reviews_for_execution.assert_called_once_with( + graph_exec_id, user_id + ) + + # Verify execution status was updated to TERMINATED + mock_execution_db.update_graph_execution_stats.assert_called_once() + call_kwargs = mock_execution_db.update_graph_execution_stats.call_args[1] + assert call_kwargs["graph_exec_id"] == graph_exec_id + assert call_kwargs["status"] == ExecutionStatus.TERMINATED + + +@pytest.mark.asyncio +async def test_stop_graph_execution_with_database_manager_when_prisma_disconnected( + mocker: MockerFixture, +): + """Test that stop uses database manager when Prisma is not connected.""" + from backend.data.execution import ExecutionStatus, GraphExecutionMeta + from backend.executor.utils import stop_graph_execution + + user_id = "test-user" + graph_exec_id = "test-exec-456" + + # Mock graph execution in REVIEW status + mock_graph_exec = mocker.MagicMock(spec=GraphExecutionMeta) + mock_graph_exec.id = graph_exec_id + mock_graph_exec.status = ExecutionStatus.REVIEW + + # Mock dependencies + mock_get_queue = mocker.patch("backend.executor.utils.get_async_execution_queue") + mock_queue_client = mocker.AsyncMock() + mock_get_queue.return_value = mock_queue_client + + # Prisma is NOT connected + mock_prisma = mocker.patch("backend.executor.utils.prisma") + mock_prisma.is_connected.return_value = False + + # Mock database manager client + mock_get_db_manager = mocker.patch( + "backend.executor.utils.get_database_manager_async_client" + ) + mock_db_manager = mocker.AsyncMock() + mock_db_manager.get_graph_execution_meta = mocker.AsyncMock( + return_value=mock_graph_exec + ) + mock_db_manager.cancel_pending_reviews_for_execution = mocker.AsyncMock( + return_value=3 # 3 reviews cancelled + ) + mock_db_manager.update_graph_execution_stats = mocker.AsyncMock() + mock_get_db_manager.return_value = mock_db_manager + + mock_get_event_bus = mocker.patch( + "backend.executor.utils.get_async_execution_event_bus" + ) + mock_event_bus = mocker.MagicMock() + mock_event_bus.publish = mocker.AsyncMock() + mock_get_event_bus.return_value = mock_event_bus + + mock_get_child_executions = mocker.patch( + "backend.executor.utils._get_child_executions" + ) + mock_get_child_executions.return_value = [] # No children + + # Call stop_graph_execution with timeout + await stop_graph_execution( + 
user_id=user_id, + graph_exec_id=graph_exec_id, + wait_timeout=1.0, + cascade=True, + ) + + # Verify database manager was used for cancel_pending_reviews + mock_db_manager.cancel_pending_reviews_for_execution.assert_called_once_with( + graph_exec_id, user_id + ) + + # Verify execution status was updated via database manager + mock_db_manager.update_graph_execution_stats.assert_called_once() + + +@pytest.mark.asyncio +async def test_stop_graph_execution_cascades_to_child_with_reviews( + mocker: MockerFixture, +): + """Test that stopping parent execution cascades to children and cancels their reviews.""" + from backend.data.execution import ExecutionStatus, GraphExecutionMeta + from backend.executor.utils import stop_graph_execution + + user_id = "test-user" + parent_exec_id = "parent-exec" + child_exec_id = "child-exec" + + # Mock parent execution in RUNNING status + mock_parent_exec = mocker.MagicMock(spec=GraphExecutionMeta) + mock_parent_exec.id = parent_exec_id + mock_parent_exec.status = ExecutionStatus.RUNNING + + # Mock child execution in REVIEW status + mock_child_exec = mocker.MagicMock(spec=GraphExecutionMeta) + mock_child_exec.id = child_exec_id + mock_child_exec.status = ExecutionStatus.REVIEW + + # Mock dependencies + mock_get_queue = mocker.patch("backend.executor.utils.get_async_execution_queue") + mock_queue_client = mocker.AsyncMock() + mock_get_queue.return_value = mock_queue_client + + mock_prisma = mocker.patch("backend.executor.utils.prisma") + mock_prisma.is_connected.return_value = True + + mock_human_review_db = mocker.patch("backend.executor.utils.human_review_db") + mock_human_review_db.cancel_pending_reviews_for_execution = mocker.AsyncMock( + return_value=1 # 1 child review cancelled + ) + + # Mock execution_db to return different status based on which execution is queried + mock_execution_db = mocker.patch("backend.executor.utils.execution_db") + + # Track call count to simulate status transition + call_count = {"count": 0} + + async def get_exec_meta_side_effect(execution_id, user_id): + call_count["count"] += 1 + if execution_id == parent_exec_id: + # After a few calls (child processing happens), transition parent to TERMINATED + # This simulates the executor service processing the stop request + if call_count["count"] > 3: + mock_parent_exec.status = ExecutionStatus.TERMINATED + return mock_parent_exec + elif execution_id == child_exec_id: + return mock_child_exec + return None + + mock_execution_db.get_graph_execution_meta = mocker.AsyncMock( + side_effect=get_exec_meta_side_effect + ) + mock_execution_db.update_graph_execution_stats = mocker.AsyncMock() + + mock_get_event_bus = mocker.patch( + "backend.executor.utils.get_async_execution_event_bus" + ) + mock_event_bus = mocker.MagicMock() + mock_event_bus.publish = mocker.AsyncMock() + mock_get_event_bus.return_value = mock_event_bus + + # Mock _get_child_executions to return the child + mock_get_child_executions = mocker.patch( + "backend.executor.utils._get_child_executions" + ) + + def get_children_side_effect(parent_id): + if parent_id == parent_exec_id: + return [mock_child_exec] + return [] + + mock_get_child_executions.side_effect = get_children_side_effect + + # Call stop_graph_execution on parent with cascade=True + await stop_graph_execution( + user_id=user_id, + graph_exec_id=parent_exec_id, + wait_timeout=1.0, + cascade=True, + ) + + # Verify child reviews were cancelled + mock_human_review_db.cancel_pending_reviews_for_execution.assert_called_once_with( + child_exec_id, user_id + ) + + # Verify 
both parent and child status updates + assert mock_execution_db.update_graph_execution_stats.call_count >= 1 diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index 4448aeb77e..8d34292803 100644 --- a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -350,6 +350,19 @@ class Config(UpdateTrackingModel["Config"], BaseSettings): description="Whether to mark failed scans as clean or not", ) + agentgenerator_host: str = Field( + default="", + description="The host for the Agent Generator service (empty to use built-in)", + ) + agentgenerator_port: int = Field( + default=8000, + description="The port for the Agent Generator service", + ) + agentgenerator_timeout: int = Field( + default=120, + description="The timeout in seconds for Agent Generator service requests", + ) + enable_example_blocks: bool = Field( default=False, description="Whether to enable example blocks in production", @@ -666,6 +679,12 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings): default="https://cloud.langfuse.com", description="Langfuse host URL" ) + # PostHog analytics + posthog_api_key: str = Field(default="", description="PostHog API key") + posthog_host: str = Field( + default="https://eu.i.posthog.com", description="PostHog host URL" + ) + # Add more secret fields as needed model_config = SettingsConfigDict( env_file=".env", diff --git a/autogpt_platform/backend/backend/util/test.py b/autogpt_platform/backend/backend/util/test.py index 1e8244ff8e..0a539644ee 100644 --- a/autogpt_platform/backend/backend/util/test.py +++ b/autogpt_platform/backend/backend/util/test.py @@ -1,3 +1,4 @@ +import asyncio import inspect import logging import time @@ -58,6 +59,11 @@ class SpinTestServer: self.db_api.__exit__(exc_type, exc_val, exc_tb) self.notif_manager.__exit__(exc_type, exc_val, exc_tb) + # Give services time to fully shut down + # This prevents event loop issues where services haven't fully cleaned up + # before the next test starts + await asyncio.sleep(0.5) + def setup_dependency_overrides(self): # Override get_user_id for testing self.agent_server.set_test_dependency_overrides( diff --git a/autogpt_platform/backend/migrations/20260109181714_add_docs_embedding/migration.sql b/autogpt_platform/backend/migrations/20260109181714_add_docs_embedding/migration.sql index 0897e1865a..a839070a28 100644 --- a/autogpt_platform/backend/migrations/20260109181714_add_docs_embedding/migration.sql +++ b/autogpt_platform/backend/migrations/20260109181714_add_docs_embedding/migration.sql @@ -1,11 +1,37 @@ -- CreateExtension -- Supabase: pgvector must be enabled via Dashboard → Database → Extensions first --- Create in public schema so vector type is available across all schemas +-- Ensures vector extension is in the current schema (from DATABASE_URL ?schema= param) +-- If it exists in a different schema (e.g., public), we drop and recreate it in the current schema +-- This ensures vector type is in the same schema as tables, making ::vector work without explicit qualification DO $$ +DECLARE + current_schema_name text; + vector_schema text; BEGIN - CREATE EXTENSION IF NOT EXISTS "vector" WITH SCHEMA "public"; -EXCEPTION WHEN OTHERS THEN - RAISE NOTICE 'vector extension not available or already exists, skipping'; + -- Get the current schema from search_path + SELECT current_schema() INTO current_schema_name; + + -- Check if vector extension exists and which schema it's in + SELECT n.nspname INTO 
vector_schema + FROM pg_extension e + JOIN pg_namespace n ON e.extnamespace = n.oid + WHERE e.extname = 'vector'; + + -- Handle removal if in wrong schema + IF vector_schema IS NOT NULL AND vector_schema != current_schema_name THEN + BEGIN + -- Vector exists in a different schema, drop it first + RAISE WARNING 'pgvector found in schema "%" but need it in "%". Dropping and reinstalling...', + vector_schema, current_schema_name; + EXECUTE 'DROP EXTENSION IF EXISTS vector CASCADE'; + EXCEPTION WHEN OTHERS THEN + RAISE EXCEPTION 'Failed to drop pgvector from schema "%": %. You may need to drop it manually.', + vector_schema, SQLERRM; + END; + END IF; + + -- Create extension in current schema (let it fail naturally if not available) + EXECUTE format('CREATE EXTENSION IF NOT EXISTS vector SCHEMA %I', current_schema_name); END $$; -- CreateEnum @@ -19,7 +45,7 @@ CREATE TABLE "UnifiedContentEmbedding" ( "contentType" "ContentType" NOT NULL, "contentId" TEXT NOT NULL, "userId" TEXT, - "embedding" public.vector(1536) NOT NULL, + "embedding" vector(1536) NOT NULL, "searchableText" TEXT NOT NULL, "metadata" JSONB NOT NULL DEFAULT '{}', @@ -45,4 +71,4 @@ CREATE UNIQUE INDEX "UnifiedContentEmbedding_contentType_contentId_userId_key" O -- Uses cosine distance operator (<=>), which matches the query in hybrid_search.py -- Note: Drop first in case Prisma created a btree index (Prisma doesn't support HNSW) DROP INDEX IF EXISTS "UnifiedContentEmbedding_embedding_idx"; -CREATE INDEX "UnifiedContentEmbedding_embedding_idx" ON "UnifiedContentEmbedding" USING hnsw ("embedding" public.vector_cosine_ops); +CREATE INDEX "UnifiedContentEmbedding_embedding_idx" ON "UnifiedContentEmbedding" USING hnsw ("embedding" vector_cosine_ops); diff --git a/autogpt_platform/backend/migrations/20260112173500_add_supabase_extensions_to_platform_schema/migration.sql b/autogpt_platform/backend/migrations/20260112173500_add_supabase_extensions_to_platform_schema/migration.sql deleted file mode 100644 index ca91bc5cab..0000000000 --- a/autogpt_platform/backend/migrations/20260112173500_add_supabase_extensions_to_platform_schema/migration.sql +++ /dev/null @@ -1,71 +0,0 @@ --- Acknowledge Supabase-managed extensions to prevent drift warnings --- These extensions are pre-installed by Supabase in specific schemas --- This migration ensures they exist where available (Supabase) or skips gracefully (CI) - --- Create schemas (safe in both CI and Supabase) -CREATE SCHEMA IF NOT EXISTS "extensions"; - --- Extensions that exist in both CI and Supabase -DO $$ -BEGIN - CREATE EXTENSION IF NOT EXISTS "pgcrypto" WITH SCHEMA "extensions"; -EXCEPTION WHEN OTHERS THEN - RAISE NOTICE 'pgcrypto extension not available, skipping'; -END $$; - -DO $$ -BEGIN - CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA "extensions"; -EXCEPTION WHEN OTHERS THEN - RAISE NOTICE 'uuid-ossp extension not available, skipping'; -END $$; - --- Supabase-specific extensions (skip gracefully in CI) -DO $$ -BEGIN - CREATE EXTENSION IF NOT EXISTS "pg_stat_statements" WITH SCHEMA "extensions"; -EXCEPTION WHEN OTHERS THEN - RAISE NOTICE 'pg_stat_statements extension not available, skipping'; -END $$; - -DO $$ -BEGIN - CREATE EXTENSION IF NOT EXISTS "pg_net" WITH SCHEMA "extensions"; -EXCEPTION WHEN OTHERS THEN - RAISE NOTICE 'pg_net extension not available, skipping'; -END $$; - -DO $$ -BEGIN - CREATE EXTENSION IF NOT EXISTS "pgjwt" WITH SCHEMA "extensions"; -EXCEPTION WHEN OTHERS THEN - RAISE NOTICE 'pgjwt extension not available, skipping'; -END $$; - -DO $$ -BEGIN - CREATE 
SCHEMA IF NOT EXISTS "graphql"; - CREATE EXTENSION IF NOT EXISTS "pg_graphql" WITH SCHEMA "graphql"; -EXCEPTION WHEN OTHERS THEN - RAISE NOTICE 'pg_graphql extension not available, skipping'; -END $$; - -DO $$ -BEGIN - CREATE SCHEMA IF NOT EXISTS "pgsodium"; - CREATE EXTENSION IF NOT EXISTS "pgsodium" WITH SCHEMA "pgsodium"; -EXCEPTION WHEN OTHERS THEN - RAISE NOTICE 'pgsodium extension not available, skipping'; -END $$; - -DO $$ -BEGIN - CREATE SCHEMA IF NOT EXISTS "vault"; - CREATE EXTENSION IF NOT EXISTS "supabase_vault" WITH SCHEMA "vault"; -EXCEPTION WHEN OTHERS THEN - RAISE NOTICE 'supabase_vault extension not available, skipping'; -END $$; - - --- Return to platform -CREATE SCHEMA IF NOT EXISTS "platform"; \ No newline at end of file diff --git a/autogpt_platform/backend/migrations/20260121200000_remove_node_execution_fk_from_pending_human_review/migration.sql b/autogpt_platform/backend/migrations/20260121200000_remove_node_execution_fk_from_pending_human_review/migration.sql new file mode 100644 index 0000000000..c43cb0b1e0 --- /dev/null +++ b/autogpt_platform/backend/migrations/20260121200000_remove_node_execution_fk_from_pending_human_review/migration.sql @@ -0,0 +1,7 @@ +-- Remove NodeExecution foreign key from PendingHumanReview +-- The nodeExecId column remains as the primary key, but we remove the FK constraint +-- to AgentNodeExecution since PendingHumanReview records can persist after node +-- execution records are deleted. + +-- Drop foreign key constraint that linked PendingHumanReview.nodeExecId to AgentNodeExecution.id +ALTER TABLE "PendingHumanReview" DROP CONSTRAINT IF EXISTS "PendingHumanReview_nodeExecId_fkey"; diff --git a/autogpt_platform/backend/poetry.lock b/autogpt_platform/backend/poetry.lock index 2aa55ce5b6..91ac358ade 100644 --- a/autogpt_platform/backend/poetry.lock +++ b/autogpt_platform/backend/poetry.lock @@ -4204,14 +4204,14 @@ strenum = {version = ">=0.4.9,<0.5.0", markers = "python_version < \"3.11\""} [[package]] name = "posthog" -version = "6.1.1" +version = "7.6.0" description = "Integrate PostHog into any python application." 
optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["main"] files = [ - {file = "posthog-6.1.1-py3-none-any.whl", hash = "sha256:329fd3d06b4d54cec925f47235bd8e327c91403c2f9ec38f1deb849535934dba"}, - {file = "posthog-6.1.1.tar.gz", hash = "sha256:b453f54c4a2589da859fd575dd3bf86fcb40580727ec399535f268b1b9f318b8"}, + {file = "posthog-7.6.0-py3-none-any.whl", hash = "sha256:c4dd78cf77c4fecceb965f86066e5ac37886ef867d68ffe75a1db5d681d7d9ad"}, + {file = "posthog-7.6.0.tar.gz", hash = "sha256:941dfd278ee427c9b14640f09b35b5bb52a71bdf028d7dbb7307e1838fd3002e"}, ] [package.dependencies] @@ -4225,7 +4225,7 @@ typing-extensions = ">=4.2.0" [package.extras] dev = ["django-stubs", "lxml", "mypy", "mypy-baseline", "packaging", "pre-commit", "pydantic", "ruff", "setuptools", "tomli", "tomli_w", "twine", "types-mock", "types-python-dateutil", "types-requests", "types-setuptools", "types-six", "wheel"] langchain = ["langchain (>=0.2.0)"] -test = ["anthropic", "coverage", "django", "freezegun (==1.5.1)", "google-genai", "langchain-anthropic (>=0.3.15)", "langchain-community (>=0.3.25)", "langchain-core (>=0.3.65)", "langchain-openai (>=0.3.22)", "langgraph (>=0.4.8)", "mock (>=2.0.0)", "openai", "parameterized (>=0.8.1)", "pydantic", "pytest", "pytest-asyncio", "pytest-timeout"] +test = ["anthropic (>=0.72)", "coverage", "django", "freezegun (==1.5.1)", "google-genai", "langchain-anthropic (>=1.0)", "langchain-community (>=0.4)", "langchain-core (>=1.0)", "langchain-openai (>=1.0)", "langgraph (>=1.0)", "mock (>=2.0.0)", "openai (>=2.0)", "parameterized (>=0.8.1)", "pydantic", "pytest", "pytest-asyncio", "pytest-timeout"] [[package]] name = "postmarker" @@ -7512,4 +7512,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.1" python-versions = ">=3.10,<3.14" -content-hash = "18b92e09596298c82432e4d0a85cb6d80a40b4229bee0a0c15f0529fd6cb21a4" +content-hash = "ee5742dc1a9df50dfc06d4b26a1682cbb2b25cab6b79ce5625ec272f93e4f4bf" diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml index 1f489d2bc4..fe263e47c0 100644 --- a/autogpt_platform/backend/pyproject.toml +++ b/autogpt_platform/backend/pyproject.toml @@ -85,6 +85,7 @@ exa-py = "^1.14.20" croniter = "^6.0.0" stagehand = "^0.5.1" gravitas-md2gdocs = "^0.1.0" +posthog = "^7.6.0" [tool.poetry.group.dev.dependencies] aiohappyeyeballs = "^2.6.1" diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma index 4a2a7b583a..de94600820 100644 --- a/autogpt_platform/backend/schema.prisma +++ b/autogpt_platform/backend/schema.prisma @@ -517,8 +517,6 @@ model AgentNodeExecution { stats Json? - PendingHumanReview PendingHumanReview? - @@index([agentGraphExecutionId, agentNodeId, executionStatus]) @@index([agentNodeId, executionStatus]) @@index([addedTime, queuedTime]) @@ -567,6 +565,7 @@ enum ReviewStatus { } // Pending human reviews for Human-in-the-loop blocks +// Also stores auto-approval records with special nodeExecId patterns (e.g., "auto_approve_{graph_exec_id}_{node_id}") model PendingHumanReview { nodeExecId String @id userId String @@ -585,7 +584,6 @@ model PendingHumanReview { reviewedAt DateTime? 
User User @relation(fields: [userId], references: [id], onDelete: Cascade) - NodeExecution AgentNodeExecution @relation(fields: [nodeExecId], references: [id], onDelete: Cascade) GraphExecution AgentGraphExecution @relation(fields: [graphExecId], references: [id], onDelete: Cascade) @@unique([nodeExecId]) // One pending review per node execution diff --git a/autogpt_platform/backend/scripts/generate_block_docs.py b/autogpt_platform/backend/scripts/generate_block_docs.py index 1a49cc8805..bb60eddb5d 100644 --- a/autogpt_platform/backend/scripts/generate_block_docs.py +++ b/autogpt_platform/backend/scripts/generate_block_docs.py @@ -34,7 +34,10 @@ logger = logging.getLogger(__name__) # Default output directory relative to repo root DEFAULT_OUTPUT_DIR = ( - Path(__file__).parent.parent.parent.parent / "docs" / "integrations" + Path(__file__).parent.parent.parent.parent + / "docs" + / "integrations" + / "block-integrations" ) @@ -366,12 +369,12 @@ def generate_block_markdown( lines.append("") # What it is (full description) - lines.append(f"### What it is") + lines.append("### What it is") lines.append(block.description or "No description available.") lines.append("") # How it works (manual section) - lines.append(f"### How it works") + lines.append("### How it works") how_it_works = manual_content.get( "how_it_works", "_Add technical explanation here._" ) @@ -383,7 +386,7 @@ def generate_block_markdown( # Inputs table (auto-generated) visible_inputs = [f for f in block.inputs if not f.hidden] if visible_inputs: - lines.append(f"### Inputs") + lines.append("### Inputs") lines.append("") lines.append("| Input | Description | Type | Required |") lines.append("|-------|-------------|------|----------|") @@ -400,7 +403,7 @@ def generate_block_markdown( # Outputs table (auto-generated) visible_outputs = [f for f in block.outputs if not f.hidden] if visible_outputs: - lines.append(f"### Outputs") + lines.append("### Outputs") lines.append("") lines.append("| Output | Description | Type |") lines.append("|--------|-------------|------|") @@ -414,13 +417,21 @@ def generate_block_markdown( lines.append("") # Possible use case (manual section) - lines.append(f"### Possible use case") + lines.append("### Possible use case") use_case = manual_content.get("use_case", "_Add practical use case examples here._") lines.append("") lines.append(use_case) lines.append("") lines.append("") + # Optional per-block extras (only include if has content) + extras = manual_content.get("extras", "") + if extras: + lines.append("") + lines.append(extras) + lines.append("") + lines.append("") + lines.append("---") lines.append("") @@ -456,25 +467,52 @@ def get_block_file_mapping(blocks: list[BlockDoc]) -> dict[str, list[BlockDoc]]: return dict(file_mapping) -def generate_overview_table(blocks: list[BlockDoc]) -> str: - """Generate the overview table markdown (blocks.md).""" +def generate_overview_table(blocks: list[BlockDoc], block_dir_prefix: str = "") -> str: + """Generate the overview table markdown (blocks.md). 
+ + Args: + blocks: List of block documentation objects + block_dir_prefix: Prefix for block file links (e.g., "block-integrations/") + """ lines = [] + # GitBook YAML frontmatter + lines.append("---") + lines.append("layout:") + lines.append(" width: default") + lines.append(" title:") + lines.append(" visible: true") + lines.append(" description:") + lines.append(" visible: true") + lines.append(" tableOfContents:") + lines.append(" visible: false") + lines.append(" outline:") + lines.append(" visible: true") + lines.append(" pagination:") + lines.append(" visible: true") + lines.append(" metadata:") + lines.append(" visible: true") + lines.append("---") + lines.append("") + lines.append("# AutoGPT Blocks Overview") lines.append("") lines.append( 'AutoGPT uses a modular approach with various "blocks" to handle different tasks. These blocks are the building blocks of AutoGPT workflows, allowing users to create complex automations by combining simple, specialized components.' ) lines.append("") - lines.append('!!! info "Creating Your Own Blocks"') - lines.append(" Want to create your own custom blocks? Check out our guides:") - lines.append(" ") + lines.append('{% hint style="info" %}') + lines.append("**Creating Your Own Blocks**") + lines.append("") + lines.append("Want to create your own custom blocks? Check out our guides:") + lines.append("") lines.append( - " - [Build your own Blocks](https://docs.agpt.co/platform/new_blocks/) - Step-by-step tutorial with examples" + "* [Build your own Blocks](https://docs.agpt.co/platform/new_blocks/) - Step-by-step tutorial with examples" ) lines.append( - " - [Block SDK Guide](https://docs.agpt.co/platform/block-sdk-guide/) - Advanced SDK patterns with OAuth, webhooks, and provider configuration" + "* [Block SDK Guide](https://docs.agpt.co/platform/block-sdk-guide/) - Advanced SDK patterns with OAuth, webhooks, and provider configuration" ) + lines.append("{% endhint %}") lines.append("") lines.append( "Below is a comprehensive list of all available blocks, categorized by their primary function. Click on any block name to view its detailed documentation." @@ -537,7 +575,8 @@ def generate_overview_table(blocks: list[BlockDoc]) -> str: else "No description" ) short_desc = short_desc.replace("\n", " ").replace("|", "\\|") - lines.append(f"| [{block.name}]({file_path}#{anchor}) | {short_desc} |") + link_path = f"{block_dir_prefix}{file_path}" + lines.append(f"| [{block.name}]({link_path}#{anchor}) | {short_desc} |") lines.append("") continue @@ -563,13 +602,55 @@ def generate_overview_table(blocks: list[BlockDoc]) -> str: ) short_desc = short_desc.replace("\n", " ").replace("|", "\\|") - lines.append(f"| [{block.name}]({file_path}#{anchor}) | {short_desc} |") + link_path = f"{block_dir_prefix}{file_path}" + lines.append(f"| [{block.name}]({link_path}#{anchor}) | {short_desc} |") lines.append("") return "\n".join(lines) +def generate_summary_md( + blocks: list[BlockDoc], root_dir: Path, block_dir_prefix: str = "" +) -> str: + """Generate SUMMARY.md for GitBook navigation. 
+ + Args: + blocks: List of block documentation objects + root_dir: The root docs directory (e.g., docs/integrations/) + block_dir_prefix: Prefix for block file links (e.g., "block-integrations/") + """ + lines = [] + lines.append("# Table of contents") + lines.append("") + lines.append("* [AutoGPT Blocks Overview](README.md)") + lines.append("") + + # Check for guides/ directory at the root level (docs/integrations/guides/) + guides_dir = root_dir / "guides" + if guides_dir.exists(): + lines.append("## Guides") + lines.append("") + for guide_file in sorted(guides_dir.glob("*.md")): + # Use just the file name for title (replace hyphens/underscores with spaces) + title = file_path_to_title(guide_file.stem.replace("-", "_") + ".md") + lines.append(f"* [{title}](guides/{guide_file.name})") + lines.append("") + + lines.append("## Block Integrations") + lines.append("") + + file_mapping = get_block_file_mapping(blocks) + for file_path in sorted(file_mapping.keys()): + title = file_path_to_title(file_path) + link_path = f"{block_dir_prefix}{file_path}" + lines.append(f"* [{title}]({link_path})") + + lines.append("") + + return "\n".join(lines) + + def load_all_blocks_for_docs() -> list[BlockDoc]: """Load all blocks and extract documentation.""" from backend.blocks import load_all_blocks @@ -653,6 +734,16 @@ def write_block_docs( ) ) + # Add file-level additional_content section if present + file_additional = extract_manual_content(existing_content).get( + "additional_content", "" + ) + if file_additional: + content_parts.append("") + content_parts.append(file_additional) + content_parts.append("") + content_parts.append("") + full_content = file_header + "\n" + "\n".join(content_parts) generated_files[str(file_path)] = full_content @@ -661,14 +752,28 @@ def write_block_docs( full_path.write_text(full_content) - # Generate overview file - overview_content = generate_overview_table(blocks) - overview_path = output_dir / "README.md" + # Generate overview file at the parent directory (docs/integrations/) + # with links prefixed to point into block-integrations/ + root_dir = output_dir.parent + block_dir_name = output_dir.name # "block-integrations" + block_dir_prefix = f"{block_dir_name}/" + + overview_content = generate_overview_table(blocks, block_dir_prefix) + overview_path = root_dir / "README.md" generated_files["README.md"] = overview_content overview_path.write_text(overview_content) if verbose: - print(" Writing README.md (overview)") + print(" Writing README.md (overview) to parent directory") + + # Generate SUMMARY.md for GitBook navigation at the parent directory + summary_content = generate_summary_md(blocks, root_dir, block_dir_prefix) + summary_path = root_dir / "SUMMARY.md" + generated_files["SUMMARY.md"] = summary_content + summary_path.write_text(summary_content) + + if verbose: + print(" Writing SUMMARY.md (navigation) to parent directory") return generated_files @@ -748,6 +853,16 @@ def check_docs_in_sync(output_dir: Path, blocks: list[BlockDoc]) -> bool: elif block_match.group(1).strip() != expected_block_content.strip(): mismatched_blocks.append(block.name) + # Add file-level additional_content to expected content (matches write_block_docs) + file_additional = extract_manual_content(existing_content).get( + "additional_content", "" + ) + if file_additional: + content_parts.append("") + content_parts.append(file_additional) + content_parts.append("") + content_parts.append("") + expected_content = file_header + "\n" + "\n".join(content_parts) if existing_content.strip() != 
expected_content.strip(): @@ -757,11 +872,15 @@ def check_docs_in_sync(output_dir: Path, blocks: list[BlockDoc]) -> bool: out_of_sync_details.append((file_path, mismatched_blocks)) all_match = False - # Check overview - overview_path = output_dir / "README.md" + # Check overview at the parent directory (docs/integrations/) + root_dir = output_dir.parent + block_dir_name = output_dir.name # "block-integrations" + block_dir_prefix = f"{block_dir_name}/" + + overview_path = root_dir / "README.md" if overview_path.exists(): existing_overview = overview_path.read_text() - expected_overview = generate_overview_table(blocks) + expected_overview = generate_overview_table(blocks, block_dir_prefix) if existing_overview.strip() != expected_overview.strip(): print("OUT OF SYNC: README.md (overview)") print(" The blocks overview table needs regeneration") @@ -772,6 +891,21 @@ def check_docs_in_sync(output_dir: Path, blocks: list[BlockDoc]) -> bool: out_of_sync_details.append(("README.md", ["overview table"])) all_match = False + # Check SUMMARY.md at the parent directory + summary_path = root_dir / "SUMMARY.md" + if summary_path.exists(): + existing_summary = summary_path.read_text() + expected_summary = generate_summary_md(blocks, root_dir, block_dir_prefix) + if existing_summary.strip() != expected_summary.strip(): + print("OUT OF SYNC: SUMMARY.md (navigation)") + print(" The GitBook navigation needs regeneration") + out_of_sync_details.append(("SUMMARY.md", ["navigation"])) + all_match = False + else: + print("MISSING: SUMMARY.md (navigation)") + out_of_sync_details.append(("SUMMARY.md", ["navigation"])) + all_match = False + # Check for unfilled manual sections unfilled_patterns = [ "_Add a description of this category of blocks._", diff --git a/autogpt_platform/backend/snapshots/grph_single b/autogpt_platform/backend/snapshots/grph_single index 7ce8695e6b..1811a57ec8 100644 --- a/autogpt_platform/backend/snapshots/grph_single +++ b/autogpt_platform/backend/snapshots/grph_single @@ -11,6 +11,7 @@ "forked_from_version": null, "has_external_trigger": false, "has_human_in_the_loop": false, + "has_sensitive_action": false, "id": "graph-123", "input_schema": { "properties": {}, diff --git a/autogpt_platform/backend/snapshots/grphs_all b/autogpt_platform/backend/snapshots/grphs_all index f69b45a6de..0b314d96f9 100644 --- a/autogpt_platform/backend/snapshots/grphs_all +++ b/autogpt_platform/backend/snapshots/grphs_all @@ -11,6 +11,7 @@ "forked_from_version": null, "has_external_trigger": false, "has_human_in_the_loop": false, + "has_sensitive_action": false, "id": "graph-123", "input_schema": { "properties": {}, diff --git a/autogpt_platform/backend/snapshots/lib_agts_search b/autogpt_platform/backend/snapshots/lib_agts_search index c8e3cc73a6..67c307b09e 100644 --- a/autogpt_platform/backend/snapshots/lib_agts_search +++ b/autogpt_platform/backend/snapshots/lib_agts_search @@ -27,6 +27,8 @@ "properties": {} }, "has_external_trigger": false, + "has_human_in_the_loop": false, + "has_sensitive_action": false, "trigger_setup_info": null, "new_output": false, "can_access_graph": true, @@ -34,7 +36,8 @@ "is_favorite": false, "recommended_schedule_cron": null, "settings": { - "human_in_the_loop_safe_mode": null + "human_in_the_loop_safe_mode": true, + "sensitive_action_safe_mode": false }, "marketplace_listing": null }, @@ -65,6 +68,8 @@ "properties": {} }, "has_external_trigger": false, + "has_human_in_the_loop": false, + "has_sensitive_action": false, "trigger_setup_info": null, "new_output": false, 
"can_access_graph": false, @@ -72,7 +77,8 @@ "is_favorite": false, "recommended_schedule_cron": null, "settings": { - "human_in_the_loop_safe_mode": null + "human_in_the_loop_safe_mode": true, + "sensitive_action_safe_mode": false }, "marketplace_listing": null } diff --git a/autogpt_platform/backend/test/agent_generator/__init__.py b/autogpt_platform/backend/test/agent_generator/__init__.py new file mode 100644 index 0000000000..8fcde1fa0f --- /dev/null +++ b/autogpt_platform/backend/test/agent_generator/__init__.py @@ -0,0 +1 @@ +"""Tests for agent generator module.""" diff --git a/autogpt_platform/backend/test/agent_generator/test_core_integration.py b/autogpt_platform/backend/test/agent_generator/test_core_integration.py new file mode 100644 index 0000000000..bdcc24ba79 --- /dev/null +++ b/autogpt_platform/backend/test/agent_generator/test_core_integration.py @@ -0,0 +1,273 @@ +""" +Tests for the Agent Generator core module. + +This test suite verifies that the core functions correctly delegate to +the external Agent Generator service. +""" + +from unittest.mock import AsyncMock, patch + +import pytest + +from backend.api.features.chat.tools.agent_generator import core +from backend.api.features.chat.tools.agent_generator.core import ( + AgentGeneratorNotConfiguredError, +) + + +class TestServiceNotConfigured: + """Test that functions raise AgentGeneratorNotConfiguredError when service is not configured.""" + + @pytest.mark.asyncio + async def test_decompose_goal_raises_when_not_configured(self): + """Test that decompose_goal raises error when service not configured.""" + with patch.object(core, "is_external_service_configured", return_value=False): + with pytest.raises(AgentGeneratorNotConfiguredError): + await core.decompose_goal("Build a chatbot") + + @pytest.mark.asyncio + async def test_generate_agent_raises_when_not_configured(self): + """Test that generate_agent raises error when service not configured.""" + with patch.object(core, "is_external_service_configured", return_value=False): + with pytest.raises(AgentGeneratorNotConfiguredError): + await core.generate_agent({"steps": []}) + + @pytest.mark.asyncio + async def test_generate_agent_patch_raises_when_not_configured(self): + """Test that generate_agent_patch raises error when service not configured.""" + with patch.object(core, "is_external_service_configured", return_value=False): + with pytest.raises(AgentGeneratorNotConfiguredError): + await core.generate_agent_patch("Add a node", {"nodes": []}) + + +class TestDecomposeGoal: + """Test decompose_goal function service delegation.""" + + @pytest.mark.asyncio + async def test_calls_external_service(self): + """Test that decompose_goal calls the external service.""" + expected_result = {"type": "instructions", "steps": ["Step 1"]} + + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "decompose_goal_external", new_callable=AsyncMock + ) as mock_external: + mock_external.return_value = expected_result + + result = await core.decompose_goal("Build a chatbot") + + mock_external.assert_called_once_with("Build a chatbot", "") + assert result == expected_result + + @pytest.mark.asyncio + async def test_passes_context_to_external_service(self): + """Test that decompose_goal passes context to external service.""" + expected_result = {"type": "instructions", "steps": ["Step 1"]} + + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "decompose_goal_external", 
new_callable=AsyncMock + ) as mock_external: + mock_external.return_value = expected_result + + await core.decompose_goal("Build a chatbot", "Use Python") + + mock_external.assert_called_once_with("Build a chatbot", "Use Python") + + @pytest.mark.asyncio + async def test_returns_none_on_service_failure(self): + """Test that decompose_goal returns None when external service fails.""" + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "decompose_goal_external", new_callable=AsyncMock + ) as mock_external: + mock_external.return_value = None + + result = await core.decompose_goal("Build a chatbot") + + assert result is None + + +class TestGenerateAgent: + """Test generate_agent function service delegation.""" + + @pytest.mark.asyncio + async def test_calls_external_service(self): + """Test that generate_agent calls the external service.""" + expected_result = {"name": "Test Agent", "nodes": [], "links": []} + + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "generate_agent_external", new_callable=AsyncMock + ) as mock_external: + mock_external.return_value = expected_result + + instructions = {"type": "instructions", "steps": ["Step 1"]} + result = await core.generate_agent(instructions) + + mock_external.assert_called_once_with(instructions) + # Result should have id, version, is_active added if not present + assert result is not None + assert result["name"] == "Test Agent" + assert "id" in result + assert result["version"] == 1 + assert result["is_active"] is True + + @pytest.mark.asyncio + async def test_preserves_existing_id_and_version(self): + """Test that external service result preserves existing id and version.""" + expected_result = { + "id": "existing-id", + "version": 3, + "is_active": False, + "name": "Test Agent", + } + + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "generate_agent_external", new_callable=AsyncMock + ) as mock_external: + mock_external.return_value = expected_result.copy() + + result = await core.generate_agent({"steps": []}) + + assert result is not None + assert result["id"] == "existing-id" + assert result["version"] == 3 + assert result["is_active"] is False + + @pytest.mark.asyncio + async def test_returns_none_when_external_service_fails(self): + """Test that generate_agent returns None when external service fails.""" + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "generate_agent_external", new_callable=AsyncMock + ) as mock_external: + mock_external.return_value = None + + result = await core.generate_agent({"steps": []}) + + assert result is None + + +class TestGenerateAgentPatch: + """Test generate_agent_patch function service delegation.""" + + @pytest.mark.asyncio + async def test_calls_external_service(self): + """Test that generate_agent_patch calls the external service.""" + expected_result = {"name": "Updated Agent", "nodes": [], "links": []} + + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "generate_agent_patch_external", new_callable=AsyncMock + ) as mock_external: + mock_external.return_value = expected_result + + current_agent = {"nodes": [], "links": []} + result = await core.generate_agent_patch("Add a node", current_agent) + + mock_external.assert_called_once_with("Add a node", current_agent) + assert result == expected_result + + 
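+    # Like the other suites in this module, these tests patch
+    # is_external_service_configured to True and swap the external call for
+    # an AsyncMock, exercising the delegation logic without real HTTP traffic.
+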
@pytest.mark.asyncio + async def test_returns_clarifying_questions(self): + """Test that generate_agent_patch returns clarifying questions.""" + expected_result = { + "type": "clarifying_questions", + "questions": [{"question": "What type of node?"}], + } + + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "generate_agent_patch_external", new_callable=AsyncMock + ) as mock_external: + mock_external.return_value = expected_result + + result = await core.generate_agent_patch("Add a node", {"nodes": []}) + + assert result == expected_result + + @pytest.mark.asyncio + async def test_returns_none_when_external_service_fails(self): + """Test that generate_agent_patch returns None when service fails.""" + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "generate_agent_patch_external", new_callable=AsyncMock + ) as mock_external: + mock_external.return_value = None + + result = await core.generate_agent_patch("Add a node", {"nodes": []}) + + assert result is None + + +class TestJsonToGraph: + """Test json_to_graph function.""" + + def test_converts_agent_json_to_graph(self): + """Test conversion of agent JSON to Graph model.""" + agent_json = { + "id": "test-id", + "version": 2, + "is_active": True, + "name": "Test Agent", + "description": "A test agent", + "nodes": [ + { + "id": "node1", + "block_id": "block1", + "input_default": {"key": "value"}, + "metadata": {"x": 100}, + } + ], + "links": [ + { + "id": "link1", + "source_id": "node1", + "sink_id": "output", + "source_name": "result", + "sink_name": "input", + "is_static": False, + } + ], + } + + graph = core.json_to_graph(agent_json) + + assert graph.id == "test-id" + assert graph.version == 2 + assert graph.is_active is True + assert graph.name == "Test Agent" + assert graph.description == "A test agent" + assert len(graph.nodes) == 1 + assert graph.nodes[0].id == "node1" + assert graph.nodes[0].block_id == "block1" + assert len(graph.links) == 1 + assert graph.links[0].source_id == "node1" + + def test_generates_ids_if_missing(self): + """Test that missing IDs are generated.""" + agent_json = { + "name": "Test Agent", + "nodes": [{"block_id": "block1"}], + "links": [], + } + + graph = core.json_to_graph(agent_json) + + assert graph.id is not None + assert graph.nodes[0].id is not None + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/autogpt_platform/backend/test/agent_generator/test_service.py b/autogpt_platform/backend/test/agent_generator/test_service.py new file mode 100644 index 0000000000..81ff794532 --- /dev/null +++ b/autogpt_platform/backend/test/agent_generator/test_service.py @@ -0,0 +1,422 @@ +""" +Tests for the Agent Generator external service client. + +This test suite verifies the external Agent Generator service integration, +including service detection, API calls, and error handling. 
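+
+All HTTP calls are mocked at the httpx client level (by patching _get_client),
+so these tests run without a live Agent Generator service.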
+""" + +from unittest.mock import AsyncMock, MagicMock, patch + +import httpx +import pytest + +from backend.api.features.chat.tools.agent_generator import service + + +class TestServiceConfiguration: + """Test service configuration detection.""" + + def setup_method(self): + """Reset settings singleton before each test.""" + service._settings = None + service._client = None + + def test_external_service_not_configured_when_host_empty(self): + """Test that external service is not configured when host is empty.""" + mock_settings = MagicMock() + mock_settings.config.agentgenerator_host = "" + + with patch.object(service, "_get_settings", return_value=mock_settings): + assert service.is_external_service_configured() is False + + def test_external_service_configured_when_host_set(self): + """Test that external service is configured when host is set.""" + mock_settings = MagicMock() + mock_settings.config.agentgenerator_host = "agent-generator.local" + + with patch.object(service, "_get_settings", return_value=mock_settings): + assert service.is_external_service_configured() is True + + def test_get_base_url(self): + """Test base URL construction.""" + mock_settings = MagicMock() + mock_settings.config.agentgenerator_host = "agent-generator.local" + mock_settings.config.agentgenerator_port = 8000 + + with patch.object(service, "_get_settings", return_value=mock_settings): + url = service._get_base_url() + assert url == "http://agent-generator.local:8000" + + +class TestDecomposeGoalExternal: + """Test decompose_goal_external function.""" + + def setup_method(self): + """Reset client singleton before each test.""" + service._settings = None + service._client = None + + @pytest.mark.asyncio + async def test_decompose_goal_returns_instructions(self): + """Test successful decomposition returning instructions.""" + mock_response = MagicMock() + mock_response.json.return_value = { + "success": True, + "type": "instructions", + "steps": ["Step 1", "Step 2"], + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.post.return_value = mock_response + + with patch.object(service, "_get_client", return_value=mock_client): + result = await service.decompose_goal_external("Build a chatbot") + + assert result == {"type": "instructions", "steps": ["Step 1", "Step 2"]} + mock_client.post.assert_called_once_with( + "/api/decompose-description", json={"description": "Build a chatbot"} + ) + + @pytest.mark.asyncio + async def test_decompose_goal_returns_clarifying_questions(self): + """Test decomposition returning clarifying questions.""" + mock_response = MagicMock() + mock_response.json.return_value = { + "success": True, + "type": "clarifying_questions", + "questions": ["What platform?", "What language?"], + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.post.return_value = mock_response + + with patch.object(service, "_get_client", return_value=mock_client): + result = await service.decompose_goal_external("Build something") + + assert result == { + "type": "clarifying_questions", + "questions": ["What platform?", "What language?"], + } + + @pytest.mark.asyncio + async def test_decompose_goal_with_context(self): + """Test decomposition with additional context.""" + mock_response = MagicMock() + mock_response.json.return_value = { + "success": True, + "type": "instructions", + "steps": ["Step 1"], + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.post.return_value = mock_response + 
+ with patch.object(service, "_get_client", return_value=mock_client): + await service.decompose_goal_external( + "Build a chatbot", context="Use Python" + ) + + mock_client.post.assert_called_once_with( + "/api/decompose-description", + json={"description": "Build a chatbot", "user_instruction": "Use Python"}, + ) + + @pytest.mark.asyncio + async def test_decompose_goal_returns_unachievable_goal(self): + """Test decomposition returning unachievable goal response.""" + mock_response = MagicMock() + mock_response.json.return_value = { + "success": True, + "type": "unachievable_goal", + "reason": "Cannot do X", + "suggested_goal": "Try Y instead", + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.post.return_value = mock_response + + with patch.object(service, "_get_client", return_value=mock_client): + result = await service.decompose_goal_external("Do something impossible") + + assert result == { + "type": "unachievable_goal", + "reason": "Cannot do X", + "suggested_goal": "Try Y instead", + } + + @pytest.mark.asyncio + async def test_decompose_goal_handles_http_error(self): + """Test decomposition handles HTTP errors gracefully.""" + mock_client = AsyncMock() + mock_client.post.side_effect = httpx.HTTPStatusError( + "Server error", request=MagicMock(), response=MagicMock() + ) + + with patch.object(service, "_get_client", return_value=mock_client): + result = await service.decompose_goal_external("Build a chatbot") + + assert result is None + + @pytest.mark.asyncio + async def test_decompose_goal_handles_request_error(self): + """Test decomposition handles request errors gracefully.""" + mock_client = AsyncMock() + mock_client.post.side_effect = httpx.RequestError("Connection failed") + + with patch.object(service, "_get_client", return_value=mock_client): + result = await service.decompose_goal_external("Build a chatbot") + + assert result is None + + @pytest.mark.asyncio + async def test_decompose_goal_handles_service_error(self): + """Test decomposition handles service returning error.""" + mock_response = MagicMock() + mock_response.json.return_value = { + "success": False, + "error": "Internal error", + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.post.return_value = mock_response + + with patch.object(service, "_get_client", return_value=mock_client): + result = await service.decompose_goal_external("Build a chatbot") + + assert result is None + + +class TestGenerateAgentExternal: + """Test generate_agent_external function.""" + + def setup_method(self): + """Reset client singleton before each test.""" + service._settings = None + service._client = None + + @pytest.mark.asyncio + async def test_generate_agent_success(self): + """Test successful agent generation.""" + agent_json = { + "name": "Test Agent", + "nodes": [], + "links": [], + } + mock_response = MagicMock() + mock_response.json.return_value = { + "success": True, + "agent_json": agent_json, + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.post.return_value = mock_response + + instructions = {"type": "instructions", "steps": ["Step 1"]} + + with patch.object(service, "_get_client", return_value=mock_client): + result = await service.generate_agent_external(instructions) + + assert result == agent_json + mock_client.post.assert_called_once_with( + "/api/generate-agent", json={"instructions": instructions} + ) + + @pytest.mark.asyncio + async def test_generate_agent_handles_error(self): + 
"""Test agent generation handles errors gracefully.""" + mock_client = AsyncMock() + mock_client.post.side_effect = httpx.RequestError("Connection failed") + + with patch.object(service, "_get_client", return_value=mock_client): + result = await service.generate_agent_external({"steps": []}) + + assert result is None + + +class TestGenerateAgentPatchExternal: + """Test generate_agent_patch_external function.""" + + def setup_method(self): + """Reset client singleton before each test.""" + service._settings = None + service._client = None + + @pytest.mark.asyncio + async def test_generate_patch_returns_updated_agent(self): + """Test successful patch generation returning updated agent.""" + updated_agent = { + "name": "Updated Agent", + "nodes": [{"id": "1", "block_id": "test"}], + "links": [], + } + mock_response = MagicMock() + mock_response.json.return_value = { + "success": True, + "agent_json": updated_agent, + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.post.return_value = mock_response + + current_agent = {"name": "Old Agent", "nodes": [], "links": []} + + with patch.object(service, "_get_client", return_value=mock_client): + result = await service.generate_agent_patch_external( + "Add a new node", current_agent + ) + + assert result == updated_agent + mock_client.post.assert_called_once_with( + "/api/update-agent", + json={ + "update_request": "Add a new node", + "current_agent_json": current_agent, + }, + ) + + @pytest.mark.asyncio + async def test_generate_patch_returns_clarifying_questions(self): + """Test patch generation returning clarifying questions.""" + mock_response = MagicMock() + mock_response.json.return_value = { + "success": True, + "type": "clarifying_questions", + "questions": ["What type of node?"], + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.post.return_value = mock_response + + with patch.object(service, "_get_client", return_value=mock_client): + result = await service.generate_agent_patch_external( + "Add something", {"nodes": []} + ) + + assert result == { + "type": "clarifying_questions", + "questions": ["What type of node?"], + } + + +class TestHealthCheck: + """Test health_check function.""" + + def setup_method(self): + """Reset singletons before each test.""" + service._settings = None + service._client = None + + @pytest.mark.asyncio + async def test_health_check_returns_false_when_not_configured(self): + """Test health check returns False when service not configured.""" + with patch.object( + service, "is_external_service_configured", return_value=False + ): + result = await service.health_check() + assert result is False + + @pytest.mark.asyncio + async def test_health_check_returns_true_when_healthy(self): + """Test health check returns True when service is healthy.""" + mock_response = MagicMock() + mock_response.json.return_value = { + "status": "healthy", + "blocks_loaded": True, + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.get.return_value = mock_response + + with patch.object(service, "is_external_service_configured", return_value=True): + with patch.object(service, "_get_client", return_value=mock_client): + result = await service.health_check() + + assert result is True + mock_client.get.assert_called_once_with("/health") + + @pytest.mark.asyncio + async def test_health_check_returns_false_when_not_healthy(self): + """Test health check returns False when service is not healthy.""" + mock_response = 
MagicMock() + mock_response.json.return_value = { + "status": "unhealthy", + "blocks_loaded": False, + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.get.return_value = mock_response + + with patch.object(service, "is_external_service_configured", return_value=True): + with patch.object(service, "_get_client", return_value=mock_client): + result = await service.health_check() + + assert result is False + + @pytest.mark.asyncio + async def test_health_check_returns_false_on_error(self): + """Test health check returns False on connection error.""" + mock_client = AsyncMock() + mock_client.get.side_effect = httpx.RequestError("Connection failed") + + with patch.object(service, "is_external_service_configured", return_value=True): + with patch.object(service, "_get_client", return_value=mock_client): + result = await service.health_check() + + assert result is False + + +class TestGetBlocksExternal: + """Test get_blocks_external function.""" + + def setup_method(self): + """Reset client singleton before each test.""" + service._settings = None + service._client = None + + @pytest.mark.asyncio + async def test_get_blocks_success(self): + """Test successful blocks retrieval.""" + blocks = [ + {"id": "block1", "name": "Block 1"}, + {"id": "block2", "name": "Block 2"}, + ] + mock_response = MagicMock() + mock_response.json.return_value = { + "success": True, + "blocks": blocks, + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.get.return_value = mock_response + + with patch.object(service, "_get_client", return_value=mock_client): + result = await service.get_blocks_external() + + assert result == blocks + mock_client.get.assert_called_once_with("/api/blocks") + + @pytest.mark.asyncio + async def test_get_blocks_handles_error(self): + """Test blocks retrieval handles errors gracefully.""" + mock_client = AsyncMock() + mock_client.get.side_effect = httpx.RequestError("Connection failed") + + with patch.object(service, "_get_client", return_value=mock_client): + result = await service.get_blocks_external() + + assert result is None + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/autogpt_platform/frontend/.env.default b/autogpt_platform/frontend/.env.default index dc3f67efab..af250fb8bf 100644 --- a/autogpt_platform/frontend/.env.default +++ b/autogpt_platform/frontend/.env.default @@ -29,4 +29,8 @@ NEXT_PUBLIC_CLOUDFLARE_TURNSTILE_SITE_KEY= NEXT_PUBLIC_TURNSTILE=disabled # PR previews -NEXT_PUBLIC_PREVIEW_STEALING_DEV= \ No newline at end of file +NEXT_PUBLIC_PREVIEW_STEALING_DEV= + +# PostHog Analytics +NEXT_PUBLIC_POSTHOG_KEY= +NEXT_PUBLIC_POSTHOG_HOST=https://eu.i.posthog.com diff --git a/autogpt_platform/frontend/CONTRIBUTING.md b/autogpt_platform/frontend/CONTRIBUTING.md index 1b2b810986..649bb1ca92 100644 --- a/autogpt_platform/frontend/CONTRIBUTING.md +++ b/autogpt_platform/frontend/CONTRIBUTING.md @@ -175,6 +175,8 @@ While server components and actions are cool and cutting-edge, they introduce a - Prefer [React Query](https://tanstack.com/query/latest/docs/framework/react/overview) for server state, colocated near consumers (see [state colocation](https://kentcdodds.com/blog/state-colocation-will-make-your-react-app-faster)) - Co-locate UI state inside components/hooks; keep global state minimal +- Avoid `useMemo` and `useCallback` unless you have a measured performance issue +- Do not abuse `useEffect`; prefer state colocation and derive values directly when possible ### 
Styling and components @@ -549,9 +551,48 @@ Files: Types: - Prefer `interface` for object shapes -- Component props should be `interface Props { ... }` +- Component props should be `interface Props { ... }` (not exported) +- Only use specific exported names (e.g., `export interface MyComponentProps`) when the interface needs to be used outside the component +- Keep type definitions inline with the component - do not create separate `types.ts` files unless types are shared across multiple files - Use precise types; avoid `any` and unsafe casts +**Props naming examples:** + +```tsx +// ✅ Good - internal props, not exported +interface Props { + title: string; + onClose: () => void; +} + +export function Modal({ title, onClose }: Props) { + // ... +} + +// ✅ Good - exported when needed externally +export interface ModalProps { + title: string; + onClose: () => void; +} + +export function Modal({ title, onClose }: ModalProps) { + // ... +} + +// ❌ Bad - unnecessarily specific name for internal use +interface ModalComponentProps { + title: string; + onClose: () => void; +} + +// ❌ Bad - separate types.ts file for single component +// types.ts +export interface ModalProps { ... } + +// Modal.tsx +import type { ModalProps } from './types'; +``` + Parameters: - If more than one parameter is needed, pass a single `Args` object for clarity diff --git a/autogpt_platform/frontend/orval.config.ts b/autogpt_platform/frontend/orval.config.ts index dff857e1b6..9e09467518 100644 --- a/autogpt_platform/frontend/orval.config.ts +++ b/autogpt_platform/frontend/orval.config.ts @@ -16,6 +16,12 @@ export default defineConfig({ client: "react-query", httpClient: "fetch", indexFiles: false, + mock: { + type: "msw", + baseUrl: "http://localhost:3000/api/proxy", + generateEachHttpStatus: true, + delay: 0, + }, override: { mutator: { path: "./mutators/custom-mutator.ts", diff --git a/autogpt_platform/frontend/package.json b/autogpt_platform/frontend/package.json index 0823400c87..f22a182d20 100644 --- a/autogpt_platform/frontend/package.json +++ b/autogpt_platform/frontend/package.json @@ -15,6 +15,8 @@ "types": "tsc --noEmit", "test": "NEXT_PUBLIC_PW_TEST=true next build --turbo && playwright test", "test-ui": "NEXT_PUBLIC_PW_TEST=true next build --turbo && playwright test --ui", + "test:unit": "vitest run", + "test:unit:watch": "vitest", "test:no-build": "playwright test", "gentests": "playwright codegen http://localhost:3000", "storybook": "storybook dev -p 6006", @@ -32,6 +34,7 @@ "@hookform/resolvers": "5.2.2", "@next/third-parties": "15.4.6", "@phosphor-icons/react": "2.1.10", + "@posthog/react": "1.7.0", "@radix-ui/react-accordion": "1.2.12", "@radix-ui/react-alert-dialog": "1.1.15", "@radix-ui/react-avatar": "1.1.10", @@ -89,6 +92,7 @@ "next-themes": "0.4.6", "nuqs": "2.7.2", "party-js": "2.2.0", + "posthog-js": "1.334.1", "react": "18.3.1", "react-currency-input-field": "4.0.3", "react-day-picker": "9.11.1", @@ -127,6 +131,8 @@ "@storybook/nextjs": "9.1.5", "@tanstack/eslint-plugin-query": "5.91.2", "@tanstack/react-query-devtools": "5.90.2", + "@testing-library/dom": "10.4.1", + "@testing-library/react": "16.3.2", "@types/canvas-confetti": "1.9.0", "@types/lodash": "4.17.20", "@types/negotiator": "0.6.4", @@ -135,6 +141,7 @@ "@types/react-dom": "18.3.5", "@types/react-modal": "3.16.3", "@types/react-window": "1.8.8", + "@vitejs/plugin-react": "5.1.2", "axe-playwright": "2.2.2", "chromatic": "13.3.3", "concurrently": "9.2.1", @@ -142,6 +149,7 @@ "eslint": "8.57.1", "eslint-config-next": "15.5.7", 
"eslint-plugin-storybook": "9.1.5", + "happy-dom": "20.3.4", "import-in-the-middle": "2.0.2", "msw": "2.11.6", "msw-storybook-addon": "2.0.6", @@ -153,7 +161,9 @@ "require-in-the-middle": "8.0.1", "storybook": "9.1.5", "tailwindcss": "3.4.17", - "typescript": "5.9.3" + "typescript": "5.9.3", + "vite-tsconfig-paths": "6.0.4", + "vitest": "4.0.17" }, "msw": { "workerDirectory": [ diff --git a/autogpt_platform/frontend/pnpm-lock.yaml b/autogpt_platform/frontend/pnpm-lock.yaml index f503c23ea8..db891ccf3f 100644 --- a/autogpt_platform/frontend/pnpm-lock.yaml +++ b/autogpt_platform/frontend/pnpm-lock.yaml @@ -23,6 +23,9 @@ importers: '@phosphor-icons/react': specifier: 2.1.10 version: 2.1.10(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@posthog/react': + specifier: 1.7.0 + version: 1.7.0(@types/react@18.3.17)(posthog-js@1.334.1)(react@18.3.1) '@radix-ui/react-accordion': specifier: 1.2.12 version: 1.2.12(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -194,6 +197,9 @@ importers: party-js: specifier: 2.2.0 version: 2.2.0 + posthog-js: + specifier: 1.334.1 + version: 1.334.1 react: specifier: 18.3.1 version: 18.3.1 @@ -275,7 +281,7 @@ importers: devDependencies: '@chromatic-com/storybook': specifier: 4.1.2 - version: 4.1.2(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)) + version: 4.1.2(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2))) '@opentelemetry/instrumentation': specifier: 0.209.0 version: 0.209.0(@opentelemetry/api@1.9.0) @@ -284,25 +290,31 @@ importers: version: 1.56.1 '@storybook/addon-a11y': specifier: 9.1.5 - version: 9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)) + version: 9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2))) '@storybook/addon-docs': specifier: 9.1.5 - version: 9.1.5(@types/react@18.3.17)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)) + version: 9.1.5(@types/react@18.3.17)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2))) '@storybook/addon-links': specifier: 9.1.5 - version: 9.1.5(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)) + version: 9.1.5(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2))) '@storybook/addon-onboarding': specifier: 9.1.5 - version: 9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)) + version: 9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2))) '@storybook/nextjs': specifier: 9.1.5 - version: 
9.1.5(esbuild@0.25.12)(next@15.4.10(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(type-fest@4.41.0)(typescript@5.9.3)(webpack-hot-middleware@2.26.1)(webpack@5.104.1(esbuild@0.25.12)) + version: 9.1.5(esbuild@0.25.12)(next@15.4.10(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))(type-fest@4.41.0)(typescript@5.9.3)(webpack-hot-middleware@2.26.1)(webpack@5.104.1(esbuild@0.25.12)) '@tanstack/eslint-plugin-query': specifier: 5.91.2 version: 5.91.2(eslint@8.57.1)(typescript@5.9.3) '@tanstack/react-query-devtools': specifier: 5.90.2 version: 5.90.2(@tanstack/react-query@5.90.6(react@18.3.1))(react@18.3.1) + '@testing-library/dom': + specifier: 10.4.1 + version: 10.4.1 + '@testing-library/react': + specifier: 16.3.2 + version: 16.3.2(@testing-library/dom@10.4.1)(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@types/canvas-confetti': specifier: 1.9.0 version: 1.9.0 @@ -327,6 +339,9 @@ importers: '@types/react-window': specifier: 1.8.8 version: 1.8.8 + '@vitejs/plugin-react': + specifier: 5.1.2 + version: 5.1.2(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) axe-playwright: specifier: 2.2.2 version: 2.2.2(playwright@1.56.1) @@ -347,7 +362,10 @@ importers: version: 15.5.7(eslint@8.57.1)(typescript@5.9.3) eslint-plugin-storybook: specifier: 9.1.5 - version: 9.1.5(eslint@8.57.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(typescript@5.9.3) + version: 9.1.5(eslint@8.57.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))(typescript@5.9.3) + happy-dom: + specifier: 20.3.4 + version: 20.3.4 import-in-the-middle: specifier: 2.0.2 version: 2.0.2 @@ -377,16 +395,25 @@ importers: version: 8.0.1 storybook: specifier: 9.1.5 - version: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2) + version: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) tailwindcss: specifier: 3.4.17 version: 3.4.17 typescript: specifier: 5.9.3 version: 5.9.3 + vite-tsconfig-paths: + specifier: 6.0.4 + version: 6.0.4(typescript@5.9.3)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) + vitest: + specifier: 4.0.17 + version: 4.0.17(@opentelemetry/api@1.9.0)(@types/node@24.10.0)(happy-dom@20.3.4)(jiti@2.6.1)(jsdom@27.4.0)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(terser@5.44.1)(yaml@2.8.2) packages: + '@acemir/cssom@0.9.31': + resolution: {integrity: sha512-ZnR3GSaH+/vJ0YlHau21FjfLYjMpYVIzTD8M8vIEQvIGxeOXyXdzCI140rrCY862p/C/BbzWsjc1dgnM9mkoTA==} + '@adobe/css-tools@4.4.4': resolution: {integrity: sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg==} @@ -416,6 +443,15 @@ packages: 
'@apm-js-collab/tracing-hooks@0.3.1': resolution: {integrity: sha512-Vu1CbmPURlN5fTboVuKMoJjbO5qcq9fA5YXpskx3dXe/zTBvjODFoerw+69rVBlRLrJpwPqSDqEuJDEKIrTldw==} + '@asamuzakjp/css-color@4.1.1': + resolution: {integrity: sha512-B0Hv6G3gWGMn0xKJ0txEi/jM5iFpT3MfDxmhZFb4W047GvytCf1DHQ1D69W3zHI4yWe2aTZAA0JnbMZ7Xc8DuQ==} + + '@asamuzakjp/dom-selector@6.7.6': + resolution: {integrity: sha512-hBaJER6A9MpdG3WgdlOolHmbOYvSk46y7IQN/1+iqiCuUu6iWdQrs9DGKF8ocqsEqWujWf/V7b7vaDgiUmIvUg==} + + '@asamuzakjp/nwsapi@2.3.9': + resolution: {integrity: sha512-n8GuYSrI9bF7FFZ/SjhwevlHc8xaVlb/7HmHelnc/PZXBD2ZR49NnN9sMMuDdEGPeeRQ5d0hqlSlEpgCX3Wl0Q==} + '@asyncapi/specs@6.10.0': resolution: {integrity: sha512-vB5oKLsdrLUORIZ5BXortZTlVyGWWMC1Nud/0LtgxQ3Yn2738HigAD6EVqScvpPsDUI/bcLVsYEXN4dtXQHVng==} @@ -849,6 +885,18 @@ packages: peerDependencies: '@babel/core': ^7.0.0-0 + '@babel/plugin-transform-react-jsx-self@7.27.1': + resolution: {integrity: sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-react-jsx-source@7.27.1': + resolution: {integrity: sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + '@babel/plugin-transform-react-jsx@7.27.1': resolution: {integrity: sha512-2KH4LWGSrJIkVf5tSiBFYuXDAoWRq2MMwgivCf+93dd0GQi8RXLjKA/0EvRnVV5G0hrHczsquXuD01L8s6dmBw==} engines: {node: '>=6.9.0'} @@ -995,6 +1043,38 @@ packages: peerDependencies: commander: ~14.0.0 + '@csstools/color-helpers@5.1.0': + resolution: {integrity: sha512-S11EXWJyy0Mz5SYvRmY8nJYTFFd1LCNV+7cXyAgQtOOuzb4EsgfqDufL+9esx72/eLhsRdGZwaldu/h+E4t4BA==} + engines: {node: '>=18'} + + '@csstools/css-calc@2.1.4': + resolution: {integrity: sha512-3N8oaj+0juUw/1H3YwmDDJXCgTB1gKU6Hc/bB502u9zR0q2vd786XJH9QfrKIEgFlZmhZiq6epXl4rHqhzsIgQ==} + engines: {node: '>=18'} + peerDependencies: + '@csstools/css-parser-algorithms': ^3.0.5 + '@csstools/css-tokenizer': ^3.0.4 + + '@csstools/css-color-parser@3.1.0': + resolution: {integrity: sha512-nbtKwh3a6xNVIp/VRuXV64yTKnb1IjTAEEh3irzS+HkKjAOYLTGNb9pmVNntZ8iVBHcWDA2Dof0QtPgFI1BaTA==} + engines: {node: '>=18'} + peerDependencies: + '@csstools/css-parser-algorithms': ^3.0.5 + '@csstools/css-tokenizer': ^3.0.4 + + '@csstools/css-parser-algorithms@3.0.5': + resolution: {integrity: sha512-DaDeUkXZKjdGhgYaHNJTV9pV7Y9B3b644jCLs9Upc3VeNGg6LWARAT6O+Q+/COo+2gg/bM5rhpMAtf70WqfBdQ==} + engines: {node: '>=18'} + peerDependencies: + '@csstools/css-tokenizer': ^3.0.4 + + '@csstools/css-syntax-patches-for-csstree@1.0.25': + resolution: {integrity: sha512-g0Kw9W3vjx5BEBAF8c5Fm2NcB/Fs8jJXh85aXqwEXiL+tqtOut07TWgyaGzAAfTM+gKckrrncyeGEZPcaRgm2Q==} + engines: {node: '>=18'} + + '@csstools/css-tokenizer@3.0.4': + resolution: {integrity: sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw==} + engines: {node: '>=18'} + '@date-fns/tz@1.4.1': resolution: {integrity: sha512-P5LUNhtbj6YfI3iJjw5EL9eUAG6OitD0W3fWQcpQjDRc/QIsL0tRNuO1PcDvPccWL1fSTXXdE1ds+l95DV/OFA==} @@ -1022,156 +1102,312 @@ packages: cpu: [ppc64] os: [aix] + '@esbuild/aix-ppc64@0.27.2': + resolution: {integrity: sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [aix] + '@esbuild/android-arm64@0.25.12': resolution: {integrity: 
sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==} engines: {node: '>=18'} cpu: [arm64] os: [android] + '@esbuild/android-arm64@0.27.2': + resolution: {integrity: sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [android] + '@esbuild/android-arm@0.25.12': resolution: {integrity: sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==} engines: {node: '>=18'} cpu: [arm] os: [android] + '@esbuild/android-arm@0.27.2': + resolution: {integrity: sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==} + engines: {node: '>=18'} + cpu: [arm] + os: [android] + '@esbuild/android-x64@0.25.12': resolution: {integrity: sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==} engines: {node: '>=18'} cpu: [x64] os: [android] + '@esbuild/android-x64@0.27.2': + resolution: {integrity: sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==} + engines: {node: '>=18'} + cpu: [x64] + os: [android] + '@esbuild/darwin-arm64@0.25.12': resolution: {integrity: sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==} engines: {node: '>=18'} cpu: [arm64] os: [darwin] + '@esbuild/darwin-arm64@0.27.2': + resolution: {integrity: sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [darwin] + '@esbuild/darwin-x64@0.25.12': resolution: {integrity: sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==} engines: {node: '>=18'} cpu: [x64] os: [darwin] + '@esbuild/darwin-x64@0.27.2': + resolution: {integrity: sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==} + engines: {node: '>=18'} + cpu: [x64] + os: [darwin] + '@esbuild/freebsd-arm64@0.25.12': resolution: {integrity: sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==} engines: {node: '>=18'} cpu: [arm64] os: [freebsd] + '@esbuild/freebsd-arm64@0.27.2': + resolution: {integrity: sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==} + engines: {node: '>=18'} + cpu: [arm64] + os: [freebsd] + '@esbuild/freebsd-x64@0.25.12': resolution: {integrity: sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==} engines: {node: '>=18'} cpu: [x64] os: [freebsd] + '@esbuild/freebsd-x64@0.27.2': + resolution: {integrity: sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==} + engines: {node: '>=18'} + cpu: [x64] + os: [freebsd] + '@esbuild/linux-arm64@0.25.12': resolution: {integrity: sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==} engines: {node: '>=18'} cpu: [arm64] os: [linux] + '@esbuild/linux-arm64@0.27.2': + resolution: {integrity: sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==} + engines: {node: '>=18'} + cpu: [arm64] + os: [linux] + '@esbuild/linux-arm@0.25.12': resolution: {integrity: sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==} engines: {node: '>=18'} cpu: [arm] os: [linux] + '@esbuild/linux-arm@0.27.2': + resolution: {integrity: 
sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==} + engines: {node: '>=18'} + cpu: [arm] + os: [linux] + '@esbuild/linux-ia32@0.25.12': resolution: {integrity: sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==} engines: {node: '>=18'} cpu: [ia32] os: [linux] + '@esbuild/linux-ia32@0.27.2': + resolution: {integrity: sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==} + engines: {node: '>=18'} + cpu: [ia32] + os: [linux] + '@esbuild/linux-loong64@0.25.12': resolution: {integrity: sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==} engines: {node: '>=18'} cpu: [loong64] os: [linux] + '@esbuild/linux-loong64@0.27.2': + resolution: {integrity: sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==} + engines: {node: '>=18'} + cpu: [loong64] + os: [linux] + '@esbuild/linux-mips64el@0.25.12': resolution: {integrity: sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==} engines: {node: '>=18'} cpu: [mips64el] os: [linux] + '@esbuild/linux-mips64el@0.27.2': + resolution: {integrity: sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==} + engines: {node: '>=18'} + cpu: [mips64el] + os: [linux] + '@esbuild/linux-ppc64@0.25.12': resolution: {integrity: sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==} engines: {node: '>=18'} cpu: [ppc64] os: [linux] + '@esbuild/linux-ppc64@0.27.2': + resolution: {integrity: sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [linux] + '@esbuild/linux-riscv64@0.25.12': resolution: {integrity: sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==} engines: {node: '>=18'} cpu: [riscv64] os: [linux] + '@esbuild/linux-riscv64@0.27.2': + resolution: {integrity: sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==} + engines: {node: '>=18'} + cpu: [riscv64] + os: [linux] + '@esbuild/linux-s390x@0.25.12': resolution: {integrity: sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==} engines: {node: '>=18'} cpu: [s390x] os: [linux] + '@esbuild/linux-s390x@0.27.2': + resolution: {integrity: sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==} + engines: {node: '>=18'} + cpu: [s390x] + os: [linux] + '@esbuild/linux-x64@0.25.12': resolution: {integrity: sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==} engines: {node: '>=18'} cpu: [x64] os: [linux] + '@esbuild/linux-x64@0.27.2': + resolution: {integrity: sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==} + engines: {node: '>=18'} + cpu: [x64] + os: [linux] + '@esbuild/netbsd-arm64@0.25.12': resolution: {integrity: sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==} engines: {node: '>=18'} cpu: [arm64] os: [netbsd] + '@esbuild/netbsd-arm64@0.27.2': + resolution: {integrity: sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==} + engines: {node: '>=18'} + cpu: [arm64] + os: [netbsd] + '@esbuild/netbsd-x64@0.25.12': resolution: {integrity: 
sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==} engines: {node: '>=18'} cpu: [x64] os: [netbsd] + '@esbuild/netbsd-x64@0.27.2': + resolution: {integrity: sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==} + engines: {node: '>=18'} + cpu: [x64] + os: [netbsd] + '@esbuild/openbsd-arm64@0.25.12': resolution: {integrity: sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==} engines: {node: '>=18'} cpu: [arm64] os: [openbsd] + '@esbuild/openbsd-arm64@0.27.2': + resolution: {integrity: sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openbsd] + '@esbuild/openbsd-x64@0.25.12': resolution: {integrity: sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==} engines: {node: '>=18'} cpu: [x64] os: [openbsd] + '@esbuild/openbsd-x64@0.27.2': + resolution: {integrity: sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==} + engines: {node: '>=18'} + cpu: [x64] + os: [openbsd] + '@esbuild/openharmony-arm64@0.25.12': resolution: {integrity: sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==} engines: {node: '>=18'} cpu: [arm64] os: [openharmony] + '@esbuild/openharmony-arm64@0.27.2': + resolution: {integrity: sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openharmony] + '@esbuild/sunos-x64@0.25.12': resolution: {integrity: sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==} engines: {node: '>=18'} cpu: [x64] os: [sunos] + '@esbuild/sunos-x64@0.27.2': + resolution: {integrity: sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==} + engines: {node: '>=18'} + cpu: [x64] + os: [sunos] + '@esbuild/win32-arm64@0.25.12': resolution: {integrity: sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==} engines: {node: '>=18'} cpu: [arm64] os: [win32] + '@esbuild/win32-arm64@0.27.2': + resolution: {integrity: sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [win32] + '@esbuild/win32-ia32@0.25.12': resolution: {integrity: sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==} engines: {node: '>=18'} cpu: [ia32] os: [win32] + '@esbuild/win32-ia32@0.27.2': + resolution: {integrity: sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==} + engines: {node: '>=18'} + cpu: [ia32] + os: [win32] + '@esbuild/win32-x64@0.25.12': resolution: {integrity: sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==} engines: {node: '>=18'} cpu: [x64] os: [win32] + '@esbuild/win32-x64@0.27.2': + resolution: {integrity: sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [win32] + '@eslint-community/eslint-utils@4.9.1': resolution: {integrity: sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} @@ -1190,6 +1426,15 @@ packages: resolution: {integrity: 
sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + '@exodus/bytes@1.9.0': + resolution: {integrity: sha512-lagqsvnk09NKogQaN/XrtlWeUF8SRhT12odMvbTIIaVObqzwAogL6jhR4DAp0gPuKoM1AOVrKUshJpRdpMFrww==} + engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0} + peerDependencies: + '@noble/hashes': ^1.8.0 || ^2.0.0 + peerDependenciesMeta: + '@noble/hashes': + optional: true + '@exodus/schemasafe@1.3.0': resolution: {integrity: sha512-5Aap/GaRupgNx/feGBwLLTVv8OQFfv3pq2lPRzPg9R+IOBnDgghTGW7l7EuVXOvg5cc/xSAlRW8rBrjIC3Nvqw==} @@ -1555,6 +1800,10 @@ packages: '@open-draft/until@2.1.0': resolution: {integrity: sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg==} + '@opentelemetry/api-logs@0.208.0': + resolution: {integrity: sha512-CjruKY9V6NMssL/T1kAFgzosF1v9o6oeN+aX5JB/C/xPNtmgIJqcXHG7fA82Ou1zCpWGl4lROQUKwUNE1pMCyg==} + engines: {node: '>=8.0.0'} + '@opentelemetry/api-logs@0.209.0': resolution: {integrity: sha512-xomnUNi7TiAGtOgs0tb54LyrjRZLu9shJGGwkcN7NgtiPYOpNnKLkRJtzZvTjD/w6knSZH9sFZcUSUovYOPg6A==} engines: {node: '>=8.0.0'} @@ -1575,6 +1824,12 @@ packages: peerDependencies: '@opentelemetry/api': '>=1.0.0 <1.10.0' + '@opentelemetry/exporter-logs-otlp-http@0.208.0': + resolution: {integrity: sha512-jOv40Bs9jy9bZVLo/i8FwUiuCvbjWDI+ZW13wimJm4LjnlwJxGgB+N/VWOZUTpM+ah/awXeQqKdNlpLf2EjvYg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + '@opentelemetry/instrumentation-amqplib@0.55.0': resolution: {integrity: sha512-5ULoU8p+tWcQw5PDYZn8rySptGSLZHNX/7srqo2TioPnAAcvTy6sQFQXsNPrAnyRRtYGMetXVyZUy5OaX1+IfA==} engines: {node: ^18.19.0 || >=20.6.0} @@ -1713,6 +1968,18 @@ packages: peerDependencies: '@opentelemetry/api': ^1.3.0 + '@opentelemetry/otlp-exporter-base@0.208.0': + resolution: {integrity: sha512-gMd39gIfVb2OgxldxUtOwGJYSH8P1kVFFlJLuut32L6KgUC4gl1dMhn+YC2mGn0bDOiQYSk/uHOdSjuKp58vvA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/otlp-transformer@0.208.0': + resolution: {integrity: sha512-DCFPY8C6lAQHUNkzcNT9R+qYExvsk6C5Bto2pbNxgicpcSWbe2WHShLxkOxIdNcBiYPdVHv/e7vH7K6TI+C+fQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + '@opentelemetry/redis-common@0.38.2': resolution: {integrity: sha512-1BCcU93iwSRZvDAgwUxC/DV4T/406SkMfxGqu5ojc3AvNI+I9GhV7v0J1HljsczuuhcnFLYqD5VmwVXfCGHzxA==} engines: {node: ^18.19.0 || >=20.6.0} @@ -1723,6 +1990,18 @@ packages: peerDependencies: '@opentelemetry/api': '>=1.3.0 <1.10.0' + '@opentelemetry/sdk-logs@0.208.0': + resolution: {integrity: sha512-QlAyL1jRpOeaqx7/leG1vJMp84g0xKP6gJmfELBpnI4O/9xPX+Hu5m1POk9Kl+veNkyth5t19hRlN6tNY1sjbA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.4.0 <1.10.0' + + '@opentelemetry/sdk-metrics@2.2.0': + resolution: {integrity: sha512-G5KYP6+VJMZzpGipQw7Giif48h6SGQ2PFKEYCybeXJsOCB4fp8azqMAAzE5lnnHK3ZVwYQrgmFbsUJO/zOnwGw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.9.0 <1.10.0' + '@opentelemetry/sdk-trace-base@2.2.0': resolution: {integrity: sha512-xWQgL0Bmctsalg6PaXExmzdedSp3gyKV8mQBwK/j9VGdCDu2fmXIb2gAehBKbkXCpJ4HPkgv3QfoJWRT4dHWbw==} engines: {node: ^18.19.0 || >=20.6.0} @@ -1811,11 +2090,57 @@ packages: webpack-plugin-serve: optional: true + '@posthog/core@1.13.0': + resolution: {integrity: 
sha512-knjncrk7qRmssFRbGzBl1Tunt21GRpe0Wv+uVelyL0Rh7PdQUsgguulzXFTps8hA6wPwTU4kq85qnbAJ3eH6Wg==} + + '@posthog/react@1.7.0': + resolution: {integrity: sha512-pM7GL7z/rKjiIwosbRiQA3buhLI6vUo+wg+T/ZrVZC7O5bVU07TfgNZTcuOj8E9dx7vDbfNrc1kjDN7PKMM8ug==} + peerDependencies: + '@types/react': '>=16.8.0' + posthog-js: '>=1.257.2' + react: '>=16.8.0' + peerDependenciesMeta: + '@types/react': + optional: true + + '@posthog/types@1.334.1': + resolution: {integrity: sha512-ypFnwTO7qbV7icylLbujbamPdQXbJq0a61GUUBnJAeTbBw/qYPIss5IRYICcbCj0uunQrwD7/CGxVb5TOYKWgA==} + '@prisma/instrumentation@6.19.0': resolution: {integrity: sha512-QcuYy25pkXM8BJ37wVFBO7Zh34nyRV1GOb2n3lPkkbRYfl4hWl3PTcImP41P0KrzVXfa/45p6eVCos27x3exIg==} peerDependencies: '@opentelemetry/api': ^1.8 + '@protobufjs/aspromise@1.1.2': + resolution: {integrity: sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==} + + '@protobufjs/base64@1.1.2': + resolution: {integrity: sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==} + + '@protobufjs/codegen@2.0.4': + resolution: {integrity: sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==} + + '@protobufjs/eventemitter@1.1.0': + resolution: {integrity: sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==} + + '@protobufjs/fetch@1.1.0': + resolution: {integrity: sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==} + + '@protobufjs/float@1.0.2': + resolution: {integrity: sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==} + + '@protobufjs/inquire@1.1.0': + resolution: {integrity: sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==} + + '@protobufjs/path@1.1.2': + resolution: {integrity: sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==} + + '@protobufjs/pool@1.1.0': + resolution: {integrity: sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==} + + '@protobufjs/utf8@1.1.0': + resolution: {integrity: sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==} + '@radix-ui/number@1.1.1': resolution: {integrity: sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g==} @@ -2407,6 +2732,9 @@ packages: peerDependencies: '@rjsf/utils': ^6.x + '@rolldown/pluginutils@1.0.0-beta.53': + resolution: {integrity: sha512-vENRlFU4YbrwVqNDZ7fLvy+JR1CRkyr01jhSiDpE1u6py3OMzQfztQU2jxykW3ALNxO4kSlqIDeYyD0Y9RcQeQ==} + '@rollup/plugin-commonjs@28.0.1': resolution: {integrity: sha512-+tNWdlWKbpB3WgBN7ijjYkq9X5uhjmcvyjEght4NmH5fAU++zfQzAJ6wumLS+dNcvwEZhKx2Z+skY8m7v0wGSA==} engines: {node: '>=16.0.0 || 14 >= 14.17'} @@ -2965,6 +3293,21 @@ packages: resolution: {integrity: sha512-zIcONa+hVtVSSep9UT3jZ5rizo2BsxgyDYU7WFD5eICBE7no3881HGeb/QkGfsJs6JTkY1aQhT7rIPC7e+0nnA==} engines: {node: '>=14', npm: '>=6', yarn: '>=1'} + '@testing-library/react@16.3.2': + resolution: {integrity: sha512-XU5/SytQM+ykqMnAnvB2umaJNIOsLF3PVv//1Ew4CTcpz0/BRyy/af40qqrt7SjKpDdT1saBMc42CUok5gaw+g==} + engines: {node: '>=18'} + peerDependencies: + '@testing-library/dom': ^10.0.0 + '@types/react': ^18.0.0 || ^19.0.0 + '@types/react-dom': ^18.0.0 || ^19.0.0 + react: ^18.0.0 || ^19.0.0 + react-dom: ^18.0.0 || ^19.0.0 + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + 
optional: true + '@testing-library/user-event@14.6.1': resolution: {integrity: sha512-vq7fv0rnt+QTXgPxr5Hjc210p6YKq2kmdziLgnsZGgLJ9e6VAShx1pACLuRjd/AS/sr7phAR58OIIpf0LlmQNw==} engines: {node: '>=12', npm: '>=6'} @@ -3144,6 +3487,9 @@ packages: '@types/tedious@4.0.14': resolution: {integrity: sha512-KHPsfX/FoVbUGbyYvk1q9MMQHLPeRZhRJZdO45Q4YjvFkv4hMNghCWTvy7rdKessBsmtz4euWCWAB6/tVpI1Iw==} + '@types/trusted-types@2.0.7': + resolution: {integrity: sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==} + '@types/unist@2.0.11': resolution: {integrity: sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==} @@ -3156,6 +3502,9 @@ packages: '@types/use-sync-external-store@0.0.6': resolution: {integrity: sha512-zFDAD+tlpf2r4asuHEj0XH6pY6i0g5NeAHPn+15wk3BV6JA69eERFXC1gyGThDkVa1zCyKr5jox1+2LbV/AMLg==} + '@types/whatwg-mimetype@3.0.2': + resolution: {integrity: sha512-c2AKvDT8ToxLIOUlN51gTiHXflsfIFisS4pO7pDPoKouJCESkhZnEy623gwP9laCy5lnLDAw1vAzu2vM2YLOrA==} + '@types/ws@8.18.1': resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==} @@ -3365,9 +3714,18 @@ packages: vue-router: optional: true + '@vitejs/plugin-react@5.1.2': + resolution: {integrity: sha512-EcA07pHJouywpzsoTUqNh5NwGayl2PPVEJKUSinGGSxFGYn+shYbqMGBg6FXDqgXum9Ou/ecb+411ssw8HImJQ==} + engines: {node: ^20.19.0 || >=22.12.0} + peerDependencies: + vite: ^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 + '@vitest/expect@3.2.4': resolution: {integrity: sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==} + '@vitest/expect@4.0.17': + resolution: {integrity: sha512-mEoqP3RqhKlbmUmntNDDCJeTDavDR+fVYkSOw8qRwJFaW/0/5zA9zFeTrHqNtcmwh6j26yMmwx2PqUDPzt5ZAQ==} + '@vitest/mocker@3.2.4': resolution: {integrity: sha512-46ryTE9RZO/rfDd7pEqFl7etuyzekzEhUbTW3BvmeO/BcCMEgq59BKhek3dXDWgAj4oMK6OZi+vRr1wPW6qjEQ==} peerDependencies: @@ -3379,15 +3737,41 @@ packages: vite: optional: true + '@vitest/mocker@4.0.17': + resolution: {integrity: sha512-+ZtQhLA3lDh1tI2wxe3yMsGzbp7uuJSWBM1iTIKCbppWTSBN09PUC+L+fyNlQApQoR+Ps8twt2pbSSXg2fQVEQ==} + peerDependencies: + msw: ^2.4.9 + vite: ^6.0.0 || ^7.0.0-0 + peerDependenciesMeta: + msw: + optional: true + vite: + optional: true + '@vitest/pretty-format@3.2.4': resolution: {integrity: sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==} + '@vitest/pretty-format@4.0.17': + resolution: {integrity: sha512-Ah3VAYmjcEdHg6+MwFE17qyLqBHZ+ni2ScKCiW2XrlSBV4H3Z7vYfPfz7CWQ33gyu76oc0Ai36+kgLU3rfF4nw==} + + '@vitest/runner@4.0.17': + resolution: {integrity: sha512-JmuQyf8aMWoo/LmNFppdpkfRVHJcsgzkbCA+/Bk7VfNH7RE6Ut2qxegeyx2j3ojtJtKIbIGy3h+KxGfYfk28YQ==} + + '@vitest/snapshot@4.0.17': + resolution: {integrity: sha512-npPelD7oyL+YQM2gbIYvlavlMVWUfNNGZPcu0aEUQXt7FXTuqhmgiYupPnAanhKvyP6Srs2pIbWo30K0RbDtRQ==} + '@vitest/spy@3.2.4': resolution: {integrity: sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==} + '@vitest/spy@4.0.17': + resolution: {integrity: sha512-I1bQo8QaP6tZlTomQNWKJE6ym4SHf3oLS7ceNjozxxgzavRAgZDc06T7kD8gb9bXKEgcLNt00Z+kZO6KaJ62Ew==} + '@vitest/utils@3.2.4': resolution: {integrity: sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==} + '@vitest/utils@4.0.17': + resolution: {integrity: sha512-RG6iy+IzQpa9SB8HAFHJ9Y+pTzI+h8553MrciN9eC6TFBErqrQaTas4vG+MVj8S4uKk8uTT2p0vgZPnTdxd96w==} + '@webassemblyjs/ast@1.14.1': resolution: 
{integrity: sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==} @@ -3484,6 +3868,10 @@ packages: resolution: {integrity: sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==} engines: {node: '>= 6.0.0'} + agent-base@7.1.4: + resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} + engines: {node: '>= 14'} + ajv-draft-04@1.0.0: resolution: {integrity: sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw==} peerDependencies: @@ -3702,6 +4090,9 @@ packages: resolution: {integrity: sha512-aVNobHnJqLiUelTaHat9DZ1qM2w0C0Eym4LPI/3JxOnSokGVdsl1T1kN7TFvsEAD8G47A6VKQ0TVHqbBnYMJlQ==} engines: {node: '>=12.0.0'} + bidi-js@1.0.3: + resolution: {integrity: sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw==} + big.js@5.2.2: resolution: {integrity: sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==} @@ -3811,6 +4202,10 @@ packages: resolution: {integrity: sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==} engines: {node: '>=18'} + chai@6.2.2: + resolution: {integrity: sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==} + engines: {node: '>=18'} + chalk@4.1.2: resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} engines: {node: '>=10'} @@ -3972,6 +4367,9 @@ packages: core-js-pure@3.47.0: resolution: {integrity: sha512-BcxeDbzUrRnXGYIVAGFtcGQVNpFcUhVjr6W7F8XktvQW2iJP9e66GP6xdKotCRFlrxBvNIBrhwKteRXqMV86Nw==} + core-js@3.48.0: + resolution: {integrity: sha512-zpEHTy1fjTMZCKLHUZoVeylt9XrzaIN2rbPXEt0k+q7JE5CkCZdo6bNq55bn24a69CH7ErAVLKijxJja4fw+UQ==} + core-util-is@1.0.3: resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==} @@ -4025,6 +4423,10 @@ packages: css-select@4.3.0: resolution: {integrity: sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==} + css-tree@3.1.0: + resolution: {integrity: sha512-0eW44TGN5SQXU1mWSkKwFstI/22X2bG1nYzZTYMAWjylYURhse752YgbE4Cx46AC+bAvI+/dYTPRk1LqSUnu6w==} + engines: {node: ^10 || ^12.20.0 || ^14.13.0 || >=15.0.0} + css-what@6.2.2: resolution: {integrity: sha512-u/O3vwbptzhMs3L1fQE82ZSLHQQfto5gyZzwteVIEyeaY5Fc7R4dapF/BvRoSYFeqfBk4m0V1Vafq5Pjv25wvA==} engines: {node: '>= 6'} @@ -4037,6 +4439,10 @@ packages: engines: {node: '>=4'} hasBin: true + cssstyle@5.3.7: + resolution: {integrity: sha512-7D2EPVltRrsTkhpQmksIu+LxeWAIEk6wRDMJ1qljlv+CKHJM+cJLlfhWIzNA44eAsHXSNe3+vO6DW1yCYx8SuQ==} + engines: {node: '>=20'} + csstype@3.2.3: resolution: {integrity: sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==} @@ -4109,6 +4515,10 @@ packages: damerau-levenshtein@1.0.8: resolution: {integrity: sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==} + data-urls@6.0.1: + resolution: {integrity: sha512-euIQENZg6x8mj3fO6o9+fOW8MimUI4PpD/fZBhJfeioZVy9TUpM4UY7KjQNVZFlqwJ0UdzRDzkycB997HEq1BQ==} + engines: {node: '>=20'} + data-view-buffer@1.0.2: resolution: {integrity: sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ==} engines: {node: '>= 0.4'} @@ -4147,6 +4557,9 @@ packages: decimal.js-light@2.5.1: resolution: {integrity: 
sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg==} + decimal.js@10.6.0: + resolution: {integrity: sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==} + decode-named-character-reference@1.2.0: resolution: {integrity: sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==} @@ -4248,6 +4661,9 @@ packages: resolution: {integrity: sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==} engines: {node: '>= 4'} + dompurify@3.3.1: + resolution: {integrity: sha512-qkdCKzLNtrgPFP1Vo+98FRzJnBRGe4ffyCea9IwHB1fyxPOeNTHpLKYGd4Uk9xvNoH0ZoOjwZxNptyMwqrId1Q==} + domutils@2.8.0: resolution: {integrity: sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==} @@ -4388,6 +4804,11 @@ packages: engines: {node: '>=18'} hasBin: true + esbuild@0.27.2: + resolution: {integrity: sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==} + engines: {node: '>=18'} + hasBin: true + escalade@3.2.0: resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==} engines: {node: '>=6'} @@ -4562,6 +4983,10 @@ packages: exenv@1.2.2: resolution: {integrity: sha512-Z+ktTxTwv9ILfgKCk32OX3n/doe+OcLTRtqK9pcL+JsP3J1/VW8Uvl4ZjLlKqeW4rzK4oesDOGMEMRIZqtP4Iw==} + expect-type@1.3.0: + resolution: {integrity: sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==} + engines: {node: '>=12.0.0'} + extend@3.0.2: resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} @@ -4609,6 +5034,9 @@ packages: picomatch: optional: true + fflate@0.4.8: + resolution: {integrity: sha512-FJqqoDBR00Mdj9ppamLa/Y7vxm+PRmNWA67N846RvsoYVMKB4q3y/de5PA7gUmRMYK/8CMz2GDZQmCRN1wBcWA==} + file-entry-cache@6.0.1: resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==} engines: {node: ^10.12.0 || >=12.0.0} @@ -4802,6 +5230,9 @@ packages: resolution: {integrity: sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==} engines: {node: '>=10'} + globrex@0.1.2: + resolution: {integrity: sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==} + gopd@1.2.0: resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} engines: {node: '>= 0.4'} @@ -4816,6 +5247,10 @@ packages: resolution: {integrity: sha512-DKKrynuQRne0PNpEbzuEdHlYOMksHSUI8Zc9Unei5gTsMNA2/vMpoMz/yKba50pejK56qj98qM0SjYxAKi13gQ==} engines: {node: ^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0} + happy-dom@20.3.4: + resolution: {integrity: sha512-rfbiwB6OKxZFIFQ7SRnCPB2WL9WhyXsFoTfecYgeCeFSOBxvkWLaXsdv5ehzJrfqwXQmDephAKWLRQoFoJwrew==} + engines: {node: '>=20.0.0'} + has-bigints@1.1.0: resolution: {integrity: sha512-R3pbpkcIqv2Pm3dUwgjclDRVmWpTJW2DcMzcIhEXEx1oh/CEMObMm3KLmRJOdvhM7o4uQBnwr8pzRK2sJWIqfg==} engines: {node: '>= 0.4'} @@ -4907,6 +5342,10 @@ packages: hoist-non-react-statics@3.3.2: resolution: {integrity: sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==} + html-encoding-sniffer@6.0.0: + resolution: {integrity: sha512-CV9TW3Y3f8/wT0BRFc1/KAVQ3TUHiXmaAb6VW9vtiMFf7SLoMd1PdAc4W3KFOFETBJUb90KatHqlsZMWV+R9Gg==} + engines: {node: ^20.19.0 || ^22.12.0 || 
>=24.0.0} + html-entities@2.6.0: resolution: {integrity: sha512-kig+rMn/QOVRvr7c86gQ8lWXq+Hkv6CbAH1hLu+RG338StTpE8Z0b44SDVaqVu7HGKf27frdmUYEs9hTUX/cLQ==} @@ -4933,6 +5372,10 @@ packages: htmlparser2@6.1.0: resolution: {integrity: sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==} + http-proxy-agent@7.0.2: + resolution: {integrity: sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==} + engines: {node: '>= 14'} + http2-client@1.3.5: resolution: {integrity: sha512-EC2utToWl4RKfs5zd36Mxq7nzHHBuomZboI0yYL6Y0RmBgT7Sgkq4rQ0ezFTYoIsSs7Tm9SJe+o2FcAg6GBhGA==} @@ -4943,6 +5386,10 @@ packages: resolution: {integrity: sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==} engines: {node: '>= 6'} + https-proxy-agent@7.0.6: + resolution: {integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==} + engines: {node: '>= 14'} + human-signals@2.1.0: resolution: {integrity: sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==} engines: {node: '>=10.17.0'} @@ -5128,6 +5575,9 @@ packages: resolution: {integrity: sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==} engines: {node: '>=12'} + is-potential-custom-element-name@1.0.1: + resolution: {integrity: sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==} + is-reference@1.2.1: resolution: {integrity: sha512-U82MsXXiFIrjCK4otLT+o2NA2Cd2g5MLoOVXUZjIOhLurrRxpEXzI8O0KZHr3IjLvlAH1kTPYSuqer5T9ZVBKQ==} @@ -5217,6 +5667,15 @@ packages: resolution: {integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==} hasBin: true + jsdom@27.4.0: + resolution: {integrity: sha512-mjzqwWRD9Y1J1KUi7W97Gja1bwOOM5Ug0EZ6UDK3xS7j7mndrkwozHtSblfomlzyB4NepioNt+B2sOSzczVgtQ==} + engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0} + peerDependencies: + canvas: ^3.0.0 + peerDependenciesMeta: + canvas: + optional: true + jsep@1.4.0: resolution: {integrity: sha512-B7qPcEVE3NVkmSJbaYxvv4cHkVW7DQsZz13pUMrfS8z8Q/BuShN+gcTXrUlPiGqM2/t/EEaI030bpxMqY8gMlw==} engines: {node: '>= 10.16.0'} @@ -5384,6 +5843,9 @@ packages: resolution: {integrity: sha512-HgMmCqIJSAKqo68l0rS2AanEWfkxaZ5wNiEFb5ggm08lDs9Xl2KxBlX3PTcaD2chBM1gXAYf491/M2Rv8Jwayg==} engines: {node: '>= 0.6.0'} + long@5.3.2: + resolution: {integrity: sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==} + longest-streak@3.1.0: resolution: {integrity: sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==} @@ -5403,6 +5865,10 @@ packages: lru-cache@10.4.3: resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==} + lru-cache@11.2.4: + resolution: {integrity: sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==} + engines: {node: 20 || >=22} + lru-cache@5.1.1: resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} @@ -5500,6 +5966,9 @@ packages: mdast-util-to-string@4.0.0: resolution: {integrity: sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==} + mdn-data@2.12.2: + resolution: {integrity: sha512-IEn+pegP1aManZuckezWCO+XZQDplx1366JoVhTpMpBB1sPey/SbveZQUosKiKiGYjg1wH4pMlNgXbCiYgihQA==} + 
mdurl@2.0.0: resolution: {integrity: sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==} @@ -5862,6 +6331,9 @@ packages: objectorarray@1.0.5: resolution: {integrity: sha512-eJJDYkhJFFbBBAxeh8xW+weHlkI28n2ZdQV/J/DNfWfSKlGEf2xcfAbZTv3riEXHAhL9SVOTs2pRmXiSTf78xg==} + obug@2.1.1: + resolution: {integrity: sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==} + once@1.4.0: resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} @@ -5952,6 +6424,9 @@ packages: parse5@7.3.0: resolution: {integrity: sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==} + parse5@8.0.0: + resolution: {integrity: sha512-9m4m5GSgXjL4AjumKzq1Fgfp3Z8rsvjRNbnkVwfu2ImRqE5D0LnY2QfDen18FSY9C573YU5XxSapdHZTZ2WolA==} + party-js@2.2.0: resolution: {integrity: sha512-50hGuALCpvDTrQLPQ1fgUgxKIWAH28ShVkmeK/3zhO0YJyCqkhrZhQEkWPxDYLvbFJ7YAXyROmFEu35gKpZLtQ==} @@ -5991,6 +6466,9 @@ packages: resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==} engines: {node: '>=8'} + pathe@2.0.3: + resolution: {integrity: sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==} + pathval@2.0.1: resolution: {integrity: sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==} engines: {node: '>= 14.16'} @@ -6157,6 +6635,12 @@ packages: resolution: {integrity: sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==} engines: {node: '>=0.10.0'} + posthog-js@1.334.1: + resolution: {integrity: sha512-5cDzLICr2afnwX/cR9fwoLC0vN0Nb5gP5HiCigzHkgHdO+E3WsYefla3EFMQz7U4r01CBPZ+nZ9/srkzeACxtQ==} + + preact@10.28.2: + resolution: {integrity: sha512-lbteaWGzGHdlIuiJ0l2Jq454m6kcpI1zNje6d8MlGAFlYvP2GO4ibnat7P74Esfz4sPTdM6UxtTwh/d3pwM9JA==} + prelude-ls@1.2.1: resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} engines: {node: '>= 0.8.0'} @@ -6245,6 +6729,10 @@ packages: property-information@7.1.0: resolution: {integrity: sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==} + protobufjs@7.5.4: + resolution: {integrity: sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==} + engines: {node: '>=12.0.0'} + proxy-from-env@1.1.0: resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} @@ -6266,6 +6754,9 @@ packages: resolution: {integrity: sha512-4EK3+xJl8Ts67nLYNwqw/dsFVnCf+qR7RgXSK9jEEm9unao3njwMDdmsdvoKBKHzxd7tCYz5e5M+SnMjdtXGQQ==} engines: {node: '>=0.6'} + query-selector-shadow-dom@1.0.1: + resolution: {integrity: sha512-lT5yCqEBgfoMYpf3F2xQRK7zEr1rhIIZuceDK6+xRkJQ4NMbHTwXqk4NkwDwQMNqXgG9r9fyHnzwNVs6zV5KRw==} + querystring-es3@0.2.1: resolution: {integrity: sha512-773xhDQnZBMFobEiztv8LIl70ch5MSF/jUQVlhwFyBILqq96anmoctVIYz+ZRp0qbCKATTn6ev02M3r7Ga5vqA==} engines: {node: '>=0.4.x'} @@ -6359,6 +6850,10 @@ packages: resolution: {integrity: sha512-jCvmsr+1IUSMUyzOkRcvnVbX3ZYC6g9TDrDbFuFmRDq7PD4yaGbLKNQL6k2jnArV8hjYxh7hVhAZB6s9HDGpZA==} engines: {node: '>=0.10.0'} + react-refresh@0.18.0: + resolution: {integrity: sha512-QgT5//D3jfjJb6Gsjxv0Slpj23ip+HtOpnNgnb2S5zU3CB26G/IDPGoy4RJB42wzFE46DRsstbW6tKHoKbhAxw==} + engines: {node: '>=0.10.0'} + react-remove-scroll-bar@2.3.8: 
resolution: {integrity: sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q==} engines: {node: '>=10'} @@ -6627,6 +7122,10 @@ packages: webpack: optional: true + saxes@6.0.0: + resolution: {integrity: sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==} + engines: {node: '>=v12.22.7'} + scheduler@0.23.2: resolution: {integrity: sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==} @@ -6724,6 +7223,9 @@ packages: resolution: {integrity: sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==} engines: {node: '>= 0.4'} + siginfo@2.0.0: + resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==} + signal-exit@3.0.7: resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} @@ -6766,6 +7268,9 @@ packages: stable-hash@0.0.5: resolution: {integrity: sha512-+L3ccpzibovGXFK+Ap/f8LOS0ahMrHTf3xu7mMLSpEGU0EO9ucaysSylKo9eRDFNhWve/y275iPmIZ4z39a9iA==} + stackback@0.0.2: + resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==} + stackframe@1.3.4: resolution: {integrity: sha512-oeVtt7eWQS+Na6F//S4kJ2K2VbRlS9D43mAlMyVpVWovy9o+jfgH8O9agzANzaiLjclA0oYzUXEM4PurhSUChw==} @@ -6777,6 +7282,9 @@ packages: resolution: {integrity: sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==} engines: {node: '>= 0.8'} + std-env@3.10.0: + resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} + stop-iteration-iterator@1.1.0: resolution: {integrity: sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ==} engines: {node: '>= 0.4'} @@ -6930,6 +7438,9 @@ packages: resolution: {integrity: sha512-upi/0ZGkYgEcLeGieoz8gT74oWHA0E7JivX7aN9mAf+Tc7BQoRBvnIGHoPDw+f9TXTW4s6kGYCZJtauP6OYp7g==} hasBin: true + symbol-tree@3.2.4: + resolution: {integrity: sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==} + tailwind-merge@2.6.0: resolution: {integrity: sha512-P+Vu1qXfzediirmHOC3xKGAYeZtPcV9g76X+xg2FD4tYgR71ewMA35Y3sCz3zhiN/dwefRpJX0yBcgwi1fXNQA==} @@ -6994,6 +7505,13 @@ packages: tiny-invariant@1.3.3: resolution: {integrity: sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==} + tinybench@2.9.0: + resolution: {integrity: sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==} + + tinyexec@1.0.2: + resolution: {integrity: sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==} + engines: {node: '>=18'} + tinyglobby@0.2.15: resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} engines: {node: '>=12.0.0'} @@ -7002,6 +7520,10 @@ packages: resolution: {integrity: sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==} engines: {node: '>=14.0.0'} + tinyrainbow@3.0.3: + resolution: {integrity: sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==} + engines: {node: '>=14.0.0'} + tinyspy@4.0.4: resolution: {integrity: sha512-azl+t0z7pw/z958Gy9svOTuzqIk6xq+NSheJzn5MMWtWTFywIacg2wUlzKFGtt3cthx0r2SxMK0yzJOR0IES7Q==} engines: {node: 
'>=14.0.0'} @@ -7028,6 +7550,10 @@ packages: tr46@0.0.3: resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==} + tr46@6.0.0: + resolution: {integrity: sha512-bLVMLPtstlZ4iMQHpFHTR7GAGj2jxi8Dg0s2h2MafAE4uSWF98FC/3MomU51iQAMf8/qDUbKWf5GxuvvVcXEhw==} + engines: {node: '>=20'} + tree-kill@1.2.2: resolution: {integrity: sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==} hasBin: true @@ -7061,6 +7587,16 @@ packages: typescript: optional: true + tsconfck@3.1.6: + resolution: {integrity: sha512-ks6Vjr/jEw0P1gmOVwutM3B7fWxoWBL2KRDb1JfqGVawBmO5UsvmWOQFGHBPl5yxYz4eERr19E6L7NMv+Fej4w==} + engines: {node: ^18 || >=20} + hasBin: true + peerDependencies: + typescript: ^5.0.0 + peerDependenciesMeta: + typescript: + optional: true + tsconfig-paths-webpack-plugin@4.2.0: resolution: {integrity: sha512-zbem3rfRS8BgeNK50Zz5SIQgXzLafiHjOwUAvk/38/o1jHn/V5QAgVUcz884or7WYcPaH3N2CIfUc2u0ul7UcA==} engines: {node: '>=10.13.0'} @@ -7300,9 +7836,95 @@ packages: victory-vendor@37.3.6: resolution: {integrity: sha512-SbPDPdDBYp+5MJHhBCAyI7wKM3d5ivekigc2Dk2s7pgbZ9wIgIBYGVw4zGHBml/qTFbexrofXW6Gu4noGxrOwQ==} + vite-tsconfig-paths@6.0.4: + resolution: {integrity: sha512-iIsEJ+ek5KqRTK17pmxtgIxXtqr3qDdE6OxrP9mVeGhVDNXRJTKN/l9oMbujTQNzMLe6XZ8qmpztfbkPu2TiFQ==} + peerDependencies: + vite: '*' + peerDependenciesMeta: + vite: + optional: true + + vite@7.3.1: + resolution: {integrity: sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==} + engines: {node: ^20.19.0 || >=22.12.0} + hasBin: true + peerDependencies: + '@types/node': ^20.19.0 || >=22.12.0 + jiti: '>=1.21.0' + less: ^4.0.0 + lightningcss: ^1.21.0 + sass: ^1.70.0 + sass-embedded: ^1.70.0 + stylus: '>=0.54.8' + sugarss: ^5.0.0 + terser: ^5.16.0 + tsx: ^4.8.1 + yaml: ^2.4.2 + peerDependenciesMeta: + '@types/node': + optional: true + jiti: + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + sass-embedded: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + tsx: + optional: true + yaml: + optional: true + + vitest@4.0.17: + resolution: {integrity: sha512-FQMeF0DJdWY0iOnbv466n/0BudNdKj1l5jYgl5JVTwjSsZSlqyXFt/9+1sEyhR6CLowbZpV7O1sCHrzBhucKKg==} + engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} + hasBin: true + peerDependencies: + '@edge-runtime/vm': '*' + '@opentelemetry/api': ^1.9.0 + '@types/node': ^20.0.0 || ^22.0.0 || >=24.0.0 + '@vitest/browser-playwright': 4.0.17 + '@vitest/browser-preview': 4.0.17 + '@vitest/browser-webdriverio': 4.0.17 + '@vitest/ui': 4.0.17 + happy-dom: '*' + jsdom: '*' + peerDependenciesMeta: + '@edge-runtime/vm': + optional: true + '@opentelemetry/api': + optional: true + '@types/node': + optional: true + '@vitest/browser-playwright': + optional: true + '@vitest/browser-preview': + optional: true + '@vitest/browser-webdriverio': + optional: true + '@vitest/ui': + optional: true + happy-dom: + optional: true + jsdom: + optional: true + vm-browserify@1.1.2: resolution: {integrity: sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==} + w3c-xmlserializer@5.0.0: + resolution: {integrity: sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==} + engines: {node: '>=18'} + warning@4.0.3: resolution: {integrity: sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==} @@ -7313,9 
+7935,16 @@ packages: web-namespaces@2.0.1: resolution: {integrity: sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==} + web-vitals@5.1.0: + resolution: {integrity: sha512-ArI3kx5jI0atlTtmV0fWU3fjpLmq/nD3Zr1iFFlJLaqa5wLBkUSzINwBPySCX/8jRyjlmy1Volw1kz1g9XE4Jg==} + webidl-conversions@3.0.1: resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==} + webidl-conversions@8.0.1: + resolution: {integrity: sha512-BMhLD/Sw+GbJC21C/UgyaZX41nPt8bUTg+jWyDeg7e7YN4xOM05YPSIXceACnXVtqyEw/LMClUQMtMZ+PGGpqQ==} + engines: {node: '>=20'} + webpack-dev-middleware@6.1.3: resolution: {integrity: sha512-A4ChP0Qj8oGociTs6UdlRUGANIGrCDL3y+pmQMc+dSsraXHCatFpmMey4mYELA+juqwUqwQsUgJJISXl1KWmiw==} engines: {node: '>= 14.15.0'} @@ -7348,6 +7977,22 @@ packages: webpack-cli: optional: true + whatwg-mimetype@3.0.0: + resolution: {integrity: sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q==} + engines: {node: '>=12'} + + whatwg-mimetype@4.0.0: + resolution: {integrity: sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==} + engines: {node: '>=18'} + + whatwg-mimetype@5.0.0: + resolution: {integrity: sha512-sXcNcHOC51uPGF0P/D4NVtrkjSU2fNsm9iog4ZvZJsL3rjoDAzXZhkm2MWt1y+PUdggKAYVoMAIYcs78wJ51Cw==} + engines: {node: '>=20'} + + whatwg-url@15.1.0: + resolution: {integrity: sha512-2ytDk0kiEj/yu90JOAp44PVPUkO9+jVhyf+SybKlRHSDlvOOZhdPIrr7xTH64l4WixO2cP+wQIcgujkGBPPz6g==} + engines: {node: '>=20'} + whatwg-url@5.0.0: resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} @@ -7372,6 +8017,11 @@ packages: engines: {node: '>= 8'} hasBin: true + why-is-node-running@2.3.0: + resolution: {integrity: sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==} + engines: {node: '>=8'} + hasBin: true + word-wrap@1.2.5: resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} engines: {node: '>=0.10.0'} @@ -7403,10 +8053,17 @@ packages: utf-8-validate: optional: true + xml-name-validator@5.0.0: + resolution: {integrity: sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==} + engines: {node: '>=18'} + xmlbuilder@15.1.1: resolution: {integrity: sha512-yMqGBqtXyeN1e3TGYvgNgDVZ3j84W4cwkOXQswghol6APgZWaff9lnbvN7MHYJOiXsvGPXtjTYJEiC9J2wv9Eg==} engines: {node: '>=8.0'} + xmlchars@2.2.0: + resolution: {integrity: sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==} + xtend@4.0.2: resolution: {integrity: sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==} engines: {node: '>=0.4'} @@ -7488,6 +8145,9 @@ packages: snapshots: + '@acemir/cssom@0.9.31': + optional: true + '@adobe/css-tools@4.4.4': {} '@alloc/quick-lru@5.2.0': {} @@ -7521,6 +8181,27 @@ snapshots: transitivePeerDependencies: - supports-color + '@asamuzakjp/css-color@4.1.1': + dependencies: + '@csstools/css-calc': 2.1.4(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4) + '@csstools/css-color-parser': 3.1.0(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4) + '@csstools/css-parser-algorithms': 3.0.5(@csstools/css-tokenizer@3.0.4) + '@csstools/css-tokenizer': 3.0.4 + lru-cache: 11.2.4 + optional: true + + 
'@asamuzakjp/dom-selector@6.7.6': + dependencies: + '@asamuzakjp/nwsapi': 2.3.9 + bidi-js: 1.0.3 + css-tree: 3.1.0 + is-potential-custom-element-name: 1.0.1 + lru-cache: 11.2.4 + optional: true + + '@asamuzakjp/nwsapi@2.3.9': + optional: true + '@asyncapi/specs@6.10.0': dependencies: '@types/json-schema': 7.0.15 @@ -8036,6 +8717,16 @@ snapshots: transitivePeerDependencies: - supports-color + '@babel/plugin-transform-react-jsx-self@7.27.1(@babel/core@7.28.5)': + dependencies: + '@babel/core': 7.28.5 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/plugin-transform-react-jsx-source@7.27.1(@babel/core@7.28.5)': + dependencies: + '@babel/core': 7.28.5 + '@babel/helper-plugin-utils': 7.27.1 + '@babel/plugin-transform-react-jsx@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 @@ -8274,13 +8965,13 @@ snapshots: '@babel/helper-string-parser': 7.27.1 '@babel/helper-validator-identifier': 7.28.5 - '@chromatic-com/storybook@4.1.2(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))': + '@chromatic-com/storybook@4.1.2(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))': dependencies: '@neoconfetti/react': 1.0.0 chromatic: 12.2.0 filesize: 10.1.6 jsonfile: 6.2.0 - storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2) + storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) strip-ansi: 7.1.2 transitivePeerDependencies: - '@chromatic-com/cypress' @@ -8290,6 +8981,34 @@ snapshots: dependencies: commander: 14.0.2 + '@csstools/color-helpers@5.1.0': + optional: true + + '@csstools/css-calc@2.1.4(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4)': + dependencies: + '@csstools/css-parser-algorithms': 3.0.5(@csstools/css-tokenizer@3.0.4) + '@csstools/css-tokenizer': 3.0.4 + optional: true + + '@csstools/css-color-parser@3.1.0(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4)': + dependencies: + '@csstools/color-helpers': 5.1.0 + '@csstools/css-calc': 2.1.4(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4) + '@csstools/css-parser-algorithms': 3.0.5(@csstools/css-tokenizer@3.0.4) + '@csstools/css-tokenizer': 3.0.4 + optional: true + + '@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4)': + dependencies: + '@csstools/css-tokenizer': 3.0.4 + optional: true + + '@csstools/css-syntax-patches-for-csstree@1.0.25': + optional: true + + '@csstools/css-tokenizer@3.0.4': + optional: true + '@date-fns/tz@1.4.1': {} '@emnapi/core@1.8.1': @@ -8321,81 +9040,159 @@ snapshots: '@esbuild/aix-ppc64@0.25.12': optional: true + '@esbuild/aix-ppc64@0.27.2': + optional: true + '@esbuild/android-arm64@0.25.12': optional: true + '@esbuild/android-arm64@0.27.2': + optional: true + '@esbuild/android-arm@0.25.12': optional: true + '@esbuild/android-arm@0.27.2': + optional: true + '@esbuild/android-x64@0.25.12': optional: true + '@esbuild/android-x64@0.27.2': + optional: true + '@esbuild/darwin-arm64@0.25.12': optional: true + '@esbuild/darwin-arm64@0.27.2': + optional: true + '@esbuild/darwin-x64@0.25.12': optional: true + '@esbuild/darwin-x64@0.27.2': + optional: true + 
'@esbuild/freebsd-arm64@0.25.12': optional: true + '@esbuild/freebsd-arm64@0.27.2': + optional: true + '@esbuild/freebsd-x64@0.25.12': optional: true + '@esbuild/freebsd-x64@0.27.2': + optional: true + '@esbuild/linux-arm64@0.25.12': optional: true + '@esbuild/linux-arm64@0.27.2': + optional: true + '@esbuild/linux-arm@0.25.12': optional: true + '@esbuild/linux-arm@0.27.2': + optional: true + '@esbuild/linux-ia32@0.25.12': optional: true + '@esbuild/linux-ia32@0.27.2': + optional: true + '@esbuild/linux-loong64@0.25.12': optional: true + '@esbuild/linux-loong64@0.27.2': + optional: true + '@esbuild/linux-mips64el@0.25.12': optional: true + '@esbuild/linux-mips64el@0.27.2': + optional: true + '@esbuild/linux-ppc64@0.25.12': optional: true + '@esbuild/linux-ppc64@0.27.2': + optional: true + '@esbuild/linux-riscv64@0.25.12': optional: true + '@esbuild/linux-riscv64@0.27.2': + optional: true + '@esbuild/linux-s390x@0.25.12': optional: true + '@esbuild/linux-s390x@0.27.2': + optional: true + '@esbuild/linux-x64@0.25.12': optional: true + '@esbuild/linux-x64@0.27.2': + optional: true + '@esbuild/netbsd-arm64@0.25.12': optional: true + '@esbuild/netbsd-arm64@0.27.2': + optional: true + '@esbuild/netbsd-x64@0.25.12': optional: true + '@esbuild/netbsd-x64@0.27.2': + optional: true + '@esbuild/openbsd-arm64@0.25.12': optional: true + '@esbuild/openbsd-arm64@0.27.2': + optional: true + '@esbuild/openbsd-x64@0.25.12': optional: true + '@esbuild/openbsd-x64@0.27.2': + optional: true + '@esbuild/openharmony-arm64@0.25.12': optional: true + '@esbuild/openharmony-arm64@0.27.2': + optional: true + '@esbuild/sunos-x64@0.25.12': optional: true + '@esbuild/sunos-x64@0.27.2': + optional: true + '@esbuild/win32-arm64@0.25.12': optional: true + '@esbuild/win32-arm64@0.27.2': + optional: true + '@esbuild/win32-ia32@0.25.12': optional: true + '@esbuild/win32-ia32@0.27.2': + optional: true + '@esbuild/win32-x64@0.25.12': optional: true + '@esbuild/win32-x64@0.27.2': + optional: true + '@eslint-community/eslint-utils@4.9.1(eslint@8.57.1)': dependencies: eslint: 8.57.1 @@ -8419,6 +9216,9 @@ snapshots: '@eslint/js@8.57.1': {} + '@exodus/bytes@1.9.0': + optional: true + '@exodus/schemasafe@1.3.0': {} '@faker-js/faker@10.0.0': {} @@ -8737,6 +9537,10 @@ snapshots: '@open-draft/until@2.1.0': {} + '@opentelemetry/api-logs@0.208.0': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs@0.209.0': dependencies: '@opentelemetry/api': 1.9.0 @@ -8752,6 +9556,15 @@ snapshots: '@opentelemetry/api': 1.9.0 '@opentelemetry/semantic-conventions': 1.38.0 + '@opentelemetry/exporter-logs-otlp-http@0.208.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.208.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-amqplib@0.55.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 @@ -8946,6 +9759,23 @@ snapshots: transitivePeerDependencies: - supports-color + '@opentelemetry/otlp-exporter-base@0.208.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.208.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/otlp-transformer@0.208.0(@opentelemetry/api@1.9.0)': + dependencies: + 
'@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.208.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0) + protobufjs: 7.5.4 + '@opentelemetry/redis-common@0.38.2': {} '@opentelemetry/resources@2.2.0(@opentelemetry/api@1.9.0)': @@ -8954,6 +9784,19 @@ snapshots: '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.38.0 + '@opentelemetry/sdk-logs@0.208.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.208.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-metrics@2.2.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 @@ -9118,6 +9961,19 @@ snapshots: type-fest: 4.41.0 webpack-hot-middleware: 2.26.1 + '@posthog/core@1.13.0': + dependencies: + cross-spawn: 7.0.6 + + '@posthog/react@1.7.0(@types/react@18.3.17)(posthog-js@1.334.1)(react@18.3.1)': + dependencies: + posthog-js: 1.334.1 + react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.17 + + '@posthog/types@1.334.1': {} + '@prisma/instrumentation@6.19.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 @@ -9125,6 +9981,29 @@ snapshots: transitivePeerDependencies: - supports-color + '@protobufjs/aspromise@1.1.2': {} + + '@protobufjs/base64@1.1.2': {} + + '@protobufjs/codegen@2.0.4': {} + + '@protobufjs/eventemitter@1.1.0': {} + + '@protobufjs/fetch@1.1.0': + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/inquire': 1.1.0 + + '@protobufjs/float@1.0.2': {} + + '@protobufjs/inquire@1.1.0': {} + + '@protobufjs/path@1.1.2': {} + + '@protobufjs/pool@1.1.0': {} + + '@protobufjs/utf8@1.1.0': {} + '@radix-ui/number@1.1.1': {} '@radix-ui/primitive@1.1.3': {} @@ -9755,6 +10634,8 @@ snapshots: lodash: 4.17.21 lodash-es: 4.17.22 + '@rolldown/pluginutils@1.0.0-beta.53': {} + '@rollup/plugin-commonjs@28.0.1(rollup@4.55.1)': dependencies: '@rollup/pluginutils': 5.3.0(rollup@4.55.1) @@ -10247,39 +11128,39 @@ snapshots: '@stoplight/yaml-ast-parser': 0.0.50 tslib: 2.8.1 - '@storybook/addon-a11y@9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))': + '@storybook/addon-a11y@9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))': dependencies: '@storybook/global': 5.0.0 axe-core: 4.11.1 - storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2) + storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) - '@storybook/addon-docs@9.1.5(@types/react@18.3.17)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))': + 
'@storybook/addon-docs@9.1.5(@types/react@18.3.17)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))': dependencies: '@mdx-js/react': 3.1.1(@types/react@18.3.17)(react@18.3.1) - '@storybook/csf-plugin': 9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)) + '@storybook/csf-plugin': 9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2))) '@storybook/icons': 1.6.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@storybook/react-dom-shim': 9.1.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)) + '@storybook/react-dom-shim': 9.1.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2))) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2) + storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) ts-dedent: 2.2.0 transitivePeerDependencies: - '@types/react' - '@storybook/addon-links@9.1.5(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))': + '@storybook/addon-links@9.1.5(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))': dependencies: '@storybook/global': 5.0.0 - storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2) + storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) optionalDependencies: react: 18.3.1 - '@storybook/addon-onboarding@9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))': + '@storybook/addon-onboarding@9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))': dependencies: - storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2) + storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) - '@storybook/builder-webpack5@9.1.5(esbuild@0.25.12)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(typescript@5.9.3)': + '@storybook/builder-webpack5@9.1.5(esbuild@0.25.12)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))(typescript@5.9.3)': dependencies: - 
'@storybook/core-webpack': 9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)) + '@storybook/core-webpack': 9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2))) case-sensitive-paths-webpack-plugin: 2.4.0 cjs-module-lexer: 1.4.3 css-loader: 6.11.0(webpack@5.104.1(esbuild@0.25.12)) @@ -10287,7 +11168,7 @@ snapshots: fork-ts-checker-webpack-plugin: 8.0.0(typescript@5.9.3)(webpack@5.104.1(esbuild@0.25.12)) html-webpack-plugin: 5.6.5(webpack@5.104.1(esbuild@0.25.12)) magic-string: 0.30.21 - storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2) + storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) style-loader: 3.3.4(webpack@5.104.1(esbuild@0.25.12)) terser-webpack-plugin: 5.3.16(esbuild@0.25.12)(webpack@5.104.1(esbuild@0.25.12)) ts-dedent: 2.2.0 @@ -10304,14 +11185,14 @@ snapshots: - uglify-js - webpack-cli - '@storybook/core-webpack@9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))': + '@storybook/core-webpack@9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))': dependencies: - storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2) + storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) ts-dedent: 2.2.0 - '@storybook/csf-plugin@9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))': + '@storybook/csf-plugin@9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))': dependencies: - storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2) + storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) unplugin: 1.16.1 '@storybook/global@5.0.0': {} @@ -10321,7 +11202,7 @@ snapshots: react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@storybook/nextjs@9.1.5(esbuild@0.25.12)(next@15.4.10(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(type-fest@4.41.0)(typescript@5.9.3)(webpack-hot-middleware@2.26.1)(webpack@5.104.1(esbuild@0.25.12))': + 
'@storybook/nextjs@9.1.5(esbuild@0.25.12)(next@15.4.10(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))(type-fest@4.41.0)(typescript@5.9.3)(webpack-hot-middleware@2.26.1)(webpack@5.104.1(esbuild@0.25.12))': dependencies: '@babel/core': 7.28.5 '@babel/plugin-syntax-bigint': 7.8.3(@babel/core@7.28.5) @@ -10337,9 +11218,9 @@ snapshots: '@babel/preset-typescript': 7.28.5(@babel/core@7.28.5) '@babel/runtime': 7.28.4 '@pmmmwh/react-refresh-webpack-plugin': 0.5.17(react-refresh@0.14.2)(type-fest@4.41.0)(webpack-hot-middleware@2.26.1)(webpack@5.104.1(esbuild@0.25.12)) - '@storybook/builder-webpack5': 9.1.5(esbuild@0.25.12)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(typescript@5.9.3) - '@storybook/preset-react-webpack': 9.1.5(esbuild@0.25.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(typescript@5.9.3) - '@storybook/react': 9.1.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(typescript@5.9.3) + '@storybook/builder-webpack5': 9.1.5(esbuild@0.25.12)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))(typescript@5.9.3) + '@storybook/preset-react-webpack': 9.1.5(esbuild@0.25.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))(typescript@5.9.3) + '@storybook/react': 9.1.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))(typescript@5.9.3) '@types/semver': 7.7.1 babel-loader: 9.2.1(@babel/core@7.28.5)(webpack@5.104.1(esbuild@0.25.12)) css-loader: 6.11.0(webpack@5.104.1(esbuild@0.25.12)) @@ -10355,7 +11236,7 @@ snapshots: resolve-url-loader: 5.0.0 sass-loader: 16.0.6(webpack@5.104.1(esbuild@0.25.12)) semver: 7.7.3 - storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2) + storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) style-loader: 3.3.4(webpack@5.104.1(esbuild@0.25.12)) styled-jsx: 5.1.7(@babel/core@7.28.5)(react@18.3.1) tsconfig-paths: 4.2.0 @@ -10381,9 +11262,9 @@ snapshots: - webpack-hot-middleware - webpack-plugin-serve - '@storybook/preset-react-webpack@9.1.5(esbuild@0.25.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(typescript@5.9.3)': + 
'@storybook/preset-react-webpack@9.1.5(esbuild@0.25.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))(typescript@5.9.3)': dependencies: - '@storybook/core-webpack': 9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)) + '@storybook/core-webpack': 9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2))) '@storybook/react-docgen-typescript-plugin': 1.0.6--canary.9.0c3f3b7.0(typescript@5.9.3)(webpack@5.104.1(esbuild@0.25.12)) '@types/semver': 7.7.1 find-up: 7.0.0 @@ -10393,7 +11274,7 @@ snapshots: react-dom: 18.3.1(react@18.3.1) resolve: 1.22.11 semver: 7.7.3 - storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2) + storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) tsconfig-paths: 4.2.0 webpack: 5.104.1(esbuild@0.25.12) optionalDependencies: @@ -10419,19 +11300,19 @@ snapshots: transitivePeerDependencies: - supports-color - '@storybook/react-dom-shim@9.1.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))': + '@storybook/react-dom-shim@9.1.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))': dependencies: react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2) + storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) - '@storybook/react@9.1.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(typescript@5.9.3)': + '@storybook/react@9.1.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))(typescript@5.9.3)': dependencies: '@storybook/global': 5.0.0 - '@storybook/react-dom-shim': 9.1.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)) + '@storybook/react-dom-shim': 9.1.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2))) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2) + storybook: 
9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) optionalDependencies: typescript: 5.9.3 @@ -10542,6 +11423,16 @@ snapshots: picocolors: 1.1.1 redent: 3.0.0 + '@testing-library/react@16.3.2(@testing-library/dom@10.4.1)(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@babel/runtime': 7.28.4 + '@testing-library/dom': 10.4.1 + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.17 + '@types/react-dom': 18.3.5(@types/react@18.3.17) + '@testing-library/user-event@14.6.1(@testing-library/dom@10.4.1)': dependencies: '@testing-library/dom': 10.4.1 @@ -10731,6 +11622,9 @@ snapshots: dependencies: '@types/node': 24.10.0 + '@types/trusted-types@2.0.7': + optional: true + '@types/unist@2.0.11': {} '@types/unist@3.0.3': {} @@ -10739,6 +11633,8 @@ snapshots: '@types/use-sync-external-store@0.0.6': {} + '@types/whatwg-mimetype@3.0.2': {} + '@types/ws@8.18.1': dependencies: '@types/node': 24.10.0 @@ -10905,6 +11801,18 @@ snapshots: next: 15.4.10(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 + '@vitejs/plugin-react@5.1.2(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2))': + dependencies: + '@babel/core': 7.28.5 + '@babel/plugin-transform-react-jsx-self': 7.27.1(@babel/core@7.28.5) + '@babel/plugin-transform-react-jsx-source': 7.27.1(@babel/core@7.28.5) + '@rolldown/pluginutils': 1.0.0-beta.53 + '@types/babel__core': 7.20.5 + react-refresh: 0.18.0 + vite: 7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2) + transitivePeerDependencies: + - supports-color + '@vitest/expect@3.2.4': dependencies: '@types/chai': 5.2.3 @@ -10913,28 +11821,69 @@ snapshots: chai: 5.3.3 tinyrainbow: 2.0.0 - '@vitest/mocker@3.2.4(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))': + '@vitest/expect@4.0.17': + dependencies: + '@standard-schema/spec': 1.1.0 + '@types/chai': 5.2.3 + '@vitest/spy': 4.0.17 + '@vitest/utils': 4.0.17 + chai: 6.2.2 + tinyrainbow: 3.0.3 + + '@vitest/mocker@3.2.4(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2))': dependencies: '@vitest/spy': 3.2.4 estree-walker: 3.0.3 magic-string: 0.30.21 optionalDependencies: msw: 2.11.6(@types/node@24.10.0)(typescript@5.9.3) + vite: 7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2) + + '@vitest/mocker@4.0.17(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2))': + dependencies: + '@vitest/spy': 4.0.17 + estree-walker: 3.0.3 + magic-string: 0.30.21 + optionalDependencies: + msw: 2.11.6(@types/node@24.10.0)(typescript@5.9.3) + vite: 7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2) '@vitest/pretty-format@3.2.4': dependencies: tinyrainbow: 2.0.0 + '@vitest/pretty-format@4.0.17': + dependencies: + tinyrainbow: 3.0.3 + + '@vitest/runner@4.0.17': + dependencies: + '@vitest/utils': 4.0.17 + pathe: 2.0.3 + + '@vitest/snapshot@4.0.17': + dependencies: + '@vitest/pretty-format': 4.0.17 + magic-string: 0.30.21 + pathe: 2.0.3 + '@vitest/spy@3.2.4': dependencies: tinyspy: 4.0.4 + '@vitest/spy@4.0.17': {} + '@vitest/utils@3.2.4': dependencies: '@vitest/pretty-format': 3.2.4 loupe: 3.2.1 tinyrainbow: 2.0.0 + '@vitest/utils@4.0.17': + 
dependencies: + '@vitest/pretty-format': 4.0.17 + tinyrainbow: 3.0.3 + '@webassemblyjs/ast@1.14.1': dependencies: '@webassemblyjs/helper-numbers': 1.13.2 @@ -11071,6 +12020,9 @@ snapshots: transitivePeerDependencies: - supports-color + agent-base@7.1.4: + optional: true + ajv-draft-04@1.0.0(ajv@8.17.1): optionalDependencies: ajv: 8.17.1 @@ -11305,6 +12257,11 @@ snapshots: dependencies: open: 8.4.2 + bidi-js@1.0.3: + dependencies: + require-from-string: 2.0.2 + optional: true + big.js@5.2.2: {} binary-extensions@2.3.0: {} @@ -11437,6 +12394,8 @@ snapshots: loupe: 3.2.1 pathval: 2.0.1 + chai@6.2.2: {} + chalk@4.1.2: dependencies: ansi-styles: 4.3.0 @@ -11567,6 +12526,8 @@ snapshots: core-js-pure@3.47.0: {} + core-js@3.48.0: {} + core-util-is@1.0.3: {} cosmiconfig@7.1.0: @@ -11655,12 +12616,26 @@ snapshots: domutils: 2.8.0 nth-check: 2.1.1 + css-tree@3.1.0: + dependencies: + mdn-data: 2.12.2 + source-map-js: 1.2.1 + optional: true + css-what@6.2.2: {} css.escape@1.5.1: {} cssesc@3.0.0: {} + cssstyle@5.3.7: + dependencies: + '@asamuzakjp/css-color': 4.1.1 + '@csstools/css-syntax-patches-for-csstree': 1.0.25 + css-tree: 3.1.0 + lru-cache: 11.2.4 + optional: true + csstype@3.2.3: {} d3-array@3.2.4: @@ -11729,6 +12704,12 @@ snapshots: damerau-levenshtein@1.0.8: {} + data-urls@6.0.1: + dependencies: + whatwg-mimetype: 5.0.0 + whatwg-url: 15.1.0 + optional: true + data-view-buffer@1.0.2: dependencies: call-bound: 1.0.4 @@ -11761,6 +12742,9 @@ snapshots: decimal.js-light@2.5.1: {} + decimal.js@10.6.0: + optional: true + decode-named-character-reference@1.2.0: dependencies: character-entities: 2.0.2 @@ -11853,6 +12837,10 @@ snapshots: dependencies: domelementtype: 2.3.0 + dompurify@3.3.1: + optionalDependencies: + '@types/trusted-types': 2.0.7 + domutils@2.8.0: dependencies: dom-serializer: 1.4.1 @@ -12094,6 +13082,35 @@ snapshots: '@esbuild/win32-ia32': 0.25.12 '@esbuild/win32-x64': 0.25.12 + esbuild@0.27.2: + optionalDependencies: + '@esbuild/aix-ppc64': 0.27.2 + '@esbuild/android-arm': 0.27.2 + '@esbuild/android-arm64': 0.27.2 + '@esbuild/android-x64': 0.27.2 + '@esbuild/darwin-arm64': 0.27.2 + '@esbuild/darwin-x64': 0.27.2 + '@esbuild/freebsd-arm64': 0.27.2 + '@esbuild/freebsd-x64': 0.27.2 + '@esbuild/linux-arm': 0.27.2 + '@esbuild/linux-arm64': 0.27.2 + '@esbuild/linux-ia32': 0.27.2 + '@esbuild/linux-loong64': 0.27.2 + '@esbuild/linux-mips64el': 0.27.2 + '@esbuild/linux-ppc64': 0.27.2 + '@esbuild/linux-riscv64': 0.27.2 + '@esbuild/linux-s390x': 0.27.2 + '@esbuild/linux-x64': 0.27.2 + '@esbuild/netbsd-arm64': 0.27.2 + '@esbuild/netbsd-x64': 0.27.2 + '@esbuild/openbsd-arm64': 0.27.2 + '@esbuild/openbsd-x64': 0.27.2 + '@esbuild/openharmony-arm64': 0.27.2 + '@esbuild/sunos-x64': 0.27.2 + '@esbuild/win32-arm64': 0.27.2 + '@esbuild/win32-ia32': 0.27.2 + '@esbuild/win32-x64': 0.27.2 + escalade@3.2.0: {} escape-string-regexp@4.0.0: {} @@ -12228,11 +13245,11 @@ snapshots: string.prototype.matchall: 4.0.12 string.prototype.repeat: 1.0.0 - eslint-plugin-storybook@9.1.5(eslint@8.57.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(typescript@5.9.3): + eslint-plugin-storybook@9.1.5(eslint@8.57.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))(typescript@5.9.3): dependencies: '@typescript-eslint/utils': 8.52.0(eslint@8.57.1)(typescript@5.9.3) eslint: 8.57.1 - storybook: 
9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2) + storybook: 9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) transitivePeerDependencies: - supports-color - typescript @@ -12349,6 +13366,8 @@ snapshots: exenv@1.2.2: {} + expect-type@1.3.0: {} + extend@3.0.2: {} fast-deep-equal@2.0.1: {} @@ -12391,6 +13410,8 @@ snapshots: optionalDependencies: picomatch: 4.0.3 + fflate@0.4.8: {} + file-entry-cache@6.0.1: dependencies: flat-cache: 3.2.0 @@ -12612,6 +13633,8 @@ snapshots: merge2: 1.4.1 slash: 3.0.0 + globrex@0.1.2: {} + gopd@1.2.0: {} graceful-fs@4.2.11: {} @@ -12620,6 +13643,18 @@ snapshots: graphql@16.12.0: {} + happy-dom@20.3.4: + dependencies: + '@types/node': 24.10.0 + '@types/whatwg-mimetype': 3.0.2 + '@types/ws': 8.18.1 + entities: 4.5.0 + whatwg-mimetype: 3.0.0 + ws: 8.19.0 + transitivePeerDependencies: + - bufferutil + - utf-8-validate + has-bigints@1.1.0: {} has-flag@4.0.0: {} @@ -12763,6 +13798,13 @@ snapshots: dependencies: react-is: 16.13.1 + html-encoding-sniffer@6.0.0: + dependencies: + '@exodus/bytes': 1.9.0 + transitivePeerDependencies: + - '@noble/hashes' + optional: true + html-entities@2.6.0: {} html-minifier-terser@6.1.0: @@ -12794,6 +13836,14 @@ snapshots: domutils: 2.8.0 entities: 2.2.0 + http-proxy-agent@7.0.2: + dependencies: + agent-base: 7.1.4 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + optional: true + http2-client@1.3.5: {} https-browserify@1.0.0: {} @@ -12805,6 +13855,14 @@ snapshots: transitivePeerDependencies: - supports-color + https-proxy-agent@7.0.6: + dependencies: + agent-base: 7.1.4 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + optional: true + human-signals@2.1.0: {} icss-utils@5.1.0(postcss@8.5.6): @@ -12970,6 +14028,9 @@ snapshots: is-plain-obj@4.1.0: {} + is-potential-custom-element-name@1.0.1: + optional: true + is-reference@1.2.1: dependencies: '@types/estree': 1.0.8 @@ -13062,6 +14123,35 @@ snapshots: dependencies: argparse: 2.0.1 + jsdom@27.4.0: + dependencies: + '@acemir/cssom': 0.9.31 + '@asamuzakjp/dom-selector': 6.7.6 + '@exodus/bytes': 1.9.0 + cssstyle: 5.3.7 + data-urls: 6.0.1 + decimal.js: 10.6.0 + html-encoding-sniffer: 6.0.0 + http-proxy-agent: 7.0.2 + https-proxy-agent: 7.0.6 + is-potential-custom-element-name: 1.0.1 + parse5: 8.0.0 + saxes: 6.0.0 + symbol-tree: 3.2.4 + tough-cookie: 6.0.0 + w3c-xmlserializer: 5.0.0 + webidl-conversions: 8.0.1 + whatwg-mimetype: 4.0.0 + whatwg-url: 15.1.0 + ws: 8.19.0 + xml-name-validator: 5.0.0 + transitivePeerDependencies: + - '@noble/hashes' + - bufferutil + - supports-color + - utf-8-validate + optional: true + jsep@1.4.0: {} jsesc@3.1.0: {} @@ -13209,6 +14299,8 @@ snapshots: loglevel@1.9.2: {} + long@5.3.2: {} + longest-streak@3.1.0: {} loose-envify@1.4.0: @@ -13229,6 +14321,9 @@ snapshots: lru-cache@10.4.3: {} + lru-cache@11.2.4: + optional: true + lru-cache@5.1.1: dependencies: yallist: 3.1.1 @@ -13441,6 +14536,9 @@ snapshots: dependencies: '@types/mdast': 4.0.4 + mdn-data@2.12.2: + optional: true + mdurl@2.0.0: {} memfs@3.5.3: @@ -13943,6 +15041,8 @@ snapshots: objectorarray@1.0.5: {} + obug@2.1.1: {} + once@1.4.0: dependencies: wrappy: 1.0.2 @@ -14085,6 +15185,11 @@ snapshots: dependencies: entities: 6.0.1 + parse5@8.0.0: + dependencies: + entities: 6.0.1 + optional: true + party-js@2.2.0: {} pascal-case@3.1.2: @@ -14113,6 +15218,8 @@ snapshots: path-type@4.0.0: {} + 
pathe@2.0.3: {} + pathval@2.0.1: {} pbkdf2@3.1.5: @@ -14256,6 +15363,24 @@ snapshots: dependencies: xtend: 4.0.2 + posthog-js@1.334.1: + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.208.0 + '@opentelemetry/exporter-logs-otlp-http': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.208.0(@opentelemetry/api@1.9.0) + '@posthog/core': 1.13.0 + '@posthog/types': 1.334.1 + core-js: 3.48.0 + dompurify: 3.3.1 + fflate: 0.4.8 + preact: 10.28.2 + query-selector-shadow-dom: 1.0.1 + web-vitals: 5.1.0 + + preact@10.28.2: {} + prelude-ls@1.2.1: {} prettier-plugin-tailwindcss@0.7.1(prettier@3.6.2): @@ -14289,6 +15414,21 @@ snapshots: property-information@7.1.0: {} + protobufjs@7.5.4: + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/base64': 1.1.2 + '@protobufjs/codegen': 2.0.4 + '@protobufjs/eventemitter': 1.1.0 + '@protobufjs/fetch': 1.1.0 + '@protobufjs/float': 1.0.2 + '@protobufjs/inquire': 1.1.0 + '@protobufjs/path': 1.1.2 + '@protobufjs/pool': 1.1.0 + '@protobufjs/utf8': 1.1.0 + '@types/node': 24.10.0 + long: 5.3.2 + proxy-from-env@1.1.0: {} public-encrypt@4.0.3: @@ -14310,6 +15450,8 @@ snapshots: dependencies: side-channel: 1.1.0 + query-selector-shadow-dom@1.0.1: {} + querystring-es3@0.2.1: {} queue-microtask@1.2.3: {} @@ -14414,6 +15556,8 @@ snapshots: react-refresh@0.14.2: {} + react-refresh@0.18.0: {} + react-remove-scroll-bar@2.3.8(@types/react@18.3.17)(react@18.3.1): dependencies: react: 18.3.1 @@ -14791,6 +15935,11 @@ snapshots: optionalDependencies: webpack: 5.104.1(esbuild@0.25.12) + saxes@6.0.0: + dependencies: + xmlchars: 2.2.0 + optional: true + scheduler@0.23.2: dependencies: loose-envify: 1.4.0 @@ -14946,6 +16095,8 @@ snapshots: side-channel-map: 1.0.1 side-channel-weakmap: 1.0.2 + siginfo@2.0.0: {} + signal-exit@3.0.7: {} signal-exit@4.1.0: {} @@ -14976,6 +16127,8 @@ snapshots: stable-hash@0.0.5: {} + stackback@0.0.2: {} + stackframe@1.3.4: {} stacktrace-parser@0.1.11: @@ -14984,18 +16137,20 @@ snapshots: statuses@2.0.2: {} + std-env@3.10.0: {} + stop-iteration-iterator@1.1.0: dependencies: es-errors: 1.3.0 internal-slot: 1.1.0 - storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2): + storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)): dependencies: '@storybook/global': 5.0.0 '@testing-library/jest-dom': 6.9.1 '@testing-library/user-event': 14.6.1(@testing-library/dom@10.4.1) '@vitest/expect': 3.2.4 - '@vitest/mocker': 3.2.4(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3)) + '@vitest/mocker': 3.2.4(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) '@vitest/spy': 3.2.4 better-opn: 3.0.2 esbuild: 0.25.12 @@ -15186,6 +16341,9 @@ snapshots: transitivePeerDependencies: - encoding + symbol-tree@3.2.4: + optional: true + tailwind-merge@2.6.0: {} tailwind-scrollbar@3.1.0(tailwindcss@3.4.17): @@ -15261,6 +16419,10 @@ snapshots: tiny-invariant@1.3.3: {} + tinybench@2.9.0: {} + + tinyexec@1.0.2: {} + tinyglobby@0.2.15: dependencies: fdir: 6.5.0(picomatch@4.0.3) @@ -15268,6 +16430,8 @@ snapshots: tinyrainbow@2.0.0: {} + tinyrainbow@3.0.3: {} + tinyspy@4.0.4: {} tldts-core@7.0.19: {} @@ -15292,6 +16456,11 @@ snapshots: tr46@0.0.3: {} + tr46@6.0.0: + dependencies: + punycode: 2.3.1 + optional: true + 
tree-kill@1.2.2: {} trim-lines@3.0.1: {} @@ -15310,6 +16479,10 @@ snapshots: optionalDependencies: typescript: 5.9.3 + tsconfck@3.1.6(typescript@5.9.3): + optionalDependencies: + typescript: 5.9.3 + tsconfig-paths-webpack-plugin@4.2.0: dependencies: chalk: 4.1.2 @@ -15606,8 +16779,79 @@ snapshots: d3-time: 3.1.0 d3-timer: 3.0.1 + vite-tsconfig-paths@6.0.4(typescript@5.9.3)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)): + dependencies: + debug: 4.4.3 + globrex: 0.1.2 + tsconfck: 3.1.6(typescript@5.9.3) + optionalDependencies: + vite: 7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2) + transitivePeerDependencies: + - supports-color + - typescript + + vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2): + dependencies: + esbuild: 0.27.2 + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + postcss: 8.5.6 + rollup: 4.55.1 + tinyglobby: 0.2.15 + optionalDependencies: + '@types/node': 24.10.0 + fsevents: 2.3.3 + jiti: 2.6.1 + terser: 5.44.1 + yaml: 2.8.2 + + vitest@4.0.17(@opentelemetry/api@1.9.0)(@types/node@24.10.0)(happy-dom@20.3.4)(jiti@2.6.1)(jsdom@27.4.0)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(terser@5.44.1)(yaml@2.8.2): + dependencies: + '@vitest/expect': 4.0.17 + '@vitest/mocker': 4.0.17(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) + '@vitest/pretty-format': 4.0.17 + '@vitest/runner': 4.0.17 + '@vitest/snapshot': 4.0.17 + '@vitest/spy': 4.0.17 + '@vitest/utils': 4.0.17 + es-module-lexer: 1.7.0 + expect-type: 1.3.0 + magic-string: 0.30.21 + obug: 2.1.1 + pathe: 2.0.3 + picomatch: 4.0.3 + std-env: 3.10.0 + tinybench: 2.9.0 + tinyexec: 1.0.2 + tinyglobby: 0.2.15 + tinyrainbow: 3.0.3 + vite: 7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2) + why-is-node-running: 2.3.0 + optionalDependencies: + '@opentelemetry/api': 1.9.0 + '@types/node': 24.10.0 + happy-dom: 20.3.4 + jsdom: 27.4.0 + transitivePeerDependencies: + - jiti + - less + - lightningcss + - msw + - sass + - sass-embedded + - stylus + - sugarss + - terser + - tsx + - yaml + vm-browserify@1.1.2: {} + w3c-xmlserializer@5.0.0: + dependencies: + xml-name-validator: 5.0.0 + optional: true + warning@4.0.3: dependencies: loose-envify: 1.4.0 @@ -15619,8 +16863,13 @@ snapshots: web-namespaces@2.0.1: {} + web-vitals@5.1.0: {} + webidl-conversions@3.0.1: {} + webidl-conversions@8.0.1: + optional: true + webpack-dev-middleware@6.1.3(webpack@5.104.1(esbuild@0.25.12)): dependencies: colorette: 2.0.20 @@ -15675,6 +16924,20 @@ snapshots: - esbuild - uglify-js + whatwg-mimetype@3.0.0: {} + + whatwg-mimetype@4.0.0: + optional: true + + whatwg-mimetype@5.0.0: + optional: true + + whatwg-url@15.1.0: + dependencies: + tr46: 6.0.0 + webidl-conversions: 8.0.1 + optional: true + whatwg-url@5.0.0: dependencies: tr46: 0.0.3 @@ -15725,6 +16988,11 @@ snapshots: dependencies: isexe: 2.0.0 + why-is-node-running@2.3.0: + dependencies: + siginfo: 2.0.0 + stackback: 0.0.2 + word-wrap@1.2.5: {} wrap-ansi@6.2.0: @@ -15749,8 +17017,14 @@ snapshots: ws@8.19.0: {} + xml-name-validator@5.0.0: + optional: true + xmlbuilder@15.1.1: {} + xmlchars@2.2.0: + optional: true + xtend@4.0.2: {} y18n@5.0.8: {} diff --git a/autogpt_platform/frontend/public/integrations/amazon.png b/autogpt_platform/frontend/public/integrations/amazon.png new file mode 100644 index 0000000000..4e8605abef Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/amazon.png differ diff --git 
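The lockfile additions above (vitest 4.0.17 with happy-dom and jsdom as optional environments, vite 7.3.1, vite-tsconfig-paths, expect-type, why-is-node-running) are the dependency footprint of a Vitest unit-test setup. A minimal config sketch that would exercise exactly these packages — the file name, test glob, and environment choice are assumptions, not taken from this diff:

// vitest.config.ts — a minimal sketch consistent with the entries resolved
// above; jsdom is chosen here, happy-dom (also in the lockfile) is the
// lighter alternative. Glob and file name are assumptions.
import tsconfigPaths from "vite-tsconfig-paths";
import { defineConfig } from "vitest/config";

export default defineConfig({
  plugins: [tsconfigPaths()],
  test: {
    environment: "jsdom",
    include: ["src/**/*.test.{ts,tsx}"],
  },
});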
a/autogpt_platform/frontend/public/integrations/anthropic-color.png b/autogpt_platform/frontend/public/integrations/anthropic-color.png new file mode 100644 index 0000000000..dfc85ecaa3 Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/anthropic-color.png differ diff --git a/autogpt_platform/frontend/public/integrations/cohere.png b/autogpt_platform/frontend/public/integrations/cohere.png new file mode 100644 index 0000000000..3da0b83737 Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/cohere.png differ diff --git a/autogpt_platform/frontend/public/integrations/deepseek.png b/autogpt_platform/frontend/public/integrations/deepseek.png new file mode 100644 index 0000000000..b11d0a9d86 Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/deepseek.png differ diff --git a/autogpt_platform/frontend/public/integrations/gemini.png b/autogpt_platform/frontend/public/integrations/gemini.png new file mode 100644 index 0000000000..cd78455bc5 Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/gemini.png differ diff --git a/autogpt_platform/frontend/public/integrations/gryphe.png b/autogpt_platform/frontend/public/integrations/gryphe.png new file mode 100644 index 0000000000..13d87e1d9d Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/gryphe.png differ diff --git a/autogpt_platform/frontend/public/integrations/microsoft.webp b/autogpt_platform/frontend/public/integrations/microsoft.webp new file mode 100644 index 0000000000..c416883b2a Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/microsoft.webp differ diff --git a/autogpt_platform/frontend/public/integrations/mistral.png b/autogpt_platform/frontend/public/integrations/mistral.png new file mode 100644 index 0000000000..a7169adcc8 Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/mistral.png differ diff --git a/autogpt_platform/frontend/public/integrations/moonshot.png b/autogpt_platform/frontend/public/integrations/moonshot.png new file mode 100644 index 0000000000..01c7d5694a Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/moonshot.png differ diff --git a/autogpt_platform/frontend/public/integrations/nousresearch.avif b/autogpt_platform/frontend/public/integrations/nousresearch.avif new file mode 100644 index 0000000000..e55a0626b6 Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/nousresearch.avif differ diff --git a/autogpt_platform/frontend/public/integrations/perplexity.webp b/autogpt_platform/frontend/public/integrations/perplexity.webp new file mode 100644 index 0000000000..29515cc066 Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/perplexity.webp differ diff --git a/autogpt_platform/frontend/public/integrations/qwen.png b/autogpt_platform/frontend/public/integrations/qwen.png new file mode 100644 index 0000000000..e6fba24e8d Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/qwen.png differ diff --git a/autogpt_platform/frontend/public/integrations/xai.webp b/autogpt_platform/frontend/public/integrations/xai.webp new file mode 100644 index 0000000000..fe6050b103 Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/xai.webp differ diff --git a/autogpt_platform/frontend/src/app/(no-navbar)/logout/page.tsx b/autogpt_platform/frontend/src/app/(no-navbar)/logout/page.tsx new file mode 100644 index 0000000000..ef3dc03f1a --- /dev/null +++ 
b/autogpt_platform/frontend/src/app/(no-navbar)/logout/page.tsx @@ -0,0 +1,58 @@ +"use client"; + +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; +import { Text } from "@/components/atoms/Text/Text"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; +import { useRouter } from "next/navigation"; +import { useEffect, useRef } from "react"; + +const LOGOUT_REDIRECT_DELAY_MS = 400; + +function wait(ms: number): Promise { + return new Promise(function resolveAfterDelay(resolve) { + setTimeout(resolve, ms); + }); +} + +export default function LogoutPage() { + const { logOut } = useSupabase(); + const { toast } = useToast(); + const router = useRouter(); + const hasStartedRef = useRef(false); + + useEffect( + function handleLogoutEffect() { + if (hasStartedRef.current) return; + hasStartedRef.current = true; + + async function runLogout() { + try { + await logOut(); + } catch { + toast({ + title: "Failed to log out. Redirecting to login.", + variant: "destructive", + }); + } finally { + await wait(LOGOUT_REDIRECT_DELAY_MS); + router.replace("/login"); + } + } + + void runLogout(); + }, + [logOut, router, toast], + ); + + return ( +
+
+ + + Logging you out... + +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/auth/callback/route.ts b/autogpt_platform/frontend/src/app/(platform)/auth/callback/route.ts index 13f8d988fe..a6a07a703f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/auth/callback/route.ts +++ b/autogpt_platform/frontend/src/app/(platform)/auth/callback/route.ts @@ -9,7 +9,7 @@ export async function GET(request: Request) { const { searchParams, origin } = new URL(request.url); const code = searchParams.get("code"); - let next = "/marketplace"; + let next = "/"; if (code) { const supabase = await getServerSupabase(); diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx index cfea5d9452..8ec1ba8be3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx @@ -38,8 +38,12 @@ export const AgentOutputs = ({ flowID }: { flowID: string | null }) => { return outputNodes .map((node) => { - const executionResult = node.data.nodeExecutionResult; - const outputData = executionResult?.output_data?.output; + const executionResults = node.data.nodeExecutionResults || []; + const latestResult = + executionResults.length > 0 + ? executionResults[executionResults.length - 1] + : undefined; + const outputData = latestResult?.output_data?.output; const renderer = globalRegistry.getRenderer(outputData); diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx index f381ccb93b..57890b1f17 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx @@ -5,10 +5,11 @@ import { TooltipContent, TooltipTrigger, } from "@/components/atoms/Tooltip/BaseTooltip"; -import { PlayIcon, StopIcon } from "@phosphor-icons/react"; +import { CircleNotchIcon, PlayIcon, StopIcon } from "@phosphor-icons/react"; import { useShallow } from "zustand/react/shallow"; import { RunInputDialog } from "../RunInputDialog/RunInputDialog"; import { useRunGraph } from "./useRunGraph"; +import { cn } from "@/lib/utils"; export const RunGraph = ({ flowID }: { flowID: string | null }) => { const { @@ -24,6 +25,31 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => { useShallow((state) => state.isGraphRunning), ); + const isLoading = isExecutingGraph || isTerminatingGraph || isSaving; + + // Determine which icon to show with proper animation + const renderIcon = () => { + const iconClass = cn( + "size-4 transition-transform duration-200 ease-out", + !isLoading && "group-hover:scale-110", + ); + + if (isLoading) { + return ( + + ); + } + + if (isGraphRunning) { + return ; + } + + return ; + }; + return ( <> @@ -33,18 +59,18 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => { variant={isGraphRunning ? "destructive" : "primary"} data-id={isGraphRunning ? "stop-graph-button" : "run-graph-button"} onClick={isGraphRunning ? 
handleStopGraph : handleRunGraph} - disabled={!flowID || isExecutingGraph || isTerminatingGraph} - loading={isExecutingGraph || isTerminatingGraph || isSaving} + disabled={!flowID || isLoading} + className="group" > - {!isGraphRunning ? ( - - ) : ( - - )} + {renderIcon()} - {isGraphRunning ? "Stop agent" : "Run agent"} + {isLoading + ? "Processing..." + : isGraphRunning + ? "Stop agent" + : "Run agent"} state.hasInputs); const hasCredentials = useGraphStore((state) => state.hasCredentials); const inputSchema = useGraphStore((state) => state.inputSchema); - const credentialsSchema = useGraphStore( - (state) => state.credentialsInputSchema, - ); const { - credentialsUiSchema, + credentialFields, + requiredCredentials, handleManualRun, handleInputChange, openCronSchedulerDialog, setOpenCronSchedulerDialog, inputValues, credentialValues, - handleCredentialChange, + handleCredentialFieldChange, isExecutingGraph, } = useRunInputDialog({ setIsOpen }); @@ -62,67 +61,67 @@ export const RunInputDialog = ({ isOpen, set: setIsOpen, }} - styling={{ maxWidth: "600px", minWidth: "600px" }} + styling={{ maxWidth: "700px", minWidth: "700px" }} > -
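The RunGraph hunk above consolidates three in-flight flags into a single isLoading value that drives the disabled state, the spinner icon, and the tooltip label. A compact restatement of that state logic — the names mirror the hunk, the function itself is illustrative:

// Sketch of the consolidated loading state introduced in RunGraph above.
function runButtonState(opts: {
  flowID: string | null;
  isExecutingGraph: boolean;
  isTerminatingGraph: boolean;
  isSaving: boolean;
  isGraphRunning: boolean;
}) {
  const isLoading =
    opts.isExecutingGraph || opts.isTerminatingGraph || opts.isSaving;
  return {
    isLoading,
    disabled: !opts.flowID || isLoading,
    label: isLoading
      ? "Processing..."
      : opts.isGraphRunning
        ? "Stop agent"
        : "Run agent",
  };
}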
- {/* Credentials Section */} - {hasCredentials() && ( -
-
- - Credentials - +
+
+ {/* Credentials Section */} + {hasCredentials() && credentialFields.length > 0 && ( +
+
+ + Credentials + +
+
+ +
-
- handleCredentialChange(v.formData)} - uiSchema={credentialsUiSchema} - initialValues={{}} - formContext={{ - showHandles: false, - size: "large", - showOptionalToggle: false, - }} - /> -
-
- )} + )} - {/* Inputs Section */} - {hasInputs() && ( -
-
- - Inputs - + {/* Inputs Section */} + {hasInputs() && ( +
+
+ + Inputs + +
+
+ handleInputChange(v.formData)} + uiSchema={uiSchema} + initialValues={{}} + formContext={{ + showHandles: false, + size: "large", + }} + /> +
-
- handleInputChange(v.formData)} - uiSchema={uiSchema} - initialValues={{}} - formContext={{ - showHandles: false, - size: "large", - }} - /> -
-
- )} + )} +
- {/* Action Button */}
{purpose === "run" && ( + + +
+
+ {label}: {isEnabled ? "ON" : "OFF"} +
+
+ {isEnabled ? tooltipEnabled : tooltipDisabled} +
+
+
+ + ); +} + export function FloatingSafeModeToggle({ graph, className, fullWidth = false, }: Props) { const { - currentSafeMode, + currentHITLSafeMode, + showHITLToggle, + handleHITLToggle, + currentSensitiveActionSafeMode, + showSensitiveActionToggle, + handleSensitiveActionToggle, isPending, shouldShowToggle, - isStateUndetermined, - handleToggle, } = useAgentSafeMode(graph); - if (!shouldShowToggle || isStateUndetermined || isPending) { + if (!shouldShowToggle || isPending) { return null; } return ( -
- - - - - -
-
- Safe Mode: {currentSafeMode! ? "ON" : "OFF"} -
-
- {currentSafeMode! - ? "Human in the loop blocks require manual review" - : "Human in the loop blocks proceed automatically"} -
-
-
-
+
+ {showHITLToggle && ( + + )} + {showSensitiveActionToggle && ( + + )}
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/components/CustomControl.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/components/CustomControl.tsx index 50e2034f75..7b723d73b3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/components/CustomControl.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/components/CustomControl.tsx @@ -53,14 +53,14 @@ export const CustomControls = memo( const controls = [ { id: "zoom-in-button", - icon: , + icon: , label: "Zoom In", onClick: () => zoomIn(), className: "h-10 w-10 border-none", }, { id: "zoom-out-button", - icon: , + icon: , label: "Zoom Out", onClick: () => zoomOut(), className: "h-10 w-10 border-none", @@ -68,9 +68,9 @@ export const CustomControls = memo( { id: "tutorial-button", icon: isTutorialLoading ? ( - + ) : ( - + ), label: isTutorialLoading ? "Loading Tutorial..." : "Start Tutorial", onClick: handleTutorialClick, @@ -79,7 +79,7 @@ export const CustomControls = memo( }, { id: "fit-view-button", - icon: , + icon: , label: "Fit View", onClick: () => fitView({ padding: 0.2, duration: 800, maxZoom: 1 }), className: "h-10 w-10 border-none", @@ -87,9 +87,9 @@ export const CustomControls = memo( { id: "lock-button", icon: !isLocked ? ( - + ) : ( - + ), label: "Toggle Lock", onClick: () => setIsLocked(!isLocked), diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts index 694c1be81b..f5533848d2 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts @@ -139,14 +139,6 @@ export const useFlow = () => { useNodeStore.getState().setNodes([]); useNodeStore.getState().clearResolutionState(); addNodes(customNodes); - - // Sync hardcoded values with handle IDs. - // If a key–value field has a key without a value, the backend omits it from hardcoded values. - // But if a handleId exists for that key, it causes inconsistency. - // This ensures hardcoded values stay in sync with handle IDs. 
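The comment above documents why hardcoded values must be re-synced with handle IDs; the hunk continuing just below relocates that sync out of the node-initialization effect into a dedicated effect that also waits for graph.links. An illustrative sketch of the resulting ordering — syncHardcodedValuesWithHandleIds is the store action named in this diff, the hook wrapper is not:

// Sketch (not the verbatim hook): the sync now runs only once both nodes
// and links are present, so handle IDs derived from links exist before
// hardcoded values are reconciled against them.
import { useEffect } from "react";

function useHardcodedValueSync(
  customNodes: Array<{ id: string }>,
  links: unknown[] | undefined,
  syncNode: (nodeId: string) => void,
) {
  useEffect(() => {
    if (customNodes.length > 0 && links) {
      customNodes.forEach((node) => syncNode(node.id));
    }
  }, [customNodes, links, syncNode]);
}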
- customNodes.forEach((node) => { - useNodeStore.getState().syncHardcodedValuesWithHandleIds(node.id); - }); } }, [customNodes, addNodes]); @@ -158,6 +150,14 @@ export const useFlow = () => { } }, [graph?.links, addLinks]); + useEffect(() => { + if (customNodes.length > 0 && graph?.links) { + customNodes.forEach((node) => { + useNodeStore.getState().syncHardcodedValuesWithHandleIds(node.id); + }); + } + }, [customNodes, graph?.links]); + // update node execution status in nodes useEffect(() => { if ( diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/edges/CustomEdge.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/edges/CustomEdge.tsx index eb221b5d34..3b6425a7c6 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/edges/CustomEdge.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/edges/CustomEdge.tsx @@ -19,6 +19,8 @@ export type CustomEdgeData = { beadUp?: number; beadDown?: number; beadData?: Map; + edgeColorClass?: string; + edgeHexColor?: string; }; export type CustomEdge = XYEdge; @@ -36,7 +38,6 @@ const CustomEdge = ({ selected, }: EdgeProps) => { const removeConnection = useEdgeStore((state) => state.removeEdge); - // Subscribe to the brokenEdgeIDs map and check if this edge is broken across any node const isBroken = useNodeStore((state) => state.isEdgeBroken(id)); const [isHovered, setIsHovered] = useState(false); @@ -52,6 +53,7 @@ const CustomEdge = ({ const isStatic = data?.isStatic ?? false; const beadUp = data?.beadUp ?? 0; const beadDown = data?.beadDown ?? 0; + const edgeColorClass = data?.edgeColorClass; const handleRemoveEdge = () => { removeConnection(id); @@ -70,7 +72,9 @@ const CustomEdge = ({ ? "!stroke-red-500 !stroke-[2px] [stroke-dasharray:4]" : selected ? "stroke-zinc-800" - : "stroke-zinc-500/50 hover:stroke-zinc-500", + : edgeColorClass + ? cn(edgeColorClass, "opacity-70 hover:opacity-100") + : "stroke-zinc-500/50 hover:stroke-zinc-500", )} /> { const edges = useEdgeStore((s) => s.edges); @@ -34,8 +35,13 @@ export const useCustomEdge = () => { if (exists) return; const nodes = useNodeStore.getState().nodes; - const isStatic = nodes.find((n) => n.id === conn.source)?.data - ?.staticOutput; + const sourceNode = nodes.find((n) => n.id === conn.source); + const isStatic = sourceNode?.data?.staticOutput; + + const { colorClass, hexColor } = getEdgeColorFromOutputType( + sourceNode?.data?.outputSchema, + conn.sourceHandle, + ); addEdge({ source: conn.source, @@ -44,6 +50,8 @@ export const useCustomEdge = () => { targetHandle: conn.targetHandle, data: { isStatic, + edgeColorClass: colorClass, + edgeHexColor: hexColor, }, }); }, diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx index 6306582c3b..d4aa26480d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx @@ -34,7 +34,7 @@ export type CustomNodeData = { uiType: BlockUIType; block_id: string; status?: AgentExecutionStatus; - nodeExecutionResult?: NodeExecutionResult; + nodeExecutionResults?: NodeExecutionResult[]; staticOutput?: boolean; // TODO : We need better type safety for the following backend fields. 
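The CustomEdge and useCustomEdge changes above thread a type-derived color through edge data (edgeColorClass / edgeHexColor, computed from the source node's output schema). A compact restatement of the stroke-class precedence the edge renderer applies — broken beats selected beats the new typed color beats the default; the class strings are copied from the hunk, the function is a sketch:

// Precedence of edge stroke classes as applied in CustomEdge above.
function edgeStrokeClass(opts: {
  isBroken: boolean;
  selected: boolean;
  edgeColorClass?: string;
}): string {
  if (opts.isBroken) return "!stroke-red-500 !stroke-[2px] [stroke-dasharray:4]";
  if (opts.selected) return "stroke-zinc-800";
  if (opts.edgeColorClass)
    return `${opts.edgeColorClass} opacity-70 hover:opacity-100`;
  return "stroke-zinc-500/50 hover:stroke-zinc-500";
}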
costs: BlockCost[]; @@ -75,7 +75,11 @@ export const CustomNode: React.FC> = React.memo( (value) => value !== null && value !== undefined && value !== "", ); - const outputData = data.nodeExecutionResult?.output_data; + const latestResult = + data.nodeExecutionResults && data.nodeExecutionResults.length > 0 + ? data.nodeExecutionResults[data.nodeExecutionResults.length - 1] + : undefined; + const outputData = latestResult?.output_data; const hasOutputError = typeof outputData === "object" && outputData !== null && diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx index 7189ab9ca7..c5df24e0e6 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput.tsx @@ -1,7 +1,13 @@ import { Button } from "@/components/atoms/Button/Button"; import { Text } from "@/components/atoms/Text/Text"; +import { + Accordion, + AccordionContent, + AccordionItem, + AccordionTrigger, +} from "@/components/molecules/Accordion/Accordion"; import { beautifyString, cn } from "@/lib/utils"; -import { CaretDownIcon, CopyIcon, CheckIcon } from "@phosphor-icons/react"; +import { CopyIcon, CheckIcon } from "@phosphor-icons/react"; import { NodeDataViewer } from "./components/NodeDataViewer/NodeDataViewer"; import { ContentRenderer } from "./components/ContentRenderer"; import { useNodeOutput } from "./useNodeOutput"; @@ -9,138 +15,134 @@ import { ViewMoreData } from "./components/ViewMoreData"; export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => { const { - outputData, - isExpanded, - setIsExpanded, + latestOutputData, copiedKey, handleCopy, executionResultId, - inputData, + latestInputData, } = useNodeOutput(nodeId); - if (Object.keys(outputData).length === 0) { + if (Object.keys(latestOutputData).length === 0) { return null; } return (
-
- - Node Output - - -
+ + + + + Node Output + + + +
+
+ Input - {isExpanded && ( - <> -
-
- Input + - - -
- - +
+ + +
-
- {Object.entries(outputData) - .slice(0, 2) - .map(([key, value]) => ( -
-
- - Pin: - - - {beautifyString(key)} - -
-
- - Data: - -
- {value.map((item, index) => ( -
- -
- ))} - -
- - + Pin: + + + {beautifyString(key)} + +
+
+ + Data: + +
+ {value.map((item, index) => ( +
+ +
+ ))} + +
+ + +
+
-
-
- ))} -
- - {Object.keys(outputData).length > 2 && ( - - )} - - )} + ); + })} +
+ + + +
); }; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/NodeDataViewer.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/NodeDataViewer.tsx index 0858db8f0e..680b6bc44a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/NodeDataViewer.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/NodeDataViewer.tsx @@ -19,22 +19,51 @@ import { CopyIcon, DownloadIcon, } from "@phosphor-icons/react"; -import { FC } from "react"; +import React, { FC } from "react"; import { useNodeDataViewer } from "./useNodeDataViewer"; +import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore"; +import { useShallow } from "zustand/react/shallow"; +import { NodeDataType } from "../../helpers"; -interface NodeDataViewerProps { - data: any; +export interface NodeDataViewerProps { + data?: any; pinName: string; + nodeId?: string; execId?: string; isViewMoreData?: boolean; + dataType?: NodeDataType; } export const NodeDataViewer: FC = ({ data, pinName, + nodeId, execId = "N/A", isViewMoreData = false, + dataType = "output", }) => { + const executionResults = useNodeStore( + useShallow((state) => + nodeId ? state.getNodeExecutionResults(nodeId) : [], + ), + ); + const latestInputData = useNodeStore( + useShallow((state) => + nodeId ? state.getLatestNodeInputData(nodeId) : undefined, + ), + ); + const accumulatedOutputData = useNodeStore( + useShallow((state) => + nodeId ? state.getAccumulatedNodeOutputData(nodeId) : {}, + ), + ); + + const resolvedData = + data ?? + (dataType === "input" + ? (latestInputData ?? {}) + : (accumulatedOutputData[pinName] ?? [])); + const { outputItems, copyExecutionId, @@ -42,7 +71,20 @@ export const NodeDataViewer: FC = ({ handleDownloadItem, dataArray, copiedIndex, - } = useNodeDataViewer(data, pinName, execId); + groupedExecutions, + totalGroupedItems, + handleCopyGroupedItem, + handleDownloadGroupedItem, + copiedKey, + } = useNodeDataViewer( + resolvedData, + pinName, + execId, + executionResults, + dataType, + ); + + const shouldGroupExecutions = groupedExecutions.length > 0; return ( @@ -68,44 +110,141 @@ export const NodeDataViewer: FC = ({
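The same newest-entry selection recurs throughout this diff — AgentOutputs, CustomNode, and the node store all replace the single nodeExecutionResult with a nodeExecutionResults array and read its last element. A sketch with a narrowed model type (the real NodeExecutionResult is the generated API model named in the diff):

// Latest-result selection shared across the components above: the newest
// entry of nodeExecutionResults wins; absent or empty lists yield undefined.
type NodeExecutionResultSketch = {
  node_exec_id: string;
  input_data?: Record<string, unknown>;
  output_data?: Record<string, unknown[]>;
};

function latestExecutionResult(
  results: NodeExecutionResultSketch[] | undefined,
): NodeExecutionResultSketch | undefined {
  const list = results ?? [];
  return list.length > 0 ? list[list.length - 1] : undefined;
}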
- Full Output Preview + Full {dataType === "input" ? "Input" : "Output"} Preview
- {dataArray.length} item{dataArray.length !== 1 ? "s" : ""} total + {shouldGroupExecutions ? totalGroupedItems : dataArray.length}{" "} + item + {shouldGroupExecutions + ? totalGroupedItems !== 1 + ? "s" + : "" + : dataArray.length !== 1 + ? "s" + : ""}{" "} + total
-
- - Execution ID: - - - {execId} - - -
-
- Pin:{" "} - {beautifyString(pinName)} -
+ {shouldGroupExecutions ? ( +
+ Pin:{" "} + {beautifyString(pinName)} +
+ ) : ( + <> +
+ + Execution ID: + + + {execId} + + +
+
+ Pin:{" "} + + {beautifyString(pinName)} + +
+ + )}
- {dataArray.length > 0 ? ( + {shouldGroupExecutions ? ( +
+ {groupedExecutions.map((execution) => ( +
+
+ + Execution ID: + + + {execution.execId} + +
+
+ {execution.outputItems.length > 0 ? ( + execution.outputItems.map((item, index) => ( +
+
+ +
+ +
+ + +
+
+ )) + ) : ( +
+ No data available +
+ )} +
+
+ ))} +
+ ) : dataArray.length > 0 ? (
{outputItems.map((item, index) => (
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/useNodeDataViewer.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/useNodeDataViewer.ts index d3c555970c..818d1266c1 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/useNodeDataViewer.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/useNodeDataViewer.ts @@ -1,82 +1,70 @@ -import type { OutputMetadata } from "@/components/contextual/OutputRenderers"; -import { globalRegistry } from "@/components/contextual/OutputRenderers"; import { downloadOutputs } from "@/components/contextual/OutputRenderers/utils/download"; import { useToast } from "@/components/molecules/Toast/use-toast"; import { beautifyString } from "@/lib/utils"; -import React, { useMemo, useState } from "react"; +import { useState } from "react"; +import type { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult"; +import { + NodeDataType, + createOutputItems, + getExecutionData, + normalizeToArray, + type OutputItem, +} from "../../helpers"; + +export type GroupedExecution = { + execId: string; + outputItems: Array; +}; export const useNodeDataViewer = ( data: any, pinName: string, execId: string, + executionResults?: NodeExecutionResult[], + dataType?: NodeDataType, ) => { const { toast } = useToast(); const [copiedIndex, setCopiedIndex] = useState(null); + const [copiedKey, setCopiedKey] = useState(null); - // Normalize data to array format - const dataArray = useMemo(() => { - return Array.isArray(data) ? data : [data]; - }, [data]); + const dataArray = Array.isArray(data) ? data : [data]; - // Prepare items for the enhanced renderer system - const outputItems = useMemo(() => { - if (!dataArray) return []; - - const items: Array<{ - key: string; - label: string; - value: unknown; - metadata?: OutputMetadata; - renderer: any; - }> = []; - - dataArray.forEach((value, index) => { - const metadata: OutputMetadata = {}; - - // Extract metadata from the value if it's an object - if ( - typeof value === "object" && - value !== null && - !React.isValidElement(value) - ) { - const objValue = value as any; - if (objValue.type) metadata.type = objValue.type; - if (objValue.mimeType) metadata.mimeType = objValue.mimeType; - if (objValue.filename) metadata.filename = objValue.filename; - if (objValue.language) metadata.language = objValue.language; - } - - const renderer = globalRegistry.getRenderer(value, metadata); - if (renderer) { - items.push({ - key: `item-${index}`, + const outputItems = + !dataArray || dataArray.length === 0 + ? [] + : createOutputItems(dataArray).map((item, index) => ({ + ...item, label: index === 0 ? beautifyString(pinName) : "", - value, - metadata, - renderer, - }); - } else { - // Fallback to text renderer - const textRenderer = globalRegistry - .getAllRenderers() - .find((r) => r.name === "TextRenderer"); - if (textRenderer) { - items.push({ - key: `item-${index}`, - label: index === 0 ? beautifyString(pinName) : "", - value: - typeof value === "string" - ? 
value - : JSON.stringify(value, null, 2), - metadata, - renderer: textRenderer, - }); - } - } - }); + })); - return items; - }, [dataArray, pinName]); + const groupedExecutions = + !executionResults || executionResults.length === 0 + ? [] + : [...executionResults].reverse().map((result) => { + const rawData = getExecutionData( + result, + dataType || "output", + pinName, + ); + let dataArray: unknown[]; + if (dataType === "input") { + dataArray = + rawData !== undefined && rawData !== null ? [rawData] : []; + } else { + dataArray = normalizeToArray(rawData); + } + + const outputItems = createOutputItems(dataArray); + return { + execId: result.node_exec_id, + outputItems, + }; + }); + + const totalGroupedItems = groupedExecutions.reduce( + (total, execution) => total + execution.outputItems.length, + 0, + ); const copyExecutionId = () => { navigator.clipboard.writeText(execId).then(() => { @@ -122,6 +110,45 @@ export const useNodeDataViewer = ( ]); }; + const handleCopyGroupedItem = async ( + execId: string, + index: number, + item: OutputItem, + ) => { + const copyContent = item.renderer.getCopyContent(item.value, item.metadata); + + if (!copyContent) { + return; + } + + try { + let text: string; + if (typeof copyContent.data === "string") { + text = copyContent.data; + } else if (copyContent.fallbackText) { + text = copyContent.fallbackText; + } else { + return; + } + + await navigator.clipboard.writeText(text); + setCopiedKey(`${execId}-${index}`); + setTimeout(() => setCopiedKey(null), 2000); + } catch (error) { + console.error("Failed to copy:", error); + } + }; + + const handleDownloadGroupedItem = (item: OutputItem) => { + downloadOutputs([ + { + value: item.value, + metadata: item.metadata, + renderer: item.renderer, + }, + ]); + }; + return { outputItems, dataArray, @@ -129,5 +156,10 @@ export const useNodeDataViewer = ( handleCopyItem, handleDownloadItem, copiedIndex, + groupedExecutions, + totalGroupedItems, + handleCopyGroupedItem, + handleDownloadGroupedItem, + copiedKey, }; }; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/ViewMoreData.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/ViewMoreData.tsx index 7bf026fe43..74d0da06c2 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/ViewMoreData.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/ViewMoreData.tsx @@ -8,16 +8,28 @@ import { useState } from "react"; import { NodeDataViewer } from "./NodeDataViewer/NodeDataViewer"; import { useToast } from "@/components/molecules/Toast/use-toast"; import { CheckIcon, CopyIcon } from "@phosphor-icons/react"; +import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore"; +import { useShallow } from "zustand/react/shallow"; +import { + NodeDataType, + getExecutionEntries, + normalizeToArray, +} from "../helpers"; export const ViewMoreData = ({ - outputData, - execId, + nodeId, + dataType = "output", }: { - outputData: Record>; - execId?: string; + nodeId: string; + dataType?: NodeDataType; }) => { const [copiedKey, setCopiedKey] = useState(null); const { toast } = useToast(); + const executionResults = useNodeStore( + useShallow((state) => state.getNodeExecutionResults(nodeId)), + ); + + const reversedExecutionResults = 
[...executionResults].reverse(); const handleCopy = (key: string, value: any) => { const textToCopy = @@ -29,8 +41,8 @@ export const ViewMoreData = ({ setTimeout(() => setCopiedKey(null), 2000); }; - const copyExecutionId = () => { - navigator.clipboard.writeText(execId || "N/A").then(() => { + const copyExecutionId = (executionId: string) => { + navigator.clipboard.writeText(executionId || "N/A").then(() => { toast({ title: "Execution ID copied to clipboard!", duration: 2000, @@ -42,7 +54,7 @@ export const ViewMoreData = ({ -
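The per-execution grouping built in useNodeDataViewer above reverses executionResults so the newest run renders first, normalizes output pins to arrays, and wraps input data as a single-item array when present. A self-contained sketch of that grouping — normalizeToArray mirrors the helper added in this diff, the result shape is illustrative:

// Grouping sketch: newest execution first, data normalized per dataType.
const normalizeToArray = (value: unknown): unknown[] =>
  value === undefined ? [] : Array.isArray(value) ? value : [value];

function groupExecutions(
  results: Array<{
    node_exec_id: string;
    input_data?: unknown;
    output_data?: Record<string, unknown>;
  }>,
  pinName: string,
  dataType: "input" | "output",
) {
  return [...results].reverse().map((result) => {
    const raw =
      dataType === "input" ? result.input_data : result.output_data?.[pinName];
    const items =
      dataType === "input"
        ? raw !== undefined && raw !== null
          ? [raw]
          : []
        : normalizeToArray(raw);
    return { execId: result.node_exec_id, items };
  });
}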
-
- {Object.entries(outputData).map(([key, value]) => ( -
+ {reversedExecutionResults.map((result) => ( +
+ + Execution ID: + - Pin: - - - {beautifyString(key)} + {result.node_exec_id} +
-
- - Data: - -
- {value.map((item, index) => ( -
- -
- ))} -
- - -
-
+
+ {getExecutionEntries(result, dataType).map( + ([key, value]) => { + const normalizedValue = normalizeToArray(value); + return ( +
+
+ + Pin: + + + {beautifyString(key)} + +
+
+ + Data: + +
+ {normalizedValue.map((item, index) => ( +
+ +
+ ))} + +
+ + +
+
+
+
+ ); + }, + )}
))} diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/helpers.ts new file mode 100644 index 0000000000..c75cd83cac --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/helpers.ts @@ -0,0 +1,83 @@ +import type { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult"; +import type { OutputMetadata } from "@/components/contextual/OutputRenderers"; +import { globalRegistry } from "@/components/contextual/OutputRenderers"; +import React from "react"; + +export type NodeDataType = "input" | "output"; + +export type OutputItem = { + key: string; + value: unknown; + metadata?: OutputMetadata; + renderer: any; +}; + +export const normalizeToArray = (value: unknown) => { + if (value === undefined) return []; + return Array.isArray(value) ? value : [value]; +}; + +export const getExecutionData = ( + result: NodeExecutionResult, + dataType: NodeDataType, + pinName: string, +) => { + if (dataType === "input") { + return result.input_data; + } + + return result.output_data?.[pinName]; +}; + +export const createOutputItems = (dataArray: unknown[]): Array => { + const items: Array = []; + + dataArray.forEach((value, index) => { + const metadata: OutputMetadata = {}; + + if ( + typeof value === "object" && + value !== null && + !React.isValidElement(value) + ) { + const objValue = value as any; + if (objValue.type) metadata.type = objValue.type; + if (objValue.mimeType) metadata.mimeType = objValue.mimeType; + if (objValue.filename) metadata.filename = objValue.filename; + if (objValue.language) metadata.language = objValue.language; + } + + const renderer = globalRegistry.getRenderer(value, metadata); + if (renderer) { + items.push({ + key: `item-${index}`, + value, + metadata, + renderer, + }); + } else { + const textRenderer = globalRegistry + .getAllRenderers() + .find((r) => r.name === "TextRenderer"); + if (textRenderer) { + items.push({ + key: `item-${index}`, + value: + typeof value === "string" ? value : JSON.stringify(value, null, 2), + metadata, + renderer: textRenderer, + }); + } + } + }); + + return items; +}; + +export const getExecutionEntries = ( + result: NodeExecutionResult, + dataType: NodeDataType, +) => { + const data = dataType === "input" ? 
result.input_data : result.output_data; + return Object.entries(data || {}); +}; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx index ba8559a66c..8ebf1dfaf3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/useNodeOutput.tsx @@ -4,19 +4,21 @@ import { useShallow } from "zustand/react/shallow"; import { useState } from "react"; export const useNodeOutput = (nodeId: string) => { - const [isExpanded, setIsExpanded] = useState(true); const [copiedKey, setCopiedKey] = useState(null); const { toast } = useToast(); - const nodeExecutionResult = useNodeStore( - useShallow((state) => state.getNodeExecutionResult(nodeId)), + const latestResult = useNodeStore( + useShallow((state) => state.getLatestNodeExecutionResult(nodeId)), ); - const inputData = nodeExecutionResult?.input_data; + const latestInputData = useNodeStore( + useShallow((state) => state.getLatestNodeInputData(nodeId)), + ); + + const latestOutputData: Record> = useNodeStore( + useShallow((state) => state.getLatestNodeOutputData(nodeId) || {}), + ); - const outputData: Record> = { - ...nodeExecutionResult?.output_data, - }; const handleCopy = async (key: string, value: any) => { try { const text = JSON.stringify(value, null, 2); @@ -36,14 +38,12 @@ export const useNodeOutput = (nodeId: string) => { }); } }; + return { - outputData: outputData, - inputData: inputData, - isExpanded: isExpanded, - setIsExpanded: setIsExpanded, - copiedKey: copiedKey, - setCopiedKey: setCopiedKey, - handleCopy: handleCopy, - executionResultId: nodeExecutionResult?.node_exec_id, + latestOutputData, + latestInputData, + copiedKey, + handleCopy, + executionResultId: latestResult?.node_exec_id, }; }; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/SubAgentUpdate/useSubAgentUpdateState.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/SubAgentUpdate/useSubAgentUpdateState.ts index d4ba538172..143cd58509 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/SubAgentUpdate/useSubAgentUpdateState.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/SubAgentUpdate/useSubAgentUpdateState.ts @@ -1,10 +1,7 @@ import { useState, useCallback, useEffect } from "react"; import { useShallow } from "zustand/react/shallow"; import { useGraphStore } from "@/app/(platform)/build/stores/graphStore"; -import { - useNodeStore, - NodeResolutionData, -} from "@/app/(platform)/build/stores/nodeStore"; +import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore"; import { useEdgeStore } from "@/app/(platform)/build/stores/edgeStore"; import { useSubAgentUpdate, @@ -13,6 +10,7 @@ import { } from "@/app/(platform)/build/hooks/useSubAgentUpdate"; import { GraphInputSchema, GraphOutputSchema } from "@/lib/autogpt-server-api"; import { CustomNodeData } from "../../CustomNode"; +import { NodeResolutionData } from "@/app/(platform)/build/stores/types"; // Stable empty set to avoid creating new references in 
selectors const EMPTY_SET: Set = new Set(); diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/helpers.ts index 54ddf2a61d..50326a03e6 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/helpers.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/helpers.ts @@ -1,5 +1,5 @@ import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus"; -import { NodeResolutionData } from "@/app/(platform)/build/stores/nodeStore"; +import { NodeResolutionData } from "@/app/(platform)/build/stores/types"; import { RJSFSchema } from "@rjsf/utils"; export const nodeStyleBasedOnStatus: Record = { diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/helpers.ts index 46032a67ea..48f4fc19c7 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/helpers.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/helpers.ts @@ -187,3 +187,38 @@ export const getTypeDisplayInfo = (schema: any) => { hexColor, }; }; + +export function getEdgeColorFromOutputType( + outputSchema: RJSFSchema | undefined, + sourceHandle: string, +): { colorClass: string; hexColor: string } { + const defaultColor = { + colorClass: "stroke-zinc-500/50", + hexColor: "#6b7280", + }; + + if (!outputSchema?.properties) return defaultColor; + + const properties = outputSchema.properties as Record; + const handleParts = sourceHandle.split("_#_"); + let currentSchema: Record = properties; + + for (let i = 0; i < handleParts.length; i++) { + const part = handleParts[i]; + const fieldSchema = currentSchema[part] as Record; + if (!fieldSchema) return defaultColor; + + if (i === handleParts.length - 1) { + const { hexColor, colorClass } = getTypeDisplayInfo(fieldSchema); + return { colorClass: colorClass.replace("!text-", "stroke-"), hexColor }; + } + + if (fieldSchema.properties) { + currentSchema = fieldSchema.properties as Record; + } else { + return defaultColor; + } + } + + return defaultColor; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/tutorial/icons.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/tutorial/icons.ts index 19e133ef7d..2c7a22b423 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/tutorial/icons.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/tutorial/icons.ts @@ -1,7 +1,32 @@ -// These are SVG Phosphor icons +type IconOptions = { + size?: number; + color?: string; +}; + +const DEFAULT_SIZE = 16; +const DEFAULT_COLOR = "#52525b"; // zinc-600 + +const iconPaths = { + ClickIcon: 
`M88,24V16a8,8,0,0,1,16,0v8a8,8,0,0,1-16,0ZM16,104h8a8,8,0,0,0,0-16H16a8,8,0,0,0,0,16ZM124.42,39.16a8,8,0,0,0,10.74-3.58l8-16a8,8,0,0,0-14.31-7.16l-8,16A8,8,0,0,0,124.42,39.16Zm-96,81.69-16,8a8,8,0,0,0,7.16,14.31l16-8a8,8,0,1,0-7.16-14.31ZM219.31,184a16,16,0,0,1,0,22.63l-12.68,12.68a16,16,0,0,1-22.63,0L132.7,168,115,214.09c0,.1-.08.21-.13.32a15.83,15.83,0,0,1-14.6,9.59l-.79,0a15.83,15.83,0,0,1-14.41-11L32.8,52.92A16,16,0,0,1,52.92,32.8L213,85.07a16,16,0,0,1,1.41,29.8l-.32.13L168,132.69ZM208,195.31,156.69,144h0a16,16,0,0,1,4.93-26l.32-.14,45.95-17.64L48,48l52.2,159.86,17.65-46c0-.11.08-.22.13-.33a16,16,0,0,1,11.69-9.34,16.72,16.72,0,0,1,3-.28,16,16,0,0,1,11.3,4.69L195.31,208Z`, + Keyboard: `M224,48H32A16,16,0,0,0,16,64V192a16,16,0,0,0,16,16H224a16,16,0,0,0,16-16V64A16,16,0,0,0,224,48Zm0,144H32V64H224V192Zm-16-64a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,128Zm0-32a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,96ZM72,160a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16h8A8,8,0,0,1,72,160Zm96,0a8,8,0,0,1-8,8H96a8,8,0,0,1,0-16h64A8,8,0,0,1,168,160Zm40,0a8,8,0,0,1-8,8h-8a8,8,0,0,1,0-16h8A8,8,0,0,1,208,160Z`, + Drag: `M188,80a27.79,27.79,0,0,0-13.36,3.4,28,28,0,0,0-46.64-11A28,28,0,0,0,80,92v20H68a28,28,0,0,0-28,28v12a88,88,0,0,0,176,0V108A28,28,0,0,0,188,80Zm12,72a72,72,0,0,1-144,0V140a12,12,0,0,1,12-12H80v24a8,8,0,0,0,16,0V92a12,12,0,0,1,24,0v28a8,8,0,0,0,16,0V92a12,12,0,0,1,24,0v28a8,8,0,0,0,16,0V108a12,12,0,0,1,24,0Z`, +}; + +function createIcon(path: string, options: IconOptions = {}): string { + const size = options.size ?? DEFAULT_SIZE; + const color = options.color ?? DEFAULT_COLOR; + return ``; +} export const ICONS = { - ClickIcon: ``, - Keyboard: ``, - Drag: ``, + ClickIcon: createIcon(iconPaths.ClickIcon), + Keyboard: createIcon(iconPaths.Keyboard), + Drag: createIcon(iconPaths.Drag), }; + +export function getIcon( + name: keyof typeof iconPaths, + options?: IconOptions, +): string { + return createIcon(iconPaths[name], options); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/tutorial/index.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/tutorial/index.ts index fac08ec145..49f505054b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/tutorial/index.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/tutorial/index.ts @@ -11,6 +11,7 @@ import { } from "./helpers"; import { useNodeStore } from "../../../stores/nodeStore"; import { useEdgeStore } from "../../../stores/edgeStore"; +import { useTutorialStore } from "../../../stores/tutorialStore"; let isTutorialLoading = false; let tutorialLoadingCallback: ((loading: boolean) => void) | null = null; @@ -60,12 +61,14 @@ export const startTutorial = async () => { handleTutorialComplete(); removeTutorialStyles(); clearPrefetchedBlocks(); + useTutorialStore.getState().setIsTutorialRunning(false); }); tour.on("cancel", () => { handleTutorialCancel(tour); removeTutorialStyles(); clearPrefetchedBlocks(); + useTutorialStore.getState().setIsTutorialRunning(false); }); for (const step of tour.steps) { diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/helper.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/helper.ts index 7b3c5b1d01..00c151d35b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/helper.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/helper.ts @@ -61,12 +61,18 @@ export const convertNodesPlusBlockInfoIntoCustomNodes = 
( return customNode; }; +const isToolSourceName = (sourceName: string): boolean => + sourceName.startsWith("tools_^_"); + +const cleanupSourceName = (sourceName: string): string => + isToolSourceName(sourceName) ? "tools" : sourceName; + export const linkToCustomEdge = (link: Link): CustomEdge => ({ id: link.id ?? "", type: "custom" as const, source: link.source_id, target: link.sink_id, - sourceHandle: link.source_name, + sourceHandle: cleanupSourceName(link.source_name), targetHandle: link.sink_name, data: { isStatic: link.is_static, diff --git a/autogpt_platform/frontend/src/app/(platform)/build/stores/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/build/stores/helpers.ts new file mode 100644 index 0000000000..bcdfd4c313 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/build/stores/helpers.ts @@ -0,0 +1,16 @@ +export const accumulateExecutionData = ( + accumulated: Record, + data: Record | undefined, +) => { + if (!data) return { ...accumulated }; + const next = { ...accumulated }; + Object.entries(data).forEach(([key, values]) => { + const nextValues = Array.isArray(values) ? values : [values]; + if (next[key]) { + next[key] = [...next[key], ...nextValues]; + } else { + next[key] = [...nextValues]; + } + }); + return next; +}; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts b/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts index 5502a8780d..f7a52636f3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts @@ -10,6 +10,8 @@ import { import { Node } from "@/app/api/__generated__/models/node"; import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus"; import { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult"; +import { NodeExecutionResultInputData } from "@/app/api/__generated__/models/nodeExecutionResultInputData"; +import { NodeExecutionResultOutputData } from "@/app/api/__generated__/models/nodeExecutionResultOutputData"; import { useHistoryStore } from "./historyStore"; import { useEdgeStore } from "./edgeStore"; import { BlockUIType } from "../components/types"; @@ -18,31 +20,10 @@ import { ensurePathExists, parseHandleIdToPath, } from "@/components/renderers/InputRenderer/helpers"; -import { IncompatibilityInfo } from "../hooks/useSubAgentUpdate/types"; +import { accumulateExecutionData } from "./helpers"; +import { NodeResolutionData } from "./types"; -// Resolution mode data stored per node -export type NodeResolutionData = { - incompatibilities: IncompatibilityInfo; - // The NEW schema from the update (what we're updating TO) - pendingUpdate: { - input_schema: Record; - output_schema: Record; - }; - // The OLD schema before the update (what we're updating FROM) - // Needed to merge and show removed inputs during resolution - currentSchema: { - input_schema: Record; - output_schema: Record; - }; - // The full updated hardcoded values to apply when resolution completes - pendingHardcodedValues: Record; -}; - -// Minimum movement (in pixels) required before logging position change to history -// Prevents spamming history with small movements when clicking on inputs inside blocks const MINIMUM_MOVE_BEFORE_LOG = 50; - -// Track initial positions when drag starts (outside store to avoid re-renders) const dragStartPositions: Record = {}; let dragStartState: { nodes: CustomNode[]; edges: CustomEdge[] } | null = null; @@ -52,6 +33,15 @@ 
type NodeStore = { nodeCounter: number; setNodeCounter: (nodeCounter: number) => void; nodeAdvancedStates: Record; + + latestNodeInputData: Record; + latestNodeOutputData: Record< + string, + NodeExecutionResultOutputData | undefined + >; + accumulatedNodeInputData: Record>; + accumulatedNodeOutputData: Record>; + setNodes: (nodes: CustomNode[]) => void; onNodesChange: (changes: NodeChange[]) => void; addNode: (node: CustomNode) => void; @@ -72,12 +62,26 @@ type NodeStore = { updateNodeStatus: (nodeId: string, status: AgentExecutionStatus) => void; getNodeStatus: (nodeId: string) => AgentExecutionStatus | undefined; + cleanNodesStatuses: () => void; updateNodeExecutionResult: ( nodeId: string, result: NodeExecutionResult, ) => void; - getNodeExecutionResult: (nodeId: string) => NodeExecutionResult | undefined; + getNodeExecutionResults: (nodeId: string) => NodeExecutionResult[]; + getLatestNodeInputData: ( + nodeId: string, + ) => NodeExecutionResultInputData | undefined; + getLatestNodeOutputData: ( + nodeId: string, + ) => NodeExecutionResultOutputData | undefined; + getAccumulatedNodeInputData: (nodeId: string) => Record; + getAccumulatedNodeOutputData: (nodeId: string) => Record; + getLatestNodeExecutionResult: ( + nodeId: string, + ) => NodeExecutionResult | undefined; + clearAllNodeExecutionResults: () => void; + getNodeBlockUIType: (nodeId: string) => BlockUIType; hasWebhookNodes: () => boolean; @@ -122,6 +126,10 @@ export const useNodeStore = create((set, get) => ({ nodeCounter: 0, setNodeCounter: (nodeCounter) => set({ nodeCounter }), nodeAdvancedStates: {}, + latestNodeInputData: {}, + latestNodeOutputData: {}, + accumulatedNodeInputData: {}, + accumulatedNodeOutputData: {}, incrementNodeCounter: () => set((state) => ({ nodeCounter: state.nodeCounter + 1, @@ -317,17 +325,162 @@ export const useNodeStore = create((set, get) => ({ return get().nodes.find((n) => n.id === nodeId)?.data?.status; }, - updateNodeExecutionResult: (nodeId: string, result: NodeExecutionResult) => { + cleanNodesStatuses: () => { set((state) => ({ - nodes: state.nodes.map((n) => - n.id === nodeId - ? 
{ ...n, data: { ...n.data, nodeExecutionResult: result } } - : n, - ), + nodes: state.nodes.map((n) => ({ + ...n, + data: { ...n.data, status: undefined }, + })), })); }, - getNodeExecutionResult: (nodeId: string) => { - return get().nodes.find((n) => n.id === nodeId)?.data?.nodeExecutionResult; + + updateNodeExecutionResult: (nodeId: string, result: NodeExecutionResult) => { + set((state) => { + let latestNodeInputData = state.latestNodeInputData; + let latestNodeOutputData = state.latestNodeOutputData; + let accumulatedNodeInputData = state.accumulatedNodeInputData; + let accumulatedNodeOutputData = state.accumulatedNodeOutputData; + + const nodes = state.nodes.map((n) => { + if (n.id !== nodeId) return n; + + const existingResults = n.data.nodeExecutionResults || []; + const duplicateIndex = existingResults.findIndex( + (r) => r.node_exec_id === result.node_exec_id, + ); + + if (duplicateIndex !== -1) { + const oldResult = existingResults[duplicateIndex]; + const inputDataChanged = + JSON.stringify(oldResult.input_data) !== + JSON.stringify(result.input_data); + const outputDataChanged = + JSON.stringify(oldResult.output_data) !== + JSON.stringify(result.output_data); + + if (!inputDataChanged && !outputDataChanged) { + return n; + } + + const updatedResults = [...existingResults]; + updatedResults[duplicateIndex] = result; + + const recomputedAccumulatedInput = updatedResults.reduce( + (acc, r) => accumulateExecutionData(acc, r.input_data), + {} as Record, + ); + const recomputedAccumulatedOutput = updatedResults.reduce( + (acc, r) => accumulateExecutionData(acc, r.output_data), + {} as Record, + ); + + const mostRecentResult = updatedResults[updatedResults.length - 1]; + latestNodeInputData = { + ...latestNodeInputData, + [nodeId]: mostRecentResult.input_data, + }; + latestNodeOutputData = { + ...latestNodeOutputData, + [nodeId]: mostRecentResult.output_data, + }; + + accumulatedNodeInputData = { + ...accumulatedNodeInputData, + [nodeId]: recomputedAccumulatedInput, + }; + accumulatedNodeOutputData = { + ...accumulatedNodeOutputData, + [nodeId]: recomputedAccumulatedOutput, + }; + + return { + ...n, + data: { + ...n.data, + nodeExecutionResults: updatedResults, + }, + }; + } + + accumulatedNodeInputData = { + ...accumulatedNodeInputData, + [nodeId]: accumulateExecutionData( + accumulatedNodeInputData[nodeId] || {}, + result.input_data, + ), + }; + accumulatedNodeOutputData = { + ...accumulatedNodeOutputData, + [nodeId]: accumulateExecutionData( + accumulatedNodeOutputData[nodeId] || {}, + result.output_data, + ), + }; + + latestNodeInputData = { + ...latestNodeInputData, + [nodeId]: result.input_data, + }; + latestNodeOutputData = { + ...latestNodeOutputData, + [nodeId]: result.output_data, + }; + + return { + ...n, + data: { + ...n.data, + nodeExecutionResults: [...existingResults, result], + }, + }; + }); + + return { + nodes, + latestNodeInputData, + latestNodeOutputData, + accumulatedNodeInputData, + accumulatedNodeOutputData, + }; + }); + }, + getNodeExecutionResults: (nodeId: string) => { + return ( + get().nodes.find((n) => n.id === nodeId)?.data?.nodeExecutionResults || [] + ); + }, + getLatestNodeInputData: (nodeId: string) => { + return get().latestNodeInputData[nodeId]; + }, + getLatestNodeOutputData: (nodeId: string) => { + return get().latestNodeOutputData[nodeId]; + }, + getAccumulatedNodeInputData: (nodeId: string) => { + return get().accumulatedNodeInputData[nodeId] || {}; + }, + getAccumulatedNodeOutputData: (nodeId: string) => { + return 
get().accumulatedNodeOutputData[nodeId] || {}; + }, + getLatestNodeExecutionResult: (nodeId: string) => { + const results = + get().nodes.find((n) => n.id === nodeId)?.data?.nodeExecutionResults || + []; + return results.length > 0 ? results[results.length - 1] : undefined; + }, + clearAllNodeExecutionResults: () => { + set((state) => ({ + nodes: state.nodes.map((n) => ({ + ...n, + data: { + ...n.data, + nodeExecutionResults: [], + }, + })), + latestNodeInputData: {}, + latestNodeOutputData: {}, + accumulatedNodeInputData: {}, + accumulatedNodeOutputData: {}, + })); }, getNodeBlockUIType: (nodeId: string) => { return ( diff --git a/autogpt_platform/frontend/src/app/(platform)/build/stores/types.ts b/autogpt_platform/frontend/src/app/(platform)/build/stores/types.ts new file mode 100644 index 0000000000..f0ec7e6c1c --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/build/stores/types.ts @@ -0,0 +1,14 @@ +import { IncompatibilityInfo } from "../hooks/useSubAgentUpdate/types"; + +export type NodeResolutionData = { + incompatibilities: IncompatibilityInfo; + pendingUpdate: { + input_schema: Record; + output_schema: Record; + }; + currentSchema: { + input_schema: Record; + output_schema: Record; + }; + pendingHardcodedValues: Record; +}; diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/Chat.tsx b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/Chat.tsx deleted file mode 100644 index 461c885dc3..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/Chat.tsx +++ /dev/null @@ -1,134 +0,0 @@ -"use client"; - -import { Button } from "@/components/atoms/Button/Button"; -import { Text } from "@/components/atoms/Text/Text"; -import { cn } from "@/lib/utils"; -import { List } from "@phosphor-icons/react"; -import React, { useState } from "react"; -import { ChatContainer } from "./components/ChatContainer/ChatContainer"; -import { ChatErrorState } from "./components/ChatErrorState/ChatErrorState"; -import { ChatLoadingState } from "./components/ChatLoadingState/ChatLoadingState"; -import { SessionsDrawer } from "./components/SessionsDrawer/SessionsDrawer"; -import { useChat } from "./useChat"; - -export interface ChatProps { - className?: string; - headerTitle?: React.ReactNode; - showHeader?: boolean; - showSessionInfo?: boolean; - showNewChatButton?: boolean; - onNewChat?: () => void; - headerActions?: React.ReactNode; -} - -export function Chat({ - className, - headerTitle = "AutoGPT Copilot", - showHeader = true, - showSessionInfo = true, - showNewChatButton = true, - onNewChat, - headerActions, -}: ChatProps) { - const { - messages, - isLoading, - isCreating, - error, - sessionId, - createSession, - clearSession, - loadSession, - } = useChat(); - - const [isSessionsDrawerOpen, setIsSessionsDrawerOpen] = useState(false); - - const handleNewChat = () => { - clearSession(); - onNewChat?.(); - }; - - const handleSelectSession = async (sessionId: string) => { - try { - await loadSession(sessionId); - } catch (err) { - console.error("Failed to load session:", err); - } - }; - - return ( -
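The node store changes above keep four derived maps (latest and accumulated input/output per node) in sync via the accumulateExecutionData helper added in stores/helpers.ts. A usage sketch showing its append-don't-replace behavior — the helper body matches the diff, the example data is made up:

// Repeated pins accumulate across executions; scalar values are wrapped.
const accumulateExecutionData = (
  accumulated: Record<string, unknown[]>,
  data: Record<string, unknown> | undefined,
) => {
  if (!data) return { ...accumulated };
  const next = { ...accumulated };
  Object.entries(data).forEach(([key, values]) => {
    const nextValues = Array.isArray(values) ? values : [values];
    next[key] = next[key] ? [...next[key], ...nextValues] : [...nextValues];
  });
  return next;
};

let acc: Record<string, unknown[]> = {};
acc = accumulateExecutionData(acc, { output: "first" });
acc = accumulateExecutionData(acc, { output: ["second", "third"] });
// acc.output is now ["first", "second", "third"]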
- {/* Header */} - {showHeader && ( -
-
-
- - {typeof headerTitle === "string" ? ( - - {headerTitle} - - ) : ( - headerTitle - )} -
-
- {showSessionInfo && sessionId && ( - <> - {showNewChatButton && ( - - )} - - )} - {headerActions} -
-
-
- )} - - {/* Main Content */} -
- {/* Loading State - show when explicitly loading/creating OR when we don't have a session yet and no error */} - {(isLoading || isCreating || (!sessionId && !error)) && ( - - )} - - {/* Error State */} - {error && !isLoading && ( - - )} - - {/* Session Content */} - {sessionId && !isLoading && !error && ( - - )} -
- - {/* Sessions Drawer */} - setIsSessionsDrawerOpen(false)} - onSelectSession={handleSelectSession} - currentSessionId={sessionId} - /> -
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/ChatContainer.tsx b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/ChatContainer.tsx deleted file mode 100644 index 6f7a0e8f51..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/ChatContainer.tsx +++ /dev/null @@ -1,88 +0,0 @@ -import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; -import { cn } from "@/lib/utils"; -import { useCallback } from "react"; -import { usePageContext } from "../../usePageContext"; -import { ChatInput } from "../ChatInput/ChatInput"; -import { MessageList } from "../MessageList/MessageList"; -import { QuickActionsWelcome } from "../QuickActionsWelcome/QuickActionsWelcome"; -import { useChatContainer } from "./useChatContainer"; - -export interface ChatContainerProps { - sessionId: string | null; - initialMessages: SessionDetailResponse["messages"]; - className?: string; -} - -export function ChatContainer({ - sessionId, - initialMessages, - className, -}: ChatContainerProps) { - const { messages, streamingChunks, isStreaming, sendMessage } = - useChatContainer({ - sessionId, - initialMessages, - }); - const { capturePageContext } = usePageContext(); - - // Wrap sendMessage to automatically capture page context - const sendMessageWithContext = useCallback( - async (content: string, isUserMessage: boolean = true) => { - const context = capturePageContext(); - await sendMessage(content, isUserMessage, context); - }, - [sendMessage, capturePageContext], - ); - - const quickActions = [ - "Find agents for social media management", - "Show me agents for content creation", - "Help me automate my business", - "What can you help me with?", - ]; - - return ( -
- {/* Messages or Welcome Screen */} -
- {messages.length === 0 ? ( - - ) : ( - - )} -
- - {/* Input - Always visible */} -
- -
-
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatInput/ChatInput.tsx b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatInput/ChatInput.tsx deleted file mode 100644 index 3101174a11..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatInput/ChatInput.tsx +++ /dev/null @@ -1,64 +0,0 @@ -import { Input } from "@/components/atoms/Input/Input"; -import { cn } from "@/lib/utils"; -import { ArrowUpIcon } from "@phosphor-icons/react"; -import { useChatInput } from "./useChatInput"; - -export interface ChatInputProps { - onSend: (message: string) => void; - disabled?: boolean; - placeholder?: string; - className?: string; -} - -export function ChatInput({ - onSend, - disabled = false, - placeholder = "Type your message...", - className, -}: ChatInputProps) { - const inputId = "chat-input"; - const { value, setValue, handleKeyDown, handleSend } = useChatInput({ - onSend, - disabled, - maxRows: 5, - inputId, - }); - - return ( -
- setValue(e.target.value)} - onKeyDown={handleKeyDown} - placeholder={placeholder} - disabled={disabled} - rows={1} - wrapperClassName="mb-0 relative" - className="pr-12" - /> - - Press Enter to send, Shift+Enter for new line - - - -
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatInput/useChatInput.ts b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatInput/useChatInput.ts deleted file mode 100644 index 08cf565daa..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatInput/useChatInput.ts +++ /dev/null @@ -1,60 +0,0 @@ -import { KeyboardEvent, useCallback, useEffect, useState } from "react"; - -interface UseChatInputArgs { - onSend: (message: string) => void; - disabled?: boolean; - maxRows?: number; - inputId?: string; -} - -export function useChatInput({ - onSend, - disabled = false, - maxRows = 5, - inputId = "chat-input", -}: UseChatInputArgs) { - const [value, setValue] = useState(""); - - useEffect(() => { - const textarea = document.getElementById(inputId) as HTMLTextAreaElement; - if (!textarea) return; - textarea.style.height = "auto"; - const lineHeight = parseInt( - window.getComputedStyle(textarea).lineHeight, - 10, - ); - const maxHeight = lineHeight * maxRows; - const newHeight = Math.min(textarea.scrollHeight, maxHeight); - textarea.style.height = `${newHeight}px`; - textarea.style.overflowY = - textarea.scrollHeight > maxHeight ? "auto" : "hidden"; - }, [value, maxRows, inputId]); - - const handleSend = useCallback(() => { - if (disabled || !value.trim()) return; - onSend(value.trim()); - setValue(""); - const textarea = document.getElementById(inputId) as HTMLTextAreaElement; - if (textarea) { - textarea.style.height = "auto"; - } - }, [value, onSend, disabled, inputId]); - - const handleKeyDown = useCallback( - (event: KeyboardEvent) => { - if (event.key === "Enter" && !event.shiftKey) { - event.preventDefault(); - handleSend(); - } - // Shift+Enter allows default behavior (new line) - no need to handle explicitly - }, - [handleSend], - ); - - return { - value, - setValue, - handleKeyDown, - handleSend, - }; -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/MessageList/MessageList.tsx b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/MessageList/MessageList.tsx deleted file mode 100644 index 22b51c0a92..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/MessageList/MessageList.tsx +++ /dev/null @@ -1,121 +0,0 @@ -"use client"; - -import { cn } from "@/lib/utils"; -import { ChatMessage } from "../ChatMessage/ChatMessage"; -import type { ChatMessageData } from "../ChatMessage/useChatMessage"; -import { StreamingMessage } from "../StreamingMessage/StreamingMessage"; -import { ThinkingMessage } from "../ThinkingMessage/ThinkingMessage"; -import { useMessageList } from "./useMessageList"; - -export interface MessageListProps { - messages: ChatMessageData[]; - streamingChunks?: string[]; - isStreaming?: boolean; - className?: string; - onStreamComplete?: () => void; - onSendMessage?: (content: string) => void; -} - -export function MessageList({ - messages, - streamingChunks = [], - isStreaming = false, - className, - onStreamComplete, - onSendMessage, -}: MessageListProps) { - const { messagesEndRef, messagesContainerRef } = useMessageList({ - messageCount: messages.length, - isStreaming, - }); - - return ( -
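
The deleted `useChatInput` hook above grows the textarea with DOM measurements rather than CSS. The same measurement logic, extracted as a standalone helper for clarity (a sketch; the function name and the extraction are mine, the logic is taken directly from the hook):

```ts
// Auto-resize a textarea up to maxRows, then switch to scrolling.
function autoResizeTextarea(textarea: HTMLTextAreaElement, maxRows: number) {
  // Reset height so scrollHeight reflects the full content height.
  textarea.style.height = "auto";
  const lineHeight = parseInt(window.getComputedStyle(textarea).lineHeight, 10);
  const maxHeight = lineHeight * maxRows;
  const newHeight = Math.min(textarea.scrollHeight, maxHeight);
  textarea.style.height = `${newHeight}px`;
  // Only show a scrollbar once the content exceeds the cap.
  textarea.style.overflowY =
    textarea.scrollHeight > maxHeight ? "auto" : "hidden";
}
```

Resetting `height` to `auto` before reading `scrollHeight` is what lets the textarea shrink again when text is deleted.
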
-
- {/* Render all persisted messages */} - {messages.map((message, index) => { - // Check if current message is an agent_output tool_response - // and if previous message is an assistant message - let agentOutput: ChatMessageData | undefined; - - if (message.type === "tool_response" && message.result) { - let parsedResult: Record | null = null; - try { - parsedResult = - typeof message.result === "string" - ? JSON.parse(message.result) - : (message.result as Record); - } catch { - parsedResult = null; - } - if (parsedResult?.type === "agent_output") { - const prevMessage = messages[index - 1]; - if ( - prevMessage && - prevMessage.type === "message" && - prevMessage.role === "assistant" - ) { - // This agent output will be rendered inside the previous assistant message - // Skip rendering this message separately - return null; - } - } - } - - // Check if next message is an agent_output tool_response to include in current assistant message - if (message.type === "message" && message.role === "assistant") { - const nextMessage = messages[index + 1]; - if ( - nextMessage && - nextMessage.type === "tool_response" && - nextMessage.result - ) { - let parsedResult: Record | null = null; - try { - parsedResult = - typeof nextMessage.result === "string" - ? JSON.parse(nextMessage.result) - : (nextMessage.result as Record); - } catch { - parsedResult = null; - } - if (parsedResult?.type === "agent_output") { - agentOutput = nextMessage; - } - } - } - - return ( - - ); - })} - - {/* Render thinking message when streaming but no chunks yet */} - {isStreaming && streamingChunks.length === 0 && } - - {/* Render streaming message if active */} - {isStreaming && streamingChunks.length > 0 && ( - - )} - - {/* Invisible div to scroll to */} -
-
-
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ThinkingMessage/ThinkingMessage.tsx b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ThinkingMessage/ThinkingMessage.tsx deleted file mode 100644 index d8adddf416..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ThinkingMessage/ThinkingMessage.tsx +++ /dev/null @@ -1,70 +0,0 @@ -import { cn } from "@/lib/utils"; -import { RobotIcon } from "@phosphor-icons/react"; -import { useEffect, useRef, useState } from "react"; -import { MessageBubble } from "../MessageBubble/MessageBubble"; - -export interface ThinkingMessageProps { - className?: string; -} - -export function ThinkingMessage({ className }: ThinkingMessageProps) { - const [showSlowLoader, setShowSlowLoader] = useState(false); - const timerRef = useRef(null); - - useEffect(() => { - if (timerRef.current === null) { - timerRef.current = setTimeout(() => { - setShowSlowLoader(true); - }, 8000); - } - - return () => { - if (timerRef.current) { - clearTimeout(timerRef.current); - timerRef.current = null; - } - }; - }, []); - - return ( -
-
-
-
- -
-
- -
- -
- {showSlowLoader ? ( -
-
-

- Taking a bit longer to think, wait a moment please -

-
- ) : ( - - Thinking... - - )} -
- -
-
-
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ToolCallMessage/ToolCallMessage.tsx b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ToolCallMessage/ToolCallMessage.tsx deleted file mode 100644 index 97590ae0cf..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ToolCallMessage/ToolCallMessage.tsx +++ /dev/null @@ -1,24 +0,0 @@ -import { Text } from "@/components/atoms/Text/Text"; -import { cn } from "@/lib/utils"; -import { WrenchIcon } from "@phosphor-icons/react"; -import { getToolActionPhrase } from "../../helpers"; - -export interface ToolCallMessageProps { - toolName: string; - className?: string; -} - -export function ToolCallMessage({ toolName, className }: ToolCallMessageProps) { - return ( -
- - - {getToolActionPhrase(toolName)}... - -
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx deleted file mode 100644 index b84204c3ff..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx +++ /dev/null @@ -1,260 +0,0 @@ -import { Text } from "@/components/atoms/Text/Text"; -import "@/components/contextual/OutputRenderers"; -import { - globalRegistry, - OutputItem, -} from "@/components/contextual/OutputRenderers"; -import { cn } from "@/lib/utils"; -import type { ToolResult } from "@/types/chat"; -import { WrenchIcon } from "@phosphor-icons/react"; -import { getToolActionPhrase } from "../../helpers"; - -export interface ToolResponseMessageProps { - toolName: string; - result?: ToolResult; - success?: boolean; - className?: string; -} - -export function ToolResponseMessage({ - toolName, - result, - success: _success = true, - className, -}: ToolResponseMessageProps) { - if (!result) { - return ( -
- - - {getToolActionPhrase(toolName)}... - -
-    );
-  }
-
-  let parsedResult: Record<string, unknown> | null = null;
-  try {
-    parsedResult =
-      typeof result === "string"
-        ? JSON.parse(result)
-        : (result as Record<string, unknown>);
-  } catch {
-    parsedResult = null;
-  }
-
-  if (parsedResult && typeof parsedResult === "object") {
-    const responseType = parsedResult.type as string | undefined;
-
-    if (responseType === "agent_output") {
-      const execution = parsedResult.execution as
-        | {
-            outputs?: Record<string, unknown[]>;
-          }
-        | null
-        | undefined;
-      const outputs = execution?.outputs || {};
-      const message = parsedResult.message as string | undefined;
-
-      return (
-
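
The parse-or-cast dance above also appears verbatim in the deleted `MessageList`. An editorial sketch of a shared helper both call sites could have used (`parseToolResult` is my name, not code from this PR; `ToolResult` is the type the deleted file imports):

```ts
import type { ToolResult } from "@/types/chat";

// Parse a tool result into a plain object, or null when it is not valid JSON.
function parseToolResult(result: ToolResult): Record<string, unknown> | null {
  try {
    return typeof result === "string"
      ? JSON.parse(result)
      : (result as Record<string, unknown>);
  } catch {
    return null;
  }
}
```
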
-
- - - {getToolActionPhrase(toolName)} - -
- {message && ( -
- - {message} - -
- )} - {Object.keys(outputs).length > 0 && ( -
- {Object.entries(outputs).map(([outputName, values]) => - values.map((value, index) => { - const renderer = globalRegistry.getRenderer(value); - if (renderer) { - return ( - - ); - } - return ( -
- - {outputName} - -
-                        {JSON.stringify(value, null, 2)}
-                      
-
- ); - }), - )} -
- )} -
- ); - } - - if (responseType === "block_output" && parsedResult.outputs) { - const outputs = parsedResult.outputs as Record; - - return ( -
-
- - - {getToolActionPhrase(toolName)} - -
-
- {Object.entries(outputs).map(([outputName, values]) => - values.map((value, index) => { - const renderer = globalRegistry.getRenderer(value); - if (renderer) { - return ( - - ); - } - return ( -
- - {outputName} - -
-                      {JSON.stringify(value, null, 2)}
-                    
-
- ); - }), - )} -
-
- ); - } - - // Handle other response types with a message field (e.g., understanding_updated) - if (parsedResult.message && typeof parsedResult.message === "string") { - // Format tool name from snake_case to Title Case - const formattedToolName = toolName - .split("_") - .map((word) => word.charAt(0).toUpperCase() + word.slice(1)) - .join(" "); - - // Clean up message - remove incomplete user_name references - let cleanedMessage = parsedResult.message; - // Remove "Updated understanding with: user_name" pattern if user_name is just a placeholder - cleanedMessage = cleanedMessage.replace( - /Updated understanding with:\s*user_name\.?\s*/gi, - "", - ); - // Remove standalone user_name references - cleanedMessage = cleanedMessage.replace(/\buser_name\b\.?\s*/gi, ""); - cleanedMessage = cleanedMessage.trim(); - - // Only show message if it has content after cleaning - if (!cleanedMessage) { - return ( -
- - - {formattedToolName} - -
- ); - } - - return ( -
-
- - - {formattedToolName} - -
-
- - {cleanedMessage} - -
-
- ); - } - } - - const renderer = globalRegistry.getRenderer(result); - if (renderer) { - return ( -
-
- - - {getToolActionPhrase(toolName)} - -
- -
- ); - } - - return ( -
- - - {getToolActionPhrase(toolName)}... - -
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/helpers.ts deleted file mode 100644 index 0fade56b73..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/helpers.ts +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Maps internal tool names to user-friendly display names with emojis. - * @deprecated Use getToolActionPhrase or getToolCompletionPhrase for status messages - * - * @param toolName - The internal tool name from the backend - * @returns A user-friendly display name with an emoji prefix - */ -export function getToolDisplayName(toolName: string): string { - const toolDisplayNames: Record = { - find_agent: "🔍 Search Marketplace", - get_agent_details: "📋 Get Agent Details", - check_credentials: "🔑 Check Credentials", - setup_agent: "⚙️ Setup Agent", - run_agent: "▶️ Run Agent", - get_required_setup_info: "📝 Get Setup Requirements", - }; - return toolDisplayNames[toolName] || toolName; -} - -/** - * Maps internal tool names to human-friendly action phrases (present continuous). - * Used for tool call messages to indicate what action is currently happening. - * - * @param toolName - The internal tool name from the backend - * @returns A human-friendly action phrase in present continuous tense - */ -export function getToolActionPhrase(toolName: string): string { - const toolActionPhrases: Record = { - find_agent: "Looking for agents in the marketplace", - agent_carousel: "Looking for agents in the marketplace", - get_agent_details: "Learning about the agent", - check_credentials: "Checking your credentials", - setup_agent: "Setting up the agent", - execution_started: "Running the agent", - run_agent: "Running the agent", - get_required_setup_info: "Getting setup requirements", - schedule_agent: "Scheduling the agent to run", - }; - - // Return mapped phrase or generate human-friendly fallback - return toolActionPhrases[toolName] || toolName; -} - -/** - * Maps internal tool names to human-friendly completion phrases (past tense). - * Used for tool response messages to indicate what action was completed. 
- * - * @param toolName - The internal tool name from the backend - * @returns A human-friendly completion phrase in past tense - */ -export function getToolCompletionPhrase(toolName: string): string { - const toolCompletionPhrases: Record = { - find_agent: "Finished searching the marketplace", - get_agent_details: "Got agent details", - check_credentials: "Checked credentials", - setup_agent: "Agent setup complete", - run_agent: "Agent execution started", - get_required_setup_info: "Got setup requirements", - }; - - // Return mapped phrase or generate human-friendly fallback - return ( - toolCompletionPhrases[toolName] || - `Finished ${toolName.replace(/_/g, " ").replace("...", "")}` - ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/useChatSession.ts b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/useChatSession.ts deleted file mode 100644 index a54dc9e32a..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/useChatSession.ts +++ /dev/null @@ -1,271 +0,0 @@ -import { - getGetV2GetSessionQueryKey, - getGetV2GetSessionQueryOptions, - postV2CreateSession, - useGetV2GetSession, - usePatchV2SessionAssignUser, - usePostV2CreateSession, -} from "@/app/api/__generated__/endpoints/chat/chat"; -import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; -import { okData } from "@/app/api/helpers"; -import { isValidUUID } from "@/lib/utils"; -import { Key, storage } from "@/services/storage/local-storage"; -import { useQueryClient } from "@tanstack/react-query"; -import { useCallback, useEffect, useMemo, useRef, useState } from "react"; -import { toast } from "sonner"; - -interface UseChatSessionArgs { - urlSessionId?: string | null; - autoCreate?: boolean; -} - -export function useChatSession({ - urlSessionId, - autoCreate = false, -}: UseChatSessionArgs = {}) { - const queryClient = useQueryClient(); - const [sessionId, setSessionId] = useState(null); - const [error, setError] = useState(null); - const justCreatedSessionIdRef = useRef(null); - - useEffect(() => { - if (urlSessionId) { - if (!isValidUUID(urlSessionId)) { - console.error("Invalid session ID format:", urlSessionId); - toast.error("Invalid session ID", { - description: - "The session ID in the URL is not valid. 
Starting a new session...", - }); - setSessionId(null); - storage.clean(Key.CHAT_SESSION_ID); - return; - } - setSessionId(urlSessionId); - storage.set(Key.CHAT_SESSION_ID, urlSessionId); - } else { - const storedSessionId = storage.get(Key.CHAT_SESSION_ID); - if (storedSessionId) { - if (!isValidUUID(storedSessionId)) { - console.error("Invalid stored session ID:", storedSessionId); - storage.clean(Key.CHAT_SESSION_ID); - setSessionId(null); - } else { - setSessionId(storedSessionId); - } - } else if (autoCreate) { - setSessionId(null); - } - } - }, [urlSessionId, autoCreate]); - - const { - mutateAsync: createSessionMutation, - isPending: isCreating, - error: createError, - } = usePostV2CreateSession(); - - const { - data: sessionData, - isLoading: isLoadingSession, - error: loadError, - refetch, - } = useGetV2GetSession(sessionId || "", { - query: { - enabled: !!sessionId, - select: okData, - staleTime: Infinity, // Never mark as stale - refetchOnMount: false, // Don't refetch on component mount - refetchOnWindowFocus: false, // Don't refetch when window regains focus - refetchOnReconnect: false, // Don't refetch when network reconnects - retry: 1, - }, - }); - - const { mutateAsync: claimSessionMutation } = usePatchV2SessionAssignUser(); - - const session = useMemo(() => { - if (sessionData) return sessionData; - - if (sessionId && justCreatedSessionIdRef.current === sessionId) { - return { - id: sessionId, - user_id: null, - messages: [], - created_at: new Date().toISOString(), - updated_at: new Date().toISOString(), - } as SessionDetailResponse; - } - return null; - }, [sessionData, sessionId]); - - const messages = session?.messages || []; - const isLoading = isCreating || isLoadingSession; - - useEffect(() => { - if (createError) { - setError( - createError instanceof Error - ? createError - : new Error("Failed to create session"), - ); - } else if (loadError) { - setError( - loadError instanceof Error - ? loadError - : new Error("Failed to load session"), - ); - } else { - setError(null); - } - }, [createError, loadError]); - - const createSession = useCallback( - async function createSession() { - try { - setError(null); - const response = await postV2CreateSession({ - body: JSON.stringify({}), - }); - if (response.status !== 200) { - throw new Error("Failed to create session"); - } - const newSessionId = response.data.id; - setSessionId(newSessionId); - storage.set(Key.CHAT_SESSION_ID, newSessionId); - justCreatedSessionIdRef.current = newSessionId; - setTimeout(() => { - if (justCreatedSessionIdRef.current === newSessionId) { - justCreatedSessionIdRef.current = null; - } - }, 10000); - return newSessionId; - } catch (err) { - const error = - err instanceof Error ? 
err : new Error("Failed to create session"); - setError(error); - toast.error("Failed to create chat session", { - description: error.message, - }); - throw error; - } - }, - [createSessionMutation], - ); - - const loadSession = useCallback( - async function loadSession(id: string) { - try { - setError(null); - // Invalidate the query cache for this session to force a fresh fetch - await queryClient.invalidateQueries({ - queryKey: getGetV2GetSessionQueryKey(id), - }); - // Set sessionId after invalidation to ensure the hook refetches - setSessionId(id); - storage.set(Key.CHAT_SESSION_ID, id); - // Force fetch with fresh data (bypass cache) - const queryOptions = getGetV2GetSessionQueryOptions(id, { - query: { - staleTime: 0, // Force fresh fetch - retry: 1, - }, - }); - const result = await queryClient.fetchQuery(queryOptions); - if (!result || ("status" in result && result.status !== 200)) { - console.warn("Session not found on server, clearing local state"); - storage.clean(Key.CHAT_SESSION_ID); - setSessionId(null); - throw new Error("Session not found"); - } - } catch (err) { - const error = - err instanceof Error ? err : new Error("Failed to load session"); - setError(error); - throw error; - } - }, - [queryClient], - ); - - const refreshSession = useCallback( - async function refreshSession() { - if (!sessionId) { - console.log("[refreshSession] Skipping - no session ID"); - return; - } - try { - setError(null); - await refetch(); - } catch (err) { - const error = - err instanceof Error ? err : new Error("Failed to refresh session"); - setError(error); - throw error; - } - }, - [sessionId, refetch], - ); - - const claimSession = useCallback( - async function claimSession(id: string) { - try { - setError(null); - await claimSessionMutation({ sessionId: id }); - if (justCreatedSessionIdRef.current === id) { - justCreatedSessionIdRef.current = null; - } - await queryClient.invalidateQueries({ - queryKey: getGetV2GetSessionQueryKey(id), - }); - await refetch(); - toast.success("Session claimed successfully", { - description: "Your chat history has been saved to your account", - }); - } catch (err: unknown) { - const error = - err instanceof Error ? 
err : new Error("Failed to claim session"); - const is404 = - (typeof err === "object" && - err !== null && - "status" in err && - err.status === 404) || - (typeof err === "object" && - err !== null && - "response" in err && - typeof err.response === "object" && - err.response !== null && - "status" in err.response && - err.response.status === 404); - if (!is404) { - setError(error); - toast.error("Failed to claim session", { - description: error.message || "Unable to claim session", - }); - } - throw error; - } - }, - [claimSessionMutation, queryClient, refetch], - ); - - const clearSession = useCallback(function clearSession() { - setSessionId(null); - setError(null); - storage.clean(Key.CHAT_SESSION_ID); - justCreatedSessionIdRef.current = null; - }, []); - - return { - session, - sessionId, - messages, - isLoading, - isCreating, - error, - createSession, - loadSession, - refreshSession, - claimSession, - clearSession, - }; -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/page.tsx b/autogpt_platform/frontend/src/app/(platform)/chat/page.tsx deleted file mode 100644 index 9c04e40594..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/page.tsx +++ /dev/null @@ -1,27 +0,0 @@ -"use client"; - -import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; -import { useRouter } from "next/navigation"; -import { useEffect } from "react"; -import { Chat } from "./components/Chat/Chat"; - -export default function ChatPage() { - const isChatEnabled = useGetFlag(Flag.CHAT); - const router = useRouter(); - - useEffect(() => { - if (isChatEnabled === false) { - router.push("/marketplace"); - } - }, [isChatEnabled, router]); - - if (isChatEnabled === null || isChatEnabled === false) { - return null; - } - - return ( -
- -
-    );
-}
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/NewChatContext.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/NewChatContext.tsx
new file mode 100644
index 0000000000..0826637043
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/NewChatContext.tsx
@@ -0,0 +1,41 @@
+"use client";
+
+import { createContext, useContext, useRef, type ReactNode } from "react";
+
+interface NewChatContextValue {
+  onNewChatClick: () => void;
+  setOnNewChatClick: (handler?: () => void) => void;
+  performNewChat?: () => void;
+  setPerformNewChat: (handler?: () => void) => void;
+}
+
+const NewChatContext = createContext<NewChatContextValue | null>(null);
+
+export function NewChatProvider({ children }: { children: ReactNode }) {
+  const onNewChatRef = useRef<(() => void) | undefined>();
+  const performNewChatRef = useRef<(() => void) | undefined>();
+  const contextValueRef = useRef({
+    onNewChatClick() {
+      onNewChatRef.current?.();
+    },
+    setOnNewChatClick(handler?: () => void) {
+      onNewChatRef.current = handler;
+    },
+    performNewChat() {
+      performNewChatRef.current?.();
+    },
+    setPerformNewChat(handler?: () => void) {
+      performNewChatRef.current = handler;
+    },
+  });
+
+  return (
+    <NewChatContext.Provider value={contextValueRef.current}>
+      {children}
+    </NewChatContext.Provider>
+  );
+}
+
+export function useNewChat() {
+  return useContext(NewChatContext);
+}
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx
new file mode 100644
index 0000000000..44e32024a8
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx
@@ -0,0 +1,105 @@
+"use client";
+
+import { ChatLoader } from "@/components/contextual/Chat/components/ChatLoader/ChatLoader";
+import { NAVBAR_HEIGHT_PX } from "@/lib/constants";
+import type { ReactNode } from "react";
+import { useEffect } from "react";
+import { useNewChat } from "../../NewChatContext";
+import { DesktopSidebar } from "./components/DesktopSidebar/DesktopSidebar";
+import { LoadingState } from "./components/LoadingState/LoadingState";
+import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer";
+import { MobileHeader } from "./components/MobileHeader/MobileHeader";
+import { useCopilotShell } from "./useCopilotShell";
+
+interface Props {
+  children: ReactNode;
+}
+
+export function CopilotShell({ children }: Props) {
+  const {
+    isMobile,
+    isDrawerOpen,
+    isLoading,
+    isLoggedIn,
+    hasActiveSession,
+    sessions,
+    currentSessionId,
+    handleSelectSession,
+    handleOpenDrawer,
+    handleCloseDrawer,
+    handleDrawerOpenChange,
+    handleNewChat,
+    hasNextPage,
+    isFetchingNextPage,
+    fetchNextPage,
+    isReadyToShowContent,
+  } = useCopilotShell();
+
+  const newChatContext = useNewChat();
+  const handleNewChatClickWrapper =
+    newChatContext?.onNewChatClick || handleNewChat;
+
+  useEffect(
+    function registerNewChatHandler() {
+      if (!newChatContext) return;
+      newChatContext.setPerformNewChat(handleNewChat);
+      return function cleanup() {
+        newChatContext.setPerformNewChat(undefined);
+      };
+    },
+    [newChatContext, handleNewChat],
+  );
+
+  if (!isLoggedIn) {
+    return (
+
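
The `NewChatContext` above hands out a single ref-backed object, so registering a handler never re-renders context consumers; components register and unregister in effects instead. An illustrative consumer following the same registration pattern `CopilotShell` uses (the hook name here is hypothetical):

```ts
import { useEffect } from "react";
import { useNewChat } from "./NewChatContext";

// Register a "new chat" click handler for the lifetime of the caller.
export function useRegisterNewChatHandler(onNewChat: () => void) {
  const newChat = useNewChat();

  useEffect(() => {
    if (!newChat) return;
    newChat.setOnNewChatClick(onNewChat);
    // Clear the handler on unmount so stale closures are never invoked.
    return () => newChat.setOnNewChatClick(undefined);
  }, [newChat, onNewChat]);
}
```
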
+ +
+ ); + } + + return ( +
+ {!isMobile && ( + + )} + +
+ {isMobile && } +
+ {isReadyToShowContent ? children : } +
+
+ + {isMobile && ( + + )} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/DesktopSidebar/DesktopSidebar.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/DesktopSidebar/DesktopSidebar.tsx new file mode 100644 index 0000000000..122a09a02f --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/DesktopSidebar/DesktopSidebar.tsx @@ -0,0 +1,70 @@ +import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { scrollbarStyles } from "@/components/styles/scrollbars"; +import { cn } from "@/lib/utils"; +import { Plus } from "@phosphor-icons/react"; +import { SessionsList } from "../SessionsList/SessionsList"; + +interface Props { + sessions: SessionSummaryResponse[]; + currentSessionId: string | null; + isLoading: boolean; + hasNextPage: boolean; + isFetchingNextPage: boolean; + onSelectSession: (sessionId: string) => void; + onFetchNextPage: () => void; + onNewChat: () => void; + hasActiveSession: boolean; +} + +export function DesktopSidebar({ + sessions, + currentSessionId, + isLoading, + hasNextPage, + isFetchingNextPage, + onSelectSession, + onFetchNextPage, + onNewChat, + hasActiveSession, +}: Props) { + return ( + + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/LoadingState/LoadingState.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/LoadingState/LoadingState.tsx new file mode 100644 index 0000000000..21b1663916 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/LoadingState/LoadingState.tsx @@ -0,0 +1,15 @@ +import { Text } from "@/components/atoms/Text/Text"; +import { ChatLoader } from "@/components/contextual/Chat/components/ChatLoader/ChatLoader"; + +export function LoadingState() { + return ( +
+
+ + + Loading your chats... + +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/MobileDrawer.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/MobileDrawer.tsx new file mode 100644 index 0000000000..ea3b39f829 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/MobileDrawer.tsx @@ -0,0 +1,91 @@ +import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; +import { Button } from "@/components/atoms/Button/Button"; +import { scrollbarStyles } from "@/components/styles/scrollbars"; +import { cn } from "@/lib/utils"; +import { PlusIcon, X } from "@phosphor-icons/react"; +import { Drawer } from "vaul"; +import { SessionsList } from "../SessionsList/SessionsList"; + +interface Props { + isOpen: boolean; + sessions: SessionSummaryResponse[]; + currentSessionId: string | null; + isLoading: boolean; + hasNextPage: boolean; + isFetchingNextPage: boolean; + onSelectSession: (sessionId: string) => void; + onFetchNextPage: () => void; + onNewChat: () => void; + onClose: () => void; + onOpenChange: (open: boolean) => void; + hasActiveSession: boolean; +} + +export function MobileDrawer({ + isOpen, + sessions, + currentSessionId, + isLoading, + hasNextPage, + isFetchingNextPage, + onSelectSession, + onFetchNextPage, + onNewChat, + onClose, + onOpenChange, + hasActiveSession, +}: Props) { + return ( + + + + +
+
+ + Your chats + + +
+
+
+ +
+ {hasActiveSession && ( +
+ +
+ )} +
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/useMobileDrawer.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/useMobileDrawer.ts new file mode 100644 index 0000000000..c9504e49a9 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/useMobileDrawer.ts @@ -0,0 +1,24 @@ +import { useState } from "react"; + +export function useMobileDrawer() { + const [isDrawerOpen, setIsDrawerOpen] = useState(false); + + function handleOpenDrawer() { + setIsDrawerOpen(true); + } + + function handleCloseDrawer() { + setIsDrawerOpen(false); + } + + function handleDrawerOpenChange(open: boolean) { + setIsDrawerOpen(open); + } + + return { + isDrawerOpen, + handleOpenDrawer, + handleCloseDrawer, + handleDrawerOpenChange, + }; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileHeader/MobileHeader.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileHeader/MobileHeader.tsx new file mode 100644 index 0000000000..e0d6161744 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileHeader/MobileHeader.tsx @@ -0,0 +1,22 @@ +import { Button } from "@/components/atoms/Button/Button"; +import { NAVBAR_HEIGHT_PX } from "@/lib/constants"; +import { ListIcon } from "@phosphor-icons/react"; + +interface Props { + onOpenDrawer: () => void; +} + +export function MobileHeader({ onOpenDrawer }: Props) { + return ( + + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/SessionsList.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/SessionsList.tsx new file mode 100644 index 0000000000..ef63e1aff4 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/SessionsList.tsx @@ -0,0 +1,80 @@ +import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; +import { Skeleton } from "@/components/__legacy__/ui/skeleton"; +import { Text } from "@/components/atoms/Text/Text"; +import { InfiniteList } from "@/components/molecules/InfiniteList/InfiniteList"; +import { cn } from "@/lib/utils"; +import { getSessionTitle } from "../../helpers"; + +interface Props { + sessions: SessionSummaryResponse[]; + currentSessionId: string | null; + isLoading: boolean; + hasNextPage: boolean; + isFetchingNextPage: boolean; + onSelectSession: (sessionId: string) => void; + onFetchNextPage: () => void; +} + +export function SessionsList({ + sessions, + currentSessionId, + isLoading, + hasNextPage, + isFetchingNextPage, + onSelectSession, + onFetchNextPage, +}: Props) { + if (isLoading) { + return ( +
+ {Array.from({ length: 5 }).map((_, i) => ( +
+ +
+ ))} +
+ ); + } + + if (sessions.length === 0) { + return ( +
+ + You don't have previous chats + +
+ ); + } + + return ( + { + const isActive = session.id === currentSessionId; + return ( + + ); + }} + /> + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts new file mode 100644 index 0000000000..8833a419c1 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts @@ -0,0 +1,92 @@ +import { useGetV2ListSessions } from "@/app/api/__generated__/endpoints/chat/chat"; +import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; +import { okData } from "@/app/api/helpers"; +import { useEffect, useMemo, useState } from "react"; + +const PAGE_SIZE = 50; + +export interface UseSessionsPaginationArgs { + enabled: boolean; +} + +export function useSessionsPagination({ enabled }: UseSessionsPaginationArgs) { + const [offset, setOffset] = useState(0); + const [accumulatedSessions, setAccumulatedSessions] = useState< + SessionSummaryResponse[] + >([]); + const [totalCount, setTotalCount] = useState(null); + + const { data, isLoading, isFetching, isError } = useGetV2ListSessions( + { limit: PAGE_SIZE, offset }, + { + query: { + enabled: enabled && offset >= 0, + }, + }, + ); + + useEffect(() => { + const responseData = okData(data); + if (responseData) { + const newSessions = responseData.sessions; + const total = responseData.total; + setTotalCount(total); + + if (offset === 0) { + setAccumulatedSessions(newSessions); + } else { + setAccumulatedSessions((prev) => [...prev, ...newSessions]); + } + } else if (!enabled) { + setAccumulatedSessions([]); + setTotalCount(null); + } + }, [data, offset, enabled]); + + const hasNextPage = useMemo(() => { + if (totalCount === null) return false; + return accumulatedSessions.length < totalCount; + }, [accumulatedSessions.length, totalCount]); + + const areAllSessionsLoaded = useMemo(() => { + if (totalCount === null) return false; + return ( + accumulatedSessions.length >= totalCount && !isFetching && !isLoading + ); + }, [accumulatedSessions.length, totalCount, isFetching, isLoading]); + + useEffect(() => { + if ( + hasNextPage && + !isFetching && + !isLoading && + !isError && + totalCount !== null + ) { + setOffset((prev) => prev + PAGE_SIZE); + } + }, [hasNextPage, isFetching, isLoading, isError, totalCount]); + + function fetchNextPage() { + if (hasNextPage && !isFetching) { + setOffset((prev) => prev + PAGE_SIZE); + } + } + + function reset() { + setOffset(0); + setAccumulatedSessions([]); + setTotalCount(null); + } + + return { + sessions: accumulatedSessions, + isLoading, + isFetching, + hasNextPage, + areAllSessionsLoaded, + totalCount, + fetchNextPage, + reset, + }; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts new file mode 100644 index 0000000000..bf4eb70ccb --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts @@ -0,0 +1,165 @@ +import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; +import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; +import { format, formatDistanceToNow, isToday } from "date-fns"; + +export 
function convertSessionDetailToSummary( + session: SessionDetailResponse, +): SessionSummaryResponse { + return { + id: session.id, + created_at: session.created_at, + updated_at: session.updated_at, + title: undefined, + }; +} + +export function filterVisibleSessions( + sessions: SessionSummaryResponse[], +): SessionSummaryResponse[] { + return sessions.filter( + (session) => session.updated_at !== session.created_at, + ); +} + +export function getSessionTitle(session: SessionSummaryResponse): string { + if (session.title) return session.title; + const isNewSession = session.updated_at === session.created_at; + if (isNewSession) { + const createdDate = new Date(session.created_at); + if (isToday(createdDate)) { + return "Today"; + } + return format(createdDate, "MMM d, yyyy"); + } + return "Untitled Chat"; +} + +export function getSessionUpdatedLabel( + session: SessionSummaryResponse, +): string { + if (!session.updated_at) return ""; + return formatDistanceToNow(new Date(session.updated_at), { addSuffix: true }); +} + +export function mergeCurrentSessionIntoList( + accumulatedSessions: SessionSummaryResponse[], + currentSessionId: string | null, + currentSessionData: SessionDetailResponse | null | undefined, +): SessionSummaryResponse[] { + const filteredSessions: SessionSummaryResponse[] = []; + + if (accumulatedSessions.length > 0) { + const visibleSessions = filterVisibleSessions(accumulatedSessions); + + if (currentSessionId) { + const currentInAll = accumulatedSessions.find( + (s) => s.id === currentSessionId, + ); + if (currentInAll) { + const isInVisible = visibleSessions.some( + (s) => s.id === currentSessionId, + ); + if (!isInVisible) { + filteredSessions.push(currentInAll); + } + } + } + + filteredSessions.push(...visibleSessions); + } + + if (currentSessionId && currentSessionData) { + const isCurrentInList = filteredSessions.some( + (s) => s.id === currentSessionId, + ); + if (!isCurrentInList) { + const summarySession = convertSessionDetailToSummary(currentSessionData); + filteredSessions.unshift(summarySession); + } + } + + return filteredSessions; +} + +export function getCurrentSessionId( + searchParams: URLSearchParams, +): string | null { + return searchParams.get("sessionId"); +} + +export function shouldAutoSelectSession( + areAllSessionsLoaded: boolean, + hasAutoSelectedSession: boolean, + paramSessionId: string | null, + visibleSessions: SessionSummaryResponse[], + accumulatedSessions: SessionSummaryResponse[], + isLoading: boolean, + totalCount: number | null, +): { + shouldSelect: boolean; + sessionIdToSelect: string | null; + shouldCreate: boolean; +} { + if (!areAllSessionsLoaded || hasAutoSelectedSession) { + return { + shouldSelect: false, + sessionIdToSelect: null, + shouldCreate: false, + }; + } + + if (paramSessionId) { + return { + shouldSelect: false, + sessionIdToSelect: null, + shouldCreate: false, + }; + } + + if (visibleSessions.length > 0) { + return { + shouldSelect: true, + sessionIdToSelect: visibleSessions[0].id, + shouldCreate: false, + }; + } + + if (accumulatedSessions.length === 0 && !isLoading && totalCount === 0) { + return { shouldSelect: false, sessionIdToSelect: null, shouldCreate: true }; + } + + if (totalCount === 0) { + return { + shouldSelect: false, + sessionIdToSelect: null, + shouldCreate: false, + }; + } + + return { shouldSelect: false, sessionIdToSelect: null, shouldCreate: false }; +} + +export function checkReadyToShowContent( + areAllSessionsLoaded: boolean, + paramSessionId: string | null, + accumulatedSessions: 
SessionSummaryResponse[], + isCurrentSessionLoading: boolean, + currentSessionData: SessionDetailResponse | null | undefined, + hasAutoSelectedSession: boolean, +): boolean { + if (!areAllSessionsLoaded) return false; + + if (paramSessionId) { + const sessionFound = accumulatedSessions.some( + (s) => s.id === paramSessionId, + ); + return ( + sessionFound || + (!isCurrentSessionLoading && + currentSessionData !== undefined && + currentSessionData !== null) + ); + } + + return hasAutoSelectedSession; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts new file mode 100644 index 0000000000..cadd98da3e --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts @@ -0,0 +1,172 @@ +"use client"; + +import { + getGetV2ListSessionsQueryKey, + useGetV2GetSession, +} from "@/app/api/__generated__/endpoints/chat/chat"; +import { okData } from "@/app/api/helpers"; +import { useBreakpoint } from "@/lib/hooks/useBreakpoint"; +import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; +import { useQueryClient } from "@tanstack/react-query"; +import { usePathname, useRouter, useSearchParams } from "next/navigation"; +import { useEffect, useRef, useState } from "react"; +import { useMobileDrawer } from "./components/MobileDrawer/useMobileDrawer"; +import { useSessionsPagination } from "./components/SessionsList/useSessionsPagination"; +import { + checkReadyToShowContent, + filterVisibleSessions, + getCurrentSessionId, + mergeCurrentSessionIntoList, +} from "./helpers"; + +export function useCopilotShell() { + const router = useRouter(); + const pathname = usePathname(); + const searchParams = useSearchParams(); + const queryClient = useQueryClient(); + const breakpoint = useBreakpoint(); + const { isLoggedIn } = useSupabase(); + const isMobile = + breakpoint === "base" || breakpoint === "sm" || breakpoint === "md"; + + const isOnHomepage = pathname === "/copilot"; + const paramSessionId = searchParams.get("sessionId"); + + const { + isDrawerOpen, + handleOpenDrawer, + handleCloseDrawer, + handleDrawerOpenChange, + } = useMobileDrawer(); + + const paginationEnabled = !isMobile || isDrawerOpen || !!paramSessionId; + + const { + sessions: accumulatedSessions, + isLoading: isSessionsLoading, + isFetching: isSessionsFetching, + hasNextPage, + areAllSessionsLoaded, + fetchNextPage, + reset: resetPagination, + } = useSessionsPagination({ + enabled: paginationEnabled, + }); + + const currentSessionId = getCurrentSessionId(searchParams); + + const { data: currentSessionData, isLoading: isCurrentSessionLoading } = + useGetV2GetSession(currentSessionId || "", { + query: { + enabled: !!currentSessionId, + select: okData, + }, + }); + + const [hasAutoSelectedSession, setHasAutoSelectedSession] = useState(false); + const hasAutoSelectedRef = useRef(false); + + // Mark as auto-selected when sessionId is in URL + useEffect(() => { + if (paramSessionId && !hasAutoSelectedRef.current) { + hasAutoSelectedRef.current = true; + setHasAutoSelectedSession(true); + } + }, [paramSessionId]); + + // On homepage without sessionId, mark as ready immediately + useEffect(() => { + if (isOnHomepage && !paramSessionId && !hasAutoSelectedRef.current) { + hasAutoSelectedRef.current = true; + setHasAutoSelectedSession(true); + } + }, [isOnHomepage, paramSessionId]); + + // Invalidate sessions list when navigating to homepage 
(to show newly created sessions) + useEffect(() => { + if (isOnHomepage && !paramSessionId) { + queryClient.invalidateQueries({ + queryKey: getGetV2ListSessionsQueryKey(), + }); + } + }, [isOnHomepage, paramSessionId, queryClient]); + + // Reset pagination when query becomes disabled + const prevPaginationEnabledRef = useRef(paginationEnabled); + useEffect(() => { + if (prevPaginationEnabledRef.current && !paginationEnabled) { + resetPagination(); + resetAutoSelect(); + } + prevPaginationEnabledRef.current = paginationEnabled; + }, [paginationEnabled, resetPagination]); + + const sessions = mergeCurrentSessionIntoList( + accumulatedSessions, + currentSessionId, + currentSessionData, + ); + + const visibleSessions = filterVisibleSessions(sessions); + + const sidebarSelectedSessionId = + isOnHomepage && !paramSessionId ? null : currentSessionId; + + const isReadyToShowContent = isOnHomepage + ? true + : checkReadyToShowContent( + areAllSessionsLoaded, + paramSessionId, + accumulatedSessions, + isCurrentSessionLoading, + currentSessionData, + hasAutoSelectedSession, + ); + + function handleSelectSession(sessionId: string) { + // Navigate using replaceState to avoid full page reload + window.history.replaceState(null, "", `/copilot?sessionId=${sessionId}`); + // Force a re-render by updating the URL through router + router.replace(`/copilot?sessionId=${sessionId}`); + if (isMobile) handleCloseDrawer(); + } + + function handleNewChat() { + resetAutoSelect(); + resetPagination(); + // Invalidate and refetch sessions list to ensure newly created sessions appear + queryClient.invalidateQueries({ + queryKey: getGetV2ListSessionsQueryKey(), + }); + window.history.replaceState(null, "", "/copilot"); + router.replace("/copilot"); + if (isMobile) handleCloseDrawer(); + } + + function resetAutoSelect() { + hasAutoSelectedRef.current = false; + setHasAutoSelectedSession(false); + } + + const isLoading = isSessionsLoading && accumulatedSessions.length === 0; + + return { + isMobile, + isDrawerOpen, + isLoggedIn, + hasActiveSession: + Boolean(currentSessionId) && (!isOnHomepage || Boolean(paramSessionId)), + isLoading, + sessions: visibleSessions, + currentSessionId: sidebarSelectedSessionId, + handleSelectSession, + handleOpenDrawer, + handleCloseDrawer, + handleDrawerOpenChange, + handleNewChat, + hasNextPage, + isFetchingNextPage: isSessionsFetching, + fetchNextPage, + isReadyToShowContent, + }; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts new file mode 100644 index 0000000000..a5818f0a9f --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts @@ -0,0 +1,56 @@ +import type { User } from "@supabase/supabase-js"; + +export type PageState = + | { type: "welcome" } + | { type: "newChat" } + | { type: "creating"; prompt: string } + | { type: "chat"; sessionId: string; initialPrompt?: string }; + +export function getInitialPromptFromState( + pageState: PageState, + storedInitialPrompt: string | undefined, +) { + if (storedInitialPrompt) return storedInitialPrompt; + if (pageState.type === "creating") return pageState.prompt; + if (pageState.type === "chat") return pageState.initialPrompt; +} + +export function shouldResetToWelcome(pageState: PageState) { + return ( + pageState.type !== "newChat" && + pageState.type !== "creating" && + pageState.type !== "welcome" + ); +} + +export function getGreetingName(user?: User | null): string { + if (!user) return "there"; + const metadata = 
user.user_metadata as Record<string, unknown> | undefined;
+  const fullName = metadata?.full_name;
+  const name = metadata?.name;
+  if (typeof fullName === "string" && fullName.trim()) {
+    return fullName.split(" ")[0];
+  }
+  if (typeof name === "string" && name.trim()) {
+    return name.split(" ")[0];
+  }
+  if (user.email) {
+    return user.email.split("@")[0];
+  }
+  return "there";
+}
+
+export function buildCopilotChatUrl(prompt: string): string {
+  const trimmed = prompt.trim();
+  if (!trimmed) return "/copilot/chat";
+  const encoded = encodeURIComponent(trimmed);
+  return `/copilot/chat?prompt=${encoded}`;
+}
+
+export function getQuickActions(): string[] {
+  return [
+    "Show me what I can automate",
+    "Design a custom workflow",
+    "Help me with content creation",
+  ];
+}
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx
new file mode 100644
index 0000000000..0f40de8f25
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx
@@ -0,0 +1,11 @@
+import type { ReactNode } from "react";
+import { NewChatProvider } from "./NewChatContext";
+import { CopilotShell } from "./components/CopilotShell/CopilotShell";
+
+export default function CopilotLayout({ children }: { children: ReactNode }) {
+  return (
+    <NewChatProvider>
+      <CopilotShell>{children}</CopilotShell>
+    </NewChatProvider>
+  );
+}
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx
new file mode 100644
index 0000000000..3bbafd087b
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx
@@ -0,0 +1,169 @@
+"use client";
+
+import { Skeleton } from "@/components/__legacy__/ui/skeleton";
+import { Button } from "@/components/atoms/Button/Button";
+import { Text } from "@/components/atoms/Text/Text";
+import { Chat } from "@/components/contextual/Chat/Chat";
+import { ChatInput } from "@/components/contextual/Chat/components/ChatInput/ChatInput";
+import { ChatLoader } from "@/components/contextual/Chat/components/ChatLoader/ChatLoader";
+import { Dialog } from "@/components/molecules/Dialog/Dialog";
+import { useCopilotPage } from "./useCopilotPage";
+
+export default function CopilotPage() {
+  const { state, handlers } = useCopilotPage();
+  const {
+    greetingName,
+    quickActions,
+    isLoading,
+    pageState,
+    isNewChatModalOpen,
+    isReady,
+  } = state;
+  const {
+    handleQuickAction,
+    startChatWithPrompt,
+    handleSessionNotFound,
+    handleStreamingChange,
+    handleCancelNewChat,
+    proceedWithNewChat,
+    handleNewChatModalOpen,
+  } = handlers;
+
+  if (!isReady) {
+    return null;
+  }
+
+  // Show Chat when we have an active session
+  if (pageState.type === "chat") {
+    return (
+
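
The `PageState` union and `getInitialPromptFromState` in this route's helpers resolve the initial prompt with a fixed priority: a stored prompt wins, then a `creating` state's prompt, then a `chat` state's optional one. A quick illustration (the values are made up):

```ts
import { getInitialPromptFromState, type PageState } from "./helpers";

const creating: PageState = { type: "creating", prompt: "Summarize my inbox" };
const chat: PageState = {
  type: "chat",
  sessionId: "123e4567-e89b-12d3-a456-426614174000", // hypothetical id
  initialPrompt: "Summarize my inbox",
};

getInitialPromptFromState(creating, undefined); // "Summarize my inbox" (creating prompt)
getInitialPromptFromState(chat, "Stored prompt"); // "Stored prompt" (stored value wins)
getInitialPromptFromState({ type: "welcome" }, undefined); // undefined
```
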
+ + + +
+ + The current chat response will be interrupted. Are you sure you + want to start a new chat? + + + + + +
+
+
+
+ ); + } + + if (pageState.type === "newChat") { + return ( +
+
+ + + Loading your chats... + +
+
+ ); + } + + // Show loading state while creating session and sending first message + if (pageState.type === "creating") { + return ( +
+
+ + + Loading your chats... + +
+
+ ); + } + + // Show Welcome screen + return ( +
+
+ {isLoading ? ( +
+ + +
+ +
+
+ {Array.from({ length: 4 }).map((_, i) => ( + + ))} +
+
+ ) : ( + <> +
+ + Hey, {greetingName} + + + What do you want to automate? + + +
+ +
+
+
+ {quickActions.map((action) => ( + + ))} +
+ + )} +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts new file mode 100644 index 0000000000..cb13137432 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts @@ -0,0 +1,266 @@ +import { postV2CreateSession } from "@/app/api/__generated__/endpoints/chat/chat"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { getHomepageRoute } from "@/lib/constants"; +import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; +import { + Flag, + type FlagValues, + useGetFlag, +} from "@/services/feature-flags/use-get-flag"; +import * as Sentry from "@sentry/nextjs"; +import { useFlags } from "launchdarkly-react-client-sdk"; +import { useRouter } from "next/navigation"; +import { useEffect, useReducer } from "react"; +import { useNewChat } from "./NewChatContext"; +import { getGreetingName, getQuickActions, type PageState } from "./helpers"; +import { useCopilotURLState } from "./useCopilotURLState"; + +type CopilotState = { + pageState: PageState; + isStreaming: boolean; + isNewChatModalOpen: boolean; + initialPrompts: Record; + previousSessionId: string | null; +}; + +type CopilotAction = + | { type: "setPageState"; pageState: PageState } + | { type: "setStreaming"; isStreaming: boolean } + | { type: "setNewChatModalOpen"; isOpen: boolean } + | { type: "setInitialPrompt"; sessionId: string; prompt: string } + | { type: "setPreviousSessionId"; sessionId: string | null }; + +function isSamePageState(next: PageState, current: PageState) { + if (next.type !== current.type) return false; + if (next.type === "creating" && current.type === "creating") { + return next.prompt === current.prompt; + } + if (next.type === "chat" && current.type === "chat") { + return ( + next.sessionId === current.sessionId && + next.initialPrompt === current.initialPrompt + ); + } + return true; +} + +function copilotReducer( + state: CopilotState, + action: CopilotAction, +): CopilotState { + if (action.type === "setPageState") { + if (isSamePageState(action.pageState, state.pageState)) return state; + return { ...state, pageState: action.pageState }; + } + if (action.type === "setStreaming") { + if (action.isStreaming === state.isStreaming) return state; + return { ...state, isStreaming: action.isStreaming }; + } + if (action.type === "setNewChatModalOpen") { + if (action.isOpen === state.isNewChatModalOpen) return state; + return { ...state, isNewChatModalOpen: action.isOpen }; + } + if (action.type === "setInitialPrompt") { + if (state.initialPrompts[action.sessionId] === action.prompt) return state; + return { + ...state, + initialPrompts: { + ...state.initialPrompts, + [action.sessionId]: action.prompt, + }, + }; + } + if (action.type === "setPreviousSessionId") { + if (state.previousSessionId === action.sessionId) return state; + return { ...state, previousSessionId: action.sessionId }; + } + return state; +} + +export function useCopilotPage() { + const router = useRouter(); + const { user, isLoggedIn, isUserLoading } = useSupabase(); + const { toast } = useToast(); + + const isChatEnabled = useGetFlag(Flag.CHAT); + const flags = useFlags(); + const homepageRoute = getHomepageRoute(isChatEnabled); + const envEnabled = process.env.NEXT_PUBLIC_LAUNCHDARKLY_ENABLED === "true"; + const clientId = process.env.NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID; + const isLaunchDarklyConfigured = envEnabled && Boolean(clientId); + const isFlagReady = + 
!isLaunchDarklyConfigured || flags[Flag.CHAT] !== undefined; + + const [state, dispatch] = useReducer(copilotReducer, { + pageState: { type: "welcome" }, + isStreaming: false, + isNewChatModalOpen: false, + initialPrompts: {}, + previousSessionId: null, + }); + + const newChatContext = useNewChat(); + const greetingName = getGreetingName(user); + const quickActions = getQuickActions(); + + function setPageState(pageState: PageState) { + dispatch({ type: "setPageState", pageState }); + } + + function setInitialPrompt(sessionId: string, prompt: string) { + dispatch({ type: "setInitialPrompt", sessionId, prompt }); + } + + function setPreviousSessionId(sessionId: string | null) { + dispatch({ type: "setPreviousSessionId", sessionId }); + } + + const { setUrlSessionId } = useCopilotURLState({ + pageState: state.pageState, + initialPrompts: state.initialPrompts, + previousSessionId: state.previousSessionId, + setPageState, + setInitialPrompt, + setPreviousSessionId, + }); + + useEffect( + function registerNewChatHandler() { + if (!newChatContext) return; + newChatContext.setOnNewChatClick(handleNewChatClick); + return function cleanup() { + newChatContext.setOnNewChatClick(undefined); + }; + }, + [newChatContext, handleNewChatClick], + ); + + useEffect( + function transitionNewChatToWelcome() { + if (state.pageState.type === "newChat") { + function setWelcomeState() { + dispatch({ type: "setPageState", pageState: { type: "welcome" } }); + } + + const timer = setTimeout(setWelcomeState, 300); + + return function cleanup() { + clearTimeout(timer); + }; + } + }, + [state.pageState.type], + ); + + useEffect( + function ensureAccess() { + if (!isFlagReady) return; + if (isChatEnabled === false) { + router.replace(homepageRoute); + } + }, + [homepageRoute, isChatEnabled, isFlagReady, router], + ); + + async function startChatWithPrompt(prompt: string) { + if (!prompt?.trim()) return; + if (state.pageState.type === "creating") return; + + const trimmedPrompt = prompt.trim(); + dispatch({ + type: "setPageState", + pageState: { type: "creating", prompt: trimmedPrompt }, + }); + + try { + const sessionResponse = await postV2CreateSession({ + body: JSON.stringify({}), + }); + + if (sessionResponse.status !== 200 || !sessionResponse.data?.id) { + throw new Error("Failed to create session"); + } + + const sessionId = sessionResponse.data.id; + + dispatch({ + type: "setInitialPrompt", + sessionId, + prompt: trimmedPrompt, + }); + + await setUrlSessionId(sessionId, { shallow: false }); + dispatch({ + type: "setPageState", + pageState: { type: "chat", sessionId, initialPrompt: trimmedPrompt }, + }); + } catch (error) { + console.error("[CopilotPage] Failed to start chat:", error); + toast({ title: "Failed to start chat", variant: "destructive" }); + Sentry.captureException(error); + dispatch({ type: "setPageState", pageState: { type: "welcome" } }); + } + } + + function handleQuickAction(action: string) { + startChatWithPrompt(action); + } + + function handleSessionNotFound() { + router.replace("/copilot"); + } + + function handleStreamingChange(isStreamingValue: boolean) { + dispatch({ type: "setStreaming", isStreaming: isStreamingValue }); + } + + async function proceedWithNewChat() { + dispatch({ type: "setNewChatModalOpen", isOpen: false }); + if (newChatContext?.performNewChat) { + newChatContext.performNewChat(); + return; + } + try { + await setUrlSessionId(null, { shallow: false }); + } catch (error) { + console.error("[CopilotPage] Failed to clear session:", error); + } + router.replace("/copilot"); + 
} + + function handleCancelNewChat() { + dispatch({ type: "setNewChatModalOpen", isOpen: false }); + } + + function handleNewChatModalOpen(isOpen: boolean) { + dispatch({ type: "setNewChatModalOpen", isOpen }); + } + + function handleNewChatClick() { + if (state.isStreaming) { + dispatch({ type: "setNewChatModalOpen", isOpen: true }); + } else { + proceedWithNewChat(); + } + } + + return { + state: { + greetingName, + quickActions, + isLoading: isUserLoading, + pageState: state.pageState, + isNewChatModalOpen: state.isNewChatModalOpen, + isReady: isFlagReady && isChatEnabled !== false && isLoggedIn, + }, + handlers: { + handleQuickAction, + startChatWithPrompt, + handleSessionNotFound, + handleStreamingChange, + handleCancelNewChat, + proceedWithNewChat, + handleNewChatModalOpen, + }, + }; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotURLState.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotURLState.ts new file mode 100644 index 0000000000..5e37e29a15 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotURLState.ts @@ -0,0 +1,80 @@ +import { parseAsString, useQueryState } from "nuqs"; +import { useLayoutEffect } from "react"; +import { + getInitialPromptFromState, + type PageState, + shouldResetToWelcome, +} from "./helpers"; + +interface UseCopilotUrlStateArgs { + pageState: PageState; + initialPrompts: Record<string, string>; + previousSessionId: string | null; + setPageState: (pageState: PageState) => void; + setInitialPrompt: (sessionId: string, prompt: string) => void; + setPreviousSessionId: (sessionId: string | null) => void; +} + +export function useCopilotURLState({ + pageState, + initialPrompts, + previousSessionId, + setPageState, + setInitialPrompt, + setPreviousSessionId, +}: UseCopilotUrlStateArgs) { + const [urlSessionId, setUrlSessionId] = useQueryState( + "sessionId", + parseAsString, + ); + + function syncSessionFromUrl() { + if (urlSessionId) { + if (pageState.type === "chat" && pageState.sessionId === urlSessionId) { + setPreviousSessionId(urlSessionId); + return; + } + + const storedInitialPrompt = initialPrompts[urlSessionId]; + const currentInitialPrompt = getInitialPromptFromState( + pageState, + storedInitialPrompt, + ); + + if (currentInitialPrompt) { + setInitialPrompt(urlSessionId, currentInitialPrompt); + } + + setPageState({ + type: "chat", + sessionId: urlSessionId, + initialPrompt: currentInitialPrompt, + }); + setPreviousSessionId(urlSessionId); + return; + } + + const wasInChat = previousSessionId !== null && pageState.type === "chat"; + setPreviousSessionId(null); + if (wasInChat) { + setPageState({ type: "newChat" }); + return; + } + + if (shouldResetToWelcome(pageState)) { + setPageState({ type: "welcome" }); + } + } + + useLayoutEffect(syncSessionFromUrl, [ + urlSessionId, + pageState.type, + previousSessionId, + initialPrompts, + ]); + + return { + urlSessionId, + setUrlSessionId, + }; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/error/page.tsx b/autogpt_platform/frontend/src/app/(platform)/error/page.tsx index b7858787cf..b26ca4559b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/error/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/error/page.tsx @@ -1,6 +1,8 @@ "use client"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; +import { getHomepageRoute } from "@/lib/constants"; +import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; import { useSearchParams } from "next/navigation"; import { Suspense } from
"react"; import { getErrorDetails } from "./helpers"; @@ -9,6 +11,8 @@ function ErrorPageContent() { const searchParams = useSearchParams(); const errorMessage = searchParams.get("message"); const errorDetails = getErrorDetails(errorMessage); + const isChatEnabled = useGetFlag(Flag.CHAT); + const homepageRoute = getHomepageRoute(isChatEnabled); function handleRetry() { // Auth-related errors should redirect to login @@ -25,8 +29,8 @@ function ErrorPageContent() { window.location.reload(); }, 2000); } else { - // For server/network errors, go to marketplace - window.location.href = "/marketplace"; + // For server/network errors, go to home + window.location.href = homepageRoute; } } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentSettingsModal/AgentSettingsModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentSettingsModal/AgentSettingsModal.tsx index 7886f7adaf..de912c5fc3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentSettingsModal/AgentSettingsModal.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentSettingsModal/AgentSettingsModal.tsx @@ -31,10 +31,18 @@ export function AgentSettingsModal({ } } - const { currentSafeMode, isPending, hasHITLBlocks, handleToggle } = - useAgentSafeMode(agent); + const { + currentHITLSafeMode, + showHITLToggle, + handleHITLToggle, + currentSensitiveActionSafeMode, + showSensitiveActionToggle, + handleSensitiveActionToggle, + isPending, + shouldShowToggle, + } = useAgentSafeMode(agent); - if (!hasHITLBlocks) return null; + if (!shouldShowToggle) return null; return (
-
-
-
- Require human approval - - The agent will pause and wait for your review before - continuing - + {showHITLToggle && ( +
+
+
+ + Human-in-the-loop approval + + + The agent will pause at human-in-the-loop blocks and wait + for your review before continuing + +
+
-
-
+ )} + {showSensitiveActionToggle && ( +
+
+
+ + Sensitive action approval + + + The agent will pause at sensitive action blocks and wait for + your review before continuing + +
+ +
+
+ )}
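The settings modal above, and the SafeModeToggle and SelectedSettingsView changes further down, all destructure the same reworked useAgentSafeMode hook, whose implementation is not part of this patch. A minimal sketch of the return shape its call sites imply; the per-field comments are assumptions inferred from usage, not the hook's documented behavior:

```ts
// Sketch only: inferred from the destructuring at the call sites in this diff.
interface AgentSafeModeControls {
  currentHITLSafeMode: boolean; // presumably mirrors GraphSettings.human_in_the_loop_safe_mode
  showHITLToggle: boolean; // presumably true when the graph has human-in-the-loop blocks
  handleHITLToggle: () => void;
  currentSensitiveActionSafeMode: boolean; // presumably mirrors GraphSettings.sensitive_action_safe_mode
  showSensitiveActionToggle: boolean; // presumably true when the graph has sensitive action blocks
  handleSensitiveActionToggle: () => void;
  isPending: boolean; // a settings mutation is in flight
  shouldShowToggle: boolean; // replaces the old hasHITLBlocks gate; true if either toggle is visible
}
```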
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx index cd0c666be6..aff06d79c5 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx @@ -14,6 +14,10 @@ import { import { Dialog } from "@/components/molecules/Dialog/Dialog"; import { useEffect, useRef, useState } from "react"; import { ScheduleAgentModal } from "../ScheduleAgentModal/ScheduleAgentModal"; +import { + AIAgentSafetyPopup, + useAIAgentSafetyPopup, +} from "./components/AIAgentSafetyPopup/AIAgentSafetyPopup"; import { ModalHeader } from "./components/ModalHeader/ModalHeader"; import { ModalRunSection } from "./components/ModalRunSection/ModalRunSection"; import { RunActions } from "./components/RunActions/RunActions"; @@ -83,8 +87,18 @@ export function RunAgentModal({ const [isScheduleModalOpen, setIsScheduleModalOpen] = useState(false); const [hasOverflow, setHasOverflow] = useState(false); + const [isSafetyPopupOpen, setIsSafetyPopupOpen] = useState(false); + const [pendingRunAction, setPendingRunAction] = useState<(() => void) | null>( + null, + ); const contentRef = useRef(null); + const { shouldShowPopup, dismissPopup } = useAIAgentSafetyPopup( + agent.id, + agent.has_sensitive_action, + agent.has_human_in_the_loop, + ); + const hasAnySetupFields = Object.keys(agentInputFields || {}).length > 0 || Object.keys(agentCredentialsInputFields || {}).length > 0; @@ -165,6 +179,24 @@ export function RunAgentModal({ onScheduleCreated?.(schedule); } + function handleRunWithSafetyCheck() { + if (shouldShowPopup) { + setPendingRunAction(() => handleRun); + setIsSafetyPopupOpen(true); + } else { + handleRun(); + } + } + + function handleSafetyPopupAcknowledge() { + setIsSafetyPopupOpen(false); + dismissPopup(); + if (pendingRunAction) { + pendingRunAction(); + setPendingRunAction(null); + } + } + return ( <> +
+ + ); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/AIAgentSafetyPopup/AIAgentSafetyPopup.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/AIAgentSafetyPopup/AIAgentSafetyPopup.tsx new file mode 100644 index 0000000000..f2d178b33d --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/AIAgentSafetyPopup/AIAgentSafetyPopup.tsx @@ -0,0 +1,108 @@ +"use client"; + +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { Key, storage } from "@/services/storage/local-storage"; +import { ShieldCheckIcon } from "@phosphor-icons/react"; +import { useCallback, useEffect, useState } from "react"; + +interface Props { + agentId: string; + onAcknowledge: () => void; + isOpen: boolean; +} + +export function AIAgentSafetyPopup({ agentId, onAcknowledge, isOpen }: Props) { + function handleAcknowledge() { + // Add this agent to the list of agents for which popup has been shown + const seenAgentsJson = storage.get(Key.AI_AGENT_SAFETY_POPUP_SHOWN); + const seenAgents: string[] = seenAgentsJson + ? JSON.parse(seenAgentsJson) + : []; + + if (!seenAgents.includes(agentId)) { + seenAgents.push(agentId); + storage.set(Key.AI_AGENT_SAFETY_POPUP_SHOWN, JSON.stringify(seenAgents)); + } + + onAcknowledge(); + } + + if (!isOpen) return null; + + return ( + {} }} + styling={{ maxWidth: "480px" }} + > + +
+
+ +
+ + + Safety Checks Enabled + + + + AI-generated agents may take actions that affect your data or + external systems. + + + + AutoGPT includes safety checks so you'll always have the + opportunity to review and approve sensitive actions before they + happen. + + + +
+
+
+ ); +} + +export function useAIAgentSafetyPopup( + agentId: string, + hasSensitiveAction: boolean, + hasHumanInTheLoop: boolean, +) { + const [shouldShowPopup, setShouldShowPopup] = useState(false); + const [hasChecked, setHasChecked] = useState(false); + + useEffect(() => { + if (hasChecked) return; + + const seenAgentsJson = storage.get(Key.AI_AGENT_SAFETY_POPUP_SHOWN); + const seenAgents: string[] = seenAgentsJson + ? JSON.parse(seenAgentsJson) + : []; + const hasSeenPopupForThisAgent = seenAgents.includes(agentId); + const isRelevantAgent = hasSensitiveAction || hasHumanInTheLoop; + + setShouldShowPopup(!hasSeenPopupForThisAgent && isRelevantAgent); + setHasChecked(true); + }, [agentId, hasSensitiveAction, hasHumanInTheLoop, hasChecked]); + + const dismissPopup = useCallback(() => { + setShouldShowPopup(false); + }, []); + + return { + shouldShowPopup, + dismissPopup, + }; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalHeader/ModalHeader.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalHeader/ModalHeader.tsx index 2a31f62f82..b35ab87198 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalHeader/ModalHeader.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalHeader/ModalHeader.tsx @@ -29,7 +29,7 @@ export function ModalHeader({ agent }: ModalHeaderProps) { {agent.description} @@ -40,6 +40,8 @@ export function ModalHeader({ agent }: ModalHeaderProps) { Tip +
+ For best results, run this agent{" "} {humanizeCronExpression( @@ -50,7 +52,7 @@ export function ModalHeader({ agent }: ModalHeaderProps) { ) : null} {agent.instructions ? ( -
+
Instructions diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx index 7660de7c15..b3e0c17d74 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx @@ -1,9 +1,9 @@ import { Input } from "@/components/atoms/Input/Input"; +import { CredentialsGroupedView } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView"; import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; import { useMemo } from "react"; import { RunAgentInputs } from "../../../RunAgentInputs/RunAgentInputs"; import { useRunAgentModalContext } from "../../context"; -import { CredentialsGroupedView } from "../CredentialsGroupedView/CredentialsGroupedView"; import { ModalSection } from "../ModalSection/ModalSection"; import { WebhookTriggerBanner } from "../WebhookTriggerBanner/WebhookTriggerBanner"; @@ -19,6 +19,8 @@ export function ModalRunSection() { setInputValue, agentInputFields, agentCredentialsInputFields, + inputCredentials, + setInputCredentialsValue, } = useRunAgentModalContext(); const inputFields = Object.entries(agentInputFields || {}); @@ -102,6 +104,9 @@ export function ModalRunSection() { ) : null} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SafeModeToggle.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SafeModeToggle.tsx index 9ba37d8d17..0fafa67414 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SafeModeToggle.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SafeModeToggle.tsx @@ -5,48 +5,104 @@ import { Graph } from "@/lib/autogpt-server-api/types"; import { cn } from "@/lib/utils"; import { ShieldCheckIcon, ShieldIcon } from "@phosphor-icons/react"; import { useAgentSafeMode } from "@/hooks/useAgentSafeMode"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/atoms/Tooltip/BaseTooltip"; interface Props { graph: GraphModel | LibraryAgent | Graph; className?: string; - fullWidth?: boolean; } -export function SafeModeToggle({ graph }: Props) { +interface SafeModeIconButtonProps { + isEnabled: boolean; + label: string; + tooltipEnabled: string; + tooltipDisabled: string; + onToggle: () => void; + isPending: boolean; +} + +function SafeModeIconButton({ + isEnabled, + label, + tooltipEnabled, + tooltipDisabled, + onToggle, + isPending, +}: SafeModeIconButtonProps) { + return ( + + + + + +
+
+ {label}: {isEnabled ? "ON" : "OFF"} +
+
+ {isEnabled ? tooltipEnabled : tooltipDisabled} +
+
+
+
+ ); +} + +export function SafeModeToggle({ graph, className }: Props) { const { - currentSafeMode, + currentHITLSafeMode, + showHITLToggle, + handleHITLToggle, + currentSensitiveActionSafeMode, + showSensitiveActionToggle, + handleSensitiveActionToggle, isPending, shouldShowToggle, - isStateUndetermined, - handleToggle, } = useAgentSafeMode(graph); - if (!shouldShowToggle || isStateUndetermined) { + if (!shouldShowToggle) { return null; } return ( - + {showSensitiveActionToggle && ( + + )} +
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedSettingsView/SelectedSettingsView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedSettingsView/SelectedSettingsView.tsx index 57d7055e1c..530d24529f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedSettingsView/SelectedSettingsView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedSettingsView/SelectedSettingsView.tsx @@ -13,8 +13,16 @@ interface Props { } export function SelectedSettingsView({ agent, onClearSelectedRun }: Props) { - const { currentSafeMode, isPending, hasHITLBlocks, handleToggle } = - useAgentSafeMode(agent); + const { + currentHITLSafeMode, + showHITLToggle, + handleHITLToggle, + currentSensitiveActionSafeMode, + showSensitiveActionToggle, + handleSensitiveActionToggle, + isPending, + shouldShowToggle, + } = useAgentSafeMode(agent); return ( @@ -34,24 +42,51 @@ export function SelectedSettingsView({ agent, onClearSelectedRun }: Props) {
- {hasHITLBlocks ? ( -
-
-
- Require human approval - - The agent will pause and wait for your review before - continuing - + {shouldShowToggle ? ( + <> + {showHITLToggle && ( +
+
+
+ + Human-in-the-loop approval + + + The agent will pause at human-in-the-loop blocks and + wait for your review before continuing + +
+ +
- -
-
+ )} + {showSensitiveActionToggle && ( +
+
+
+ + Sensitive action approval + + + The agent will pause at sensitive action blocks and wait + for your review before continuing + +
+ +
+
+ )} + ) : (
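Both settings surfaces persist the same pair of per-graph flags. Going by the GraphSettings change in the OpenAPI spec further down in this diff, the stored shape and its defaults are:

```ts
// Per the updated GraphSettings schema below: the previously nullable
// human_in_the_loop_safe_mode becomes a required boolean (default true),
// and sensitive_action_safe_mode is added alongside it (default false).
type GraphSettings = {
  human_in_the_loop_safe_mode: boolean; // default: true
  sensitive_action_safe_mode: boolean; // default: false
};
```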
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx index 53a7e8b860..0147c19a5c 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx @@ -1,8 +1,15 @@ "use client"; -import React, { useCallback, useEffect, useMemo, useState } from "react"; +import React, { + useCallback, + useContext, + useEffect, + useMemo, + useState, +} from "react"; import { CredentialsMetaInput, + CredentialsType, GraphExecutionID, GraphMeta, LibraryAgentPreset, @@ -29,7 +36,11 @@ import { } from "@/components/__legacy__/ui/icons"; import { Input } from "@/components/__legacy__/ui/input"; import { Button } from "@/components/atoms/Button/Button"; -import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput"; +import { CredentialsGroupedView } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView"; +import { + findSavedCredentialByProviderAndType, + findSavedUserCredentialByProviderAndType, +} from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers"; import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; import { useToast, @@ -37,6 +48,7 @@ import { } from "@/components/molecules/Toast/use-toast"; import { humanizeCronExpression } from "@/lib/cron-expression-utils"; import { cn, isEmpty } from "@/lib/utils"; +import { CredentialsProvidersContext } from "@/providers/agent-credentials/credentials-provider"; import { ClockIcon, CopyIcon, InfoIcon } from "@phosphor-icons/react"; import { CalendarClockIcon, Trash2Icon } from "lucide-react"; @@ -90,6 +102,7 @@ export function AgentRunDraftView({ const api = useBackendAPI(); const { toast } = useToast(); const toastOnFail = useToastOnFail(); + const allProviders = useContext(CredentialsProvidersContext); const [inputValues, setInputValues] = useState>({}); const [inputCredentials, setInputCredentials] = useState< @@ -128,6 +141,77 @@ export function AgentRunDraftView({ () => graph.credentials_input_schema.properties, [graph], ); + const credentialFields = useMemo( + function getCredentialFields() { + return Object.entries(agentCredentialsInputFields); + }, + [agentCredentialsInputFields], + ); + const requiredCredentials = useMemo( + function getRequiredCredentials() { + return new Set( + (graph.credentials_input_schema?.required as string[]) || [], + ); + }, + [graph.credentials_input_schema?.required], + ); + + useEffect( + function initializeDefaultCredentials() { + if (!allProviders) return; + if (!graph.credentials_input_schema?.properties) return; + if (requiredCredentials.size === 0) return; + + setInputCredentials(function updateCredentials(currentCreds) { + const next = { ...currentCreds }; + let didAdd = false; + + for (const key of requiredCredentials) { + if (next[key]) continue; + const schema = graph.credentials_input_schema.properties[key]; + if (!schema) continue; + + const providerNames = schema.credentials_provider || []; + const credentialTypes = schema.credentials_types || []; + const requiredScopes = schema.credentials_scopes; + + const userCredential 
= findSavedUserCredentialByProviderAndType( + providerNames, + credentialTypes, + requiredScopes, + allProviders, + ); + + const savedCredential = + userCredential || + findSavedCredentialByProviderAndType( + providerNames, + credentialTypes, + requiredScopes, + allProviders, + ); + + if (!savedCredential) continue; + + next[key] = { + id: savedCredential.id, + provider: savedCredential.provider, + type: savedCredential.type as CredentialsType, + title: savedCredential.title, + }; + didAdd = true; + } + + if (!didAdd) return currentCreds; + return next; + }); + }, + [ + allProviders, + graph.credentials_input_schema?.properties, + requiredCredentials, + ], + ); const [allRequiredInputsAreSet, missingInputs] = useMemo(() => { const nonEmptyInputs = new Set( @@ -145,18 +229,35 @@ export function AgentRunDraftView({ ); return [isSuperset, difference]; }, [agentInputSchema.required, inputValues]); - const [allCredentialsAreSet, missingCredentials] = useMemo(() => { - const availableCredentials = new Set(Object.keys(inputCredentials)); - const allCredentials = new Set(Object.keys(agentCredentialsInputFields)); - // Backwards-compatible implementation of isSupersetOf and difference - const isSuperset = Array.from(allCredentials).every((item) => - availableCredentials.has(item), - ); - const difference = Array.from(allCredentials).filter( - (item) => !availableCredentials.has(item), - ); - return [isSuperset, difference]; - }, [agentCredentialsInputFields, inputCredentials]); + const [allCredentialsAreSet, missingCredentials] = useMemo( + function getCredentialStatus() { + const missing = Array.from(requiredCredentials).filter((key) => { + const cred = inputCredentials[key]; + return !cred || !cred.id; + }); + return [missing.length === 0, missing]; + }, + [requiredCredentials, inputCredentials], + ); + function addChangedCredentials(prev: Set) { + const next = new Set(prev); + next.add("credentials"); + return next; + } + + function handleCredentialChange(key: string, value?: CredentialsMetaInput) { + setInputCredentials(function updateInputCredentials(currentCreds) { + const next = { ...currentCreds }; + if (value === undefined) { + delete next[key]; + return next; + } + next[key] = value; + return next; + }); + setChangedPresetAttributes(addChangedCredentials); + } + const notifyMissingInputs = useCallback( (needPresetName: boolean = true) => { const allMissingFields = ( @@ -649,35 +750,6 @@ export function AgentRunDraftView({ )} - {/* Credentials inputs */} - {Object.entries(agentCredentialsInputFields).map( - ([key, inputSubSchema]) => ( - { - setInputCredentials((obj) => { - const newObj = { ...obj }; - if (value === undefined) { - delete newObj[key]; - return newObj; - } - return { - ...obj, - [key]: value, - }; - }); - setChangedPresetAttributes((prev) => - prev.add("credentials"), - ); - }} - /> - ), - )} - {/* Regular inputs */} {Object.entries(agentInputFields).map(([key, inputSubSchema]) => ( ))} + + {/* Credentials inputs */} + {credentialFields.length > 0 && ( + + )}
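The initializeDefaultCredentials effect in the hunk above pre-fills each still-empty required credential field, preferring a credential the user saved themselves over any other matching saved credential. A condensed sketch of that pass; CredentialMeta and the two lookup callbacks are hypothetical stand-ins for the generated CredentialsMetaInput type and the helpers imported in this diff:

```ts
// Hypothetical stand-ins for illustration only.
type CredentialMeta = { id: string; provider: string; type: string; title?: string };
type Lookup = (fieldKey: string) => CredentialMeta | undefined;

function prefillCredentials(
  requiredKeys: Set<string>,
  current: Record<string, CredentialMeta>,
  findUserSaved: Lookup, // models findSavedUserCredentialByProviderAndType
  findAnySaved: Lookup, // models findSavedCredentialByProviderAndType
): Record<string, CredentialMeta> {
  const next = { ...current };
  let didAdd = false;
  for (const key of requiredKeys) {
    if (next[key]) continue; // never overwrite an explicit selection
    const match = findUserSaved(key) || findAnySaved(key); // user-saved wins
    if (!match) continue; // left empty; surfaces later in missingCredentials
    next[key] = match;
    didAdd = true;
  }
  return didAdd ? next : current; // same reference back means no state update
}
```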
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryAgentCard/useLibraryAgentCard.ts b/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryAgentCard/useLibraryAgentCard.ts index 4232847226..87e9e9e9bc 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryAgentCard/useLibraryAgentCard.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryAgentCard/useLibraryAgentCard.ts @@ -8,6 +8,8 @@ import { useGetV2GetUserProfile } from "@/app/api/__generated__/endpoints/store/ import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { okData } from "@/app/api/helpers"; import { useToast } from "@/components/molecules/Toast/use-toast"; +import { isLogoutInProgress } from "@/lib/autogpt-server-api/helpers"; +import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { updateFavoriteInQueries } from "./helpers"; interface Props { @@ -23,10 +25,14 @@ export function useLibraryAgentCard({ agent }: Props) { const { toast } = useToast(); const queryClient = getQueryClient(); const { mutateAsync: updateLibraryAgent } = usePatchV2UpdateLibraryAgent(); + const { user, isLoggedIn } = useSupabase(); + const logoutInProgress = isLogoutInProgress(); const { data: profile } = useGetV2GetUserProfile({ query: { select: okData, + enabled: isLoggedIn && !!user && !logoutInProgress, + queryKey: ["/api/store/profile", user?.id], }, }); diff --git a/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryUploadAgentDialog/LibraryUploadAgentDialog.tsx b/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryUploadAgentDialog/LibraryUploadAgentDialog.tsx index 1a6999721e..436be6f15a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryUploadAgentDialog/LibraryUploadAgentDialog.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryUploadAgentDialog/LibraryUploadAgentDialog.tsx @@ -2,6 +2,7 @@ import { Button } from "@/components/atoms/Button/Button"; import { FileInput } from "@/components/atoms/FileInput/FileInput"; import { Input } from "@/components/atoms/Input/Input"; +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; import { Dialog } from "@/components/molecules/Dialog/Dialog"; import { Form, @@ -120,7 +121,7 @@ export default function LibraryUploadAgentDialog() { > {isUploading ? (
-
+ Uploading...
) : ( diff --git a/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts b/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts index 656e1febc2..9bde570548 100644 --- a/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts +++ b/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts @@ -1,6 +1,8 @@ import { useToast } from "@/components/molecules/Toast/use-toast"; +import { getHomepageRoute } from "@/lib/constants"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { environment } from "@/services/environment"; +import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; import { loginFormSchema, LoginProvider } from "@/types/auth"; import { zodResolver } from "@hookform/resolvers/zod"; import { useRouter, useSearchParams } from "next/navigation"; @@ -20,15 +22,17 @@ export function useLoginPage() { const [isGoogleLoading, setIsGoogleLoading] = useState(false); const [showNotAllowedModal, setShowNotAllowedModal] = useState(false); const isCloudEnv = environment.isCloud(); + const isChatEnabled = useGetFlag(Flag.CHAT); + const homepageRoute = getHomepageRoute(isChatEnabled); // Get redirect destination from 'next' query parameter const nextUrl = searchParams.get("next"); useEffect(() => { if (isLoggedIn && !isLoggingIn) { - router.push(nextUrl || "/marketplace"); + router.push(nextUrl || homepageRoute); } - }, [isLoggedIn, isLoggingIn, nextUrl, router]); + }, [homepageRoute, isLoggedIn, isLoggingIn, nextUrl, router]); const form = useForm>({ resolver: zodResolver(loginFormSchema), @@ -98,7 +102,7 @@ export function useLoginPage() { } else if (result.onboarding) { router.replace("/onboarding"); } else { - router.replace("/marketplace"); + router.replace(homepageRoute); } } catch (error) { toast({ diff --git a/autogpt_platform/frontend/src/app/(platform)/marketplace/components/MainMarketplacePage/__tests__/main.test.tsx b/autogpt_platform/frontend/src/app/(platform)/marketplace/components/MainMarketplacePage/__tests__/main.test.tsx new file mode 100644 index 0000000000..bee227a7af --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/marketplace/components/MainMarketplacePage/__tests__/main.test.tsx @@ -0,0 +1,15 @@ +import { expect, test } from "vitest"; +import { render, screen } from "@/tests/integrations/test-utils"; +import { MainMarkeplacePage } from "../MainMarketplacePage"; +import { server } from "@/mocks/mock-server"; +import { getDeleteV2DeleteStoreSubmissionMockHandler422 } from "@/app/api/__generated__/endpoints/store/store.msw"; + +// Only for CI testing purpose, will remove it in future PR +test("MainMarketplacePage", async () => { + server.use(getDeleteV2DeleteStoreSubmissionMockHandler422()); + + render(); + expect( + await screen.findByText("Featured agents", { exact: false }), + ).toBeDefined(); +}); diff --git a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/page.tsx b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/page.tsx index 260fbc0b52..979b113f55 100644 --- a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/page.tsx @@ -3,12 +3,14 @@ import { useGetV2GetUserProfile } from "@/app/api/__generated__/endpoints/store/store"; import { ProfileInfoForm } from "@/components/__legacy__/ProfileInfoForm"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; +import { isLogoutInProgress } from "@/lib/autogpt-server-api/helpers"; import { ProfileDetails } 
from "@/lib/autogpt-server-api/types"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { ProfileLoading } from "./ProfileLoading"; export default function UserProfilePage() { const { user } = useSupabase(); + const logoutInProgress = isLogoutInProgress(); const { data: profile, @@ -18,7 +20,7 @@ export default function UserProfilePage() { refetch, } = useGetV2GetUserProfile({ query: { - enabled: !!user, + enabled: !!user && !logoutInProgress, select: (res) => { if (res.status === 200) { return { diff --git a/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts b/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts index 68f7ae10ec..6d68782e7a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts +++ b/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts @@ -1,5 +1,6 @@ "use server"; +import { getHomepageRoute } from "@/lib/constants"; import { getServerSupabase } from "@/lib/supabase/server/getServerSupabase"; import { signupFormSchema } from "@/types/auth"; import * as Sentry from "@sentry/nextjs"; @@ -11,6 +12,7 @@ export async function signup( password: string, confirmPassword: string, agreeToTerms: boolean, + isChatEnabled: boolean, ) { try { const parsed = signupFormSchema.safeParse({ @@ -58,7 +60,9 @@ export async function signup( } const isOnboardingEnabled = await shouldShowOnboarding(); - const next = isOnboardingEnabled ? "/onboarding" : "/"; + const next = isOnboardingEnabled + ? "/onboarding" + : getHomepageRoute(isChatEnabled); return { success: true, next }; } catch (err) { diff --git a/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts b/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts index e6d7c68aef..5bd53ca846 100644 --- a/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts +++ b/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts @@ -1,6 +1,8 @@ import { useToast } from "@/components/molecules/Toast/use-toast"; +import { getHomepageRoute } from "@/lib/constants"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { environment } from "@/services/environment"; +import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; import { LoginProvider, signupFormSchema } from "@/types/auth"; import { zodResolver } from "@hookform/resolvers/zod"; import { useRouter, useSearchParams } from "next/navigation"; @@ -20,15 +22,17 @@ export function useSignupPage() { const [isGoogleLoading, setIsGoogleLoading] = useState(false); const [showNotAllowedModal, setShowNotAllowedModal] = useState(false); const isCloudEnv = environment.isCloud(); + const isChatEnabled = useGetFlag(Flag.CHAT); + const homepageRoute = getHomepageRoute(isChatEnabled); // Get redirect destination from 'next' query parameter const nextUrl = searchParams.get("next"); useEffect(() => { if (isLoggedIn && !isSigningUp) { - router.push(nextUrl || "/marketplace"); + router.push(nextUrl || homepageRoute); } - }, [isLoggedIn, isSigningUp, nextUrl, router]); + }, [homepageRoute, isLoggedIn, isSigningUp, nextUrl, router]); const form = useForm>({ resolver: zodResolver(signupFormSchema), @@ -104,6 +108,7 @@ export function useSignupPage() { data.password, data.confirmPassword, data.agreeToTerms, + isChatEnabled === true, ); setIsLoading(false); @@ -129,7 +134,7 @@ export function useSignupPage() { } // Prefer the URL's next parameter, then result.next (for onboarding), then default - const redirectTo = nextUrl || result.next || "/"; + const 
redirectTo = nextUrl || result.next || homepageRoute; router.replace(redirectTo); } catch (error) { setIsLoading(false); diff --git a/autogpt_platform/frontend/src/app/api/mutators/custom-mutator.ts b/autogpt_platform/frontend/src/app/api/mutators/custom-mutator.ts index 4578ac03fe..3c9eda7785 100644 --- a/autogpt_platform/frontend/src/app/api/mutators/custom-mutator.ts +++ b/autogpt_platform/frontend/src/app/api/mutators/custom-mutator.ts @@ -4,12 +4,12 @@ import { getServerAuthToken, } from "@/lib/autogpt-server-api/helpers"; -import { transformDates } from "./date-transformer"; -import { environment } from "@/services/environment"; import { IMPERSONATION_HEADER_NAME, IMPERSONATION_STORAGE_KEY, } from "@/lib/constants"; +import { environment } from "@/services/environment"; +import { transformDates } from "./date-transformer"; const FRONTEND_BASE_URL = process.env.NEXT_PUBLIC_FRONTEND_BASE_URL || "http://localhost:3000"; diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index 3c7c488588..2d4239a8f2 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -1008,7 +1008,7 @@ "get": { "tags": ["v2", "chat", "chat"], "summary": "Get Session", - "description": "Retrieve the details of a specific chat session.\n\nLooks up a chat session by ID for the given user (if authenticated) and returns all session data including messages.\n\nArgs:\n session_id: The unique identifier for the desired chat session.\n user_id: The optional authenticated user ID, or None for anonymous access.\n\nReturns:\n SessionDetailResponse: Details for the requested session; raises NotFoundError if not found.", + "description": "Retrieve the details of a specific chat session.\n\nLooks up a chat session by ID for the given user (if authenticated) and returns all session data including messages.\n\nArgs:\n session_id: The unique identifier for the desired chat session.\n user_id: The optional authenticated user ID, or None for anonymous access.\n\nReturns:\n SessionDetailResponse: Details for the requested session, or None if not found.", "operationId": "getV2GetSession", "security": [{ "HTTPBearerJWT": [] }], "parameters": [ @@ -6369,6 +6369,11 @@ "title": "Has Human In The Loop", "readOnly": true }, + "has_sensitive_action": { + "type": "boolean", + "title": "Has Sensitive Action", + "readOnly": true + }, "trigger_setup_info": { "anyOf": [ { "$ref": "#/components/schemas/GraphTriggerInfo" }, @@ -6385,6 +6390,7 @@ "output_schema", "has_external_trigger", "has_human_in_the_loop", + "has_sensitive_action", "trigger_setup_info" ], "title": "BaseGraph" @@ -7615,6 +7621,11 @@ "title": "Has Human In The Loop", "readOnly": true }, + "has_sensitive_action": { + "type": "boolean", + "title": "Has Sensitive Action", + "readOnly": true + }, "trigger_setup_info": { "anyOf": [ { "$ref": "#/components/schemas/GraphTriggerInfo" }, @@ -7638,6 +7649,7 @@ "output_schema", "has_external_trigger", "has_human_in_the_loop", + "has_sensitive_action", "trigger_setup_info", "credentials_input_schema" ], @@ -7716,6 +7728,11 @@ "title": "Has Human In The Loop", "readOnly": true }, + "has_sensitive_action": { + "type": "boolean", + "title": "Has Sensitive Action", + "readOnly": true + }, "trigger_setup_info": { "anyOf": [ { "$ref": "#/components/schemas/GraphTriggerInfo" }, @@ -7740,6 +7757,7 @@ "output_schema", "has_external_trigger", "has_human_in_the_loop", + "has_sensitive_action", "trigger_setup_info", 
"credentials_input_schema" ], @@ -7748,8 +7766,14 @@ "GraphSettings": { "properties": { "human_in_the_loop_safe_mode": { - "anyOf": [{ "type": "boolean" }, { "type": "null" }], - "title": "Human In The Loop Safe Mode" + "type": "boolean", + "title": "Human In The Loop Safe Mode", + "default": true + }, + "sensitive_action_safe_mode": { + "type": "boolean", + "title": "Sensitive Action Safe Mode", + "default": false } }, "type": "object", @@ -7907,6 +7931,16 @@ "title": "Has External Trigger", "description": "Whether the agent has an external trigger (e.g. webhook) node" }, + "has_human_in_the_loop": { + "type": "boolean", + "title": "Has Human In The Loop", + "description": "Whether the agent has human-in-the-loop blocks" + }, + "has_sensitive_action": { + "type": "boolean", + "title": "Has Sensitive Action", + "description": "Whether the agent has sensitive action blocks" + }, "trigger_setup_info": { "anyOf": [ { "$ref": "#/components/schemas/GraphTriggerInfo" }, @@ -7953,6 +7987,8 @@ "output_schema", "credentials_input_schema", "has_external_trigger", + "has_human_in_the_loop", + "has_sensitive_action", "new_output", "can_access_graph", "is_latest_version", @@ -8759,6 +8795,12 @@ "title": "Node Exec Id", "description": "Node execution ID (primary key)" }, + "node_id": { + "type": "string", + "title": "Node Id", + "description": "Node definition ID (for grouping)", + "default": "" + }, "user_id": { "type": "string", "title": "User Id", @@ -8858,7 +8900,7 @@ "created_at" ], "title": "PendingHumanReviewModel", - "description": "Response model for pending human review data.\n\nRepresents a human review request that is awaiting user action.\nContains all necessary information for a user to review and approve\nor reject data from a Human-in-the-Loop block execution.\n\nAttributes:\n id: Unique identifier for the review record\n user_id: ID of the user who must perform the review\n node_exec_id: ID of the node execution that created this review\n graph_exec_id: ID of the graph execution containing the node\n graph_id: ID of the graph template being executed\n graph_version: Version number of the graph template\n payload: The actual data payload awaiting review\n instructions: Instructions or message for the reviewer\n editable: Whether the reviewer can edit the data\n status: Current review status (WAITING, APPROVED, or REJECTED)\n review_message: Optional message from the reviewer\n created_at: Timestamp when review was created\n updated_at: Timestamp when review was last modified\n reviewed_at: Timestamp when review was completed (if applicable)" + "description": "Response model for pending human review data.\n\nRepresents a human review request that is awaiting user action.\nContains all necessary information for a user to review and approve\nor reject data from a Human-in-the-Loop block execution.\n\nAttributes:\n id: Unique identifier for the review record\n user_id: ID of the user who must perform the review\n node_exec_id: ID of the node execution that created this review\n node_id: ID of the node definition (for grouping reviews from same node)\n graph_exec_id: ID of the graph execution containing the node\n graph_id: ID of the graph template being executed\n graph_version: Version number of the graph template\n payload: The actual data payload awaiting review\n instructions: Instructions or message for the reviewer\n editable: Whether the reviewer can edit the data\n status: Current review status (WAITING, APPROVED, or REJECTED)\n review_message: Optional message from the reviewer\n 
created_at: Timestamp when review was created\n updated_at: Timestamp when review was last modified\n reviewed_at: Timestamp when review was completed (if applicable)" }, "PostmarkBounceEnum": { "type": "integer", @@ -9361,6 +9403,12 @@ ], "title": "Reviewed Data", "description": "Optional edited data (ignored if approved=False)" + }, + "auto_approve_future": { + "type": "boolean", + "title": "Auto Approve Future", + "description": "If true and this review is approved, future executions of this same block (node) will be automatically approved. This only affects approved reviews.", + "default": false } }, "type": "object", @@ -9380,7 +9428,7 @@ "type": "object", "required": ["reviews"], "title": "ReviewRequest", - "description": "Request model for processing ALL pending reviews for an execution.\n\nThis request must include ALL pending reviews for a graph execution.\nEach review will be either approved (with optional data modifications)\nor rejected (data ignored). The execution will resume only after ALL reviews are processed." + "description": "Request model for processing ALL pending reviews for an execution.\n\nThis request must include ALL pending reviews for a graph execution.\nEach review will be either approved (with optional data modifications)\nor rejected (data ignored). The execution will resume only after ALL reviews are processed.\n\nEach review item can individually specify whether to auto-approve future executions\nof the same block via the `auto_approve_future` field on ReviewItem." }, "ReviewResponse": { "properties": { diff --git a/autogpt_platform/frontend/src/app/globals.css b/autogpt_platform/frontend/src/app/globals.css index 0625c26082..1f782f753b 100644 --- a/autogpt_platform/frontend/src/app/globals.css +++ b/autogpt_platform/frontend/src/app/globals.css @@ -141,52 +141,6 @@ } } -@keyframes shimmer { - 0% { - background-position: -200% 0; - } - 100% { - background-position: 200% 0; - } -} - -@keyframes l3 { - 25% { - background-position: - 0 0, - 100% 100%, - 100% calc(100% - 5px); - } - 50% { - background-position: - 0 100%, - 100% 100%, - 0 calc(100% - 5px); - } - 75% { - background-position: - 0 100%, - 100% 0, - 100% 5px; - } -} - -.loader { - width: 80px; - height: 70px; - border: 5px solid rgb(241 245 249); - padding: 0 8px; - box-sizing: border-box; - background: - linear-gradient(rgb(15 23 42) 0 0) 0 0/8px 20px, - linear-gradient(rgb(15 23 42) 0 0) 100% 0/8px 20px, - radial-gradient(farthest-side, rgb(15 23 42) 90%, #0000) 0 5px/8px 8px - content-box, - transparent; - background-repeat: no-repeat; - animation: l3 2s infinite linear; -} - input[type="number"]::-webkit-outer-spin-button, input[type="number"]::-webkit-inner-spin-button { -webkit-appearance: none; diff --git a/autogpt_platform/frontend/src/app/page.tsx b/autogpt_platform/frontend/src/app/page.tsx index b499a40d71..dbfab49469 100644 --- a/autogpt_platform/frontend/src/app/page.tsx +++ b/autogpt_platform/frontend/src/app/page.tsx @@ -1,5 +1,27 @@ -import { redirect } from "next/navigation"; +"use client"; + +import { getHomepageRoute } from "@/lib/constants"; +import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; +import { useRouter } from "next/navigation"; +import { useEffect } from "react"; export default function Page() { - redirect("/marketplace"); + const isChatEnabled = useGetFlag(Flag.CHAT); + const router = useRouter(); + const homepageRoute = getHomepageRoute(isChatEnabled); + const envEnabled = process.env.NEXT_PUBLIC_LAUNCHDARKLY_ENABLED === "true"; + const 
clientId = process.env.NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID; + const isLaunchDarklyConfigured = envEnabled && Boolean(clientId); + const isFlagReady = + !isLaunchDarklyConfigured || typeof isChatEnabled === "boolean"; + + useEffect( + function redirectToHomepage() { + if (!isFlagReady) return; + router.replace(homepageRoute); + }, + [homepageRoute, isFlagReady, router], + ); + + return null; } diff --git a/autogpt_platform/frontend/src/app/providers.tsx b/autogpt_platform/frontend/src/app/providers.tsx index 8ea199abc8..267814e7c2 100644 --- a/autogpt_platform/frontend/src/app/providers.tsx +++ b/autogpt_platform/frontend/src/app/providers.tsx @@ -6,28 +6,40 @@ import { BackendAPIProvider } from "@/lib/autogpt-server-api/context"; import { getQueryClient } from "@/lib/react-query/queryClient"; import CredentialsProvider from "@/providers/agent-credentials/credentials-provider"; import OnboardingProvider from "@/providers/onboarding/onboarding-provider"; +import { + PostHogPageViewTracker, + PostHogProvider, + PostHogUserTracker, +} from "@/providers/posthog/posthog-provider"; import { LaunchDarklyProvider } from "@/services/feature-flags/feature-flag-provider"; import { QueryClientProvider } from "@tanstack/react-query"; import { ThemeProvider, ThemeProviderProps } from "next-themes"; import { NuqsAdapter } from "nuqs/adapters/next/app"; +import { Suspense } from "react"; export function Providers({ children, ...props }: ThemeProviderProps) { const queryClient = getQueryClient(); return ( - - - - - - - {children} - - - - - + + + + + + + + + + + + {children} + + + + + + ); diff --git a/autogpt_platform/frontend/src/components/atoms/Badge/Badge.test.tsx b/autogpt_platform/frontend/src/components/atoms/Badge/Badge.test.tsx deleted file mode 100644 index cd8531375b..0000000000 --- a/autogpt_platform/frontend/src/components/atoms/Badge/Badge.test.tsx +++ /dev/null @@ -1,81 +0,0 @@ -// import { render, screen } from "@testing-library/react"; -// import { describe, expect, it } from "vitest"; -// import { Badge } from "./Badge"; - -// describe("Badge Component", () => { -// it("renders badge with content", () => { -// render(Success); - -// expect(screen.getByText("Success")).toBeInTheDocument(); -// }); - -// it("applies correct variant styles", () => { -// const { rerender } = render(Success); -// let badge = screen.getByText("Success"); -// expect(badge).toHaveClass("bg-green-100", "text-green-800"); - -// rerender(Error); -// badge = screen.getByText("Error"); -// expect(badge).toHaveClass("bg-red-100", "text-red-800"); - -// rerender(Info); -// badge = screen.getByText("Info"); -// expect(badge).toHaveClass("bg-slate-100", "text-slate-800"); -// }); - -// it("applies custom className", () => { -// render( -// -// Success -// , -// ); - -// const badge = screen.getByText("Success"); -// expect(badge).toHaveClass("custom-class"); -// }); - -// it("renders as span element", () => { -// render(Success); - -// const badge = screen.getByText("Success"); -// expect(badge.tagName).toBe("SPAN"); -// }); - -// it("renders children correctly", () => { -// render( -// -// Custom Content -// , -// ); - -// expect(screen.getByText("Custom")).toBeInTheDocument(); -// expect(screen.getByText("Content")).toBeInTheDocument(); -// }); - -// it("supports all badge variants", () => { -// const variants = ["success", "error", "info"] as const; - -// variants.forEach((variant) => { -// const { unmount } = render( -// -// {variant} -// , -// ); - -// expect(screen.getByTestId(`badge-${variant}`)).toBeInTheDocument(); 
-// unmount(); -// }); -// }); - -// it("handles long text content", () => { -// render( -// -// Very long text that should be handled properly by the component -// , -// ); - -// const badge = screen.getByText(/Very long text/); -// expect(badge).toBeInTheDocument(); -// expect(badge).toHaveClass("overflow-hidden", "text-ellipsis"); -// }); -// }); diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx new file mode 100644 index 0000000000..ba7584765d --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx @@ -0,0 +1,84 @@ +"use client"; + +import { Text } from "@/components/atoms/Text/Text"; +import { cn } from "@/lib/utils"; +import { useEffect, useRef } from "react"; +import { ChatContainer } from "./components/ChatContainer/ChatContainer"; +import { ChatErrorState } from "./components/ChatErrorState/ChatErrorState"; +import { ChatLoader } from "./components/ChatLoader/ChatLoader"; +import { useChat } from "./useChat"; + +export interface ChatProps { + className?: string; + urlSessionId?: string | null; + initialPrompt?: string; + onSessionNotFound?: () => void; + onStreamingChange?: (isStreaming: boolean) => void; +} + +export function Chat({ + className, + urlSessionId, + initialPrompt, + onSessionNotFound, + onStreamingChange, +}: ChatProps) { + const hasHandledNotFoundRef = useRef(false); + const { + messages, + isLoading, + isCreating, + error, + isSessionNotFound, + sessionId, + createSession, + showLoader, + } = useChat({ urlSessionId }); + + useEffect( + function handleMissingSession() { + if (!onSessionNotFound) return; + if (!urlSessionId) return; + if (!isSessionNotFound || isLoading || isCreating) return; + if (hasHandledNotFoundRef.current) return; + hasHandledNotFoundRef.current = true; + onSessionNotFound(); + }, + [onSessionNotFound, urlSessionId, isSessionNotFound, isLoading, isCreating], + ); + + return ( +
+ {/* Main Content */} +
+ {/* Loading State */} + {showLoader && (isLoading || isCreating) && ( +
+
+ + + Loading your chats... + +
+
+ )} + + {/* Error State */} + {error && !isLoading && ( + + )} + + {/* Session Content */} + {sessionId && !isLoading && !error && ( + + )} +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/AIChatBubble/AIChatBubble.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/AIChatBubble/AIChatBubble.tsx new file mode 100644 index 0000000000..f5d56fcb15 --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/AIChatBubble/AIChatBubble.tsx @@ -0,0 +1,15 @@ +import { cn } from "@/lib/utils"; +import { ReactNode } from "react"; + +export interface AIChatBubbleProps { + children: ReactNode; + className?: string; +} + +export function AIChatBubble({ children, className }: AIChatBubbleProps) { + return ( +
+ {children} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentCarouselMessage/AgentCarouselMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentCarouselMessage/AgentCarouselMessage.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentCarouselMessage/AgentCarouselMessage.tsx rename to autogpt_platform/frontend/src/components/contextual/Chat/components/AgentCarouselMessage/AgentCarouselMessage.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentInputsSetup/AgentInputsSetup.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/AgentInputsSetup.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentInputsSetup/AgentInputsSetup.tsx rename to autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/AgentInputsSetup.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentInputsSetup/useAgentInputsSetup.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/useAgentInputsSetup.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentInputsSetup/useAgentInputsSetup.ts rename to autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/useAgentInputsSetup.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx similarity index 99% rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx rename to autogpt_platform/frontend/src/components/contextual/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx index 33f02e660f..b2cf92ec56 100644 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx @@ -21,7 +21,7 @@ export function AuthPromptWidget({ message, sessionId, agentInfo, - returnUrl = "/chat", + returnUrl = "/copilot/chat", className, }: AuthPromptWidgetProps) { const router = useRouter(); diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx new file mode 100644 index 0000000000..17748f8dbc --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx @@ -0,0 +1,113 @@ +import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { useBreakpoint } from "@/lib/hooks/useBreakpoint"; +import { cn } from "@/lib/utils"; +import { useEffect } from "react"; +import { ChatInput } from "../ChatInput/ChatInput"; +import { MessageList } from "../MessageList/MessageList"; +import { useChatContainer } from "./useChatContainer"; + +export interface ChatContainerProps { + 
sessionId: string | null; + initialMessages: SessionDetailResponse["messages"]; + initialPrompt?: string; + className?: string; + onStreamingChange?: (isStreaming: boolean) => void; +} + +export function ChatContainer({ + sessionId, + initialMessages, + initialPrompt, + className, + onStreamingChange, +}: ChatContainerProps) { + const { + messages, + streamingChunks, + isStreaming, + stopStreaming, + isRegionBlockedModalOpen, + sendMessageWithContext, + handleRegionModalOpenChange, + handleRegionModalClose, + } = useChatContainer({ + sessionId, + initialMessages, + initialPrompt, + }); + + useEffect(() => { + onStreamingChange?.(isStreaming); + }, [isStreaming, onStreamingChange]); + + const breakpoint = useBreakpoint(); + const isMobile = + breakpoint === "base" || breakpoint === "sm" || breakpoint === "md"; + + return ( +
+    <>
+      <Dialog
+        controlled={{
+          isOpen: isRegionBlockedModalOpen,
+          set: handleRegionModalOpenChange,
+        }}
+        styling={{ maxWidth: isMobile ? "90%" : "600px" }}
+      >
+        <Dialog.Content>
+          <Text>
+            This model is not available in your region. Please connect via VPN
+            and try again.
+          </Text>
+          <Button onClick={handleRegionModalClose}>Close</Button>
+        </Dialog.Content>
+      </Dialog>
+      <div className={cn("flex h-full min-h-0 flex-col", className)}>
+        {/* Messages - Scrollable */}
+        <div className="min-h-0 flex-1 overflow-y-auto">
+          <MessageList
+            messages={messages}
+            streamingChunks={streamingChunks}
+          />
+        </div>
+        {/* Input - Fixed at bottom */}
+        <div className="shrink-0">
+          <ChatInput
+            onSend={sendMessageWithContext}
+            isStreaming={isStreaming}
+            onStop={stopStreaming}
+          />
+        </div>
+      </div>
+    </>
+  );
+}
diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/createStreamEventDispatcher.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts
similarity index 55%
rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/createStreamEventDispatcher.ts
rename to autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts
index 844f126d49..791cf046d5 100644
--- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/createStreamEventDispatcher.ts
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts
@@ -1,6 +1,6 @@
 import { toast } from "sonner";
 import { StreamChunk } from "../../useChatStream";
-import type { HandlerDependencies } from "./useChatContainer.handlers";
+import type { HandlerDependencies } from "./handlers";
 import {
   handleError,
   handleLoginNeeded,
@@ -9,12 +9,30 @@ import {
   handleTextEnded,
   handleToolCallStart,
   handleToolResponse,
-} from "./useChatContainer.handlers";
+  isRegionBlockedError,
+} from "./handlers";
 
 export function createStreamEventDispatcher(
   deps: HandlerDependencies,
 ): (chunk: StreamChunk) => void {
   return function dispatchStreamEvent(chunk: StreamChunk): void {
+    if (
+      chunk.type === "text_chunk" ||
+      chunk.type === "tool_call_start" ||
+      chunk.type === "tool_response" ||
+      chunk.type === "login_needed" ||
+      chunk.type === "need_login" ||
+      chunk.type === "error"
+    ) {
+      if (!deps.hasResponseRef.current) {
+        console.info("[ChatStream] First response chunk:", {
+          type: chunk.type,
+          sessionId: deps.sessionId,
+        });
+      }
+      deps.hasResponseRef.current = true;
+    }
+
     switch (chunk.type) {
       case "text_chunk":
         handleTextChunk(chunk, deps);
@@ -38,15 +56,23 @@ export function createStreamEventDispatcher(
         break;
 
       case "stream_end":
+        console.info("[ChatStream] Stream ended:", {
+          sessionId: deps.sessionId,
+          hasResponse: deps.hasResponseRef.current,
+          chunkCount: deps.streamingChunksRef.current.length,
+        });
         handleStreamEnd(chunk, deps);
         break;
 
       case "error":
+        const isRegionBlocked = isRegionBlockedError(chunk);
         handleError(chunk, deps);
         // Show toast at dispatcher level to avoid circular dependencies
-        toast.error("Chat Error", {
-          description: chunk.message || chunk.content || "An error occurred",
-        });
+        if (!isRegionBlocked) {
+          toast.error("Chat Error", {
+            description: chunk.message || chunk.content || "An error occurred",
+          });
+        }
         break;
 
       case "usage":
diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/useChatContainer.handlers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts
similarity index 66%
rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/useChatContainer.handlers.ts
rename to autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts
index 064b847064..96198a0386 100644
--- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/useChatContainer.handlers.ts
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts
@@ -7,15 +7,30 @@ import {
   parseToolResponse,
 } from "./helpers";
 
+function isToolCallMessage(
+  message: ChatMessageData,
+): message is Extract<ChatMessageData, { type: "tool_call" }> {
+  return
message.type === "tool_call";
+}
+
 export interface HandlerDependencies {
   setHasTextChunks: Dispatch<SetStateAction<boolean>>;
   setStreamingChunks: Dispatch<SetStateAction<string[]>>;
   streamingChunksRef: MutableRefObject<string[]>;
+  hasResponseRef: MutableRefObject<boolean>;
   setMessages: Dispatch<SetStateAction<ChatMessageData[]>>;
   setIsStreamingInitiated: Dispatch<SetStateAction<boolean>>;
+  setIsRegionBlockedModalOpen: Dispatch<SetStateAction<boolean>>;
   sessionId: string;
 }
 
+export function isRegionBlockedError(chunk: StreamChunk): boolean {
+  if (chunk.code === "MODEL_NOT_AVAILABLE_REGION") return true;
+  const message = chunk.message || chunk.content;
+  if (typeof message !== "string") return false;
+  return message.toLowerCase().includes("not available in your region");
+}
+
 export function handleTextChunk(chunk: StreamChunk, deps: HandlerDependencies) {
   if (!chunk.content) return;
   deps.setHasTextChunks(true);
@@ -30,16 +45,17 @@ export function handleTextEnded(
   _chunk: StreamChunk,
   deps: HandlerDependencies,
 ) {
-  console.log("[Text Ended] Saving streamed text as assistant message");
   const completedText = deps.streamingChunksRef.current.join("");
   if (completedText.trim()) {
-    const assistantMessage: ChatMessageData = {
-      type: "message",
-      role: "assistant",
-      content: completedText,
-      timestamp: new Date(),
-    };
-    deps.setMessages((prev) => [...prev, assistantMessage]);
+    deps.setMessages((prev) => {
+      const assistantMessage: ChatMessageData = {
+        type: "message",
+        role: "assistant",
+        content: completedText,
+        timestamp: new Date(),
+      };
+      return [...prev, assistantMessage];
+    });
   }
   deps.setStreamingChunks([]);
   deps.streamingChunksRef.current = [];
@@ -50,30 +66,45 @@ export function handleToolCallStart(
   chunk: StreamChunk,
   deps: HandlerDependencies,
 ) {
-  const toolCallMessage: ChatMessageData = {
+  const toolCallMessage: Extract<ChatMessageData, { type: "tool_call" }> = {
     type: "tool_call",
     toolId: chunk.tool_id || `tool-${Date.now()}-${chunk.idx || 0}`,
-    toolName: chunk.tool_name || "Executing...",
+    toolName: chunk.tool_name || "Executing",
     arguments: chunk.arguments || {},
     timestamp: new Date(),
   };
-  deps.setMessages((prev) => [...prev, toolCallMessage]);
-  console.log("[Tool Call Start]", {
-    toolId: toolCallMessage.toolId,
-    toolName: toolCallMessage.toolName,
-    timestamp: new Date().toISOString(),
-  });
+
+  function updateToolCallMessages(prev: ChatMessageData[]) {
+    const existingIndex = prev.findIndex(function findToolCallIndex(msg) {
+      return isToolCallMessage(msg) && msg.toolId === toolCallMessage.toolId;
+    });
+    if (existingIndex === -1) {
+      return [...prev, toolCallMessage];
+    }
+    const nextMessages = [...prev];
+    const existing = nextMessages[existingIndex];
+    if (!isToolCallMessage(existing)) return prev;
+    const nextArguments =
+      toolCallMessage.arguments &&
+      Object.keys(toolCallMessage.arguments).length > 0
+        ?
toolCallMessage.arguments + : existing.arguments; + nextMessages[existingIndex] = { + ...existing, + toolName: toolCallMessage.toolName || existing.toolName, + arguments: nextArguments, + timestamp: toolCallMessage.timestamp, + }; + return nextMessages; + } + + deps.setMessages(updateToolCallMessages); } export function handleToolResponse( chunk: StreamChunk, deps: HandlerDependencies, ) { - console.log("[Tool Response] Received:", { - toolId: chunk.tool_id, - toolName: chunk.tool_name, - timestamp: new Date().toISOString(), - }); let toolName = chunk.tool_name || "unknown"; if (!chunk.tool_name || chunk.tool_name === "unknown") { deps.setMessages((prev) => { @@ -127,22 +158,15 @@ export function handleToolResponse( const toolCallIndex = prev.findIndex( (msg) => msg.type === "tool_call" && msg.toolId === chunk.tool_id, ); + const hasResponse = prev.some( + (msg) => msg.type === "tool_response" && msg.toolId === chunk.tool_id, + ); + if (hasResponse) return prev; if (toolCallIndex !== -1) { const newMessages = [...prev]; - newMessages[toolCallIndex] = responseMessage; - console.log( - "[Tool Response] Replaced tool_call with matching tool_id:", - chunk.tool_id, - "at index:", - toolCallIndex, - ); + newMessages.splice(toolCallIndex + 1, 0, responseMessage); return newMessages; } - console.warn( - "[Tool Response] No tool_call found with tool_id:", - chunk.tool_id, - "appending instead", - ); return [...prev, responseMessage]; }); } @@ -167,55 +191,38 @@ export function handleStreamEnd( deps: HandlerDependencies, ) { const completedContent = deps.streamingChunksRef.current.join(""); - // Only save message if there are uncommitted chunks - // (text_ended already saved if there were tool calls) + if (!completedContent.trim() && !deps.hasResponseRef.current) { + deps.setMessages((prev) => [ + ...prev, + { + type: "message", + role: "assistant", + content: "No response received. 
Please try again.", + timestamp: new Date(), + }, + ]); + } if (completedContent.trim()) { - console.log( - "[Stream End] Saving remaining streamed text as assistant message", - ); const assistantMessage: ChatMessageData = { type: "message", role: "assistant", content: completedContent, timestamp: new Date(), }; - deps.setMessages((prev) => { - const updated = [...prev, assistantMessage]; - console.log("[Stream End] Final state:", { - localMessages: updated.map((m) => ({ - type: m.type, - ...(m.type === "message" && { - role: m.role, - contentLength: m.content.length, - }), - ...(m.type === "tool_call" && { - toolId: m.toolId, - toolName: m.toolName, - }), - ...(m.type === "tool_response" && { - toolId: m.toolId, - toolName: m.toolName, - success: m.success, - }), - })), - streamingChunks: deps.streamingChunksRef.current, - timestamp: new Date().toISOString(), - }); - return updated; - }); - } else { - console.log("[Stream End] No uncommitted chunks, message already saved"); + deps.setMessages((prev) => [...prev, assistantMessage]); } deps.setStreamingChunks([]); deps.streamingChunksRef.current = []; deps.setHasTextChunks(false); deps.setIsStreamingInitiated(false); - console.log("[Stream End] Stream complete, messages in local state"); } export function handleError(chunk: StreamChunk, deps: HandlerDependencies) { const errorMessage = chunk.message || chunk.content || "An error occurred"; console.error("Stream error:", errorMessage); + if (isRegionBlockedError(chunk)) { + deps.setIsRegionBlockedModalOpen(true); + } deps.setIsStreamingInitiated(false); deps.setHasTextChunks(false); deps.setStreamingChunks([]); diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/helpers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts similarity index 83% rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/helpers.ts rename to autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts index cd05563369..9d51003a93 100644 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/helpers.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts @@ -1,6 +1,33 @@ +import { SessionKey, sessionStorage } from "@/services/storage/session-storage"; import type { ToolResult } from "@/types/chat"; import type { ChatMessageData } from "../ChatMessage/useChatMessage"; +export function hasSentInitialPrompt(sessionId: string): boolean { + try { + const sent = JSON.parse( + sessionStorage.get(SessionKey.CHAT_SENT_INITIAL_PROMPTS) || "{}", + ); + return sent[sessionId] === true; + } catch { + return false; + } +} + +export function markInitialPromptSent(sessionId: string): void { + try { + const sent = JSON.parse( + sessionStorage.get(SessionKey.CHAT_SENT_INITIAL_PROMPTS) || "{}", + ); + sent[sessionId] = true; + sessionStorage.set( + SessionKey.CHAT_SENT_INITIAL_PROMPTS, + JSON.stringify(sent), + ); + } catch { + // Ignore storage errors + } +} + export function removePageContext(content: string): string { // Remove "Page URL: ..." 
pattern at start of line (case insensitive, handles various formats) let cleaned = content.replace(/^\s*Page URL:\s*[^\n\r]*/gim, ""); @@ -207,12 +234,22 @@ export function parseToolResponse( if (responseType === "setup_requirements") { return null; } + if (responseType === "understanding_updated") { + return { + type: "tool_response", + toolId, + toolName, + result: (parsedResult || result) as ToolResult, + success: true, + timestamp: timestamp || new Date(), + }; + } } return { type: "tool_response", toolId, toolName, - result, + result: parsedResult ? (parsedResult as ToolResult) : result, success: true, timestamp: timestamp || new Date(), }; @@ -267,23 +304,34 @@ export function extractCredentialsNeeded( | undefined; if (missingCreds && Object.keys(missingCreds).length > 0) { const agentName = (setupInfo?.agent_name as string) || "this block"; - const credentials = Object.values(missingCreds).map((credInfo) => ({ - provider: (credInfo.provider as string) || "unknown", - providerName: - (credInfo.provider_name as string) || - (credInfo.provider as string) || - "Unknown Provider", - credentialType: + const credentials = Object.values(missingCreds).map((credInfo) => { + // Normalize to array at boundary - prefer 'types' array, fall back to single 'type' + const typesArray = credInfo.types as + | Array<"api_key" | "oauth2" | "user_password" | "host_scoped"> + | undefined; + const singleType = (credInfo.type as | "api_key" | "oauth2" | "user_password" - | "host_scoped") || "api_key", - title: - (credInfo.title as string) || - `${(credInfo.provider_name as string) || (credInfo.provider as string)} credentials`, - scopes: credInfo.scopes as string[] | undefined, - })); + | "host_scoped" + | undefined) || "api_key"; + const credentialTypes = + typesArray && typesArray.length > 0 ? typesArray : [singleType]; + + return { + provider: (credInfo.provider as string) || "unknown", + providerName: + (credInfo.provider_name as string) || + (credInfo.provider as string) || + "Unknown Provider", + credentialTypes, + title: + (credInfo.title as string) || + `${(credInfo.provider_name as string) || (credInfo.provider as string)} credentials`, + scopes: credInfo.scopes as string[] | undefined, + }; + }); return { type: "credentials_needed", toolName, @@ -358,11 +406,14 @@ export function extractInputsNeeded( credentials.forEach((cred) => { const id = cred.id as string; if (id) { + const credentialTypes = Array.isArray(cred.types) + ? 
cred.types
+          : [(cred.type as string) || "api_key"];
       credentialsSchema[id] = {
         type: "object",
         properties: {},
         credentials_provider: [cred.provider as string],
-        credentials_types: [(cred.type as string) || "api_key"],
+        credentials_types: credentialTypes,
         credentials_scopes: cred.scopes as string[] | undefined,
       };
     }
diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/useChatContainer.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts
similarity index 65%
rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/useChatContainer.ts
rename to autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts
index 8e7dee7718..42dd04670d 100644
--- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/useChatContainer.ts
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts
@@ -1,14 +1,17 @@
 import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse";
-import { useCallback, useMemo, useRef, useState } from "react";
+import { useCallback, useEffect, useMemo, useRef, useState } from "react";
 import { toast } from "sonner";
 import { useChatStream } from "../../useChatStream";
+import { usePageContext } from "../../usePageContext";
 import type { ChatMessageData } from "../ChatMessage/useChatMessage";
 import { createStreamEventDispatcher } from "./createStreamEventDispatcher";
 import {
   createUserMessage,
   filterAuthMessages,
+  hasSentInitialPrompt,
   isToolCallArray,
   isValidMessage,
+  markInitialPromptSent,
   parseToolResponse,
   removePageContext,
 } from "./helpers";
@@ -16,20 +19,45 @@ import {
 interface Args {
   sessionId: string | null;
   initialMessages: SessionDetailResponse["messages"];
+  initialPrompt?: string;
 }
 
-export function useChatContainer({ sessionId, initialMessages }: Args) {
+export function useChatContainer({
+  sessionId,
+  initialMessages,
+  initialPrompt,
+}: Args) {
   const [messages, setMessages] = useState<ChatMessageData[]>([]);
   const [streamingChunks, setStreamingChunks] = useState<string[]>([]);
   const [hasTextChunks, setHasTextChunks] = useState(false);
   const [isStreamingInitiated, setIsStreamingInitiated] = useState(false);
+  const [isRegionBlockedModalOpen, setIsRegionBlockedModalOpen] =
+    useState(false);
+  const hasResponseRef = useRef(false);
   const streamingChunksRef = useRef<string[]>([]);
-  const { error, sendMessage: sendStreamMessage } = useChatStream();
+  const previousSessionIdRef = useRef<string | null>(null);
+  const {
+    error,
+    sendMessage: sendStreamMessage,
+    stopStreaming,
+  } = useChatStream();
 
   const isStreaming = isStreamingInitiated || hasTextChunks;
 
+  useEffect(() => {
+    if (sessionId !== previousSessionIdRef.current) {
+      stopStreaming(previousSessionIdRef.current ?? undefined, true);
+      previousSessionIdRef.current = sessionId;
+      setMessages([]);
+      setStreamingChunks([]);
+      streamingChunksRef.current = [];
+      setHasTextChunks(false);
+      setIsStreamingInitiated(false);
+      hasResponseRef.current = false;
+    }
+  }, [sessionId, stopStreaming]);
+
   const allMessages = useMemo(() => {
     const processedInitialMessages: ChatMessageData[] = [];
-    // Map to track tool calls by their ID so we can look up tool names for tool responses
     const toolCallMap = new Map<string, string>();
 
     for (const msg of initialMessages) {
@@ -45,13 +73,9 @@ export function useChatContainer({ sessionId, initialMessages }: Args) {
         ?
new Date(msg.timestamp as string)
         : undefined;
 
-      // Remove page context from user messages when loading existing sessions
       if (role === "user") {
         content = removePageContext(content);
-        // Skip user messages that become empty after removing page context
-        if (!content.trim()) {
-          continue;
-        }
+        if (!content.trim()) continue;
         processedInitialMessages.push({
           type: "message",
           role: "user",
@@ -61,19 +85,15 @@ export function useChatContainer({ sessionId, initialMessages }: Args) {
         continue;
       }
 
-      // Handle assistant messages first (before tool messages) to build tool call map
       if (role === "assistant") {
-        // Strip <thinking> tags from content
         content = content
           .replace(/<thinking>[\s\S]*?<\/thinking>/gi, "")
           .trim();
-        // If assistant has tool calls, create tool_call messages for each
         if (toolCalls && isToolCallArray(toolCalls) && toolCalls.length > 0) {
           for (const toolCall of toolCalls) {
             const toolName = toolCall.function.name;
             const toolId = toolCall.id;
-            // Store tool name for later lookup
             toolCallMap.set(toolId, toolName);
 
             try {
@@ -96,7 +116,6 @@ export function useChatContainer({ sessionId, initialMessages }: Args) {
             });
           }
         }
-        // Only add assistant message if there's content after stripping thinking tags
         if (content.trim()) {
           processedInitialMessages.push({
             type: "message",
@@ -106,7 +125,6 @@ export function useChatContainer({ sessionId, initialMessages }: Args) {
           });
         }
       } else if (content.trim()) {
-        // Assistant message without tool calls, but with content
         processedInitialMessages.push({
           type: "message",
           role: "assistant",
@@ -117,7 +135,6 @@ export function useChatContainer({ sessionId, initialMessages }: Args) {
         });
         continue;
       }
 
-      // Handle tool messages - look up tool name from tool call map
       if (role === "tool") {
         const toolCallId = (msg.tool_call_id as string) || "";
         const toolName = toolCallMap.get(toolCallId) || "unknown";
@@ -133,7 +150,6 @@ export function useChatContainer({ sessionId, initialMessages }: Args) {
         continue;
       }
 
-      // Handle other message types (system, etc.)
       if (content.trim()) {
         processedInitialMessages.push({
           type: "message",
@@ -154,9 +170,10 @@ export function useChatContainer({ sessionId, initialMessages }: Args) {
       context?: { url: string; content: string },
     ) {
       if (!sessionId) {
-        console.error("Cannot send message: no session ID");
+        console.error("[useChatContainer] Cannot send message: no session ID");
         return;
       }
+      setIsRegionBlockedModalOpen(false);
       if (isUserMessage) {
         const userMessage = createUserMessage(content);
         setMessages((prev) => [...filterAuthMessages(prev), userMessage]);
@@ -167,14 +184,19 @@ export function useChatContainer({ sessionId, initialMessages }: Args) {
       streamingChunksRef.current = [];
       setHasTextChunks(false);
       setIsStreamingInitiated(true);
+      hasResponseRef.current = false;
+
      const dispatcher = createStreamEventDispatcher({
         setHasTextChunks,
         setStreamingChunks,
         streamingChunksRef,
+        hasResponseRef,
         setMessages,
+        setIsRegionBlockedModalOpen,
         sessionId,
         setIsStreamingInitiated,
       });
+
       try {
         await sendStreamMessage(
           sessionId,
@@ -184,8 +206,12 @@ export function useChatContainer({ sessionId, initialMessages }: Args) {
           context,
         );
       } catch (err) {
-        console.error("Failed to send message:", err);
+        console.error("[useChatContainer] Failed to send message:", err);
         setIsStreamingInitiated(false);
+
+        // Don't show error toast for AbortError (expected during cleanup)
+        if (err instanceof Error && err.name === "AbortError") return;
+
         const errorMessage =
           err instanceof Error ?
err.message : "Failed to send message"; toast.error("Failed to send message", { @@ -196,11 +222,63 @@ export function useChatContainer({ sessionId, initialMessages }: Args) { [sessionId, sendStreamMessage], ); + const handleStopStreaming = useCallback(() => { + stopStreaming(); + setStreamingChunks([]); + streamingChunksRef.current = []; + setHasTextChunks(false); + setIsStreamingInitiated(false); + }, [stopStreaming]); + + const { capturePageContext } = usePageContext(); + + // Send initial prompt if provided (for new sessions from homepage) + useEffect( + function handleInitialPrompt() { + if (!initialPrompt || !sessionId) return; + if (initialMessages.length > 0) return; + if (hasSentInitialPrompt(sessionId)) return; + + markInitialPromptSent(sessionId); + const context = capturePageContext(); + sendMessage(initialPrompt, true, context); + }, + [ + initialPrompt, + sessionId, + initialMessages.length, + sendMessage, + capturePageContext, + ], + ); + + async function sendMessageWithContext( + content: string, + isUserMessage: boolean = true, + ) { + const context = capturePageContext(); + await sendMessage(content, isUserMessage, context); + } + + function handleRegionModalOpenChange(open: boolean) { + setIsRegionBlockedModalOpen(open); + } + + function handleRegionModalClose() { + setIsRegionBlockedModalOpen(false); + } + return { messages: allMessages, streamingChunks, isStreaming, error, + isRegionBlockedModalOpen, + setIsRegionBlockedModalOpen, + sendMessageWithContext, + handleRegionModalOpenChange, + handleRegionModalClose, sendMessage, + stopStreaming: handleStopStreaming, }; } diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx similarity index 97% rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx rename to autogpt_platform/frontend/src/components/contextual/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx index 4b9da57286..f0dfadd1f7 100644 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx @@ -9,7 +9,9 @@ import { useChatCredentialsSetup } from "./useChatCredentialsSetup"; export interface CredentialInfo { provider: string; providerName: string; - credentialType: "api_key" | "oauth2" | "user_password" | "host_scoped"; + credentialTypes: Array< + "api_key" | "oauth2" | "user_password" | "host_scoped" + >; title: string; scopes?: string[]; } @@ -30,7 +32,7 @@ function createSchemaFromCredentialInfo( type: "object", properties: {}, credentials_provider: [credential.provider], - credentials_types: [credential.credentialType], + credentials_types: credential.credentialTypes, credentials_scopes: credential.scopes, discriminator: undefined, discriminator_mapping: undefined, diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatCredentialsSetup/useChatCredentialsSetup.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatCredentialsSetup/useChatCredentialsSetup.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatCredentialsSetup/useChatCredentialsSetup.ts rename to 
autogpt_platform/frontend/src/components/contextual/Chat/components/ChatCredentialsSetup/useChatCredentialsSetup.ts
diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatErrorState/ChatErrorState.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatErrorState/ChatErrorState.tsx
similarity index 100%
rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatErrorState/ChatErrorState.tsx
rename to autogpt_platform/frontend/src/components/contextual/Chat/components/ChatErrorState/ChatErrorState.tsx
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx
new file mode 100644
index 0000000000..8cdecf0bf4
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx
@@ -0,0 +1,103 @@
+import { Button } from "@/components/atoms/Button/Button";
+import { cn } from "@/lib/utils";
+import { ArrowUpIcon, StopIcon } from "@phosphor-icons/react";
+import { useChatInput } from "./useChatInput";
+
+export interface Props {
+  onSend: (message: string) => void;
+  disabled?: boolean;
+  isStreaming?: boolean;
+  onStop?: () => void;
+  placeholder?: string;
+  className?: string;
+}
+
+export function ChatInput({
+  onSend,
+  disabled = false,
+  isStreaming = false,
+  onStop,
+  placeholder = "Type your message...",
+  className,
+}: Props) {
+  const inputId = "chat-input";
+  const { value, setValue, handleKeyDown, handleSend, hasMultipleLines } =
+    useChatInput({
+      onSend,
+      disabled: disabled || isStreaming,
+      maxRows: 4,
+      inputId,
+    });
+
+  function handleSubmit(e: React.FormEvent<HTMLFormElement>) {
+    e.preventDefault();
+    handleSend();
+  }
+
+  function handleChange(e: React.ChangeEvent<HTMLTextAreaElement>) {
+    setValue(e.target.value);
+  }
+
+  return (
+    <form
+      onSubmit={handleSubmit}
+      className={cn("relative flex w-full items-end", className)}
+    >
+      <textarea
+        id={inputId}
+        value={value}
+        onChange={handleChange}
+        onKeyDown={handleKeyDown}
+        placeholder={placeholder}
+        disabled={disabled}
+        rows={1}
+        className={cn(
+          "w-full resize-none rounded-2xl border px-4 py-3 pr-12",
+          hasMultipleLines && "rounded-xl",
+        )}
+      />
+      {isStreaming ? (
+        <Button type="button" onClick={onStop} aria-label="Stop streaming">
+          <StopIcon />
+        </Button>
+      ) : (
+        <Button
+          type="submit"
+          disabled={disabled || !value.trim()}
+          aria-label="Send message"
+        >
+          <ArrowUpIcon />
+        </Button>
+      )}
+    </form>
+  );
+}
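Note on the relocation above: moving `ChatContainer` out of the `/chat` route and under `src/components/contextual/` means any page can host the copilot chat. A minimal host-page sketch follows; the `CopilotChatPanel` name and the empty `initialMessages` are illustrative assumptions, and only the `ChatContainer` props shown come from this diff.

```tsx
// Hypothetical consumer of the relocated ChatContainer (not part of the patch).
import { ChatContainer } from "@/components/contextual/Chat/components/ChatContainer/ChatContainer";
import { useState } from "react";

export function CopilotChatPanel({ sessionId }: { sessionId: string | null }) {
  // Track streaming so the host page can react to it (e.g. lock navigation).
  const [isStreaming, setIsStreaming] = useState(false);

  return (
    <ChatContainer
      sessionId={sessionId}
      initialMessages={[]} // a real page would pass the fetched session's messages
      onStreamingChange={setIsStreaming}
      className={isStreaming ? "pointer-events-auto" : undefined}
    />
  );
}
```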