From 071b3bb5cd3c85f99d12970e60f83672ceab1c08 Mon Sep 17 00:00:00 2001 From: Ubbe Date: Wed, 28 Jan 2026 00:49:28 +0700 Subject: [PATCH 01/25] fix(frontend): more copilot refinements (#11858) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes 🏗️ On the **Copilot** page: - prevent unnecessary sidebar repaints - show a disclaimer when switching chats on the sidebar to terminate a current stream - handle loading better - save streams better when disconnecting ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Run the app locally and test the above --- .../components/CopilotShell/CopilotShell.tsx | 63 +++++- .../CopilotShell/useCopilotShell.ts | 106 +++++++++- .../(platform)/copilot/copilot-page-store.ts | 81 ++++++-- .../src/app/(platform)/copilot/page.tsx | 31 ++- .../app/(platform)/copilot/useCopilotPage.ts | 13 -- .../src/components/contextual/Chat/Chat.tsx | 21 +- .../components/contextual/Chat/chat-store.ts | 181 ++++++++++++------ .../components/ChatMessage/ChatMessage.tsx | 6 +- .../UserChatBubble/UserChatBubble.tsx | 2 +- .../contextual/Chat/useChatSession.ts | 12 -- 10 files changed, 376 insertions(+), 140 deletions(-) diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx index fb22640302..8c9f9d528c 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx @@ -3,7 +3,7 @@ import { ChatLoader } from "@/components/contextual/Chat/components/ChatLoader/ChatLoader"; import { NAVBAR_HEIGHT_PX } from "@/lib/constants"; import type { ReactNode } from "react"; -import { useEffect } from "react"; +import { useCallback, useEffect } from "react"; import { useCopilotStore } from "../../copilot-page-store"; import { DesktopSidebar } from "./components/DesktopSidebar/DesktopSidebar"; import { LoadingState } from "./components/LoadingState/LoadingState"; @@ -25,10 +25,12 @@ export function CopilotShell({ children }: Props) { sessions, currentSessionId, handleSelectSession, + performSelectSession, handleOpenDrawer, handleCloseDrawer, handleDrawerOpenChange, handleNewChat, + performNewChat, hasNextPage, isFetchingNextPage, fetchNextPage, @@ -36,22 +38,71 @@ export function CopilotShell({ children }: Props) { } = useCopilotShell(); const setNewChatHandler = useCopilotStore((s) => s.setNewChatHandler); + const setNewChatWithInterruptHandler = useCopilotStore( + (s) => s.setNewChatWithInterruptHandler, + ); + const setSelectSessionHandler = useCopilotStore( + (s) => s.setSelectSessionHandler, + ); + const setSelectSessionWithInterruptHandler = useCopilotStore( + (s) => s.setSelectSessionWithInterruptHandler, + ); const requestNewChat = useCopilotStore((s) => s.requestNewChat); + const requestSelectSession = useCopilotStore((s) => s.requestSelectSession); + + const stableHandleNewChat = useCallback(handleNewChat, [handleNewChat]); + const stablePerformNewChat = useCallback(performNewChat, [performNewChat]); useEffect( - function registerNewChatHandler() { - setNewChatHandler(handleNewChat); + function registerNewChatHandlers() { + setNewChatHandler(stableHandleNewChat); + 
setNewChatWithInterruptHandler(stablePerformNewChat); return function cleanup() { setNewChatHandler(null); + setNewChatWithInterruptHandler(null); }; }, - [handleNewChat], + [ + stableHandleNewChat, + stablePerformNewChat, + setNewChatHandler, + setNewChatWithInterruptHandler, + ], + ); + + const stableHandleSelectSession = useCallback(handleSelectSession, [ + handleSelectSession, + ]); + + const stablePerformSelectSession = useCallback(performSelectSession, [ + performSelectSession, + ]); + + useEffect( + function registerSelectSessionHandlers() { + setSelectSessionHandler(stableHandleSelectSession); + setSelectSessionWithInterruptHandler(stablePerformSelectSession); + return function cleanup() { + setSelectSessionHandler(null); + setSelectSessionWithInterruptHandler(null); + }; + }, + [ + stableHandleSelectSession, + stablePerformSelectSession, + setSelectSessionHandler, + setSelectSessionWithInterruptHandler, + ], ); function handleNewChatClick() { requestNewChat(); } + function handleSessionClick(sessionId: string) { + requestSelectSession(sessionId); + } + if (!isLoggedIn) { return (
@@ -72,7 +123,7 @@ export function CopilotShell({ children }: Props) { isLoading={isLoading} hasNextPage={hasNextPage} isFetchingNextPage={isFetchingNextPage} - onSelectSession={handleSelectSession} + onSelectSession={handleSessionClick} onFetchNextPage={fetchNextPage} onNewChat={handleNewChatClick} hasActiveSession={Boolean(hasActiveSession)} @@ -94,7 +145,7 @@ export function CopilotShell({ children }: Props) { isLoading={isLoading} hasNextPage={hasNextPage} isFetchingNextPage={isFetchingNextPage} - onSelectSession={handleSelectSession} + onSelectSession={handleSessionClick} onFetchNextPage={fetchNextPage} onNewChat={handleNewChatClick} onClose={handleCloseDrawer} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts index a3aa0b55b2..3154df2975 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts @@ -1,17 +1,20 @@ "use client"; import { + getGetV2GetSessionQueryKey, getGetV2ListSessionsQueryKey, useGetV2GetSession, } from "@/app/api/__generated__/endpoints/chat/chat"; import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; import { okData } from "@/app/api/helpers"; +import { useChatStore } from "@/components/contextual/Chat/chat-store"; import { useBreakpoint } from "@/lib/hooks/useBreakpoint"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { useQueryClient } from "@tanstack/react-query"; import { parseAsString, useQueryState } from "nuqs"; import { usePathname, useSearchParams } from "next/navigation"; import { useEffect, useRef, useState } from "react"; +import { useCopilotStore } from "../../copilot-page-store"; import { useMobileDrawer } from "./components/MobileDrawer/useMobileDrawer"; import { useSessionsPagination } from "./components/SessionsList/useSessionsPagination"; import { @@ -73,6 +76,19 @@ export function useCopilotShell() { Map >(new Map()); + const [optimisticSessionId, setOptimisticSessionId] = useState( + null, + ); + + useEffect( + function clearOptimisticWhenUrlMatches() { + if (optimisticSessionId && currentSessionId === optimisticSessionId) { + setOptimisticSessionId(null); + } + }, + [currentSessionId, optimisticSessionId], + ); + // Mark as auto-selected when sessionId is in URL useEffect(() => { if (paramSessionId && !hasAutoSelectedRef.current) { @@ -142,7 +158,9 @@ export function useCopilotShell() { const visibleSessions = filterVisibleSessions(sessions); const sidebarSelectedSessionId = - isOnHomepage && !paramSessionId ? null : currentSessionId; + isOnHomepage && !paramSessionId && !optimisticSessionId + ? null + : optimisticSessionId || currentSessionId; const isReadyToShowContent = isOnHomepage ? 
true @@ -155,8 +173,89 @@ export function useCopilotShell() { hasAutoSelectedSession, ); - function handleSelectSession(sessionId: string) { + const stopStream = useChatStore((s) => s.stopStream); + const onStreamComplete = useChatStore((s) => s.onStreamComplete); + const setIsSwitchingSession = useCopilotStore((s) => s.setIsSwitchingSession); + + async function performSelectSession(sessionId: string) { + if (sessionId === currentSessionId) return; + + const sourceSessionId = currentSessionId; + + if (sourceSessionId) { + setIsSwitchingSession(true); + + await new Promise(function waitForStreamComplete(resolve) { + const unsubscribe = onStreamComplete( + function handleComplete(completedId) { + if (completedId === sourceSessionId) { + clearTimeout(timeout); + unsubscribe(); + resolve(); + } + }, + ); + const timeout = setTimeout(function handleTimeout() { + unsubscribe(); + resolve(); + }, 3000); + stopStream(sourceSessionId); + }); + + queryClient.invalidateQueries({ + queryKey: getGetV2GetSessionQueryKey(sourceSessionId), + }); + } + + setOptimisticSessionId(sessionId); setUrlSessionId(sessionId, { shallow: false }); + setIsSwitchingSession(false); + if (isMobile) handleCloseDrawer(); + } + + function handleSelectSession(sessionId: string) { + if (sessionId === currentSessionId) return; + setOptimisticSessionId(sessionId); + setUrlSessionId(sessionId, { shallow: false }); + if (isMobile) handleCloseDrawer(); + } + + async function performNewChat() { + const sourceSessionId = currentSessionId; + + if (sourceSessionId) { + setIsSwitchingSession(true); + + await new Promise(function waitForStreamComplete(resolve) { + const unsubscribe = onStreamComplete( + function handleComplete(completedId) { + if (completedId === sourceSessionId) { + clearTimeout(timeout); + unsubscribe(); + resolve(); + } + }, + ); + const timeout = setTimeout(function handleTimeout() { + unsubscribe(); + resolve(); + }, 3000); + stopStream(sourceSessionId); + }); + + queryClient.invalidateQueries({ + queryKey: getGetV2GetSessionQueryKey(sourceSessionId), + }); + setIsSwitchingSession(false); + } + + resetAutoSelect(); + resetPagination(); + queryClient.invalidateQueries({ + queryKey: getGetV2ListSessionsQueryKey(), + }); + setUrlSessionId(null, { shallow: false }); + setOptimisticSessionId(null); if (isMobile) handleCloseDrawer(); } @@ -167,6 +266,7 @@ export function useCopilotShell() { queryKey: getGetV2ListSessionsQueryKey(), }); setUrlSessionId(null, { shallow: false }); + setOptimisticSessionId(null); if (isMobile) handleCloseDrawer(); } @@ -187,10 +287,12 @@ export function useCopilotShell() { sessions: visibleSessions, currentSessionId: sidebarSelectedSessionId, handleSelectSession, + performSelectSession, handleOpenDrawer, handleCloseDrawer, handleDrawerOpenChange, handleNewChat, + performNewChat, hasNextPage, isFetchingNextPage: isSessionsFetching, fetchNextPage, diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts index 22bf5000a1..486d31865b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/copilot-page-store.ts @@ -4,51 +4,106 @@ import { create } from "zustand"; interface CopilotStoreState { isStreaming: boolean; - isNewChatModalOpen: boolean; + isSwitchingSession: boolean; + isInterruptModalOpen: boolean; + pendingAction: (() => void) | null; newChatHandler: (() => void) | null; + 
newChatWithInterruptHandler: (() => void) | null; + selectSessionHandler: ((sessionId: string) => void) | null; + selectSessionWithInterruptHandler: ((sessionId: string) => void) | null; } interface CopilotStoreActions { setIsStreaming: (isStreaming: boolean) => void; + setIsSwitchingSession: (isSwitchingSession: boolean) => void; setNewChatHandler: (handler: (() => void) | null) => void; + setNewChatWithInterruptHandler: (handler: (() => void) | null) => void; + setSelectSessionHandler: ( + handler: ((sessionId: string) => void) | null, + ) => void; + setSelectSessionWithInterruptHandler: ( + handler: ((sessionId: string) => void) | null, + ) => void; requestNewChat: () => void; - confirmNewChat: () => void; - cancelNewChat: () => void; + requestSelectSession: (sessionId: string) => void; + confirmInterrupt: () => void; + cancelInterrupt: () => void; } type CopilotStore = CopilotStoreState & CopilotStoreActions; export const useCopilotStore = create((set, get) => ({ isStreaming: false, - isNewChatModalOpen: false, + isSwitchingSession: false, + isInterruptModalOpen: false, + pendingAction: null, newChatHandler: null, + newChatWithInterruptHandler: null, + selectSessionHandler: null, + selectSessionWithInterruptHandler: null, setIsStreaming(isStreaming) { set({ isStreaming }); }, + setIsSwitchingSession(isSwitchingSession) { + set({ isSwitchingSession }); + }, + setNewChatHandler(handler) { set({ newChatHandler: handler }); }, + setNewChatWithInterruptHandler(handler) { + set({ newChatWithInterruptHandler: handler }); + }, + + setSelectSessionHandler(handler) { + set({ selectSessionHandler: handler }); + }, + + setSelectSessionWithInterruptHandler(handler) { + set({ selectSessionWithInterruptHandler: handler }); + }, + requestNewChat() { - const { isStreaming, newChatHandler } = get(); + const { isStreaming, newChatHandler, newChatWithInterruptHandler } = get(); if (isStreaming) { - set({ isNewChatModalOpen: true }); + if (!newChatWithInterruptHandler) return; + set({ + isInterruptModalOpen: true, + pendingAction: newChatWithInterruptHandler, + }); } else if (newChatHandler) { newChatHandler(); } }, - confirmNewChat() { - const { newChatHandler } = get(); - set({ isNewChatModalOpen: false }); - if (newChatHandler) { - newChatHandler(); + requestSelectSession(sessionId) { + const { + isStreaming, + selectSessionHandler, + selectSessionWithInterruptHandler, + } = get(); + if (isStreaming) { + if (!selectSessionWithInterruptHandler) return; + set({ + isInterruptModalOpen: true, + pendingAction: () => selectSessionWithInterruptHandler(sessionId), + }); + } else { + if (!selectSessionHandler) return; + selectSessionHandler(sessionId); } }, - cancelNewChat() { - set({ isNewChatModalOpen: false }); + confirmInterrupt() { + const { pendingAction } = get(); + set({ isInterruptModalOpen: false, pendingAction: null }); + if (pendingAction) pendingAction(); + }, + + cancelInterrupt() { + set({ isInterruptModalOpen: false, pendingAction: null }); }, })); diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx index 83b21bf82e..008b06fcda 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx @@ -13,22 +13,15 @@ import { useCopilotPage } from "./useCopilotPage"; export default function CopilotPage() { const { state, handlers } = useCopilotPage(); - const confirmNewChat = useCopilotStore((s) => s.confirmNewChat); - const { - 
greetingName, - quickActions, - isLoading, - pageState, - isNewChatModalOpen, - isReady, - } = state; + const isInterruptModalOpen = useCopilotStore((s) => s.isInterruptModalOpen); + const confirmInterrupt = useCopilotStore((s) => s.confirmInterrupt); + const cancelInterrupt = useCopilotStore((s) => s.cancelInterrupt); + const { greetingName, quickActions, isLoading, pageState, isReady } = state; const { handleQuickAction, startChatWithPrompt, handleSessionNotFound, handleStreamingChange, - handleCancelNewChat, - handleNewChatModalOpen, } = handlers; if (!isReady) return null; @@ -48,31 +41,33 @@ export default function CopilotPage() { title="Interrupt current chat?" styling={{ maxWidth: 300, width: "100%" }} controlled={{ - isOpen: isNewChatModalOpen, - set: handleNewChatModalOpen, + isOpen: isInterruptModalOpen, + set: (open) => { + if (!open) cancelInterrupt(); + }, }} - onClose={handleCancelNewChat} + onClose={cancelInterrupt} >
The current chat response will be interrupted. Are you sure you - want to start a new chat? + want to continue?
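
Taken together, the store changes above mean the interrupt modal no longer belongs to the "new chat" action specifically: every sidebar action is routed through `requestSelectSession`/`requestNewChat`, which either fire the plain handler immediately or, mid-stream, stash the interrupt-capable variant as `pendingAction` and open the shared modal. A minimal sketch of a consumer (the component and its props are illustrative; the store selector matches this diff):

```tsx
import { useCopilotStore } from "@/app/(platform)/copilot/copilot-page-store";

// Illustrative sidebar row: clicks go through the store so an in-flight
// stream is never cut off without the user confirming.
export function SessionRow(props: { sessionId: string; title: string }) {
  const requestSelectSession = useCopilotStore((s) => s.requestSelectSession);

  // While streaming: opens the "Interrupt current chat?" modal and stashes
  // () => selectSessionWithInterruptHandler(sessionId) as pendingAction.
  // Otherwise: calls selectSessionHandler(sessionId) straight away.
  return (
    <button onClick={() => requestSelectSession(props.sessionId)}>
      {props.title}
    </button>
  );
}
```

The modal itself only ever calls `confirmInterrupt()` (run and clear `pendingAction`) or `cancelInterrupt()` (just clear it), which is why `page.tsx` above could drop its new-chat-specific modal handlers.
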
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts index 1d9c843d7d..8cf4599a12 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts @@ -75,9 +75,7 @@ export function useCopilotPage() { const { user, isLoggedIn, isUserLoading } = useSupabase(); const { toast } = useToast(); - const isNewChatModalOpen = useCopilotStore((s) => s.isNewChatModalOpen); const setIsStreaming = useCopilotStore((s) => s.setIsStreaming); - const cancelNewChat = useCopilotStore((s) => s.cancelNewChat); const isChatEnabled = useGetFlag(Flag.CHAT); const flags = useFlags(); @@ -201,21 +199,12 @@ export function useCopilotPage() { setIsStreaming(isStreamingValue); } - function handleCancelNewChat() { - cancelNewChat(); - } - - function handleNewChatModalOpen(isOpen: boolean) { - if (!isOpen) cancelNewChat(); - } - return { state: { greetingName, quickActions, isLoading: isUserLoading, pageState: state.pageState, - isNewChatModalOpen, isReady: isFlagReady && isChatEnabled !== false && isLoggedIn, }, handlers: { @@ -223,8 +212,6 @@ export function useCopilotPage() { startChatWithPrompt, handleSessionNotFound, handleStreamingChange, - handleCancelNewChat, - handleNewChatModalOpen, }, }; } diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx index ba7584765d..a7a5f61674 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx @@ -1,11 +1,12 @@ "use client"; +import { useCopilotStore } from "@/app/(platform)/copilot/copilot-page-store"; +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; import { Text } from "@/components/atoms/Text/Text"; import { cn } from "@/lib/utils"; import { useEffect, useRef } from "react"; import { ChatContainer } from "./components/ChatContainer/ChatContainer"; import { ChatErrorState } from "./components/ChatErrorState/ChatErrorState"; -import { ChatLoader } from "./components/ChatLoader/ChatLoader"; import { useChat } from "./useChat"; export interface ChatProps { @@ -24,6 +25,7 @@ export function Chat({ onStreamingChange, }: ChatProps) { const hasHandledNotFoundRef = useRef(false); + const isSwitchingSession = useCopilotStore((s) => s.isSwitchingSession); const { messages, isLoading, @@ -47,29 +49,34 @@ export function Chat({ [onSessionNotFound, urlSessionId, isSessionNotFound, isLoading, isCreating], ); + const shouldShowLoader = + (showLoader && (isLoading || isCreating)) || isSwitchingSession; + return (
{/* Main Content */}
{/* Loading State */} - {showLoader && (isLoading || isCreating) && ( + {shouldShowLoader && (
-          <ChatLoader />
+          <LoadingSpinner />
           <Text>
-            Loading your chats...
+            {isSwitchingSession
+              ? "Switching chat..."
+              : "Loading your chat..."}
           </Text>
)} {/* Error State */} - {error && !isLoading && ( + {error && !isLoading && !isSwitchingSession && ( )} {/* Session Content */} - {sessionId && !isLoading && !error && ( + {sessionId && !isLoading && !error && !isSwitchingSession && ( ) { +function cleanupExpiredStreams( + completedStreams: Map, +): Map { const now = Date.now(); - for (const [sessionId, result] of completedStreams) { + const cleaned = new Map(completedStreams); + for (const [sessionId, result] of cleaned) { if (now - result.completedAt > COMPLETED_STREAM_TTL) { - completedStreams.delete(sessionId); + cleaned.delete(sessionId); } } -} - -function moveToCompleted( - activeStreams: Map, - completedStreams: Map, - streamCompleteCallbacks: Set, - sessionId: string, -) { - const stream = activeStreams.get(sessionId); - if (!stream) return; - - const result: StreamResult = { - sessionId, - status: stream.status, - chunks: stream.chunks, - completedAt: Date.now(), - error: stream.error, - }; - - completedStreams.set(sessionId, result); - activeStreams.delete(sessionId); - cleanupCompletedStreams(completedStreams); - - if (stream.status === "completed" || stream.status === "error") { - notifyStreamComplete(streamCompleteCallbacks, sessionId); - } + return cleaned; } export const useChatStore = create((set, get) => ({ @@ -106,17 +84,31 @@ export const useChatStore = create((set, get) => ({ context, onChunk, ) { - const { activeStreams, completedStreams, streamCompleteCallbacks } = get(); + const state = get(); + const newActiveStreams = new Map(state.activeStreams); + let newCompletedStreams = new Map(state.completedStreams); + const callbacks = state.streamCompleteCallbacks; - const existingStream = activeStreams.get(sessionId); + const existingStream = newActiveStreams.get(sessionId); if (existingStream) { existingStream.abortController.abort(); - moveToCompleted( - activeStreams, - completedStreams, - streamCompleteCallbacks, + const normalizedStatus = + existingStream.status === "streaming" + ? 
"completed" + : existingStream.status; + const result: StreamResult = { sessionId, - ); + status: normalizedStatus, + chunks: existingStream.chunks, + completedAt: Date.now(), + error: existingStream.error, + }; + newCompletedStreams.set(sessionId, result); + newActiveStreams.delete(sessionId); + newCompletedStreams = cleanupExpiredStreams(newCompletedStreams); + if (normalizedStatus === "completed" || normalizedStatus === "error") { + notifyStreamComplete(callbacks, sessionId); + } } const abortController = new AbortController(); @@ -132,36 +124,76 @@ export const useChatStore = create((set, get) => ({ onChunkCallbacks: initialCallbacks, }; - activeStreams.set(sessionId, stream); + newActiveStreams.set(sessionId, stream); + set({ + activeStreams: newActiveStreams, + completedStreams: newCompletedStreams, + }); try { await executeStream(stream, message, isUserMessage, context); } finally { if (onChunk) stream.onChunkCallbacks.delete(onChunk); if (stream.status !== "streaming") { - moveToCompleted( - activeStreams, - completedStreams, - streamCompleteCallbacks, - sessionId, - ); + const currentState = get(); + const finalActiveStreams = new Map(currentState.activeStreams); + let finalCompletedStreams = new Map(currentState.completedStreams); + + const storedStream = finalActiveStreams.get(sessionId); + if (storedStream === stream) { + const result: StreamResult = { + sessionId, + status: stream.status, + chunks: stream.chunks, + completedAt: Date.now(), + error: stream.error, + }; + finalCompletedStreams.set(sessionId, result); + finalActiveStreams.delete(sessionId); + finalCompletedStreams = cleanupExpiredStreams(finalCompletedStreams); + set({ + activeStreams: finalActiveStreams, + completedStreams: finalCompletedStreams, + }); + if (stream.status === "completed" || stream.status === "error") { + notifyStreamComplete( + currentState.streamCompleteCallbacks, + sessionId, + ); + } + } } } }, stopStream: function stopStream(sessionId) { - const { activeStreams, completedStreams, streamCompleteCallbacks } = get(); - const stream = activeStreams.get(sessionId); - if (stream) { - stream.abortController.abort(); - stream.status = "completed"; - moveToCompleted( - activeStreams, - completedStreams, - streamCompleteCallbacks, - sessionId, - ); - } + const state = get(); + const stream = state.activeStreams.get(sessionId); + if (!stream) return; + + stream.abortController.abort(); + stream.status = "completed"; + + const newActiveStreams = new Map(state.activeStreams); + let newCompletedStreams = new Map(state.completedStreams); + + const result: StreamResult = { + sessionId, + status: stream.status, + chunks: stream.chunks, + completedAt: Date.now(), + error: stream.error, + }; + newCompletedStreams.set(sessionId, result); + newActiveStreams.delete(sessionId); + newCompletedStreams = cleanupExpiredStreams(newCompletedStreams); + + set({ + activeStreams: newActiveStreams, + completedStreams: newCompletedStreams, + }); + + notifyStreamComplete(state.streamCompleteCallbacks, sessionId); }, subscribeToStream: function subscribeToStream( @@ -169,16 +201,18 @@ export const useChatStore = create((set, get) => ({ onChunk, skipReplay = false, ) { - const { activeStreams } = get(); + const state = get(); + const stream = state.activeStreams.get(sessionId); - const stream = activeStreams.get(sessionId); if (stream) { if (!skipReplay) { for (const chunk of stream.chunks) { onChunk(chunk); } } + stream.onChunkCallbacks.add(onChunk); + return function unsubscribe() { stream.onChunkCallbacks.delete(onChunk); }; 
@@ -204,7 +238,12 @@ export const useChatStore = create((set, get) => ({ }, clearCompletedStream: function clearCompletedStream(sessionId) { - get().completedStreams.delete(sessionId); + const state = get(); + if (!state.completedStreams.has(sessionId)) return; + + const newCompletedStreams = new Map(state.completedStreams); + newCompletedStreams.delete(sessionId); + set({ completedStreams: newCompletedStreams }); }, isStreaming: function isStreaming(sessionId) { @@ -213,11 +252,21 @@ export const useChatStore = create((set, get) => ({ }, registerActiveSession: function registerActiveSession(sessionId) { - get().activeSessions.add(sessionId); + const state = get(); + if (state.activeSessions.has(sessionId)) return; + + const newActiveSessions = new Set(state.activeSessions); + newActiveSessions.add(sessionId); + set({ activeSessions: newActiveSessions }); }, unregisterActiveSession: function unregisterActiveSession(sessionId) { - get().activeSessions.delete(sessionId); + const state = get(); + if (!state.activeSessions.has(sessionId)) return; + + const newActiveSessions = new Set(state.activeSessions); + newActiveSessions.delete(sessionId); + set({ activeSessions: newActiveSessions }); }, isSessionActive: function isSessionActive(sessionId) { @@ -225,10 +274,16 @@ export const useChatStore = create((set, get) => ({ }, onStreamComplete: function onStreamComplete(callback) { - const { streamCompleteCallbacks } = get(); - streamCompleteCallbacks.add(callback); + const state = get(); + const newCallbacks = new Set(state.streamCompleteCallbacks); + newCallbacks.add(callback); + set({ streamCompleteCallbacks: newCallbacks }); + return function unsubscribe() { - streamCompleteCallbacks.delete(callback); + const currentState = get(); + const cleanedCallbacks = new Set(currentState.streamCompleteCallbacks); + cleanedCallbacks.delete(callback); + set({ streamCompleteCallbacks: cleanedCallbacks }); }; }, })); diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx index 0fee33dbc0..29e3a60a8c 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx @@ -126,10 +126,6 @@ export function ChatMessage({ [displayContent, message], ); - function isLongResponse(content: string): boolean { - return content.split("\n").length > 5; - } - const handleTryAgain = useCallback(() => { if (message.type !== "message" || !onSendMessage) return; onSendMessage(message.content, message.role === "user"); @@ -358,7 +354,7 @@ export function ChatMessage({ )} - {!isUser && isFinalMessage && isLongResponse(displayContent) && ( + {!isUser && isFinalMessage && !isStreaming && (
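
The recurring pattern in the `chat-store` changes above is copy-on-write for the `Map`/`Set` fields: Zustand notifies subscribers by reference comparison, so mutating a `Map` in place (as the old `get().completedStreams.delete(sessionId)` did) changes the data without ever triggering a re-render. A stripped-down sketch of the pattern, using the same Zustand API as this store:

```typescript
import { create } from "zustand";

interface StreamsState {
  completedStreams: Map<string, { completedAt: number }>;
  markCompleted: (sessionId: string) => void;
}

// Copy-on-write: clone the Map, mutate the clone, then `set` it so the
// field gets a new reference and subscribers are notified.
export const useStreamsStore = create<StreamsState>((set, get) => ({
  completedStreams: new Map(),
  markCompleted(sessionId) {
    const next = new Map(get().completedStreams);
    next.set(sessionId, { completedAt: Date.now() });
    set({ completedStreams: next });
  },
}));
```

Note also the early-return guards in the diff (e.g. `if (!state.completedStreams.has(sessionId)) return;`): they skip the clone-and-set entirely when nothing would change, which is what avoids the unnecessary sidebar repaints this PR targets.
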
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx index f062df1397..dec221338a 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx @@ -16,6 +16,7 @@ export interface ChatContainerProps { initialPrompt?: string; className?: string; onStreamingChange?: (isStreaming: boolean) => void; + onOperationStarted?: () => void; } export function ChatContainer({ @@ -24,6 +25,7 @@ export function ChatContainer({ initialPrompt, className, onStreamingChange, + onOperationStarted, }: ChatContainerProps) { const { messages, @@ -38,6 +40,7 @@ export function ChatContainer({ sessionId, initialMessages, initialPrompt, + onOperationStarted, }); useEffect(() => { diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts index f406d33db4..f3cac01f96 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts @@ -22,6 +22,7 @@ export interface HandlerDependencies { setIsStreamingInitiated: Dispatch>; setIsRegionBlockedModalOpen: Dispatch>; sessionId: string; + onOperationStarted?: () => void; } export function isRegionBlockedError(chunk: StreamChunk): boolean { @@ -163,6 +164,11 @@ export function handleToolResponse( } return; } + // Trigger polling when operation_started is received + if (responseMessage.type === "operation_started") { + deps.onOperationStarted?.(); + } + deps.setMessages((prev) => { const toolCallIndex = prev.findIndex( (msg) => msg.type === "tool_call" && msg.toolId === chunk.tool_id, diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts index 83730cc308..46f384d055 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts @@ -14,16 +14,40 @@ import { processInitialMessages, } from "./helpers"; +// Helper to generate deduplication key for a message +function getMessageKey(msg: ChatMessageData): string { + if (msg.type === "message") { + // Don't include timestamp - dedupe by role + content only + // This handles the case where local and server timestamps differ + // Server messages are authoritative, so duplicates from local state are filtered + return `msg:${msg.role}:${msg.content}`; + } else if (msg.type === "tool_call") { + return `toolcall:${msg.toolId}`; + } else if (msg.type === "tool_response") { + return `toolresponse:${(msg as any).toolId}`; + } else if ( + msg.type === "operation_started" || + msg.type === "operation_pending" || + msg.type === "operation_in_progress" + ) { + return `op:${(msg as any).toolId || (msg as any).operationId || (msg as any).toolCallId || ""}:${msg.toolName}`; + } else { + return `${msg.type}:${JSON.stringify(msg).slice(0, 100)}`; + } +} + interface Args { sessionId: string | null; initialMessages: 
SessionDetailResponse["messages"]; initialPrompt?: string; + onOperationStarted?: () => void; } export function useChatContainer({ sessionId, initialMessages, initialPrompt, + onOperationStarted, }: Args) { const [messages, setMessages] = useState([]); const [streamingChunks, setStreamingChunks] = useState([]); @@ -73,13 +97,20 @@ export function useChatContainer({ setIsRegionBlockedModalOpen, sessionId, setIsStreamingInitiated, + onOperationStarted, }); setIsStreamingInitiated(true); const skipReplay = initialMessages.length > 0; return subscribeToStream(sessionId, dispatcher, skipReplay); }, - [sessionId, stopStreaming, activeStreams, subscribeToStream], + [ + sessionId, + stopStreaming, + activeStreams, + subscribeToStream, + onOperationStarted, + ], ); // Collect toolIds from completed tool results in initialMessages @@ -130,12 +161,19 @@ export function useChatContainer({ ); // Combine initial messages from backend with local streaming messages, - // then deduplicate to prevent duplicates when polling refreshes initialMessages + // Server messages maintain correct order; only append truly new local messages const allMessages = useMemo(() => { const processedInitial = processInitialMessages(initialMessages); - // Filter local messages to remove operation messages for completed tools - const filteredLocalMessages = messages.filter((msg) => { + // Build a set of keys from server messages for deduplication + const serverKeys = new Set(); + for (const msg of processedInitial) { + serverKeys.add(getMessageKey(msg)); + } + + // Filter local messages: remove duplicates and completed operation messages + const newLocalMessages = messages.filter((msg) => { + // Remove operation messages for completed tools if ( msg.type === "operation_started" || msg.type === "operation_pending" || @@ -143,48 +181,17 @@ export function useChatContainer({ ) { const toolId = (msg as any).toolId || (msg as any).toolCallId; if (toolId && completedToolIds.has(toolId)) { - return false; // Filter out - operation completed + return false; } } - return true; + // Remove messages that already exist in server data + const key = getMessageKey(msg); + return !serverKeys.has(key); }); - const combined = [...processedInitial, ...filteredLocalMessages]; - - // Deduplicate by content+role+timestamp. When initialMessages is refreshed via polling, - // it may contain messages that are also in the local `messages` state. - // Including timestamp prevents dropping legitimate repeated messages (e.g., user sends "yes" twice) - const seen = new Set(); - return combined.filter((msg) => { - // Create a key based on type, role, content, and timestamp for deduplication - let key: string; - if (msg.type === "message") { - // Use timestamp (rounded to nearest second) to allow slight variations - // while still catching true duplicates from SSE/polling overlap - const ts = msg.timestamp - ? 
Math.floor(new Date(msg.timestamp).getTime() / 1000) - : ""; - key = `msg:${msg.role}:${ts}:${msg.content}`; - } else if (msg.type === "tool_call") { - key = `toolcall:${msg.toolId}`; - } else if ( - msg.type === "operation_started" || - msg.type === "operation_pending" || - msg.type === "operation_in_progress" - ) { - // Dedupe operation messages by toolId or operationId - key = `op:${(msg as any).toolId || (msg as any).operationId || (msg as any).toolCallId || ""}:${msg.toolName}`; - } else { - // For other types, use a combination of type and first few fields - key = `${msg.type}:${JSON.stringify(msg).slice(0, 100)}`; - } - if (seen.has(key)) { - return false; - } - seen.add(key); - return true; - }); - }, [initialMessages, messages]); + // Server messages first (correct order), then new local messages + return [...processedInitial, ...newLocalMessages]; + }, [initialMessages, messages, completedToolIds]); async function sendMessage( content: string, @@ -217,6 +224,7 @@ export function useChatContainer({ setIsRegionBlockedModalOpen, sessionId, setIsStreamingInitiated, + onOperationStarted, }); try { diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts b/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts index f6b2031059..124301abc4 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts @@ -26,6 +26,7 @@ export function useChat({ urlSessionId }: UseChatArgs = {}) { claimSession, clearSession: clearSessionBase, loadSession, + startPollingForOperation, } = useChatSession({ urlSessionId, autoCreate: false, @@ -94,5 +95,6 @@ export function useChat({ urlSessionId }: UseChatArgs = {}) { loadSession, sessionId: sessionIdFromHook, showLoader, + startPollingForOperation, }; } diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts b/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts index 3fe4f801c6..936a49936c 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts @@ -103,9 +103,14 @@ export function useChatSession({ } }, [createError, loadError]); + // Track if we should be polling (set by external callers when they receive operation_started via SSE) + const [forcePolling, setForcePolling] = useState(false); + // Track if we've seen server acknowledge the pending operation (to avoid clearing forcePolling prematurely) + const hasSeenServerPendingRef = useRef(false); + // Check if there are any pending operations in the messages // Must check all operation types: operation_pending, operation_started, operation_in_progress - const hasPendingOperations = useMemo(() => { + const hasPendingOperationsFromServer = useMemo(() => { if (!messages || messages.length === 0) return false; const pendingTypes = new Set([ "operation_pending", @@ -126,6 +131,35 @@ export function useChatSession({ }); }, [messages]); + // Track when server has acknowledged the pending operation + useEffect(() => { + if (hasPendingOperationsFromServer) { + hasSeenServerPendingRef.current = true; + } + }, [hasPendingOperationsFromServer]); + + // Combined: poll if server has pending ops OR if we received operation_started via SSE + const hasPendingOperations = hasPendingOperationsFromServer || forcePolling; + + // Clear forcePolling only after server has acknowledged AND completed the operation + useEffect(() => { + if ( 
+ forcePolling && + !hasPendingOperationsFromServer && + hasSeenServerPendingRef.current + ) { + // Server acknowledged the operation and it's now complete + setForcePolling(false); + hasSeenServerPendingRef.current = false; + } + }, [forcePolling, hasPendingOperationsFromServer]); + + // Function to trigger polling (called when operation_started is received via SSE) + function startPollingForOperation() { + setForcePolling(true); + hasSeenServerPendingRef.current = false; // Reset for new operation + } + // Refresh sessions list when a pending operation completes // (hasPendingOperations transitions from true to false) const prevHasPendingOperationsRef = useRef(hasPendingOperations); @@ -144,7 +178,8 @@ export function useChatSession({ [hasPendingOperations, sessionId, queryClient], ); - // Poll for updates when there are pending operations (long poll - 10s intervals with backoff) + // Poll for updates when there are pending operations + // Backoff: 2s, 4s, 6s, 8s, 10s, ... up to 30s max const pollAttemptRef = useRef(0); const hasPendingOperationsRef = useRef(hasPendingOperations); hasPendingOperationsRef.current = hasPendingOperations; @@ -159,27 +194,17 @@ export function useChatSession({ let cancelled = false; let timeoutId: ReturnType | null = null; - // Calculate delay with exponential backoff: 10s, 15s, 20s, 25s, 30s (max) - const baseDelay = 10000; - const maxDelay = 30000; - function schedule() { - const delay = Math.min( - baseDelay + pollAttemptRef.current * 5000, - maxDelay, - ); + // 2s, 4s, 6s, 8s, 10s, ... 30s (max) + const delay = Math.min((pollAttemptRef.current + 1) * 2000, 30000); timeoutId = setTimeout(async () => { if (cancelled) return; - console.info( - `[useChatSession] Polling for pending operation updates (attempt ${pollAttemptRef.current + 1})`, - ); pollAttemptRef.current += 1; try { await refetch(); } catch (err) { console.error("[useChatSession] Poll failed:", err); } finally { - // Continue polling if still pending and not cancelled if (!cancelled && hasPendingOperationsRef.current) { schedule(); } @@ -329,6 +354,7 @@ export function useChatSession({ refreshSession, claimSession, clearSession, + startPollingForOperation, }; } From 09539839441e86f0684ed6bc5bebefe7ab4bf03f Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Wed, 28 Jan 2026 01:22:46 -0600 Subject: [PATCH 09/25] feat(platform): disable onboarding redirects and add $5 signup bonus (#11862) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Disable automatic onboarding redirects on signup/login while keeping the checklist/wallet functional. Users now receive $5 (500 credits) on their first visit to /copilot. ### Changes 🏗️ - **Frontend**: `shouldShowOnboarding()` now returns `false`, disabling auto-redirects to `/onboarding` - **Backend**: Added `VISIT_COPILOT` onboarding step with 500 credit ($5) reward - **Frontend**: Copilot page automatically completes `VISIT_COPILOT` step on mount - **Database**: Migration to add `VISIT_COPILOT` to `OnboardingStep` enum NOTE: /onboarding/1-welcome -> /library now as shouldShowOnboardin is always false Users land directly on `/copilot` after signup/login and receive $5 invisibly (not shown in checklist UI). 
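
Because the bonus rides on the existing onboarding-step machinery, no new idempotency logic is needed: the step can be reported on every page mount and the backend only rewards the first completion. A sketch of the frontend side (the hook name here is hypothetical; `completeStep` and the `VISIT_COPILOT` step are the ones added in this PR, and the actual effect lives in `useCopilotPage` in the diff below):

```tsx
import { useEffect } from "react";
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { useOnboarding } from "@/providers/onboarding/onboarding-provider";

// Hypothetical extraction of the useCopilotPage effect: report the step on
// mount; the backend records completed steps, so the 500-credit reward is
// granted at most once per user.
export function useVisitCopilotBonus() {
  const { isLoggedIn } = useSupabase();
  const { completeStep } = useOnboarding();

  useEffect(() => {
    if (isLoggedIn) completeStep("VISIT_COPILOT");
  }, [completeStep, isLoggedIn]);
}
```
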
### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] New user signup (email/password) → lands on `/copilot`, wallet shows 500 credits - [x] Verified credits are only granted once (idempotent via onboarding reward mechanism) - [x] Existing user login (already granted flag set) → lands on `/copilot`, no duplicate credits - [x] Checklist/wallet remains functional #### For configuration changes: - [x] `.env.default` is updated or already compatible with my changes - [x] `docker-compose.yml` is updated or already compatible with my changes - [x] I have included a list of my configuration changes in the PR description (under **Changes**) No configuration changes required. --- OPEN-2967 🤖 Generated with [Claude Code](https://claude.ai/code) --- > [!NOTE] > Introduces a new onboarding step and adjusts onboarding flow. > > - Adds `VISIT_COPILOT` onboarding step (+500 credits) with DB enum migration and API/type updates > - Copilot page auto-completes `VISIT_COPILOT` on mount to grant the welcome bonus > - Changes `/onboarding/enabled` to require user context and return `false` when `CHAT` feature is enabled (skips legacy onboarding) > - Wallet now refreshes credits on any onboarding `step_completed` notification; confetti limited to visible tasks > - Test flows updated to accept redirects to `copilot`/`library` and verify authenticated state > > Written by [Cursor Bugbot](https://cursor.com/dashboard?tab=bugbot) for commit ec5a5a4dfdd76f8bd9b5918c38cee9b9d6832247. This will update automatically on new commits. Configure [here](https://cursor.com/dashboard?tab=bugbot). --------- Co-authored-by: Claude Opus 4.5 Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: claude[bot] <41898282+claude[bot]@users.noreply.github.com> Co-authored-by: Nicholas Tindle --- .../backend/backend/api/features/v1.py | 8 ++++-- .../backend/backend/data/onboarding.py | 4 +++ .../migration.sql | 2 ++ autogpt_platform/backend/schema.prisma | 1 + .../app/(platform)/copilot/useCopilotPage.ts | 9 +++++++ .../frontend/src/app/api/openapi.json | 2 ++ .../Navbar/components/Wallet/Wallet.tsx | 14 +++++++---- .../src/lib/autogpt-server-api/types.ts | 1 + .../frontend/src/tests/pages/login.page.ts | 6 ++++- .../frontend/src/tests/utils/signup.ts | 25 ++++++++++++------- 10 files changed, 55 insertions(+), 17 deletions(-) create mode 100644 autogpt_platform/backend/migrations/20260127211502_add_visit_copilot_onboarding_step/migration.sql diff --git a/autogpt_platform/backend/backend/api/features/v1.py b/autogpt_platform/backend/backend/api/features/v1.py index 51789f9e2b..62b532089c 100644 --- a/autogpt_platform/backend/backend/api/features/v1.py +++ b/autogpt_platform/backend/backend/api/features/v1.py @@ -265,9 +265,13 @@ async def get_onboarding_agents( "/onboarding/enabled", summary="Is onboarding enabled", tags=["onboarding", "public"], - dependencies=[Security(requires_user)], ) -async def is_onboarding_enabled() -> bool: +async def is_onboarding_enabled( + user_id: Annotated[str, Security(get_user_id)], +) -> bool: + # If chat is enabled for user, skip legacy onboarding + if await is_feature_enabled(Flag.CHAT, user_id, False): + return False return await onboarding_enabled() diff --git a/autogpt_platform/backend/backend/data/onboarding.py b/autogpt_platform/backend/backend/data/onboarding.py index 6a842d1022..4af8e8dffd 100644 --- 
a/autogpt_platform/backend/backend/data/onboarding.py +++ b/autogpt_platform/backend/backend/data/onboarding.py @@ -41,6 +41,7 @@ FrontendOnboardingStep = Literal[ OnboardingStep.AGENT_NEW_RUN, OnboardingStep.AGENT_INPUT, OnboardingStep.CONGRATS, + OnboardingStep.VISIT_COPILOT, OnboardingStep.MARKETPLACE_VISIT, OnboardingStep.BUILDER_OPEN, ] @@ -122,6 +123,9 @@ async def update_user_onboarding(user_id: str, data: UserOnboardingUpdate): async def _reward_user(user_id: str, onboarding: UserOnboarding, step: OnboardingStep): reward = 0 match step: + # Welcome bonus for visiting copilot ($5 = 500 credits) + case OnboardingStep.VISIT_COPILOT: + reward = 500 # Reward user when they clicked New Run during onboarding # This is because they need credits before scheduling a run (next step) # This is seen as a reward for the GET_RESULTS step in the wallet diff --git a/autogpt_platform/backend/migrations/20260127211502_add_visit_copilot_onboarding_step/migration.sql b/autogpt_platform/backend/migrations/20260127211502_add_visit_copilot_onboarding_step/migration.sql new file mode 100644 index 0000000000..6a08d9231b --- /dev/null +++ b/autogpt_platform/backend/migrations/20260127211502_add_visit_copilot_onboarding_step/migration.sql @@ -0,0 +1,2 @@ +-- AlterEnum +ALTER TYPE "OnboardingStep" ADD VALUE 'VISIT_COPILOT'; diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma index de94600820..2c52528e3f 100644 --- a/autogpt_platform/backend/schema.prisma +++ b/autogpt_platform/backend/schema.prisma @@ -81,6 +81,7 @@ enum OnboardingStep { AGENT_INPUT CONGRATS // First Wins + VISIT_COPILOT GET_RESULTS MARKETPLACE_VISIT MARKETPLACE_ADD_AGENT diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts index 38796946f4..e4713cd24a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts @@ -5,6 +5,7 @@ import { import { useToast } from "@/components/molecules/Toast/use-toast"; import { getHomepageRoute } from "@/lib/constants"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; +import { useOnboarding } from "@/providers/onboarding/onboarding-provider"; import { Flag, type FlagValues, @@ -25,12 +26,20 @@ export function useCopilotPage() { const queryClient = useQueryClient(); const { user, isLoggedIn, isUserLoading } = useSupabase(); const { toast } = useToast(); + const { completeStep } = useOnboarding(); const { urlSessionId, setUrlSessionId } = useCopilotSessionId(); const setIsStreaming = useCopilotStore((s) => s.setIsStreaming); const isCreating = useCopilotStore((s) => s.isCreatingSession); const setIsCreating = useCopilotStore((s) => s.setIsCreatingSession); + // Complete VISIT_COPILOT onboarding step to grant $5 welcome bonus + useEffect(() => { + if (isLoggedIn) { + completeStep("VISIT_COPILOT"); + } + }, [completeStep, isLoggedIn]); + const isChatEnabled = useGetFlag(Flag.CHAT); const flags = useFlags(); const homepageRoute = getHomepageRoute(isChatEnabled); diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index b4e2bc80bd..d1ecd91702 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -4594,6 +4594,7 @@ "AGENT_NEW_RUN", "AGENT_INPUT", "CONGRATS", + "VISIT_COPILOT", "MARKETPLACE_VISIT", "BUILDER_OPEN" ], @@ -8754,6 
+8755,7 @@ "AGENT_NEW_RUN", "AGENT_INPUT", "CONGRATS", + "VISIT_COPILOT", "GET_RESULTS", "MARKETPLACE_VISIT", "MARKETPLACE_ADD_AGENT", diff --git a/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx b/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx index 0a3c7de6c8..4a25c84f92 100644 --- a/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx +++ b/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx @@ -255,13 +255,18 @@ export function Wallet() { (notification: WebSocketNotification) => { if ( notification.type !== "onboarding" || - notification.event !== "step_completed" || - !walletRef.current + notification.event !== "step_completed" ) { return; } - // Only trigger confetti for tasks that are in groups + // Always refresh credits when any onboarding step completes + fetchCredits(); + + // Only trigger confetti for tasks that are in displayed groups + if (!walletRef.current) { + return; + } const taskIds = groups .flatMap((group) => group.tasks) .map((task) => task.id); @@ -274,7 +279,6 @@ export function Wallet() { return; } - fetchCredits(); party.confetti(walletRef.current, { count: 30, spread: 120, @@ -284,7 +288,7 @@ export function Wallet() { modules: [fadeOut], }); }, - [fetchCredits, fadeOut], + [fetchCredits, fadeOut, groups], ); // WebSocket setup for onboarding notifications diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts index 82c03bc9f1..2d583d2062 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts @@ -1003,6 +1003,7 @@ export type OnboardingStep = | "AGENT_INPUT" | "CONGRATS" // First Wins + | "VISIT_COPILOT" | "GET_RESULTS" | "MARKETPLACE_VISIT" | "MARKETPLACE_ADD_AGENT" diff --git a/autogpt_platform/frontend/src/tests/pages/login.page.ts b/autogpt_platform/frontend/src/tests/pages/login.page.ts index 9082cc6219..adcb8d908b 100644 --- a/autogpt_platform/frontend/src/tests/pages/login.page.ts +++ b/autogpt_platform/frontend/src/tests/pages/login.page.ts @@ -37,9 +37,13 @@ export class LoginPage { this.page.on("load", (page) => console.log(`ℹ️ Now at URL: ${page.url()}`)); // Start waiting for navigation before clicking + // Wait for redirect to marketplace, onboarding, library, or copilot (new landing pages) const leaveLoginPage = this.page .waitForURL( - (url) => /^\/(marketplace|onboarding(\/.*)?)?$/.test(url.pathname), + (url: URL) => + /^\/(marketplace|onboarding(\/.*)?|library|copilot)?$/.test( + url.pathname, + ), { timeout: 10_000 }, ) .catch((reason) => { diff --git a/autogpt_platform/frontend/src/tests/utils/signup.ts b/autogpt_platform/frontend/src/tests/utils/signup.ts index 7c8fdbe01b..192a9129b9 100644 --- a/autogpt_platform/frontend/src/tests/utils/signup.ts +++ b/autogpt_platform/frontend/src/tests/utils/signup.ts @@ -36,14 +36,16 @@ export async function signupTestUser( const signupButton = getButton("Sign up"); await signupButton.click(); - // Wait for successful signup - could redirect to onboarding or marketplace + // Wait for successful signup - could redirect to various pages depending on onboarding state try { - // Wait for either onboarding or marketplace redirect - await Promise.race([ - page.waitForURL(/\/onboarding/, { timeout: 15000 }), - page.waitForURL(/\/marketplace/, { timeout: 15000 }), - ]); + // Wait for redirect to onboarding, 
marketplace, copilot, or library + // Use a single waitForURL with a callback to avoid Promise.race race conditions + await page.waitForURL( + (url: URL) => + /\/(onboarding|marketplace|copilot|library)/.test(url.pathname), + { timeout: 15000 }, + ); } catch (error) { console.error( "❌ Timeout waiting for redirect, current URL:", @@ -54,14 +56,19 @@ export async function signupTestUser( const currentUrl = page.url(); - // Handle onboarding or marketplace redirect + // Handle onboarding redirect if needed if (currentUrl.includes("/onboarding") && ignoreOnboarding) { await page.goto("http://localhost:3000/marketplace"); await page.waitForLoadState("domcontentloaded", { timeout: 10000 }); } - // Verify we're on the expected final page - if (ignoreOnboarding || currentUrl.includes("/marketplace")) { + // Verify we're on an expected final page and user is authenticated + if (currentUrl.includes("/copilot") || currentUrl.includes("/library")) { + // For copilot/library landing pages, just verify user is authenticated + await page + .getByTestId("profile-popout-menu-trigger") + .waitFor({ state: "visible", timeout: 10000 }); + } else if (ignoreOnboarding || currentUrl.includes("/marketplace")) { // Verify we're on marketplace await page .getByText( From d855f79874ae7a7846ea6f30bce96b78986727c0 Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Wed, 28 Jan 2026 12:28:27 -0600 Subject: [PATCH 10/25] fix(platform): reduce Sentry alert spam for expected errors (#11872) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary - Add `InvalidInputError` for validation errors (search term too long, invalid pagination) - returns 400 instead of 500 - Remove redundant try/catch blocks in library routes - global exception handlers already handle `ValueError`→400 and `NotFoundError`→404 - Aggregate embedding backfill errors and log once at the end instead of per content type to prevent Sentry issue spam ## Test plan - [x] Verify validation errors (search term >100 chars) return 400 Bad Request - [x] Verify NotFoundError still returns 404 - [x] Verify embedding errors are logged once at the end with aggregated counts Fixes AUTOGPT-SERVER-7K5, BUILDER-6NC --------- Co-authored-by: Swifty --- .../backend/api/features/library/db.py | 8 +- .../api/features/library/routes/agents.py | 230 +++--------------- .../api/features/library/routes_test.py | 48 ---- .../backend/api/features/store/embeddings.py | 23 +- .../backend/backend/util/exceptions.py | 6 + .../frontend/src/app/api/openapi.json | 39 +-- 6 files changed, 67 insertions(+), 287 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/library/db.py b/autogpt_platform/backend/backend/api/features/library/db.py index 18d535d896..872fe66b28 100644 --- a/autogpt_platform/backend/backend/api/features/library/db.py +++ b/autogpt_platform/backend/backend/api/features/library/db.py @@ -21,7 +21,7 @@ from backend.data.model import CredentialsMetaInput from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.integrations.webhooks.graph_lifecycle_hooks import on_graph_activate from backend.util.clients import get_scheduler_client -from backend.util.exceptions import DatabaseError, NotFoundError +from backend.util.exceptions import DatabaseError, InvalidInputError, NotFoundError from backend.util.json import SafeJson from backend.util.models import Pagination from backend.util.settings import Config @@ -64,11 +64,11 @@ async def list_library_agents( if page < 1 or page_size < 1: 
logger.warning(f"Invalid pagination: page={page}, page_size={page_size}") - raise DatabaseError("Invalid pagination input") + raise InvalidInputError("Invalid pagination input") if search_term and len(search_term.strip()) > 100: logger.warning(f"Search term too long: {repr(search_term)}") - raise DatabaseError("Search term is too long") + raise InvalidInputError("Search term is too long") where_clause: prisma.types.LibraryAgentWhereInput = { "userId": user_id, @@ -175,7 +175,7 @@ async def list_favorite_library_agents( if page < 1 or page_size < 1: logger.warning(f"Invalid pagination: page={page}, page_size={page_size}") - raise DatabaseError("Invalid pagination input") + raise InvalidInputError("Invalid pagination input") where_clause: prisma.types.LibraryAgentWhereInput = { "userId": user_id, diff --git a/autogpt_platform/backend/backend/api/features/library/routes/agents.py b/autogpt_platform/backend/backend/api/features/library/routes/agents.py index 38c34dd3b8..fa3d1a0f0c 100644 --- a/autogpt_platform/backend/backend/api/features/library/routes/agents.py +++ b/autogpt_platform/backend/backend/api/features/library/routes/agents.py @@ -1,4 +1,3 @@ -import logging from typing import Literal, Optional import autogpt_libs.auth as autogpt_auth_lib @@ -6,15 +5,11 @@ from fastapi import APIRouter, Body, HTTPException, Query, Security, status from fastapi.responses import Response from prisma.enums import OnboardingStep -import backend.api.features.store.exceptions as store_exceptions from backend.data.onboarding import complete_onboarding_step -from backend.util.exceptions import DatabaseError, NotFoundError from .. import db as library_db from .. import model as library_model -logger = logging.getLogger(__name__) - router = APIRouter( prefix="/agents", tags=["library", "private"], @@ -26,10 +21,6 @@ router = APIRouter( "", summary="List Library Agents", response_model=library_model.LibraryAgentResponse, - responses={ - 200: {"description": "List of library agents"}, - 500: {"description": "Server error", "content": {"application/json": {}}}, - }, ) async def list_library_agents( user_id: str = Security(autogpt_auth_lib.get_user_id), @@ -53,43 +44,19 @@ async def list_library_agents( ) -> library_model.LibraryAgentResponse: """ Get all agents in the user's library (both created and saved). - - Args: - user_id: ID of the authenticated user. - search_term: Optional search term to filter agents by name/description. - filter_by: List of filters to apply (favorites, created by user). - sort_by: List of sorting criteria (created date, updated date). - page: Page number to retrieve. - page_size: Number of agents per page. - - Returns: - A LibraryAgentResponse containing agents and pagination metadata. - - Raises: - HTTPException: If a server/database error occurs. 
""" - try: - return await library_db.list_library_agents( - user_id=user_id, - search_term=search_term, - sort_by=sort_by, - page=page, - page_size=page_size, - ) - except Exception as e: - logger.error(f"Could not list library agents for user #{user_id}: {e}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=str(e), - ) from e + return await library_db.list_library_agents( + user_id=user_id, + search_term=search_term, + sort_by=sort_by, + page=page, + page_size=page_size, + ) @router.get( "/favorites", summary="List Favorite Library Agents", - responses={ - 500: {"description": "Server error", "content": {"application/json": {}}}, - }, ) async def list_favorite_library_agents( user_id: str = Security(autogpt_auth_lib.get_user_id), @@ -106,30 +73,12 @@ async def list_favorite_library_agents( ) -> library_model.LibraryAgentResponse: """ Get all favorite agents in the user's library. - - Args: - user_id: ID of the authenticated user. - page: Page number to retrieve. - page_size: Number of agents per page. - - Returns: - A LibraryAgentResponse containing favorite agents and pagination metadata. - - Raises: - HTTPException: If a server/database error occurs. """ - try: - return await library_db.list_favorite_library_agents( - user_id=user_id, - page=page, - page_size=page_size, - ) - except Exception as e: - logger.error(f"Could not list favorite library agents for user #{user_id}: {e}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=str(e), - ) from e + return await library_db.list_favorite_library_agents( + user_id=user_id, + page=page, + page_size=page_size, + ) @router.get("/{library_agent_id}", summary="Get Library Agent") @@ -162,10 +111,6 @@ async def get_library_agent_by_graph_id( summary="Get Agent By Store ID", tags=["store", "library"], response_model=library_model.LibraryAgent | None, - responses={ - 200: {"description": "Library agent found"}, - 404: {"description": "Agent not found"}, - }, ) async def get_library_agent_by_store_listing_version_id( store_listing_version_id: str, @@ -174,32 +119,15 @@ async def get_library_agent_by_store_listing_version_id( """ Get Library Agent from Store Listing Version ID. """ - try: - return await library_db.get_library_agent_by_store_version_id( - store_listing_version_id, user_id - ) - except NotFoundError as e: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=str(e), - ) - except Exception as e: - logger.error(f"Could not fetch library agent from store version ID: {e}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=str(e), - ) from e + return await library_db.get_library_agent_by_store_version_id( + store_listing_version_id, user_id + ) @router.post( "", summary="Add Marketplace Agent", status_code=status.HTTP_201_CREATED, - responses={ - 201: {"description": "Agent added successfully"}, - 404: {"description": "Store listing version not found"}, - 500: {"description": "Server error"}, - }, ) async def add_marketplace_agent_to_library( store_listing_version_id: str = Body(embed=True), @@ -210,59 +138,19 @@ async def add_marketplace_agent_to_library( ) -> library_model.LibraryAgent: """ Add an agent from the marketplace to the user's library. - - Args: - store_listing_version_id: ID of the store listing version to add. - user_id: ID of the authenticated user. - - Returns: - library_model.LibraryAgent: Agent added to the library - - Raises: - HTTPException(404): If the listing version is not found. 
- HTTPException(500): If a server/database error occurs. """ - try: - agent = await library_db.add_store_agent_to_library( - store_listing_version_id=store_listing_version_id, - user_id=user_id, - ) - if source != "onboarding": - await complete_onboarding_step( - user_id, OnboardingStep.MARKETPLACE_ADD_AGENT - ) - return agent - - except store_exceptions.AgentNotFoundError as e: - logger.warning( - f"Could not find store listing version {store_listing_version_id} " - "to add to library" - ) - raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(e)) - except DatabaseError as e: - logger.error(f"Database error while adding agent to library: {e}", e) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail={"message": str(e), "hint": "Inspect DB logs for details."}, - ) from e - except Exception as e: - logger.error(f"Unexpected error while adding agent to library: {e}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail={ - "message": str(e), - "hint": "Check server logs for more information.", - }, - ) from e + agent = await library_db.add_store_agent_to_library( + store_listing_version_id=store_listing_version_id, + user_id=user_id, + ) + if source != "onboarding": + await complete_onboarding_step(user_id, OnboardingStep.MARKETPLACE_ADD_AGENT) + return agent @router.patch( "/{library_agent_id}", summary="Update Library Agent", - responses={ - 200: {"description": "Agent updated successfully"}, - 500: {"description": "Server error"}, - }, ) async def update_library_agent( library_agent_id: str, @@ -271,52 +159,21 @@ async def update_library_agent( ) -> library_model.LibraryAgent: """ Update the library agent with the given fields. - - Args: - library_agent_id: ID of the library agent to update. - payload: Fields to update (auto_update_version, is_favorite, etc.). - user_id: ID of the authenticated user. - - Raises: - HTTPException(500): If a server/database error occurs. 
""" - try: - return await library_db.update_library_agent( - library_agent_id=library_agent_id, - user_id=user_id, - auto_update_version=payload.auto_update_version, - graph_version=payload.graph_version, - is_favorite=payload.is_favorite, - is_archived=payload.is_archived, - settings=payload.settings, - ) - except NotFoundError as e: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=str(e), - ) from e - except DatabaseError as e: - logger.error(f"Database error while updating library agent: {e}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail={"message": str(e), "hint": "Verify DB connection."}, - ) from e - except Exception as e: - logger.error(f"Unexpected error while updating library agent: {e}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail={"message": str(e), "hint": "Check server logs."}, - ) from e + return await library_db.update_library_agent( + library_agent_id=library_agent_id, + user_id=user_id, + auto_update_version=payload.auto_update_version, + graph_version=payload.graph_version, + is_favorite=payload.is_favorite, + is_archived=payload.is_archived, + settings=payload.settings, + ) @router.delete( "/{library_agent_id}", summary="Delete Library Agent", - responses={ - 204: {"description": "Agent deleted successfully"}, - 404: {"description": "Agent not found"}, - 500: {"description": "Server error"}, - }, ) async def delete_library_agent( library_agent_id: str, @@ -324,28 +181,11 @@ async def delete_library_agent( ) -> Response: """ Soft-delete the specified library agent. - - Args: - library_agent_id: ID of the library agent to delete. - user_id: ID of the authenticated user. - - Returns: - 204 No Content if successful. - - Raises: - HTTPException(404): If the agent does not exist. - HTTPException(500): If a server/database error occurs. 
""" - try: - await library_db.delete_library_agent( - library_agent_id=library_agent_id, user_id=user_id - ) - return Response(status_code=status.HTTP_204_NO_CONTENT) - except NotFoundError as e: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=str(e), - ) from e + await library_db.delete_library_agent( + library_agent_id=library_agent_id, user_id=user_id + ) + return Response(status_code=status.HTTP_204_NO_CONTENT) @router.post("/{library_agent_id}/fork", summary="Fork Library Agent") diff --git a/autogpt_platform/backend/backend/api/features/library/routes_test.py b/autogpt_platform/backend/backend/api/features/library/routes_test.py index ca604af760..4d83812891 100644 --- a/autogpt_platform/backend/backend/api/features/library/routes_test.py +++ b/autogpt_platform/backend/backend/api/features/library/routes_test.py @@ -118,21 +118,6 @@ async def test_get_library_agents_success( ) -def test_get_library_agents_error(mocker: pytest_mock.MockFixture, test_user_id: str): - mock_db_call = mocker.patch("backend.api.features.library.db.list_library_agents") - mock_db_call.side_effect = Exception("Test error") - - response = client.get("/agents?search_term=test") - assert response.status_code == 500 - mock_db_call.assert_called_once_with( - user_id=test_user_id, - search_term="test", - sort_by=library_model.LibraryAgentSort.UPDATED_AT, - page=1, - page_size=15, - ) - - @pytest.mark.asyncio async def test_get_favorite_library_agents_success( mocker: pytest_mock.MockFixture, @@ -190,23 +175,6 @@ async def test_get_favorite_library_agents_success( ) -def test_get_favorite_library_agents_error( - mocker: pytest_mock.MockFixture, test_user_id: str -): - mock_db_call = mocker.patch( - "backend.api.features.library.db.list_favorite_library_agents" - ) - mock_db_call.side_effect = Exception("Test error") - - response = client.get("/agents/favorites") - assert response.status_code == 500 - mock_db_call.assert_called_once_with( - user_id=test_user_id, - page=1, - page_size=15, - ) - - def test_add_agent_to_library_success( mocker: pytest_mock.MockFixture, test_user_id: str ): @@ -258,19 +226,3 @@ def test_add_agent_to_library_success( store_listing_version_id="test-version-id", user_id=test_user_id ) mock_complete_onboarding.assert_awaited_once() - - -def test_add_agent_to_library_error(mocker: pytest_mock.MockFixture, test_user_id: str): - mock_db_call = mocker.patch( - "backend.api.features.library.db.add_store_agent_to_library" - ) - mock_db_call.side_effect = Exception("Test error") - - response = client.post( - "/agents", json={"store_listing_version_id": "test-version-id"} - ) - assert response.status_code == 500 - assert "detail" in response.json() # Verify error response structure - mock_db_call.assert_called_once_with( - store_listing_version_id="test-version-id", user_id=test_user_id - ) diff --git a/autogpt_platform/backend/backend/api/features/store/embeddings.py b/autogpt_platform/backend/backend/api/features/store/embeddings.py index 79a9a4e219..434f2fe2ce 100644 --- a/autogpt_platform/backend/backend/api/features/store/embeddings.py +++ b/autogpt_platform/backend/backend/api/features/store/embeddings.py @@ -454,6 +454,7 @@ async def backfill_all_content_types(batch_size: int = 10) -> dict[str, Any]: total_processed = 0 total_success = 0 total_failed = 0 + all_errors: dict[str, int] = {} # Aggregate errors across all content types # Process content types in explicit order processing_order = [ @@ -499,23 +500,12 @@ async def backfill_all_content_types(batch_size: int = 
10) -> dict[str, Any]: success = sum(1 for result in results if result is True) failed = len(results) - success - # Aggregate unique errors to avoid Sentry spam + # Aggregate errors across all content types if failed > 0: - # Group errors by type and message - error_summary: dict[str, int] = {} for result in results: if isinstance(result, Exception): error_key = f"{type(result).__name__}: {str(result)}" - error_summary[error_key] = error_summary.get(error_key, 0) + 1 - - # Log aggregated error summary - error_details = ", ".join( - f"{error} ({count}x)" for error, count in error_summary.items() - ) - logger.error( - f"{content_type.value}: {failed}/{len(results)} embeddings failed. " - f"Errors: {error_details}" - ) + all_errors[error_key] = all_errors.get(error_key, 0) + 1 results_by_type[content_type.value] = { "processed": len(missing_items), @@ -542,6 +532,13 @@ async def backfill_all_content_types(batch_size: int = 10) -> dict[str, Any]: "error": str(e), } + # Log aggregated errors once at the end + if all_errors: + error_details = ", ".join( + f"{error} ({count}x)" for error, count in all_errors.items() + ) + logger.error(f"Embedding backfill errors: {error_details}") + return { "by_type": results_by_type, "totals": { diff --git a/autogpt_platform/backend/backend/util/exceptions.py b/autogpt_platform/backend/backend/util/exceptions.py index 6d0192c0e5..ffda783873 100644 --- a/autogpt_platform/backend/backend/util/exceptions.py +++ b/autogpt_platform/backend/backend/util/exceptions.py @@ -135,6 +135,12 @@ class GraphValidationError(ValueError): ) +class InvalidInputError(ValueError): + """Raised when user input validation fails (e.g., search term too long)""" + + pass + + class DatabaseError(Exception): """Raised when there is an error interacting with the database""" diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index d1ecd91702..a6fdded27f 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -3339,7 +3339,7 @@ "get": { "tags": ["v2", "library", "private"], "summary": "List Library Agents", - "description": "Get all agents in the user's library (both created and saved).\n\nArgs:\n user_id: ID of the authenticated user.\n search_term: Optional search term to filter agents by name/description.\n filter_by: List of filters to apply (favorites, created by user).\n sort_by: List of sorting criteria (created date, updated date).\n page: Page number to retrieve.\n page_size: Number of agents per page.\n\nReturns:\n A LibraryAgentResponse containing agents and pagination metadata.\n\nRaises:\n HTTPException: If a server/database error occurs.", + "description": "Get all agents in the user's library (both created and saved).", "operationId": "getV2List library agents", "security": [{ "HTTPBearerJWT": [] }], "parameters": [ @@ -3394,7 +3394,7 @@ ], "responses": { "200": { - "description": "List of library agents", + "description": "Successful Response", "content": { "application/json": { "schema": { @@ -3413,17 +3413,13 @@ "schema": { "$ref": "#/components/schemas/HTTPValidationError" } } } - }, - "500": { - "description": "Server error", - "content": { "application/json": {} } } } }, "post": { "tags": ["v2", "library", "private"], "summary": "Add Marketplace Agent", - "description": "Add an agent from the marketplace to the user's library.\n\nArgs:\n store_listing_version_id: ID of the store listing version to add.\n user_id: ID of the authenticated 
user.\n\nReturns:\n library_model.LibraryAgent: Agent added to the library\n\nRaises:\n HTTPException(404): If the listing version is not found.\n HTTPException(500): If a server/database error occurs.", + "description": "Add an agent from the marketplace to the user's library.", "operationId": "postV2Add marketplace agent", "security": [{ "HTTPBearerJWT": [] }], "requestBody": { @@ -3438,7 +3434,7 @@ }, "responses": { "201": { - "description": "Agent added successfully", + "description": "Successful Response", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/LibraryAgent" } @@ -3448,7 +3444,6 @@ "401": { "$ref": "#/components/responses/HTTP401NotAuthenticatedError" }, - "404": { "description": "Store listing version not found" }, "422": { "description": "Validation Error", "content": { @@ -3456,8 +3451,7 @@ "schema": { "$ref": "#/components/schemas/HTTPValidationError" } } } - }, - "500": { "description": "Server error" } + } } } }, @@ -3511,7 +3505,7 @@ "get": { "tags": ["v2", "library", "private"], "summary": "List Favorite Library Agents", - "description": "Get all favorite agents in the user's library.\n\nArgs:\n user_id: ID of the authenticated user.\n page: Page number to retrieve.\n page_size: Number of agents per page.\n\nReturns:\n A LibraryAgentResponse containing favorite agents and pagination metadata.\n\nRaises:\n HTTPException: If a server/database error occurs.", + "description": "Get all favorite agents in the user's library.", "operationId": "getV2List favorite library agents", "security": [{ "HTTPBearerJWT": [] }], "parameters": [ @@ -3563,10 +3557,6 @@ "schema": { "$ref": "#/components/schemas/HTTPValidationError" } } } - }, - "500": { - "description": "Server error", - "content": { "application/json": {} } } } } @@ -3588,7 +3578,7 @@ ], "responses": { "200": { - "description": "Library agent found", + "description": "Successful Response", "content": { "application/json": { "schema": { @@ -3604,7 +3594,6 @@ "401": { "$ref": "#/components/responses/HTTP401NotAuthenticatedError" }, - "404": { "description": "Agent not found" }, "422": { "description": "Validation Error", "content": { @@ -3620,7 +3609,7 @@ "delete": { "tags": ["v2", "library", "private"], "summary": "Delete Library Agent", - "description": "Soft-delete the specified library agent.\n\nArgs:\n library_agent_id: ID of the library agent to delete.\n user_id: ID of the authenticated user.\n\nReturns:\n 204 No Content if successful.\n\nRaises:\n HTTPException(404): If the agent does not exist.\n HTTPException(500): If a server/database error occurs.", + "description": "Soft-delete the specified library agent.", "operationId": "deleteV2Delete library agent", "security": [{ "HTTPBearerJWT": [] }], "parameters": [ @@ -3636,11 +3625,9 @@ "description": "Successful Response", "content": { "application/json": { "schema": {} } } }, - "204": { "description": "Agent deleted successfully" }, "401": { "$ref": "#/components/responses/HTTP401NotAuthenticatedError" }, - "404": { "description": "Agent not found" }, "422": { "description": "Validation Error", "content": { @@ -3648,8 +3635,7 @@ "schema": { "$ref": "#/components/schemas/HTTPValidationError" } } } - }, - "500": { "description": "Server error" } + } } }, "get": { @@ -3690,7 +3676,7 @@ "patch": { "tags": ["v2", "library", "private"], "summary": "Update Library Agent", - "description": "Update the library agent with the given fields.\n\nArgs:\n library_agent_id: ID of the library agent to update.\n payload: Fields to update 
(auto_update_version, is_favorite, etc.).\n user_id: ID of the authenticated user.\n\nRaises:\n HTTPException(500): If a server/database error occurs.", + "description": "Update the library agent with the given fields.", "operationId": "patchV2Update library agent", "security": [{ "HTTPBearerJWT": [] }], "parameters": [ @@ -3713,7 +3699,7 @@ }, "responses": { "200": { - "description": "Agent updated successfully", + "description": "Successful Response", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/LibraryAgent" } @@ -3730,8 +3716,7 @@ "schema": { "$ref": "#/components/schemas/HTTPValidationError" } } } - }, - "500": { "description": "Server error" } + } } } }, From e0dfae573288bfcdd367fbd1a284d70639ea8ea8 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Wed, 28 Jan 2026 14:58:02 -0600 Subject: [PATCH 11/25] fix(platform): evaluate chat flag after auth for correct redirect (#11873) Co-authored-by: Zamil Majdy Co-authored-by: Claude Opus 4.5 --- .../backend/backend/api/features/v1.py | 28 +++++++++++++++---- .../src/app/(no-navbar)/onboarding/page.tsx | 16 +++++++---- .../src/app/(platform)/auth/callback/route.ts | 11 ++++++-- .../src/app/(platform)/login/actions.ts | 11 ++++++-- .../src/app/(platform)/login/useLoginPage.ts | 9 ++---- .../src/app/(platform)/signup/actions.ts | 8 +++--- .../app/(platform)/signup/useSignupPage.ts | 1 - .../frontend/src/app/api/helpers.ts | 9 ++++-- .../frontend/src/app/api/openapi.json | 16 +++++++++-- 9 files changed, 75 insertions(+), 34 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/v1.py b/autogpt_platform/backend/backend/api/features/v1.py index 62b532089c..09d3759a65 100644 --- a/autogpt_platform/backend/backend/api/features/v1.py +++ b/autogpt_platform/backend/backend/api/features/v1.py @@ -261,18 +261,36 @@ async def get_onboarding_agents( return await get_recommended_agents(user_id) +class OnboardingStatusResponse(pydantic.BaseModel): + """Response for onboarding status check.""" + + is_onboarding_enabled: bool + is_chat_enabled: bool + + @v1_router.get( "/onboarding/enabled", summary="Is onboarding enabled", tags=["onboarding", "public"], + response_model=OnboardingStatusResponse, ) async def is_onboarding_enabled( user_id: Annotated[str, Security(get_user_id)], -) -> bool: - # If chat is enabled for user, skip legacy onboarding - if await is_feature_enabled(Flag.CHAT, user_id, False): - return False - return await onboarding_enabled() +) -> OnboardingStatusResponse: + # Check if chat is enabled for user + is_chat_enabled = await is_feature_enabled(Flag.CHAT, user_id, False) + + # If chat is enabled, skip legacy onboarding + if is_chat_enabled: + return OnboardingStatusResponse( + is_onboarding_enabled=False, + is_chat_enabled=True, + ) + + return OnboardingStatusResponse( + is_onboarding_enabled=await onboarding_enabled(), + is_chat_enabled=False, + ) @v1_router.post( diff --git a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/page.tsx b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/page.tsx index 1ebfe6b87b..70d9783ccd 100644 --- a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/page.tsx +++ b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/page.tsx @@ -2,8 +2,9 @@ import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; import { useRouter } from "next/navigation"; import { useEffect } from "react"; -import { resolveResponse, shouldShowOnboarding } from "@/app/api/helpers"; +import { resolveResponse, getOnboardingStatus } 
from "@/app/api/helpers"; import { getV1OnboardingState } from "@/app/api/__generated__/endpoints/onboarding/onboarding"; +import { getHomepageRoute } from "@/lib/constants"; export default function OnboardingPage() { const router = useRouter(); @@ -11,10 +12,13 @@ export default function OnboardingPage() { useEffect(() => { async function redirectToStep() { try { - // Check if onboarding is enabled - const isEnabled = await shouldShowOnboarding(); - if (!isEnabled) { - router.replace("/"); + // Check if onboarding is enabled (also gets chat flag for redirect) + const { shouldShowOnboarding, isChatEnabled } = + await getOnboardingStatus(); + const homepageRoute = getHomepageRoute(isChatEnabled); + + if (!shouldShowOnboarding) { + router.replace(homepageRoute); return; } @@ -22,7 +26,7 @@ export default function OnboardingPage() { // Handle completed onboarding if (onboarding.completedSteps.includes("GET_RESULTS")) { - router.replace("/"); + router.replace(homepageRoute); return; } diff --git a/autogpt_platform/frontend/src/app/(platform)/auth/callback/route.ts b/autogpt_platform/frontend/src/app/(platform)/auth/callback/route.ts index a6a07a703f..15be137f63 100644 --- a/autogpt_platform/frontend/src/app/(platform)/auth/callback/route.ts +++ b/autogpt_platform/frontend/src/app/(platform)/auth/callback/route.ts @@ -1,8 +1,9 @@ import { getServerSupabase } from "@/lib/supabase/server/getServerSupabase"; +import { getHomepageRoute } from "@/lib/constants"; import BackendAPI from "@/lib/autogpt-server-api"; import { NextResponse } from "next/server"; import { revalidatePath } from "next/cache"; -import { shouldShowOnboarding } from "@/app/api/helpers"; +import { getOnboardingStatus } from "@/app/api/helpers"; // Handle the callback to complete the user session login export async function GET(request: Request) { @@ -25,11 +26,15 @@ export async function GET(request: Request) { const api = new BackendAPI(); await api.createUser(); - if (await shouldShowOnboarding()) { + // Get onboarding status from backend (includes chat flag evaluated for this user) + const { shouldShowOnboarding, isChatEnabled } = + await getOnboardingStatus(); + if (shouldShowOnboarding) { next = "/onboarding"; revalidatePath("/onboarding", "layout"); } else { - revalidatePath("/", "layout"); + next = getHomepageRoute(isChatEnabled); + revalidatePath(next, "layout"); } } catch (createUserError) { console.error("Error creating user:", createUserError); diff --git a/autogpt_platform/frontend/src/app/(platform)/login/actions.ts b/autogpt_platform/frontend/src/app/(platform)/login/actions.ts index 936c879d69..447a25a41d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/login/actions.ts +++ b/autogpt_platform/frontend/src/app/(platform)/login/actions.ts @@ -1,10 +1,11 @@ "use server"; +import { getHomepageRoute } from "@/lib/constants"; import BackendAPI from "@/lib/autogpt-server-api"; import { getServerSupabase } from "@/lib/supabase/server/getServerSupabase"; import { loginFormSchema } from "@/types/auth"; import * as Sentry from "@sentry/nextjs"; -import { shouldShowOnboarding } from "../../api/helpers"; +import { getOnboardingStatus } from "../../api/helpers"; export async function login(email: string, password: string) { try { @@ -36,11 +37,15 @@ export async function login(email: string, password: string) { const api = new BackendAPI(); await api.createUser(); - const onboarding = await shouldShowOnboarding(); + // Get onboarding status from backend (includes chat flag evaluated for this user) + const { 
shouldShowOnboarding, isChatEnabled } = await getOnboardingStatus(); + const next = shouldShowOnboarding + ? "/onboarding" + : getHomepageRoute(isChatEnabled); return { success: true, - onboarding, + next, }; } catch (err) { Sentry.captureException(err); diff --git a/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts b/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts index 9bde570548..e64cc1858d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts +++ b/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts @@ -97,13 +97,8 @@ export function useLoginPage() { throw new Error(result.error || "Login failed"); } - if (nextUrl) { - router.replace(nextUrl); - } else if (result.onboarding) { - router.replace("/onboarding"); - } else { - router.replace(homepageRoute); - } + // Prefer URL's next parameter, then use backend-determined route + router.replace(nextUrl || result.next || homepageRoute); } catch (error) { toast({ title: diff --git a/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts b/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts index 6d68782e7a..0fbba54b8e 100644 --- a/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts +++ b/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts @@ -5,14 +5,13 @@ import { getServerSupabase } from "@/lib/supabase/server/getServerSupabase"; import { signupFormSchema } from "@/types/auth"; import * as Sentry from "@sentry/nextjs"; import { isWaitlistError, logWaitlistError } from "../../api/auth/utils"; -import { shouldShowOnboarding } from "../../api/helpers"; +import { getOnboardingStatus } from "../../api/helpers"; export async function signup( email: string, password: string, confirmPassword: string, agreeToTerms: boolean, - isChatEnabled: boolean, ) { try { const parsed = signupFormSchema.safeParse({ @@ -59,8 +58,9 @@ export async function signup( await supabase.auth.setSession(data.session); } - const isOnboardingEnabled = await shouldShowOnboarding(); - const next = isOnboardingEnabled + // Get onboarding status from backend (includes chat flag evaluated for this user) + const { shouldShowOnboarding, isChatEnabled } = await getOnboardingStatus(); + const next = shouldShowOnboarding ? 
"/onboarding" : getHomepageRoute(isChatEnabled); diff --git a/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts b/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts index 5bd53ca846..5fa8c2c159 100644 --- a/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts +++ b/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts @@ -108,7 +108,6 @@ export function useSignupPage() { data.password, data.confirmPassword, data.agreeToTerms, - isChatEnabled === true, ); setIsLoading(false); diff --git a/autogpt_platform/frontend/src/app/api/helpers.ts b/autogpt_platform/frontend/src/app/api/helpers.ts index e9a708ba4c..c2104d231a 100644 --- a/autogpt_platform/frontend/src/app/api/helpers.ts +++ b/autogpt_platform/frontend/src/app/api/helpers.ts @@ -175,9 +175,12 @@ export async function resolveResponse< return res.data; } -export async function shouldShowOnboarding() { - const isEnabled = await resolveResponse(getV1IsOnboardingEnabled()); +export async function getOnboardingStatus() { + const status = await resolveResponse(getV1IsOnboardingEnabled()); const onboarding = await resolveResponse(getV1OnboardingState()); const isCompleted = onboarding.completedSteps.includes("CONGRATS"); - return isEnabled && !isCompleted; + return { + shouldShowOnboarding: status.is_onboarding_enabled && !isCompleted, + isChatEnabled: status.is_chat_enabled, + }; } diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index a6fdded27f..2a9db1990d 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -4525,8 +4525,7 @@ "content": { "application/json": { "schema": { - "type": "boolean", - "title": "Response Getv1Is Onboarding Enabled" + "$ref": "#/components/schemas/OnboardingStatusResponse" } } } @@ -8730,6 +8729,19 @@ "title": "OAuthApplicationPublicInfo", "description": "Public information about an OAuth application (for consent screen)" }, + "OnboardingStatusResponse": { + "properties": { + "is_onboarding_enabled": { + "type": "boolean", + "title": "Is Onboarding Enabled" + }, + "is_chat_enabled": { "type": "boolean", "title": "Is Chat Enabled" } + }, + "type": "object", + "required": ["is_onboarding_enabled", "is_chat_enabled"], + "title": "OnboardingStatusResponse", + "description": "Response for onboarding status check." + }, "OnboardingStep": { "type": "string", "enum": [ From 7668c17d9cb59633a73cfbaf3b7854439a013113 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Wed, 28 Jan 2026 23:49:47 -0600 Subject: [PATCH 12/25] feat(platform): add User Workspace for persistent CoPilot file storage (#11867) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implements persistent User Workspace storage for CoPilot, enabling blocks to save and retrieve files across sessions. Files are stored in session-scoped virtual paths (`/sessions/{session_id}/`). 
Fixes SECRT-1833 ### Changes 🏗️ **Database & Storage:** - Add `UserWorkspace` and `UserWorkspaceFile` Prisma models - Implement `WorkspaceStorageBackend` abstraction (GCS for cloud, local filesystem for self-hosted) - Add `workspace_id` and `session_id` fields to `ExecutionContext` **Backend API:** - Add REST endpoints: `GET/POST /api/workspace/files`, `GET/DELETE /api/workspace/files/{id}`, `GET /api/workspace/files/{id}/download` - Add CoPilot tools: `list_workspace_files`, `read_workspace_file`, `write_workspace_file` - Integrate workspace storage into `store_media_file()` - returns `workspace://file-id` references **Block Updates:** - Refactor all file-handling blocks to use unified `ExecutionContext` parameter - Update media-generating blocks to persist outputs to workspace (AIImageGenerator, AIImageCustomizer, FluxKontext, TalkingHead, FAL video, Bannerbear, etc.) **Frontend:** - Render `workspace://` image references in chat via proxy endpoint - Add "AI cannot see this image" overlay indicator **CoPilot Context Mapping:** - Session = Agent (graph_id) = Run (graph_exec_id) - Files scoped to `/sessions/{session_id}/` ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [ ] I have tested my changes according to the test plan: - [ ] Create CoPilot session, generate image with AIImageGeneratorBlock - [ ] Verify image returns `workspace://file-id` (not base64) - [ ] Verify image renders in chat with visibility indicator - [ ] Verify workspace files persist across sessions - [ ] Test list/read/write workspace files via CoPilot tools - [ ] Test local storage backend for self-hosted deployments #### For configuration changes: - [x] `.env.default` is updated or already compatible with my changes - [x] `docker-compose.yml` is updated or already compatible with my changes - [x] I have included a list of my configuration changes in the PR description (under **Changes**) 🤖 Generated with [Claude Code](https://claude.ai/code) --- > [!NOTE] > **Medium Risk** > Introduces a new persistent file-storage surface area (DB tables, storage backends, download API, and chat tools) and rewires `store_media_file()`/block execution context across many blocks, so regressions could impact file handling, access control, or storage costs. > > **Overview** > Adds a **persistent per-user Workspace** (new `UserWorkspace`/`UserWorkspaceFile` models plus `WorkspaceManager` + `WorkspaceStorageBackend` with GCS/local implementations) and wires it into the API via a new `/api/workspace/files/{file_id}/download` route (including header-sanitized `Content-Disposition`) and shutdown lifecycle hooks. > > Extends `ExecutionContext` to carry execution identity + `workspace_id`/`session_id`, updates executor tooling to clone node-specific contexts, and updates `run_block` (CoPilot) to create a session-scoped workspace and synthetic graph/run/node IDs. > > Refactors `store_media_file()` to require `execution_context` + `return_format` and to support `workspace://` references; migrates many media/file-handling blocks and related tests to the new API and to persist generated media as `workspace://...` (or fall back to data URIs outside CoPilot), and adds CoPilot chat tools for listing/reading/writing/deleting workspace files with safeguards against context bloat. > > Written by [Cursor Bugbot](https://cursor.com/dashboard?tab=bugbot) for commit 6abc70f7931ec1d8d4e9b99c49d606b21bf740fa. This will update automatically on new commits. 
Configure [here](https://cursor.com/dashboard?tab=bugbot). --------- Co-authored-by: Claude Opus 4.5 Co-authored-by: Reinier van der Leer --- autogpt_platform/CLAUDE.md | 44 ++ .../backend/api/features/chat/tools/IDEAS.md | 79 +++ .../api/features/chat/tools/__init__.py | 11 + .../backend/api/features/chat/tools/models.py | 6 + .../api/features/chat/tools/run_block.py | 45 +- .../features/chat/tools/workspace_files.py | 620 ++++++++++++++++++ .../api/features/workspace/__init__.py | 1 + .../backend/api/features/workspace/routes.py | 122 ++++ .../backend/backend/api/rest_api.py | 12 + .../backend/blocks/ai_image_customizer.py | 24 +- .../blocks/ai_image_generator_block.py | 26 +- .../blocks/ai_shortform_video_block.py | 80 ++- .../backend/blocks/bannerbear/text_overlay.py | 30 +- .../backend/backend/blocks/basic.py | 27 +- .../backend/blocks/discord/bot_blocks.py | 15 +- .../backend/blocks/fal/ai_video_generator.py | 26 +- .../backend/backend/blocks/flux_kontext.py | 27 +- .../backend/backend/blocks/google/gmail.py | 78 +-- .../backend/backend/blocks/http.py | 20 +- autogpt_platform/backend/backend/blocks/io.py | 14 +- .../backend/backend/blocks/media.py | 71 +- .../backend/backend/blocks/screenshotone.py | 15 +- .../backend/backend/blocks/spreadsheet.py | 13 +- .../backend/backend/blocks/talking_head.py | 24 +- .../test/test_blocks_dos_vulnerability.py | 15 +- .../backend/backend/blocks/test/test_http.py | 33 +- .../backend/backend/blocks/text.py | 16 +- .../backend/backend/data/execution.py | 17 + .../backend/backend/data/workspace.py | 276 ++++++++ .../backend/backend/executor/manager.py | 7 + .../backend/backend/executor/utils.py | 8 + .../backend/backend/executor/utils_test.py | 5 + .../backend/backend/util/cloud_storage.py | 65 +- autogpt_platform/backend/backend/util/file.py | 201 ++++-- .../backend/backend/util/file_test.py | 37 +- .../backend/backend/util/gcs_utils.py | 108 +++ .../backend/backend/util/settings.py | 13 + autogpt_platform/backend/backend/util/test.py | 29 +- .../backend/backend/util/workspace.py | 419 ++++++++++++ .../backend/backend/util/workspace_storage.py | 398 +++++++++++ .../migration.sql | 52 ++ .../migration.sql | 16 + autogpt_platform/backend/schema.prisma | 48 ++ .../frontend/src/app/api/openapi.json | 34 + .../src/app/api/proxy/[...path]/route.ts | 69 ++ .../MarkdownContent/MarkdownContent.tsx | 81 +++ .../components/ToolResponseMessage/helpers.ts | 103 ++- docs/integrations/README.md | 2 +- docs/integrations/block-integrations/basic.md | 8 +- .../block-integrations/multimedia.md | 6 +- docs/platform/block-sdk-guide.md | 44 ++ docs/platform/new_blocks.md | 65 ++ 52 files changed, 3272 insertions(+), 333 deletions(-) create mode 100644 autogpt_platform/backend/backend/api/features/chat/tools/IDEAS.md create mode 100644 autogpt_platform/backend/backend/api/features/chat/tools/workspace_files.py create mode 100644 autogpt_platform/backend/backend/api/features/workspace/__init__.py create mode 100644 autogpt_platform/backend/backend/api/features/workspace/routes.py create mode 100644 autogpt_platform/backend/backend/data/workspace.py create mode 100644 autogpt_platform/backend/backend/util/gcs_utils.py create mode 100644 autogpt_platform/backend/backend/util/workspace.py create mode 100644 autogpt_platform/backend/backend/util/workspace_storage.py create mode 100644 autogpt_platform/backend/migrations/20260127230419_add_user_workspace/migration.sql create mode 100644 autogpt_platform/backend/migrations/20260129011611_remove_workspace_file_source/migration.sql 
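
A minimal sketch of the session-to-run mapping described under **CoPilot Context Mapping** above. The dataclass and helper here are invented for illustration; only the `copilot-session-{session_id}` ID format comes from the `run_block` changes below.

```python
from dataclasses import dataclass

@dataclass
class CopilotExecutionIdentity:
    """Illustrative only: a chat session doubles as agent and run."""

    graph_id: str       # the session acts as the agent
    graph_exec_id: str  # ...and as its one continuous run
    session_id: str

def identity_for(session_id: str) -> CopilotExecutionIdentity:
    synthetic = f"copilot-session-{session_id}"
    return CopilotExecutionIdentity(
        graph_id=synthetic,
        graph_exec_id=synthetic,
        session_id=session_id,
    )

print(identity_for("abc123"))
```

Because the run ID is stable for the whole session, run-scoped memories and files naturally accumulate per chat.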
diff --git a/autogpt_platform/CLAUDE.md b/autogpt_platform/CLAUDE.md index 2c76e7db80..9690178587 100644 --- a/autogpt_platform/CLAUDE.md +++ b/autogpt_platform/CLAUDE.md @@ -194,6 +194,50 @@ ex: do the inputs and outputs tie well together? If you get any pushback or hit complex block conditions check the new_blocks guide in the docs. +**Handling files in blocks with `store_media_file()`:** + +When blocks need to work with files (images, videos, documents), use `store_media_file()` from `backend.util.file`. The `return_format` parameter determines what you get back: + +| Format | Use When | Returns | +|--------|----------|---------| +| `"for_local_processing"` | Processing with local tools (ffmpeg, MoviePy, PIL) | Local file path (e.g., `"image.png"`) | +| `"for_external_api"` | Sending content to external APIs (Replicate, OpenAI) | Data URI (e.g., `"data:image/png;base64,..."`) | +| `"for_block_output"` | Returning output from your block | Smart: `workspace://` in CoPilot, data URI in graphs | + +**Examples:** +```python +# INPUT: Need to process file locally with ffmpeg +local_path = await store_media_file( + file=input_data.video, + execution_context=execution_context, + return_format="for_local_processing", +) +# local_path = "video.mp4" - use with Path/ffmpeg/etc + +# INPUT: Need to send to external API like Replicate +image_b64 = await store_media_file( + file=input_data.image, + execution_context=execution_context, + return_format="for_external_api", +) +# image_b64 = "data:image/png;base64,iVBORw0..." - send to API + +# OUTPUT: Returning result from block +result_url = await store_media_file( + file=generated_image_url, + execution_context=execution_context, + return_format="for_block_output", +) +yield "image_url", result_url +# In CoPilot: result_url = "workspace://abc123" +# In graphs: result_url = "data:image/png;base64,..." +``` + +**Key points:** +- `for_block_output` is the ONLY format that auto-adapts to execution context +- Always use `for_block_output` for block outputs unless you have a specific reason not to +- Never hardcode workspace checks - let `for_block_output` handle it + **Modifying the API:** 1. Update route in `/backend/backend/server/routers/` diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/IDEAS.md b/autogpt_platform/backend/backend/api/features/chat/tools/IDEAS.md new file mode 100644 index 0000000000..656aac61c4 --- /dev/null +++ b/autogpt_platform/backend/backend/api/features/chat/tools/IDEAS.md @@ -0,0 +1,79 @@ +# CoPilot Tools - Future Ideas + +## Multimodal Image Support for CoPilot + +**Problem:** CoPilot uses a vision-capable model but can't "see" workspace images. When a block generates an image and returns `workspace://abc123`, CoPilot can't evaluate it (e.g., checking blog thumbnail quality). 
+
+**Backend Solution:**
+When preparing messages for the LLM, detect `workspace://` image references and convert them to proper image content blocks:
+
+```python
+# Before sending to the LLM, scan for workspace image references
+# and inject them as image content parts
+
+# Example message transformation:
+# FROM: {"role": "assistant", "content": "Generated image: workspace://abc123"}
+# TO: {"role": "assistant", "content": [
+#   {"type": "text", "text": "Generated image: workspace://abc123"},
+#   {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}}
+# ]}
+```
+
+**Where to implement:**
+- In the chat stream handler before calling the LLM
+- Or in a message preprocessing step
+- Need to fetch the image from the workspace, convert it to base64, and add it as image content
+
+**Considerations:**
+- Only do this for image MIME types (image/png, image/jpeg, etc.)
+- May want a size limit (don't pass 10MB images)
+- Track which images were "shown" to the AI for the frontend indicator
+- Cost implications - vision API calls are more expensive
+
+**Frontend Solution:**
+Show a visual indicator on workspace files in chat:
+- If the AI saw the image: normal display
+- If the AI didn't see it: overlay icon saying "AI can't see this image"
+
+Requires response metadata indicating which `workspace://` refs were passed to the model.
+
+---
+
+## Output Post-Processing Layer for run_block
+
+**Problem:** Many blocks produce large outputs that:
+- Consume massive context (a 100KB image becomes ~133KB of base64 text)
+- Can't fit in the conversation at all
+- Break downstream handling and drive up LLM costs
+
+**Proposed Solution:** Instead of modifying individual blocks or `store_media_file()`, implement a centralized output processor in `run_block.py` that handles outputs before they're returned to CoPilot.
+
+**Benefits:**
+1. **Centralized** - one place to handle all output processing
+2. **Future-proof** - new blocks automatically get output processing
+3. **Keeps blocks pure** - they don't need to know about context constraints
+4. 
**Handles all large outputs** - not just images + +**Processing Rules:** +- Detect base64 data URIs → save to workspace, return `workspace://` reference +- Truncate very long strings (>N chars) with truncation note +- Summarize large arrays/lists (e.g., "Array with 1000 items, first 5: [...]") +- Handle nested large outputs in dicts recursively +- Cap total output size + +**Implementation Location:** `run_block.py` after block execution, before returning `BlockOutputResponse` + +**Example:** +```python +def _process_outputs_for_context( + outputs: dict[str, list[Any]], + workspace_manager: WorkspaceManager, + max_string_length: int = 10000, + max_array_preview: int = 5, +) -> dict[str, list[Any]]: + """Process block outputs to prevent context bloat.""" + processed = {} + for name, values in outputs.items(): + processed[name] = [_process_value(v, workspace_manager) for v in values] + return processed +``` diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py b/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py index beeb128ae9..d078860c3a 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/__init__.py @@ -18,6 +18,12 @@ from .get_doc_page import GetDocPageTool from .run_agent import RunAgentTool from .run_block import RunBlockTool from .search_docs import SearchDocsTool +from .workspace_files import ( + DeleteWorkspaceFileTool, + ListWorkspaceFilesTool, + ReadWorkspaceFileTool, + WriteWorkspaceFileTool, +) if TYPE_CHECKING: from backend.api.features.chat.response_model import StreamToolOutputAvailable @@ -37,6 +43,11 @@ TOOL_REGISTRY: dict[str, BaseTool] = { "view_agent_output": AgentOutputTool(), "search_docs": SearchDocsTool(), "get_doc_page": GetDocPageTool(), + # Workspace tools for CoPilot file operations + "list_workspace_files": ListWorkspaceFilesTool(), + "read_workspace_file": ReadWorkspaceFileTool(), + "write_workspace_file": WriteWorkspaceFileTool(), + "delete_workspace_file": DeleteWorkspaceFileTool(), } # Export individual tool instances for backwards compatibility diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/models.py b/autogpt_platform/backend/backend/api/features/chat/tools/models.py index 8552681d03..49b233784e 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/models.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/models.py @@ -28,6 +28,12 @@ class ResponseType(str, Enum): BLOCK_OUTPUT = "block_output" DOC_SEARCH_RESULTS = "doc_search_results" DOC_PAGE = "doc_page" + # Workspace response types + WORKSPACE_FILE_LIST = "workspace_file_list" + WORKSPACE_FILE_CONTENT = "workspace_file_content" + WORKSPACE_FILE_METADATA = "workspace_file_metadata" + WORKSPACE_FILE_WRITTEN = "workspace_file_written" + WORKSPACE_FILE_DELETED = "workspace_file_deleted" # Long-running operation types OPERATION_STARTED = "operation_started" OPERATION_PENDING = "operation_pending" diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py index 3f57236564..a59082b399 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py @@ -1,6 +1,7 @@ """Tool for executing blocks directly.""" import logging +import uuid from collections import defaultdict from typing import Any @@ -8,6 +9,7 @@ from 
backend.api.features.chat.model import ChatSession from backend.data.block import get_block from backend.data.execution import ExecutionContext from backend.data.model import CredentialsMetaInput +from backend.data.workspace import get_or_create_workspace from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.util.exceptions import BlockError @@ -223,11 +225,48 @@ class RunBlockTool(BaseTool): ) try: - # Fetch actual credentials and prepare kwargs for block execution - # Create execution context with defaults (blocks may require it) + # Get or create user's workspace for CoPilot file operations + workspace = await get_or_create_workspace(user_id) + + # Generate synthetic IDs for CoPilot context + # Each chat session is treated as its own agent with one continuous run + # This means: + # - graph_id (agent) = session (memories scoped to session when limit_to_agent=True) + # - graph_exec_id (run) = session (memories scoped to session when limit_to_run=True) + # - node_exec_id = unique per block execution + synthetic_graph_id = f"copilot-session-{session.session_id}" + synthetic_graph_exec_id = f"copilot-session-{session.session_id}" + synthetic_node_id = f"copilot-node-{block_id}" + synthetic_node_exec_id = ( + f"copilot-{session.session_id}-{uuid.uuid4().hex[:8]}" + ) + + # Create unified execution context with all required fields + execution_context = ExecutionContext( + # Execution identity + user_id=user_id, + graph_id=synthetic_graph_id, + graph_exec_id=synthetic_graph_exec_id, + graph_version=1, # Versions are 1-indexed + node_id=synthetic_node_id, + node_exec_id=synthetic_node_exec_id, + # Workspace with session scoping + workspace_id=workspace.id, + session_id=session.session_id, + ) + + # Prepare kwargs for block execution + # Keep individual kwargs for backwards compatibility with existing blocks exec_kwargs: dict[str, Any] = { "user_id": user_id, - "execution_context": ExecutionContext(), + "execution_context": execution_context, + # Legacy: individual kwargs for blocks not yet using execution_context + "workspace_id": workspace.id, + "graph_exec_id": synthetic_graph_exec_id, + "node_exec_id": synthetic_node_exec_id, + "node_id": synthetic_node_id, + "graph_version": 1, # Versions are 1-indexed + "graph_id": synthetic_graph_id, } for field_name, cred_meta in matched_credentials.items(): diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/workspace_files.py b/autogpt_platform/backend/backend/api/features/chat/tools/workspace_files.py new file mode 100644 index 0000000000..03532c8fee --- /dev/null +++ b/autogpt_platform/backend/backend/api/features/chat/tools/workspace_files.py @@ -0,0 +1,620 @@ +"""CoPilot tools for workspace file operations.""" + +import base64 +import logging +from typing import Any, Optional + +from pydantic import BaseModel + +from backend.api.features.chat.model import ChatSession +from backend.data.workspace import get_or_create_workspace +from backend.util.settings import Config +from backend.util.virus_scanner import scan_content_safe +from backend.util.workspace import WorkspaceManager + +from .base import BaseTool +from .models import ErrorResponse, ResponseType, ToolResponseBase + +logger = logging.getLogger(__name__) + + +class WorkspaceFileInfoData(BaseModel): + """Data model for workspace file information (not a response itself).""" + + file_id: str + name: str + path: str + mime_type: str + size_bytes: int + + +class WorkspaceFileListResponse(ToolResponseBase): + """Response containing list of 
workspace files.""" + + type: ResponseType = ResponseType.WORKSPACE_FILE_LIST + files: list[WorkspaceFileInfoData] + total_count: int + + +class WorkspaceFileContentResponse(ToolResponseBase): + """Response containing workspace file content (legacy, for small text files).""" + + type: ResponseType = ResponseType.WORKSPACE_FILE_CONTENT + file_id: str + name: str + path: str + mime_type: str + content_base64: str + + +class WorkspaceFileMetadataResponse(ToolResponseBase): + """Response containing workspace file metadata and download URL (prevents context bloat).""" + + type: ResponseType = ResponseType.WORKSPACE_FILE_METADATA + file_id: str + name: str + path: str + mime_type: str + size_bytes: int + download_url: str + preview: str | None = None # First 500 chars for text files + + +class WorkspaceWriteResponse(ToolResponseBase): + """Response after writing a file to workspace.""" + + type: ResponseType = ResponseType.WORKSPACE_FILE_WRITTEN + file_id: str + name: str + path: str + size_bytes: int + + +class WorkspaceDeleteResponse(ToolResponseBase): + """Response after deleting a file from workspace.""" + + type: ResponseType = ResponseType.WORKSPACE_FILE_DELETED + file_id: str + success: bool + + +class ListWorkspaceFilesTool(BaseTool): + """Tool for listing files in user's workspace.""" + + @property + def name(self) -> str: + return "list_workspace_files" + + @property + def description(self) -> str: + return ( + "List files in the user's workspace. " + "Returns file names, paths, sizes, and metadata. " + "Optionally filter by path prefix." + ) + + @property + def parameters(self) -> dict[str, Any]: + return { + "type": "object", + "properties": { + "path_prefix": { + "type": "string", + "description": ( + "Optional path prefix to filter files " + "(e.g., '/documents/' to list only files in documents folder). " + "By default, only files from the current session are listed." + ), + }, + "limit": { + "type": "integer", + "description": "Maximum number of files to return (default 50, max 100)", + "minimum": 1, + "maximum": 100, + }, + "include_all_sessions": { + "type": "boolean", + "description": ( + "If true, list files from all sessions. " + "Default is false (only current session's files)." 
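+                        # Default scope: only files under the current session's /sessions/<session-id>/ prefix are listed.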
+ ), + }, + }, + "required": [], + } + + @property + def requires_auth(self) -> bool: + return True + + async def _execute( + self, + user_id: str | None, + session: ChatSession, + **kwargs, + ) -> ToolResponseBase: + session_id = session.session_id + + if not user_id: + return ErrorResponse( + message="Authentication required", + session_id=session_id, + ) + + path_prefix: Optional[str] = kwargs.get("path_prefix") + limit = min(kwargs.get("limit", 50), 100) + include_all_sessions: bool = kwargs.get("include_all_sessions", False) + + try: + workspace = await get_or_create_workspace(user_id) + # Pass session_id for session-scoped file access + manager = WorkspaceManager(user_id, workspace.id, session_id) + + files = await manager.list_files( + path=path_prefix, + limit=limit, + include_all_sessions=include_all_sessions, + ) + total = await manager.get_file_count( + path=path_prefix, + include_all_sessions=include_all_sessions, + ) + + file_infos = [ + WorkspaceFileInfoData( + file_id=f.id, + name=f.name, + path=f.path, + mime_type=f.mimeType, + size_bytes=f.sizeBytes, + ) + for f in files + ] + + scope_msg = "all sessions" if include_all_sessions else "current session" + return WorkspaceFileListResponse( + files=file_infos, + total_count=total, + message=f"Found {len(files)} files in workspace ({scope_msg})", + session_id=session_id, + ) + + except Exception as e: + logger.error(f"Error listing workspace files: {e}", exc_info=True) + return ErrorResponse( + message=f"Failed to list workspace files: {str(e)}", + error=str(e), + session_id=session_id, + ) + + +class ReadWorkspaceFileTool(BaseTool): + """Tool for reading file content from workspace.""" + + # Size threshold for returning full content vs metadata+URL + # Files larger than this return metadata with download URL to prevent context bloat + MAX_INLINE_SIZE_BYTES = 32 * 1024 # 32KB + # Preview size for text files + PREVIEW_SIZE = 500 + + @property + def name(self) -> str: + return "read_workspace_file" + + @property + def description(self) -> str: + return ( + "Read a file from the user's workspace. " + "Specify either file_id or path to identify the file. " + "For small text files, returns content directly. " + "For large or binary files, returns metadata and a download URL. " + "Paths are scoped to the current session by default. " + "Use /sessions//... for cross-session access." + ) + + @property + def parameters(self) -> dict[str, Any]: + return { + "type": "object", + "properties": { + "file_id": { + "type": "string", + "description": "The file's unique ID (from list_workspace_files)", + }, + "path": { + "type": "string", + "description": ( + "The virtual file path (e.g., '/documents/report.pdf'). " + "Scoped to current session by default." + ), + }, + "force_download_url": { + "type": "boolean", + "description": ( + "If true, always return metadata+URL instead of inline content. " + "Default is false (auto-selects based on file size/type)." 
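+                        # Auto-selection: content is inlined only for text files at or under MAX_INLINE_SIZE_BYTES (32KB).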
+ ), + }, + }, + "required": [], # At least one must be provided + } + + @property + def requires_auth(self) -> bool: + return True + + def _is_text_mime_type(self, mime_type: str) -> bool: + """Check if the MIME type is a text-based type.""" + text_types = [ + "text/", + "application/json", + "application/xml", + "application/javascript", + "application/x-python", + "application/x-sh", + ] + return any(mime_type.startswith(t) for t in text_types) + + async def _execute( + self, + user_id: str | None, + session: ChatSession, + **kwargs, + ) -> ToolResponseBase: + session_id = session.session_id + + if not user_id: + return ErrorResponse( + message="Authentication required", + session_id=session_id, + ) + + file_id: Optional[str] = kwargs.get("file_id") + path: Optional[str] = kwargs.get("path") + force_download_url: bool = kwargs.get("force_download_url", False) + + if not file_id and not path: + return ErrorResponse( + message="Please provide either file_id or path", + session_id=session_id, + ) + + try: + workspace = await get_or_create_workspace(user_id) + # Pass session_id for session-scoped file access + manager = WorkspaceManager(user_id, workspace.id, session_id) + + # Get file info + if file_id: + file_info = await manager.get_file_info(file_id) + if file_info is None: + return ErrorResponse( + message=f"File not found: {file_id}", + session_id=session_id, + ) + target_file_id = file_id + else: + # path is guaranteed to be non-None here due to the check above + assert path is not None + file_info = await manager.get_file_info_by_path(path) + if file_info is None: + return ErrorResponse( + message=f"File not found at path: {path}", + session_id=session_id, + ) + target_file_id = file_info.id + + # Decide whether to return inline content or metadata+URL + is_small_file = file_info.sizeBytes <= self.MAX_INLINE_SIZE_BYTES + is_text_file = self._is_text_mime_type(file_info.mimeType) + + # Return inline content for small text files (unless force_download_url) + if is_small_file and is_text_file and not force_download_url: + content = await manager.read_file_by_id(target_file_id) + content_b64 = base64.b64encode(content).decode("utf-8") + + return WorkspaceFileContentResponse( + file_id=file_info.id, + name=file_info.name, + path=file_info.path, + mime_type=file_info.mimeType, + content_base64=content_b64, + message=f"Successfully read file: {file_info.name}", + session_id=session_id, + ) + + # Return metadata + workspace:// reference for large or binary files + # This prevents context bloat (100KB file = ~133KB as base64) + # Use workspace:// format so frontend urlTransform can add proxy prefix + download_url = f"workspace://{target_file_id}" + + # Generate preview for text files + preview: str | None = None + if is_text_file: + try: + content = await manager.read_file_by_id(target_file_id) + preview_text = content[: self.PREVIEW_SIZE].decode( + "utf-8", errors="replace" + ) + if len(content) > self.PREVIEW_SIZE: + preview_text += "..." + preview = preview_text + except Exception: + pass # Preview is optional + + return WorkspaceFileMetadataResponse( + file_id=file_info.id, + name=file_info.name, + path=file_info.path, + mime_type=file_info.mimeType, + size_bytes=file_info.sizeBytes, + download_url=download_url, + preview=preview, + message=f"File: {file_info.name} ({file_info.sizeBytes} bytes). 
Use download_url to retrieve content.", + session_id=session_id, + ) + + except FileNotFoundError as e: + return ErrorResponse( + message=str(e), + session_id=session_id, + ) + except Exception as e: + logger.error(f"Error reading workspace file: {e}", exc_info=True) + return ErrorResponse( + message=f"Failed to read workspace file: {str(e)}", + error=str(e), + session_id=session_id, + ) + + +class WriteWorkspaceFileTool(BaseTool): + """Tool for writing files to workspace.""" + + @property + def name(self) -> str: + return "write_workspace_file" + + @property + def description(self) -> str: + return ( + "Write or create a file in the user's workspace. " + "Provide the content as a base64-encoded string. " + f"Maximum file size is {Config().max_file_size_mb}MB. " + "Files are saved to the current session's folder by default. " + "Use /sessions//... for cross-session access." + ) + + @property + def parameters(self) -> dict[str, Any]: + return { + "type": "object", + "properties": { + "filename": { + "type": "string", + "description": "Name for the file (e.g., 'report.pdf')", + }, + "content_base64": { + "type": "string", + "description": "Base64-encoded file content", + }, + "path": { + "type": "string", + "description": ( + "Optional virtual path where to save the file " + "(e.g., '/documents/report.pdf'). " + "Defaults to '/{filename}'. Scoped to current session." + ), + }, + "mime_type": { + "type": "string", + "description": ( + "Optional MIME type of the file. " + "Auto-detected from filename if not provided." + ), + }, + "overwrite": { + "type": "boolean", + "description": "Whether to overwrite if file exists at path (default: false)", + }, + }, + "required": ["filename", "content_base64"], + } + + @property + def requires_auth(self) -> bool: + return True + + async def _execute( + self, + user_id: str | None, + session: ChatSession, + **kwargs, + ) -> ToolResponseBase: + session_id = session.session_id + + if not user_id: + return ErrorResponse( + message="Authentication required", + session_id=session_id, + ) + + filename: str = kwargs.get("filename", "") + content_b64: str = kwargs.get("content_base64", "") + path: Optional[str] = kwargs.get("path") + mime_type: Optional[str] = kwargs.get("mime_type") + overwrite: bool = kwargs.get("overwrite", False) + + if not filename: + return ErrorResponse( + message="Please provide a filename", + session_id=session_id, + ) + + if not content_b64: + return ErrorResponse( + message="Please provide content_base64", + session_id=session_id, + ) + + # Decode content + try: + content = base64.b64decode(content_b64) + except Exception: + return ErrorResponse( + message="Invalid base64-encoded content", + session_id=session_id, + ) + + # Check size + max_file_size = Config().max_file_size_mb * 1024 * 1024 + if len(content) > max_file_size: + return ErrorResponse( + message=f"File too large. 
Maximum size is {Config().max_file_size_mb}MB", + session_id=session_id, + ) + + try: + # Virus scan + await scan_content_safe(content, filename=filename) + + workspace = await get_or_create_workspace(user_id) + # Pass session_id for session-scoped file access + manager = WorkspaceManager(user_id, workspace.id, session_id) + + file_record = await manager.write_file( + content=content, + filename=filename, + path=path, + mime_type=mime_type, + overwrite=overwrite, + ) + + return WorkspaceWriteResponse( + file_id=file_record.id, + name=file_record.name, + path=file_record.path, + size_bytes=file_record.sizeBytes, + message=f"Successfully wrote file: {file_record.name}", + session_id=session_id, + ) + + except ValueError as e: + return ErrorResponse( + message=str(e), + session_id=session_id, + ) + except Exception as e: + logger.error(f"Error writing workspace file: {e}", exc_info=True) + return ErrorResponse( + message=f"Failed to write workspace file: {str(e)}", + error=str(e), + session_id=session_id, + ) + + +class DeleteWorkspaceFileTool(BaseTool): + """Tool for deleting files from workspace.""" + + @property + def name(self) -> str: + return "delete_workspace_file" + + @property + def description(self) -> str: + return ( + "Delete a file from the user's workspace. " + "Specify either file_id or path to identify the file. " + "Paths are scoped to the current session by default. " + "Use /sessions//... for cross-session access." + ) + + @property + def parameters(self) -> dict[str, Any]: + return { + "type": "object", + "properties": { + "file_id": { + "type": "string", + "description": "The file's unique ID (from list_workspace_files)", + }, + "path": { + "type": "string", + "description": ( + "The virtual file path (e.g., '/documents/report.pdf'). " + "Scoped to current session by default." 
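+ # Examples: "/documents/report.pdf" targets the current
+ # session; a "/sessions/<session_id>/..." style path (exact
+ # form elided above, shown here hypothetically) targets
+ # another session.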
+ ), + }, + }, + "required": [], # At least one must be provided + } + + @property + def requires_auth(self) -> bool: + return True + + async def _execute( + self, + user_id: str | None, + session: ChatSession, + **kwargs, + ) -> ToolResponseBase: + session_id = session.session_id + + if not user_id: + return ErrorResponse( + message="Authentication required", + session_id=session_id, + ) + + file_id: Optional[str] = kwargs.get("file_id") + path: Optional[str] = kwargs.get("path") + + if not file_id and not path: + return ErrorResponse( + message="Please provide either file_id or path", + session_id=session_id, + ) + + try: + workspace = await get_or_create_workspace(user_id) + # Pass session_id for session-scoped file access + manager = WorkspaceManager(user_id, workspace.id, session_id) + + # Determine the file_id to delete + target_file_id: str + if file_id: + target_file_id = file_id + else: + # path is guaranteed to be non-None here due to the check above + assert path is not None + file_info = await manager.get_file_info_by_path(path) + if file_info is None: + return ErrorResponse( + message=f"File not found at path: {path}", + session_id=session_id, + ) + target_file_id = file_info.id + + success = await manager.delete_file(target_file_id) + + if not success: + return ErrorResponse( + message=f"File not found: {target_file_id}", + session_id=session_id, + ) + + return WorkspaceDeleteResponse( + file_id=target_file_id, + success=True, + message="File deleted successfully", + session_id=session_id, + ) + + except Exception as e: + logger.error(f"Error deleting workspace file: {e}", exc_info=True) + return ErrorResponse( + message=f"Failed to delete workspace file: {str(e)}", + error=str(e), + session_id=session_id, + ) diff --git a/autogpt_platform/backend/backend/api/features/workspace/__init__.py b/autogpt_platform/backend/backend/api/features/workspace/__init__.py new file mode 100644 index 0000000000..688ada9937 --- /dev/null +++ b/autogpt_platform/backend/backend/api/features/workspace/__init__.py @@ -0,0 +1 @@ +# Workspace API feature module diff --git a/autogpt_platform/backend/backend/api/features/workspace/routes.py b/autogpt_platform/backend/backend/api/features/workspace/routes.py new file mode 100644 index 0000000000..b6d0c84572 --- /dev/null +++ b/autogpt_platform/backend/backend/api/features/workspace/routes.py @@ -0,0 +1,122 @@ +""" +Workspace API routes for managing user file storage. +""" + +import logging +import re +from typing import Annotated +from urllib.parse import quote + +import fastapi +from autogpt_libs.auth.dependencies import get_user_id, requires_user +from fastapi.responses import Response + +from backend.data.workspace import get_workspace, get_workspace_file +from backend.util.workspace_storage import get_workspace_storage + + +def _sanitize_filename_for_header(filename: str) -> str: + """ + Sanitize filename for Content-Disposition header to prevent header injection. + + Removes/replaces characters that could break the header or inject new headers. + Uses RFC5987 encoding for non-ASCII characters. 
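+
+ Examples (derived from the implementation below):
+ _sanitize_filename_for_header("report.pdf")
+ -> 'attachment; filename="report.pdf"'
+ _sanitize_filename_for_header("résumé.pdf")
+ -> "attachment; filename*=UTF-8''r%C3%A9sum%C3%A9.pdf"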
+ """ + # Remove CR, LF, and null bytes (header injection prevention) + sanitized = re.sub(r"[\r\n\x00]", "", filename) + # Escape quotes + sanitized = sanitized.replace('"', '\\"') + # For non-ASCII, use RFC5987 filename* parameter + # Check if filename has non-ASCII characters + try: + sanitized.encode("ascii") + return f'attachment; filename="{sanitized}"' + except UnicodeEncodeError: + # Use RFC5987 encoding for UTF-8 filenames + encoded = quote(sanitized, safe="") + return f"attachment; filename*=UTF-8''{encoded}" + + +logger = logging.getLogger(__name__) + +router = fastapi.APIRouter( + dependencies=[fastapi.Security(requires_user)], +) + + +def _create_streaming_response(content: bytes, file) -> Response: + """Create a streaming response for file content.""" + return Response( + content=content, + media_type=file.mimeType, + headers={ + "Content-Disposition": _sanitize_filename_for_header(file.name), + "Content-Length": str(len(content)), + }, + ) + + +async def _create_file_download_response(file) -> Response: + """ + Create a download response for a workspace file. + + Handles both local storage (direct streaming) and GCS (signed URL redirect + with fallback to streaming). + """ + storage = await get_workspace_storage() + + # For local storage, stream the file directly + if file.storagePath.startswith("local://"): + content = await storage.retrieve(file.storagePath) + return _create_streaming_response(content, file) + + # For GCS, try to redirect to signed URL, fall back to streaming + try: + url = await storage.get_download_url(file.storagePath, expires_in=300) + # If we got back an API path (fallback), stream directly instead + if url.startswith("/api/"): + content = await storage.retrieve(file.storagePath) + return _create_streaming_response(content, file) + return fastapi.responses.RedirectResponse(url=url, status_code=302) + except Exception as e: + # Log the signed URL failure with context + logger.error( + f"Failed to get signed URL for file {file.id} " + f"(storagePath={file.storagePath}): {e}", + exc_info=True, + ) + # Fall back to streaming directly from GCS + try: + content = await storage.retrieve(file.storagePath) + return _create_streaming_response(content, file) + except Exception as fallback_error: + logger.error( + f"Fallback streaming also failed for file {file.id} " + f"(storagePath={file.storagePath}): {fallback_error}", + exc_info=True, + ) + raise + + +@router.get( + "/files/{file_id}/download", + summary="Download file by ID", +) +async def download_file( + user_id: Annotated[str, fastapi.Security(get_user_id)], + file_id: str, +) -> Response: + """ + Download a file by its ID. + + Returns the file content directly or redirects to a signed URL for GCS. 
+ """ + workspace = await get_workspace(user_id) + if workspace is None: + raise fastapi.HTTPException(status_code=404, detail="Workspace not found") + + file = await get_workspace_file(file_id, workspace.id) + if file is None: + raise fastapi.HTTPException(status_code=404, detail="File not found") + + return await _create_file_download_response(file) diff --git a/autogpt_platform/backend/backend/api/rest_api.py b/autogpt_platform/backend/backend/api/rest_api.py index e9556e992f..b936312ce1 100644 --- a/autogpt_platform/backend/backend/api/rest_api.py +++ b/autogpt_platform/backend/backend/api/rest_api.py @@ -32,6 +32,7 @@ import backend.api.features.postmark.postmark import backend.api.features.store.model import backend.api.features.store.routes import backend.api.features.v1 +import backend.api.features.workspace.routes as workspace_routes import backend.data.block import backend.data.db import backend.data.graph @@ -52,6 +53,7 @@ from backend.util.exceptions import ( ) from backend.util.feature_flag import initialize_launchdarkly, shutdown_launchdarkly from backend.util.service import UnhealthyServiceError +from backend.util.workspace_storage import shutdown_workspace_storage from .external.fastapi_app import external_api from .features.analytics import router as analytics_router @@ -124,6 +126,11 @@ async def lifespan_context(app: fastapi.FastAPI): except Exception as e: logger.warning(f"Error shutting down cloud storage handler: {e}") + try: + await shutdown_workspace_storage() + except Exception as e: + logger.warning(f"Error shutting down workspace storage: {e}") + await backend.data.db.disconnect() @@ -315,6 +322,11 @@ app.include_router( tags=["v2", "chat"], prefix="/api/chat", ) +app.include_router( + workspace_routes.router, + tags=["workspace"], + prefix="/api/workspace", +) app.include_router( backend.api.features.oauth.router, tags=["oauth"], diff --git a/autogpt_platform/backend/backend/blocks/ai_image_customizer.py b/autogpt_platform/backend/backend/blocks/ai_image_customizer.py index 83178e924d..91be33a60e 100644 --- a/autogpt_platform/backend/backend/blocks/ai_image_customizer.py +++ b/autogpt_platform/backend/backend/blocks/ai_image_customizer.py @@ -13,6 +13,7 @@ from backend.data.block import ( BlockSchemaInput, BlockSchemaOutput, ) +from backend.data.execution import ExecutionContext from backend.data.model import ( APIKeyCredentials, CredentialsField, @@ -117,11 +118,13 @@ class AIImageCustomizerBlock(Block): "credentials": TEST_CREDENTIALS_INPUT, }, test_output=[ - ("image_url", "https://replicate.delivery/generated-image.jpg"), + # Output will be a workspace ref or data URI depending on context + ("image_url", lambda x: x.startswith(("workspace://", "data:"))), ], test_mock={ + # Use data URI to avoid HTTP requests during tests "run_model": lambda *args, **kwargs: MediaFileType( - "https://replicate.delivery/generated-image.jpg" + 
"data:image/jpeg;base64,/9j/4AAQSkZJRgABAgAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAABAAEDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigD//2Q==" ), }, test_credentials=TEST_CREDENTIALS, @@ -132,8 +135,7 @@ class AIImageCustomizerBlock(Block): input_data: Input, *, credentials: APIKeyCredentials, - graph_exec_id: str, - user_id: str, + execution_context: ExecutionContext, **kwargs, ) -> BlockOutput: try: @@ -141,10 +143,9 @@ class AIImageCustomizerBlock(Block): processed_images = await asyncio.gather( *( store_media_file( - graph_exec_id=graph_exec_id, file=img, - user_id=user_id, - return_content=True, + execution_context=execution_context, + return_format="for_external_api", # Get content for Replicate API ) for img in input_data.images ) @@ -158,7 +159,14 @@ class AIImageCustomizerBlock(Block): aspect_ratio=input_data.aspect_ratio.value, output_format=input_data.output_format.value, ) - yield "image_url", result + + # Store the generated image to the user's workspace for persistence + stored_url = await store_media_file( + file=result, + execution_context=execution_context, + return_format="for_block_output", + ) + yield "image_url", stored_url except Exception as e: yield "error", str(e) diff --git a/autogpt_platform/backend/backend/blocks/ai_image_generator_block.py b/autogpt_platform/backend/backend/blocks/ai_image_generator_block.py index 8c7b6e6102..e40731cd97 100644 --- a/autogpt_platform/backend/backend/blocks/ai_image_generator_block.py +++ b/autogpt_platform/backend/backend/blocks/ai_image_generator_block.py @@ -6,6 +6,7 @@ from replicate.client import Client as ReplicateClient from replicate.helpers import FileOutput from backend.data.block import Block, BlockCategory, BlockSchemaInput, BlockSchemaOutput +from backend.data.execution import ExecutionContext from backend.data.model import ( APIKeyCredentials, CredentialsField, @@ -13,6 +14,8 @@ from backend.data.model import ( SchemaField, ) from backend.integrations.providers import ProviderName +from backend.util.file import store_media_file +from backend.util.type import MediaFileType class ImageSize(str, Enum): @@ -165,11 +168,13 @@ class AIImageGeneratorBlock(Block): test_output=[ ( "image_url", - "https://replicate.delivery/generated-image.webp", + # Test output is a data URI since we now store images + lambda x: x.startswith("data:image/"), ), ], test_mock={ - "_run_client": lambda *args, **kwargs: "https://replicate.delivery/generated-image.webp" + # Return a data URI directly so store_media_file doesn't need to download + "_run_client": lambda *args, **kwargs: "data:image/webp;base64,UklGRiQAAABXRUJQVlA4IBgAAAAwAQCdASoBAAEAAQAcJYgCdAEO" }, ) @@ -318,11 +323,24 @@ class AIImageGeneratorBlock(Block): style_text = style_map.get(style, "") return f"{style_text} of" if style_text else "" - async def run(self, 
input_data: Input, *, credentials: APIKeyCredentials, **kwargs): + async def run( + self, + input_data: Input, + *, + credentials: APIKeyCredentials, + execution_context: ExecutionContext, + **kwargs, + ): try: url = await self.generate_image(input_data, credentials) if url: - yield "image_url", url + # Store the generated image to the user's workspace/execution folder + stored_url = await store_media_file( + file=MediaFileType(url), + execution_context=execution_context, + return_format="for_block_output", + ) + yield "image_url", stored_url else: yield "error", "Image generation returned an empty result." except Exception as e: diff --git a/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py b/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py index a9e96890d3..eb60843185 100644 --- a/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py +++ b/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py @@ -13,6 +13,7 @@ from backend.data.block import ( BlockSchemaInput, BlockSchemaOutput, ) +from backend.data.execution import ExecutionContext from backend.data.model import ( APIKeyCredentials, CredentialsField, @@ -21,7 +22,9 @@ from backend.data.model import ( ) from backend.integrations.providers import ProviderName from backend.util.exceptions import BlockExecutionError +from backend.util.file import store_media_file from backend.util.request import Requests +from backend.util.type import MediaFileType TEST_CREDENTIALS = APIKeyCredentials( id="01234567-89ab-cdef-0123-456789abcdef", @@ -271,7 +274,10 @@ class AIShortformVideoCreatorBlock(Block): "voice": Voice.LILY, "video_style": VisualMediaType.STOCK_VIDEOS, }, - test_output=("video_url", "https://example.com/video.mp4"), + test_output=( + "video_url", + lambda x: x.startswith(("workspace://", "data:")), + ), test_mock={ "create_webhook": lambda *args, **kwargs: ( "test_uuid", @@ -280,15 +286,21 @@ class AIShortformVideoCreatorBlock(Block): "create_video": lambda *args, **kwargs: {"pid": "test_pid"}, "check_video_status": lambda *args, **kwargs: { "status": "ready", - "videoUrl": "https://example.com/video.mp4", + "videoUrl": "data:video/mp4;base64,AAAA", }, - "wait_for_video": lambda *args, **kwargs: "https://example.com/video.mp4", + # Use data URI to avoid HTTP requests during tests + "wait_for_video": lambda *args, **kwargs: "data:video/mp4;base64,AAAA", }, test_credentials=TEST_CREDENTIALS, ) async def run( - self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + self, + input_data: Input, + *, + credentials: APIKeyCredentials, + execution_context: ExecutionContext, + **kwargs, ) -> BlockOutput: # Create a new Webhook.site URL webhook_token, webhook_url = await self.create_webhook() @@ -340,7 +352,13 @@ class AIShortformVideoCreatorBlock(Block): ) video_url = await self.wait_for_video(credentials.api_key, pid) logger.debug(f"Video ready: {video_url}") - yield "video_url", video_url + # Store the generated video to the user's workspace for persistence + stored_url = await store_media_file( + file=MediaFileType(video_url), + execution_context=execution_context, + return_format="for_block_output", + ) + yield "video_url", stored_url class AIAdMakerVideoCreatorBlock(Block): @@ -447,7 +465,10 @@ class AIAdMakerVideoCreatorBlock(Block): "https://cdn.revid.ai/uploads/1747076315114-image.png", ], }, - test_output=("video_url", "https://example.com/ad.mp4"), + test_output=( + "video_url", + lambda x: x.startswith(("workspace://", "data:")), + ), test_mock={ 
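+ # Use data URI to avoid HTTP requests during tests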
"create_webhook": lambda *args, **kwargs: ( "test_uuid", @@ -456,14 +477,21 @@ class AIAdMakerVideoCreatorBlock(Block): "create_video": lambda *args, **kwargs: {"pid": "test_pid"}, "check_video_status": lambda *args, **kwargs: { "status": "ready", - "videoUrl": "https://example.com/ad.mp4", + "videoUrl": "data:video/mp4;base64,AAAA", }, - "wait_for_video": lambda *args, **kwargs: "https://example.com/ad.mp4", + "wait_for_video": lambda *args, **kwargs: "data:video/mp4;base64,AAAA", }, test_credentials=TEST_CREDENTIALS, ) - async def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs): + async def run( + self, + input_data: Input, + *, + credentials: APIKeyCredentials, + execution_context: ExecutionContext, + **kwargs, + ): webhook_token, webhook_url = await self.create_webhook() payload = { @@ -531,7 +559,13 @@ class AIAdMakerVideoCreatorBlock(Block): raise RuntimeError("Failed to create video: No project ID returned") video_url = await self.wait_for_video(credentials.api_key, pid) - yield "video_url", video_url + # Store the generated video to the user's workspace for persistence + stored_url = await store_media_file( + file=MediaFileType(video_url), + execution_context=execution_context, + return_format="for_block_output", + ) + yield "video_url", stored_url class AIScreenshotToVideoAdBlock(Block): @@ -626,7 +660,10 @@ class AIScreenshotToVideoAdBlock(Block): "script": "Amazing numbers!", "screenshot_url": "https://cdn.revid.ai/uploads/1747080376028-image.png", }, - test_output=("video_url", "https://example.com/screenshot.mp4"), + test_output=( + "video_url", + lambda x: x.startswith(("workspace://", "data:")), + ), test_mock={ "create_webhook": lambda *args, **kwargs: ( "test_uuid", @@ -635,14 +672,21 @@ class AIScreenshotToVideoAdBlock(Block): "create_video": lambda *args, **kwargs: {"pid": "test_pid"}, "check_video_status": lambda *args, **kwargs: { "status": "ready", - "videoUrl": "https://example.com/screenshot.mp4", + "videoUrl": "data:video/mp4;base64,AAAA", }, - "wait_for_video": lambda *args, **kwargs: "https://example.com/screenshot.mp4", + "wait_for_video": lambda *args, **kwargs: "data:video/mp4;base64,AAAA", }, test_credentials=TEST_CREDENTIALS, ) - async def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs): + async def run( + self, + input_data: Input, + *, + credentials: APIKeyCredentials, + execution_context: ExecutionContext, + **kwargs, + ): webhook_token, webhook_url = await self.create_webhook() payload = { @@ -710,4 +754,10 @@ class AIScreenshotToVideoAdBlock(Block): raise RuntimeError("Failed to create video: No project ID returned") video_url = await self.wait_for_video(credentials.api_key, pid) - yield "video_url", video_url + # Store the generated video to the user's workspace for persistence + stored_url = await store_media_file( + file=MediaFileType(video_url), + execution_context=execution_context, + return_format="for_block_output", + ) + yield "video_url", stored_url diff --git a/autogpt_platform/backend/backend/blocks/bannerbear/text_overlay.py b/autogpt_platform/backend/backend/blocks/bannerbear/text_overlay.py index 16d46c0d99..62aaf63d88 100644 --- a/autogpt_platform/backend/backend/blocks/bannerbear/text_overlay.py +++ b/autogpt_platform/backend/backend/blocks/bannerbear/text_overlay.py @@ -6,6 +6,7 @@ if TYPE_CHECKING: from pydantic import SecretStr +from backend.data.execution import ExecutionContext from backend.sdk import ( APIKeyCredentials, Block, @@ -17,6 +18,8 @@ from backend.sdk import ( Requests, 
SchemaField, ) +from backend.util.file import store_media_file +from backend.util.type import MediaFileType from ._config import bannerbear @@ -135,15 +138,17 @@ class BannerbearTextOverlayBlock(Block): }, test_output=[ ("success", True), - ("image_url", "https://cdn.bannerbear.com/test-image.jpg"), + # Output will be a workspace ref or data URI depending on context + ("image_url", lambda x: x.startswith(("workspace://", "data:"))), ("uid", "test-uid-123"), ("status", "completed"), ], test_mock={ + # Use data URI to avoid HTTP requests during tests "_make_api_request": lambda *args, **kwargs: { "uid": "test-uid-123", "status": "completed", - "image_url": "https://cdn.bannerbear.com/test-image.jpg", + "image_url": "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/wAALCAABAAEBAREA/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/9oACAEBAAA/APn+v//Z", } }, test_credentials=TEST_CREDENTIALS, @@ -177,7 +182,12 @@ class BannerbearTextOverlayBlock(Block): raise Exception(error_msg) async def run( - self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + self, + input_data: Input, + *, + credentials: APIKeyCredentials, + execution_context: ExecutionContext, + **kwargs, ) -> BlockOutput: # Build the modifications array modifications = [] @@ -234,6 +244,18 @@ class BannerbearTextOverlayBlock(Block): # Synchronous request - image should be ready yield "success", True - yield "image_url", data.get("image_url", "") + + # Store the generated image to workspace for persistence + image_url = data.get("image_url", "") + if image_url: + stored_url = await store_media_file( + file=MediaFileType(image_url), + execution_context=execution_context, + return_format="for_block_output", + ) + yield "image_url", stored_url + else: + yield "image_url", "" + yield "uid", data.get("uid", "") yield "status", data.get("status", "completed") diff --git a/autogpt_platform/backend/backend/blocks/basic.py b/autogpt_platform/backend/backend/blocks/basic.py index a9c77e2b93..95193b3feb 100644 --- a/autogpt_platform/backend/backend/blocks/basic.py +++ b/autogpt_platform/backend/backend/blocks/basic.py @@ -9,6 +9,7 @@ from backend.data.block import ( BlockSchemaOutput, BlockType, ) +from backend.data.execution import ExecutionContext from backend.data.model import SchemaField from backend.util.file import store_media_file from backend.util.type import MediaFileType, convert @@ -17,10 +18,10 @@ from backend.util.type import MediaFileType, convert class FileStoreBlock(Block): class Input(BlockSchemaInput): file_in: MediaFileType = SchemaField( - description="The file to store in the temporary directory, it can be a URL, data URI, or local path." + description="The file to download and store. Can be a URL (https://...), data URI, or local path." 
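+ # Accepted forms, e.g.: "https://example.com/report.pdf",
+ # "data:image/png;base64,...", or a stored path from an
+ # upstream block ("example.com" is an illustrative URL).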
) base_64: bool = SchemaField( - description="Whether produce an output in base64 format (not recommended, you can pass the string path just fine accross blocks).", + description="Whether to produce output in base64 format (not recommended, you can pass the file reference across blocks).", default=False, advanced=True, title="Produce Base64 Output", @@ -28,13 +29,18 @@ class FileStoreBlock(Block): class Output(BlockSchemaOutput): file_out: MediaFileType = SchemaField( - description="The relative path to the stored file in the temporary directory." + description="Reference to the stored file. In CoPilot: workspace:// URI (visible in list_workspace_files). In graphs: data URI for passing to other blocks." ) def __init__(self): super().__init__( id="cbb50872-625b-42f0-8203-a2ae78242d8a", - description="Stores the input file in the temporary directory.", + description=( + "Downloads and stores a file from a URL, data URI, or local path. " + "Use this to fetch images, documents, or other files for processing. " + "In CoPilot: saves to workspace (use list_workspace_files to see it). " + "In graphs: outputs a data URI to pass to other blocks." + ), categories={BlockCategory.BASIC, BlockCategory.MULTIMEDIA}, input_schema=FileStoreBlock.Input, output_schema=FileStoreBlock.Output, @@ -45,15 +51,18 @@ class FileStoreBlock(Block): self, input_data: Input, *, - graph_exec_id: str, - user_id: str, + execution_context: ExecutionContext, **kwargs, ) -> BlockOutput: + # Determine return format based on user preference + # for_external_api: always returns data URI (base64) - honors "Produce Base64 Output" + # for_block_output: smart format - workspace:// in CoPilot, data URI in graphs + return_format = "for_external_api" if input_data.base_64 else "for_block_output" + yield "file_out", await store_media_file( - graph_exec_id=graph_exec_id, file=input_data.file_in, - user_id=user_id, - return_content=input_data.base_64, + execution_context=execution_context, + return_format=return_format, ) diff --git a/autogpt_platform/backend/backend/blocks/discord/bot_blocks.py b/autogpt_platform/backend/backend/blocks/discord/bot_blocks.py index 5ecd730f47..4438af1955 100644 --- a/autogpt_platform/backend/backend/blocks/discord/bot_blocks.py +++ b/autogpt_platform/backend/backend/blocks/discord/bot_blocks.py @@ -15,6 +15,7 @@ from backend.data.block import ( BlockSchemaInput, BlockSchemaOutput, ) +from backend.data.execution import ExecutionContext from backend.data.model import APIKeyCredentials, SchemaField from backend.util.file import store_media_file from backend.util.request import Requests @@ -666,8 +667,7 @@ class SendDiscordFileBlock(Block): file: MediaFileType, filename: str, message_content: str, - graph_exec_id: str, - user_id: str, + execution_context: ExecutionContext, ) -> dict: intents = discord.Intents.default() intents.guilds = True @@ -731,10 +731,9 @@ class SendDiscordFileBlock(Block): # Local file path - read from stored media file # This would be a path from a previous block's output stored_file = await store_media_file( - graph_exec_id=graph_exec_id, file=file, - user_id=user_id, - return_content=True, # Get as data URI + execution_context=execution_context, + return_format="for_external_api", # Get content to send to Discord ) # Now process as data URI header, encoded = stored_file.split(",", 1) @@ -781,8 +780,7 @@ class SendDiscordFileBlock(Block): input_data: Input, *, credentials: APIKeyCredentials, - graph_exec_id: str, - user_id: str, + execution_context: ExecutionContext, **kwargs, ) -> 
BlockOutput: try: @@ -793,8 +791,7 @@ class SendDiscordFileBlock(Block): file=input_data.file, filename=input_data.filename, message_content=input_data.message_content, - graph_exec_id=graph_exec_id, - user_id=user_id, + execution_context=execution_context, ) yield "status", result.get("status", "Unknown error") diff --git a/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py b/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py index 2a71548dcc..c2079ef159 100644 --- a/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py +++ b/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py @@ -17,8 +17,11 @@ from backend.data.block import ( BlockSchemaInput, BlockSchemaOutput, ) +from backend.data.execution import ExecutionContext from backend.data.model import SchemaField +from backend.util.file import store_media_file from backend.util.request import ClientResponseError, Requests +from backend.util.type import MediaFileType logger = logging.getLogger(__name__) @@ -64,9 +67,13 @@ class AIVideoGeneratorBlock(Block): "credentials": TEST_CREDENTIALS_INPUT, }, test_credentials=TEST_CREDENTIALS, - test_output=[("video_url", "https://fal.media/files/example/video.mp4")], + test_output=[ + # Output will be a workspace ref or data URI depending on context + ("video_url", lambda x: x.startswith(("workspace://", "data:"))), + ], test_mock={ - "generate_video": lambda *args, **kwargs: "https://fal.media/files/example/video.mp4" + # Use data URI to avoid HTTP requests during tests + "generate_video": lambda *args, **kwargs: "data:video/mp4;base64,AAAA" }, ) @@ -208,11 +215,22 @@ class AIVideoGeneratorBlock(Block): raise RuntimeError(f"API request failed: {str(e)}") async def run( - self, input_data: Input, *, credentials: FalCredentials, **kwargs + self, + input_data: Input, + *, + credentials: FalCredentials, + execution_context: ExecutionContext, + **kwargs, ) -> BlockOutput: try: video_url = await self.generate_video(input_data, credentials) - yield "video_url", video_url + # Store the generated video to the user's workspace for persistence + stored_url = await store_media_file( + file=MediaFileType(video_url), + execution_context=execution_context, + return_format="for_block_output", + ) + yield "video_url", stored_url except Exception as e: error_message = str(e) yield "error", error_message diff --git a/autogpt_platform/backend/backend/blocks/flux_kontext.py b/autogpt_platform/backend/backend/blocks/flux_kontext.py index dd8375c4ce..d56baa6d92 100644 --- a/autogpt_platform/backend/backend/blocks/flux_kontext.py +++ b/autogpt_platform/backend/backend/blocks/flux_kontext.py @@ -12,6 +12,7 @@ from backend.data.block import ( BlockSchemaInput, BlockSchemaOutput, ) +from backend.data.execution import ExecutionContext from backend.data.model import ( APIKeyCredentials, CredentialsField, @@ -121,10 +122,12 @@ class AIImageEditorBlock(Block): "credentials": TEST_CREDENTIALS_INPUT, }, test_output=[ - ("output_image", "https://replicate.com/output/edited-image.png"), + # Output will be a workspace ref or data URI depending on context + ("output_image", lambda x: x.startswith(("workspace://", "data:"))), ], test_mock={ - "run_model": lambda *args, **kwargs: "https://replicate.com/output/edited-image.png", + # Use data URI to avoid HTTP requests during tests + "run_model": lambda *args, **kwargs: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==", }, test_credentials=TEST_CREDENTIALS, ) @@ 
-134,8 +137,7 @@ class AIImageEditorBlock(Block): input_data: Input, *, credentials: APIKeyCredentials, - graph_exec_id: str, - user_id: str, + execution_context: ExecutionContext, **kwargs, ) -> BlockOutput: result = await self.run_model( @@ -144,20 +146,25 @@ class AIImageEditorBlock(Block): prompt=input_data.prompt, input_image_b64=( await store_media_file( - graph_exec_id=graph_exec_id, file=input_data.input_image, - user_id=user_id, - return_content=True, + execution_context=execution_context, + return_format="for_external_api", # Get content for Replicate API ) if input_data.input_image else None ), aspect_ratio=input_data.aspect_ratio.value, seed=input_data.seed, - user_id=user_id, - graph_exec_id=graph_exec_id, + user_id=execution_context.user_id or "", + graph_exec_id=execution_context.graph_exec_id or "", ) - yield "output_image", result + # Store the generated image to the user's workspace for persistence + stored_url = await store_media_file( + file=result, + execution_context=execution_context, + return_format="for_block_output", + ) + yield "output_image", stored_url async def run_model( self, diff --git a/autogpt_platform/backend/backend/blocks/google/gmail.py b/autogpt_platform/backend/backend/blocks/google/gmail.py index d1b3ecd4bf..2040cabe3f 100644 --- a/autogpt_platform/backend/backend/blocks/google/gmail.py +++ b/autogpt_platform/backend/backend/blocks/google/gmail.py @@ -21,6 +21,7 @@ from backend.data.block import ( BlockSchemaInput, BlockSchemaOutput, ) +from backend.data.execution import ExecutionContext from backend.data.model import SchemaField from backend.util.file import MediaFileType, get_exec_file_path, store_media_file from backend.util.settings import Settings @@ -95,8 +96,7 @@ def _make_mime_text( async def create_mime_message( input_data, - graph_exec_id: str, - user_id: str, + execution_context: ExecutionContext, ) -> str: """Create a MIME message with attachments and return base64-encoded raw message.""" @@ -117,12 +117,12 @@ async def create_mime_message( if input_data.attachments: for attach in input_data.attachments: local_path = await store_media_file( - user_id=user_id, - graph_exec_id=graph_exec_id, file=attach, - return_content=False, + execution_context=execution_context, + return_format="for_local_processing", ) - abs_path = get_exec_file_path(graph_exec_id, local_path) + assert execution_context.graph_exec_id # Validated by store_media_file + abs_path = get_exec_file_path(execution_context.graph_exec_id, local_path) part = MIMEBase("application", "octet-stream") with open(abs_path, "rb") as f: part.set_payload(f.read()) @@ -582,27 +582,25 @@ class GmailSendBlock(GmailBase): input_data: Input, *, credentials: GoogleCredentials, - graph_exec_id: str, - user_id: str, + execution_context: ExecutionContext, **kwargs, ) -> BlockOutput: service = self._build_service(credentials, **kwargs) result = await self._send_email( service, input_data, - graph_exec_id, - user_id, + execution_context, ) yield "result", result async def _send_email( - self, service, input_data: Input, graph_exec_id: str, user_id: str + self, service, input_data: Input, execution_context: ExecutionContext ) -> dict: if not input_data.to or not input_data.subject or not input_data.body: raise ValueError( "At least one recipient, subject, and body are required for sending an email" ) - raw_message = await create_mime_message(input_data, graph_exec_id, user_id) + raw_message = await create_mime_message(input_data, execution_context) sent_message = await asyncio.to_thread( lambda: 
service.users() .messages() @@ -692,30 +690,28 @@ class GmailCreateDraftBlock(GmailBase): input_data: Input, *, credentials: GoogleCredentials, - graph_exec_id: str, - user_id: str, + execution_context: ExecutionContext, **kwargs, ) -> BlockOutput: service = self._build_service(credentials, **kwargs) result = await self._create_draft( service, input_data, - graph_exec_id, - user_id, + execution_context, ) yield "result", GmailDraftResult( id=result["id"], message_id=result["message"]["id"], status="draft_created" ) async def _create_draft( - self, service, input_data: Input, graph_exec_id: str, user_id: str + self, service, input_data: Input, execution_context: ExecutionContext ) -> dict: if not input_data.to or not input_data.subject: raise ValueError( "At least one recipient and subject are required for creating a draft" ) - raw_message = await create_mime_message(input_data, graph_exec_id, user_id) + raw_message = await create_mime_message(input_data, execution_context) draft = await asyncio.to_thread( lambda: service.users() .drafts() @@ -1100,7 +1096,7 @@ class GmailGetThreadBlock(GmailBase): async def _build_reply_message( - service, input_data, graph_exec_id: str, user_id: str + service, input_data, execution_context: ExecutionContext ) -> tuple[str, str]: """ Builds a reply MIME message for Gmail threads. @@ -1190,12 +1186,12 @@ async def _build_reply_message( # Handle attachments for attach in input_data.attachments: local_path = await store_media_file( - user_id=user_id, - graph_exec_id=graph_exec_id, file=attach, - return_content=False, + execution_context=execution_context, + return_format="for_local_processing", ) - abs_path = get_exec_file_path(graph_exec_id, local_path) + assert execution_context.graph_exec_id # Validated by store_media_file + abs_path = get_exec_file_path(execution_context.graph_exec_id, local_path) part = MIMEBase("application", "octet-stream") with open(abs_path, "rb") as f: part.set_payload(f.read()) @@ -1311,16 +1307,14 @@ class GmailReplyBlock(GmailBase): input_data: Input, *, credentials: GoogleCredentials, - graph_exec_id: str, - user_id: str, + execution_context: ExecutionContext, **kwargs, ) -> BlockOutput: service = self._build_service(credentials, **kwargs) message = await self._reply( service, input_data, - graph_exec_id, - user_id, + execution_context, ) yield "messageId", message["id"] yield "threadId", message.get("threadId", input_data.threadId) @@ -1343,11 +1337,11 @@ class GmailReplyBlock(GmailBase): yield "email", email async def _reply( - self, service, input_data: Input, graph_exec_id: str, user_id: str + self, service, input_data: Input, execution_context: ExecutionContext ) -> dict: # Build the reply message using the shared helper raw, thread_id = await _build_reply_message( - service, input_data, graph_exec_id, user_id + service, input_data, execution_context ) # Send the message @@ -1441,16 +1435,14 @@ class GmailDraftReplyBlock(GmailBase): input_data: Input, *, credentials: GoogleCredentials, - graph_exec_id: str, - user_id: str, + execution_context: ExecutionContext, **kwargs, ) -> BlockOutput: service = self._build_service(credentials, **kwargs) draft = await self._create_draft_reply( service, input_data, - graph_exec_id, - user_id, + execution_context, ) yield "draftId", draft["id"] yield "messageId", draft["message"]["id"] @@ -1458,11 +1450,11 @@ class GmailDraftReplyBlock(GmailBase): yield "status", "draft_created" async def _create_draft_reply( - self, service, input_data: Input, graph_exec_id: str, user_id: str + self, 
service, input_data: Input, execution_context: ExecutionContext ) -> dict: # Build the reply message using the shared helper raw, thread_id = await _build_reply_message( - service, input_data, graph_exec_id, user_id + service, input_data, execution_context ) # Create draft with proper thread association @@ -1629,23 +1621,21 @@ class GmailForwardBlock(GmailBase): input_data: Input, *, credentials: GoogleCredentials, - graph_exec_id: str, - user_id: str, + execution_context: ExecutionContext, **kwargs, ) -> BlockOutput: service = self._build_service(credentials, **kwargs) result = await self._forward_message( service, input_data, - graph_exec_id, - user_id, + execution_context, ) yield "messageId", result["id"] yield "threadId", result.get("threadId", "") yield "status", "forwarded" async def _forward_message( - self, service, input_data: Input, graph_exec_id: str, user_id: str + self, service, input_data: Input, execution_context: ExecutionContext ) -> dict: if not input_data.to: raise ValueError("At least one recipient is required for forwarding") @@ -1727,12 +1717,12 @@ To: {original_to} # Add any additional attachments for attach in input_data.additionalAttachments: local_path = await store_media_file( - user_id=user_id, - graph_exec_id=graph_exec_id, file=attach, - return_content=False, + execution_context=execution_context, + return_format="for_local_processing", ) - abs_path = get_exec_file_path(graph_exec_id, local_path) + assert execution_context.graph_exec_id # Validated by store_media_file + abs_path = get_exec_file_path(execution_context.graph_exec_id, local_path) part = MIMEBase("application", "octet-stream") with open(abs_path, "rb") as f: part.set_payload(f.read()) diff --git a/autogpt_platform/backend/backend/blocks/http.py b/autogpt_platform/backend/backend/blocks/http.py index 9b27a3b129..77e7fe243f 100644 --- a/autogpt_platform/backend/backend/blocks/http.py +++ b/autogpt_platform/backend/backend/blocks/http.py @@ -15,6 +15,7 @@ from backend.data.block import ( BlockSchemaInput, BlockSchemaOutput, ) +from backend.data.execution import ExecutionContext from backend.data.model import ( CredentialsField, CredentialsMetaInput, @@ -116,10 +117,9 @@ class SendWebRequestBlock(Block): @staticmethod async def _prepare_files( - graph_exec_id: str, + execution_context: ExecutionContext, files_name: str, files: list[MediaFileType], - user_id: str, ) -> list[tuple[str, tuple[str, BytesIO, str]]]: """ Prepare files for the request by storing them and reading their content. 
@@ -127,11 +127,16 @@ class SendWebRequestBlock(Block): (files_name, (filename, BytesIO, mime_type)) """ files_payload: list[tuple[str, tuple[str, BytesIO, str]]] = [] + graph_exec_id = execution_context.graph_exec_id + if graph_exec_id is None: + raise ValueError("graph_exec_id is required for file operations") for media in files: # Normalise to a list so we can repeat the same key rel_path = await store_media_file( - graph_exec_id, media, user_id, return_content=False + file=media, + execution_context=execution_context, + return_format="for_local_processing", ) abs_path = get_exec_file_path(graph_exec_id, rel_path) async with aiofiles.open(abs_path, "rb") as f: @@ -143,7 +148,7 @@ class SendWebRequestBlock(Block): return files_payload async def run( - self, input_data: Input, *, graph_exec_id: str, user_id: str, **kwargs + self, input_data: Input, *, execution_context: ExecutionContext, **kwargs ) -> BlockOutput: # ─── Parse/normalise body ──────────────────────────────────── body = input_data.body @@ -174,7 +179,7 @@ class SendWebRequestBlock(Block): files_payload: list[tuple[str, tuple[str, BytesIO, str]]] = [] if use_files: files_payload = await self._prepare_files( - graph_exec_id, input_data.files_name, input_data.files, user_id + execution_context, input_data.files_name, input_data.files ) # Enforce body format rules @@ -238,9 +243,8 @@ class SendAuthenticatedWebRequestBlock(SendWebRequestBlock): self, input_data: Input, *, - graph_exec_id: str, + execution_context: ExecutionContext, credentials: HostScopedCredentials, - user_id: str, **kwargs, ) -> BlockOutput: # Create SendWebRequestBlock.Input from our input (removing credentials field) @@ -271,6 +275,6 @@ class SendAuthenticatedWebRequestBlock(SendWebRequestBlock): # Use parent class run method async for output_name, output_data in super().run( - base_input, graph_exec_id=graph_exec_id, user_id=user_id, **kwargs + base_input, execution_context=execution_context, **kwargs ): yield output_name, output_data diff --git a/autogpt_platform/backend/backend/blocks/io.py b/autogpt_platform/backend/backend/blocks/io.py index 6f8e62e339..a9c3859490 100644 --- a/autogpt_platform/backend/backend/blocks/io.py +++ b/autogpt_platform/backend/backend/blocks/io.py @@ -12,6 +12,7 @@ from backend.data.block import ( BlockSchemaInput, BlockType, ) +from backend.data.execution import ExecutionContext from backend.data.model import SchemaField from backend.util.file import store_media_file from backend.util.mock import MockObject @@ -462,18 +463,21 @@ class AgentFileInputBlock(AgentInputBlock): self, input_data: Input, *, - graph_exec_id: str, - user_id: str, + execution_context: ExecutionContext, **kwargs, ) -> BlockOutput: if not input_data.value: return + # Determine return format based on user preference + # for_external_api: always returns data URI (base64) - honors "Produce Base64 Output" + # for_block_output: smart format - workspace:// in CoPilot, data URI in graphs + return_format = "for_external_api" if input_data.base_64 else "for_block_output" + yield "result", await store_media_file( - graph_exec_id=graph_exec_id, file=input_data.value, - user_id=user_id, - return_content=input_data.base_64, + execution_context=execution_context, + return_format=return_format, ) diff --git a/autogpt_platform/backend/backend/blocks/media.py b/autogpt_platform/backend/backend/blocks/media.py index c8d4b4768f..a8d145bc64 100644 --- a/autogpt_platform/backend/backend/blocks/media.py +++ b/autogpt_platform/backend/backend/blocks/media.py @@ -1,6 +1,6 @@ 
import os import tempfile -from typing import Literal, Optional +from typing import Optional from moviepy.audio.io.AudioFileClip import AudioFileClip from moviepy.video.fx.Loop import Loop @@ -13,6 +13,7 @@ from backend.data.block import ( BlockSchemaInput, BlockSchemaOutput, ) +from backend.data.execution import ExecutionContext from backend.data.model import SchemaField from backend.util.file import MediaFileType, get_exec_file_path, store_media_file @@ -46,18 +47,19 @@ class MediaDurationBlock(Block): self, input_data: Input, *, - graph_exec_id: str, - user_id: str, + execution_context: ExecutionContext, **kwargs, ) -> BlockOutput: # 1) Store the input media locally local_media_path = await store_media_file( - graph_exec_id=graph_exec_id, file=input_data.media_in, - user_id=user_id, - return_content=False, + execution_context=execution_context, + return_format="for_local_processing", + ) + assert execution_context.graph_exec_id is not None + media_abspath = get_exec_file_path( + execution_context.graph_exec_id, local_media_path ) - media_abspath = get_exec_file_path(graph_exec_id, local_media_path) # 2) Load the clip if input_data.is_video: @@ -88,10 +90,6 @@ class LoopVideoBlock(Block): default=None, ge=1, ) - output_return_type: Literal["file_path", "data_uri"] = SchemaField( - description="How to return the output video. Either a relative path or base64 data URI.", - default="file_path", - ) class Output(BlockSchemaOutput): video_out: str = SchemaField( @@ -111,17 +109,19 @@ class LoopVideoBlock(Block): self, input_data: Input, *, - node_exec_id: str, - graph_exec_id: str, - user_id: str, + execution_context: ExecutionContext, **kwargs, ) -> BlockOutput: + assert execution_context.graph_exec_id is not None + assert execution_context.node_exec_id is not None + graph_exec_id = execution_context.graph_exec_id + node_exec_id = execution_context.node_exec_id + # 1) Store the input video locally local_video_path = await store_media_file( - graph_exec_id=graph_exec_id, file=input_data.video_in, - user_id=user_id, - return_content=False, + execution_context=execution_context, + return_format="for_local_processing", ) input_abspath = get_exec_file_path(graph_exec_id, local_video_path) @@ -149,12 +149,11 @@ class LoopVideoBlock(Block): looped_clip = looped_clip.with_audio(clip.audio) looped_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac") - # Return as data URI + # Return output - for_block_output returns workspace:// if available, else data URI video_out = await store_media_file( - graph_exec_id=graph_exec_id, file=output_filename, - user_id=user_id, - return_content=input_data.output_return_type == "data_uri", + execution_context=execution_context, + return_format="for_block_output", ) yield "video_out", video_out @@ -177,10 +176,6 @@ class AddAudioToVideoBlock(Block): description="Volume scale for the newly attached audio track (1.0 = original).", default=1.0, ) - output_return_type: Literal["file_path", "data_uri"] = SchemaField( - description="Return the final output as a relative path or base64 data URI.", - default="file_path", - ) class Output(BlockSchemaOutput): video_out: MediaFileType = SchemaField( @@ -200,23 +195,24 @@ class AddAudioToVideoBlock(Block): self, input_data: Input, *, - node_exec_id: str, - graph_exec_id: str, - user_id: str, + execution_context: ExecutionContext, **kwargs, ) -> BlockOutput: + assert execution_context.graph_exec_id is not None + assert execution_context.node_exec_id is not None + graph_exec_id = execution_context.graph_exec_id 
+ node_exec_id = execution_context.node_exec_id + # 1) Store the inputs locally local_video_path = await store_media_file( - graph_exec_id=graph_exec_id, file=input_data.video_in, - user_id=user_id, - return_content=False, + execution_context=execution_context, + return_format="for_local_processing", ) local_audio_path = await store_media_file( - graph_exec_id=graph_exec_id, file=input_data.audio_in, - user_id=user_id, - return_content=False, + execution_context=execution_context, + return_format="for_local_processing", ) abs_temp_dir = os.path.join(tempfile.gettempdir(), "exec_file", graph_exec_id) @@ -240,12 +236,11 @@ class AddAudioToVideoBlock(Block): output_abspath = os.path.join(abs_temp_dir, output_filename) final_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac") - # 5) Return either path or data URI + # 5) Return output - for_block_output returns workspace:// if available, else data URI video_out = await store_media_file( - graph_exec_id=graph_exec_id, file=output_filename, - user_id=user_id, - return_content=input_data.output_return_type == "data_uri", + execution_context=execution_context, + return_format="for_block_output", ) yield "video_out", video_out diff --git a/autogpt_platform/backend/backend/blocks/screenshotone.py b/autogpt_platform/backend/backend/blocks/screenshotone.py index 1f8947376b..ee998f8da2 100644 --- a/autogpt_platform/backend/backend/blocks/screenshotone.py +++ b/autogpt_platform/backend/backend/blocks/screenshotone.py @@ -11,6 +11,7 @@ from backend.data.block import ( BlockSchemaInput, BlockSchemaOutput, ) +from backend.data.execution import ExecutionContext from backend.data.model import ( APIKeyCredentials, CredentialsField, @@ -112,8 +113,7 @@ class ScreenshotWebPageBlock(Block): @staticmethod async def take_screenshot( credentials: APIKeyCredentials, - graph_exec_id: str, - user_id: str, + execution_context: ExecutionContext, url: str, viewport_width: int, viewport_height: int, @@ -155,12 +155,11 @@ class ScreenshotWebPageBlock(Block): return { "image": await store_media_file( - graph_exec_id=graph_exec_id, file=MediaFileType( f"data:image/{format.value};base64,{b64encode(content).decode('utf-8')}" ), - user_id=user_id, - return_content=True, + execution_context=execution_context, + return_format="for_block_output", ) } @@ -169,15 +168,13 @@ class ScreenshotWebPageBlock(Block): input_data: Input, *, credentials: APIKeyCredentials, - graph_exec_id: str, - user_id: str, + execution_context: ExecutionContext, **kwargs, ) -> BlockOutput: try: screenshot_data = await self.take_screenshot( credentials=credentials, - graph_exec_id=graph_exec_id, - user_id=user_id, + execution_context=execution_context, url=input_data.url, viewport_width=input_data.viewport_width, viewport_height=input_data.viewport_height, diff --git a/autogpt_platform/backend/backend/blocks/spreadsheet.py b/autogpt_platform/backend/backend/blocks/spreadsheet.py index 211aac23f4..a13f9e2f6d 100644 --- a/autogpt_platform/backend/backend/blocks/spreadsheet.py +++ b/autogpt_platform/backend/backend/blocks/spreadsheet.py @@ -7,6 +7,7 @@ from backend.data.block import ( BlockSchemaInput, BlockSchemaOutput, ) +from backend.data.execution import ExecutionContext from backend.data.model import ContributorDetails, SchemaField from backend.util.file import get_exec_file_path, store_media_file from backend.util.type import MediaFileType @@ -98,7 +99,7 @@ class ReadSpreadsheetBlock(Block): ) async def run( - self, input_data: Input, *, graph_exec_id: str, user_id: str, **_kwargs + 
self, input_data: Input, *, execution_context: ExecutionContext, **_kwargs ) -> BlockOutput: import csv from io import StringIO @@ -106,14 +107,16 @@ class ReadSpreadsheetBlock(Block): # Determine data source - prefer file_input if provided, otherwise use contents if input_data.file_input: stored_file_path = await store_media_file( - user_id=user_id, - graph_exec_id=graph_exec_id, file=input_data.file_input, - return_content=False, + execution_context=execution_context, + return_format="for_local_processing", ) # Get full file path - file_path = get_exec_file_path(graph_exec_id, stored_file_path) + assert execution_context.graph_exec_id # Validated by store_media_file + file_path = get_exec_file_path( + execution_context.graph_exec_id, stored_file_path + ) if not Path(file_path).exists(): raise ValueError(f"File does not exist: {file_path}") diff --git a/autogpt_platform/backend/backend/blocks/talking_head.py b/autogpt_platform/backend/backend/blocks/talking_head.py index 7a466bec7e..e01e3d4023 100644 --- a/autogpt_platform/backend/backend/blocks/talking_head.py +++ b/autogpt_platform/backend/backend/blocks/talking_head.py @@ -10,6 +10,7 @@ from backend.data.block import ( BlockSchemaInput, BlockSchemaOutput, ) +from backend.data.execution import ExecutionContext from backend.data.model import ( APIKeyCredentials, CredentialsField, @@ -17,7 +18,9 @@ from backend.data.model import ( SchemaField, ) from backend.integrations.providers import ProviderName +from backend.util.file import store_media_file from backend.util.request import Requests +from backend.util.type import MediaFileType TEST_CREDENTIALS = APIKeyCredentials( id="01234567-89ab-cdef-0123-456789abcdef", @@ -102,7 +105,7 @@ class CreateTalkingAvatarVideoBlock(Block): test_output=[ ( "video_url", - "https://d-id.com/api/clips/abcd1234-5678-efgh-ijkl-mnopqrstuvwx/video", + lambda x: x.startswith(("workspace://", "data:")), ), ], test_mock={ @@ -110,9 +113,10 @@ class CreateTalkingAvatarVideoBlock(Block): "id": "abcd1234-5678-efgh-ijkl-mnopqrstuvwx", "status": "created", }, + # Use data URI to avoid HTTP requests during tests "get_clip_status": lambda *args, **kwargs: { "status": "done", - "result_url": "https://d-id.com/api/clips/abcd1234-5678-efgh-ijkl-mnopqrstuvwx/video", + "result_url": "data:video/mp4;base64,AAAA", }, }, test_credentials=TEST_CREDENTIALS, @@ -138,7 +142,12 @@ class CreateTalkingAvatarVideoBlock(Block): return response.json() async def run( - self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + self, + input_data: Input, + *, + credentials: APIKeyCredentials, + execution_context: ExecutionContext, + **kwargs, ) -> BlockOutput: # Create the clip payload = { @@ -165,7 +174,14 @@ class CreateTalkingAvatarVideoBlock(Block): for _ in range(input_data.max_polling_attempts): status_response = await self.get_clip_status(credentials.api_key, clip_id) if status_response["status"] == "done": - yield "video_url", status_response["result_url"] + # Store the generated video to the user's workspace for persistence + video_url = status_response["result_url"] + stored_url = await store_media_file( + file=MediaFileType(video_url), + execution_context=execution_context, + return_format="for_block_output", + ) + yield "video_url", stored_url return elif status_response["status"] == "error": raise RuntimeError( diff --git a/autogpt_platform/backend/backend/blocks/test/test_blocks_dos_vulnerability.py b/autogpt_platform/backend/backend/blocks/test/test_blocks_dos_vulnerability.py index 389bb5c636..e2e44b194c 100644 
--- a/autogpt_platform/backend/backend/blocks/test/test_blocks_dos_vulnerability.py +++ b/autogpt_platform/backend/backend/blocks/test/test_blocks_dos_vulnerability.py @@ -12,6 +12,7 @@ from backend.blocks.iteration import StepThroughItemsBlock from backend.blocks.llm import AITextSummarizerBlock from backend.blocks.text import ExtractTextInformationBlock from backend.blocks.xml_parser import XMLParserBlock +from backend.data.execution import ExecutionContext from backend.util.file import store_media_file from backend.util.type import MediaFileType @@ -233,9 +234,12 @@ class TestStoreMediaFileSecurity: with pytest.raises(ValueError, match="File too large"): await store_media_file( - graph_exec_id="test", file=MediaFileType(large_data_uri), - user_id="test_user", + execution_context=ExecutionContext( + user_id="test_user", + graph_exec_id="test", + ), + return_format="for_local_processing", ) @patch("backend.util.file.Path") @@ -270,9 +274,12 @@ class TestStoreMediaFileSecurity: # Should raise an error when directory size exceeds limit with pytest.raises(ValueError, match="Disk usage limit exceeded"): await store_media_file( - graph_exec_id="test", file=MediaFileType( "data:text/plain;base64,dGVzdA==" ), # Small test file - user_id="test_user", + execution_context=ExecutionContext( + user_id="test_user", + graph_exec_id="test", + ), + return_format="for_local_processing", ) diff --git a/autogpt_platform/backend/backend/blocks/test/test_http.py b/autogpt_platform/backend/backend/blocks/test/test_http.py index bdc30f3ecf..e01b8e2c5b 100644 --- a/autogpt_platform/backend/backend/blocks/test/test_http.py +++ b/autogpt_platform/backend/backend/blocks/test/test_http.py @@ -11,10 +11,22 @@ from backend.blocks.http import ( HttpMethod, SendAuthenticatedWebRequestBlock, ) +from backend.data.execution import ExecutionContext from backend.data.model import HostScopedCredentials from backend.util.request import Response +def make_test_context( + graph_exec_id: str = "test-exec-id", + user_id: str = "test-user-id", +) -> ExecutionContext: + """Helper to create test ExecutionContext.""" + return ExecutionContext( + user_id=user_id, + graph_exec_id=graph_exec_id, + ) + + class TestHttpBlockWithHostScopedCredentials: """Test suite for HTTP block integration with HostScopedCredentials.""" @@ -105,8 +117,7 @@ class TestHttpBlockWithHostScopedCredentials: async for output_name, output_data in http_block.run( input_data, credentials=exact_match_credentials, - graph_exec_id="test-exec-id", - user_id="test-user-id", + execution_context=make_test_context(), ): result.append((output_name, output_data)) @@ -161,8 +172,7 @@ class TestHttpBlockWithHostScopedCredentials: async for output_name, output_data in http_block.run( input_data, credentials=wildcard_credentials, - graph_exec_id="test-exec-id", - user_id="test-user-id", + execution_context=make_test_context(), ): result.append((output_name, output_data)) @@ -208,8 +218,7 @@ class TestHttpBlockWithHostScopedCredentials: async for output_name, output_data in http_block.run( input_data, credentials=non_matching_credentials, - graph_exec_id="test-exec-id", - user_id="test-user-id", + execution_context=make_test_context(), ): result.append((output_name, output_data)) @@ -258,8 +267,7 @@ class TestHttpBlockWithHostScopedCredentials: async for output_name, output_data in http_block.run( input_data, credentials=exact_match_credentials, - graph_exec_id="test-exec-id", - user_id="test-user-id", + execution_context=make_test_context(), ): result.append((output_name, 
output_data)) @@ -318,8 +326,7 @@ class TestHttpBlockWithHostScopedCredentials: async for output_name, output_data in http_block.run( input_data, credentials=auto_discovered_creds, # Execution manager found these - graph_exec_id="test-exec-id", - user_id="test-user-id", + execution_context=make_test_context(), ): result.append((output_name, output_data)) @@ -382,8 +389,7 @@ class TestHttpBlockWithHostScopedCredentials: async for output_name, output_data in http_block.run( input_data, credentials=multi_header_creds, - graph_exec_id="test-exec-id", - user_id="test-user-id", + execution_context=make_test_context(), ): result.append((output_name, output_data)) @@ -471,8 +477,7 @@ class TestHttpBlockWithHostScopedCredentials: async for output_name, output_data in http_block.run( input_data, credentials=test_creds, - graph_exec_id="test-exec-id", - user_id="test-user-id", + execution_context=make_test_context(), ): result.append((output_name, output_data)) diff --git a/autogpt_platform/backend/backend/blocks/text.py b/autogpt_platform/backend/backend/blocks/text.py index 5e58e27101..359e22a84f 100644 --- a/autogpt_platform/backend/backend/blocks/text.py +++ b/autogpt_platform/backend/backend/blocks/text.py @@ -11,6 +11,7 @@ from backend.data.block import ( BlockSchemaInput, BlockSchemaOutput, ) +from backend.data.execution import ExecutionContext from backend.data.model import SchemaField from backend.util import json, text from backend.util.file import get_exec_file_path, store_media_file @@ -444,18 +445,21 @@ class FileReadBlock(Block): ) async def run( - self, input_data: Input, *, graph_exec_id: str, user_id: str, **_kwargs + self, input_data: Input, *, execution_context: ExecutionContext, **_kwargs ) -> BlockOutput: # Store the media file properly (handles URLs, data URIs, etc.) 
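# Note on the new return_format contract (MediaReturnFormat in backend/util/file.py):
#   "for_local_processing" -> relative path under {tmp}/exec_file/{graph_exec_id}
#   "for_external_api"     -> base64 data URI for handing content to external services
#   "for_block_output"     -> workspace:// reference in CoPilot sessions, else data URI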
stored_file_path = await store_media_file( - user_id=user_id, - graph_exec_id=graph_exec_id, file=input_data.file_input, - return_content=False, + execution_context=execution_context, + return_format="for_local_processing", ) - # Get full file path - file_path = get_exec_file_path(graph_exec_id, stored_file_path) + # Get full file path (graph_exec_id validated by store_media_file above) + if not execution_context.graph_exec_id: + raise ValueError("execution_context.graph_exec_id is required") + file_path = get_exec_file_path( + execution_context.graph_exec_id, stored_file_path + ) if not Path(file_path).exists(): raise ValueError(f"File does not exist: {file_path}") diff --git a/autogpt_platform/backend/backend/data/execution.py b/autogpt_platform/backend/backend/data/execution.py index 3c1fd25c51..afb8c70538 100644 --- a/autogpt_platform/backend/backend/data/execution.py +++ b/autogpt_platform/backend/backend/data/execution.py @@ -83,12 +83,29 @@ class ExecutionContext(BaseModel): model_config = {"extra": "ignore"} + # Execution identity + user_id: Optional[str] = None + graph_id: Optional[str] = None + graph_exec_id: Optional[str] = None + graph_version: Optional[int] = None + node_id: Optional[str] = None + node_exec_id: Optional[str] = None + + # Safety settings human_in_the_loop_safe_mode: bool = True sensitive_action_safe_mode: bool = False + + # User settings user_timezone: str = "UTC" + + # Execution hierarchy root_execution_id: Optional[str] = None parent_execution_id: Optional[str] = None + # Workspace + workspace_id: Optional[str] = None + session_id: Optional[str] = None + # -------------------------- Models -------------------------- # diff --git a/autogpt_platform/backend/backend/data/workspace.py b/autogpt_platform/backend/backend/data/workspace.py new file mode 100644 index 0000000000..f3dba0a294 --- /dev/null +++ b/autogpt_platform/backend/backend/data/workspace.py @@ -0,0 +1,276 @@ +""" +Database CRUD operations for User Workspace. + +This module provides functions for managing user workspaces and workspace files. +""" + +import logging +from datetime import datetime, timezone +from typing import Optional + +from prisma.models import UserWorkspace, UserWorkspaceFile +from prisma.types import UserWorkspaceFileWhereInput + +from backend.util.json import SafeJson + +logger = logging.getLogger(__name__) + + +async def get_or_create_workspace(user_id: str) -> UserWorkspace: + """ + Get user's workspace, creating one if it doesn't exist. + + Uses upsert to handle race conditions when multiple concurrent requests + attempt to create a workspace for the same user. + + Args: + user_id: The user's ID + + Returns: + UserWorkspace instance + """ + workspace = await UserWorkspace.prisma().upsert( + where={"userId": user_id}, + data={ + "create": {"userId": user_id}, + "update": {}, # No updates needed if exists + }, + ) + + return workspace + + +async def get_workspace(user_id: str) -> Optional[UserWorkspace]: + """ + Get user's workspace if it exists. + + Args: + user_id: The user's ID + + Returns: + UserWorkspace instance or None + """ + return await UserWorkspace.prisma().find_unique(where={"userId": user_id}) + + +async def create_workspace_file( + workspace_id: str, + file_id: str, + name: str, + path: str, + storage_path: str, + mime_type: str, + size_bytes: int, + checksum: Optional[str] = None, + metadata: Optional[dict] = None, +) -> UserWorkspaceFile: + """ + Create a new workspace file record. 
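
Every identity field on the expanded `ExecutionContext` is optional, so bare `ExecutionContext()` construction keeps working; `workspace_id` and `session_id` are only populated for CoPilot-driven runs. A minimal sketch with placeholder IDs:

```python
from backend.data.execution import ExecutionContext

# Placeholder IDs for illustration only.
ctx = ExecutionContext(
    user_id="user-123",
    graph_id="graph-abc",
    graph_exec_id="exec-xyz",
    graph_version=1,
    # workspace_id / session_id stay None outside CoPilot sessions, which is
    # what makes store_media_file fall back to data URIs for block output.
)
assert ctx.user_timezone == "UTC"       # default user setting
assert ctx.human_in_the_loop_safe_mode  # safe mode defaults to on
assert ctx.workspace_id is None
```
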
+ + Args: + workspace_id: The workspace ID + file_id: The file ID (same as used in storage path for consistency) + name: User-visible filename + path: Virtual path (e.g., "/documents/report.pdf") + storage_path: Actual storage path (GCS or local) + mime_type: MIME type of the file + size_bytes: File size in bytes + checksum: Optional SHA256 checksum + metadata: Optional additional metadata + + Returns: + Created UserWorkspaceFile instance + """ + # Normalize path to start with / + if not path.startswith("/"): + path = f"/{path}" + + file = await UserWorkspaceFile.prisma().create( + data={ + "id": file_id, + "workspaceId": workspace_id, + "name": name, + "path": path, + "storagePath": storage_path, + "mimeType": mime_type, + "sizeBytes": size_bytes, + "checksum": checksum, + "metadata": SafeJson(metadata or {}), + } + ) + + logger.info( + f"Created workspace file {file.id} at path {path} " + f"in workspace {workspace_id}" + ) + return file + + +async def get_workspace_file( + file_id: str, + workspace_id: Optional[str] = None, +) -> Optional[UserWorkspaceFile]: + """ + Get a workspace file by ID. + + Args: + file_id: The file ID + workspace_id: Optional workspace ID for validation + + Returns: + UserWorkspaceFile instance or None + """ + where_clause: dict = {"id": file_id, "isDeleted": False} + if workspace_id: + where_clause["workspaceId"] = workspace_id + + return await UserWorkspaceFile.prisma().find_first(where=where_clause) + + +async def get_workspace_file_by_path( + workspace_id: str, + path: str, +) -> Optional[UserWorkspaceFile]: + """ + Get a workspace file by its virtual path. + + Args: + workspace_id: The workspace ID + path: Virtual path + + Returns: + UserWorkspaceFile instance or None + """ + # Normalize path + if not path.startswith("/"): + path = f"/{path}" + + return await UserWorkspaceFile.prisma().find_first( + where={ + "workspaceId": workspace_id, + "path": path, + "isDeleted": False, + } + ) + + +async def list_workspace_files( + workspace_id: str, + path_prefix: Optional[str] = None, + include_deleted: bool = False, + limit: Optional[int] = None, + offset: int = 0, +) -> list[UserWorkspaceFile]: + """ + List files in a workspace. + + Args: + workspace_id: The workspace ID + path_prefix: Optional path prefix to filter (e.g., "/documents/") + include_deleted: Whether to include soft-deleted files + limit: Maximum number of files to return + offset: Number of files to skip + + Returns: + List of UserWorkspaceFile instances + """ + where_clause: UserWorkspaceFileWhereInput = {"workspaceId": workspace_id} + + if not include_deleted: + where_clause["isDeleted"] = False + + if path_prefix: + # Normalize prefix + if not path_prefix.startswith("/"): + path_prefix = f"/{path_prefix}" + where_clause["path"] = {"startswith": path_prefix} + + return await UserWorkspaceFile.prisma().find_many( + where=where_clause, + order={"createdAt": "desc"}, + take=limit, + skip=offset, + ) + + +async def count_workspace_files( + workspace_id: str, + path_prefix: Optional[str] = None, + include_deleted: bool = False, +) -> int: + """ + Count files in a workspace. 
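
Since `path_prefix` is a plain `startswith` filter over normalized paths, callers can scope queries to a folder. A usage sketch (live Prisma client assumed; IDs are placeholders):

```python
from backend.data.workspace import count_workspace_files, list_workspace_files


async def session_file_summary(workspace_id: str, session_id: str):
    # Stored paths are normalized to a leading "/", so session-scoped files
    # live under "/sessions/{session_id}/" and match a simple prefix filter.
    prefix = f"/sessions/{session_id}/"
    files = await list_workspace_files(
        workspace_id=workspace_id,
        path_prefix=prefix,
        limit=100,
    )
    total = await count_workspace_files(workspace_id, path_prefix=prefix)
    return files, total
```
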
+ + Args: + workspace_id: The workspace ID + path_prefix: Optional path prefix to filter (e.g., "/sessions/abc123/") + include_deleted: Whether to include soft-deleted files + + Returns: + Number of files + """ + where_clause: dict = {"workspaceId": workspace_id} + if not include_deleted: + where_clause["isDeleted"] = False + + if path_prefix: + # Normalize prefix + if not path_prefix.startswith("/"): + path_prefix = f"/{path_prefix}" + where_clause["path"] = {"startswith": path_prefix} + + return await UserWorkspaceFile.prisma().count(where=where_clause) + + +async def soft_delete_workspace_file( + file_id: str, + workspace_id: Optional[str] = None, +) -> Optional[UserWorkspaceFile]: + """ + Soft-delete a workspace file. + + The path is modified to include a deletion timestamp to free up the original + path for new files while preserving the record for potential recovery. + + Args: + file_id: The file ID + workspace_id: Optional workspace ID for validation + + Returns: + Updated UserWorkspaceFile instance or None if not found + """ + # First verify the file exists and belongs to workspace + file = await get_workspace_file(file_id, workspace_id) + if file is None: + return None + + deleted_at = datetime.now(timezone.utc) + # Modify path to free up the unique constraint for new files at original path + # Format: {original_path}__deleted__{timestamp} + deleted_path = f"{file.path}__deleted__{int(deleted_at.timestamp())}" + + updated = await UserWorkspaceFile.prisma().update( + where={"id": file_id}, + data={ + "isDeleted": True, + "deletedAt": deleted_at, + "path": deleted_path, + }, + ) + + logger.info(f"Soft-deleted workspace file {file_id}") + return updated + + +async def get_workspace_total_size(workspace_id: str) -> int: + """ + Get the total size of all files in a workspace. 
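
The `__deleted__` path rewrite is what lets a new file reuse a path immediately after deletion. A sketch of that flow (placeholder arguments, live database assumed):

```python
from backend.data.workspace import (
    get_workspace_file_by_path,
    soft_delete_workspace_file,
)


async def free_path(workspace_id: str, path: str) -> None:
    # After soft-deletion the record's path becomes
    # "{path}__deleted__{unix_timestamp}", releasing the original path
    # for a subsequent create without losing the row for recovery.
    existing = await get_workspace_file_by_path(workspace_id, path)
    if existing is not None:
        await soft_delete_workspace_file(existing.id, workspace_id)
```
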
+ + Args: + workspace_id: The workspace ID + + Returns: + Total size in bytes + """ + files = await list_workspace_files(workspace_id) + return sum(file.sizeBytes for file in files) diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py index 39d4f984eb..8362dae828 100644 --- a/autogpt_platform/backend/backend/executor/manager.py +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -236,7 +236,14 @@ async def execute_node( input_size = len(input_data_str) log_metadata.debug("Executed node with input", input=input_data_str) + # Create node-specific execution context to avoid race conditions + # (multiple nodes can execute concurrently and would otherwise mutate shared state) + execution_context = execution_context.model_copy( + update={"node_id": node_id, "node_exec_id": node_exec_id} + ) + # Inject extra execution arguments for the blocks via kwargs + # Keep individual kwargs for backwards compatibility with existing blocks extra_exec_kwargs: dict = { "graph_id": graph_id, "graph_version": graph_version, diff --git a/autogpt_platform/backend/backend/executor/utils.py b/autogpt_platform/backend/backend/executor/utils.py index f35bebb125..fa264c30a7 100644 --- a/autogpt_platform/backend/backend/executor/utils.py +++ b/autogpt_platform/backend/backend/executor/utils.py @@ -892,11 +892,19 @@ async def add_graph_execution( settings = await gdb.get_graph_settings(user_id=user_id, graph_id=graph_id) execution_context = ExecutionContext( + # Execution identity + user_id=user_id, + graph_id=graph_id, + graph_exec_id=graph_exec.id, + graph_version=graph_exec.graph_version, + # Safety settings human_in_the_loop_safe_mode=settings.human_in_the_loop_safe_mode, sensitive_action_safe_mode=settings.sensitive_action_safe_mode, + # User settings user_timezone=( user.timezone if user.timezone != USER_TIMEZONE_NOT_SET else "UTC" ), + # Execution hierarchy root_execution_id=graph_exec.id, ) diff --git a/autogpt_platform/backend/backend/executor/utils_test.py b/autogpt_platform/backend/backend/executor/utils_test.py index 4761a18c63..db33249583 100644 --- a/autogpt_platform/backend/backend/executor/utils_test.py +++ b/autogpt_platform/backend/backend/executor/utils_test.py @@ -348,6 +348,7 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture): mock_graph_exec.id = "execution-id-123" mock_graph_exec.node_executions = [] # Add this to avoid AttributeError mock_graph_exec.status = ExecutionStatus.QUEUED # Required for race condition check + mock_graph_exec.graph_version = graph_version mock_graph_exec.to_graph_execution_entry.return_value = mocker.MagicMock() # Mock the queue and event bus @@ -434,6 +435,9 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture): # Create a second mock execution for the sanity check mock_graph_exec_2 = mocker.MagicMock(spec=GraphExecutionWithNodes) mock_graph_exec_2.id = "execution-id-456" + mock_graph_exec_2.node_executions = [] + mock_graph_exec_2.status = ExecutionStatus.QUEUED + mock_graph_exec_2.graph_version = graph_version mock_graph_exec_2.to_graph_execution_entry.return_value = mocker.MagicMock() # Reset mocks and set up for second call @@ -614,6 +618,7 @@ async def test_add_graph_execution_with_nodes_to_skip(mocker: MockerFixture): mock_graph_exec.id = "execution-id-123" mock_graph_exec.node_executions = [] mock_graph_exec.status = ExecutionStatus.QUEUED # Required for race condition check + mock_graph_exec.graph_version = graph_version # Track what's passed 
to to_graph_execution_entry captured_kwargs = {} diff --git a/autogpt_platform/backend/backend/util/cloud_storage.py b/autogpt_platform/backend/backend/util/cloud_storage.py index 93fb9039ec..28423d003d 100644 --- a/autogpt_platform/backend/backend/util/cloud_storage.py +++ b/autogpt_platform/backend/backend/util/cloud_storage.py @@ -13,6 +13,7 @@ import aiohttp from gcloud.aio import storage as async_gcs_storage from google.cloud import storage as gcs_storage +from backend.util.gcs_utils import download_with_fresh_session, generate_signed_url from backend.util.settings import Config logger = logging.getLogger(__name__) @@ -251,7 +252,7 @@ class CloudStorageHandler: f"in_task: {current_task is not None}" ) - # Parse bucket and blob name from path + # Parse bucket and blob name from path (path already has gcs:// prefix removed) parts = path.split("/", 1) if len(parts) != 2: raise ValueError(f"Invalid GCS path: {path}") @@ -261,50 +262,19 @@ class CloudStorageHandler: # Authorization check self._validate_file_access(blob_name, user_id, graph_exec_id) - # Use a fresh client for each download to avoid session issues - # This is less efficient but more reliable with the executor's event loop - logger.info("[CloudStorage] Creating fresh GCS client for download") - - # Create a new session specifically for this download - session = aiohttp.ClientSession( - connector=aiohttp.TCPConnector(limit=10, force_close=True) + logger.info( + f"[CloudStorage] About to download from GCS - bucket: {bucket_name}, blob: {blob_name}" ) - async_client = None try: - # Create a new GCS client with the fresh session - async_client = async_gcs_storage.Storage(session=session) - - logger.info( - f"[CloudStorage] About to download from GCS - bucket: {bucket_name}, blob: {blob_name}" - ) - - # Download content using the fresh client - content = await async_client.download(bucket_name, blob_name) + content = await download_with_fresh_session(bucket_name, blob_name) logger.info( f"[CloudStorage] GCS download successful - size: {len(content)} bytes" ) - - # Clean up - await async_client.close() - await session.close() - return content - + except FileNotFoundError: + raise except Exception as e: - # Always try to clean up - if async_client is not None: - try: - await async_client.close() - except Exception as cleanup_error: - logger.warning( - f"[CloudStorage] Error closing GCS client: {cleanup_error}" - ) - try: - await session.close() - except Exception as cleanup_error: - logger.warning(f"[CloudStorage] Error closing session: {cleanup_error}") - # Log the specific error for debugging logger.error( f"[CloudStorage] GCS download failed - error: {str(e)}, " @@ -319,10 +289,6 @@ class CloudStorageHandler: f"current_task: {current_task}, " f"bucket: {bucket_name}, blob: redacted for privacy" ) - - # Convert gcloud-aio exceptions to standard ones - if "404" in str(e) or "Not Found" in str(e): - raise FileNotFoundError(f"File not found: gcs://{path}") raise def _validate_file_access( @@ -445,8 +411,7 @@ class CloudStorageHandler: graph_exec_id: str | None = None, ) -> str: """Generate signed URL for GCS with authorization.""" - - # Parse bucket and blob name from path + # Parse bucket and blob name from path (path already has gcs:// prefix removed) parts = path.split("/", 1) if len(parts) != 2: raise ValueError(f"Invalid GCS path: {path}") @@ -456,21 +421,11 @@ class CloudStorageHandler: # Authorization check self._validate_file_access(blob_name, user_id, graph_exec_id) - # Use sync client for signed URLs since gcloud-aio 
doesn't support them sync_client = self._get_sync_gcs_client() - bucket = sync_client.bucket(bucket_name) - blob = bucket.blob(blob_name) - - # Generate signed URL asynchronously using sync client - url = await asyncio.to_thread( - blob.generate_signed_url, - version="v4", - expiration=datetime.now(timezone.utc) + timedelta(hours=expiration_hours), - method="GET", + return await generate_signed_url( + sync_client, bucket_name, blob_name, expiration_hours * 3600 ) - return url - async def delete_expired_files(self, provider: str = "gcs") -> int: """ Delete files that have passed their expiration time. diff --git a/autogpt_platform/backend/backend/util/file.py b/autogpt_platform/backend/backend/util/file.py index dc8f86ea41..baa9225629 100644 --- a/autogpt_platform/backend/backend/util/file.py +++ b/autogpt_platform/backend/backend/util/file.py @@ -5,13 +5,26 @@ import shutil import tempfile import uuid from pathlib import Path +from typing import TYPE_CHECKING, Literal from urllib.parse import urlparse from backend.util.cloud_storage import get_cloud_storage_handler from backend.util.request import Requests +from backend.util.settings import Config from backend.util.type import MediaFileType from backend.util.virus_scanner import scan_content_safe +if TYPE_CHECKING: + from backend.data.execution import ExecutionContext + +# Return format options for store_media_file +# - "for_local_processing": Returns local file path - use with ffmpeg, MoviePy, PIL, etc. +# - "for_external_api": Returns data URI (base64) - use when sending content to external APIs +# - "for_block_output": Returns best format for output - workspace:// in CoPilot, data URI in graphs +MediaReturnFormat = Literal[ + "for_local_processing", "for_external_api", "for_block_output" +] + TEMP_DIR = Path(tempfile.gettempdir()).resolve() # Maximum filename length (conservative limit for most filesystems) @@ -67,42 +80,56 @@ def clean_exec_files(graph_exec_id: str, file: str = "") -> None: async def store_media_file( - graph_exec_id: str, file: MediaFileType, - user_id: str, - return_content: bool = False, + execution_context: "ExecutionContext", + *, + return_format: MediaReturnFormat, ) -> MediaFileType: """ - Safely handle 'file' (a data URI, a URL, or a local path relative to {temp}/exec_file/{exec_id}), - placing or verifying it under: + Safely handle 'file' (a data URI, a URL, a workspace:// reference, or a local path + relative to {temp}/exec_file/{exec_id}), placing or verifying it under: {tempdir}/exec_file/{exec_id}/... - If 'return_content=True', return a data URI (data:;base64,). - Otherwise, returns the file media path relative to the exec_id folder. + For each MediaFileType input: + - Data URI: decode and store locally + - URL: download and store locally + - workspace:// reference: read from workspace, store locally + - Local path: verify it exists in exec_file directory - For each MediaFileType type: - - Data URI: - -> decode and store in a new random file in that folder - - URL: - -> download and store in that folder - - Local path: - -> interpret as relative to that folder; verify it exists - (no copying, as it's presumably already there). - We realpath-check so no symlink or '..' can escape the folder. + Return format options: + - "for_local_processing": Returns local file path - use with ffmpeg, MoviePy, PIL, etc. 
+ - "for_external_api": Returns data URI (base64) - use when sending to external APIs + - "for_block_output": Returns best format for output - workspace:// in CoPilot, data URI in graphs - - :param graph_exec_id: The unique ID of the graph execution. - :param file: Data URI, URL, or local (relative) path. - :param return_content: If True, return a data URI of the file content. - If False, return the *relative* path inside the exec_id folder. - :return: The requested result: data URI or relative path of the media. + :param file: Data URI, URL, workspace://, or local (relative) path. + :param execution_context: ExecutionContext with user_id, graph_exec_id, workspace_id. + :param return_format: What to return: "for_local_processing", "for_external_api", or "for_block_output". + :return: The requested result based on return_format. """ + # Extract values from execution_context + graph_exec_id = execution_context.graph_exec_id + user_id = execution_context.user_id + + if not graph_exec_id: + raise ValueError("execution_context.graph_exec_id is required") + if not user_id: + raise ValueError("execution_context.user_id is required") + + # Create workspace_manager if we have workspace_id (with session scoping) + # Import here to avoid circular import (file.py → workspace.py → data → blocks → file.py) + from backend.util.workspace import WorkspaceManager + + workspace_manager: WorkspaceManager | None = None + if execution_context.workspace_id: + workspace_manager = WorkspaceManager( + user_id, execution_context.workspace_id, execution_context.session_id + ) # Build base path base_path = Path(get_exec_file_path(graph_exec_id, "")) base_path.mkdir(parents=True, exist_ok=True) # Security fix: Add disk space limits to prevent DoS - MAX_FILE_SIZE = 100 * 1024 * 1024 # 100MB per file + MAX_FILE_SIZE_BYTES = Config().max_file_size_mb * 1024 * 1024 MAX_TOTAL_DISK_USAGE = 1024 * 1024 * 1024 # 1GB total per execution directory # Check total disk usage in base_path @@ -142,9 +169,57 @@ async def store_media_file( """ return str(absolute_path.relative_to(base)) - # Check if this is a cloud storage path + # Get cloud storage handler for checking cloud paths cloud_storage = await get_cloud_storage_handler() - if cloud_storage.is_cloud_path(file): + + # Track if the input came from workspace (don't re-save it) + is_from_workspace = file.startswith("workspace://") + + # Check if this is a workspace file reference + if is_from_workspace: + if workspace_manager is None: + raise ValueError( + "Workspace file reference requires workspace context. " + "This file type is only available in CoPilot sessions." 
+ ) + + # Parse workspace reference + # workspace://abc123 - by file ID + # workspace:///path/to/file.txt - by virtual path + file_ref = file[12:] # Remove "workspace://" + + if file_ref.startswith("/"): + # Path reference + workspace_content = await workspace_manager.read_file(file_ref) + file_info = await workspace_manager.get_file_info_by_path(file_ref) + filename = sanitize_filename( + file_info.name if file_info else f"{uuid.uuid4()}.bin" + ) + else: + # ID reference + workspace_content = await workspace_manager.read_file_by_id(file_ref) + file_info = await workspace_manager.get_file_info(file_ref) + filename = sanitize_filename( + file_info.name if file_info else f"{uuid.uuid4()}.bin" + ) + + try: + target_path = _ensure_inside_base(base_path / filename, base_path) + except OSError as e: + raise ValueError(f"Invalid file path '{filename}': {e}") from e + + # Check file size limit + if len(workspace_content) > MAX_FILE_SIZE_BYTES: + raise ValueError( + f"File too large: {len(workspace_content)} bytes > {MAX_FILE_SIZE_BYTES} bytes" + ) + + # Virus scan the workspace content before writing locally + await scan_content_safe(workspace_content, filename=filename) + target_path.write_bytes(workspace_content) + + # Check if this is a cloud storage path + elif cloud_storage.is_cloud_path(file): # Download from cloud storage and store locally cloud_content = await cloud_storage.retrieve_file( file, user_id=user_id, graph_exec_id=graph_exec_id @@ -159,9 +234,9 @@ async def store_media_file( raise ValueError(f"Invalid file path '{filename}': {e}") from e # Check file size limit - if len(cloud_content) > MAX_FILE_SIZE: + if len(cloud_content) > MAX_FILE_SIZE_BYTES: raise ValueError( - f"File too large: {len(cloud_content)} bytes > {MAX_FILE_SIZE} bytes" + f"File too large: {len(cloud_content)} bytes > {MAX_FILE_SIZE_BYTES} bytes" ) # Virus scan the cloud content before writing locally @@ -189,9 +264,9 @@ async def store_media_file( content = base64.b64decode(b64_content) # Check file size limit - if len(content) > MAX_FILE_SIZE: + if len(content) > MAX_FILE_SIZE_BYTES: raise ValueError( - f"File too large: {len(content)} bytes > {MAX_FILE_SIZE} bytes" + f"File too large: {len(content)} bytes > {MAX_FILE_SIZE_BYTES} bytes" ) # Virus scan the base64 content before writing @@ -199,23 +274,31 @@ async def store_media_file( target_path.write_bytes(content) elif file.startswith(("http://", "https://")): - # URL + # URL - download first to get Content-Type header + resp = await Requests().get(file) + + # Check file size limit + if len(resp.content) > MAX_FILE_SIZE_BYTES: + raise ValueError( + f"File too large: {len(resp.content)} bytes > {MAX_FILE_SIZE_BYTES} bytes" + ) + + # Extract filename from URL path parsed_url = urlparse(file) filename = sanitize_filename(Path(parsed_url.path).name or f"{uuid.uuid4()}") + + # If filename lacks extension, add one from Content-Type header + if "." 
not in filename: + content_type = resp.headers.get("Content-Type", "").split(";")[0].strip() + if content_type: + ext = _extension_from_mime(content_type) + filename = f"{filename}{ext}" + try: target_path = _ensure_inside_base(base_path / filename, base_path) except OSError as e: raise ValueError(f"Invalid file path '{filename}': {e}") from e - # Download and save - resp = await Requests().get(file) - - # Check file size limit - if len(resp.content) > MAX_FILE_SIZE: - raise ValueError( - f"File too large: {len(resp.content)} bytes > {MAX_FILE_SIZE} bytes" - ) - # Virus scan the downloaded content before writing await scan_content_safe(resp.content, filename=filename) target_path.write_bytes(resp.content) @@ -230,12 +313,44 @@ async def store_media_file( if not target_path.is_file(): raise ValueError(f"Local file does not exist: {target_path}") - # Return result - if return_content: - return MediaFileType(_file_to_data_uri(target_path)) - else: + # Return based on requested format + if return_format == "for_local_processing": + # Use when processing files locally with tools like ffmpeg, MoviePy, PIL + # Returns: relative path in exec_file directory (e.g., "image.png") return MediaFileType(_strip_base_prefix(target_path, base_path)) + elif return_format == "for_external_api": + # Use when sending content to external APIs that need base64 + # Returns: data URI (e.g., "data:image/png;base64,iVBORw0...") + return MediaFileType(_file_to_data_uri(target_path)) + + elif return_format == "for_block_output": + # Use when returning output from a block to user/next block + # Returns: workspace:// ref (CoPilot) or data URI (graph execution) + if workspace_manager is None: + # No workspace available (graph execution without CoPilot) + # Fallback to data URI so the content can still be used/displayed + return MediaFileType(_file_to_data_uri(target_path)) + + # Don't re-save if input was already from workspace + if is_from_workspace: + # Return original workspace reference + return MediaFileType(file) + + # Save new content to workspace + content = target_path.read_bytes() + filename = target_path.name + + file_record = await workspace_manager.write_file( + content=content, + filename=filename, + overwrite=True, + ) + return MediaFileType(f"workspace://{file_record.id}") + + else: + raise ValueError(f"Invalid return_format: {return_format}") + def get_dir_size(path: Path) -> int: """Get total size of directory.""" diff --git a/autogpt_platform/backend/backend/util/file_test.py b/autogpt_platform/backend/backend/util/file_test.py index cd4fc69706..9fe672d155 100644 --- a/autogpt_platform/backend/backend/util/file_test.py +++ b/autogpt_platform/backend/backend/util/file_test.py @@ -7,10 +7,22 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest +from backend.data.execution import ExecutionContext from backend.util.file import store_media_file from backend.util.type import MediaFileType +def make_test_context( + graph_exec_id: str = "test-exec-123", + user_id: str = "test-user-123", +) -> ExecutionContext: + """Helper to create test ExecutionContext.""" + return ExecutionContext( + user_id=user_id, + graph_exec_id=graph_exec_id, + ) + + class TestFileCloudIntegration: """Test cases for cloud storage integration in file utilities.""" @@ -70,10 +82,9 @@ class TestFileCloudIntegration: mock_path_class.side_effect = path_constructor result = await store_media_file( - graph_exec_id, - MediaFileType(cloud_path), - "test-user-123", - return_content=False, + file=MediaFileType(cloud_path), + 
execution_context=make_test_context(graph_exec_id=graph_exec_id), + return_format="for_local_processing", ) # Verify cloud storage operations @@ -144,10 +155,9 @@ class TestFileCloudIntegration: mock_path_obj.name = "image.png" with patch("backend.util.file.Path", return_value=mock_path_obj): result = await store_media_file( - graph_exec_id, - MediaFileType(cloud_path), - "test-user-123", - return_content=True, + file=MediaFileType(cloud_path), + execution_context=make_test_context(graph_exec_id=graph_exec_id), + return_format="for_external_api", ) # Verify result is a data URI @@ -198,10 +208,9 @@ class TestFileCloudIntegration: mock_resolved_path.relative_to.return_value = Path("test-uuid-789.txt") await store_media_file( - graph_exec_id, - MediaFileType(data_uri), - "test-user-123", - return_content=False, + file=MediaFileType(data_uri), + execution_context=make_test_context(graph_exec_id=graph_exec_id), + return_format="for_local_processing", ) # Verify cloud handler was checked but not used for retrieval @@ -234,5 +243,7 @@ class TestFileCloudIntegration: FileNotFoundError, match="File not found in cloud storage" ): await store_media_file( - graph_exec_id, MediaFileType(cloud_path), "test-user-123" + file=MediaFileType(cloud_path), + execution_context=make_test_context(graph_exec_id=graph_exec_id), + return_format="for_local_processing", ) diff --git a/autogpt_platform/backend/backend/util/gcs_utils.py b/autogpt_platform/backend/backend/util/gcs_utils.py new file mode 100644 index 0000000000..3f91f21897 --- /dev/null +++ b/autogpt_platform/backend/backend/util/gcs_utils.py @@ -0,0 +1,108 @@ +""" +Shared GCS utilities for workspace and cloud storage backends. + +This module provides common functionality for working with Google Cloud Storage, +including path parsing, client management, and signed URL generation. +""" + +import asyncio +import logging +from datetime import datetime, timedelta, timezone + +import aiohttp +from gcloud.aio import storage as async_gcs_storage +from google.cloud import storage as gcs_storage + +logger = logging.getLogger(__name__) + + +def parse_gcs_path(path: str) -> tuple[str, str]: + """ + Parse a GCS path in the format 'gcs://bucket/blob' to (bucket, blob). + + Args: + path: GCS path string (e.g., "gcs://my-bucket/path/to/file") + + Returns: + Tuple of (bucket_name, blob_name) + + Raises: + ValueError: If the path format is invalid + """ + if not path.startswith("gcs://"): + raise ValueError(f"Invalid GCS path: {path}") + + path_without_prefix = path[6:] # Remove "gcs://" + parts = path_without_prefix.split("/", 1) + if len(parts) != 2: + raise ValueError(f"Invalid GCS path format: {path}") + + return parts[0], parts[1] + + +async def download_with_fresh_session(bucket: str, blob: str) -> bytes: + """ + Download file content using a fresh session. + + This approach avoids event loop issues that can occur when reusing + sessions across different async contexts (e.g., in executors). 
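
`parse_gcs_path` is a pure function, so its contract is easy to pin down:

```python
from backend.util.gcs_utils import parse_gcs_path

bucket, blob = parse_gcs_path("gcs://my-bucket/workspaces/ws-1/f-1/report.pdf")
assert bucket == "my-bucket"
assert blob == "workspaces/ws-1/f-1/report.pdf"

# Non-gcs:// schemes and bucket-only paths both fail fast with ValueError:
for bad in ("s3://bucket/key", "gcs://bucket-only"):
    try:
        parse_gcs_path(bad)
    except ValueError:
        pass
```
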
+ + Args: + bucket: GCS bucket name + blob: Blob path within the bucket + + Returns: + File content as bytes + + Raises: + FileNotFoundError: If the file doesn't exist + """ + session = aiohttp.ClientSession( + connector=aiohttp.TCPConnector(limit=10, force_close=True) + ) + client: async_gcs_storage.Storage | None = None + try: + client = async_gcs_storage.Storage(session=session) + content = await client.download(bucket, blob) + return content + except Exception as e: + if "404" in str(e) or "Not Found" in str(e): + raise FileNotFoundError(f"File not found: gcs://{bucket}/{blob}") + raise + finally: + if client: + try: + await client.close() + except Exception: + pass # Best-effort cleanup + await session.close() + + +async def generate_signed_url( + sync_client: gcs_storage.Client, + bucket_name: str, + blob_name: str, + expires_in: int, +) -> str: + """ + Generate a signed URL for temporary access to a GCS file. + + Uses asyncio.to_thread() to run the sync operation without blocking. + + Args: + sync_client: Sync GCS client with service account credentials + bucket_name: GCS bucket name + blob_name: Blob path within the bucket + expires_in: URL expiration time in seconds + + Returns: + Signed URL string + """ + bucket = sync_client.bucket(bucket_name) + blob = bucket.blob(blob_name) + return await asyncio.to_thread( + blob.generate_signed_url, + version="v4", + expiration=datetime.now(timezone.utc) + timedelta(seconds=expires_in), + method="GET", + ) diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index a42a4d29b4..aa28a4c9ac 100644 --- a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -263,6 +263,12 @@ class Config(UpdateTrackingModel["Config"], BaseSettings): description="The name of the Google Cloud Storage bucket for media files", ) + workspace_storage_dir: str = Field( + default="", + description="Local directory for workspace file storage when GCS is not configured. " + "If empty, defaults to {app_data}/workspaces. 
Used for self-hosted deployments.", + ) + reddit_user_agent: str = Field( default="web:AutoGPT:v0.6.0 (by /u/autogpt)", description="The user agent for the Reddit API", @@ -389,6 +395,13 @@ class Config(UpdateTrackingModel["Config"], BaseSettings): description="Maximum file size in MB for file uploads (1-1024 MB)", ) + max_file_size_mb: int = Field( + default=100, + ge=1, + le=1024, + description="Maximum file size in MB for workspace files (1-1024 MB)", + ) + # AutoMod configuration automod_enabled: bool = Field( default=False, diff --git a/autogpt_platform/backend/backend/util/test.py b/autogpt_platform/backend/backend/util/test.py index 0a539644ee..23d7c24147 100644 --- a/autogpt_platform/backend/backend/util/test.py +++ b/autogpt_platform/backend/backend/util/test.py @@ -140,14 +140,29 @@ async def execute_block_test(block: Block): setattr(block, mock_name, mock_obj) # Populate credentials argument(s) + # Generate IDs for execution context + graph_id = str(uuid.uuid4()) + node_id = str(uuid.uuid4()) + graph_exec_id = str(uuid.uuid4()) + node_exec_id = str(uuid.uuid4()) + user_id = str(uuid.uuid4()) + graph_version = 1 # Default version for tests + extra_exec_kwargs: dict = { - "graph_id": str(uuid.uuid4()), - "node_id": str(uuid.uuid4()), - "graph_exec_id": str(uuid.uuid4()), - "node_exec_id": str(uuid.uuid4()), - "user_id": str(uuid.uuid4()), - "graph_version": 1, # Default version for tests - "execution_context": ExecutionContext(), + "graph_id": graph_id, + "node_id": node_id, + "graph_exec_id": graph_exec_id, + "node_exec_id": node_exec_id, + "user_id": user_id, + "graph_version": graph_version, + "execution_context": ExecutionContext( + user_id=user_id, + graph_id=graph_id, + graph_exec_id=graph_exec_id, + graph_version=graph_version, + node_id=node_id, + node_exec_id=node_exec_id, + ), } input_model = cast(type[BlockSchema], block.input_schema) diff --git a/autogpt_platform/backend/backend/util/workspace.py b/autogpt_platform/backend/backend/util/workspace.py new file mode 100644 index 0000000000..a2f1a61b9e --- /dev/null +++ b/autogpt_platform/backend/backend/util/workspace.py @@ -0,0 +1,419 @@ +""" +WorkspaceManager for managing user workspace file operations. + +This module provides a high-level interface for workspace file operations, +combining the storage backend and database layer. +""" + +import logging +import mimetypes +import uuid +from typing import Optional + +from prisma.errors import UniqueViolationError +from prisma.models import UserWorkspaceFile + +from backend.data.workspace import ( + count_workspace_files, + create_workspace_file, + get_workspace_file, + get_workspace_file_by_path, + list_workspace_files, + soft_delete_workspace_file, +) +from backend.util.settings import Config +from backend.util.workspace_storage import compute_file_checksum, get_workspace_storage + +logger = logging.getLogger(__name__) + + +class WorkspaceManager: + """ + Manages workspace file operations. + + Combines storage backend operations with database record management. + Supports session-scoped file segmentation where files are stored in + session-specific virtual paths: /sessions/{session_id}/{filename} + """ + + def __init__( + self, user_id: str, workspace_id: str, session_id: Optional[str] = None + ): + """ + Initialize WorkspaceManager. 
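
The session-scoping rule from the class docstring distills to three branches; a standalone restatement for reference (`resolve_path` here is illustrative, not part of the patch):

```python
def resolve_path(path: str, session_id: str | None) -> str:
    # Explicit /sessions/... paths pass through, enabling deliberate
    # cross-session access; everything else is normalized and, when a
    # session is active, nested under that session's folder.
    if path.startswith("/sessions/"):
        return path
    if not path.startswith("/"):
        path = f"/{path}"
    return f"/sessions/{session_id}{path}" if session_id else path


assert resolve_path("notes.txt", "abc123") == "/sessions/abc123/notes.txt"
assert resolve_path("/sessions/other/x.txt", "abc123") == "/sessions/other/x.txt"
assert resolve_path("notes.txt", None) == "/notes.txt"
```
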
+ + Args: + user_id: The user's ID + workspace_id: The workspace ID + session_id: Optional session ID for session-scoped file access + """ + self.user_id = user_id + self.workspace_id = workspace_id + self.session_id = session_id + # Session path prefix for file isolation + self.session_path = f"/sessions/{session_id}" if session_id else "" + + def _resolve_path(self, path: str) -> str: + """ + Resolve a path, defaulting to session folder if session_id is set. + + Cross-session access is allowed by explicitly using /sessions/other-session-id/... + + Args: + path: Virtual path (e.g., "/file.txt" or "/sessions/abc123/file.txt") + + Returns: + Resolved path with session prefix if applicable + """ + # If path explicitly references a session folder, use it as-is + if path.startswith("/sessions/"): + return path + + # If we have a session context, prepend session path + if self.session_path: + # Normalize the path + if not path.startswith("/"): + path = f"/{path}" + return f"{self.session_path}{path}" + + # No session context, use path as-is + return path if path.startswith("/") else f"/{path}" + + def _get_effective_path( + self, path: Optional[str], include_all_sessions: bool + ) -> Optional[str]: + """ + Get effective path for list/count operations based on session context. + + Args: + path: Optional path prefix to filter + include_all_sessions: If True, don't apply session scoping + + Returns: + Effective path prefix for database query + """ + if include_all_sessions: + # Normalize path to ensure leading slash (stored paths are normalized) + if path is not None and not path.startswith("/"): + return f"/{path}" + return path + elif path is not None: + # Resolve the provided path with session scoping + return self._resolve_path(path) + elif self.session_path: + # Default to session folder with trailing slash to prevent prefix collisions + # e.g., "/sessions/abc" should not match "/sessions/abc123" + return self.session_path.rstrip("/") + "/" + else: + # No session context, use path as-is + return path + + async def read_file(self, path: str) -> bytes: + """ + Read file from workspace by virtual path. + + When session_id is set, paths are resolved relative to the session folder + unless they explicitly reference /sessions/... + + Args: + path: Virtual path (e.g., "/documents/report.pdf") + + Returns: + File content as bytes + + Raises: + FileNotFoundError: If file doesn't exist + """ + resolved_path = self._resolve_path(path) + file = await get_workspace_file_by_path(self.workspace_id, resolved_path) + if file is None: + raise FileNotFoundError(f"File not found at path: {resolved_path}") + + storage = await get_workspace_storage() + return await storage.retrieve(file.storagePath) + + async def read_file_by_id(self, file_id: str) -> bytes: + """ + Read file from workspace by file ID. + + Args: + file_id: The file's ID + + Returns: + File content as bytes + + Raises: + FileNotFoundError: If file doesn't exist + """ + file = await get_workspace_file(file_id, self.workspace_id) + if file is None: + raise FileNotFoundError(f"File not found: {file_id}") + + storage = await get_workspace_storage() + return await storage.retrieve(file.storagePath) + + async def write_file( + self, + content: bytes, + filename: str, + path: Optional[str] = None, + mime_type: Optional[str] = None, + overwrite: bool = False, + ) -> UserWorkspaceFile: + """ + Write file to workspace. + + When session_id is set, files are written to /sessions/{session_id}/... + by default. Use explicit /sessions/... 
paths for cross-session access. + + Args: + content: File content as bytes + filename: Filename for the file + path: Virtual path (defaults to "/{filename}", session-scoped if session_id set) + mime_type: MIME type (auto-detected if not provided) + overwrite: Whether to overwrite existing file at path + + Returns: + Created UserWorkspaceFile instance + + Raises: + ValueError: If file exceeds size limit or path already exists + """ + # Enforce file size limit + max_file_size = Config().max_file_size_mb * 1024 * 1024 + if len(content) > max_file_size: + raise ValueError( + f"File too large: {len(content)} bytes exceeds " + f"{Config().max_file_size_mb}MB limit" + ) + + # Determine path with session scoping + if path is None: + path = f"/{filename}" + elif not path.startswith("/"): + path = f"/{path}" + + # Resolve path with session prefix + path = self._resolve_path(path) + + # Check if file exists at path (only error for non-overwrite case) + # For overwrite=True, we let the write proceed and handle via UniqueViolationError + # This ensures the new file is written to storage BEFORE the old one is deleted, + # preventing data loss if the new write fails + if not overwrite: + existing = await get_workspace_file_by_path(self.workspace_id, path) + if existing is not None: + raise ValueError(f"File already exists at path: {path}") + + # Auto-detect MIME type if not provided + if mime_type is None: + mime_type, _ = mimetypes.guess_type(filename) + mime_type = mime_type or "application/octet-stream" + + # Compute checksum + checksum = compute_file_checksum(content) + + # Generate unique file ID for storage + file_id = str(uuid.uuid4()) + + # Store file in storage backend + storage = await get_workspace_storage() + storage_path = await storage.store( + workspace_id=self.workspace_id, + file_id=file_id, + filename=filename, + content=content, + ) + + # Create database record - handle race condition where another request + # created a file at the same path between our check and create + try: + file = await create_workspace_file( + workspace_id=self.workspace_id, + file_id=file_id, + name=filename, + path=path, + storage_path=storage_path, + mime_type=mime_type, + size_bytes=len(content), + checksum=checksum, + ) + except UniqueViolationError: + # Race condition: another request created a file at this path + if overwrite: + # Re-fetch and delete the conflicting file, then retry + existing = await get_workspace_file_by_path(self.workspace_id, path) + if existing: + await self.delete_file(existing.id) + # Retry the create - if this also fails, clean up storage file + try: + file = await create_workspace_file( + workspace_id=self.workspace_id, + file_id=file_id, + name=filename, + path=path, + storage_path=storage_path, + mime_type=mime_type, + size_bytes=len(content), + checksum=checksum, + ) + except Exception: + # Clean up orphaned storage file on retry failure + try: + await storage.delete(storage_path) + except Exception as e: + logger.warning(f"Failed to clean up orphaned storage file: {e}") + raise + else: + # Clean up the orphaned storage file before raising + try: + await storage.delete(storage_path) + except Exception as e: + logger.warning(f"Failed to clean up orphaned storage file: {e}") + raise ValueError(f"File already exists at path: {path}") + except Exception: + # Any other database error (connection, validation, etc.) 
- clean up storage + try: + await storage.delete(storage_path) + except Exception as e: + logger.warning(f"Failed to clean up orphaned storage file: {e}") + raise + + logger.info( + f"Wrote file {file.id} ({filename}) to workspace {self.workspace_id} " + f"at path {path}, size={len(content)} bytes" + ) + + return file + + async def list_files( + self, + path: Optional[str] = None, + limit: Optional[int] = None, + offset: int = 0, + include_all_sessions: bool = False, + ) -> list[UserWorkspaceFile]: + """ + List files in workspace. + + When session_id is set and include_all_sessions is False (default), + only files in the current session's folder are listed. + + Args: + path: Optional path prefix to filter (e.g., "/documents/") + limit: Maximum number of files to return + offset: Number of files to skip + include_all_sessions: If True, list files from all sessions. + If False (default), only list current session's files. + + Returns: + List of UserWorkspaceFile instances + """ + effective_path = self._get_effective_path(path, include_all_sessions) + + return await list_workspace_files( + workspace_id=self.workspace_id, + path_prefix=effective_path, + limit=limit, + offset=offset, + ) + + async def delete_file(self, file_id: str) -> bool: + """ + Delete a file (soft-delete). + + Args: + file_id: The file's ID + + Returns: + True if deleted, False if not found + """ + file = await get_workspace_file(file_id, self.workspace_id) + if file is None: + return False + + # Delete from storage + storage = await get_workspace_storage() + try: + await storage.delete(file.storagePath) + except Exception as e: + logger.warning(f"Failed to delete file from storage: {e}") + # Continue with database soft-delete even if storage delete fails + + # Soft-delete database record + result = await soft_delete_workspace_file(file_id, self.workspace_id) + return result is not None + + async def get_download_url(self, file_id: str, expires_in: int = 3600) -> str: + """ + Get download URL for a file. + + Args: + file_id: The file's ID + expires_in: URL expiration in seconds (default 1 hour) + + Returns: + Download URL (signed URL for GCS, API endpoint for local) + + Raises: + FileNotFoundError: If file doesn't exist + """ + file = await get_workspace_file(file_id, self.workspace_id) + if file is None: + raise FileNotFoundError(f"File not found: {file_id}") + + storage = await get_workspace_storage() + return await storage.get_download_url(file.storagePath, expires_in) + + async def get_file_info(self, file_id: str) -> Optional[UserWorkspaceFile]: + """ + Get file metadata. + + Args: + file_id: The file's ID + + Returns: + UserWorkspaceFile instance or None + """ + return await get_workspace_file(file_id, self.workspace_id) + + async def get_file_info_by_path(self, path: str) -> Optional[UserWorkspaceFile]: + """ + Get file metadata by path. + + When session_id is set, paths are resolved relative to the session folder + unless they explicitly reference /sessions/... + + Args: + path: Virtual path + + Returns: + UserWorkspaceFile instance or None + """ + resolved_path = self._resolve_path(path) + return await get_workspace_file_by_path(self.workspace_id, resolved_path) + + async def get_file_count( + self, + path: Optional[str] = None, + include_all_sessions: bool = False, + ) -> int: + """ + Get number of files in workspace. + + When session_id is set and include_all_sessions is False (default), + only counts files in the current session's folder. 
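
Typical `WorkspaceManager` usage, assuming a configured storage backend and database (IDs are placeholders):

```python
from backend.util.workspace import WorkspaceManager


async def demo(user_id: str, workspace_id: str, session_id: str) -> None:
    wm = WorkspaceManager(user_id, workspace_id, session_id=session_id)

    # Writes default into the session folder; overwrite=True relies on the
    # UniqueViolationError path above rather than a check-then-write.
    record = await wm.write_file(b"hello", "greeting.txt", overwrite=True)
    assert record.path == f"/sessions/{session_id}/greeting.txt"

    # Session-relative read of the same file:
    assert await wm.read_file("/greeting.txt") == b"hello"

    # Listing is session-scoped unless explicitly widened:
    session_files = await wm.list_files()
    all_files = await wm.list_files(include_all_sessions=True)
    assert len(all_files) >= len(session_files)
```
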
+ + Args: + path: Optional path prefix to filter (e.g., "/documents/") + include_all_sessions: If True, count all files in workspace. + If False (default), only count current session's files. + + Returns: + Number of files + """ + effective_path = self._get_effective_path(path, include_all_sessions) + + return await count_workspace_files( + self.workspace_id, path_prefix=effective_path + ) diff --git a/autogpt_platform/backend/backend/util/workspace_storage.py b/autogpt_platform/backend/backend/util/workspace_storage.py new file mode 100644 index 0000000000..2f4c8ae2b5 --- /dev/null +++ b/autogpt_platform/backend/backend/util/workspace_storage.py @@ -0,0 +1,398 @@ +""" +Workspace storage backend abstraction for supporting both cloud and local deployments. + +This module provides a unified interface for storing workspace files, with implementations +for Google Cloud Storage (cloud deployments) and local filesystem (self-hosted deployments). +""" + +import asyncio +import hashlib +import logging +from abc import ABC, abstractmethod +from datetime import datetime, timezone +from pathlib import Path +from typing import Optional + +import aiofiles +import aiohttp +from gcloud.aio import storage as async_gcs_storage +from google.cloud import storage as gcs_storage + +from backend.util.data import get_data_path +from backend.util.gcs_utils import ( + download_with_fresh_session, + generate_signed_url, + parse_gcs_path, +) +from backend.util.settings import Config + +logger = logging.getLogger(__name__) + + +class WorkspaceStorageBackend(ABC): + """Abstract interface for workspace file storage.""" + + @abstractmethod + async def store( + self, + workspace_id: str, + file_id: str, + filename: str, + content: bytes, + ) -> str: + """ + Store file content, return storage path. + + Args: + workspace_id: The workspace ID + file_id: Unique file ID for storage + filename: Original filename + content: File content as bytes + + Returns: + Storage path string (cloud path or local path) + """ + pass + + @abstractmethod + async def retrieve(self, storage_path: str) -> bytes: + """ + Retrieve file content from storage. + + Args: + storage_path: The storage path returned from store() + + Returns: + File content as bytes + """ + pass + + @abstractmethod + async def delete(self, storage_path: str) -> None: + """ + Delete file from storage. + + Args: + storage_path: The storage path to delete + """ + pass + + @abstractmethod + async def get_download_url(self, storage_path: str, expires_in: int = 3600) -> str: + """ + Get URL for downloading the file. 
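
The interface is small enough that a test double is cheap; a minimal in-memory sketch (not part of the patch) satisfying the four abstract methods:

```python
from backend.util.workspace_storage import WorkspaceStorageBackend


class InMemoryWorkspaceStorage(WorkspaceStorageBackend):
    """Test double keeping blobs in a dict; mirrors the backend contract."""

    def __init__(self) -> None:
        self._blobs: dict[str, bytes] = {}

    async def store(
        self, workspace_id: str, file_id: str, filename: str, content: bytes
    ) -> str:
        path = f"mem://{workspace_id}/{file_id}/{filename}"
        self._blobs[path] = content
        return path

    async def retrieve(self, storage_path: str) -> bytes:
        if storage_path not in self._blobs:
            raise FileNotFoundError(f"File not found: {storage_path}")
        return self._blobs[storage_path]

    async def delete(self, storage_path: str) -> None:
        # Idempotent, like the GCS backend's tolerant 404 handling.
        self._blobs.pop(storage_path, None)

    async def get_download_url(self, storage_path: str, expires_in: int = 3600) -> str:
        file_id = storage_path.split("/")[3]  # mem://{ws}/{file_id}/{name}
        return f"/api/workspace/files/{file_id}/download"
```
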
+ + Args: + storage_path: The storage path + expires_in: URL expiration time in seconds (default 1 hour) + + Returns: + Download URL (signed URL for GCS, direct API path for local) + """ + pass + + +class GCSWorkspaceStorage(WorkspaceStorageBackend): + """Google Cloud Storage implementation for workspace storage.""" + + def __init__(self, bucket_name: str): + self.bucket_name = bucket_name + self._async_client: Optional[async_gcs_storage.Storage] = None + self._sync_client: Optional[gcs_storage.Client] = None + self._session: Optional[aiohttp.ClientSession] = None + + async def _get_async_client(self) -> async_gcs_storage.Storage: + """Get or create async GCS client.""" + if self._async_client is None: + self._session = aiohttp.ClientSession( + connector=aiohttp.TCPConnector(limit=100, force_close=False) + ) + self._async_client = async_gcs_storage.Storage(session=self._session) + return self._async_client + + def _get_sync_client(self) -> gcs_storage.Client: + """Get or create sync GCS client (for signed URLs).""" + if self._sync_client is None: + self._sync_client = gcs_storage.Client() + return self._sync_client + + async def close(self) -> None: + """Close all client connections.""" + if self._async_client is not None: + try: + await self._async_client.close() + except Exception as e: + logger.warning(f"Error closing GCS client: {e}") + self._async_client = None + + if self._session is not None: + try: + await self._session.close() + except Exception as e: + logger.warning(f"Error closing session: {e}") + self._session = None + + def _build_blob_name(self, workspace_id: str, file_id: str, filename: str) -> str: + """Build the blob path for workspace files.""" + return f"workspaces/{workspace_id}/{file_id}/{filename}" + + async def store( + self, + workspace_id: str, + file_id: str, + filename: str, + content: bytes, + ) -> str: + """Store file in GCS.""" + client = await self._get_async_client() + blob_name = self._build_blob_name(workspace_id, file_id, filename) + + # Upload with metadata + upload_time = datetime.now(timezone.utc) + await client.upload( + self.bucket_name, + blob_name, + content, + metadata={ + "uploaded_at": upload_time.isoformat(), + "workspace_id": workspace_id, + "file_id": file_id, + }, + ) + + return f"gcs://{self.bucket_name}/{blob_name}" + + async def retrieve(self, storage_path: str) -> bytes: + """Retrieve file from GCS.""" + bucket_name, blob_name = parse_gcs_path(storage_path) + return await download_with_fresh_session(bucket_name, blob_name) + + async def delete(self, storage_path: str) -> None: + """Delete file from GCS.""" + bucket_name, blob_name = parse_gcs_path(storage_path) + client = await self._get_async_client() + + try: + await client.delete(bucket_name, blob_name) + except Exception as e: + if "404" not in str(e) and "Not Found" not in str(e): + raise + # File already deleted, that's fine + + async def get_download_url(self, storage_path: str, expires_in: int = 3600) -> str: + """ + Generate download URL for GCS file. + + Attempts to generate a signed URL if running with service account credentials. + Falls back to an API proxy endpoint if signed URL generation fails + (e.g., when running locally with user OAuth credentials). 
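
The OAuth fallback leans on the `workspaces/{workspace_id}/{file_id}/{filename}` blob layout; the derivation distills to (illustrative helper, not part of the patch):

```python
def fallback_download_url(blob_name: str) -> str:
    # Blob layout: workspaces/{workspace_id}/{file_id}/{filename}
    parts = blob_name.split("/")
    if len(parts) < 3:
        raise ValueError(f"Unexpected blob layout: {blob_name}")
    return f"/api/workspace/files/{parts[2]}/download"


assert (
    fallback_download_url("workspaces/ws-1/file-9/report.pdf")
    == "/api/workspace/files/file-9/download"
)
```
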
+ """ + bucket_name, blob_name = parse_gcs_path(storage_path) + + # Extract file_id from blob_name for fallback: workspaces/{workspace_id}/{file_id}/{filename} + blob_parts = blob_name.split("/") + file_id = blob_parts[2] if len(blob_parts) >= 3 else None + + # Try to generate signed URL (requires service account credentials) + try: + sync_client = self._get_sync_client() + return await generate_signed_url( + sync_client, bucket_name, blob_name, expires_in + ) + except AttributeError as e: + # Signed URL generation requires service account with private key. + # When running with user OAuth credentials, fall back to API proxy. + if "private key" in str(e) and file_id: + logger.debug( + "Cannot generate signed URL (no service account credentials), " + "falling back to API proxy endpoint" + ) + return f"/api/workspace/files/{file_id}/download" + raise + + +class LocalWorkspaceStorage(WorkspaceStorageBackend): + """Local filesystem implementation for workspace storage (self-hosted deployments).""" + + def __init__(self, base_dir: Optional[str] = None): + """ + Initialize local storage backend. + + Args: + base_dir: Base directory for workspace storage. + If None, defaults to {app_data}/workspaces + """ + if base_dir: + self.base_dir = Path(base_dir) + else: + self.base_dir = Path(get_data_path()) / "workspaces" + + # Ensure base directory exists + self.base_dir.mkdir(parents=True, exist_ok=True) + + def _build_file_path(self, workspace_id: str, file_id: str, filename: str) -> Path: + """Build the local file path with path traversal protection.""" + # Import here to avoid circular import + # (file.py imports workspace.py which imports workspace_storage.py) + from backend.util.file import sanitize_filename + + # Sanitize filename to prevent path traversal (removes / and \ among others) + safe_filename = sanitize_filename(filename) + file_path = (self.base_dir / workspace_id / file_id / safe_filename).resolve() + + # Verify the resolved path is still under base_dir + if not file_path.is_relative_to(self.base_dir.resolve()): + raise ValueError("Invalid filename: path traversal detected") + + return file_path + + def _parse_storage_path(self, storage_path: str) -> Path: + """Parse local storage path to filesystem path.""" + if storage_path.startswith("local://"): + relative_path = storage_path[8:] # Remove "local://" + else: + relative_path = storage_path + + full_path = (self.base_dir / relative_path).resolve() + + # Security check: ensure path is under base_dir + # Use is_relative_to() for robust path containment check + # (handles case-insensitive filesystems and edge cases) + if not full_path.is_relative_to(self.base_dir.resolve()): + raise ValueError("Invalid storage path: path traversal detected") + + return full_path + + async def store( + self, + workspace_id: str, + file_id: str, + filename: str, + content: bytes, + ) -> str: + """Store file locally.""" + file_path = self._build_file_path(workspace_id, file_id, filename) + + # Create parent directories + file_path.parent.mkdir(parents=True, exist_ok=True) + + # Write file asynchronously + async with aiofiles.open(file_path, "wb") as f: + await f.write(content) + + # Return relative path as storage path + relative_path = file_path.relative_to(self.base_dir) + return f"local://{relative_path}" + + async def retrieve(self, storage_path: str) -> bytes: + """Retrieve file from local storage.""" + file_path = self._parse_storage_path(storage_path) + + if not file_path.exists(): + raise FileNotFoundError(f"File not found: {storage_path}") + + 
async with aiofiles.open(file_path, "rb") as f: + return await f.read() + + async def delete(self, storage_path: str) -> None: + """Delete file from local storage.""" + file_path = self._parse_storage_path(storage_path) + + if file_path.exists(): + # Remove file + file_path.unlink() + + # Clean up empty parent directories + parent = file_path.parent + while parent != self.base_dir: + try: + if parent.exists() and not any(parent.iterdir()): + parent.rmdir() + else: + break + except OSError: + break + parent = parent.parent + + async def get_download_url(self, storage_path: str, expires_in: int = 3600) -> str: + """ + Get download URL for local file. + + For local storage, this returns an API endpoint path. + The actual serving is handled by the API layer. + """ + # Parse the storage path to get the components + if storage_path.startswith("local://"): + relative_path = storage_path[8:] + else: + relative_path = storage_path + + # Return the API endpoint for downloading + # The file_id is extracted from the path: {workspace_id}/{file_id}/{filename} + parts = relative_path.split("/") + if len(parts) >= 2: + file_id = parts[1] # Second component is file_id + return f"/api/workspace/files/{file_id}/download" + else: + raise ValueError(f"Invalid storage path format: {storage_path}") + + +# Global storage backend instance +_workspace_storage: Optional[WorkspaceStorageBackend] = None +_storage_lock = asyncio.Lock() + + +async def get_workspace_storage() -> WorkspaceStorageBackend: + """ + Get the workspace storage backend instance. + + Uses GCS if media_gcs_bucket_name is configured, otherwise uses local storage. + """ + global _workspace_storage + + if _workspace_storage is None: + async with _storage_lock: + if _workspace_storage is None: + config = Config() + + if config.media_gcs_bucket_name: + logger.info( + f"Using GCS workspace storage: {config.media_gcs_bucket_name}" + ) + _workspace_storage = GCSWorkspaceStorage( + config.media_gcs_bucket_name + ) + else: + storage_dir = ( + config.workspace_storage_dir + if config.workspace_storage_dir + else None + ) + logger.info( + f"Using local workspace storage: {storage_dir or 'default'}" + ) + _workspace_storage = LocalWorkspaceStorage(storage_dir) + + return _workspace_storage + + +async def shutdown_workspace_storage() -> None: + """ + Properly shutdown the global workspace storage backend. + + Closes aiohttp sessions and other resources for GCS backend. + Should be called during application shutdown. 
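+    Calling it more than once is safe: once the backend reference is cleared,
+    subsequent calls are no-ops.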
+ """ + global _workspace_storage + + if _workspace_storage is not None: + async with _storage_lock: + if _workspace_storage is not None: + if isinstance(_workspace_storage, GCSWorkspaceStorage): + await _workspace_storage.close() + _workspace_storage = None + + +def compute_file_checksum(content: bytes) -> str: + """Compute SHA256 checksum of file content.""" + return hashlib.sha256(content).hexdigest() diff --git a/autogpt_platform/backend/migrations/20260127230419_add_user_workspace/migration.sql b/autogpt_platform/backend/migrations/20260127230419_add_user_workspace/migration.sql new file mode 100644 index 0000000000..bb63dccb33 --- /dev/null +++ b/autogpt_platform/backend/migrations/20260127230419_add_user_workspace/migration.sql @@ -0,0 +1,52 @@ +-- CreateEnum +CREATE TYPE "WorkspaceFileSource" AS ENUM ('UPLOAD', 'EXECUTION', 'COPILOT', 'IMPORT'); + +-- CreateTable +CREATE TABLE "UserWorkspace" ( + "id" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + "userId" TEXT NOT NULL, + + CONSTRAINT "UserWorkspace_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "UserWorkspaceFile" ( + "id" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + "workspaceId" TEXT NOT NULL, + "name" TEXT NOT NULL, + "path" TEXT NOT NULL, + "storagePath" TEXT NOT NULL, + "mimeType" TEXT NOT NULL, + "sizeBytes" BIGINT NOT NULL, + "checksum" TEXT, + "isDeleted" BOOLEAN NOT NULL DEFAULT false, + "deletedAt" TIMESTAMP(3), + "source" "WorkspaceFileSource" NOT NULL DEFAULT 'UPLOAD', + "sourceExecId" TEXT, + "sourceSessionId" TEXT, + "metadata" JSONB NOT NULL DEFAULT '{}', + + CONSTRAINT "UserWorkspaceFile_pkey" PRIMARY KEY ("id") +); + +-- CreateIndex +CREATE UNIQUE INDEX "UserWorkspace_userId_key" ON "UserWorkspace"("userId"); + +-- CreateIndex +CREATE INDEX "UserWorkspace_userId_idx" ON "UserWorkspace"("userId"); + +-- CreateIndex +CREATE INDEX "UserWorkspaceFile_workspaceId_isDeleted_idx" ON "UserWorkspaceFile"("workspaceId", "isDeleted"); + +-- CreateIndex +CREATE UNIQUE INDEX "UserWorkspaceFile_workspaceId_path_key" ON "UserWorkspaceFile"("workspaceId", "path"); + +-- AddForeignKey +ALTER TABLE "UserWorkspace" ADD CONSTRAINT "UserWorkspace_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "UserWorkspaceFile" ADD CONSTRAINT "UserWorkspaceFile_workspaceId_fkey" FOREIGN KEY ("workspaceId") REFERENCES "UserWorkspace"("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/autogpt_platform/backend/migrations/20260129011611_remove_workspace_file_source/migration.sql b/autogpt_platform/backend/migrations/20260129011611_remove_workspace_file_source/migration.sql new file mode 100644 index 0000000000..2709bc8484 --- /dev/null +++ b/autogpt_platform/backend/migrations/20260129011611_remove_workspace_file_source/migration.sql @@ -0,0 +1,16 @@ +/* + Warnings: + + - You are about to drop the column `source` on the `UserWorkspaceFile` table. All the data in the column will be lost. + - You are about to drop the column `sourceExecId` on the `UserWorkspaceFile` table. All the data in the column will be lost. + - You are about to drop the column `sourceSessionId` on the `UserWorkspaceFile` table. All the data in the column will be lost. 
+ +*/ + +-- AlterTable +ALTER TABLE "UserWorkspaceFile" DROP COLUMN "source", +DROP COLUMN "sourceExecId", +DROP COLUMN "sourceSessionId"; + +-- DropEnum +DROP TYPE "WorkspaceFileSource"; diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma index 2c52528e3f..2da898a7ce 100644 --- a/autogpt_platform/backend/schema.prisma +++ b/autogpt_platform/backend/schema.prisma @@ -63,6 +63,7 @@ model User { IntegrationWebhooks IntegrationWebhook[] NotificationBatches UserNotificationBatch[] PendingHumanReviews PendingHumanReview[] + Workspace UserWorkspace? // OAuth Provider relations OAuthApplications OAuthApplication[] @@ -137,6 +138,53 @@ model CoPilotUnderstanding { @@index([userId]) } +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +//////////////// USER WORKSPACE TABLES ///////////////// +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// + +// User's persistent file storage workspace +model UserWorkspace { + id String @id @default(uuid()) + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + + userId String @unique + User User @relation(fields: [userId], references: [id], onDelete: Cascade) + + Files UserWorkspaceFile[] + + @@index([userId]) +} + +// Individual files in a user's workspace +model UserWorkspaceFile { + id String @id @default(uuid()) + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + + workspaceId String + Workspace UserWorkspace @relation(fields: [workspaceId], references: [id], onDelete: Cascade) + + // File metadata + name String // User-visible filename + path String // Virtual path (e.g., "/documents/report.pdf") + storagePath String // Actual GCS or local storage path + mimeType String + sizeBytes BigInt + checksum String? // SHA256 for integrity + + // File state + isDeleted Boolean @default(false) + deletedAt DateTime? 
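+  // Soft delete: the row is kept and flagged; deletedAt records when it was deleted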
+ + metadata Json @default("{}") + + @@unique([workspaceId, path]) + @@index([workspaceId, isDeleted]) +} + model BuilderSearchHistory { id String @id @default(uuid()) createdAt DateTime @default(now()) diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index 2a9db1990d..6692c30e72 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -5912,6 +5912,40 @@ } } }, + "/api/workspace/files/{file_id}/download": { + "get": { + "tags": ["workspace"], + "summary": "Download file by ID", + "description": "Download a file by its ID.\n\nReturns the file content directly or redirects to a signed URL for GCS.", + "operationId": "getWorkspaceDownload file by id", + "security": [{ "HTTPBearerJWT": [] }], + "parameters": [ + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { "type": "string", "title": "File Id" } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { "application/json": { "schema": {} } } + }, + "401": { + "$ref": "#/components/responses/HTTP401NotAuthenticatedError" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { "$ref": "#/components/schemas/HTTPValidationError" } + } + } + } + } + } + }, "/health": { "get": { "tags": ["health"], diff --git a/autogpt_platform/frontend/src/app/api/proxy/[...path]/route.ts b/autogpt_platform/frontend/src/app/api/proxy/[...path]/route.ts index 293c406373..442bd77e0f 100644 --- a/autogpt_platform/frontend/src/app/api/proxy/[...path]/route.ts +++ b/autogpt_platform/frontend/src/app/api/proxy/[...path]/route.ts @@ -1,5 +1,6 @@ import { ApiError, + getServerAuthToken, makeAuthenticatedFileUpload, makeAuthenticatedRequest, } from "@/lib/autogpt-server-api/helpers"; @@ -15,6 +16,69 @@ function buildBackendUrl(path: string[], queryString: string): string { return `${environment.getAGPTServerBaseUrl()}/${backendPath}${queryString}`; } +/** + * Check if this is a workspace file download request that needs binary response handling. + */ +function isWorkspaceDownloadRequest(path: string[]): boolean { + // Match pattern: api/workspace/files/{id}/download (5 segments) + return ( + path.length == 5 && + path[0] === "api" && + path[1] === "workspace" && + path[2] === "files" && + path[path.length - 1] === "download" + ); +} + +/** + * Handle workspace file download requests with proper binary response streaming. 
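+ * Note: the body is buffered fully in memory before being returned,
+ * rather than streamed chunk-by-chunk.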
+ */
+async function handleWorkspaceDownload(
+  req: NextRequest,
+  backendUrl: string,
+): Promise<NextResponse> {
+  const token = await getServerAuthToken();
+
+  const headers: Record<string, string> = {};
+  if (token && token !== "no-token-found") {
+    headers["Authorization"] = `Bearer ${token}`;
+  }
+
+  const response = await fetch(backendUrl, {
+    method: "GET",
+    headers,
+    redirect: "follow", // Follow redirects to signed URLs
+  });
+
+  if (!response.ok) {
+    return NextResponse.json(
+      { error: `Failed to download file: ${response.statusText}` },
+      { status: response.status },
+    );
+  }
+
+  // Get the content type from the backend response
+  const contentType =
+    response.headers.get("Content-Type") || "application/octet-stream";
+  const contentDisposition = response.headers.get("Content-Disposition");
+
+  // Pass through content headers from the backend response
+  const responseHeaders: Record<string, string> = {
+    "Content-Type": contentType,
+  };
+
+  if (contentDisposition) {
+    responseHeaders["Content-Disposition"] = contentDisposition;
+  }
+
+  // Return the binary content
+  const arrayBuffer = await response.arrayBuffer();
+  return new NextResponse(arrayBuffer, {
+    status: 200,
+    headers: responseHeaders,
+  });
+}
+
 async function handleJsonRequest(
   req: NextRequest,
   method: string,
@@ -180,6 +244,11 @@ async function handler(
   };
 
   try {
+    // Handle workspace file downloads separately (binary response)
+    if (method === "GET" && isWorkspaceDownloadRequest(path)) {
+      return await handleWorkspaceDownload(req, backendUrl);
+    }
+
     if (method === "GET" || method === "DELETE") {
       responseBody = await handleGetDeleteRequest(method, backendUrl, req);
     } else if (contentType?.includes("application/json")) {
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/MarkdownContent/MarkdownContent.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/MarkdownContent/MarkdownContent.tsx
index 51a0794090..3dd5eca692 100644
--- a/autogpt_platform/frontend/src/components/contextual/Chat/components/MarkdownContent/MarkdownContent.tsx
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/MarkdownContent/MarkdownContent.tsx
@@ -1,6 +1,8 @@
 "use client";
 
+import { getGetWorkspaceDownloadFileByIdUrl } from "@/app/api/__generated__/endpoints/workspace/workspace";
 import { cn } from "@/lib/utils";
+import { EyeSlash } from "@phosphor-icons/react";
 import React from "react";
 import ReactMarkdown from "react-markdown";
 import remarkGfm from "remark-gfm";
@@ -29,12 +31,88 @@ interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {
   type?: string;
 }
 
+/**
+ * URL transformer for ReactMarkdown.
+ * Converts workspace:// URLs to proxy URLs that route through Next.js to the backend.
+ * workspace://abc123 -> /api/proxy/api/workspace/files/abc123/download
+ *
+ * Uses the generated API URL helper and routes through the Next.js proxy,
+ * which handles authentication and proper backend routing.
+ *
+ * This is needed because ReactMarkdown sanitizes URLs and only allows
+ * http, https, mailto, and tel protocols by default.
+ */ +function resolveWorkspaceUrl(src: string): string { + if (src.startsWith("workspace://")) { + const fileId = src.replace("workspace://", ""); + // Use the generated API URL helper to get the correct path + const apiPath = getGetWorkspaceDownloadFileByIdUrl(fileId); + // Route through the Next.js proxy (same pattern as customMutator for client-side) + return `/api/proxy${apiPath}`; + } + return src; +} + +/** + * Check if the image URL is a workspace file (AI cannot see these yet). + * After URL transformation, workspace files have URLs like /api/proxy/api/workspace/files/... + */ +function isWorkspaceImage(src: string | undefined): boolean { + return src?.includes("/workspace/files/") ?? false; +} + +/** + * Custom image component that shows an indicator when the AI cannot see the image. + * Note: src is already transformed by urlTransform, so workspace:// is now /api/workspace/... + */ +function MarkdownImage(props: Record) { + const src = props.src as string | undefined; + const alt = props.alt as string | undefined; + + const aiCannotSee = isWorkspaceImage(src); + + // If no src, show a placeholder + if (!src) { + return ( + + [Image: {alt || "missing src"}] + + ); + } + + return ( + + {/* eslint-disable-next-line @next/next/no-img-element */} + {alt + {aiCannotSee && ( + + + AI cannot see this image + + )} + + ); +} + export function MarkdownContent({ content, className }: MarkdownContentProps) { return (
{ const isInline = !className?.includes("language-"); @@ -206,6 +284,9 @@ export function MarkdownContent({ content, className }: MarkdownContentProps) { {children} ), + img: ({ src, alt, ...props }) => ( + + ), }} > {content} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts index 400f32936e..e886e1a28c 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts @@ -37,6 +37,87 @@ export function getErrorMessage(result: unknown): string { return "An error occurred"; } +/** + * Check if a value is a workspace file reference. + */ +function isWorkspaceRef(value: unknown): value is string { + return typeof value === "string" && value.startsWith("workspace://"); +} + +/** + * Check if a workspace reference appears to be an image based on common patterns. + * Since workspace refs don't have extensions, we check the context or assume image + * for certain block types. + * + * TODO: Replace keyword matching with MIME type encoded in workspace ref. + * e.g., workspace://abc123#image/png or workspace://abc123#video/mp4 + * This would let frontend render correctly without fragile keyword matching. + */ +function isLikelyImageRef(value: string, outputKey?: string): boolean { + if (!isWorkspaceRef(value)) return false; + + // Check output key name for video-related hints (these are NOT images) + const videoKeywords = ["video", "mp4", "mov", "avi", "webm", "movie", "clip"]; + if (outputKey) { + const lowerKey = outputKey.toLowerCase(); + if (videoKeywords.some((kw) => lowerKey.includes(kw))) { + return false; + } + } + + // Check output key name for image-related hints + const imageKeywords = [ + "image", + "img", + "photo", + "picture", + "thumbnail", + "avatar", + "icon", + "screenshot", + ]; + if (outputKey) { + const lowerKey = outputKey.toLowerCase(); + if (imageKeywords.some((kw) => lowerKey.includes(kw))) { + return true; + } + } + + // Default to treating workspace refs as potential images + // since that's the most common case for generated content + return true; +} + +/** + * Format a single output value, converting workspace refs to markdown images. 
+ */
+function formatOutputValue(value: unknown, outputKey?: string): string {
+  if (isWorkspaceRef(value) && isLikelyImageRef(value, outputKey)) {
+    // Format as markdown image
+    return `![${outputKey || "Generated image"}](${value})`;
+  }
+
+  if (typeof value === "string") {
+    // Check for data URIs (images)
+    if (value.startsWith("data:image/")) {
+      return `![${outputKey || "Generated image"}](${value})`;
+    }
+    return value;
+  }
+
+  if (Array.isArray(value)) {
+    return value
+      .map((item, idx) => formatOutputValue(item, `${outputKey}_${idx}`))
+      .join("\n\n");
+  }
+
+  if (typeof value === "object" && value !== null) {
+    return JSON.stringify(value, null, 2);
+  }
+
+  return String(value);
+}
+
 function getToolCompletionPhrase(toolName: string): string {
   const toolCompletionPhrases: Record<string, string> = {
     add_understanding: "Updated your business information",
@@ -127,10 +208,26 @@ export function formatToolResponse(result: unknown, toolName: string): string {
 
     case "block_output":
       const blockName = (response.block_name as string) || "Block";
-      const outputs = response.outputs as Record<string, unknown> | undefined;
+      const outputs = response.outputs as Record<string, unknown[]> | undefined;
 
       if (outputs && Object.keys(outputs).length > 0) {
-        const outputKeys = Object.keys(outputs);
-        return `${blockName} executed successfully. Outputs: ${outputKeys.join(", ")}`;
+        const formattedOutputs: string[] = [];
+
+        for (const [key, values] of Object.entries(outputs)) {
+          if (!Array.isArray(values) || values.length === 0) continue;
+
+          // Format each value in the output array
+          for (const value of values) {
+            const formatted = formatOutputValue(value, key);
+            if (formatted) {
+              formattedOutputs.push(formatted);
+            }
+          }
+        }
+
+        if (formattedOutputs.length > 0) {
+          return `${blockName} executed successfully.\n\n${formattedOutputs.join("\n\n")}`;
+        }
+        return `${blockName} executed successfully.`;
       }
       return `${blockName} executed successfully.`;
diff --git a/docs/integrations/README.md b/docs/integrations/README.md
index 192405156c..263d7e6365 100644
--- a/docs/integrations/README.md
+++ b/docs/integrations/README.md
@@ -53,7 +53,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
 | [Block Installation](block-integrations/basic.md#block-installation) | Given a code string, this block allows the verification and installation of a block code into the system |
 | [Concatenate Lists](block-integrations/basic.md#concatenate-lists) | Concatenates multiple lists into a single list |
 | [Dictionary Is Empty](block-integrations/basic.md#dictionary-is-empty) | Checks if a dictionary is empty |
-| [File Store](block-integrations/basic.md#file-store) | Stores the input file in the temporary directory |
+| [File Store](block-integrations/basic.md#file-store) | Downloads and stores a file from a URL, data URI, or local path |
 | [Find In Dictionary](block-integrations/basic.md#find-in-dictionary) | A block that looks up a value in a dictionary, list, or object by key or index and returns the corresponding value |
 | [Find In List](block-integrations/basic.md#find-in-list) | Finds the index of the value in the list |
 | [Get All Memories](block-integrations/basic.md#get-all-memories) | Retrieve all memories from Mem0 with optional conversation filtering |
diff --git a/docs/integrations/block-integrations/basic.md b/docs/integrations/block-integrations/basic.md
index f92d19002f..5a73fd5a03 100644
--- a/docs/integrations/block-integrations/basic.md
+++ b/docs/integrations/block-integrations/basic.md
@@ -709,7 +709,7 @@ This is useful for
conditional logic where you need to verify if data was return ## File Store ### What it is -Stores the input file in the temporary directory. +Downloads and stores a file from a URL, data URI, or local path. Use this to fetch images, documents, or other files for processing. In CoPilot: saves to workspace (use list_workspace_files to see it). In graphs: outputs a data URI to pass to other blocks. ### How it works @@ -722,15 +722,15 @@ The block outputs a file path that other blocks can use to access the stored fil | Input | Description | Type | Required | |-------|-------------|------|----------| -| file_in | The file to store in the temporary directory, it can be a URL, data URI, or local path. | str (file) | Yes | -| base_64 | Whether produce an output in base64 format (not recommended, you can pass the string path just fine accross blocks). | bool | No | +| file_in | The file to download and store. Can be a URL (https://...), data URI, or local path. | str (file) | Yes | +| base_64 | Whether to produce output in base64 format (not recommended, you can pass the file reference across blocks). | bool | No | ### Outputs | Output | Description | Type | |--------|-------------|------| | error | Error message if the operation failed | str | -| file_out | The relative path to the stored file in the temporary directory. | str (file) | +| file_out | Reference to the stored file. In CoPilot: workspace:// URI (visible in list_workspace_files). In graphs: data URI for passing to other blocks. | str (file) | ### Possible use case diff --git a/docs/integrations/block-integrations/multimedia.md b/docs/integrations/block-integrations/multimedia.md index e2d11cfbf7..6b8f261346 100644 --- a/docs/integrations/block-integrations/multimedia.md +++ b/docs/integrations/block-integrations/multimedia.md @@ -12,7 +12,7 @@ Block to attach an audio file to a video file using moviepy. This block combines a video file with an audio file using the moviepy library. The audio track is attached to the video, optionally with volume adjustment via the volume parameter (1.0 = original volume). -Input files can be URLs, data URIs, or local paths. The output can be returned as either a file path or base64 data URI. +Input files can be URLs, data URIs, or local paths. The output format is automatically determined: `workspace://` URLs in CoPilot, data URIs in graph executions. ### Inputs @@ -22,7 +22,6 @@ Input files can be URLs, data URIs, or local paths. The output can be returned a | video_in | Video input (URL, data URI, or local path). | str (file) | Yes | | audio_in | Audio input (URL, data URI, or local path). | str (file) | Yes | | volume | Volume scale for the newly attached audio track (1.0 = original). | float | No | -| output_return_type | Return the final output as a relative path or base64 data URI. | "file_path" \| "data_uri" | No | ### Outputs @@ -51,7 +50,7 @@ Block to loop a video to a given duration or number of repeats. This block extends a video by repeating it to reach a target duration or number of loops. Set duration to specify the total length in seconds, or use n_loops to repeat the video a specific number of times. -The looped video is seamlessly concatenated and can be output as a file path or base64 data URI. +The looped video is seamlessly concatenated. The output format is automatically determined: `workspace://` URLs in CoPilot, data URIs in graph executions. 
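How that determination works can be pictured as a small dispatch on the execution context. The snippet below is an illustrative sketch only, not the platform's actual implementation; apart from the `workspace_id` field (which the execution context does carry, per the SDK docs later in this series), the names are assumptions:

```python
# Illustrative sketch only - the real selection happens inside store_media_file.
def pick_block_output_format(execution_context) -> str:
    # Assumption: CoPilot sessions populate workspace_id on the execution
    # context, while plain graph runs leave it unset.
    if getattr(execution_context, "workspace_id", None):
        return "workspace_uri"  # e.g. "workspace://<file-id>" (persistent file)
    return "data_uri"  # e.g. "data:video/mp4;base64,..." (inline, for the next block)
```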
### Inputs @@ -61,7 +60,6 @@ The looped video is seamlessly concatenated and can be output as a file path or | video_in | The input video (can be a URL, data URI, or local path). | str (file) | Yes | | duration | Target duration (in seconds) to loop the video to. If omitted, defaults to no looping. | float | No | | n_loops | Number of times to repeat the video. If omitted, defaults to 1 (no repeat). | int | No | -| output_return_type | How to return the output video. Either a relative path or base64 data URI. | "file_path" \| "data_uri" | No | ### Outputs diff --git a/docs/platform/block-sdk-guide.md b/docs/platform/block-sdk-guide.md index 5b3eda5184..42fd883251 100644 --- a/docs/platform/block-sdk-guide.md +++ b/docs/platform/block-sdk-guide.md @@ -277,6 +277,50 @@ async def run( token = credentials.api_key.get_secret_value() ``` +### Handling Files + +When your block works with files (images, videos, documents), use `store_media_file()`: + +```python +from backend.data.execution import ExecutionContext +from backend.util.file import store_media_file +from backend.util.type import MediaFileType + +async def run( + self, + input_data: Input, + *, + execution_context: ExecutionContext, + **kwargs, +): + # PROCESSING: Need local file path for tools like ffmpeg, MoviePy, PIL + local_path = await store_media_file( + file=input_data.video, + execution_context=execution_context, + return_format="for_local_processing", + ) + + # EXTERNAL API: Need base64 content for APIs like Replicate, OpenAI + image_b64 = await store_media_file( + file=input_data.image, + execution_context=execution_context, + return_format="for_external_api", + ) + + # OUTPUT: Return to user/next block (auto-adapts to context) + result = await store_media_file( + file=generated_url, + execution_context=execution_context, + return_format="for_block_output", # workspace:// in CoPilot, data URI in graphs + ) + yield "image_url", result +``` + +**Return format options:** +- `"for_local_processing"` - Local file path for processing tools +- `"for_external_api"` - Data URI for external APIs needing base64 +- `"for_block_output"` - **Always use for outputs** - automatically picks best format + ## Testing Your Block ```bash diff --git a/docs/platform/new_blocks.md b/docs/platform/new_blocks.md index d9d329ff51..114ff8d9a4 100644 --- a/docs/platform/new_blocks.md +++ b/docs/platform/new_blocks.md @@ -111,6 +111,71 @@ Follow these steps to create and test a new block: - `graph_exec_id`: The ID of the execution of the agent. This changes every time the agent has a new "run" - `node_exec_id`: The ID of the execution of the node. This changes every time the node is executed - `node_id`: The ID of the node that is being executed. It changes every version of the graph, but not every time the node is executed. + - `execution_context`: An `ExecutionContext` object containing user_id, graph_exec_id, workspace_id, and session_id. Required for file handling. + +### Handling Files in Blocks + +When your block needs to work with files (images, videos, documents), use `store_media_file()` from `backend.util.file`. This function handles downloading, validation, virus scanning, and storage. 
+
+**Import:**
+```python
+from backend.data.execution import ExecutionContext
+from backend.util.file import store_media_file
+from backend.util.type import MediaFileType
+```
+
+**The `return_format` parameter determines what you get back:**
+
+| Format | Use When | Returns |
+|--------|----------|---------|
+| `"for_local_processing"` | Processing with local tools (ffmpeg, MoviePy, PIL) | Local file path (e.g., `"image.png"`) |
+| `"for_external_api"` | Sending content to external APIs (Replicate, OpenAI) | Data URI (e.g., `"data:image/png;base64,..."`) |
+| `"for_block_output"` | Returning output from your block | Smart: `workspace://` in CoPilot, data URI in graphs |
+
+**Examples:**
+
+```python
+async def run(
+    self,
+    input_data: Input,
+    *,
+    execution_context: ExecutionContext,
+    **kwargs,
+) -> BlockOutput:
+    # PROCESSING: Need to work with file locally (ffmpeg, MoviePy, PIL)
+    local_path = await store_media_file(
+        file=input_data.video,
+        execution_context=execution_context,
+        return_format="for_local_processing",
+    )
+    # local_path = "video.mp4" - use with Path, ffmpeg, subprocess, etc.
+    full_path = get_exec_file_path(execution_context.graph_exec_id, local_path)
+
+    # EXTERNAL API: Need to send content to an API like Replicate
+    image_b64 = await store_media_file(
+        file=input_data.image,
+        execution_context=execution_context,
+        return_format="for_external_api",
+    )
+    # image_b64 = "data:image/png;base64,iVBORw0..." - send to external API
+
+    # OUTPUT: Returning result from block to user/next block
+    result_url = await store_media_file(
+        file=generated_image_url,
+        execution_context=execution_context,
+        return_format="for_block_output",
+    )
+    yield "image_url", result_url
+    # In CoPilot: result_url = "workspace://abc123" (persistent, context-efficient)
+    # In graphs: result_url = "data:image/png;base64,..." (for next block/display)
+```
+
+**Key points:**
+
+- `for_block_output` is the **only** format that auto-adapts to execution context
+- Always use `for_block_output` for block outputs unless you have a specific reason not to
+- Never manually check for `workspace_id` - let `for_block_output` handle the logic
+- The function handles URLs, data URIs, `workspace://` references, and local paths as input
 
 ### Field Types
 
From b94c83aacc9093ce480aa270971ac5baa177f311 Mon Sep 17 00:00:00 2001
From: Ubbe
Date: Thu, 29 Jan 2026 17:46:36 +0700
Subject: [PATCH 13/25] feat(frontend): Copilot speech to text via Whisper model (#11871)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Changes 🏗️

https://github.com/user-attachments/assets/d9c12ac0-625c-4b38-8834-e494b5eda9c0

Add a "speech to text" feature in the Chat input box of Copilot, similar to what you have in ChatGPT.

## Checklist 📋

### For code changes:

- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
  - [x] Run locally and try the speech to text feature as part of the chat input box

### For configuration changes:

We need to add `OPENAI_API_KEY=` to Vercel (used in the Front-end) both in Dev and Prod.

- [x] `.env.default` is updated or already compatible with my changes --------- Co-authored-by: Claude Opus 4.5 --- AGENTS.md | 24 +- autogpt_platform/CLAUDE.md | 16 +- autogpt_platform/frontend/.env.default | 3 + .../SessionsList/useSessionsPagination.ts | 4 +- .../frontend/src/app/api/transcribe/route.ts | 77 ++++++ .../Chat/components/ChatInput/ChatInput.tsx | 157 +++++++++--- .../ChatInput/components/AudioWaveform.tsx | 142 +++++++++++ .../components/RecordingIndicator.tsx | 26 ++ .../Chat/components/ChatInput/helpers.ts | 6 + .../Chat/components/ChatInput/useChatInput.ts | 4 +- .../components/ChatInput/useVoiceRecording.ts | 240 ++++++++++++++++++ 11 files changed, 626 insertions(+), 73 deletions(-) create mode 100644 autogpt_platform/frontend/src/app/api/transcribe/route.ts create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/components/AudioWaveform.tsx create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/components/RecordingIndicator.tsx create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/helpers.ts create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useVoiceRecording.ts diff --git a/AGENTS.md b/AGENTS.md index cd176f8a2d..202c4c6e02 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -16,7 +16,6 @@ See `docs/content/platform/getting-started.md` for setup instructions. - Format Python code with `poetry run format`. - Format frontend code using `pnpm format`. - ## Frontend guidelines: See `/frontend/CONTRIBUTING.md` for complete patterns. Quick reference: @@ -33,14 +32,17 @@ See `/frontend/CONTRIBUTING.md` for complete patterns. Quick reference: 4. **Styling**: Tailwind CSS only, use design tokens, Phosphor Icons only 5. **Testing**: Add Storybook stories for new components, Playwright for E2E 6. **Code conventions**: Function declarations (not arrow functions) for components/handlers + - Component props should be `interface Props { ... }` (not exported) unless the interface needs to be used outside the component - Separate render logic from business logic (component.tsx + useComponent.ts + helpers.ts) - Colocate state when possible and avoid creating large components, use sub-components ( local `/components` folder next to the parent component ) when sensible - Avoid large hooks, abstract logic into `helpers.ts` files when sensible - Use function declarations for components, arrow functions only for callbacks - No barrel files or `index.ts` re-exports -- Do not use `useCallback` or `useMemo` unless strictly needed - Avoid comments at all times unless the code is very complex +- Do not use `useCallback` or `useMemo` unless asked to optimise a given function +- Do not type hook returns, let Typescript infer as much as possible +- Never type with `any`, if not types available use `unknown` ## Testing @@ -49,22 +51,8 @@ See `/frontend/CONTRIBUTING.md` for complete patterns. Quick reference: Always run the relevant linters and tests before committing. Use conventional commit messages for all commits (e.g. `feat(backend): add API`). 
- Types: - - feat - - fix - - refactor - - ci - - dx (developer experience) - Scopes: - - platform - - platform/library - - platform/marketplace - - backend - - backend/executor - - frontend - - frontend/library - - frontend/marketplace - - blocks +Types: - feat - fix - refactor - ci - dx (developer experience) +Scopes: - platform - platform/library - platform/marketplace - backend - backend/executor - frontend - frontend/library - frontend/marketplace - blocks ## Pull requests diff --git a/autogpt_platform/CLAUDE.md b/autogpt_platform/CLAUDE.md index 9690178587..a5a588b667 100644 --- a/autogpt_platform/CLAUDE.md +++ b/autogpt_platform/CLAUDE.md @@ -85,17 +85,6 @@ pnpm format pnpm types ``` -**📖 Complete Guide**: See `/frontend/CONTRIBUTING.md` and `/frontend/.cursorrules` for comprehensive frontend patterns. - -**Key Frontend Conventions:** - -- Separate render logic from data/behavior in components -- Use generated API hooks from `@/app/api/__generated__/endpoints/` -- Use function declarations (not arrow functions) for components/handlers -- Use design system components from `src/components/` (atoms, molecules, organisms) -- Only use Phosphor Icons -- Never use `src/components/__legacy__/*` or deprecated `BackendAPI` - ## Architecture Overview ### Backend Architecture @@ -261,14 +250,17 @@ See `/frontend/CONTRIBUTING.md` for complete patterns. Quick reference: 4. **Styling**: Tailwind CSS only, use design tokens, Phosphor Icons only 5. **Testing**: Add Storybook stories for new components, Playwright for E2E 6. **Code conventions**: Function declarations (not arrow functions) for components/handlers + - Component props should be `interface Props { ... }` (not exported) unless the interface needs to be used outside the component - Separate render logic from business logic (component.tsx + useComponent.ts + helpers.ts) - Colocate state when possible and avoid creating large components, use sub-components ( local `/components` folder next to the parent component ) when sensible - Avoid large hooks, abstract logic into `helpers.ts` files when sensible - Use function declarations for components, arrow functions only for callbacks - No barrel files or `index.ts` re-exports -- Do not use `useCallback` or `useMemo` unless strictly needed +- Do not use `useCallback` or `useMemo` unless asked to optimise a given function - Avoid comments at all times unless the code is very complex +- Do not type hook returns, let Typescript infer as much as possible +- Never type with `any`, if not types available use `unknown` ### Security Implementation diff --git a/autogpt_platform/frontend/.env.default b/autogpt_platform/frontend/.env.default index af250fb8bf..7a9d81e39e 100644 --- a/autogpt_platform/frontend/.env.default +++ b/autogpt_platform/frontend/.env.default @@ -34,3 +34,6 @@ NEXT_PUBLIC_PREVIEW_STEALING_DEV= # PostHog Analytics NEXT_PUBLIC_POSTHOG_KEY= NEXT_PUBLIC_POSTHOG_HOST=https://eu.i.posthog.com + +# OpenAI (for voice transcription) +OPENAI_API_KEY= diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts index 11ddd937af..61e3e6f37f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts +++ 
b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts @@ -73,9 +73,9 @@ export function useSessionsPagination({ enabled }: UseSessionsPaginationArgs) { }; const reset = () => { + // Only reset the offset - keep existing sessions visible during refetch + // The effect will replace sessions when new data arrives at offset 0 setOffset(0); - setAccumulatedSessions([]); - setTotalCount(null); }; return { diff --git a/autogpt_platform/frontend/src/app/api/transcribe/route.ts b/autogpt_platform/frontend/src/app/api/transcribe/route.ts new file mode 100644 index 0000000000..10c182cdfa --- /dev/null +++ b/autogpt_platform/frontend/src/app/api/transcribe/route.ts @@ -0,0 +1,77 @@ +import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers"; +import { NextRequest, NextResponse } from "next/server"; + +const WHISPER_API_URL = "https://api.openai.com/v1/audio/transcriptions"; +const MAX_FILE_SIZE = 25 * 1024 * 1024; // 25MB - Whisper's limit + +function getExtensionFromMimeType(mimeType: string): string { + const subtype = mimeType.split("/")[1]?.split(";")[0]; + return subtype || "webm"; +} + +export async function POST(request: NextRequest) { + const token = await getServerAuthToken(); + + if (!token || token === "no-token-found") { + return NextResponse.json({ error: "Unauthorized" }, { status: 401 }); + } + + const apiKey = process.env.OPENAI_API_KEY; + + if (!apiKey) { + return NextResponse.json( + { error: "OpenAI API key not configured" }, + { status: 401 }, + ); + } + + try { + const formData = await request.formData(); + const audioFile = formData.get("audio"); + + if (!audioFile || !(audioFile instanceof Blob)) { + return NextResponse.json( + { error: "No audio file provided" }, + { status: 400 }, + ); + } + + if (audioFile.size > MAX_FILE_SIZE) { + return NextResponse.json( + { error: "File too large. Maximum size is 25MB." 
}, + { status: 413 }, + ); + } + + const ext = getExtensionFromMimeType(audioFile.type); + const whisperFormData = new FormData(); + whisperFormData.append("file", audioFile, `recording.${ext}`); + whisperFormData.append("model", "whisper-1"); + + const response = await fetch(WHISPER_API_URL, { + method: "POST", + headers: { + Authorization: `Bearer ${apiKey}`, + }, + body: whisperFormData, + }); + + if (!response.ok) { + const errorData = await response.json().catch(() => ({})); + console.error("Whisper API error:", errorData); + return NextResponse.json( + { error: errorData.error?.message || "Transcription failed" }, + { status: response.status }, + ); + } + + const result = await response.json(); + return NextResponse.json({ text: result.text }); + } catch (error) { + console.error("Transcription error:", error); + return NextResponse.json( + { error: "Failed to process audio" }, + { status: 500 }, + ); + } +} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx index c45e8dc250..521f6f6320 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx @@ -1,7 +1,14 @@ import { Button } from "@/components/atoms/Button/Button"; import { cn } from "@/lib/utils"; -import { ArrowUpIcon, StopIcon } from "@phosphor-icons/react"; +import { + ArrowUpIcon, + CircleNotchIcon, + MicrophoneIcon, + StopIcon, +} from "@phosphor-icons/react"; +import { RecordingIndicator } from "./components/RecordingIndicator"; import { useChatInput } from "./useChatInput"; +import { useVoiceRecording } from "./useVoiceRecording"; export interface Props { onSend: (message: string) => void; @@ -21,13 +28,36 @@ export function ChatInput({ className, }: Props) { const inputId = "chat-input"; - const { value, handleKeyDown, handleSubmit, handleChange, hasMultipleLines } = - useChatInput({ - onSend, - disabled: disabled || isStreaming, - maxRows: 4, - inputId, - }); + const { + value, + setValue, + handleKeyDown: baseHandleKeyDown, + handleSubmit, + handleChange, + hasMultipleLines, + } = useChatInput({ + onSend, + disabled: disabled || isStreaming, + maxRows: 4, + inputId, + }); + + const { + isRecording, + isTranscribing, + elapsedTime, + toggleRecording, + handleKeyDown, + showMicButton, + isInputDisabled, + audioStream, + } = useVoiceRecording({ + setValue, + disabled: disabled || isStreaming, + isStreaming, + value, + baseHandleKeyDown, + }); return (
@@ -35,8 +65,11 @@ export function ChatInput({
@@ -46,48 +79,94 @@ export function ChatInput({ value={value} onChange={handleChange} onKeyDown={handleKeyDown} - placeholder={placeholder} - disabled={disabled || isStreaming} + placeholder={ + isTranscribing + ? "Transcribing..." + : isRecording + ? "" + : placeholder + } + disabled={isInputDisabled} rows={1} className={cn( "w-full resize-none overflow-y-auto border-0 bg-transparent text-[1rem] leading-6 text-black", "placeholder:text-zinc-400", "focus:outline-none focus:ring-0", "disabled:text-zinc-500", - hasMultipleLines ? "pb-6 pl-4 pr-4 pt-2" : "pb-4 pl-4 pr-14 pt-4", + hasMultipleLines + ? "pb-6 pl-4 pr-4 pt-2" + : showMicButton + ? "pb-4 pl-14 pr-14 pt-4" + : "pb-4 pl-4 pr-14 pt-4", )} /> + {isRecording && !value && ( +
+ +
+ )}
- Press Enter to send, Shift+Enter for new line + Press Enter to send, Shift+Enter for new line, Space to record voice - {isStreaming ? ( - - ) : ( - + {showMicButton && ( +
+ +
)} + +
+ {isStreaming ? ( + + ) : ( + + )} +
); diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/components/AudioWaveform.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/components/AudioWaveform.tsx new file mode 100644 index 0000000000..10cbb3fc9f --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/components/AudioWaveform.tsx @@ -0,0 +1,142 @@ +"use client"; + +import { useEffect, useRef, useState } from "react"; + +interface Props { + stream: MediaStream | null; + barCount?: number; + barWidth?: number; + barGap?: number; + barColor?: string; + minBarHeight?: number; + maxBarHeight?: number; +} + +export function AudioWaveform({ + stream, + barCount = 24, + barWidth = 3, + barGap = 2, + barColor = "#ef4444", // red-500 + minBarHeight = 4, + maxBarHeight = 32, +}: Props) { + const [bars, setBars] = useState(() => + Array(barCount).fill(minBarHeight), + ); + const analyserRef = useRef(null); + const audioContextRef = useRef(null); + const sourceRef = useRef(null); + const animationRef = useRef(null); + + useEffect(() => { + if (!stream) { + setBars(Array(barCount).fill(minBarHeight)); + return; + } + + // Create audio context and analyser + const audioContext = new AudioContext(); + const analyser = audioContext.createAnalyser(); + analyser.fftSize = 512; + analyser.smoothingTimeConstant = 0.8; + + // Connect the stream to the analyser + const source = audioContext.createMediaStreamSource(stream); + source.connect(analyser); + + audioContextRef.current = audioContext; + analyserRef.current = analyser; + sourceRef.current = source; + + const timeData = new Uint8Array(analyser.frequencyBinCount); + + const updateBars = () => { + if (!analyserRef.current) return; + + analyserRef.current.getByteTimeDomainData(timeData); + + // Distribute time-domain data across bars + // This shows waveform amplitude, making all bars respond to audio + const newBars: number[] = []; + const samplesPerBar = timeData.length / barCount; + + for (let i = 0; i < barCount; i++) { + // Sample waveform data for this bar + let maxAmplitude = 0; + const startIdx = Math.floor(i * samplesPerBar); + const endIdx = Math.floor((i + 1) * samplesPerBar); + + for (let j = startIdx; j < endIdx && j < timeData.length; j++) { + // Convert to amplitude (distance from center 128) + const amplitude = Math.abs(timeData[j] - 128); + maxAmplitude = Math.max(maxAmplitude, amplitude); + } + + // Map amplitude (0-128) to bar height + const normalized = (maxAmplitude / 128) * 255; + const height = + minBarHeight + (normalized / 255) * (maxBarHeight - minBarHeight); + newBars.push(height); + } + + setBars(newBars); + animationRef.current = requestAnimationFrame(updateBars); + }; + + updateBars(); + + return () => { + if (animationRef.current) { + cancelAnimationFrame(animationRef.current); + } + if (sourceRef.current) { + sourceRef.current.disconnect(); + } + if (audioContextRef.current) { + audioContextRef.current.close(); + } + analyserRef.current = null; + audioContextRef.current = null; + sourceRef.current = null; + }; + }, [stream, barCount, minBarHeight, maxBarHeight]); + + const totalWidth = barCount * barWidth + (barCount - 1) * barGap; + + return ( +
+ {bars.map((height, i) => { + const barHeight = Math.max(minBarHeight, height); + return ( +
+
+
+ ); + })} +
+ ); +} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/components/RecordingIndicator.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/components/RecordingIndicator.tsx new file mode 100644 index 0000000000..0be0d069bb --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/components/RecordingIndicator.tsx @@ -0,0 +1,26 @@ +import { formatElapsedTime } from "../helpers"; +import { AudioWaveform } from "./AudioWaveform"; + +type Props = { + elapsedTime: number; + audioStream: MediaStream | null; +}; + +export function RecordingIndicator({ elapsedTime, audioStream }: Props) { + return ( +
+ + + {formatElapsedTime(elapsedTime)} + +
+  );
+}
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/helpers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/helpers.ts
new file mode 100644
index 0000000000..26bae8c9d9
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/helpers.ts
@@ -0,0 +1,6 @@
+export function formatElapsedTime(ms: number): string {
+  const seconds = Math.floor(ms / 1000);
+  const minutes = Math.floor(seconds / 60);
+  const remainingSeconds = seconds % 60;
+  return `${minutes}:${remainingSeconds.toString().padStart(2, "0")}`;
+}
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useChatInput.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useChatInput.ts
index 6fa8e7252b..a053e6080f 100644
--- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useChatInput.ts
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useChatInput.ts
@@ -6,7 +6,7 @@
   useState,
 } from "react";
 
-interface UseChatInputArgs {
+interface Args {
   onSend: (message: string) => void;
   disabled?: boolean;
   maxRows?: number;
@@ -18,7 +18,7 @@
   disabled = false,
   maxRows = 5,
   inputId = "chat-input",
-}: UseChatInputArgs) {
+}: Args) {
   const [value, setValue] = useState("");
   const [hasMultipleLines, setHasMultipleLines] = useState(false);
 
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useVoiceRecording.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useVoiceRecording.ts
new file mode 100644
index 0000000000..13b625e69c
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useVoiceRecording.ts
@@ -0,0 +1,240 @@
+import { useToast } from "@/components/molecules/Toast/use-toast";
+import React, {
+  KeyboardEvent,
+  useCallback,
+  useEffect,
+  useRef,
+  useState,
+} from "react";
+
+const MAX_RECORDING_DURATION = 2 * 60 * 1000; // 2 minutes in ms
+
+interface Args {
+  setValue: React.Dispatch<React.SetStateAction<string>>;
+  disabled?: boolean;
+  isStreaming?: boolean;
+  value: string;
+  baseHandleKeyDown: (event: KeyboardEvent) => void;
+}
+
+export function useVoiceRecording({
+  setValue,
+  disabled = false,
+  isStreaming = false,
+  value,
+  baseHandleKeyDown,
+}: Args) {
+  const [isRecording, setIsRecording] = useState(false);
+  const [isTranscribing, setIsTranscribing] = useState(false);
+  const [error, setError] = useState<string | null>(null);
+  const [elapsedTime, setElapsedTime] = useState(0);
+
+  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
+  const chunksRef = useRef<Blob[]>([]);
+  const timerRef = useRef<ReturnType<typeof setInterval> | null>(null);
+  const startTimeRef = useRef(0);
+  const streamRef = useRef<MediaStream | null>(null);
+  const isRecordingRef = useRef(false);
+
+  const isSupported =
+    typeof window !== "undefined" &&
+    !!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia);
+
+  const clearTimer = useCallback(() => {
+    if (timerRef.current) {
+      clearInterval(timerRef.current);
+      timerRef.current = null;
+    }
+  }, []);
+
+  const cleanup = useCallback(() => {
+    clearTimer();
+    if (streamRef.current) {
+      streamRef.current.getTracks().forEach((track) => track.stop());
+      streamRef.current = null;
+    }
+    mediaRecorderRef.current = null;
+    chunksRef.current = [];
+    setElapsedTime(0);
+  }, [clearTimer]);
+
+  const handleTranscription = useCallback(
+    (text: string) => {
+      setValue((prev) => {
+        const trimmedPrev = prev.trim();
+        if
(trimmedPrev) { + return `${trimmedPrev} ${text}`; + } + return text; + }); + }, + [setValue], + ); + + const transcribeAudio = useCallback( + async (audioBlob: Blob) => { + setIsTranscribing(true); + setError(null); + + try { + const formData = new FormData(); + formData.append("audio", audioBlob); + + const response = await fetch("/api/transcribe", { + method: "POST", + body: formData, + }); + + if (!response.ok) { + const data = await response.json().catch(() => ({})); + throw new Error(data.error || "Transcription failed"); + } + + const data = await response.json(); + if (data.text) { + handleTranscription(data.text); + } + } catch (err) { + const message = + err instanceof Error ? err.message : "Transcription failed"; + setError(message); + console.error("Transcription error:", err); + } finally { + setIsTranscribing(false); + } + }, + [handleTranscription], + ); + + const stopRecording = useCallback(() => { + if (mediaRecorderRef.current && isRecordingRef.current) { + mediaRecorderRef.current.stop(); + isRecordingRef.current = false; + setIsRecording(false); + clearTimer(); + } + }, [clearTimer]); + + const startRecording = useCallback(async () => { + if (disabled || isRecordingRef.current || isTranscribing) return; + + setError(null); + chunksRef.current = []; + + try { + const stream = await navigator.mediaDevices.getUserMedia({ audio: true }); + streamRef.current = stream; + + const mediaRecorder = new MediaRecorder(stream, { + mimeType: MediaRecorder.isTypeSupported("audio/webm") + ? "audio/webm" + : "audio/mp4", + }); + + mediaRecorderRef.current = mediaRecorder; + + mediaRecorder.ondataavailable = (event) => { + if (event.data.size > 0) { + chunksRef.current.push(event.data); + } + }; + + mediaRecorder.onstop = async () => { + const audioBlob = new Blob(chunksRef.current, { + type: mediaRecorder.mimeType, + }); + + // Cleanup stream + if (streamRef.current) { + streamRef.current.getTracks().forEach((track) => track.stop()); + streamRef.current = null; + } + + if (audioBlob.size > 0) { + await transcribeAudio(audioBlob); + } + }; + + mediaRecorder.start(1000); // Collect data every second + isRecordingRef.current = true; + setIsRecording(true); + startTimeRef.current = Date.now(); + + // Start elapsed time timer + timerRef.current = setInterval(() => { + const elapsed = Date.now() - startTimeRef.current; + setElapsedTime(elapsed); + + // Auto-stop at max duration + if (elapsed >= MAX_RECORDING_DURATION) { + stopRecording(); + } + }, 100); + } catch (err) { + console.error("Failed to start recording:", err); + if (err instanceof DOMException && err.name === "NotAllowedError") { + setError("Microphone permission denied"); + } else { + setError("Failed to access microphone"); + } + cleanup(); + } + }, [disabled, isTranscribing, stopRecording, transcribeAudio, cleanup]); + + const toggleRecording = useCallback(() => { + if (isRecording) { + stopRecording(); + } else { + startRecording(); + } + }, [isRecording, startRecording, stopRecording]); + + const { toast } = useToast(); + + useEffect(() => { + if (error) { + toast({ + title: "Voice recording failed", + description: error, + variant: "destructive", + }); + } + }, [error, toast]); + + const handleKeyDown = useCallback( + (event: KeyboardEvent) => { + if (event.key === " " && !value.trim() && !isTranscribing) { + event.preventDefault(); + toggleRecording(); + return; + } + baseHandleKeyDown(event); + }, + [value, isTranscribing, toggleRecording, baseHandleKeyDown], + ); + + const showMicButton = isSupported && !isStreaming; + 
const isInputDisabled = disabled || isStreaming || isTranscribing; + + // Cleanup on unmount + useEffect(() => { + return () => { + cleanup(); + }; + }, [cleanup]); + + return { + isRecording, + isTranscribing, + error, + elapsedTime, + startRecording, + stopRecording, + toggleRecording, + isSupported, + handleKeyDown, + showMicButton, + isInputDisabled, + audioStream: streamRef.current, + }; +} From 4cd5da678d73a4a9af8876de6cc763ad05d0c719 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Thu, 29 Jan 2026 18:33:02 +0100 Subject: [PATCH 14/25] refactor(claude): Split `autogpt_platform/CLAUDE.md` into project-specific files (#11788) Split `autogpt_platform/CLAUDE.md` into project-specific files, to make the scope of the instructions clearer. Also, some minor improvements: - Change references to other Markdown files to @file/path.md syntax that Claude recognizes - Update ambiguous/incorrect/outdated instructions - Remove trailing slashes - Fix broken file path references in other docs (including comments) --- .github/copilot-instructions.md | 6 +- .gitignore | 1 + autogpt_platform/CLAUDE.md | 259 +----------------- autogpt_platform/backend/CLAUDE.md | 170 ++++++++++++ autogpt_platform/backend/TESTING.md | 2 +- .../backend/api/features/builder/routes.py | 2 +- autogpt_platform/frontend/CLAUDE.md | 76 +++++ .../src/lib/autogpt-server-api/types.ts | 10 +- .../contributing/oauth-integration-flow.md | 2 +- docs/platform/ollama.md | 2 +- 10 files changed, 274 insertions(+), 256 deletions(-) create mode 100644 autogpt_platform/backend/CLAUDE.md create mode 100644 autogpt_platform/frontend/CLAUDE.md diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 870e6b4b0a..3c72eaae18 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -160,7 +160,7 @@ pnpm storybook # Start component development server **Backend Entry Points:** -- `backend/backend/server/server.py` - FastAPI application setup +- `backend/backend/api/rest_api.py` - FastAPI application setup - `backend/backend/data/` - Database models and user management - `backend/blocks/` - Agent execution blocks and logic @@ -219,7 +219,7 @@ Agents are built using a visual block-based system where each block performs a s ### API Development -1. Update routes in `/backend/backend/server/routers/` +1. Update routes in `/backend/backend/api/features/` 2. Add/update Pydantic models in same directory 3. Write tests alongside route files 4. 
For `data/*.py` changes, validate user ID checks @@ -285,7 +285,7 @@ Agents are built using a visual block-based system where each block performs a s ### Security Guidelines -**Cache Protection Middleware** (`/backend/backend/server/middleware/security.py`): +**Cache Protection Middleware** (`/backend/backend/api/middleware/security.py`): - Default: Disables caching for ALL endpoints with `Cache-Control: no-store, no-cache, must-revalidate, private` - Uses allow list approach for cacheable paths (static assets, health checks, public pages) diff --git a/.gitignore b/.gitignore index dfce8ba810..1a2291b516 100644 --- a/.gitignore +++ b/.gitignore @@ -178,4 +178,5 @@ autogpt_platform/backend/settings.py *.ign.* .test-contents .claude/settings.local.json +CLAUDE.local.md /autogpt_platform/backend/logs diff --git a/autogpt_platform/CLAUDE.md b/autogpt_platform/CLAUDE.md index a5a588b667..62adbdaefa 100644 --- a/autogpt_platform/CLAUDE.md +++ b/autogpt_platform/CLAUDE.md @@ -6,141 +6,30 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co AutoGPT Platform is a monorepo containing: -- **Backend** (`/backend`): Python FastAPI server with async support -- **Frontend** (`/frontend`): Next.js React application -- **Shared Libraries** (`/autogpt_libs`): Common Python utilities +- **Backend** (`backend`): Python FastAPI server with async support +- **Frontend** (`frontend`): Next.js React application +- **Shared Libraries** (`autogpt_libs`): Common Python utilities -## Essential Commands +## Component Documentation -### Backend Development +- **Backend**: See @backend/CLAUDE.md for backend-specific commands, architecture, and development tasks +- **Frontend**: See @frontend/CLAUDE.md for frontend-specific commands, architecture, and development patterns -```bash -# Install dependencies -cd backend && poetry install - -# Run database migrations -poetry run prisma migrate dev - -# Start all services (database, redis, rabbitmq, clamav) -docker compose up -d - -# Run the backend server -poetry run serve - -# Run tests -poetry run test - -# Run specific test -poetry run pytest path/to/test_file.py::test_function_name - -# Run block tests (tests that validate all blocks work correctly) -poetry run pytest backend/blocks/test/test_block.py -xvs - -# Run tests for a specific block (e.g., GetCurrentTimeBlock) -poetry run pytest 'backend/blocks/test/test_block.py::test_available_blocks[GetCurrentTimeBlock]' -xvs - -# Lint and format -# prefer format if you want to just "fix" it and only get the errors that can't be autofixed -poetry run format # Black + isort -poetry run lint # ruff -``` - -More details can be found in TESTING.md - -#### Creating/Updating Snapshots - -When you first write a test or when the expected output changes: - -```bash -poetry run pytest path/to/test.py --snapshot-update -``` - -⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected. 
- -### Frontend Development - -```bash -# Install dependencies -cd frontend && pnpm i - -# Generate API client from OpenAPI spec -pnpm generate:api - -# Start development server -pnpm dev - -# Run E2E tests -pnpm test - -# Run Storybook for component development -pnpm storybook - -# Build production -pnpm build - -# Format and lint -pnpm format - -# Type checking -pnpm types -``` - -## Architecture Overview - -### Backend Architecture - -- **API Layer**: FastAPI with REST and WebSocket endpoints -- **Database**: PostgreSQL with Prisma ORM, includes pgvector for embeddings -- **Queue System**: RabbitMQ for async task processing -- **Execution Engine**: Separate executor service processes agent workflows -- **Authentication**: JWT-based with Supabase integration -- **Security**: Cache protection middleware prevents sensitive data caching in browsers/proxies - -### Frontend Architecture - -- **Framework**: Next.js 15 App Router (client-first approach) -- **Data Fetching**: Type-safe generated API hooks via Orval + React Query -- **State Management**: React Query for server state, co-located UI state in components/hooks -- **Component Structure**: Separate render logic (`.tsx`) from business logic (`use*.ts` hooks) -- **Workflow Builder**: Visual graph editor using @xyflow/react -- **UI Components**: shadcn/ui (Radix UI primitives) with Tailwind CSS styling -- **Icons**: Phosphor Icons only -- **Feature Flags**: LaunchDarkly integration -- **Error Handling**: ErrorCard for render errors, toast for mutations, Sentry for exceptions -- **Testing**: Playwright for E2E, Storybook for component development - -### Key Concepts +## Key Concepts 1. **Agent Graphs**: Workflow definitions stored as JSON, executed by the backend -2. **Blocks**: Reusable components in `/backend/blocks/` that perform specific tasks +2. **Blocks**: Reusable components in `backend/backend/blocks/` that perform specific tasks 3. **Integrations**: OAuth and API connections stored per user 4. **Store**: Marketplace for sharing agent templates 5. 
**Virus Scanning**: ClamAV integration for file upload security -### Testing Approach - -- Backend uses pytest with snapshot testing for API responses -- Test files are colocated with source files (`*_test.py`) -- Frontend uses Playwright for E2E tests -- Component testing via Storybook - -### Database Schema - -Key models (defined in `/backend/schema.prisma`): - -- `User`: Authentication and profile data -- `AgentGraph`: Workflow definitions with version control -- `AgentGraphExecution`: Execution history and results -- `AgentNode`: Individual nodes in a workflow -- `StoreListing`: Marketplace listings for sharing agents - ### Environment Configuration #### Configuration Files -- **Backend**: `/backend/.env.default` (defaults) → `/backend/.env` (user overrides) -- **Frontend**: `/frontend/.env.default` (defaults) → `/frontend/.env` (user overrides) -- **Platform**: `/.env.default` (Supabase/shared defaults) → `/.env` (user overrides) +- **Backend**: `backend/.env.default` (defaults) → `backend/.env` (user overrides) +- **Frontend**: `frontend/.env.default` (defaults) → `frontend/.env` (user overrides) +- **Platform**: `.env.default` (Supabase/shared defaults) → `.env` (user overrides) #### Docker Environment Loading Order @@ -156,130 +45,12 @@ Key models (defined in `/backend/schema.prisma`): - Backend/Frontend services use YAML anchors for consistent configuration - Supabase services (`db/docker/docker-compose.yml`) follow the same pattern -### Common Development Tasks - -**Adding a new block:** - -Follow the comprehensive [Block SDK Guide](../../../docs/content/platform/block-sdk-guide.md) which covers: - -- Provider configuration with `ProviderBuilder` -- Block schema definition -- Authentication (API keys, OAuth, webhooks) -- Testing and validation -- File organization - -Quick steps: - -1. Create new file in `/backend/backend/blocks/` -2. Configure provider using `ProviderBuilder` in `_config.py` -3. Inherit from `Block` base class -4. Define input/output schemas using `BlockSchema` -5. Implement async `run` method -6. Generate unique block ID using `uuid.uuid4()` -7. Test with `poetry run pytest backend/blocks/test/test_block.py` - -Note: when making many new blocks analyze the interfaces for each of these blocks and picture if they would go well together in a graph based editor or would they struggle to connect productively? -ex: do the inputs and outputs tie well together? - -If you get any pushback or hit complex block conditions check the new_blocks guide in the docs. - -**Handling files in blocks with `store_media_file()`:** - -When blocks need to work with files (images, videos, documents), use `store_media_file()` from `backend.util.file`. 
The `return_format` parameter determines what you get back: - -| Format | Use When | Returns | -|--------|----------|---------| -| `"for_local_processing"` | Processing with local tools (ffmpeg, MoviePy, PIL) | Local file path (e.g., `"image.png"`) | -| `"for_external_api"` | Sending content to external APIs (Replicate, OpenAI) | Data URI (e.g., `"data:image/png;base64,..."`) | -| `"for_block_output"` | Returning output from your block | Smart: `workspace://` in CoPilot, data URI in graphs | - -**Examples:** -```python -# INPUT: Need to process file locally with ffmpeg -local_path = await store_media_file( - file=input_data.video, - execution_context=execution_context, - return_format="for_local_processing", -) -# local_path = "video.mp4" - use with Path/ffmpeg/etc - -# INPUT: Need to send to external API like Replicate -image_b64 = await store_media_file( - file=input_data.image, - execution_context=execution_context, - return_format="for_external_api", -) -# image_b64 = "data:image/png;base64,iVBORw0..." - send to API - -# OUTPUT: Returning result from block -result_url = await store_media_file( - file=generated_image_url, - execution_context=execution_context, - return_format="for_block_output", -) -yield "image_url", result_url -# In CoPilot: result_url = "workspace://abc123" -# In graphs: result_url = "data:image/png;base64,..." -``` - -**Key points:** -- `for_block_output` is the ONLY format that auto-adapts to execution context -- Always use `for_block_output` for block outputs unless you have a specific reason not to -- Never hardcode workspace checks - let `for_block_output` handle it - -**Modifying the API:** - -1. Update route in `/backend/backend/server/routers/` -2. Add/update Pydantic models in same directory -3. Write tests alongside the route file -4. Run `poetry run test` to verify - -### Frontend guidelines: - -See `/frontend/CONTRIBUTING.md` for complete patterns. Quick reference: - -1. **Pages**: Create in `src/app/(platform)/feature-name/page.tsx` - - Add `usePageName.ts` hook for logic - - Put sub-components in local `components/` folder -2. **Components**: Structure as `ComponentName/ComponentName.tsx` + `useComponentName.ts` + `helpers.ts` - - Use design system components from `src/components/` (atoms, molecules, organisms) - - Never use `src/components/__legacy__/*` -3. **Data fetching**: Use generated API hooks from `@/app/api/__generated__/endpoints/` - - Regenerate with `pnpm generate:api` - - Pattern: `use{Method}{Version}{OperationName}` -4. **Styling**: Tailwind CSS only, use design tokens, Phosphor Icons only -5. **Testing**: Add Storybook stories for new components, Playwright for E2E -6. **Code conventions**: Function declarations (not arrow functions) for components/handlers - -- Component props should be `interface Props { ... 
}` (not exported) unless the interface needs to be used outside the component -- Separate render logic from business logic (component.tsx + useComponent.ts + helpers.ts) -- Colocate state when possible and avoid creating large components, use sub-components ( local `/components` folder next to the parent component ) when sensible -- Avoid large hooks, abstract logic into `helpers.ts` files when sensible -- Use function declarations for components, arrow functions only for callbacks -- No barrel files or `index.ts` re-exports -- Do not use `useCallback` or `useMemo` unless asked to optimise a given function -- Avoid comments at all times unless the code is very complex -- Do not type hook returns, let Typescript infer as much as possible -- Never type with `any`, if not types available use `unknown` - -### Security Implementation - -**Cache Protection Middleware:** - -- Located in `/backend/backend/server/middleware/security.py` -- Default behavior: Disables caching for ALL endpoints with `Cache-Control: no-store, no-cache, must-revalidate, private` -- Uses an allow list approach - only explicitly permitted paths can be cached -- Cacheable paths include: static assets (`/static/*`, `/_next/static/*`), health checks, public store pages, documentation -- Prevents sensitive data (auth tokens, API keys, user data) from being cached by browsers/proxies -- To allow caching for a new endpoint, add it to `CACHEABLE_PATHS` in the middleware -- Applied to both main API server and external API applications - ### Creating Pull Requests -- Create the PR aginst the `dev` branch of the repository. -- Ensure the branch name is descriptive (e.g., `feature/add-new-block`)/ -- Use conventional commit messages (see below)/ -- Fill out the .github/PULL_REQUEST_TEMPLATE.md template as the PR description/ +- Create the PR against the `dev` branch of the repository. +- Ensure the branch name is descriptive (e.g., `feature/add-new-block`) +- Use conventional commit messages (see below) +- Fill out the .github/PULL_REQUEST_TEMPLATE.md template as the PR description - Run the github pre-commit hooks to ensure code quality. ### Reviewing/Revising Pull Requests diff --git a/autogpt_platform/backend/CLAUDE.md b/autogpt_platform/backend/CLAUDE.md new file mode 100644 index 0000000000..53d52bb4d3 --- /dev/null +++ b/autogpt_platform/backend/CLAUDE.md @@ -0,0 +1,170 @@ +# CLAUDE.md - Backend + +This file provides guidance to Claude Code when working with the backend. + +## Essential Commands + +To run something with Python package dependencies you MUST use `poetry run ...`. 
+ +```bash +# Install dependencies +poetry install + +# Run database migrations +poetry run prisma migrate dev + +# Start all services (database, redis, rabbitmq, clamav) +docker compose up -d + +# Run the backend as a whole +poetry run app + +# Run tests +poetry run test + +# Run specific test +poetry run pytest path/to/test_file.py::test_function_name + +# Run block tests (tests that validate all blocks work correctly) +poetry run pytest backend/blocks/test/test_block.py -xvs + +# Run tests for a specific block (e.g., GetCurrentTimeBlock) +poetry run pytest 'backend/blocks/test/test_block.py::test_available_blocks[GetCurrentTimeBlock]' -xvs + +# Lint and format +# prefer format if you want to just "fix" it and only get the errors that can't be autofixed +poetry run format # Black + isort +poetry run lint # ruff +``` + +More details can be found in @TESTING.md + +### Creating/Updating Snapshots + +When you first write a test or when the expected output changes: + +```bash +poetry run pytest path/to/test.py --snapshot-update +``` + +⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected. + +## Architecture + +- **API Layer**: FastAPI with REST and WebSocket endpoints +- **Database**: PostgreSQL with Prisma ORM, includes pgvector for embeddings +- **Queue System**: RabbitMQ for async task processing +- **Execution Engine**: Separate executor service processes agent workflows +- **Authentication**: JWT-based with Supabase integration +- **Security**: Cache protection middleware prevents sensitive data caching in browsers/proxies + +## Testing Approach + +- Uses pytest with snapshot testing for API responses +- Test files are colocated with source files (`*_test.py`) + +## Database Schema + +Key models (defined in `schema.prisma`): + +- `User`: Authentication and profile data +- `AgentGraph`: Workflow definitions with version control +- `AgentGraphExecution`: Execution history and results +- `AgentNode`: Individual nodes in a workflow +- `StoreListing`: Marketplace listings for sharing agents + +## Environment Configuration + +- **Backend**: `.env.default` (defaults) → `.env` (user overrides) + +## Common Development Tasks + +### Adding a new block + +Follow the comprehensive [Block SDK Guide](@../../docs/content/platform/block-sdk-guide.md) which covers: + +- Provider configuration with `ProviderBuilder` +- Block schema definition +- Authentication (API keys, OAuth, webhooks) +- Testing and validation +- File organization + +Quick steps: + +1. Create new file in `backend/blocks/` +2. Configure provider using `ProviderBuilder` in `_config.py` +3. Inherit from `Block` base class +4. Define input/output schemas using `BlockSchema` +5. Implement async `run` method +6. Generate unique block ID using `uuid.uuid4()` +7. Test with `poetry run pytest backend/blocks/test/test_block.py` + +Note: when making many new blocks analyze the interfaces for each of these blocks and picture if they would go well together in a graph-based editor or would they struggle to connect productively? +ex: do the inputs and outputs tie well together? + +If you get any pushback or hit complex block conditions check the new_blocks guide in the docs. + +#### Handling files in blocks with `store_media_file()` + +When blocks need to work with files (images, videos, documents), use `store_media_file()` from `backend.util.file`. 
The `return_format` parameter determines what you get back: + +| Format | Use When | Returns | +|--------|----------|---------| +| `"for_local_processing"` | Processing with local tools (ffmpeg, MoviePy, PIL) | Local file path (e.g., `"image.png"`) | +| `"for_external_api"` | Sending content to external APIs (Replicate, OpenAI) | Data URI (e.g., `"data:image/png;base64,..."`) | +| `"for_block_output"` | Returning output from your block | Smart: `workspace://` in CoPilot, data URI in graphs | + +**Examples:** + +```python +# INPUT: Need to process file locally with ffmpeg +local_path = await store_media_file( + file=input_data.video, + execution_context=execution_context, + return_format="for_local_processing", +) +# local_path = "video.mp4" - use with Path/ffmpeg/etc + +# INPUT: Need to send to external API like Replicate +image_b64 = await store_media_file( + file=input_data.image, + execution_context=execution_context, + return_format="for_external_api", +) +# image_b64 = "data:image/png;base64,iVBORw0..." - send to API + +# OUTPUT: Returning result from block +result_url = await store_media_file( + file=generated_image_url, + execution_context=execution_context, + return_format="for_block_output", +) +yield "image_url", result_url +# In CoPilot: result_url = "workspace://abc123" +# In graphs: result_url = "data:image/png;base64,..." +``` + +**Key points:** + +- `for_block_output` is the ONLY format that auto-adapts to execution context +- Always use `for_block_output` for block outputs unless you have a specific reason not to +- Never hardcode workspace checks - let `for_block_output` handle it + +### Modifying the API + +1. Update route in `backend/api/features/` +2. Add/update Pydantic models in same directory +3. Write tests alongside the route file +4. 
Run `poetry run test` to verify + +## Security Implementation + +### Cache Protection Middleware + +- Located in `backend/api/middleware/security.py` +- Default behavior: Disables caching for ALL endpoints with `Cache-Control: no-store, no-cache, must-revalidate, private` +- Uses an allow list approach - only explicitly permitted paths can be cached +- Cacheable paths include: static assets (`static/*`, `_next/static/*`), health checks, public store pages, documentation +- Prevents sensitive data (auth tokens, API keys, user data) from being cached by browsers/proxies +- To allow caching for a new endpoint, add it to `CACHEABLE_PATHS` in the middleware +- Applied to both main API server and external API applications diff --git a/autogpt_platform/backend/TESTING.md b/autogpt_platform/backend/TESTING.md index a3a5db68ef..2e09144485 100644 --- a/autogpt_platform/backend/TESTING.md +++ b/autogpt_platform/backend/TESTING.md @@ -138,7 +138,7 @@ If the test doesn't need the `user_id` specifically, mocking is not necessary as #### Using Global Auth Fixtures -Two global auth fixtures are provided by `backend/server/conftest.py`: +Two global auth fixtures are provided by `backend/api/conftest.py`: - `mock_jwt_user` - Regular user with `test_user_id` ("test-user-id") - `mock_jwt_admin` - Admin user with `admin_user_id` ("admin-user-id") diff --git a/autogpt_platform/backend/backend/api/features/builder/routes.py b/autogpt_platform/backend/backend/api/features/builder/routes.py index 7fe9cab189..15b922178d 100644 --- a/autogpt_platform/backend/backend/api/features/builder/routes.py +++ b/autogpt_platform/backend/backend/api/features/builder/routes.py @@ -17,7 +17,7 @@ router = fastapi.APIRouter( ) -# Taken from backend/server/v2/store/db.py +# Taken from backend/api/features/store/db.py def sanitize_query(query: str | None) -> str | None: if query is None: return query diff --git a/autogpt_platform/frontend/CLAUDE.md b/autogpt_platform/frontend/CLAUDE.md new file mode 100644 index 0000000000..b58f1ad6aa --- /dev/null +++ b/autogpt_platform/frontend/CLAUDE.md @@ -0,0 +1,76 @@ +# CLAUDE.md - Frontend + +This file provides guidance to Claude Code when working with the frontend. + +## Essential Commands + +```bash +# Install dependencies +pnpm i + +# Generate API client from OpenAPI spec +pnpm generate:api + +# Start development server +pnpm dev + +# Run E2E tests +pnpm test + +# Run Storybook for component development +pnpm storybook + +# Build production +pnpm build + +# Format and lint +pnpm format + +# Type checking +pnpm types +``` + +### Code Style + +- Fully capitalize acronyms in symbols, e.g. 
`graphID`, `useBackendAPI` +- Use function declarations (not arrow functions) for components/handlers + +## Architecture + +- **Framework**: Next.js 15 App Router (client-first approach) +- **Data Fetching**: Type-safe generated API hooks via Orval + React Query +- **State Management**: React Query for server state, co-located UI state in components/hooks +- **Component Structure**: Separate render logic (`.tsx`) from business logic (`use*.ts` hooks) +- **Workflow Builder**: Visual graph editor using @xyflow/react +- **UI Components**: shadcn/ui (Radix UI primitives) with Tailwind CSS styling +- **Icons**: Phosphor Icons only +- **Feature Flags**: LaunchDarkly integration +- **Error Handling**: ErrorCard for render errors, toast for mutations, Sentry for exceptions +- **Testing**: Playwright for E2E, Storybook for component development + +## Environment Configuration + +`.env.default` (defaults) → `.env` (user overrides) + +## Feature Development + +See @CONTRIBUTING.md for complete patterns. Quick reference: + +1. **Pages**: Create in `src/app/(platform)/feature-name/page.tsx` + - Extract component logic into custom hooks grouped by concern, not by component. Each hook should represent a cohesive domain of functionality (e.g., useSearch, useFilters, usePagination) rather than bundling all state into one useComponentState hook. + - Put each hook in its own `.ts` file + - Put sub-components in local `components/` folder + - Component props should be `type Props = { ... }` (not exported) unless it needs to be used outside the component +2. **Components**: Structure as `ComponentName/ComponentName.tsx` + `useComponentName.ts` + `helpers.ts` + - Use design system components from `src/components/` (atoms, molecules, organisms) + - Never use `src/components/__legacy__/*` +3. **Data fetching**: Use generated API hooks from `@/app/api/__generated__/endpoints/` + - Regenerate with `pnpm generate:api` + - Pattern: `use{Method}{Version}{OperationName}` +4. **Styling**: Tailwind CSS only, use design tokens, Phosphor Icons only +5. **Testing**: Add Storybook stories for new components, Playwright for E2E +6. 
**Code conventions**: + - Use function declarations (not arrow functions) for components/handlers + - Do not use `useCallback` or `useMemo` unless asked to optimise a given function + - Do not type hook returns, let Typescript infer as much as possible + - Never type with `any` unless a variable/attribute can ACTUALLY be of any type diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts index 2d583d2062..74855f5e28 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts @@ -516,7 +516,7 @@ export type GraphValidationErrorResponse = { /* *** LIBRARY *** */ -/* Mirror of backend/server/v2/library/model.py:LibraryAgent */ +/* Mirror of backend/api/features/library/model.py:LibraryAgent */ export type LibraryAgent = { id: LibraryAgentID; graph_id: GraphID; @@ -616,7 +616,7 @@ export enum LibraryAgentSortEnum { /* *** CREDENTIALS *** */ -/* Mirror of backend/server/integrations/router.py:CredentialsMetaResponse */ +/* Mirror of backend/api/features/integrations/router.py:CredentialsMetaResponse */ export type CredentialsMetaResponse = { id: string; provider: CredentialsProviderName; @@ -628,13 +628,13 @@ export type CredentialsMetaResponse = { is_system?: boolean; }; -/* Mirror of backend/server/integrations/router.py:CredentialsDeletionResponse */ +/* Mirror of backend/api/features/integrations/router.py:CredentialsDeletionResponse */ export type CredentialsDeleteResponse = { deleted: true; revoked: boolean | null; }; -/* Mirror of backend/server/integrations/router.py:CredentialsDeletionNeedsConfirmationResponse */ +/* Mirror of backend/api/features/integrations/router.py:CredentialsDeletionNeedsConfirmationResponse */ export type CredentialsDeleteNeedConfirmationResponse = { deleted: false; need_confirmation: true; @@ -888,7 +888,7 @@ export type Schedule = { export type ScheduleID = Brand; -/* Mirror of backend/server/routers/v1.py:ScheduleCreationRequest */ +/* Mirror of backend/api/features/v1.py:ScheduleCreationRequest */ export type ScheduleCreatable = { graph_id: GraphID; graph_version: number; diff --git a/docs/platform/contributing/oauth-integration-flow.md b/docs/platform/contributing/oauth-integration-flow.md index dbc7a54be5..f6c3f7fd17 100644 --- a/docs/platform/contributing/oauth-integration-flow.md +++ b/docs/platform/contributing/oauth-integration-flow.md @@ -25,7 +25,7 @@ This document focuses on the **API Integration OAuth flow** used for connecting ### 2. 
Backend API Trust Boundary - **Location**: Server-side FastAPI application - **Components**: - - Integration router (`/backend/backend/server/integrations/router.py`) + - Integration router (`/backend/backend/api/features/integrations/router.py`) - OAuth handlers (`/backend/backend/integrations/oauth/`) - Credentials store (`/backend/backend/integrations/credentials_store.py`) - **Trust Level**: Trusted - server-controlled environment diff --git a/docs/platform/ollama.md b/docs/platform/ollama.md index 392bfabfe8..ecab9b8ae1 100644 --- a/docs/platform/ollama.md +++ b/docs/platform/ollama.md @@ -246,7 +246,7 @@ If you encounter any issues, verify that: ```bash ollama pull llama3.2 ``` -- If using a custom model, ensure it's added to the model list in `backend/server/model.py` +- If using a custom model, ensure it's added to the model list in `backend/api/model.py` #### Docker Issues - Ensure Docker daemon is running: From b2eb4831bd3d309b04242a558c905b2ae4dc5aee Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Thu, 29 Jan 2026 13:53:40 -0600 Subject: [PATCH 15/25] feat(chat): improve agent generator error propagation (#11884) ## Summary - Add helper functions in `service.py` to create standardized error responses with `error_type` classification - Update service functions to return error dicts instead of `None`, preserving error details from the Agent Generator microservice - Update `core.py` to pass through error responses properly - Update `create_agent.py` to handle error responses with user-friendly messages based on error type ## Error Types Now Propagated | Error Type | Description | User Message | |------------|-------------|--------------| | `llm_parse_error` | LLM returned unparseable response | "The AI had trouble understanding this request" | | `llm_timeout` / `timeout` | Request timed out | "The request took too long" | | `llm_rate_limit` / `rate_limit` | Rate limited | "The service is currently busy" | | `validation_error` | Agent validation failed | "The generated agent failed validation" | | `connection_error` | Could not connect to Agent Generator | Generic error message | | `http_error` | HTTP error from Agent Generator | Generic error message | | `unknown` | Unclassified error | Generic error message | ## Motivation This enables better debugging for issues like SECRT-1817 where decomposition failed due to transient LLM errors but the root cause was unclear in the logs. Now: 1. Error details from the Agent Generator microservice are preserved 2. Users get more helpful error messages based on error type 3. 
Debugging is easier with `error_type` in response details ## Related PR - Agent Generator side: https://github.com/Significant-Gravitas/AutoGPT-Agent-Generator/pull/102 ## Test Plan - [ ] Test decomposition with various error scenarios (timeout, parse error) - [ ] Verify user-friendly messages are shown based on error type - [ ] Check that error details are logged properly --- .../chat/tools/agent_generator/__init__.py | 3 + .../chat/tools/agent_generator/core.py | 10 +- .../chat/tools/agent_generator/errors.py | 43 +++++ .../chat/tools/agent_generator/service.py | 163 ++++++++++++++---- .../api/features/chat/tools/create_agent.py | 50 +++++- .../api/features/chat/tools/edit_agent.py | 23 +++ .../test/agent_generator/test_service.py | 25 ++- 7 files changed, 274 insertions(+), 43 deletions(-) create mode 100644 autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/errors.py diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/__init__.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/__init__.py index 392f642c41..499025b7dc 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/__init__.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/__init__.py @@ -9,6 +9,7 @@ from .core import ( json_to_graph, save_agent_to_library, ) +from .errors import get_user_message_for_error from .service import health_check as check_external_service_health from .service import is_external_service_configured @@ -25,4 +26,6 @@ __all__ = [ # Service "is_external_service_configured", "check_external_service_health", + # Error handling + "get_user_message_for_error", ] diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py index fc15587110..d56e33cbb0 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py @@ -64,7 +64,7 @@ async def generate_agent(instructions: dict[str, Any]) -> dict[str, Any] | None: instructions: Structured instructions from decompose_goal Returns: - Agent JSON dict or None on error + Agent JSON dict, error dict {"type": "error", ...}, or None on error Raises: AgentGeneratorNotConfiguredError: If the external service is not configured. @@ -73,7 +73,10 @@ async def generate_agent(instructions: dict[str, Any]) -> dict[str, Any] | None: logger.info("Calling external Agent Generator service for generate_agent") result = await generate_agent_external(instructions) if result: - # Ensure required fields + # Check if it's an error response - pass through as-is + if isinstance(result, dict) and result.get("type") == "error": + return result + # Ensure required fields for successful agent generation if "id" not in result: result["id"] = str(uuid.uuid4()) if "version" not in result: @@ -267,7 +270,8 @@ async def generate_agent_patch( current_agent: Current agent JSON Returns: - Updated agent JSON, clarifying questions dict, or None on error + Updated agent JSON, clarifying questions dict {"type": "clarifying_questions", ...}, + error dict {"type": "error", ...}, or None on unexpected error Raises: AgentGeneratorNotConfiguredError: If the external service is not configured. 
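
To make the new contract concrete before the remaining hunks: below is a minimal sketch (not part of the patch) of the `{"type": "error", ...}` dict that `service.py` now returns instead of `None`, and of how a caller branches on it. The shapes mirror `_create_error_response()` in the `service.py` hunk further down; the helper names `make_error` and `classify_result` are illustrative only and do not exist in the codebase.

```python
from typing import Any


def make_error(message: str, error_type: str = "unknown") -> dict[str, Any]:
    """Illustrative mirror of the shape built by _create_error_response()."""
    return {"type": "error", "error": message, "error_type": error_type}


def classify_result(result: dict[str, Any] | None) -> str:
    # None still signals an unexpected failure; error dicts additionally carry
    # a machine-readable classification that the tool layer can surface.
    if result is None:
        return "decomposition_failed"
    if result.get("type") == "error":
        return "decomposition_failed:" + result.get("error_type", "unknown")
    return "ok"


assert classify_result(None) == "decomposition_failed"
assert classify_result(make_error("LLM timed out", "llm_timeout")) == (
    "decomposition_failed:llm_timeout"
)
assert classify_result({"type": "instructions", "steps": []}) == "ok"
```

In the actual patch, the second branch maps `error_type` through `get_user_message_for_error()` (introduced in `errors.py` below) to produce the user-facing message, rather than returning a bare tag as this sketch does.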
diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/errors.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/errors.py new file mode 100644 index 0000000000..bf71a95df9 --- /dev/null +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/errors.py @@ -0,0 +1,43 @@ +"""Error handling utilities for agent generator.""" + + +def get_user_message_for_error( + error_type: str, + operation: str = "process the request", + llm_parse_message: str | None = None, + validation_message: str | None = None, +) -> str: + """Get a user-friendly error message based on error type. + + This function maps internal error types to user-friendly messages, + providing a consistent experience across different agent operations. + + Args: + error_type: The error type from the external service + (e.g., "llm_parse_error", "timeout", "rate_limit") + operation: Description of what operation failed, used in the default + message (e.g., "analyze the goal", "generate the agent") + llm_parse_message: Custom message for llm_parse_error type + validation_message: Custom message for validation_error type + + Returns: + User-friendly error message suitable for display to the user + """ + if error_type == "llm_parse_error": + return ( + llm_parse_message + or "The AI had trouble processing this request. Please try again." + ) + elif error_type == "validation_error": + return ( + validation_message + or "The request failed validation. Please try rephrasing." + ) + elif error_type == "patch_error": + return "Failed to apply the changes. Please try a different approach." + elif error_type in ("timeout", "llm_timeout"): + return "The request took too long. Please try again." + elif error_type in ("rate_limit", "llm_rate_limit"): + return "The service is currently busy. Please try again in a moment." + else: + return f"Failed to {operation}. Please try again." diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py index a4d2f1af15..1df1faaaef 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py @@ -14,6 +14,70 @@ from backend.util.settings import Settings logger = logging.getLogger(__name__) + +def _create_error_response( + error_message: str, + error_type: str = "unknown", + details: dict[str, Any] | None = None, +) -> dict[str, Any]: + """Create a standardized error response dict. + + Args: + error_message: Human-readable error message + error_type: Machine-readable error type + details: Optional additional error details + + Returns: + Error dict with type="error" and error details + """ + response: dict[str, Any] = { + "type": "error", + "error": error_message, + "error_type": error_type, + } + if details: + response["details"] = details + return response + + +def _classify_http_error(e: httpx.HTTPStatusError) -> tuple[str, str]: + """Classify an HTTP error into error_type and message. 
+ + Args: + e: The HTTP status error + + Returns: + Tuple of (error_type, error_message) + """ + status = e.response.status_code + if status == 429: + return "rate_limit", f"Agent Generator rate limited: {e}" + elif status == 503: + return "service_unavailable", f"Agent Generator unavailable: {e}" + elif status == 504 or status == 408: + return "timeout", f"Agent Generator timed out: {e}" + else: + return "http_error", f"HTTP error calling Agent Generator: {e}" + + +def _classify_request_error(e: httpx.RequestError) -> tuple[str, str]: + """Classify a request error into error_type and message. + + Args: + e: The request error + + Returns: + Tuple of (error_type, error_message) + """ + error_str = str(e).lower() + if "timeout" in error_str or "timed out" in error_str: + return "timeout", f"Agent Generator request timed out: {e}" + elif "connect" in error_str: + return "connection_error", f"Could not connect to Agent Generator: {e}" + else: + return "request_error", f"Request error calling Agent Generator: {e}" + + _client: httpx.AsyncClient | None = None _settings: Settings | None = None @@ -67,7 +131,8 @@ async def decompose_goal_external( - {"type": "instructions", "steps": [...]} - {"type": "unachievable_goal", ...} - {"type": "vague_goal", ...} - Or None on error + - {"type": "error", "error": "...", "error_type": "..."} on error + Or None on unexpected error """ client = _get_client() @@ -83,8 +148,13 @@ async def decompose_goal_external( data = response.json() if not data.get("success"): - logger.error(f"External service returned error: {data.get('error')}") - return None + error_msg = data.get("error", "Unknown error from Agent Generator") + error_type = data.get("error_type", "unknown") + logger.error( + f"Agent Generator decomposition failed: {error_msg} " + f"(type: {error_type})" + ) + return _create_error_response(error_msg, error_type) # Map the response to the expected format response_type = data.get("type") @@ -106,25 +176,37 @@ async def decompose_goal_external( "type": "vague_goal", "suggested_goal": data.get("suggested_goal"), } + elif response_type == "error": + # Pass through error from the service + return _create_error_response( + data.get("error", "Unknown error"), + data.get("error_type", "unknown"), + ) else: logger.error( f"Unknown response type from external service: {response_type}" ) - return None + return _create_error_response( + f"Unknown response type from Agent Generator: {response_type}", + "invalid_response", + ) except httpx.HTTPStatusError as e: - logger.error(f"HTTP error calling external agent generator: {e}") - return None + error_type, error_msg = _classify_http_error(e) + logger.error(error_msg) + return _create_error_response(error_msg, error_type) except httpx.RequestError as e: - logger.error(f"Request error calling external agent generator: {e}") - return None + error_type, error_msg = _classify_request_error(e) + logger.error(error_msg) + return _create_error_response(error_msg, error_type) except Exception as e: - logger.error(f"Unexpected error calling external agent generator: {e}") - return None + error_msg = f"Unexpected error calling Agent Generator: {e}" + logger.error(error_msg) + return _create_error_response(error_msg, "unexpected_error") async def generate_agent_external( - instructions: dict[str, Any] + instructions: dict[str, Any], ) -> dict[str, Any] | None: """Call the external service to generate an agent from instructions. 
@@ -132,7 +214,7 @@ async def generate_agent_external( instructions: Structured instructions from decompose_goal Returns: - Agent JSON dict or None on error + Agent JSON dict on success, or error dict {"type": "error", ...} on error """ client = _get_client() @@ -144,20 +226,28 @@ async def generate_agent_external( data = response.json() if not data.get("success"): - logger.error(f"External service returned error: {data.get('error')}") - return None + error_msg = data.get("error", "Unknown error from Agent Generator") + error_type = data.get("error_type", "unknown") + logger.error( + f"Agent Generator generation failed: {error_msg} " + f"(type: {error_type})" + ) + return _create_error_response(error_msg, error_type) return data.get("agent_json") except httpx.HTTPStatusError as e: - logger.error(f"HTTP error calling external agent generator: {e}") - return None + error_type, error_msg = _classify_http_error(e) + logger.error(error_msg) + return _create_error_response(error_msg, error_type) except httpx.RequestError as e: - logger.error(f"Request error calling external agent generator: {e}") - return None + error_type, error_msg = _classify_request_error(e) + logger.error(error_msg) + return _create_error_response(error_msg, error_type) except Exception as e: - logger.error(f"Unexpected error calling external agent generator: {e}") - return None + error_msg = f"Unexpected error calling Agent Generator: {e}" + logger.error(error_msg) + return _create_error_response(error_msg, "unexpected_error") async def generate_agent_patch_external( @@ -170,7 +260,7 @@ async def generate_agent_patch_external( current_agent: Current agent JSON Returns: - Updated agent JSON, clarifying questions dict, or None on error + Updated agent JSON, clarifying questions dict, or error dict on error """ client = _get_client() @@ -186,8 +276,13 @@ async def generate_agent_patch_external( data = response.json() if not data.get("success"): - logger.error(f"External service returned error: {data.get('error')}") - return None + error_msg = data.get("error", "Unknown error from Agent Generator") + error_type = data.get("error_type", "unknown") + logger.error( + f"Agent Generator patch generation failed: {error_msg} " + f"(type: {error_type})" + ) + return _create_error_response(error_msg, error_type) # Check if it's clarifying questions if data.get("type") == "clarifying_questions": @@ -196,18 +291,28 @@ async def generate_agent_patch_external( "questions": data.get("questions", []), } + # Check if it's an error passed through + if data.get("type") == "error": + return _create_error_response( + data.get("error", "Unknown error"), + data.get("error_type", "unknown"), + ) + # Otherwise return the updated agent JSON return data.get("agent_json") except httpx.HTTPStatusError as e: - logger.error(f"HTTP error calling external agent generator: {e}") - return None + error_type, error_msg = _classify_http_error(e) + logger.error(error_msg) + return _create_error_response(error_msg, error_type) except httpx.RequestError as e: - logger.error(f"Request error calling external agent generator: {e}") - return None + error_type, error_msg = _classify_request_error(e) + logger.error(error_msg) + return _create_error_response(error_msg, error_type) except Exception as e: - logger.error(f"Unexpected error calling external agent generator: {e}") - return None + error_msg = f"Unexpected error calling Agent Generator: {e}" + logger.error(error_msg) + return _create_error_response(error_msg, "unexpected_error") async def get_blocks_external() -> 
list[dict[str, Any]] | None: diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py index 6b3784e323..74011c7e95 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py @@ -9,6 +9,7 @@ from .agent_generator import ( AgentGeneratorNotConfiguredError, decompose_goal, generate_agent, + get_user_message_for_error, save_agent_to_library, ) from .base import BaseTool @@ -117,11 +118,29 @@ class CreateAgentTool(BaseTool): if decomposition_result is None: return ErrorResponse( - message="Failed to analyze the goal. The agent generation service may be unavailable or timed out. Please try again.", + message="Failed to analyze the goal. The agent generation service may be unavailable. Please try again.", error="decomposition_failed", + details={"description": description[:100]}, + session_id=session_id, + ) + + # Check if the result is an error from the external service + if decomposition_result.get("type") == "error": + error_msg = decomposition_result.get("error", "Unknown error") + error_type = decomposition_result.get("error_type", "unknown") + user_message = get_user_message_for_error( + error_type, + operation="analyze the goal", + llm_parse_message="The AI had trouble understanding this request. Please try rephrasing your goal.", + ) + return ErrorResponse( + message=user_message, + error=f"decomposition_failed:{error_type}", details={ - "description": description[:100] - }, # Include context for debugging + "description": description[:100], + "service_error": error_msg, + "error_type": error_type, + }, session_id=session_id, ) @@ -186,11 +205,30 @@ class CreateAgentTool(BaseTool): if agent_json is None: return ErrorResponse( - message="Failed to generate the agent. The agent generation service may be unavailable or timed out. Please try again.", + message="Failed to generate the agent. The agent generation service may be unavailable. Please try again.", error="generation_failed", + details={"description": description[:100]}, + session_id=session_id, + ) + + # Check if the result is an error from the external service + if isinstance(agent_json, dict) and agent_json.get("type") == "error": + error_msg = agent_json.get("error", "Unknown error") + error_type = agent_json.get("error_type", "unknown") + user_message = get_user_message_for_error( + error_type, + operation="generate the agent", + llm_parse_message="The AI had trouble generating the agent. Please try again or simplify your goal.", + validation_message="The generated agent failed validation. 
Please try rephrasing your goal.", + ) + return ErrorResponse( + message=user_message, + error=f"generation_failed:{error_type}", details={ - "description": description[:100] - }, # Include context for debugging + "description": description[:100], + "service_error": error_msg, + "error_type": error_type, + }, session_id=session_id, ) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py index 7c4da8ad43..ee8eee53ce 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py @@ -9,6 +9,7 @@ from .agent_generator import ( AgentGeneratorNotConfiguredError, generate_agent_patch, get_agent_as_json, + get_user_message_for_error, save_agent_to_library, ) from .base import BaseTool @@ -152,6 +153,28 @@ class EditAgentTool(BaseTool): session_id=session_id, ) + # Check if the result is an error from the external service + if isinstance(result, dict) and result.get("type") == "error": + error_msg = result.get("error", "Unknown error") + error_type = result.get("error_type", "unknown") + user_message = get_user_message_for_error( + error_type, + operation="generate the changes", + llm_parse_message="The AI had trouble generating the changes. Please try again or simplify your request.", + validation_message="The generated changes failed validation. Please try rephrasing your request.", + ) + return ErrorResponse( + message=user_message, + error=f"update_generation_failed:{error_type}", + details={ + "agent_id": agent_id, + "changes": changes[:100], + "service_error": error_msg, + "error_type": error_type, + }, + session_id=session_id, + ) + # Check if LLM returned clarifying questions if result.get("type") == "clarifying_questions": questions = result.get("questions", []) diff --git a/autogpt_platform/backend/test/agent_generator/test_service.py b/autogpt_platform/backend/test/agent_generator/test_service.py index 81ff794532..fe7a1a7fdd 100644 --- a/autogpt_platform/backend/test/agent_generator/test_service.py +++ b/autogpt_platform/backend/test/agent_generator/test_service.py @@ -151,15 +151,20 @@ class TestDecomposeGoalExternal: @pytest.mark.asyncio async def test_decompose_goal_handles_http_error(self): """Test decomposition handles HTTP errors gracefully.""" + mock_response = MagicMock() + mock_response.status_code = 500 mock_client = AsyncMock() mock_client.post.side_effect = httpx.HTTPStatusError( - "Server error", request=MagicMock(), response=MagicMock() + "Server error", request=MagicMock(), response=mock_response ) with patch.object(service, "_get_client", return_value=mock_client): result = await service.decompose_goal_external("Build a chatbot") - assert result is None + assert result is not None + assert result.get("type") == "error" + assert result.get("error_type") == "http_error" + assert "Server error" in result.get("error", "") @pytest.mark.asyncio async def test_decompose_goal_handles_request_error(self): @@ -170,7 +175,10 @@ class TestDecomposeGoalExternal: with patch.object(service, "_get_client", return_value=mock_client): result = await service.decompose_goal_external("Build a chatbot") - assert result is None + assert result is not None + assert result.get("type") == "error" + assert result.get("error_type") == "connection_error" + assert "Connection failed" in result.get("error", "") @pytest.mark.asyncio async def test_decompose_goal_handles_service_error(self): @@ -179,6 
+187,7 @@ class TestDecomposeGoalExternal: mock_response.json.return_value = { "success": False, "error": "Internal error", + "error_type": "internal_error", } mock_response.raise_for_status = MagicMock() @@ -188,7 +197,10 @@ class TestDecomposeGoalExternal: with patch.object(service, "_get_client", return_value=mock_client): result = await service.decompose_goal_external("Build a chatbot") - assert result is None + assert result is not None + assert result.get("type") == "error" + assert result.get("error") == "Internal error" + assert result.get("error_type") == "internal_error" class TestGenerateAgentExternal: @@ -236,7 +248,10 @@ class TestGenerateAgentExternal: with patch.object(service, "_get_client", return_value=mock_client): result = await service.generate_agent_external({"steps": []}) - assert result is None + assert result is not None + assert result.get("type") == "error" + assert result.get("error_type") == "connection_error" + assert "Connection failed" in result.get("error", "") class TestGenerateAgentPatchExternal: From 3b822cdaf7141cd0900644b927b104d4ec185c0b Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 29 Jan 2026 18:31:34 -0600 Subject: [PATCH 16/25] chore(branchlet): Remove docs pip install from postCreateCmd (#11883) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes 🏗️ - Removed `cd docs && pip install -r requirements.txt` from `postCreateCmd` in `.branchlet.json` - Docs dependencies will no longer be auto-installed during branchlet worktree creation ### Rationale The docs setup step was adding unnecessary overhead to the worktree creation process. Developers who need to work on documentation can manually install the docs requirements when needed. ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Verified branchlet worktree creation still works without the docs pip install step #### For configuration changes: - [x] `.env.default` is updated or already compatible with my changes - [x] `docker-compose.yml` is updated or already compatible with my changes - [x] I have included a list of my configuration changes in the PR description (under **Changes**) --- .branchlet.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.branchlet.json b/.branchlet.json index cc13ff9f74..d02cd60e20 100644 --- a/.branchlet.json +++ b/.branchlet.json @@ -29,8 +29,7 @@ "postCreateCmd": [ "cd autogpt_platform/autogpt_libs && poetry install", "cd autogpt_platform/backend && poetry install && poetry run prisma generate", - "cd autogpt_platform/frontend && pnpm install", - "cd docs && pip install -r requirements.txt" + "cd autogpt_platform/frontend && pnpm install" ], "terminalCommand": "code .", "deleteBranchWithWorktree": false From 582c6cad36b2ba0e675d22c617538e037f495d0b Mon Sep 17 00:00:00 2001 From: Otto Date: Fri, 30 Jan 2026 05:12:35 +0000 Subject: [PATCH 17/25] fix(e2e): Make E2E test data deterministic and fix flaky tests (#11890) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary Fixes flaky E2E marketplace and library tests that were causing PRs to be removed from the merge queue. ## Root Cause 1. **Test data was probabilistic** - `e2e_test_data.py` used random chances (40% approve, then 20-50% feature), which could result in 0 featured agents 2. 
**Library pagination threshold wrong** - Checked `>= 10`, but page size is 20 3. **Fixed timeouts** - Used `waitForTimeout(2000)` / `waitForTimeout(10000)` instead of proper waits ## Changes ### Backend (`e2e_test_data.py`) - Add guaranteed minimums: 8 featured agents, 5 featured creators, 10 top agents - First N submissions are deterministically approved and featured - Increase agents per user from 15 → 25 (for pagination with page_size=20) - Fix library agent creation to use constants instead of hardcoded `10` ### Frontend Tests - `library.spec.ts`: Fix pagination threshold to `PAGE_SIZE` (20) - `library.page.ts`: Replace 2s timeout with `networkidle` + `waitForFunction` - `marketplace.page.ts`: Add `networkidle` wait, 30s waits in `getFirst*` methods - `marketplace.spec.ts`: Replace 10s timeout with `waitForFunction` - `marketplace-creator.spec.ts`: Add `networkidle` + element waits ## Related - Closes SECRT-1848, SECRT-1849 - Should unblock #11841 and other PRs in merge queue --------- Co-authored-by: Ubbe --- .../backend/test/e2e_test_data.py | 162 ++++++++++-------- .../frontend/src/tests/library.spec.ts | 13 +- .../src/tests/marketplace-creator.spec.ts | 3 + .../frontend/src/tests/marketplace.spec.ts | 11 +- .../frontend/src/tests/pages/library.page.ts | 26 +-- .../src/tests/pages/marketplace.page.ts | 15 +- 6 files changed, 136 insertions(+), 94 deletions(-) diff --git a/autogpt_platform/backend/test/e2e_test_data.py b/autogpt_platform/backend/test/e2e_test_data.py index d7576cdad3..7288197a90 100644 --- a/autogpt_platform/backend/test/e2e_test_data.py +++ b/autogpt_platform/backend/test/e2e_test_data.py @@ -43,19 +43,24 @@ faker = Faker() # Constants for data generation limits (reduced for E2E tests) NUM_USERS = 15 NUM_AGENT_BLOCKS = 30 -MIN_GRAPHS_PER_USER = 15 -MAX_GRAPHS_PER_USER = 15 +MIN_GRAPHS_PER_USER = 25 +MAX_GRAPHS_PER_USER = 25 MIN_NODES_PER_GRAPH = 3 MAX_NODES_PER_GRAPH = 6 MIN_PRESETS_PER_USER = 2 MAX_PRESETS_PER_USER = 3 -MIN_AGENTS_PER_USER = 15 -MAX_AGENTS_PER_USER = 15 +MIN_AGENTS_PER_USER = 25 +MAX_AGENTS_PER_USER = 25 MIN_EXECUTIONS_PER_GRAPH = 2 MAX_EXECUTIONS_PER_GRAPH = 8 MIN_REVIEWS_PER_VERSION = 2 MAX_REVIEWS_PER_VERSION = 5 +# Guaranteed minimums for marketplace tests (deterministic) +GUARANTEED_FEATURED_AGENTS = 8 +GUARANTEED_FEATURED_CREATORS = 5 +GUARANTEED_TOP_AGENTS = 10 + def get_image(): """Generate a consistent image URL using picsum.photos service.""" @@ -385,7 +390,7 @@ class TestDataCreator: library_agents = [] for user in self.users: - num_agents = 10 # Create exactly 10 agents per user + num_agents = random.randint(MIN_AGENTS_PER_USER, MAX_AGENTS_PER_USER) # Get available graphs for this user user_graphs = [ @@ -507,14 +512,17 @@ class TestDataCreator: existing_profiles, min(num_creators, len(existing_profiles)) ) - # Mark about 50% of creators as featured (more for testing) - num_featured = max(2, int(num_creators * 0.5)) + # Guarantee at least GUARANTEED_FEATURED_CREATORS featured creators + num_featured = max(GUARANTEED_FEATURED_CREATORS, int(num_creators * 0.5)) num_featured = min( num_featured, len(selected_profiles) ) # Don't exceed available profiles featured_profile_ids = set( random.sample([p.id for p in selected_profiles], num_featured) ) + print( + f"🎯 Creating {num_featured} featured creators (min: {GUARANTEED_FEATURED_CREATORS})" + ) for profile in selected_profiles: try: @@ -545,21 +553,25 @@ class TestDataCreator: return profiles async def create_test_store_submissions(self) -> List[Dict[str, Any]]: - """Create test store 
submissions using the API function.""" + """Create test store submissions using the API function. + + DETERMINISTIC: Guarantees minimum featured agents for E2E tests. + """ print("Creating test store submissions...") submissions = [] approved_submissions = [] + featured_count = 0 + submission_counter = 0 - # Create a special test submission for test123@gmail.com + # Create a special test submission for test123@gmail.com (ALWAYS approved + featured) test_user = next( (user for user in self.users if user["email"] == "test123@gmail.com"), None ) - if test_user: - # Special test data for consistent testing + if test_user and self.agent_graphs: test_submission_data = { "user_id": test_user["id"], - "agent_id": self.agent_graphs[0]["id"], # Use first available graph + "agent_id": self.agent_graphs[0]["id"], "agent_version": 1, "slug": "test-agent-submission", "name": "Test Agent Submission", @@ -580,37 +592,24 @@ class TestDataCreator: submissions.append(test_submission.model_dump()) print("✅ Created special test store submission for test123@gmail.com") - # Randomly approve, reject, or leave pending the test submission + # ALWAYS approve and feature the test submission if test_submission.store_listing_version_id: - random_value = random.random() - if random_value < 0.4: # 40% chance to approve - approved_submission = await review_store_submission( - store_listing_version_id=test_submission.store_listing_version_id, - is_approved=True, - external_comments="Test submission approved", - internal_comments="Auto-approved test submission", - reviewer_id=test_user["id"], - ) - approved_submissions.append(approved_submission.model_dump()) - print("✅ Approved test store submission") + approved_submission = await review_store_submission( + store_listing_version_id=test_submission.store_listing_version_id, + is_approved=True, + external_comments="Test submission approved", + internal_comments="Auto-approved test submission", + reviewer_id=test_user["id"], + ) + approved_submissions.append(approved_submission.model_dump()) + print("✅ Approved test store submission") - # Mark approved submission as featured - await prisma.storelistingversion.update( - where={"id": test_submission.store_listing_version_id}, - data={"isFeatured": True}, - ) - print("🌟 Marked test agent as FEATURED") - elif random_value < 0.7: # 30% chance to reject (40% to 70%) - await review_store_submission( - store_listing_version_id=test_submission.store_listing_version_id, - is_approved=False, - external_comments="Test submission rejected - needs improvements", - internal_comments="Auto-rejected test submission for E2E testing", - reviewer_id=test_user["id"], - ) - print("❌ Rejected test store submission") - else: # 30% chance to leave pending (70% to 100%) - print("⏳ Left test submission pending for review") + await prisma.storelistingversion.update( + where={"id": test_submission.store_listing_version_id}, + data={"isFeatured": True}, + ) + featured_count += 1 + print("🌟 Marked test agent as FEATURED") except Exception as e: print(f"Error creating test store submission: {e}") @@ -620,7 +619,6 @@ class TestDataCreator: # Create regular submissions for all users for user in self.users: - # Get available graphs for this specific user user_graphs = [ g for g in self.agent_graphs if g.get("userId") == user["id"] ] @@ -631,18 +629,17 @@ class TestDataCreator: ) continue - # Create exactly 4 store submissions per user for submission_index in range(4): graph = random.choice(user_graphs) + submission_counter += 1 try: print( - f"Creating store 
submission for user {user['id']} with graph {graph['id']} (owner: {graph.get('userId')})" + f"Creating store submission for user {user['id']} with graph {graph['id']}" ) - # Use the API function to create store submission with correct parameters submission = await create_store_submission( - user_id=user["id"], # Must match graph's userId + user_id=user["id"], agent_id=graph["id"], agent_version=graph.get("version", 1), slug=faker.slug(), @@ -651,22 +648,24 @@ class TestDataCreator: video_url=get_video_url() if random.random() < 0.3 else None, image_urls=[get_image() for _ in range(3)], description=faker.text(), - categories=[ - get_category() - ], # Single category from predefined list + categories=[get_category()], changes_summary="Initial E2E test submission", ) submissions.append(submission.model_dump()) print(f"✅ Created store submission: {submission.name}") - # Randomly approve, reject, or leave pending the submission if submission.store_listing_version_id: - random_value = random.random() - if random_value < 0.4: # 40% chance to approve - try: - # Pick a random user as the reviewer (admin) - reviewer_id = random.choice(self.users)["id"] + # DETERMINISTIC: First N submissions are always approved + # First GUARANTEED_FEATURED_AGENTS of those are always featured + should_approve = ( + submission_counter <= GUARANTEED_TOP_AGENTS + or random.random() < 0.4 + ) + should_feature = featured_count < GUARANTEED_FEATURED_AGENTS + if should_approve: + try: + reviewer_id = random.choice(self.users)["id"] approved_submission = await review_store_submission( store_listing_version_id=submission.store_listing_version_id, is_approved=True, @@ -681,16 +680,7 @@ class TestDataCreator: f"✅ Approved store submission: {submission.name}" ) - # Mark some agents as featured during creation (30% chance) - # More likely for creators and first submissions - is_creator = user["id"] in [ - p.get("userId") for p in self.profiles - ] - feature_chance = ( - 0.5 if is_creator else 0.2 - ) # 50% for creators, 20% for others - - if random.random() < feature_chance: + if should_feature: try: await prisma.storelistingversion.update( where={ @@ -698,8 +688,25 @@ class TestDataCreator: }, data={"isFeatured": True}, ) + featured_count += 1 print( - f"🌟 Marked agent as FEATURED: {submission.name}" + f"🌟 Marked agent as FEATURED ({featured_count}/{GUARANTEED_FEATURED_AGENTS}): {submission.name}" + ) + except Exception as e: + print( + f"Warning: Could not mark submission as featured: {e}" + ) + elif random.random() < 0.2: + try: + await prisma.storelistingversion.update( + where={ + "id": submission.store_listing_version_id + }, + data={"isFeatured": True}, + ) + featured_count += 1 + print( + f"🌟 Marked agent as FEATURED (bonus): {submission.name}" ) except Exception as e: print( @@ -710,11 +717,9 @@ class TestDataCreator: print( f"Warning: Could not approve submission {submission.name}: {e}" ) - elif random_value < 0.7: # 30% chance to reject (40% to 70%) + elif random.random() < 0.5: try: - # Pick a random user as the reviewer (admin) reviewer_id = random.choice(self.users)["id"] - await review_store_submission( store_listing_version_id=submission.store_listing_version_id, is_approved=False, @@ -729,7 +734,7 @@ class TestDataCreator: print( f"Warning: Could not reject submission {submission.name}: {e}" ) - else: # 30% chance to leave pending (70% to 100%) + else: print( f"⏳ Left submission pending for review: {submission.name}" ) @@ -743,9 +748,13 @@ class TestDataCreator: traceback.print_exc() continue + print("\n📊 Store 
Submissions Summary:") + print(f" Created: {len(submissions)}") + print(f" Approved: {len(approved_submissions)}") print( - f"Created {len(submissions)} store submissions, approved {len(approved_submissions)}" + f" Featured: {featured_count} (guaranteed min: {GUARANTEED_FEATURED_AGENTS})" ) + self.store_submissions = submissions return submissions @@ -825,12 +834,15 @@ class TestDataCreator: print(f"✅ Agent blocks available: {len(self.agent_blocks)}") print(f"✅ Agent graphs created: {len(self.agent_graphs)}") print(f"✅ Library agents created: {len(self.library_agents)}") - print(f"✅ Creator profiles updated: {len(self.profiles)} (some featured)") - print( - f"✅ Store submissions created: {len(self.store_submissions)} (some marked as featured during creation)" - ) + print(f"✅ Creator profiles updated: {len(self.profiles)}") + print(f"✅ Store submissions created: {len(self.store_submissions)}") print(f"✅ API keys created: {len(self.api_keys)}") print(f"✅ Presets created: {len(self.presets)}") + print("\n🎯 Deterministic Guarantees:") + print(f" • Featured agents: >= {GUARANTEED_FEATURED_AGENTS}") + print(f" • Featured creators: >= {GUARANTEED_FEATURED_CREATORS}") + print(f" • Top agents (approved): >= {GUARANTEED_TOP_AGENTS}") + print(f" • Library agents per user: >= {MIN_AGENTS_PER_USER}") print("\n🚀 Your E2E test database is ready to use!") diff --git a/autogpt_platform/frontend/src/tests/library.spec.ts b/autogpt_platform/frontend/src/tests/library.spec.ts index 1972e94522..52941785e3 100644 --- a/autogpt_platform/frontend/src/tests/library.spec.ts +++ b/autogpt_platform/frontend/src/tests/library.spec.ts @@ -59,12 +59,13 @@ test.describe("Library", () => { }); test("pagination works correctly", async ({ page }, testInfo) => { - test.setTimeout(testInfo.timeout * 3); // Increase timeout for pagination operations + test.setTimeout(testInfo.timeout * 3); await page.goto("/library"); + const PAGE_SIZE = 20; const paginationResult = await libraryPage.testPagination(); - if (paginationResult.initialCount >= 10) { + if (paginationResult.initialCount >= PAGE_SIZE) { expect(paginationResult.finalCount).toBeGreaterThanOrEqual( paginationResult.initialCount, ); @@ -133,7 +134,10 @@ test.describe("Library", () => { test.expect(clearedSearchValue).toBe(""); }); - test("pagination while searching works correctly", async ({ page }) => { + test("pagination while searching works correctly", async ({ + page, + }, testInfo) => { + test.setTimeout(testInfo.timeout * 3); await page.goto("/library"); const allAgents = await libraryPage.getAgents(); @@ -152,9 +156,10 @@ test.describe("Library", () => { ); expect(matchingResults.length).toEqual(initialSearchResults.length); + const PAGE_SIZE = 20; const searchPaginationResult = await libraryPage.testPagination(); - if (searchPaginationResult.initialCount >= 10) { + if (searchPaginationResult.initialCount >= PAGE_SIZE) { expect(searchPaginationResult.finalCount).toBeGreaterThanOrEqual( searchPaginationResult.initialCount, ); diff --git a/autogpt_platform/frontend/src/tests/marketplace-creator.spec.ts b/autogpt_platform/frontend/src/tests/marketplace-creator.spec.ts index 3558f0672c..a41b652afb 100644 --- a/autogpt_platform/frontend/src/tests/marketplace-creator.spec.ts +++ b/autogpt_platform/frontend/src/tests/marketplace-creator.spec.ts @@ -69,9 +69,12 @@ test.describe("Marketplace Creator Page – Basic Functionality", () => { await marketplacePage.getFirstCreatorProfile(page); await firstCreatorProfile.click(); await page.waitForURL("**/marketplace/creator/**"); 
+ await page.waitForLoadState("networkidle").catch(() => {}); + const firstAgent = page .locator('[data-testid="store-card"]:visible') .first(); + await firstAgent.waitFor({ state: "visible", timeout: 30000 }); await firstAgent.click(); await page.waitForURL("**/marketplace/agent/**"); diff --git a/autogpt_platform/frontend/src/tests/marketplace.spec.ts b/autogpt_platform/frontend/src/tests/marketplace.spec.ts index 774713dc82..44d89bf351 100644 --- a/autogpt_platform/frontend/src/tests/marketplace.spec.ts +++ b/autogpt_platform/frontend/src/tests/marketplace.spec.ts @@ -77,7 +77,6 @@ test.describe("Marketplace – Basic Functionality", () => { const firstFeaturedAgent = await marketplacePage.getFirstFeaturedAgent(page); - await firstFeaturedAgent.waitFor({ state: "visible" }); await firstFeaturedAgent.click(); await page.waitForURL("**/marketplace/agent/**"); await matchesUrl(page, /\/marketplace\/agent\/.+/); @@ -116,7 +115,15 @@ test.describe("Marketplace – Basic Functionality", () => { const searchTerm = page.getByText("DummyInput").first(); await isVisible(searchTerm); - await page.waitForTimeout(10000); + await page.waitForLoadState("networkidle").catch(() => {}); + + await page + .waitForFunction( + () => + document.querySelectorAll('[data-testid="store-card"]').length > 0, + { timeout: 15000 }, + ) + .catch(() => console.log("No search results appeared within timeout")); const results = await marketplacePage.getSearchResultsCount(page); expect(results).toBeGreaterThan(0); diff --git a/autogpt_platform/frontend/src/tests/pages/library.page.ts b/autogpt_platform/frontend/src/tests/pages/library.page.ts index 3a7695ec3a..03e98598b4 100644 --- a/autogpt_platform/frontend/src/tests/pages/library.page.ts +++ b/autogpt_platform/frontend/src/tests/pages/library.page.ts @@ -300,21 +300,27 @@ export class LibraryPage extends BasePage { async scrollToLoadMore(): Promise { console.log(`scrolling to load more agents`); - // Get initial agent count - const initialCount = await this.getAgentCount(); - console.log(`Initial agent count: ${initialCount}`); + const initialCount = await this.getAgentCountByListLength(); + console.log(`Initial agent count (DOM cards): ${initialCount}`); - // Scroll down to trigger pagination await this.scrollToBottom(); - // Wait for potential new agents to load - await this.page.waitForTimeout(2000); + await this.page + .waitForLoadState("networkidle", { timeout: 10000 }) + .catch(() => console.log("Network idle timeout, continuing...")); - // Check if more agents loaded - const newCount = await this.getAgentCount(); - console.log(`New agent count after scroll: ${newCount}`); + await this.page + .waitForFunction( + (prevCount) => + document.querySelectorAll('[data-testid="library-agent-card"]') + .length > prevCount, + initialCount, + { timeout: 5000 }, + ) + .catch(() => {}); - return; + const newCount = await this.getAgentCountByListLength(); + console.log(`New agent count after scroll (DOM cards): ${newCount}`); } async testPagination(): Promise<{ diff --git a/autogpt_platform/frontend/src/tests/pages/marketplace.page.ts b/autogpt_platform/frontend/src/tests/pages/marketplace.page.ts index 20f60c371a..115a7b2f12 100644 --- a/autogpt_platform/frontend/src/tests/pages/marketplace.page.ts +++ b/autogpt_platform/frontend/src/tests/pages/marketplace.page.ts @@ -9,6 +9,7 @@ export class MarketplacePage extends BasePage { async goto(page: Page) { await page.goto("/marketplace"); + await page.waitForLoadState("networkidle").catch(() => {}); } async getMarketplaceTitle(page: 
Page) { @@ -109,16 +110,24 @@ export class MarketplacePage extends BasePage { async getFirstFeaturedAgent(page: Page) { const { getId } = getSelectors(page); - return getId("featured-store-card").first(); + const card = getId("featured-store-card").first(); + await card.waitFor({ state: "visible", timeout: 30000 }); + return card; } async getFirstTopAgent() { - return this.page.locator('[data-testid="store-card"]:visible').first(); + const card = this.page + .locator('[data-testid="store-card"]:visible') + .first(); + await card.waitFor({ state: "visible", timeout: 30000 }); + return card; } async getFirstCreatorProfile(page: Page) { const { getId } = getSelectors(page); - return getId("creator-card").first(); + const card = getId("creator-card").first(); + await card.waitFor({ state: "visible", timeout: 30000 }); + return card; } async getSearchResultsCount(page: Page) { From 7cb1e588b0d51eeb7b9991138b71a7f0ac96a5b5 Mon Sep 17 00:00:00 2001 From: Otto Date: Fri, 30 Jan 2026 07:49:05 +0000 Subject: [PATCH 18/25] fix(frontend): Refocus ChatInput after voice transcription completes (#11893) ## Summary Refocuses the chat input textarea after voice transcription finishes, allowing users to immediately use `spacebar+enter` to record and send their prompt. ## Changes - Added `inputId` parameter to `useVoiceRecording` hook - After transcription completes, the input is automatically focused - This improves the voice input UX flow ## Testing 1. Click mic button or press spacebar to record voice 2. Record a message and stop 3. After transcription completes, the input should be focused 4. User can now press Enter to send or spacebar to record again --------- Co-authored-by: Lluis Agusti --- .../Chat/components/ChatInput/ChatInput.tsx | 1 + .../Chat/components/ChatInput/useVoiceRecording.ts | 13 ++++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx index 521f6f6320..beb4678e73 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx @@ -57,6 +57,7 @@ export function ChatInput({ isStreaming, value, baseHandleKeyDown, + inputId, }); return ( diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useVoiceRecording.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useVoiceRecording.ts index 13b625e69c..4de74ef2e9 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useVoiceRecording.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useVoiceRecording.ts @@ -15,6 +15,7 @@ interface Args { isStreaming?: boolean; value: string; baseHandleKeyDown: (event: KeyboardEvent) => void; + inputId?: string; } export function useVoiceRecording({ @@ -23,6 +24,7 @@ export function useVoiceRecording({ isStreaming = false, value, baseHandleKeyDown, + inputId, }: Args) { const [isRecording, setIsRecording] = useState(false); const [isTranscribing, setIsTranscribing] = useState(false); @@ -103,7 +105,7 @@ export function useVoiceRecording({ setIsTranscribing(false); } }, - [handleTranscription], + [handleTranscription, inputId], ); const stopRecording = useCallback(() => { @@ -201,6 +203,15 @@ export function useVoiceRecording({ } }, 
[error, toast]); + useEffect(() => { + if (!isTranscribing && inputId) { + const inputElement = document.getElementById(inputId); + if (inputElement) { + inputElement.focus(); + } + } + }, [isTranscribing, inputId]); + const handleKeyDown = useCallback( (event: KeyboardEvent) => { if (event.key === " " && !value.trim() && !isTranscribing) { From de0ec3d388592c8e4e85832380a7105f63b7ef36 Mon Sep 17 00:00:00 2001 From: Bently Date: Fri, 30 Jan 2026 08:40:55 +0000 Subject: [PATCH 19/25] chore(llm): remove deprecated Claude 3.7 Sonnet model with migration and defensive handling (#11841) ## Summary Remove `claude-3-7-sonnet-20250219` from LLM model definitions ahead of Anthropic's API retirement, with comprehensive migration and defensive error handling. ## Background Anthropic is retiring Claude 3.7 Sonnet (`claude-3-7-sonnet-20250219`) on **February 19, 2026 at 9:00 AM PT**. This PR removes the model from the platform and migrates existing users to prevent service interruptions. ## Changes ### Code Changes - Remove `CLAUDE_3_7_SONNET` enum member from `LlmModel` in `llm.py` - Remove corresponding `ModelMetadata` entry - Remove `CLAUDE_3_7_SONNET` from `StagehandRecommendedLlmModel` enum - Remove `CLAUDE_3_7_SONNET` from block cost config - Add `CLAUDE_4_5_SONNET` to `StagehandRecommendedLlmModel` enum - Update Stagehand block defaults from `CLAUDE_3_7_SONNET` to `CLAUDE_4_5_SONNET` (staying in Claude family) - Add defensive error handling in `CredentialsFieldInfo.discriminate()` for deprecated model values ### Database Migration - Adds migration `20260126120000_migrate_claude_3_7_to_4_5_sonnet` - Migrates `AgentNode.constantInput` model references - Migrates `AgentNodeExecutionInputOutput.data` preset overrides ### Documentation - Updated `docs/integrations/block-integrations/llm.md` to remove deprecated model - Updated `docs/integrations/block-integrations/stagehand/blocks.md` to remove deprecated model and add Claude 4.5 Sonnet ## Notes - Agent JSON files in `autogpt_platform/backend/agents/` still reference this model in their provider mappings. These are auto-generated and should be regenerated separately. 
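## Verification (suggested)

As a post-migration sanity check, both of the queries below should return 0 once the migration has run. This is a suggested spot check, not part of this PR; the table names and JSON paths mirror the migration SQL in the diff below.

```sql
-- No agent nodes should still pin the retired model.
SELECT COUNT(*) FROM "AgentNode"
WHERE "constantInput"::jsonb->>'model' = 'claude-3-7-sonnet-20250219';

-- No preset input overrides should still pin it either.
SELECT COUNT(*) FROM "AgentNodeExecutionInputOutput"
WHERE "agentPresetId" IS NOT NULL
  AND "data"::jsonb->>'model' = 'claude-3-7-sonnet-20250219';
```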
## Testing - [ ] Verify LLM block still functions with remaining models - [ ] Confirm no import errors in affected files - [ ] Verify migration runs successfully - [ ] Verify deprecated model gives helpful error message instead of KeyError --- .../backend/backend/blocks/llm.py | 4 ---- .../backend/blocks/stagehand/blocks.py | 8 +++---- .../backend/backend/data/block_cost_config.py | 1 - .../backend/backend/data/model.py | 12 +++++++--- .../migration.sql | 22 +++++++++++++++++++ docs/integrations/block-integrations/llm.md | 14 ++++++------ .../block-integrations/stagehand/blocks.md | 6 ++--- 7 files changed, 45 insertions(+), 22 deletions(-) create mode 100644 autogpt_platform/backend/migrations/20260126120000_migrate_claude_3_7_to_4_5_sonnet/migration.sql diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index fdcd7f3568..732fb1354c 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -115,7 +115,6 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta): CLAUDE_4_5_OPUS = "claude-opus-4-5-20251101" CLAUDE_4_5_SONNET = "claude-sonnet-4-5-20250929" CLAUDE_4_5_HAIKU = "claude-haiku-4-5-20251001" - CLAUDE_3_7_SONNET = "claude-3-7-sonnet-20250219" CLAUDE_3_HAIKU = "claude-3-haiku-20240307" # AI/ML API models AIML_API_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo" @@ -280,9 +279,6 @@ MODEL_METADATA = { LlmModel.CLAUDE_4_5_HAIKU: ModelMetadata( "anthropic", 200000, 64000, "Claude Haiku 4.5", "Anthropic", "Anthropic", 2 ), # claude-haiku-4-5-20251001 - LlmModel.CLAUDE_3_7_SONNET: ModelMetadata( - "anthropic", 200000, 64000, "Claude 3.7 Sonnet", "Anthropic", "Anthropic", 2 - ), # claude-3-7-sonnet-20250219 LlmModel.CLAUDE_3_HAIKU: ModelMetadata( "anthropic", 200000, 4096, "Claude 3 Haiku", "Anthropic", "Anthropic", 1 ), # claude-3-haiku-20240307 diff --git a/autogpt_platform/backend/backend/blocks/stagehand/blocks.py b/autogpt_platform/backend/backend/blocks/stagehand/blocks.py index be1d736962..4d5d6bf4f3 100644 --- a/autogpt_platform/backend/backend/blocks/stagehand/blocks.py +++ b/autogpt_platform/backend/backend/blocks/stagehand/blocks.py @@ -83,7 +83,7 @@ class StagehandRecommendedLlmModel(str, Enum): GPT41_MINI = "gpt-4.1-mini-2025-04-14" # Anthropic - CLAUDE_3_7_SONNET = "claude-3-7-sonnet-20250219" + CLAUDE_4_5_SONNET = "claude-sonnet-4-5-20250929" @property def provider_name(self) -> str: @@ -137,7 +137,7 @@ class StagehandObserveBlock(Block): model: StagehandRecommendedLlmModel = SchemaField( title="LLM Model", description="LLM to use for Stagehand (provider is inferred)", - default=StagehandRecommendedLlmModel.CLAUDE_3_7_SONNET, + default=StagehandRecommendedLlmModel.CLAUDE_4_5_SONNET, advanced=False, ) model_credentials: AICredentials = AICredentialsField() @@ -230,7 +230,7 @@ class StagehandActBlock(Block): model: StagehandRecommendedLlmModel = SchemaField( title="LLM Model", description="LLM to use for Stagehand (provider is inferred)", - default=StagehandRecommendedLlmModel.CLAUDE_3_7_SONNET, + default=StagehandRecommendedLlmModel.CLAUDE_4_5_SONNET, advanced=False, ) model_credentials: AICredentials = AICredentialsField() @@ -330,7 +330,7 @@ class StagehandExtractBlock(Block): model: StagehandRecommendedLlmModel = SchemaField( title="LLM Model", description="LLM to use for Stagehand (provider is inferred)", - default=StagehandRecommendedLlmModel.CLAUDE_3_7_SONNET, + default=StagehandRecommendedLlmModel.CLAUDE_4_5_SONNET, advanced=False, ) model_credentials: 
AICredentials = AICredentialsField() diff --git a/autogpt_platform/backend/backend/data/block_cost_config.py b/autogpt_platform/backend/backend/data/block_cost_config.py index 1b54ae0942..f46cc726f0 100644 --- a/autogpt_platform/backend/backend/data/block_cost_config.py +++ b/autogpt_platform/backend/backend/data/block_cost_config.py @@ -81,7 +81,6 @@ MODEL_COST: dict[LlmModel, int] = { LlmModel.CLAUDE_4_5_HAIKU: 4, LlmModel.CLAUDE_4_5_OPUS: 14, LlmModel.CLAUDE_4_5_SONNET: 9, - LlmModel.CLAUDE_3_7_SONNET: 5, LlmModel.CLAUDE_3_HAIKU: 1, LlmModel.AIML_API_QWEN2_5_72B: 1, LlmModel.AIML_API_LLAMA3_1_70B: 1, diff --git a/autogpt_platform/backend/backend/data/model.py b/autogpt_platform/backend/backend/data/model.py index 2cc73f6b7b..331126fbd6 100644 --- a/autogpt_platform/backend/backend/data/model.py +++ b/autogpt_platform/backend/backend/data/model.py @@ -666,10 +666,16 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]): if not (self.discriminator and self.discriminator_mapping): return self + try: + provider = self.discriminator_mapping[discriminator_value] + except KeyError: + raise ValueError( + f"Model '{discriminator_value}' is not supported. " + "It may have been deprecated. Please update your agent configuration." + ) + return CredentialsFieldInfo( - credentials_provider=frozenset( - [self.discriminator_mapping[discriminator_value]] - ), + credentials_provider=frozenset([provider]), credentials_types=self.supported_types, credentials_scopes=self.required_scopes, discriminator=self.discriminator, diff --git a/autogpt_platform/backend/migrations/20260126120000_migrate_claude_3_7_to_4_5_sonnet/migration.sql b/autogpt_platform/backend/migrations/20260126120000_migrate_claude_3_7_to_4_5_sonnet/migration.sql new file mode 100644 index 0000000000..5746c80820 --- /dev/null +++ b/autogpt_platform/backend/migrations/20260126120000_migrate_claude_3_7_to_4_5_sonnet/migration.sql @@ -0,0 +1,22 @@ +-- Migrate Claude 3.7 Sonnet to Claude 4.5 Sonnet +-- This updates all AgentNode blocks that use the deprecated Claude 3.7 Sonnet model +-- Anthropic is retiring claude-3-7-sonnet-20250219 on February 19, 2026 + +-- Update AgentNode constant inputs +UPDATE "AgentNode" +SET "constantInput" = JSONB_SET( + "constantInput"::jsonb, + '{model}', + '"claude-sonnet-4-5-20250929"'::jsonb + ) +WHERE "constantInput"::jsonb->>'model' = 'claude-3-7-sonnet-20250219'; + +-- Update AgentPreset input overrides (stored in AgentNodeExecutionInputOutput) +UPDATE "AgentNodeExecutionInputOutput" +SET "data" = JSONB_SET( + "data"::jsonb, + '{model}', + '"claude-sonnet-4-5-20250929"'::jsonb + ) +WHERE "agentPresetId" IS NOT NULL + AND "data"::jsonb->>'model' = 'claude-3-7-sonnet-20250219'; diff --git a/docs/integrations/block-integrations/llm.md b/docs/integrations/block-integrations/llm.md index f4d69b912b..6a0a9e0987 100644 --- a/docs/integrations/block-integrations/llm.md +++ b/docs/integrations/block-integrations/llm.md @@ -65,7 +65,7 @@ The result routes data to yes_output or no_output, enabling intelligent branchin | condition | A plaintext English description of the condition to evaluate | str | Yes | | yes_value | (Optional) Value to output if the condition is true. If not provided, input_value will be used. | Yes Value | No | | no_value | (Optional) Value to output if the condition is false. If not provided, input_value will be used. | No Value | No | -| model | The language model to use for evaluating the condition. 
| "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-7-sonnet-20250219" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No | +| model | The language model to use for evaluating the condition. 
| "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No | ### Outputs @@ -103,7 +103,7 @@ The block sends the entire conversation history to the chosen LLM, including sys |-------|-------------|------|----------| | prompt | The prompt to send to the language model. | str | No | | messages | List of messages in the conversation. | List[Any] | Yes | -| model | The language model to use for the conversation. 
| "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-7-sonnet-20250219" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No | +| model | The language model to use for the conversation. 
| "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No | | max_tokens | The maximum number of tokens to generate in the chat completion. | int | No | | ollama_host | Ollama host for local models | str | No | @@ -257,7 +257,7 @@ The block formulates a prompt based on the given focus or source data, sends it |-------|-------------|------|----------| | focus | The focus of the list to generate. | str | No | | source_data | The data to generate the list from. | str | No | -| model | The language model to use for generating the list. 
| "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-7-sonnet-20250219" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No | +| model | The language model to use for generating the list. 
| "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No | | max_retries | Maximum number of retries for generating a valid list. | int | No | | force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No | | max_tokens | The maximum number of tokens to generate in the chat completion. | int | No | @@ -424,7 +424,7 @@ The block sends the input prompt to a chosen LLM, along with any system prompts | prompt | The prompt to send to the language model. | str | Yes | | expected_format | Expected format of the response. If provided, the response will be validated against this format. The keys should be the expected fields in the response, and the values should be the description of the field. | Dict[str, str] | Yes | | list_result | Whether the response should be a list of objects in the expected format. | bool | No | -| model | The language model to use for answering the prompt. 
| "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-7-sonnet-20250219" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No | +| model | The language model to use for answering the prompt. 
| "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No | | force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No | | sys_prompt | The system prompt to provide additional context to the model. | str | No | | conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No | @@ -464,7 +464,7 @@ The block sends the input prompt to a chosen LLM, processes the response, and re | Input | Description | Type | Required | |-------|-------------|------|----------| | prompt | The prompt to send to the language model. You can use any of the {keys} from Prompt Values to fill in the prompt with values from the prompt values dictionary by putting them in curly braces. | str | Yes | -| model | The language model to use for answering the prompt. 
| "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-7-sonnet-20250219" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No | +| model | The language model to use for answering the prompt. 
| "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No | | sys_prompt | The system prompt to provide additional context to the model. | str | No | | retry | Number of times to retry the LLM call if the response does not match the expected format. | int | No | | prompt_values | Values used to fill in the prompt. The values can be used in the prompt by putting them in a double curly braces, e.g. {{variable_name}}. | Dict[str, str] | No | @@ -501,7 +501,7 @@ The block splits the input text into smaller chunks, sends each chunk to an LLM | Input | Description | Type | Required | |-------|-------------|------|----------| | text | The text to summarize. | str | Yes | -| model | The language model to use for summarizing the text. 
| "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-7-sonnet-20250219" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No | +| model | The language model to use for summarizing the text. 
| "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No | | focus | The topic to focus on in the summary | str | No | | style | The style of the summary to generate. | "concise" \| "detailed" \| "bullet points" \| "numbered list" | No | | max_tokens | The maximum number of tokens to generate in the chat completion. | int | No | @@ -763,7 +763,7 @@ Configure agent_mode_max_iterations to control loop behavior: 0 for single decis | Input | Description | Type | Required | |-------|-------------|------|----------| | prompt | The prompt to send to the language model. | str | Yes | -| model | The language model to use for answering the prompt. 
| "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-7-sonnet-20250219" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No | +| model | The language model to use for answering the prompt. 
| "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No | | multiple_tool_calls | Whether to allow multiple tool calls in a single response. | bool | No | | sys_prompt | The system prompt to provide additional context to the model. | str | No | | conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No | diff --git a/docs/integrations/block-integrations/stagehand/blocks.md b/docs/integrations/block-integrations/stagehand/blocks.md index dac0586fa2..cc201d092b 100644 --- a/docs/integrations/block-integrations/stagehand/blocks.md +++ b/docs/integrations/block-integrations/stagehand/blocks.md @@ -20,7 +20,7 @@ Configure timeouts for DOM settlement and page loading. Variables can be passed | Input | Description | Type | Required | |-------|-------------|------|----------| | browserbase_project_id | Browserbase project ID (required if using Browserbase) | str | Yes | -| model | LLM to use for Stagehand (provider is inferred) | "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "claude-3-7-sonnet-20250219" | No | +| model | LLM to use for Stagehand (provider is inferred) | "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "claude-sonnet-4-5-20250929" | No | | url | URL to navigate to. | str | Yes | | action | Action to perform. Suggested actions are: click, fill, type, press, scroll, select from dropdown. For multi-step actions, add an entry for each step. | List[str] | Yes | | variables | Variables to use in the action. Variables contains data you want the action to use. 
| Dict[str, str] | No | @@ -65,7 +65,7 @@ Supports searching within iframes and configurable timeouts for dynamic content | Input | Description | Type | Required | |-------|-------------|------|----------| | browserbase_project_id | Browserbase project ID (required if using Browserbase) | str | Yes | -| model | LLM to use for Stagehand (provider is inferred) | "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "claude-3-7-sonnet-20250219" | No | +| model | LLM to use for Stagehand (provider is inferred) | "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "claude-sonnet-4-5-20250929" | No | | url | URL to navigate to. | str | Yes | | instruction | Natural language description of elements or actions to discover. | str | Yes | | iframes | Whether to search within iframes. If True, Stagehand will search for actions within iframes. | bool | No | @@ -106,7 +106,7 @@ Use this to explore a page's interactive elements before building automated work | Input | Description | Type | Required | |-------|-------------|------|----------| | browserbase_project_id | Browserbase project ID (required if using Browserbase) | str | Yes | -| model | LLM to use for Stagehand (provider is inferred) | "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "claude-3-7-sonnet-20250219" | No | +| model | LLM to use for Stagehand (provider is inferred) | "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "claude-sonnet-4-5-20250929" | No | | url | URL to navigate to. | str | Yes | | instruction | Natural language description of elements or actions to discover. | str | Yes | | iframes | Whether to search within iframes. If True, Stagehand will search for actions within iframes. | bool | No | From 350ad3591b1e7a9f5ebf5a2d3053339c0f8b57e2 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Fri, 30 Jan 2026 12:01:51 +0100 Subject: [PATCH 20/25] fix(backend/chat): Filter credentials for graph execution by scopes (#11881) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [SECRT-1842: run_agent tool does not correctly use credentials - agents fail with insufficient auth scopes](https://linear.app/autogpt/issue/SECRT-1842) ### Changes 🏗️ - Include scopes in credentials filter in `backend.api.features.chat.tools.utils.match_user_credentials_to_graph` ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - CI must pass - It's broken now and a simple change so we'll test in the dev deployment --- .../backend/api/features/chat/tools/utils.py | 40 ++++++++++++++++--- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py index a2ac91dc65..0046d0b249 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py @@ -8,7 +8,7 @@ from backend.api.features.library import model as library_model from backend.api.features.store import db as store_db from backend.data import graph as graph_db from backend.data.graph import GraphModel -from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput +from backend.data.model import Credentials, CredentialsFieldInfo, CredentialsMetaInput from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.util.exceptions import NotFoundError @@ -266,13 
+266,14 @@ async def match_user_credentials_to_graph(
         credential_requirements,
         _node_fields,
     ) in aggregated_creds.items():
-        # Find first matching credential by provider and type
+        # Find first matching credential by provider, type, and scopes
        matching_cred = next(
             (
                 cred
                 for cred in available_creds
                 if cred.provider in credential_requirements.provider
                 and cred.type in credential_requirements.supported_types
+                and _credential_has_required_scopes(cred, credential_requirements)
             ),
             None,
         )
@@ -296,10 +297,17 @@ async def match_user_credentials_to_graph(
                     f"{credential_field_name} (validation failed: {e})"
                 )
         else:
+            # Build a helpful error message including scope requirements
+            error_parts = [
+                f"provider in {list(credential_requirements.provider)}",
+                f"type in {list(credential_requirements.supported_types)}",
+            ]
+            if credential_requirements.required_scopes:
+                error_parts.append(
+                    f"scopes including {list(credential_requirements.required_scopes)}"
+                )
             missing_creds.append(
-                f"{credential_field_name} "
-                f"(requires provider in {list(credential_requirements.provider)}, "
-                f"type in {list(credential_requirements.supported_types)})"
+                f"{credential_field_name} (requires {', '.join(error_parts)})"
             )

     logger.info(
@@ -309,6 +317,28 @@ async def match_user_credentials_to_graph(
     return graph_credentials_inputs, missing_creds


+def _credential_has_required_scopes(
+    credential: Credentials,
+    requirements: CredentialsFieldInfo,
+) -> bool:
+    """
+    Check if a credential has all the scopes required by the block.
+
+    For OAuth2 credentials, verifies that the credential's scopes are a superset
+    of the required scopes. For other credential types, returns True (no scope check).
+    """
+    # Only OAuth2 credentials have scopes to check
+    if credential.type != "oauth2":
+        return True
+
+    # If no scopes are required, any credential matches
+    if not requirements.required_scopes:
+        return True
+
+    # Check that credential scopes are a superset of required scopes
+    return set(credential.scopes).issuperset(requirements.required_scopes)
+
+
 async def check_user_has_required_credentials(
     user_id: str,
     required_credentials: list[CredentialsMetaInput],

From b72521daa9bf9e84e33f03d312728a5175eb3228 Mon Sep 17 00:00:00 2001
From: Otto
Date: Fri, 30 Jan 2026 22:59:45 +0000
Subject: [PATCH 21/25] fix(readme): update broken self-hosting docs link
 (#11911)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Summary

The self-hosting guide link in README.md was broken.

**Old link:** `https://docs.agpt.co/platform/getting-started/`
- Redirects to `https://agpt.co/docs/platform/getting-started`
- Returns HTTP 400 ❌

**New link:** `https://agpt.co/docs/platform/getting-started/getting-started`
- Works correctly ✅

## Changes

- Updated the self-hosting guide URL in README.md

Fixes #OPEN-2973

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 3572fe318b..349d8818ef 100644
--- a/README.md
+++ b/README.md
@@ -54,7 +54,7 @@ Before proceeding with the installation, ensure your system meets the following
 ### Updated Setup Instructions:
 We've moved to a fully maintained and regularly updated documentation site.

-👉 [Follow the official self-hosting guide here](https://docs.agpt.co/platform/getting-started/)
+👉 [Follow the official self-hosting guide here](https://agpt.co/docs/platform/getting-started/getting-started)

 This tutorial assumes you have Docker, VSCode, git and npm installed.
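The scope rule that PATCH 20/25 above adds in `_credential_has_required_scopes` reduces to a set-superset check. A minimal standalone sketch, assuming an OAuth2-style credential record; `StubCredentials` and the scope names below are illustrative stand-ins, not the platform's actual `Credentials` model:

```python
from dataclasses import dataclass, field


@dataclass
class StubCredentials:
    """Illustrative stand-in for a stored credential record."""

    type: str = "oauth2"
    scopes: list[str] = field(default_factory=list)


def has_required_scopes(cred: StubCredentials, required: set[str]) -> bool:
    if cred.type != "oauth2":
        return True  # only OAuth2 credentials carry scopes
    if not required:
        return True  # nothing required -> any credential matches
    # the credential must cover every required scope
    return set(cred.scopes).issuperset(required)


cred = StubCredentials(scopes=["repo", "read:user"])
assert has_required_scopes(cred, {"repo"})
assert not has_required_scopes(cred, {"repo", "admin:org"})  # missing a scope
```

Non-OAuth2 credential types (e.g. API keys) carry no scopes, so they pass the check unchanged.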
From 18a1661fa3ae88ee84465d30d30b76d26c93f08d Mon Sep 17 00:00:00 2001
From: Zamil Majdy
Date: Fri, 30 Jan 2026 18:18:21 -0600
Subject: [PATCH 22/25] feat: add library agent fetching with two-phase search
 for sub-agent support (#11889)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Context

When users ask the chat to create agents, they may want to compose workflows
that reuse their existing agents as sub-agents. For this to work, the Agent
Generator service needs to know what agents the user has available.

**Challenge:** Users can have large libraries with many agents. Fetching all
of them would be slow and provide too much context to the LLM.

## Solution

This PR implements **search-based library agent fetching** with a **two-phase
search** strategy:

1. **Phase 1 (Initial Search):** When the user describes their goal, we search
   for relevant library agents using the goal as the search query
2. **Phase 2 (Step-Based Enrichment):** After the goal is decomposed into
   steps, we extract keywords from those steps and search for additional
   relevant agents

This ensures we find agents that are relevant to both the high-level goal AND
the specific steps identified.

### Example Flow

```
User goal: "Create an agent that fetches weather and sends a summary email"

Phase 1: Search for "weather email summary" → finds "Weather Fetcher" agent

Phase 2: After decomposition identifies steps like "send email notification"
         → searches "send email notification" → finds "Gmail Sender" agent
```

### Changes

**Library Agent Fetching:**

- `get_library_agents_for_generation()` - Search-based fetching from user's library
- `search_marketplace_agents_for_generation()` - Search public marketplace
- `get_all_relevant_agents_for_generation()` - Combines both with deduplication

**Two-Phase Search:**

- `extract_search_terms_from_steps()` - Extracts keywords from decomposed steps
- `enrich_library_agents_from_steps()` - Searches for additional agents based on steps
- Integrated into `create_agent.py` as "Step 1.5" after goal decomposition

**Type Safety:**

- Added `TypedDict` definitions: `LibraryAgentSummary`, `MarketplaceAgentSummary`,
  `DecompositionStep`, `DecompositionResult`

### Design Decisions

- **Search-based, not fetch-all:** Scalable for large libraries
- **Library agents prioritized:** They have full schemas; marketplace agents have basic info only
- **Deduplication by name and graph_id:** Prevents duplicates across searches
- **Graceful degradation:** Failures don't block agent generation
- **Limited to 3 search terms:** Avoids excessive API calls during enrichment

## Related PR

- Agent Generator: https://github.com/Significant-Gravitas/AutoGPT-Agent-Generator/pull/103

## Test plan

- [x] `test_library_agents.py` - 19 tests covering all new functions
- [x] `test_service.py` - 4 tests for library_agents passthrough
- [ ] Integration test: Create agent with library sub-agent composition

---
 .../backend/api/features/chat/service.py     |   5 +
 .../chat/tools/agent_generator/__init__.py   |  42 +-
 .../chat/tools/agent_generator/core.py       | 615 ++++++++++++-
 .../chat/tools/agent_generator/errors.py     |  66 +-
 .../chat/tools/agent_generator/service.py    |  40 +-
 .../api/features/chat/tools/agent_search.py  | 132 ++-
 .../api/features/chat/tools/create_agent.py  |  53 +-
 .../api/features/chat/tools/edit_agent.py    |  32 +-
 .../backend/api/features/library/db.py       |   8 +-
 .../backend/api/features/library/model.py    | 118 ++-
 .../backend/snapshots/lib_agts_search       |   8 +
.../agent_generator/test_core_integration.py | 12 +- .../agent_generator/test_library_agents.py | 841 ++++++++++++++++++ .../test/agent_generator/test_service.py | 134 +++ .../legacy-builder/CustomNode/CustomNode.tsx | 2 +- .../frontend/src/app/api/openapi.json | 36 + .../components/ChatMessage/ChatMessage.tsx | 8 + .../ClarificationQuestionsWidget.tsx | 65 +- .../components/ToolResponseMessage/helpers.ts | 6 +- 19 files changed, 2069 insertions(+), 154 deletions(-) create mode 100644 autogpt_platform/backend/test/agent_generator/test_library_agents.py diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index 20216162b5..bcd6856503 100644 --- a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -1834,6 +1834,11 @@ async def _execute_long_running_tool( tool_call_id=tool_call_id, result=error_response.model_dump_json(), ) + # Generate LLM continuation so user sees explanation even for errors + try: + await _generate_llm_continuation(session_id=session_id, user_id=user_id) + except Exception as llm_err: + logger.warning(f"Failed to generate LLM continuation for error: {llm_err}") finally: await _mark_operation_completed(tool_call_id) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/__init__.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/__init__.py index 499025b7dc..b7650b3cbd 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/__init__.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/__init__.py @@ -2,30 +2,54 @@ from .core import ( AgentGeneratorNotConfiguredError, + AgentJsonValidationError, + AgentSummary, + DecompositionResult, + DecompositionStep, + LibraryAgentSummary, + MarketplaceAgentSummary, decompose_goal, + enrich_library_agents_from_steps, + extract_search_terms_from_steps, + extract_uuids_from_text, generate_agent, generate_agent_patch, get_agent_as_json, + get_all_relevant_agents_for_generation, + get_library_agent_by_graph_id, + get_library_agent_by_id, + get_library_agents_for_generation, json_to_graph, save_agent_to_library, + search_marketplace_agents_for_generation, ) from .errors import get_user_message_for_error from .service import health_check as check_external_service_health from .service import is_external_service_configured __all__ = [ - # Core functions + "AgentGeneratorNotConfiguredError", + "AgentJsonValidationError", + "AgentSummary", + "DecompositionResult", + "DecompositionStep", + "LibraryAgentSummary", + "MarketplaceAgentSummary", + "check_external_service_health", "decompose_goal", + "enrich_library_agents_from_steps", + "extract_search_terms_from_steps", + "extract_uuids_from_text", "generate_agent", "generate_agent_patch", - "save_agent_to_library", "get_agent_as_json", - "json_to_graph", - # Exceptions - "AgentGeneratorNotConfiguredError", - # Service - "is_external_service_configured", - "check_external_service_health", - # Error handling + "get_all_relevant_agents_for_generation", + "get_library_agent_by_graph_id", + "get_library_agent_by_id", + "get_library_agents_for_generation", "get_user_message_for_error", + "is_external_service_configured", + "json_to_graph", + "save_agent_to_library", + "search_marketplace_agents_for_generation", ] diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py 
b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py index d56e33cbb0..466f6438a3 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py @@ -1,11 +1,21 @@ """Core agent generation functions.""" import logging +import re import uuid -from typing import Any +from typing import Any, NotRequired, TypedDict from backend.api.features.library import db as library_db -from backend.data.graph import Graph, Link, Node, create_graph +from backend.api.features.store import db as store_db +from backend.data.graph import ( + Graph, + Link, + Node, + create_graph, + get_graph, + get_graph_all_versions, +) +from backend.util.exceptions import DatabaseError, NotFoundError from .service import ( decompose_goal_external, @@ -16,6 +26,74 @@ from .service import ( logger = logging.getLogger(__name__) +AGENT_EXECUTOR_BLOCK_ID = "e189baac-8c20-45a1-94a7-55177ea42565" + + +class ExecutionSummary(TypedDict): + """Summary of a single execution for quality assessment.""" + + status: str + correctness_score: NotRequired[float] + activity_summary: NotRequired[str] + + +class LibraryAgentSummary(TypedDict): + """Summary of a library agent for sub-agent composition. + + Includes recent executions to help the LLM decide whether to use this agent. + Each execution shows status, correctness_score (0-1), and activity_summary. + """ + + graph_id: str + graph_version: int + name: str + description: str + input_schema: dict[str, Any] + output_schema: dict[str, Any] + recent_executions: NotRequired[list[ExecutionSummary]] + + +class MarketplaceAgentSummary(TypedDict): + """Summary of a marketplace agent for sub-agent composition.""" + + name: str + description: str + sub_heading: str + creator: str + is_marketplace_agent: bool + + +class DecompositionStep(TypedDict, total=False): + """A single step in decomposed instructions.""" + + description: str + action: str + block_name: str + tool: str + name: str + + +class DecompositionResult(TypedDict, total=False): + """Result from decompose_goal - can be instructions, questions, or error.""" + + type: str + steps: list[DecompositionStep] + questions: list[dict[str, Any]] + error: str + error_type: str + + +AgentSummary = LibraryAgentSummary | MarketplaceAgentSummary | dict[str, Any] + + +def _to_dict_list( + agents: list[AgentSummary] | list[dict[str, Any]] | None, +) -> list[dict[str, Any]] | None: + """Convert typed agent summaries to plain dicts for external service calls.""" + if agents is None: + return None + return [dict(a) for a in agents] + class AgentGeneratorNotConfiguredError(Exception): """Raised when the external Agent Generator service is not configured.""" @@ -36,15 +114,414 @@ def _check_service_configured() -> None: ) -async def decompose_goal(description: str, context: str = "") -> dict[str, Any] | None: +_UUID_PATTERN = re.compile( + r"[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}", + re.IGNORECASE, +) + + +def extract_uuids_from_text(text: str) -> list[str]: + """Extract all UUID v4 strings from text. 
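+
+    Example (illustrative):
+        >>> extract_uuids_from_text("reuse agent 123e4567-e89b-42d3-a456-426614174000")
+        ['123e4567-e89b-42d3-a456-426614174000']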
+ + Args: + text: Text that may contain UUIDs (e.g., user's goal description) + + Returns: + List of unique UUIDs found in the text (lowercase) + """ + matches = _UUID_PATTERN.findall(text) + return list({m.lower() for m in matches}) + + +async def get_library_agent_by_id( + user_id: str, agent_id: str +) -> LibraryAgentSummary | None: + """Fetch a specific library agent by its ID (library agent ID or graph_id). + + This function tries multiple lookup strategies: + 1. First tries to find by graph_id (AgentGraph primary key) + 2. If not found, tries to find by library agent ID (LibraryAgent primary key) + + This handles both cases: + - User provides graph_id (e.g., from AgentExecutorBlock) + - User provides library agent ID (e.g., from library URL) + + Args: + user_id: The user ID + agent_id: The ID to look up (can be graph_id or library agent ID) + + Returns: + LibraryAgentSummary if found, None otherwise + """ + try: + agent = await library_db.get_library_agent_by_graph_id(user_id, agent_id) + if agent: + logger.debug(f"Found library agent by graph_id: {agent.name}") + return LibraryAgentSummary( + graph_id=agent.graph_id, + graph_version=agent.graph_version, + name=agent.name, + description=agent.description, + input_schema=agent.input_schema, + output_schema=agent.output_schema, + ) + except DatabaseError: + raise + except Exception as e: + logger.debug(f"Could not fetch library agent by graph_id {agent_id}: {e}") + + try: + agent = await library_db.get_library_agent(agent_id, user_id) + if agent: + logger.debug(f"Found library agent by library_id: {agent.name}") + return LibraryAgentSummary( + graph_id=agent.graph_id, + graph_version=agent.graph_version, + name=agent.name, + description=agent.description, + input_schema=agent.input_schema, + output_schema=agent.output_schema, + ) + except NotFoundError: + logger.debug(f"Library agent not found by library_id: {agent_id}") + except DatabaseError: + raise + except Exception as e: + logger.warning( + f"Could not fetch library agent by library_id {agent_id}: {e}", + exc_info=True, + ) + + return None + + +get_library_agent_by_graph_id = get_library_agent_by_id + + +async def get_library_agents_for_generation( + user_id: str, + search_query: str | None = None, + exclude_graph_id: str | None = None, + max_results: int = 15, +) -> list[LibraryAgentSummary]: + """Fetch user's library agents formatted for Agent Generator. + + Uses search-based fetching to return relevant agents instead of all agents. + This is more scalable for users with large libraries. 
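+    For example, a search query of "send email" should surface mail-related
+    agents rather than the user's whole library (the query string here is
+    illustrative).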
+ + Includes recent_executions list to help the LLM assess agent quality: + - Each execution has status, correctness_score (0-1), and activity_summary + - This gives the LLM concrete examples of recent performance + + Args: + user_id: The user ID + search_query: Optional search term to find relevant agents (user's goal/description) + exclude_graph_id: Optional graph ID to exclude (prevents circular references) + max_results: Maximum number of agents to return (default 15) + + Returns: + List of LibraryAgentSummary with schemas and recent executions for sub-agent composition + """ + try: + response = await library_db.list_library_agents( + user_id=user_id, + search_term=search_query, + page=1, + page_size=max_results, + include_executions=True, + ) + + results: list[LibraryAgentSummary] = [] + for agent in response.agents: + if exclude_graph_id is not None and agent.graph_id == exclude_graph_id: + continue + + summary = LibraryAgentSummary( + graph_id=agent.graph_id, + graph_version=agent.graph_version, + name=agent.name, + description=agent.description, + input_schema=agent.input_schema, + output_schema=agent.output_schema, + ) + if agent.recent_executions: + exec_summaries: list[ExecutionSummary] = [] + for ex in agent.recent_executions: + exec_sum = ExecutionSummary(status=ex.status) + if ex.correctness_score is not None: + exec_sum["correctness_score"] = ex.correctness_score + if ex.activity_summary: + exec_sum["activity_summary"] = ex.activity_summary + exec_summaries.append(exec_sum) + summary["recent_executions"] = exec_summaries + results.append(summary) + return results + except DatabaseError: + raise + except Exception as e: + logger.warning(f"Failed to fetch library agents: {e}") + return [] + + +async def search_marketplace_agents_for_generation( + search_query: str, + max_results: int = 10, +) -> list[MarketplaceAgentSummary]: + """Search marketplace agents formatted for Agent Generator. + + Note: This returns basic agent info. Full input/output schemas would require + additional graph fetches and is a potential future enhancement. + + Args: + search_query: Search term to find relevant public agents + max_results: Maximum number of agents to return (default 10) + + Returns: + List of MarketplaceAgentSummary (without detailed schemas for now) + """ + try: + response = await store_db.get_store_agents( + search_query=search_query, + page=1, + page_size=max_results, + ) + + results: list[MarketplaceAgentSummary] = [] + for agent in response.agents: + results.append( + MarketplaceAgentSummary( + name=agent.agent_name, + description=agent.description, + sub_heading=agent.sub_heading, + creator=agent.creator, + is_marketplace_agent=True, + ) + ) + return results + except Exception as e: + logger.warning(f"Failed to search marketplace agents: {e}") + return [] + + +async def get_all_relevant_agents_for_generation( + user_id: str, + search_query: str | None = None, + exclude_graph_id: str | None = None, + include_library: bool = True, + include_marketplace: bool = True, + max_library_results: int = 15, + max_marketplace_results: int = 10, +) -> list[AgentSummary]: + """Fetch relevant agents from library and/or marketplace. + + Searches both user's library and marketplace by default. + Explicitly mentioned UUIDs in the search query are always looked up. 
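+    For example (illustrative), a goal that mentions
+    "123e4567-e89b-42d3-a456-426614174000" triggers a direct library lookup of
+    that graph before any keyword search runs.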
+ + Args: + user_id: The user ID + search_query: Search term to find relevant agents (user's goal/description) + exclude_graph_id: Optional graph ID to exclude (prevents circular references) + include_library: Whether to search user's library (default True) + include_marketplace: Whether to also search marketplace (default True) + max_library_results: Max library agents to return (default 15) + max_marketplace_results: Max marketplace agents to return (default 10) + + Returns: + List of AgentSummary, library agents first (with full schemas), + then marketplace agents (basic info only) + """ + agents: list[AgentSummary] = [] + seen_graph_ids: set[str] = set() + + if search_query: + mentioned_uuids = extract_uuids_from_text(search_query) + for graph_id in mentioned_uuids: + if graph_id == exclude_graph_id: + continue + agent = await get_library_agent_by_graph_id(user_id, graph_id) + agent_graph_id = agent.get("graph_id") if agent else None + if agent and agent_graph_id and agent_graph_id not in seen_graph_ids: + agents.append(agent) + seen_graph_ids.add(agent_graph_id) + logger.debug( + f"Found explicitly mentioned agent: {agent.get('name') or 'Unknown'}" + ) + + if include_library: + library_agents = await get_library_agents_for_generation( + user_id=user_id, + search_query=search_query, + exclude_graph_id=exclude_graph_id, + max_results=max_library_results, + ) + for agent in library_agents: + graph_id = agent.get("graph_id") + if graph_id and graph_id not in seen_graph_ids: + agents.append(agent) + seen_graph_ids.add(graph_id) + + if include_marketplace and search_query: + marketplace_agents = await search_marketplace_agents_for_generation( + search_query=search_query, + max_results=max_marketplace_results, + ) + library_names: set[str] = set() + for a in agents: + name = a.get("name") + if name and isinstance(name, str): + library_names.add(name.lower()) + for agent in marketplace_agents: + agent_name = agent.get("name") + if agent_name and isinstance(agent_name, str): + if agent_name.lower() not in library_names: + agents.append(agent) + + return agents + + +def extract_search_terms_from_steps( + decomposition_result: DecompositionResult | dict[str, Any], +) -> list[str]: + """Extract search terms from decomposed instruction steps. + + Analyzes the decomposition result to extract relevant keywords + for additional library agent searches. 
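+
+    Example (illustrative):
+        >>> extract_search_terms_from_steps({
+        ...     "type": "instructions",
+        ...     "steps": [{"description": "send email notification"}],
+        ... })
+        ['send email notification']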
+ + Args: + decomposition_result: Result from decompose_goal containing steps + + Returns: + List of unique search terms extracted from steps + """ + search_terms: list[str] = [] + + if decomposition_result.get("type") != "instructions": + return search_terms + + steps = decomposition_result.get("steps", []) + if not steps: + return search_terms + + step_keys: list[str] = ["description", "action", "block_name", "tool", "name"] + + for step in steps: + for key in step_keys: + value = step.get(key) # type: ignore[union-attr] + if isinstance(value, str) and len(value) > 3: + search_terms.append(value) + + seen: set[str] = set() + unique_terms: list[str] = [] + for term in search_terms: + term_lower = term.lower() + if term_lower not in seen: + seen.add(term_lower) + unique_terms.append(term) + + return unique_terms + + +async def enrich_library_agents_from_steps( + user_id: str, + decomposition_result: DecompositionResult | dict[str, Any], + existing_agents: list[AgentSummary] | list[dict[str, Any]], + exclude_graph_id: str | None = None, + include_marketplace: bool = True, + max_additional_results: int = 10, +) -> list[AgentSummary] | list[dict[str, Any]]: + """Enrich library agents list with additional searches based on decomposed steps. + + This implements two-phase search: after decomposition, we search for additional + relevant agents based on the specific steps identified. + + Args: + user_id: The user ID + decomposition_result: Result from decompose_goal containing steps + existing_agents: Already fetched library agents from initial search + exclude_graph_id: Optional graph ID to exclude + include_marketplace: Whether to also search marketplace + max_additional_results: Max additional agents per search term (default 10) + + Returns: + Combined list of library agents (existing + newly discovered) + """ + search_terms = extract_search_terms_from_steps(decomposition_result) + + if not search_terms: + return existing_agents + + existing_ids: set[str] = set() + existing_names: set[str] = set() + + for agent in existing_agents: + agent_name = agent.get("name") + if agent_name and isinstance(agent_name, str): + existing_names.add(agent_name.lower()) + graph_id = agent.get("graph_id") # type: ignore[call-overload] + if graph_id and isinstance(graph_id, str): + existing_ids.add(graph_id) + + all_agents: list[AgentSummary] | list[dict[str, Any]] = list(existing_agents) + + for term in search_terms[:3]: + try: + additional_agents = await get_all_relevant_agents_for_generation( + user_id=user_id, + search_query=term, + exclude_graph_id=exclude_graph_id, + include_marketplace=include_marketplace, + max_library_results=max_additional_results, + max_marketplace_results=5, + ) + + for agent in additional_agents: + agent_name = agent.get("name") + if not agent_name or not isinstance(agent_name, str): + continue + agent_name_lower = agent_name.lower() + + if agent_name_lower in existing_names: + continue + + graph_id = agent.get("graph_id") # type: ignore[call-overload] + if graph_id and graph_id in existing_ids: + continue + + all_agents.append(agent) + existing_names.add(agent_name_lower) + if graph_id and isinstance(graph_id, str): + existing_ids.add(graph_id) + + except DatabaseError: + logger.error(f"Database error searching for agents with term '{term}'") + raise + except Exception as e: + logger.warning( + f"Failed to search for additional agents with term '{term}': {e}" + ) + + logger.debug( + f"Enriched library agents: {len(existing_agents)} initial + " + f"{len(all_agents) - 
len(existing_agents)} additional = {len(all_agents)} total" + ) + + return all_agents + + +async def decompose_goal( + description: str, + context: str = "", + library_agents: list[AgentSummary] | None = None, +) -> DecompositionResult | None: """Break down a goal into steps or return clarifying questions. Args: description: Natural language goal description context: Additional context (e.g., answers to previous questions) + library_agents: User's library agents available for sub-agent composition Returns: - Dict with either: + DecompositionResult with either: - {"type": "clarifying_questions", "questions": [...]} - {"type": "instructions", "steps": [...]} Or None on error @@ -54,14 +531,21 @@ async def decompose_goal(description: str, context: str = "") -> dict[str, Any] """ _check_service_configured() logger.info("Calling external Agent Generator service for decompose_goal") - return await decompose_goal_external(description, context) + result = await decompose_goal_external( + description, context, _to_dict_list(library_agents) + ) + return result # type: ignore[return-value] -async def generate_agent(instructions: dict[str, Any]) -> dict[str, Any] | None: +async def generate_agent( + instructions: DecompositionResult | dict[str, Any], + library_agents: list[AgentSummary] | list[dict[str, Any]] | None = None, +) -> dict[str, Any] | None: """Generate agent JSON from instructions. Args: instructions: Structured instructions from decompose_goal + library_agents: User's library agents available for sub-agent composition Returns: Agent JSON dict, error dict {"type": "error", ...}, or None on error @@ -71,12 +555,12 @@ async def generate_agent(instructions: dict[str, Any]) -> dict[str, Any] | None: """ _check_service_configured() logger.info("Calling external Agent Generator service for generate_agent") - result = await generate_agent_external(instructions) + result = await generate_agent_external( + dict(instructions), _to_dict_list(library_agents) + ) if result: - # Check if it's an error response - pass through as-is if isinstance(result, dict) and result.get("type") == "error": return result - # Ensure required fields for successful agent generation if "id" not in result: result["id"] = str(uuid.uuid4()) if "version" not in result: @@ -86,6 +570,12 @@ async def generate_agent(instructions: dict[str, Any]) -> dict[str, Any] | None: return result +class AgentJsonValidationError(Exception): + """Raised when agent JSON is invalid or missing required fields.""" + + pass + + def json_to_graph(agent_json: dict[str, Any]) -> Graph: """Convert agent JSON dict to Graph model. 
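 
     Example input (illustrative, minimal):
         {"name": "Demo", "nodes": [{"block_id": "<block uuid>"}], "links": []}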
@@ -94,25 +584,55 @@ def json_to_graph(agent_json: dict[str, Any]) -> Graph: Returns: Graph ready for saving + + Raises: + AgentJsonValidationError: If required fields are missing from nodes or links """ nodes = [] - for n in agent_json.get("nodes", []): + for idx, n in enumerate(agent_json.get("nodes", [])): + block_id = n.get("block_id") + if not block_id: + node_id = n.get("id", f"index_{idx}") + raise AgentJsonValidationError( + f"Node '{node_id}' is missing required field 'block_id'" + ) node = Node( id=n.get("id", str(uuid.uuid4())), - block_id=n["block_id"], + block_id=block_id, input_default=n.get("input_default", {}), metadata=n.get("metadata", {}), ) nodes.append(node) links = [] - for link_data in agent_json.get("links", []): + for idx, link_data in enumerate(agent_json.get("links", [])): + source_id = link_data.get("source_id") + sink_id = link_data.get("sink_id") + source_name = link_data.get("source_name") + sink_name = link_data.get("sink_name") + + missing_fields = [] + if not source_id: + missing_fields.append("source_id") + if not sink_id: + missing_fields.append("sink_id") + if not source_name: + missing_fields.append("source_name") + if not sink_name: + missing_fields.append("sink_name") + + if missing_fields: + link_id = link_data.get("id", f"index_{idx}") + raise AgentJsonValidationError( + f"Link '{link_id}' is missing required fields: {', '.join(missing_fields)}" + ) + link = Link( id=link_data.get("id", str(uuid.uuid4())), - source_id=link_data["source_id"], - sink_id=link_data["sink_id"], - source_name=link_data["source_name"], - sink_name=link_data["sink_name"], + source_id=source_id, + sink_id=sink_id, + source_name=source_name, + sink_name=sink_name, is_static=link_data.get("is_static", False), ) links.append(link) @@ -133,22 +653,40 @@ def _reassign_node_ids(graph: Graph) -> None: This is needed when creating a new version to avoid unique constraint violations. """ - # Create mapping from old node IDs to new UUIDs id_map = {node.id: str(uuid.uuid4()) for node in graph.nodes} - # Reassign node IDs for node in graph.nodes: node.id = id_map[node.id] - # Update link references to use new node IDs for link in graph.links: - link.id = str(uuid.uuid4()) # Also give links new IDs + link.id = str(uuid.uuid4()) if link.source_id in id_map: link.source_id = id_map[link.source_id] if link.sink_id in id_map: link.sink_id = id_map[link.sink_id] +def _populate_agent_executor_user_ids(agent_json: dict[str, Any], user_id: str) -> None: + """Populate user_id in AgentExecutorBlock nodes. + + The external agent generator creates AgentExecutorBlock nodes with empty user_id. + This function fills in the actual user_id so sub-agents run with correct permissions. 
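+
+    Example (illustrative): a node like
+        {"block_id": AGENT_EXECUTOR_BLOCK_ID, "input_default": {}}
+    leaves this function with input_default == {"user_id": "<caller's user_id>"}.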
+ + Args: + agent_json: Agent JSON dict (modified in place) + user_id: User ID to set + """ + for node in agent_json.get("nodes", []): + if node.get("block_id") == AGENT_EXECUTOR_BLOCK_ID: + input_default = node.get("input_default") or {} + if not input_default.get("user_id"): + input_default["user_id"] = user_id + node["input_default"] = input_default + logger.debug( + f"Set user_id for AgentExecutorBlock node {node.get('id')}" + ) + + async def save_agent_to_library( agent_json: dict[str, Any], user_id: str, is_update: bool = False ) -> tuple[Graph, Any]: @@ -162,33 +700,27 @@ async def save_agent_to_library( Returns: Tuple of (created Graph, LibraryAgent) """ - from backend.data.graph import get_graph_all_versions + # Populate user_id in AgentExecutorBlock nodes before conversion + _populate_agent_executor_user_ids(agent_json, user_id) graph = json_to_graph(agent_json) if is_update: - # For updates, keep the same graph ID but increment version - # and reassign node/link IDs to avoid conflicts if graph.id: existing_versions = await get_graph_all_versions(graph.id, user_id) if existing_versions: latest_version = max(v.version for v in existing_versions) graph.version = latest_version + 1 - # Reassign node IDs (but keep graph ID the same) _reassign_node_ids(graph) logger.info(f"Updating agent {graph.id} to version {graph.version}") else: - # For new agents, always generate a fresh UUID to avoid collisions graph.id = str(uuid.uuid4()) graph.version = 1 - # Reassign all node IDs as well _reassign_node_ids(graph) logger.info(f"Creating new agent with ID {graph.id}") - # Save to database created_graph = await create_graph(graph, user_id) - # Add to user's library (or update existing library agent) library_agents = await library_db.create_library_agent( graph=created_graph, user_id=user_id, @@ -200,25 +732,31 @@ async def save_agent_to_library( async def get_agent_as_json( - graph_id: str, user_id: str | None + agent_id: str, user_id: str | None ) -> dict[str, Any] | None: """Fetch an agent and convert to JSON format for editing. Args: - graph_id: Graph ID or library agent ID + agent_id: Graph ID or library agent ID user_id: User ID Returns: Agent as JSON dict or None if not found """ - from backend.data.graph import get_graph + graph = await get_graph(agent_id, version=None, user_id=user_id) + + if not graph and user_id: + try: + library_agent = await library_db.get_library_agent(agent_id, user_id) + graph = await get_graph( + library_agent.graph_id, version=None, user_id=user_id + ) + except NotFoundError: + pass - # Try to get the graph (version=None gets the active version) - graph = await get_graph(graph_id, version=None, user_id=user_id) if not graph: return None - # Convert to JSON format nodes = [] for node in graph.nodes: nodes.append( @@ -256,7 +794,9 @@ async def get_agent_as_json( async def generate_agent_patch( - update_request: str, current_agent: dict[str, Any] + update_request: str, + current_agent: dict[str, Any], + library_agents: list[AgentSummary] | None = None, ) -> dict[str, Any] | None: """Update an existing agent using natural language. 
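 
     Example call (illustrative; `current_agent_json` is a hypothetical variable
     holding the agent JSON fetched via get_agent_as_json):
         patched = await generate_agent_patch(
             "also post the summary to Slack",
             current_agent_json,
         )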
@@ -268,6 +808,7 @@ async def generate_agent_patch( Args: update_request: Natural language description of changes current_agent: Current agent JSON + library_agents: User's library agents available for sub-agent composition Returns: Updated agent JSON, clarifying questions dict {"type": "clarifying_questions", ...}, @@ -278,4 +819,6 @@ async def generate_agent_patch( """ _check_service_configured() logger.info("Calling external Agent Generator service for generate_agent_patch") - return await generate_agent_patch_external(update_request, current_agent) + return await generate_agent_patch_external( + update_request, current_agent, _to_dict_list(library_agents) + ) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/errors.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/errors.py index bf71a95df9..282d8cf9aa 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/errors.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/errors.py @@ -1,11 +1,43 @@ """Error handling utilities for agent generator.""" +import re + + +def _sanitize_error_details(details: str) -> str: + """Sanitize error details to remove sensitive information. + + Strips common patterns that could expose internal system info: + - File paths (Unix and Windows) + - Database connection strings + - URLs with credentials + - Stack trace internals + + Args: + details: Raw error details string + + Returns: + Sanitized error details safe for user display + """ + sanitized = re.sub( + r"/[a-zA-Z0-9_./\-]+\.(py|js|ts|json|yaml|yml)", "[path]", details + ) + sanitized = re.sub(r"[A-Z]:\\[a-zA-Z0-9_\\.\\-]+", "[path]", sanitized) + sanitized = re.sub( + r"(postgres|mysql|mongodb|redis)://[^\s]+", "[database_url]", sanitized + ) + sanitized = re.sub(r"https?://[^:]+:[^@]+@[^\s]+", "[url]", sanitized) + sanitized = re.sub(r", line \d+", "", sanitized) + sanitized = re.sub(r'File "[^"]+",?', "", sanitized) + + return sanitized.strip() + def get_user_message_for_error( error_type: str, operation: str = "process the request", llm_parse_message: str | None = None, validation_message: str | None = None, + error_details: str | None = None, ) -> str: """Get a user-friendly error message based on error type. @@ -19,25 +51,45 @@ def get_user_message_for_error( message (e.g., "analyze the goal", "generate the agent") llm_parse_message: Custom message for llm_parse_error type validation_message: Custom message for validation_error type + error_details: Optional additional details about the error Returns: User-friendly error message suitable for display to the user """ + base_message = "" + if error_type == "llm_parse_error": - return ( + base_message = ( llm_parse_message or "The AI had trouble processing this request. Please try again." ) elif error_type == "validation_error": - return ( + base_message = ( validation_message - or "The request failed validation. Please try rephrasing." + or "The generated agent failed validation. " + "This usually happens when the agent structure doesn't match " + "what the platform expects. Please try simplifying your goal " + "or breaking it into smaller parts." ) elif error_type == "patch_error": - return "Failed to apply the changes. Please try a different approach." + base_message = ( + "Failed to apply the changes. The modification couldn't be " + "validated. Please try a different approach or simplify the change." 
+ ) elif error_type in ("timeout", "llm_timeout"): - return "The request took too long. Please try again." + base_message = ( + "The request took too long to process. This can happen with " + "complex agents. Please try again or simplify your goal." + ) elif error_type in ("rate_limit", "llm_rate_limit"): - return "The service is currently busy. Please try again in a moment." + base_message = "The service is currently busy. Please try again in a moment." else: - return f"Failed to {operation}. Please try again." + base_message = f"Failed to {operation}. Please try again." + + if error_details: + details = _sanitize_error_details(error_details) + if len(details) > 200: + details = details[:200] + "..." + base_message += f"\n\nTechnical details: {details}" + + return base_message diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py index 1df1faaaef..c6242b0ba9 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py @@ -117,13 +117,16 @@ def _get_client() -> httpx.AsyncClient: async def decompose_goal_external( - description: str, context: str = "" + description: str, + context: str = "", + library_agents: list[dict[str, Any]] | None = None, ) -> dict[str, Any] | None: """Call the external service to decompose a goal. Args: description: Natural language goal description context: Additional context (e.g., answers to previous questions) + library_agents: User's library agents available for sub-agent composition Returns: Dict with either: @@ -141,6 +144,8 @@ async def decompose_goal_external( if context: # The external service uses user_instruction for additional context payload["user_instruction"] = context + if library_agents: + payload["library_agents"] = library_agents try: response = await client.post("/api/decompose-description", json=payload) @@ -207,21 +212,25 @@ async def decompose_goal_external( async def generate_agent_external( instructions: dict[str, Any], + library_agents: list[dict[str, Any]] | None = None, ) -> dict[str, Any] | None: """Call the external service to generate an agent from instructions. 
Args: instructions: Structured instructions from decompose_goal + library_agents: User's library agents available for sub-agent composition Returns: Agent JSON dict on success, or error dict {"type": "error", ...} on error """ client = _get_client() + payload: dict[str, Any] = {"instructions": instructions} + if library_agents: + payload["library_agents"] = library_agents + try: - response = await client.post( - "/api/generate-agent", json={"instructions": instructions} - ) + response = await client.post("/api/generate-agent", json=payload) response.raise_for_status() data = response.json() @@ -229,8 +238,7 @@ async def generate_agent_external( error_msg = data.get("error", "Unknown error from Agent Generator") error_type = data.get("error_type", "unknown") logger.error( - f"Agent Generator generation failed: {error_msg} " - f"(type: {error_type})" + f"Agent Generator generation failed: {error_msg} (type: {error_type})" ) return _create_error_response(error_msg, error_type) @@ -251,27 +259,31 @@ async def generate_agent_external( async def generate_agent_patch_external( - update_request: str, current_agent: dict[str, Any] + update_request: str, + current_agent: dict[str, Any], + library_agents: list[dict[str, Any]] | None = None, ) -> dict[str, Any] | None: """Call the external service to generate a patch for an existing agent. Args: update_request: Natural language description of changes current_agent: Current agent JSON + library_agents: User's library agents available for sub-agent composition Returns: Updated agent JSON, clarifying questions dict, or error dict on error """ client = _get_client() + payload: dict[str, Any] = { + "update_request": update_request, + "current_agent_json": current_agent, + } + if library_agents: + payload["library_agents"] = library_agents + try: - response = await client.post( - "/api/update-agent", - json={ - "update_request": update_request, - "current_agent_json": current_agent, - }, - ) + response = await client.post("/api/update-agent", json=payload) response.raise_for_status() data = response.json() diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py index 5fa74ba04e..62d59c470e 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py @@ -1,6 +1,7 @@ """Shared agent search functionality for find_agent and find_library_agent tools.""" import logging +import re from typing import Literal from backend.api.features.library import db as library_db @@ -19,6 +20,85 @@ logger = logging.getLogger(__name__) SearchSource = Literal["marketplace", "library"] +_UUID_PATTERN = re.compile( + r"^[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}$", + re.IGNORECASE, +) + + +def _is_uuid(text: str) -> bool: + """Check if text is a valid UUID v4.""" + return bool(_UUID_PATTERN.match(text.strip())) + + +async def _get_library_agent_by_id(user_id: str, agent_id: str) -> AgentInfo | None: + """Fetch a library agent by ID (library agent ID or graph_id). + + Tries multiple lookup strategies: + 1. First by graph_id (AgentGraph primary key) + 2. 
Then by library agent ID (LibraryAgent primary key) + + Args: + user_id: The user ID + agent_id: The ID to look up (can be graph_id or library agent ID) + + Returns: + AgentInfo if found, None otherwise + """ + try: + agent = await library_db.get_library_agent_by_graph_id(user_id, agent_id) + if agent: + logger.debug(f"Found library agent by graph_id: {agent.name}") + return AgentInfo( + id=agent.id, + name=agent.name, + description=agent.description or "", + source="library", + in_library=True, + creator=agent.creator_name, + status=agent.status.value, + can_access_graph=agent.can_access_graph, + has_external_trigger=agent.has_external_trigger, + new_output=agent.new_output, + graph_id=agent.graph_id, + ) + except DatabaseError: + raise + except Exception as e: + logger.warning( + f"Could not fetch library agent by graph_id {agent_id}: {e}", + exc_info=True, + ) + + try: + agent = await library_db.get_library_agent(agent_id, user_id) + if agent: + logger.debug(f"Found library agent by library_id: {agent.name}") + return AgentInfo( + id=agent.id, + name=agent.name, + description=agent.description or "", + source="library", + in_library=True, + creator=agent.creator_name, + status=agent.status.value, + can_access_graph=agent.can_access_graph, + has_external_trigger=agent.has_external_trigger, + new_output=agent.new_output, + graph_id=agent.graph_id, + ) + except NotFoundError: + logger.debug(f"Library agent not found by library_id: {agent_id}") + except DatabaseError: + raise + except Exception as e: + logger.warning( + f"Could not fetch library agent by library_id {agent_id}: {e}", + exc_info=True, + ) + + return None + async def search_agents( query: str, @@ -69,29 +149,37 @@ async def search_agents( is_featured=False, ) ) - else: # library - logger.info(f"Searching user library for: {query}") - results = await library_db.list_library_agents( - user_id=user_id, # type: ignore[arg-type] - search_term=query, - page_size=10, - ) - for agent in results.agents: - agents.append( - AgentInfo( - id=agent.id, - name=agent.name, - description=agent.description or "", - source="library", - in_library=True, - creator=agent.creator_name, - status=agent.status.value, - can_access_graph=agent.can_access_graph, - has_external_trigger=agent.has_external_trigger, - new_output=agent.new_output, - graph_id=agent.graph_id, - ) + else: + if _is_uuid(query): + logger.info(f"Query looks like UUID, trying direct lookup: {query}") + agent = await _get_library_agent_by_id(user_id, query) # type: ignore[arg-type] + if agent: + agents.append(agent) + logger.info(f"Found agent by direct ID lookup: {agent.name}") + + if not agents: + logger.info(f"Searching user library for: {query}") + results = await library_db.list_library_agents( + user_id=user_id, # type: ignore[arg-type] + search_term=query, + page_size=10, ) + for agent in results.agents: + agents.append( + AgentInfo( + id=agent.id, + name=agent.name, + description=agent.description or "", + source="library", + in_library=True, + creator=agent.creator_name, + status=agent.status.value, + can_access_graph=agent.can_access_graph, + has_external_trigger=agent.has_external_trigger, + new_output=agent.new_output, + graph_id=agent.graph_id, + ) + ) logger.info(f"Found {len(agents)} agents in {source}") except NotFoundError: pass diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py index 74011c7e95..adb2c78fce 100644 --- 
a/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/create_agent.py @@ -8,7 +8,9 @@ from backend.api.features.chat.model import ChatSession from .agent_generator import ( AgentGeneratorNotConfiguredError, decompose_goal, + enrich_library_agents_from_steps, generate_agent, + get_all_relevant_agents_for_generation, get_user_message_for_error, save_agent_to_library, ) @@ -103,9 +105,24 @@ class CreateAgentTool(BaseTool): session_id=session_id, ) - # Step 1: Decompose goal into steps + library_agents = None + if user_id: + try: + library_agents = await get_all_relevant_agents_for_generation( + user_id=user_id, + search_query=description, + include_marketplace=True, + ) + logger.debug( + f"Found {len(library_agents)} relevant agents for sub-agent composition" + ) + except Exception as e: + logger.warning(f"Failed to fetch library agents: {e}") + try: - decomposition_result = await decompose_goal(description, context) + decomposition_result = await decompose_goal( + description, context, library_agents + ) except AgentGeneratorNotConfiguredError: return ErrorResponse( message=( @@ -124,7 +141,6 @@ class CreateAgentTool(BaseTool): session_id=session_id, ) - # Check if the result is an error from the external service if decomposition_result.get("type") == "error": error_msg = decomposition_result.get("error", "Unknown error") error_type = decomposition_result.get("error_type", "unknown") @@ -144,7 +160,6 @@ class CreateAgentTool(BaseTool): session_id=session_id, ) - # Check if LLM returned clarifying questions if decomposition_result.get("type") == "clarifying_questions": questions = decomposition_result.get("questions", []) return ClarificationNeededResponse( @@ -163,7 +178,6 @@ class CreateAgentTool(BaseTool): session_id=session_id, ) - # Check for unachievable/vague goals if decomposition_result.get("type") == "unachievable_goal": suggested = decomposition_result.get("suggested_goal", "") reason = decomposition_result.get("reason", "") @@ -190,9 +204,22 @@ class CreateAgentTool(BaseTool): session_id=session_id, ) - # Step 2: Generate agent JSON (external service handles fixing and validation) + if user_id and library_agents is not None: + try: + library_agents = await enrich_library_agents_from_steps( + user_id=user_id, + decomposition_result=decomposition_result, + existing_agents=library_agents, + include_marketplace=True, + ) + logger.debug( + f"After enrichment: {len(library_agents)} total agents for sub-agent composition" + ) + except Exception as e: + logger.warning(f"Failed to enrich library agents from steps: {e}") + try: - agent_json = await generate_agent(decomposition_result) + agent_json = await generate_agent(decomposition_result, library_agents) except AgentGeneratorNotConfiguredError: return ErrorResponse( message=( @@ -211,7 +238,6 @@ class CreateAgentTool(BaseTool): session_id=session_id, ) - # Check if the result is an error from the external service if isinstance(agent_json, dict) and agent_json.get("type") == "error": error_msg = agent_json.get("error", "Unknown error") error_type = agent_json.get("error_type", "unknown") @@ -219,7 +245,12 @@ class CreateAgentTool(BaseTool): error_type, operation="generate the agent", llm_parse_message="The AI had trouble generating the agent. Please try again or simplify your goal.", - validation_message="The generated agent failed validation. 
Please try rephrasing your goal.", + validation_message=( + "I wasn't able to create a valid agent for this request. " + "The generated workflow had some structural issues. " + "Please try simplifying your goal or breaking it into smaller steps." + ), + error_details=error_msg, ) return ErrorResponse( message=user_message, @@ -237,7 +268,6 @@ class CreateAgentTool(BaseTool): node_count = len(agent_json.get("nodes", [])) link_count = len(agent_json.get("links", [])) - # Step 3: Preview or save if not save: return AgentPreviewResponse( message=( @@ -252,7 +282,6 @@ class CreateAgentTool(BaseTool): session_id=session_id, ) - # Save to library if not user_id: return ErrorResponse( message="You must be logged in to save agents.", @@ -270,7 +299,7 @@ class CreateAgentTool(BaseTool): agent_id=created_graph.id, agent_name=created_graph.name, library_agent_id=library_agent.id, - library_agent_link=f"/library/{library_agent.id}", + library_agent_link=f"/library/agents/{library_agent.id}", agent_page_link=f"/build?flowID={created_graph.id}", session_id=session_id, ) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py index ee8eee53ce..2c2c48226b 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/edit_agent.py @@ -9,6 +9,7 @@ from .agent_generator import ( AgentGeneratorNotConfiguredError, generate_agent_patch, get_agent_as_json, + get_all_relevant_agents_for_generation, get_user_message_for_error, save_agent_to_library, ) @@ -117,7 +118,6 @@ class EditAgentTool(BaseTool): session_id=session_id, ) - # Step 1: Fetch current agent current_agent = await get_agent_as_json(agent_id, user_id) if current_agent is None: @@ -127,14 +127,30 @@ class EditAgentTool(BaseTool): session_id=session_id, ) - # Build the update request with context + library_agents = None + if user_id: + try: + graph_id = current_agent.get("id") + library_agents = await get_all_relevant_agents_for_generation( + user_id=user_id, + search_query=changes, + exclude_graph_id=graph_id, + include_marketplace=True, + ) + logger.debug( + f"Found {len(library_agents)} relevant agents for sub-agent composition" + ) + except Exception as e: + logger.warning(f"Failed to fetch library agents: {e}") + update_request = changes if context: update_request = f"{changes}\n\nAdditional context:\n{context}" - # Step 2: Generate updated agent (external service handles fixing and validation) try: - result = await generate_agent_patch(update_request, current_agent) + result = await generate_agent_patch( + update_request, current_agent, library_agents + ) except AgentGeneratorNotConfiguredError: return ErrorResponse( message=( @@ -153,7 +169,6 @@ class EditAgentTool(BaseTool): session_id=session_id, ) - # Check if the result is an error from the external service if isinstance(result, dict) and result.get("type") == "error": error_msg = result.get("error", "Unknown error") error_type = result.get("error_type", "unknown") @@ -162,6 +177,7 @@ class EditAgentTool(BaseTool): operation="generate the changes", llm_parse_message="The AI had trouble generating the changes. Please try again or simplify your request.", validation_message="The generated changes failed validation. 
Please try rephrasing your request.", + error_details=error_msg, ) return ErrorResponse( message=user_message, @@ -175,7 +191,6 @@ class EditAgentTool(BaseTool): session_id=session_id, ) - # Check if LLM returned clarifying questions if result.get("type") == "clarifying_questions": questions = result.get("questions", []) return ClarificationNeededResponse( @@ -194,7 +209,6 @@ class EditAgentTool(BaseTool): session_id=session_id, ) - # Result is the updated agent JSON updated_agent = result agent_name = updated_agent.get("name", "Updated Agent") @@ -202,7 +216,6 @@ class EditAgentTool(BaseTool): node_count = len(updated_agent.get("nodes", [])) link_count = len(updated_agent.get("links", [])) - # Step 3: Preview or save if not save: return AgentPreviewResponse( message=( @@ -218,7 +231,6 @@ class EditAgentTool(BaseTool): session_id=session_id, ) - # Save to library (creates a new version) if not user_id: return ErrorResponse( message="You must be logged in to save agents.", @@ -236,7 +248,7 @@ class EditAgentTool(BaseTool): agent_id=created_graph.id, agent_name=created_graph.name, library_agent_id=library_agent.id, - library_agent_link=f"/library/{library_agent.id}", + library_agent_link=f"/library/agents/{library_agent.id}", agent_page_link=f"/build?flowID={created_graph.id}", session_id=session_id, ) diff --git a/autogpt_platform/backend/backend/api/features/library/db.py b/autogpt_platform/backend/backend/api/features/library/db.py index 872fe66b28..394f959953 100644 --- a/autogpt_platform/backend/backend/api/features/library/db.py +++ b/autogpt_platform/backend/backend/api/features/library/db.py @@ -39,6 +39,7 @@ async def list_library_agents( sort_by: library_model.LibraryAgentSort = library_model.LibraryAgentSort.UPDATED_AT, page: int = 1, page_size: int = 50, + include_executions: bool = False, ) -> library_model.LibraryAgentResponse: """ Retrieves a paginated list of LibraryAgent records for a given user. @@ -49,6 +50,9 @@ async def list_library_agents( sort_by: Sorting field (createdAt, updatedAt, isFavorite, isCreatedByUser). page: Current page (1-indexed). page_size: Number of items per page. + include_executions: Whether to include execution data for status calculation. + Defaults to False for performance (UI fetches status separately). + Set to True when accurate status/metrics are needed (e.g., agent generator). Returns: A LibraryAgentResponse containing the list of agents and pagination details. 
@@ -76,7 +80,6 @@ async def list_library_agents( "isArchived": False, } - # Build search filter if applicable if search_term: where_clause["OR"] = [ { @@ -93,7 +96,6 @@ async def list_library_agents( }, ] - # Determine sorting order_by: prisma.types.LibraryAgentOrderByInput | None = None if sort_by == library_model.LibraryAgentSort.CREATED_AT: @@ -105,7 +107,7 @@ async def list_library_agents( library_agents = await prisma.models.LibraryAgent.prisma().find_many( where=where_clause, include=library_agent_include( - user_id, include_nodes=False, include_executions=False + user_id, include_nodes=False, include_executions=include_executions ), order=order_by, skip=(page - 1) * page_size, diff --git a/autogpt_platform/backend/backend/api/features/library/model.py b/autogpt_platform/backend/backend/api/features/library/model.py index 14d7c7be81..c6bc0e0427 100644 --- a/autogpt_platform/backend/backend/api/features/library/model.py +++ b/autogpt_platform/backend/backend/api/features/library/model.py @@ -9,6 +9,7 @@ import pydantic from backend.data.block import BlockInput from backend.data.graph import GraphModel, GraphSettings, GraphTriggerInfo from backend.data.model import CredentialsMetaInput, is_credentials_field_name +from backend.util.json import loads as json_loads from backend.util.models import Pagination if TYPE_CHECKING: @@ -16,10 +17,10 @@ if TYPE_CHECKING: class LibraryAgentStatus(str, Enum): - COMPLETED = "COMPLETED" # All runs completed - HEALTHY = "HEALTHY" # Agent is running (not all runs have completed) - WAITING = "WAITING" # Agent is queued or waiting to start - ERROR = "ERROR" # Agent is in an error state + COMPLETED = "COMPLETED" + HEALTHY = "HEALTHY" + WAITING = "WAITING" + ERROR = "ERROR" class MarketplaceListingCreator(pydantic.BaseModel): @@ -39,6 +40,30 @@ class MarketplaceListing(pydantic.BaseModel): creator: MarketplaceListingCreator +class RecentExecution(pydantic.BaseModel): + """Summary of a recent execution for quality assessment. + + Used by the LLM to understand the agent's recent performance with specific examples + rather than just aggregate statistics. 
+ """ + + status: str + correctness_score: float | None = None + activity_summary: str | None = None + + +def _parse_settings(settings: dict | str | None) -> GraphSettings: + """Parse settings from database, handling both dict and string formats.""" + if settings is None: + return GraphSettings() + try: + if isinstance(settings, str): + settings = json_loads(settings) + return GraphSettings.model_validate(settings) + except Exception: + return GraphSettings() + + class LibraryAgent(pydantic.BaseModel): """ Represents an agent in the library, including metadata for display and @@ -48,7 +73,7 @@ class LibraryAgent(pydantic.BaseModel): id: str graph_id: str graph_version: int - owner_user_id: str # ID of user who owns/created this agent graph + owner_user_id: str image_url: str | None @@ -64,7 +89,7 @@ class LibraryAgent(pydantic.BaseModel): description: str instructions: str | None = None - input_schema: dict[str, Any] # Should be BlockIOObjectSubSchema in frontend + input_schema: dict[str, Any] output_schema: dict[str, Any] credentials_input_schema: dict[str, Any] | None = pydantic.Field( description="Input schema for credentials required by the agent", @@ -81,25 +106,19 @@ class LibraryAgent(pydantic.BaseModel): ) trigger_setup_info: Optional[GraphTriggerInfo] = None - # Indicates whether there's a new output (based on recent runs) new_output: bool - - # Whether the user can access the underlying graph + execution_count: int = 0 + success_rate: float | None = None + avg_correctness_score: float | None = None + recent_executions: list[RecentExecution] = pydantic.Field( + default_factory=list, + description="List of recent executions with status, score, and summary", + ) can_access_graph: bool - - # Indicates if this agent is the latest version is_latest_version: bool - - # Whether the agent is marked as favorite by the user is_favorite: bool - - # Recommended schedule cron (from marketplace agents) recommended_schedule_cron: str | None = None - - # User-specific settings for this library agent settings: GraphSettings = pydantic.Field(default_factory=GraphSettings) - - # Marketplace listing information if the agent has been published marketplace_listing: Optional["MarketplaceListing"] = None @staticmethod @@ -123,7 +142,6 @@ class LibraryAgent(pydantic.BaseModel): agent_updated_at = agent.AgentGraph.updatedAt lib_agent_updated_at = agent.updatedAt - # Compute updated_at as the latest between library agent and graph updated_at = ( max(agent_updated_at, lib_agent_updated_at) if agent_updated_at @@ -136,7 +154,6 @@ class LibraryAgent(pydantic.BaseModel): creator_name = agent.Creator.name or "Unknown" creator_image_url = agent.Creator.avatarUrl or "" - # Logic to calculate status and new_output week_ago = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta( days=7 ) @@ -145,13 +162,55 @@ class LibraryAgent(pydantic.BaseModel): status = status_result.status new_output = status_result.new_output - # Check if user can access the graph - can_access_graph = agent.AgentGraph.userId == agent.userId + execution_count = len(executions) + success_rate: float | None = None + avg_correctness_score: float | None = None + if execution_count > 0: + success_count = sum( + 1 + for e in executions + if e.executionStatus == prisma.enums.AgentExecutionStatus.COMPLETED + ) + success_rate = (success_count / execution_count) * 100 - # Hard-coded to True until a method to check is implemented + correctness_scores = [] + for e in executions: + if e.stats and isinstance(e.stats, dict): + score = 
e.stats.get("correctness_score") + if score is not None and isinstance(score, (int, float)): + correctness_scores.append(float(score)) + if correctness_scores: + avg_correctness_score = sum(correctness_scores) / len( + correctness_scores + ) + + recent_executions: list[RecentExecution] = [] + for e in executions: + exec_score: float | None = None + exec_summary: str | None = None + if e.stats and isinstance(e.stats, dict): + score = e.stats.get("correctness_score") + if score is not None and isinstance(score, (int, float)): + exec_score = float(score) + summary = e.stats.get("activity_status") + if summary is not None and isinstance(summary, str): + exec_summary = summary + exec_status = ( + e.executionStatus.value + if hasattr(e.executionStatus, "value") + else str(e.executionStatus) + ) + recent_executions.append( + RecentExecution( + status=exec_status, + correctness_score=exec_score, + activity_summary=exec_summary, + ) + ) + + can_access_graph = agent.AgentGraph.userId == agent.userId is_latest_version = True - # Build marketplace_listing if available marketplace_listing_data = None if store_listing and store_listing.ActiveVersion and profile: creator_data = MarketplaceListingCreator( @@ -190,11 +249,15 @@ class LibraryAgent(pydantic.BaseModel): has_sensitive_action=graph.has_sensitive_action, trigger_setup_info=graph.trigger_setup_info, new_output=new_output, + execution_count=execution_count, + success_rate=success_rate, + avg_correctness_score=avg_correctness_score, + recent_executions=recent_executions, can_access_graph=can_access_graph, is_latest_version=is_latest_version, is_favorite=agent.isFavorite, recommended_schedule_cron=agent.AgentGraph.recommendedScheduleCron, - settings=GraphSettings.model_validate(agent.settings), + settings=_parse_settings(agent.settings), marketplace_listing=marketplace_listing_data, ) @@ -220,18 +283,15 @@ def _calculate_agent_status( if not executions: return AgentStatusResult(status=LibraryAgentStatus.COMPLETED, new_output=False) - # Track how many times each execution status appears status_counts = {status: 0 for status in prisma.enums.AgentExecutionStatus} new_output = False for execution in executions: - # Check if there's a completed run more recent than `recent_threshold` if execution.createdAt >= recent_threshold: if execution.executionStatus == prisma.enums.AgentExecutionStatus.COMPLETED: new_output = True status_counts[execution.executionStatus] += 1 - # Determine the final status based on counts if status_counts[prisma.enums.AgentExecutionStatus.FAILED] > 0: return AgentStatusResult(status=LibraryAgentStatus.ERROR, new_output=new_output) elif status_counts[prisma.enums.AgentExecutionStatus.QUEUED] > 0: diff --git a/autogpt_platform/backend/snapshots/lib_agts_search b/autogpt_platform/backend/snapshots/lib_agts_search index 67c307b09e..3ce8402b63 100644 --- a/autogpt_platform/backend/snapshots/lib_agts_search +++ b/autogpt_platform/backend/snapshots/lib_agts_search @@ -31,6 +31,10 @@ "has_sensitive_action": false, "trigger_setup_info": null, "new_output": false, + "execution_count": 0, + "success_rate": null, + "avg_correctness_score": null, + "recent_executions": [], "can_access_graph": true, "is_latest_version": true, "is_favorite": false, @@ -72,6 +76,10 @@ "has_sensitive_action": false, "trigger_setup_info": null, "new_output": false, + "execution_count": 0, + "success_rate": null, + "avg_correctness_score": null, + "recent_executions": [], "can_access_graph": false, "is_latest_version": true, "is_favorite": false, diff --git 
a/autogpt_platform/backend/test/agent_generator/test_core_integration.py b/autogpt_platform/backend/test/agent_generator/test_core_integration.py index bdcc24ba79..05ce4a3aff 100644 --- a/autogpt_platform/backend/test/agent_generator/test_core_integration.py +++ b/autogpt_platform/backend/test/agent_generator/test_core_integration.py @@ -57,7 +57,8 @@ class TestDecomposeGoal: result = await core.decompose_goal("Build a chatbot") - mock_external.assert_called_once_with("Build a chatbot", "") + # library_agents defaults to None + mock_external.assert_called_once_with("Build a chatbot", "", None) assert result == expected_result @pytest.mark.asyncio @@ -74,7 +75,8 @@ class TestDecomposeGoal: await core.decompose_goal("Build a chatbot", "Use Python") - mock_external.assert_called_once_with("Build a chatbot", "Use Python") + # library_agents defaults to None + mock_external.assert_called_once_with("Build a chatbot", "Use Python", None) @pytest.mark.asyncio async def test_returns_none_on_service_failure(self): @@ -109,7 +111,8 @@ class TestGenerateAgent: instructions = {"type": "instructions", "steps": ["Step 1"]} result = await core.generate_agent(instructions) - mock_external.assert_called_once_with(instructions) + # library_agents defaults to None + mock_external.assert_called_once_with(instructions, None) # Result should have id, version, is_active added if not present assert result is not None assert result["name"] == "Test Agent" @@ -174,7 +177,8 @@ class TestGenerateAgentPatch: current_agent = {"nodes": [], "links": []} result = await core.generate_agent_patch("Add a node", current_agent) - mock_external.assert_called_once_with("Add a node", current_agent) + # library_agents defaults to None + mock_external.assert_called_once_with("Add a node", current_agent, None) assert result == expected_result @pytest.mark.asyncio diff --git a/autogpt_platform/backend/test/agent_generator/test_library_agents.py b/autogpt_platform/backend/test/agent_generator/test_library_agents.py new file mode 100644 index 0000000000..e62b0746e7 --- /dev/null +++ b/autogpt_platform/backend/test/agent_generator/test_library_agents.py @@ -0,0 +1,841 @@ +""" +Tests for library agent fetching functionality in agent generator. + +This test suite verifies the search-based library agent fetching, +including the combination of library and marketplace agents. 
+""" + +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +from backend.api.features.chat.tools.agent_generator import core + + +class TestGetLibraryAgentsForGeneration: + """Test get_library_agents_for_generation function.""" + + @pytest.mark.asyncio + async def test_fetches_agents_with_search_term(self): + """Test that search_term is passed to the library db.""" + # Create a mock agent with proper attribute values + mock_agent = MagicMock() + mock_agent.graph_id = "agent-123" + mock_agent.graph_version = 1 + mock_agent.name = "Email Agent" + mock_agent.description = "Sends emails" + mock_agent.input_schema = {"properties": {}} + mock_agent.output_schema = {"properties": {}} + mock_agent.recent_executions = [] + + mock_response = MagicMock() + mock_response.agents = [mock_agent] + + with patch.object( + core.library_db, + "list_library_agents", + new_callable=AsyncMock, + return_value=mock_response, + ) as mock_list: + result = await core.get_library_agents_for_generation( + user_id="user-123", + search_query="send email", + ) + + mock_list.assert_called_once_with( + user_id="user-123", + search_term="send email", + page=1, + page_size=15, + include_executions=True, + ) + + # Verify result format + assert len(result) == 1 + assert result[0]["graph_id"] == "agent-123" + assert result[0]["name"] == "Email Agent" + + @pytest.mark.asyncio + async def test_excludes_specified_graph_id(self): + """Test that agents with excluded graph_id are filtered out.""" + mock_response = MagicMock() + mock_response.agents = [ + MagicMock( + graph_id="agent-123", + graph_version=1, + name="Agent 1", + description="First agent", + input_schema={}, + output_schema={}, + recent_executions=[], + ), + MagicMock( + graph_id="agent-456", + graph_version=1, + name="Agent 2", + description="Second agent", + input_schema={}, + output_schema={}, + recent_executions=[], + ), + ] + + with patch.object( + core.library_db, + "list_library_agents", + new_callable=AsyncMock, + return_value=mock_response, + ): + result = await core.get_library_agents_for_generation( + user_id="user-123", + exclude_graph_id="agent-123", + ) + + # Verify the excluded agent is not in results + assert len(result) == 1 + assert result[0]["graph_id"] == "agent-456" + + @pytest.mark.asyncio + async def test_respects_max_results(self): + """Test that max_results parameter limits the page_size.""" + mock_response = MagicMock() + mock_response.agents = [] + + with patch.object( + core.library_db, + "list_library_agents", + new_callable=AsyncMock, + return_value=mock_response, + ) as mock_list: + await core.get_library_agents_for_generation( + user_id="user-123", + max_results=5, + ) + + mock_list.assert_called_once_with( + user_id="user-123", + search_term=None, + page=1, + page_size=5, + include_executions=True, + ) + + +class TestSearchMarketplaceAgentsForGeneration: + """Test search_marketplace_agents_for_generation function.""" + + @pytest.mark.asyncio + async def test_searches_marketplace_with_query(self): + """Test that marketplace is searched with the query.""" + mock_response = MagicMock() + mock_response.agents = [ + MagicMock( + agent_name="Public Agent", + description="A public agent", + sub_heading="Does something useful", + creator="creator-1", + ) + ] + + # The store_db is dynamically imported, so patch the import path + with patch( + "backend.api.features.store.db.get_store_agents", + new_callable=AsyncMock, + return_value=mock_response, + ) as mock_search: + result = await 
core.search_marketplace_agents_for_generation( + search_query="automation", + max_results=10, + ) + + mock_search.assert_called_once_with( + search_query="automation", + page=1, + page_size=10, + ) + + assert len(result) == 1 + assert result[0]["name"] == "Public Agent" + assert result[0]["is_marketplace_agent"] is True + + @pytest.mark.asyncio + async def test_handles_marketplace_error_gracefully(self): + """Test that marketplace errors don't crash the function.""" + with patch( + "backend.api.features.store.db.get_store_agents", + new_callable=AsyncMock, + side_effect=Exception("Marketplace unavailable"), + ): + result = await core.search_marketplace_agents_for_generation( + search_query="test" + ) + + # Should return empty list, not raise exception + assert result == [] + + +class TestGetAllRelevantAgentsForGeneration: + """Test get_all_relevant_agents_for_generation function.""" + + @pytest.mark.asyncio + async def test_combines_library_and_marketplace_agents(self): + """Test that agents from both sources are combined.""" + library_agents = [ + { + "graph_id": "lib-123", + "graph_version": 1, + "name": "Library Agent", + "description": "From library", + "input_schema": {}, + "output_schema": {}, + } + ] + + marketplace_agents = [ + { + "name": "Market Agent", + "description": "From marketplace", + "sub_heading": "Sub heading", + "creator": "creator-1", + "is_marketplace_agent": True, + } + ] + + with patch.object( + core, + "get_library_agents_for_generation", + new_callable=AsyncMock, + return_value=library_agents, + ): + with patch.object( + core, + "search_marketplace_agents_for_generation", + new_callable=AsyncMock, + return_value=marketplace_agents, + ): + result = await core.get_all_relevant_agents_for_generation( + user_id="user-123", + search_query="test query", + include_marketplace=True, + ) + + # Library agents should come first + assert len(result) == 2 + assert result[0]["name"] == "Library Agent" + assert result[1]["name"] == "Market Agent" + + @pytest.mark.asyncio + async def test_deduplicates_by_name(self): + """Test that marketplace agents with same name as library are excluded.""" + library_agents = [ + { + "graph_id": "lib-123", + "graph_version": 1, + "name": "Shared Agent", + "description": "From library", + "input_schema": {}, + "output_schema": {}, + } + ] + + marketplace_agents = [ + { + "name": "Shared Agent", # Same name, should be deduplicated + "description": "From marketplace", + "sub_heading": "Sub heading", + "creator": "creator-1", + "is_marketplace_agent": True, + }, + { + "name": "Unique Agent", + "description": "Only in marketplace", + "sub_heading": "Sub heading", + "creator": "creator-2", + "is_marketplace_agent": True, + }, + ] + + with patch.object( + core, + "get_library_agents_for_generation", + new_callable=AsyncMock, + return_value=library_agents, + ): + with patch.object( + core, + "search_marketplace_agents_for_generation", + new_callable=AsyncMock, + return_value=marketplace_agents, + ): + result = await core.get_all_relevant_agents_for_generation( + user_id="user-123", + search_query="test", + include_marketplace=True, + ) + + # Shared Agent from marketplace should be excluded + assert len(result) == 2 + names = [a["name"] for a in result] + assert "Shared Agent" in names + assert "Unique Agent" in names + + @pytest.mark.asyncio + async def test_skips_marketplace_when_disabled(self): + """Test that marketplace is not searched when include_marketplace=False.""" + library_agents = [ + { + "graph_id": "lib-123", + "graph_version": 1, + 
"name": "Library Agent", + "description": "From library", + "input_schema": {}, + "output_schema": {}, + } + ] + + with patch.object( + core, + "get_library_agents_for_generation", + new_callable=AsyncMock, + return_value=library_agents, + ): + with patch.object( + core, + "search_marketplace_agents_for_generation", + new_callable=AsyncMock, + ) as mock_marketplace: + result = await core.get_all_relevant_agents_for_generation( + user_id="user-123", + search_query="test", + include_marketplace=False, + ) + + # Marketplace should not be called + mock_marketplace.assert_not_called() + assert len(result) == 1 + + @pytest.mark.asyncio + async def test_skips_marketplace_when_no_search_query(self): + """Test that marketplace is not searched without a search query.""" + library_agents = [ + { + "graph_id": "lib-123", + "graph_version": 1, + "name": "Library Agent", + "description": "From library", + "input_schema": {}, + "output_schema": {}, + } + ] + + with patch.object( + core, + "get_library_agents_for_generation", + new_callable=AsyncMock, + return_value=library_agents, + ): + with patch.object( + core, + "search_marketplace_agents_for_generation", + new_callable=AsyncMock, + ) as mock_marketplace: + result = await core.get_all_relevant_agents_for_generation( + user_id="user-123", + search_query=None, # No search query + include_marketplace=True, + ) + + # Marketplace should not be called without search query + mock_marketplace.assert_not_called() + assert len(result) == 1 + + +class TestExtractSearchTermsFromSteps: + """Test extract_search_terms_from_steps function.""" + + def test_extracts_terms_from_instructions_type(self): + """Test extraction from valid instructions decomposition result.""" + decomposition_result = { + "type": "instructions", + "steps": [ + { + "description": "Send an email notification", + "block_name": "GmailSendBlock", + }, + {"description": "Fetch weather data", "action": "Get weather API"}, + ], + } + + result = core.extract_search_terms_from_steps(decomposition_result) + + assert "Send an email notification" in result + assert "GmailSendBlock" in result + assert "Fetch weather data" in result + assert "Get weather API" in result + + def test_returns_empty_for_non_instructions_type(self): + """Test that non-instructions types return empty list.""" + decomposition_result = { + "type": "clarifying_questions", + "questions": [{"question": "What email?"}], + } + + result = core.extract_search_terms_from_steps(decomposition_result) + + assert result == [] + + def test_deduplicates_terms_case_insensitively(self): + """Test that duplicate terms are removed (case-insensitive).""" + decomposition_result = { + "type": "instructions", + "steps": [ + {"description": "Send Email", "name": "send email"}, + {"description": "Other task"}, + ], + } + + result = core.extract_search_terms_from_steps(decomposition_result) + + # Should only have one "send email" variant + email_terms = [t for t in result if "email" in t.lower()] + assert len(email_terms) == 1 + + def test_filters_short_terms(self): + """Test that terms with 3 or fewer characters are filtered out.""" + decomposition_result = { + "type": "instructions", + "steps": [ + {"description": "ab", "action": "xyz"}, # Both too short + {"description": "Valid term here"}, + ], + } + + result = core.extract_search_terms_from_steps(decomposition_result) + + assert "ab" not in result + assert "xyz" not in result + assert "Valid term here" in result + + def test_handles_empty_steps(self): + """Test handling of empty steps list.""" + 
decomposition_result = { + "type": "instructions", + "steps": [], + } + + result = core.extract_search_terms_from_steps(decomposition_result) + + assert result == [] + + +class TestEnrichLibraryAgentsFromSteps: + """Test enrich_library_agents_from_steps function.""" + + @pytest.mark.asyncio + async def test_enriches_with_additional_agents(self): + """Test that additional agents are found based on steps.""" + existing_agents = [ + { + "graph_id": "existing-123", + "graph_version": 1, + "name": "Existing Agent", + "description": "Already fetched", + "input_schema": {}, + "output_schema": {}, + } + ] + + additional_agents = [ + { + "graph_id": "new-456", + "graph_version": 1, + "name": "Email Agent", + "description": "For sending emails", + "input_schema": {}, + "output_schema": {}, + } + ] + + decomposition_result = { + "type": "instructions", + "steps": [ + {"description": "Send email notification"}, + ], + } + + with patch.object( + core, + "get_all_relevant_agents_for_generation", + new_callable=AsyncMock, + return_value=additional_agents, + ): + result = await core.enrich_library_agents_from_steps( + user_id="user-123", + decomposition_result=decomposition_result, + existing_agents=existing_agents, + ) + + # Should have both existing and new agents + assert len(result) == 2 + names = [a["name"] for a in result] + assert "Existing Agent" in names + assert "Email Agent" in names + + @pytest.mark.asyncio + async def test_deduplicates_by_graph_id(self): + """Test that agents with same graph_id are not duplicated.""" + existing_agents = [ + { + "graph_id": "agent-123", + "graph_version": 1, + "name": "Existing Agent", + "description": "Already fetched", + "input_schema": {}, + "output_schema": {}, + } + ] + + # Additional search returns same agent + additional_agents = [ + { + "graph_id": "agent-123", # Same ID + "graph_version": 1, + "name": "Existing Agent Copy", + "description": "Same agent different name", + "input_schema": {}, + "output_schema": {}, + } + ] + + decomposition_result = { + "type": "instructions", + "steps": [{"description": "Some action"}], + } + + with patch.object( + core, + "get_all_relevant_agents_for_generation", + new_callable=AsyncMock, + return_value=additional_agents, + ): + result = await core.enrich_library_agents_from_steps( + user_id="user-123", + decomposition_result=decomposition_result, + existing_agents=existing_agents, + ) + + # Should not duplicate + assert len(result) == 1 + + @pytest.mark.asyncio + async def test_deduplicates_by_name(self): + """Test that agents with same name are not duplicated.""" + existing_agents = [ + { + "graph_id": "agent-123", + "graph_version": 1, + "name": "Email Agent", + "description": "Already fetched", + "input_schema": {}, + "output_schema": {}, + } + ] + + # Additional search returns agent with same name but different ID + additional_agents = [ + { + "graph_id": "agent-456", # Different ID + "graph_version": 1, + "name": "Email Agent", # Same name + "description": "Different agent same name", + "input_schema": {}, + "output_schema": {}, + } + ] + + decomposition_result = { + "type": "instructions", + "steps": [{"description": "Send email"}], + } + + with patch.object( + core, + "get_all_relevant_agents_for_generation", + new_callable=AsyncMock, + return_value=additional_agents, + ): + result = await core.enrich_library_agents_from_steps( + user_id="user-123", + decomposition_result=decomposition_result, + existing_agents=existing_agents, + ) + + # Should not duplicate by name + assert len(result) == 1 + assert 
result[0].get("graph_id") == "agent-123" # Original kept + + @pytest.mark.asyncio + async def test_returns_existing_when_no_steps(self): + """Test that existing agents are returned when no search terms extracted.""" + existing_agents = [ + { + "graph_id": "existing-123", + "graph_version": 1, + "name": "Existing Agent", + "description": "Already fetched", + "input_schema": {}, + "output_schema": {}, + } + ] + + decomposition_result = { + "type": "clarifying_questions", # Not instructions type + "questions": [], + } + + result = await core.enrich_library_agents_from_steps( + user_id="user-123", + decomposition_result=decomposition_result, + existing_agents=existing_agents, + ) + + # Should return existing unchanged + assert result == existing_agents + + @pytest.mark.asyncio + async def test_limits_search_terms_to_three(self): + """Test that only first 3 search terms are used.""" + existing_agents = [] + + decomposition_result = { + "type": "instructions", + "steps": [ + {"description": "First action"}, + {"description": "Second action"}, + {"description": "Third action"}, + {"description": "Fourth action"}, + {"description": "Fifth action"}, + ], + } + + call_count = 0 + + async def mock_get_agents(*args, **kwargs): + nonlocal call_count + call_count += 1 + return [] + + with patch.object( + core, + "get_all_relevant_agents_for_generation", + side_effect=mock_get_agents, + ): + await core.enrich_library_agents_from_steps( + user_id="user-123", + decomposition_result=decomposition_result, + existing_agents=existing_agents, + ) + + # Should only make 3 calls (limited to first 3 terms) + assert call_count == 3 + + +class TestExtractUuidsFromText: + """Test extract_uuids_from_text function.""" + + def test_extracts_single_uuid(self): + """Test extraction of a single UUID from text.""" + text = "Use my agent 46631191-e8a8-486f-ad90-84f89738321d for this task" + result = core.extract_uuids_from_text(text) + assert len(result) == 1 + assert "46631191-e8a8-486f-ad90-84f89738321d" in result + + def test_extracts_multiple_uuids(self): + """Test extraction of multiple UUIDs from text.""" + text = ( + "Combine agents 11111111-1111-4111-8111-111111111111 " + "and 22222222-2222-4222-9222-222222222222" + ) + result = core.extract_uuids_from_text(text) + assert len(result) == 2 + assert "11111111-1111-4111-8111-111111111111" in result + assert "22222222-2222-4222-9222-222222222222" in result + + def test_deduplicates_uuids(self): + """Test that duplicate UUIDs are deduplicated.""" + text = ( + "Use 46631191-e8a8-486f-ad90-84f89738321d twice: " + "46631191-e8a8-486f-ad90-84f89738321d" + ) + result = core.extract_uuids_from_text(text) + assert len(result) == 1 + + def test_normalizes_to_lowercase(self): + """Test that UUIDs are normalized to lowercase.""" + text = "Use 46631191-E8A8-486F-AD90-84F89738321D" + result = core.extract_uuids_from_text(text) + assert result[0] == "46631191-e8a8-486f-ad90-84f89738321d" + + def test_returns_empty_for_no_uuids(self): + """Test that empty list is returned when no UUIDs found.""" + text = "Create an email agent that sends notifications" + result = core.extract_uuids_from_text(text) + assert result == [] + + def test_ignores_invalid_uuids(self): + """Test that invalid UUID-like strings are ignored.""" + text = "Not a valid UUID: 12345678-1234-1234-1234-123456789abc" + result = core.extract_uuids_from_text(text) + # UUID v4 requires specific patterns (4 in third group, 8/9/a/b in fourth) + assert len(result) == 0 + + +class TestGetLibraryAgentById: + """Test 
get_library_agent_by_id function (and its alias get_library_agent_by_graph_id).""" + + @pytest.mark.asyncio + async def test_returns_agent_when_found_by_graph_id(self): + """Test that agent is returned when found by graph_id.""" + mock_agent = MagicMock() + mock_agent.graph_id = "agent-123" + mock_agent.graph_version = 1 + mock_agent.name = "Test Agent" + mock_agent.description = "Test description" + mock_agent.input_schema = {"properties": {}} + mock_agent.output_schema = {"properties": {}} + + with patch.object( + core.library_db, + "get_library_agent_by_graph_id", + new_callable=AsyncMock, + return_value=mock_agent, + ): + result = await core.get_library_agent_by_id("user-123", "agent-123") + + assert result is not None + assert result["graph_id"] == "agent-123" + assert result["name"] == "Test Agent" + + @pytest.mark.asyncio + async def test_falls_back_to_library_agent_id(self): + """Test that lookup falls back to library agent ID when graph_id not found.""" + mock_agent = MagicMock() + mock_agent.graph_id = "graph-456" # Different from the lookup ID + mock_agent.graph_version = 1 + mock_agent.name = "Library Agent" + mock_agent.description = "Found by library ID" + mock_agent.input_schema = {"properties": {}} + mock_agent.output_schema = {"properties": {}} + + with ( + patch.object( + core.library_db, + "get_library_agent_by_graph_id", + new_callable=AsyncMock, + return_value=None, # Not found by graph_id + ), + patch.object( + core.library_db, + "get_library_agent", + new_callable=AsyncMock, + return_value=mock_agent, # Found by library ID + ), + ): + result = await core.get_library_agent_by_id("user-123", "library-id-123") + + assert result is not None + assert result["graph_id"] == "graph-456" + assert result["name"] == "Library Agent" + + @pytest.mark.asyncio + async def test_returns_none_when_not_found_by_either_method(self): + """Test that None is returned when agent not found by either method.""" + with ( + patch.object( + core.library_db, + "get_library_agent_by_graph_id", + new_callable=AsyncMock, + return_value=None, + ), + patch.object( + core.library_db, + "get_library_agent", + new_callable=AsyncMock, + side_effect=core.NotFoundError("Not found"), + ), + ): + result = await core.get_library_agent_by_id("user-123", "nonexistent") + + assert result is None + + @pytest.mark.asyncio + async def test_returns_none_on_exception(self): + """Test that None is returned when exception occurs in both lookups.""" + with ( + patch.object( + core.library_db, + "get_library_agent_by_graph_id", + new_callable=AsyncMock, + side_effect=Exception("Database error"), + ), + patch.object( + core.library_db, + "get_library_agent", + new_callable=AsyncMock, + side_effect=Exception("Database error"), + ), + ): + result = await core.get_library_agent_by_id("user-123", "agent-123") + + assert result is None + + @pytest.mark.asyncio + async def test_alias_works(self): + """Test that get_library_agent_by_graph_id is an alias for get_library_agent_by_id.""" + assert core.get_library_agent_by_graph_id is core.get_library_agent_by_id + + +class TestGetAllRelevantAgentsWithUuids: + """Test UUID extraction in get_all_relevant_agents_for_generation.""" + + @pytest.mark.asyncio + async def test_fetches_explicitly_mentioned_agents(self): + """Test that agents mentioned by UUID are fetched directly.""" + mock_agent = MagicMock() + mock_agent.graph_id = "46631191-e8a8-486f-ad90-84f89738321d" + mock_agent.graph_version = 1 + mock_agent.name = "Mentioned Agent" + mock_agent.description = "Explicitly mentioned" + 
mock_agent.input_schema = {} + mock_agent.output_schema = {} + + mock_response = MagicMock() + mock_response.agents = [] + + with ( + patch.object( + core.library_db, + "get_library_agent_by_graph_id", + new_callable=AsyncMock, + return_value=mock_agent, + ), + patch.object( + core.library_db, + "list_library_agents", + new_callable=AsyncMock, + return_value=mock_response, + ), + ): + result = await core.get_all_relevant_agents_for_generation( + user_id="user-123", + search_query="Use agent 46631191-e8a8-486f-ad90-84f89738321d", + include_marketplace=False, + ) + + assert len(result) == 1 + assert result[0].get("graph_id") == "46631191-e8a8-486f-ad90-84f89738321d" + assert result[0].get("name") == "Mentioned Agent" + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/autogpt_platform/backend/test/agent_generator/test_service.py b/autogpt_platform/backend/test/agent_generator/test_service.py index fe7a1a7fdd..d62dca1729 100644 --- a/autogpt_platform/backend/test/agent_generator/test_service.py +++ b/autogpt_platform/backend/test/agent_generator/test_service.py @@ -433,5 +433,139 @@ class TestGetBlocksExternal: assert result is None +class TestLibraryAgentsPassthrough: + """Test that library_agents are passed correctly in all requests.""" + + def setup_method(self): + """Reset client singleton before each test.""" + service._settings = None + service._client = None + + @pytest.mark.asyncio + async def test_decompose_goal_passes_library_agents(self): + """Test that library_agents are included in decompose goal payload.""" + library_agents = [ + { + "graph_id": "agent-123", + "graph_version": 1, + "name": "Email Sender", + "description": "Sends emails", + "input_schema": {"properties": {"to": {"type": "string"}}}, + "output_schema": {"properties": {"sent": {"type": "boolean"}}}, + }, + ] + + mock_response = MagicMock() + mock_response.json.return_value = { + "success": True, + "type": "instructions", + "steps": ["Step 1"], + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.post.return_value = mock_response + + with patch.object(service, "_get_client", return_value=mock_client): + await service.decompose_goal_external( + "Send an email", + library_agents=library_agents, + ) + + # Verify library_agents was passed in the payload + call_args = mock_client.post.call_args + assert call_args[1]["json"]["library_agents"] == library_agents + + @pytest.mark.asyncio + async def test_generate_agent_passes_library_agents(self): + """Test that library_agents are included in generate agent payload.""" + library_agents = [ + { + "graph_id": "agent-456", + "graph_version": 2, + "name": "Data Fetcher", + "description": "Fetches data from API", + "input_schema": {"properties": {"url": {"type": "string"}}}, + "output_schema": {"properties": {"data": {"type": "object"}}}, + }, + ] + + mock_response = MagicMock() + mock_response.json.return_value = { + "success": True, + "agent_json": {"name": "Test Agent", "nodes": []}, + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.post.return_value = mock_response + + with patch.object(service, "_get_client", return_value=mock_client): + await service.generate_agent_external( + {"steps": ["Step 1"]}, + library_agents=library_agents, + ) + + # Verify library_agents was passed in the payload + call_args = mock_client.post.call_args + assert call_args[1]["json"]["library_agents"] == library_agents + + @pytest.mark.asyncio + async def 
test_generate_agent_patch_passes_library_agents(self): + """Test that library_agents are included in patch generation payload.""" + library_agents = [ + { + "graph_id": "agent-789", + "graph_version": 1, + "name": "Slack Notifier", + "description": "Sends Slack messages", + "input_schema": {"properties": {"message": {"type": "string"}}}, + "output_schema": {"properties": {"success": {"type": "boolean"}}}, + }, + ] + + mock_response = MagicMock() + mock_response.json.return_value = { + "success": True, + "agent_json": {"name": "Updated Agent", "nodes": []}, + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.post.return_value = mock_response + + with patch.object(service, "_get_client", return_value=mock_client): + await service.generate_agent_patch_external( + "Add error handling", + {"name": "Original Agent", "nodes": []}, + library_agents=library_agents, + ) + + # Verify library_agents was passed in the payload + call_args = mock_client.post.call_args + assert call_args[1]["json"]["library_agents"] == library_agents + + @pytest.mark.asyncio + async def test_decompose_goal_without_library_agents(self): + """Test that decompose goal works without library_agents.""" + mock_response = MagicMock() + mock_response.json.return_value = { + "success": True, + "type": "instructions", + "steps": ["Step 1"], + } + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.post.return_value = mock_response + + with patch.object(service, "_get_client", return_value=mock_client): + await service.decompose_goal_external("Build a workflow") + + # Verify library_agents was NOT passed when not provided + call_args = mock_client.post.call_args + assert "library_agents" not in call_args[1]["json"] + + if __name__ == "__main__": pytest.main([__file__, "-v"]) diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode.tsx index 94e917a4ac..834603cc4a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode.tsx @@ -857,7 +857,7 @@ export const CustomNode = React.memo( })(); const hasAdvancedFields = - data.inputSchema && + data.inputSchema?.properties && Object.entries(data.inputSchema.properties).some(([key, value]) => { return ( value.advanced === true && !data.inputSchema.required?.includes(key) diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index 6692c30e72..a44ceb8388 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -7981,6 +7981,25 @@ ] }, "new_output": { "type": "boolean", "title": "New Output" }, + "execution_count": { + "type": "integer", + "title": "Execution Count", + "default": 0 + }, + "success_rate": { + "anyOf": [{ "type": "number" }, { "type": "null" }], + "title": "Success Rate" + }, + "avg_correctness_score": { + "anyOf": [{ "type": "number" }, { "type": "null" }], + "title": "Avg Correctness Score" + }, + "recent_executions": { + "items": { "$ref": "#/components/schemas/RecentExecution" }, + "type": "array", + "title": "Recent Executions", + "description": "List of recent executions with status, score, and summary" + }, "can_access_graph": { "type": "boolean", 
"title": "Can Access Graph" @@ -9374,6 +9393,23 @@ "required": ["providers", "pagination"], "title": "ProviderResponse" }, + "RecentExecution": { + "properties": { + "status": { "type": "string", "title": "Status" }, + "correctness_score": { + "anyOf": [{ "type": "number" }, { "type": "null" }], + "title": "Correctness Score" + }, + "activity_summary": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Activity Summary" + } + }, + "type": "object", + "required": ["status"], + "title": "RecentExecution", + "description": "Summary of a recent execution for quality assessment.\n\nUsed by the LLM to understand the agent's recent performance with specific examples\nrather than just aggregate statistics." + }, "RefundRequest": { "properties": { "id": { "type": "string", "title": "Id" }, diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx index c922d0da76..2ac433a272 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx @@ -156,11 +156,19 @@ export function ChatMessage({ } if (isClarificationNeeded && message.type === "clarification_needed") { + const hasUserReplyAfter = + index >= 0 && + messages + .slice(index + 1) + .some((m) => m.type === "message" && m.role === "user"); + return ( ); diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ClarificationQuestionsWidget/ClarificationQuestionsWidget.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ClarificationQuestionsWidget/ClarificationQuestionsWidget.tsx index a3bd17dd3f..3b225d1ef1 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ClarificationQuestionsWidget/ClarificationQuestionsWidget.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ClarificationQuestionsWidget/ClarificationQuestionsWidget.tsx @@ -6,7 +6,7 @@ import { Input } from "@/components/atoms/Input/Input"; import { Text } from "@/components/atoms/Text/Text"; import { cn } from "@/lib/utils"; import { CheckCircleIcon, QuestionIcon } from "@phosphor-icons/react"; -import { useState } from "react"; +import { useState, useEffect, useRef } from "react"; export interface ClarifyingQuestion { question: string; @@ -17,39 +17,96 @@ export interface ClarifyingQuestion { interface Props { questions: ClarifyingQuestion[]; message: string; + sessionId?: string; onSubmitAnswers: (answers: Record) => void; onCancel?: () => void; + isAnswered?: boolean; className?: string; } +function getStorageKey(sessionId?: string): string | null { + if (!sessionId) return null; + return `clarification_answers_${sessionId}`; +} + export function ClarificationQuestionsWidget({ questions, message, + sessionId, onSubmitAnswers, onCancel, + isAnswered = false, className, }: Props) { const [answers, setAnswers] = useState>({}); const [isSubmitted, setIsSubmitted] = useState(false); + const lastSessionIdRef = useRef(undefined); + + useEffect(() => { + const storageKey = getStorageKey(sessionId); + if (!storageKey) { + setAnswers({}); + setIsSubmitted(false); + lastSessionIdRef.current = sessionId; + return; + } + + try { + const saved = localStorage.getItem(storageKey); + if (saved) { + const parsed = JSON.parse(saved) as Record; + setAnswers(parsed); + } else { + setAnswers({}); + } 
+ setIsSubmitted(false); + } catch { + setAnswers({}); + setIsSubmitted(false); + } + lastSessionIdRef.current = sessionId; + }, [sessionId]); + + useEffect(() => { + if (lastSessionIdRef.current !== sessionId) { + return; + } + const storageKey = getStorageKey(sessionId); + if (!storageKey) return; + + const hasAnswers = Object.values(answers).some((v) => v.trim()); + try { + if (hasAnswers) { + localStorage.setItem(storageKey, JSON.stringify(answers)); + } else { + localStorage.removeItem(storageKey); + } + } catch {} + }, [answers, sessionId]); function handleAnswerChange(keyword: string, value: string) { setAnswers((prev) => ({ ...prev, [keyword]: value })); } function handleSubmit() { - // Check if all questions are answered const allAnswered = questions.every((q) => answers[q.keyword]?.trim()); if (!allAnswered) { return; } setIsSubmitted(true); onSubmitAnswers(answers); + + const storageKey = getStorageKey(sessionId); + try { + if (storageKey) { + localStorage.removeItem(storageKey); + } + } catch {} } const allAnswered = questions.every((q) => answers[q.keyword]?.trim()); - // Show submitted state after answers are submitted - if (isSubmitted) { + if (isAnswered || isSubmitted) { return (
; - if (response.error) return stripInternalReasoning(String(response.error)); if (response.message) return stripInternalReasoning(String(response.message)); + if (response.error) return stripInternalReasoning(String(response.error)); } return "An error occurred"; } @@ -363,8 +363,8 @@ export function formatToolResponse(result: unknown, toolName: string): string { case "error": const errorMsg = - (response.error as string) || response.message || "An error occurred"; - return `Error: ${errorMsg}`; + (response.message as string) || response.error || "An error occurred"; + return stripInternalReasoning(String(errorMsg)); case "no_results": const suggestions = (response.suggestions as string[]) || []; From 7ee94d986cd39ba1c39ab0cd4577039cdeb83b92 Mon Sep 17 00:00:00 2001 From: Otto Date: Sat, 31 Jan 2026 03:05:31 +0000 Subject: [PATCH 23/25] docs: add credentials prerequisites to create-basic-agent guide (#11913) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary Addresses #11785 - users were encountering `openai_api_key_credentials` errors when following the create-basic-agent guide because it didn't mention the need to configure API credentials before using AI blocks. ## Changes Added a **Prerequisites** section to `docs/platform/create-basic-agent.md` explaining: - **Cloud users:** Go to Profile → Integrations to add API keys - **Self-hosted (Docker):** Add keys to `autogpt_platform/backend/.env` and restart services Also added a note that the Calculator example doesn't need credentials, making it a good first test. ## Related - Issue: #11785 --- docs/platform/create-basic-agent.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/docs/platform/create-basic-agent.md b/docs/platform/create-basic-agent.md index 7721fb9b9c..ffe654ba99 100644 --- a/docs/platform/create-basic-agent.md +++ b/docs/platform/create-basic-agent.md @@ -4,6 +4,28 @@ This guide walks through creating a simple question-answer AI agent using AutoGPT's visual builder. This is a basic example that can be expanded into more complex agents. +## **Prerequisites** + +### **Cloud-Hosted AutoGPT** +If you're using the cloud-hosted version at [agpt.co](https://agpt.co), you're ready to go! AI blocks come with **built-in credits** — no API keys required to get started. If you'd prefer to use your own API keys, you can add them via **Profile → Integrations**. + +### **Self-Hosted (Docker)** +If you're running AutoGPT locally with Docker, you'll need to add your own API keys to `autogpt_platform/backend/.env`: + +```bash +# Create or edit backend/.env +OPENAI_API_KEY=sk-your-key-here +ANTHROPIC_API_KEY=sk-ant-your-key-here +# Add other provider keys as needed +``` + +After adding keys, restart the services: +```bash +docker compose down && docker compose up -d +``` + +**Note:** The Calculator example below doesn't require any API credentials — it's a good way to test your setup before adding AI blocks. + ## **Example Agent: Q&A (with AI)** A step-by-step guide to creating a simple Q&A agent using input and output blocks. From 7e37de8e30529bd9b8c2ee3e3e74bf689ff13783 Mon Sep 17 00:00:00 2001 From: Otto Date: Sat, 31 Jan 2026 19:17:36 +0000 Subject: [PATCH 24/25] fix: Include graph schemas for marketplace agents in Agent Generator (#11920) ## Problem When marketplace agents are included in the `library_agents` payload sent to the Agent Generator service, they were missing required fields (`graph_id`, `graph_version`, `input_schema`, `output_schema`). 
This caused Pydantic validation to fail with HTTP 422 Unprocessable Entity. **Root cause:** The `MarketplaceAgentSummary` TypedDict had a different shape than `LibraryAgentInfo` expected by the Agent Generator: - Agent Generator expects: `graph_id`, `graph_version`, `name`, `description`, `input_schema`, `output_schema` - MarketplaceAgentSummary had: `name`, `description`, `sub_heading`, `creator`, `is_marketplace_agent` ## Solution 1. **Add `agent_graph_id` to `StoreAgent` model** - The field was already in the database view but not exposed 2. **Include `agentGraphId` in hybrid search SQL query** - Carry the field through the search CTEs 3. **Update `search_marketplace_agents_for_generation()`** - Now fetches full graph schemas using `get_graph()` and returns `LibraryAgentSummary` (same type as library agents) 4. **Update deduplication logic** - Use `graph_id` instead of name for more accurate deduplication ## Changes - `backend/api/features/store/model.py`: Add optional `agent_graph_id` field to `StoreAgent` - `backend/api/features/store/hybrid_search.py`: Include `agentGraphId` in SQL query columns - `backend/api/features/store/db.py`: Map `agentGraphId` when creating `StoreAgent` objects - `backend/api/features/chat/tools/agent_generator/core.py`: Update `search_marketplace_agents_for_generation()` to fetch and include full graph schemas ## Testing - [ ] Agent creation on dev with marketplace agents in context - [ ] Verify no 422 errors from Agent Generator - [ ] Verify marketplace agents can be used as sub-agents Fixes: SECRT-1817 --------- Co-authored-by: majdyz Co-authored-by: Zamil Majdy --- .../chat/tools/agent_generator/core.py | 59 +++++++++++-------- .../backend/backend/api/features/store/db.py | 2 + .../api/features/store/hybrid_search.py | 2 + .../backend/api/features/store/model.py | 1 + .../backend/api/features/store/model_test.py | 3 + .../backend/api/features/store/routes_test.py | 6 ++ .../api/features/store/test_cache_delete.py | 1 + .../backend/backend/data/graph.py | 33 +++++++++++ .../backend/snapshots/agts_by_creator | 3 +- .../backend/snapshots/agts_category | 3 +- .../backend/snapshots/agts_pagination | 15 +++-- .../backend/snapshots/agts_search | 3 +- .../backend/snapshots/agts_sorted | 3 +- autogpt_platform/backend/snapshots/feat_agts | 3 +- .../agent_generator/test_library_agents.py | 58 +++++++++++------- .../frontend/src/app/api/openapi.json | 6 +- 16 files changed, 143 insertions(+), 58 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py index 466f6438a3..0ddd2aa86b 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py @@ -14,6 +14,7 @@ from backend.data.graph import ( create_graph, get_graph, get_graph_all_versions, + get_store_listed_graphs, ) from backend.util.exceptions import DatabaseError, NotFoundError @@ -266,18 +267,18 @@ async def get_library_agents_for_generation( async def search_marketplace_agents_for_generation( search_query: str, max_results: int = 10, -) -> list[MarketplaceAgentSummary]: +) -> list[LibraryAgentSummary]: """Search marketplace agents formatted for Agent Generator. - Note: This returns basic agent info. Full input/output schemas would require - additional graph fetches and is a potential future enhancement. 
+ Fetches marketplace agents and their full schemas so they can be used + as sub-agents in generated workflows. Args: search_query: Search term to find relevant public agents max_results: Maximum number of agents to return (default 10) Returns: - List of MarketplaceAgentSummary (without detailed schemas for now) + List of LibraryAgentSummary with full input/output schemas """ try: response = await store_db.get_store_agents( @@ -286,17 +287,31 @@ async def search_marketplace_agents_for_generation( page_size=max_results, ) - results: list[MarketplaceAgentSummary] = [] - for agent in response.agents: - results.append( - MarketplaceAgentSummary( - name=agent.agent_name, - description=agent.description, - sub_heading=agent.sub_heading, - creator=agent.creator, - is_marketplace_agent=True, + agents_with_graphs = [ + agent for agent in response.agents if agent.agent_graph_id + ] + + if not agents_with_graphs: + return [] + + graph_ids = [agent.agent_graph_id for agent in agents_with_graphs] + graphs = await get_store_listed_graphs(*graph_ids) + + results: list[LibraryAgentSummary] = [] + for agent in agents_with_graphs: + graph_id = agent.agent_graph_id + if graph_id and graph_id in graphs: + graph = graphs[graph_id] + results.append( + LibraryAgentSummary( + graph_id=graph.id, + graph_version=graph.version, + name=agent.agent_name, + description=agent.description, + input_schema=graph.input_schema, + output_schema=graph.output_schema, + ) ) - ) return results except Exception as e: logger.warning(f"Failed to search marketplace agents: {e}") @@ -327,8 +342,7 @@ async def get_all_relevant_agents_for_generation( max_marketplace_results: Max marketplace agents to return (default 10) Returns: - List of AgentSummary, library agents first (with full schemas), - then marketplace agents (basic info only) + List of AgentSummary with full schemas (both library and marketplace agents) """ agents: list[AgentSummary] = [] seen_graph_ids: set[str] = set() @@ -365,16 +379,11 @@ async def get_all_relevant_agents_for_generation( search_query=search_query, max_results=max_marketplace_results, ) - library_names: set[str] = set() - for a in agents: - name = a.get("name") - if name and isinstance(name, str): - library_names.add(name.lower()) for agent in marketplace_agents: - agent_name = agent.get("name") - if agent_name and isinstance(agent_name, str): - if agent_name.lower() not in library_names: - agents.append(agent) + graph_id = agent.get("graph_id") + if graph_id and graph_id not in seen_graph_ids: + agents.append(agent) + seen_graph_ids.add(graph_id) return agents diff --git a/autogpt_platform/backend/backend/api/features/store/db.py b/autogpt_platform/backend/backend/api/features/store/db.py index 956fdfa7da..850a2bc3e9 100644 --- a/autogpt_platform/backend/backend/api/features/store/db.py +++ b/autogpt_platform/backend/backend/api/features/store/db.py @@ -112,6 +112,7 @@ async def get_store_agents( description=agent["description"], runs=agent["runs"], rating=agent["rating"], + agent_graph_id=agent.get("agentGraphId", ""), ) store_agents.append(store_agent) except Exception as e: @@ -170,6 +171,7 @@ async def get_store_agents( description=agent.description, runs=agent.runs, rating=agent.rating, + agent_graph_id=agent.agentGraphId, ) # Add to the list only if creation was successful store_agents.append(store_agent) diff --git a/autogpt_platform/backend/backend/api/features/store/hybrid_search.py b/autogpt_platform/backend/backend/api/features/store/hybrid_search.py index 8b0884bb24..e1b8f402c8 100644 --- 
a/autogpt_platform/backend/backend/api/features/store/hybrid_search.py +++ b/autogpt_platform/backend/backend/api/features/store/hybrid_search.py @@ -600,6 +600,7 @@ async def hybrid_search( sa.featured, sa.is_available, sa.updated_at, + sa."agentGraphId", -- Searchable text for BM25 reranking COALESCE(sa.agent_name, '') || ' ' || COALESCE(sa.sub_heading, '') || ' ' || COALESCE(sa.description, '') as searchable_text, -- Semantic score @@ -659,6 +660,7 @@ async def hybrid_search( featured, is_available, updated_at, + "agentGraphId", searchable_text, semantic_score, lexical_score, diff --git a/autogpt_platform/backend/backend/api/features/store/model.py b/autogpt_platform/backend/backend/api/features/store/model.py index a3310b96fc..d66b91807d 100644 --- a/autogpt_platform/backend/backend/api/features/store/model.py +++ b/autogpt_platform/backend/backend/api/features/store/model.py @@ -38,6 +38,7 @@ class StoreAgent(pydantic.BaseModel): description: str runs: int rating: float + agent_graph_id: str class StoreAgentsResponse(pydantic.BaseModel): diff --git a/autogpt_platform/backend/backend/api/features/store/model_test.py b/autogpt_platform/backend/backend/api/features/store/model_test.py index fd09a0cf77..c4109f4603 100644 --- a/autogpt_platform/backend/backend/api/features/store/model_test.py +++ b/autogpt_platform/backend/backend/api/features/store/model_test.py @@ -26,11 +26,13 @@ def test_store_agent(): description="Test description", runs=50, rating=4.5, + agent_graph_id="test-graph-id", ) assert agent.slug == "test-agent" assert agent.agent_name == "Test Agent" assert agent.runs == 50 assert agent.rating == 4.5 + assert agent.agent_graph_id == "test-graph-id" def test_store_agents_response(): @@ -46,6 +48,7 @@ def test_store_agents_response(): description="Test description", runs=50, rating=4.5, + agent_graph_id="test-graph-id", ) ], pagination=store_model.Pagination( diff --git a/autogpt_platform/backend/backend/api/features/store/routes_test.py b/autogpt_platform/backend/backend/api/features/store/routes_test.py index 36431c20ec..fcef3f845a 100644 --- a/autogpt_platform/backend/backend/api/features/store/routes_test.py +++ b/autogpt_platform/backend/backend/api/features/store/routes_test.py @@ -82,6 +82,7 @@ def test_get_agents_featured( description="Featured agent description", runs=100, rating=4.5, + agent_graph_id="test-graph-1", ) ], pagination=store_model.Pagination( @@ -127,6 +128,7 @@ def test_get_agents_by_creator( description="Creator agent description", runs=50, rating=4.0, + agent_graph_id="test-graph-2", ) ], pagination=store_model.Pagination( @@ -172,6 +174,7 @@ def test_get_agents_sorted( description="Top agent description", runs=1000, rating=5.0, + agent_graph_id="test-graph-3", ) ], pagination=store_model.Pagination( @@ -217,6 +220,7 @@ def test_get_agents_search( description="Specific search term description", runs=75, rating=4.2, + agent_graph_id="test-graph-search", ) ], pagination=store_model.Pagination( @@ -262,6 +266,7 @@ def test_get_agents_category( description="Category agent description", runs=60, rating=4.1, + agent_graph_id="test-graph-category", ) ], pagination=store_model.Pagination( @@ -306,6 +311,7 @@ def test_get_agents_pagination( description=f"Agent {i} description", runs=i * 10, rating=4.0, + agent_graph_id="test-graph-2", ) for i in range(5) ], diff --git a/autogpt_platform/backend/backend/api/features/store/test_cache_delete.py b/autogpt_platform/backend/backend/api/features/store/test_cache_delete.py index dd9be1f4ab..298c51d47c 100644 --- 
a/autogpt_platform/backend/backend/api/features/store/test_cache_delete.py +++ b/autogpt_platform/backend/backend/api/features/store/test_cache_delete.py @@ -33,6 +33,7 @@ class TestCacheDeletion: description="Test description", runs=100, rating=4.5, + agent_graph_id="test-graph-id", ) ], pagination=Pagination( diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index c1f38f81d5..ee6cd2e4b0 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -1028,6 +1028,39 @@ async def get_graph( return GraphModel.from_db(graph, for_export) +async def get_store_listed_graphs(*graph_ids: str) -> dict[str, GraphModel]: + """Batch-fetch multiple store-listed graphs by their IDs. + + Only returns graphs that have approved store listings (publicly available). + Does not require permission checks since store-listed graphs are public. + + Args: + *graph_ids: Variable number of graph IDs to fetch + + Returns: + Dict mapping graph_id to GraphModel for graphs with approved store listings + """ + if not graph_ids: + return {} + + store_listings = await StoreListingVersion.prisma().find_many( + where={ + "agentGraphId": {"in": list(graph_ids)}, + "submissionStatus": SubmissionStatus.APPROVED, + "isDeleted": False, + }, + include={"AgentGraph": {"include": AGENT_GRAPH_INCLUDE}}, + distinct=["agentGraphId"], + order={"agentGraphVersion": "desc"}, + ) + + return { + listing.agentGraphId: GraphModel.from_db(listing.AgentGraph) + for listing in store_listings + if listing.AgentGraph + } + + async def get_graph_as_admin( graph_id: str, version: int | None = None, diff --git a/autogpt_platform/backend/snapshots/agts_by_creator b/autogpt_platform/backend/snapshots/agts_by_creator index 4d6dd12920..3f2e128a0d 100644 --- a/autogpt_platform/backend/snapshots/agts_by_creator +++ b/autogpt_platform/backend/snapshots/agts_by_creator @@ -9,7 +9,8 @@ "sub_heading": "Creator agent subheading", "description": "Creator agent description", "runs": 50, - "rating": 4.0 + "rating": 4.0, + "agent_graph_id": "test-graph-2" } ], "pagination": { diff --git a/autogpt_platform/backend/snapshots/agts_category b/autogpt_platform/backend/snapshots/agts_category index f65925ead3..4d0531763c 100644 --- a/autogpt_platform/backend/snapshots/agts_category +++ b/autogpt_platform/backend/snapshots/agts_category @@ -9,7 +9,8 @@ "sub_heading": "Category agent subheading", "description": "Category agent description", "runs": 60, - "rating": 4.1 + "rating": 4.1, + "agent_graph_id": "test-graph-category" } ], "pagination": { diff --git a/autogpt_platform/backend/snapshots/agts_pagination b/autogpt_platform/backend/snapshots/agts_pagination index 82e7f5f9bf..7b946157fb 100644 --- a/autogpt_platform/backend/snapshots/agts_pagination +++ b/autogpt_platform/backend/snapshots/agts_pagination @@ -9,7 +9,8 @@ "sub_heading": "Agent 0 subheading", "description": "Agent 0 description", "runs": 0, - "rating": 4.0 + "rating": 4.0, + "agent_graph_id": "test-graph-2" }, { "slug": "agent-1", @@ -20,7 +21,8 @@ "sub_heading": "Agent 1 subheading", "description": "Agent 1 description", "runs": 10, - "rating": 4.0 + "rating": 4.0, + "agent_graph_id": "test-graph-2" }, { "slug": "agent-2", @@ -31,7 +33,8 @@ "sub_heading": "Agent 2 subheading", "description": "Agent 2 description", "runs": 20, - "rating": 4.0 + "rating": 4.0, + "agent_graph_id": "test-graph-2" }, { "slug": "agent-3", @@ -42,7 +45,8 @@ "sub_heading": "Agent 3 subheading", "description": "Agent 3 
description", "runs": 30, - "rating": 4.0 + "rating": 4.0, + "agent_graph_id": "test-graph-2" }, { "slug": "agent-4", @@ -53,7 +57,8 @@ "sub_heading": "Agent 4 subheading", "description": "Agent 4 description", "runs": 40, - "rating": 4.0 + "rating": 4.0, + "agent_graph_id": "test-graph-2" } ], "pagination": { diff --git a/autogpt_platform/backend/snapshots/agts_search b/autogpt_platform/backend/snapshots/agts_search index ca3f504584..ae9cc116bc 100644 --- a/autogpt_platform/backend/snapshots/agts_search +++ b/autogpt_platform/backend/snapshots/agts_search @@ -9,7 +9,8 @@ "sub_heading": "Search agent subheading", "description": "Specific search term description", "runs": 75, - "rating": 4.2 + "rating": 4.2, + "agent_graph_id": "test-graph-search" } ], "pagination": { diff --git a/autogpt_platform/backend/snapshots/agts_sorted b/autogpt_platform/backend/snapshots/agts_sorted index cddead76a5..b182256b2c 100644 --- a/autogpt_platform/backend/snapshots/agts_sorted +++ b/autogpt_platform/backend/snapshots/agts_sorted @@ -9,7 +9,8 @@ "sub_heading": "Top agent subheading", "description": "Top agent description", "runs": 1000, - "rating": 5.0 + "rating": 5.0, + "agent_graph_id": "test-graph-3" } ], "pagination": { diff --git a/autogpt_platform/backend/snapshots/feat_agts b/autogpt_platform/backend/snapshots/feat_agts index d57996a768..4f85786434 100644 --- a/autogpt_platform/backend/snapshots/feat_agts +++ b/autogpt_platform/backend/snapshots/feat_agts @@ -9,7 +9,8 @@ "sub_heading": "Featured agent subheading", "description": "Featured agent description", "runs": 100, - "rating": 4.5 + "rating": 4.5, + "agent_graph_id": "test-graph-1" } ], "pagination": { diff --git a/autogpt_platform/backend/test/agent_generator/test_library_agents.py b/autogpt_platform/backend/test/agent_generator/test_library_agents.py index e62b0746e7..8387339582 100644 --- a/autogpt_platform/backend/test/agent_generator/test_library_agents.py +++ b/autogpt_platform/backend/test/agent_generator/test_library_agents.py @@ -134,15 +134,28 @@ class TestSearchMarketplaceAgentsForGeneration: description="A public agent", sub_heading="Does something useful", creator="creator-1", + agent_graph_id="graph-123", ) ] - # The store_db is dynamically imported, so patch the import path - with patch( - "backend.api.features.store.db.get_store_agents", - new_callable=AsyncMock, - return_value=mock_response, - ) as mock_search: + mock_graph = MagicMock() + mock_graph.id = "graph-123" + mock_graph.version = 1 + mock_graph.input_schema = {"type": "object"} + mock_graph.output_schema = {"type": "object"} + + with ( + patch( + "backend.api.features.store.db.get_store_agents", + new_callable=AsyncMock, + return_value=mock_response, + ) as mock_search, + patch( + "backend.api.features.chat.tools.agent_generator.core.get_store_listed_graphs", + new_callable=AsyncMock, + return_value={"graph-123": mock_graph}, + ), + ): result = await core.search_marketplace_agents_for_generation( search_query="automation", max_results=10, @@ -156,7 +169,7 @@ class TestSearchMarketplaceAgentsForGeneration: assert len(result) == 1 assert result[0]["name"] == "Public Agent" - assert result[0]["is_marketplace_agent"] is True + assert result[0]["graph_id"] == "graph-123" @pytest.mark.asyncio async def test_handles_marketplace_error_gracefully(self): @@ -193,11 +206,12 @@ class TestGetAllRelevantAgentsForGeneration: marketplace_agents = [ { + "graph_id": "market-456", + "graph_version": 1, "name": "Market Agent", "description": "From marketplace", - "sub_heading": "Sub 
heading", - "creator": "creator-1", - "is_marketplace_agent": True, + "input_schema": {}, + "output_schema": {}, } ] @@ -225,11 +239,11 @@ class TestGetAllRelevantAgentsForGeneration: assert result[1]["name"] == "Market Agent" @pytest.mark.asyncio - async def test_deduplicates_by_name(self): - """Test that marketplace agents with same name as library are excluded.""" + async def test_deduplicates_by_graph_id(self): + """Test that marketplace agents with same graph_id as library are excluded.""" library_agents = [ { - "graph_id": "lib-123", + "graph_id": "shared-123", "graph_version": 1, "name": "Shared Agent", "description": "From library", @@ -240,18 +254,20 @@ class TestGetAllRelevantAgentsForGeneration: marketplace_agents = [ { - "name": "Shared Agent", # Same name, should be deduplicated + "graph_id": "shared-123", # Same graph_id, should be deduplicated + "graph_version": 1, + "name": "Shared Agent", "description": "From marketplace", - "sub_heading": "Sub heading", - "creator": "creator-1", - "is_marketplace_agent": True, + "input_schema": {}, + "output_schema": {}, }, { + "graph_id": "unique-456", + "graph_version": 1, "name": "Unique Agent", "description": "Only in marketplace", - "sub_heading": "Sub heading", - "creator": "creator-2", - "is_marketplace_agent": True, + "input_schema": {}, + "output_schema": {}, }, ] @@ -273,7 +289,7 @@ class TestGetAllRelevantAgentsForGeneration: include_marketplace=True, ) - # Shared Agent from marketplace should be excluded + # Shared Agent from marketplace should be excluded by graph_id assert len(result) == 2 names = [a["name"] for a in result] assert "Shared Agent" in names diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index a44ceb8388..aa4c49b1a2 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -9833,7 +9833,8 @@ "sub_heading": { "type": "string", "title": "Sub Heading" }, "description": { "type": "string", "title": "Description" }, "runs": { "type": "integer", "title": "Runs" }, - "rating": { "type": "number", "title": "Rating" } + "rating": { "type": "number", "title": "Rating" }, + "agent_graph_id": { "type": "string", "title": "Agent Graph Id" } }, "type": "object", "required": [ @@ -9845,7 +9846,8 @@ "sub_heading", "description", "runs", - "rating" + "rating", + "agent_graph_id" ], "title": "StoreAgent" }, From 1081590384c5355cc7af95f7f3936b25f16aa4aa Mon Sep 17 00:00:00 2001 From: "Guofang.Tang" Date: Mon, 2 Feb 2026 04:29:15 +0800 Subject: [PATCH 25/25] feat(backend): cover webhook ingress URL route (#11747) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes 🏗️ - Add a unit test to verify webhook ingress URL generation matches the FastAPI route. 
### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] poetry run pytest backend/integrations/webhooks/utils_test.py --confcutdir=backend/integrations/webhooks #### For configuration changes: - [x] .env.default is updated or already compatible with my changes - [x] docker-compose.yml is updated or already compatible with my changes - [x] I have included a list of my configuration changes in the PR description (under Changes) ## Summary by CodeRabbit * **Tests** * Added a unit test that validates webhook ingress URL generation matches the application's resolved route (scheme, host, and path) for provider-specific webhook endpoints, improving confidence in routing behavior and helping prevent regressions. ✏️ Tip: You can customize this high-level summary in your review settings. --------- Co-authored-by: Reinier van der Leer --- .../integrations/webhooks/utils_test.py | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 autogpt_platform/backend/backend/integrations/webhooks/utils_test.py diff --git a/autogpt_platform/backend/backend/integrations/webhooks/utils_test.py b/autogpt_platform/backend/backend/integrations/webhooks/utils_test.py new file mode 100644 index 0000000000..bc502a8e44 --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/webhooks/utils_test.py @@ -0,0 +1,39 @@ +from urllib.parse import urlparse + +import fastapi +from fastapi.routing import APIRoute + +from backend.api.features.integrations.router import router as integrations_router +from backend.integrations.providers import ProviderName +from backend.integrations.webhooks import utils as webhooks_utils + + +def test_webhook_ingress_url_matches_route(monkeypatch) -> None: + app = fastapi.FastAPI() + app.include_router(integrations_router, prefix="/api/integrations") + + provider = ProviderName.GITHUB + webhook_id = "webhook_123" + base_url = "https://example.com" + + monkeypatch.setattr(webhooks_utils.app_config, "platform_base_url", base_url) + + route = next( + route + for route in integrations_router.routes + if isinstance(route, APIRoute) + and route.path == "/{provider}/webhooks/{webhook_id}/ingress" + and "POST" in route.methods + ) + expected_path = f"/api/integrations{route.path}".format( + provider=provider.value, + webhook_id=webhook_id, + ) + actual_url = urlparse(webhooks_utils.webhook_ingress_url(provider, webhook_id)) + expected_base = urlparse(base_url) + + assert (actual_url.scheme, actual_url.netloc) == ( + expected_base.scheme, + expected_base.netloc, + ) + assert actual_url.path == expected_path