{
+ const { sessionId, abortController } = stream;
+
+ try {
+ const url = `/api/chat/sessions/${sessionId}/stream`;
+ const body = JSON.stringify({
+ message,
+ is_user_message: isUserMessage,
+ context: context || null,
+ });
+
+ const response = await fetch(url, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Accept: "text/event-stream",
+ },
+ body,
+ signal: abortController.signal,
+ });
+
+ if (!response.ok) {
+ const errorText = await response.text();
+ throw new Error(errorText || `HTTP ${response.status}`);
+ }
+
+ if (!response.body) {
+ throw new Error("Response body is null");
+ }
+
+ const reader = response.body.getReader();
+ const decoder = new TextDecoder();
+ let buffer = "";
+
+ while (true) {
+ const { done, value } = await reader.read();
+
+ if (done) {
+ notifySubscribers(stream, { type: "stream_end" });
+ stream.status = "completed";
+ return;
+ }
+
+ buffer += decoder.decode(value, { stream: true });
+ const lines = buffer.split("\n");
+ buffer = lines.pop() || "";
+
+ for (const line of lines) {
+ const data = parseSSELine(line);
+ if (data !== null) {
+ if (data === "[DONE]") {
+ notifySubscribers(stream, { type: "stream_end" });
+ stream.status = "completed";
+ return;
+ }
+
+ try {
+ const rawChunk = JSON.parse(data) as
+ | StreamChunk
+ | VercelStreamChunk;
+ const chunk = normalizeStreamChunk(rawChunk);
+ if (!chunk) continue;
+
+ notifySubscribers(stream, chunk);
+
+ if (chunk.type === "stream_end") {
+ stream.status = "completed";
+ return;
+ }
+
+ if (chunk.type === "error") {
+ stream.status = "error";
+ stream.error = new Error(
+ chunk.message || chunk.content || "Stream error",
+ );
+ return;
+ }
+ } catch (err) {
+ console.warn("[StreamExecutor] Failed to parse SSE chunk:", err);
+ }
+ }
+ }
+ }
+ } catch (err) {
+ if (err instanceof Error && err.name === "AbortError") {
+ notifySubscribers(stream, { type: "stream_end" });
+ stream.status = "completed";
+ return;
+ }
+
+ if (retryCount < MAX_RETRIES) {
+ const retryDelay = INITIAL_RETRY_DELAY * Math.pow(2, retryCount);
+ console.log(
+ `[StreamExecutor] Retrying in ${retryDelay}ms (attempt ${retryCount + 1}/${MAX_RETRIES})`,
+ );
+ await new Promise((resolve) => setTimeout(resolve, retryDelay));
+ return executeStream(
+ stream,
+ message,
+ isUserMessage,
+ context,
+ retryCount + 1,
+ );
+ }
+
+ stream.status = "error";
+ stream.error = err instanceof Error ? err : new Error("Stream failed");
+ notifySubscribers(stream, {
+ type: "error",
+ message: stream.error.message,
+ });
+ }
+}
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/stream-utils.ts b/autogpt_platform/frontend/src/components/contextual/Chat/stream-utils.ts
new file mode 100644
index 0000000000..4100926e79
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/stream-utils.ts
@@ -0,0 +1,84 @@
+import type { ToolArguments, ToolResult } from "@/types/chat";
+import type { StreamChunk, VercelStreamChunk } from "./chat-types";
+
+const LEGACY_STREAM_TYPES = new Set([
+ "text_chunk",
+ "text_ended",
+ "tool_call",
+ "tool_call_start",
+ "tool_response",
+ "login_needed",
+ "need_login",
+ "credentials_needed",
+ "error",
+ "usage",
+ "stream_end",
+]);
+
+export function isLegacyStreamChunk(
+ chunk: StreamChunk | VercelStreamChunk,
+): chunk is StreamChunk {
+ return LEGACY_STREAM_TYPES.has(chunk.type as StreamChunk["type"]);
+}
+
+export function normalizeStreamChunk(
+ chunk: StreamChunk | VercelStreamChunk,
+): StreamChunk | null {
+ if (isLegacyStreamChunk(chunk)) return chunk;
+
+ switch (chunk.type) {
+ case "text-delta":
+ return { type: "text_chunk", content: chunk.delta };
+ case "text-end":
+ return { type: "text_ended" };
+ case "tool-input-available":
+ return {
+ type: "tool_call_start",
+ tool_id: chunk.toolCallId,
+ tool_name: chunk.toolName,
+ arguments: chunk.input as ToolArguments,
+ };
+ case "tool-output-available":
+ return {
+ type: "tool_response",
+ tool_id: chunk.toolCallId,
+ tool_name: chunk.toolName,
+ result: chunk.output as ToolResult,
+ success: chunk.success ?? true,
+ };
+ case "usage":
+ return {
+ type: "usage",
+ promptTokens: chunk.promptTokens,
+ completionTokens: chunk.completionTokens,
+ totalTokens: chunk.totalTokens,
+ };
+ case "error":
+ return {
+ type: "error",
+ message: chunk.errorText,
+ code: chunk.code,
+ details: chunk.details,
+ };
+ case "finish":
+ return { type: "stream_end" };
+ case "start":
+ case "text-start":
+ return null;
+ case "tool-input-start":
+ return {
+ type: "tool_call_start",
+ tool_id: chunk.toolCallId,
+ tool_name: chunk.toolName,
+ arguments: {},
+ };
+ }
+}
+
+export const MAX_RETRIES = 3;
+export const INITIAL_RETRY_DELAY = 1000;
+
+export function parseSSELine(line: string): string | null {
+ if (line.startsWith("data: ")) return line.slice(6);
+ return null;
+}
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts b/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts
index cf629a287c..124301abc4 100644
--- a/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/useChat.ts
@@ -2,7 +2,6 @@
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { useEffect, useRef, useState } from "react";
-import { toast } from "sonner";
import { useChatSession } from "./useChatSession";
import { useChatStream } from "./useChatStream";
@@ -27,6 +26,7 @@ export function useChat({ urlSessionId }: UseChatArgs = {}) {
claimSession,
clearSession: clearSessionBase,
loadSession,
+ startPollingForOperation,
} = useChatSession({
urlSessionId,
autoCreate: false,
@@ -67,38 +67,16 @@ export function useChat({ urlSessionId }: UseChatArgs = {}) {
],
);
- useEffect(() => {
- if (isLoading || isCreating) {
- const timer = setTimeout(() => {
- setShowLoader(true);
- }, 300);
- return () => clearTimeout(timer);
- } else {
+ useEffect(
+ function showLoaderWithDelay() {
+ if (isLoading || isCreating) {
+ const timer = setTimeout(() => setShowLoader(true), 300);
+ return () => clearTimeout(timer);
+ }
setShowLoader(false);
- }
- }, [isLoading, isCreating]);
-
- useEffect(function monitorNetworkStatus() {
- function handleOnline() {
- toast.success("Connection restored", {
- description: "You're back online",
- });
- }
-
- function handleOffline() {
- toast.error("You're offline", {
- description: "Check your internet connection",
- });
- }
-
- window.addEventListener("online", handleOnline);
- window.addEventListener("offline", handleOffline);
-
- return () => {
- window.removeEventListener("online", handleOnline);
- window.removeEventListener("offline", handleOffline);
- };
- }, []);
+ },
+ [isLoading, isCreating],
+ );
function clearSession() {
clearSessionBase();
@@ -117,5 +95,6 @@ export function useChat({ urlSessionId }: UseChatArgs = {}) {
loadSession,
sessionId: sessionIdFromHook,
showLoader,
+ startPollingForOperation,
};
}
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/useChatDrawer.ts b/autogpt_platform/frontend/src/components/contextual/Chat/useChatDrawer.ts
deleted file mode 100644
index 62e1a5a569..0000000000
--- a/autogpt_platform/frontend/src/components/contextual/Chat/useChatDrawer.ts
+++ /dev/null
@@ -1,17 +0,0 @@
-"use client";
-
-import { create } from "zustand";
-
-interface ChatDrawerState {
- isOpen: boolean;
- open: () => void;
- close: () => void;
- toggle: () => void;
-}
-
-export const useChatDrawer = create<ChatDrawerState>((set) => ({
- isOpen: false,
- open: () => set({ isOpen: true }),
- close: () => set({ isOpen: false }),
- toggle: () => set((state) => ({ isOpen: !state.isOpen })),
-}));
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts b/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts
index 553e348f79..936a49936c 100644
--- a/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts
@@ -1,6 +1,7 @@
import {
getGetV2GetSessionQueryKey,
getGetV2GetSessionQueryOptions,
+ getGetV2ListSessionsQueryKey,
postV2CreateSession,
useGetV2GetSession,
usePatchV2SessionAssignUser,
@@ -58,6 +59,7 @@ export function useChatSession({
query: {
enabled: !!sessionId,
select: okData,
+ staleTime: 0,
retry: shouldRetrySessionLoad,
retryDelay: getSessionRetryDelay,
},
@@ -101,6 +103,125 @@ export function useChatSession({
}
}, [createError, loadError]);
+ // Track if we should be polling (set by external callers when they receive operation_started via SSE)
+ const [forcePolling, setForcePolling] = useState(false);
+ // Track if we've seen server acknowledge the pending operation (to avoid clearing forcePolling prematurely)
+ const hasSeenServerPendingRef = useRef(false);
+
+ // Check if there are any pending operations in the messages
+ // Must check all operation types: operation_pending, operation_started, operation_in_progress
+ const hasPendingOperationsFromServer = useMemo(() => {
+ if (!messages || messages.length === 0) return false;
+ const pendingTypes = new Set([
+ "operation_pending",
+ "operation_in_progress",
+ "operation_started",
+ ]);
+ return messages.some((msg) => {
+ if (msg.role !== "tool" || !msg.content) return false;
+ try {
+ const content =
+ typeof msg.content === "string"
+ ? JSON.parse(msg.content)
+ : msg.content;
+ return pendingTypes.has(content?.type);
+ } catch {
+ return false;
+ }
+ });
+ }, [messages]);
+
+ // Track when server has acknowledged the pending operation
+ useEffect(() => {
+ if (hasPendingOperationsFromServer) {
+ hasSeenServerPendingRef.current = true;
+ }
+ }, [hasPendingOperationsFromServer]);
+
+ // Combined: poll if server has pending ops OR if we received operation_started via SSE
+ const hasPendingOperations = hasPendingOperationsFromServer || forcePolling;
+
+ // Clear forcePolling only after server has acknowledged AND completed the operation
+ useEffect(() => {
+ if (
+ forcePolling &&
+ !hasPendingOperationsFromServer &&
+ hasSeenServerPendingRef.current
+ ) {
+ // Server acknowledged the operation and it's now complete
+ setForcePolling(false);
+ hasSeenServerPendingRef.current = false;
+ }
+ }, [forcePolling, hasPendingOperationsFromServer]);
+
+ // Function to trigger polling (called when operation_started is received via SSE)
+ function startPollingForOperation() {
+ setForcePolling(true);
+ hasSeenServerPendingRef.current = false; // Reset for new operation
+ }
+
+ // Refresh sessions list when a pending operation completes
+ // (hasPendingOperations transitions from true to false)
+ const prevHasPendingOperationsRef = useRef(hasPendingOperations);
+ useEffect(
+ function refreshSessionsListOnOperationComplete() {
+ const wasHasPending = prevHasPendingOperationsRef.current;
+ prevHasPendingOperationsRef.current = hasPendingOperations;
+
+ // Only invalidate when transitioning from pending to not pending
+ if (wasHasPending && !hasPendingOperations && sessionId) {
+ queryClient.invalidateQueries({
+ queryKey: getGetV2ListSessionsQueryKey(),
+ });
+ }
+ },
+ [hasPendingOperations, sessionId, queryClient],
+ );
+
+ // Poll for updates when there are pending operations
+ // Backoff: 2s, 4s, 6s, 8s, 10s, ... up to 30s max
+ const pollAttemptRef = useRef(0);
+ const hasPendingOperationsRef = useRef(hasPendingOperations);
+ hasPendingOperationsRef.current = hasPendingOperations;
+
+ useEffect(
+ function pollForPendingOperations() {
+ if (!sessionId || !hasPendingOperations) {
+ pollAttemptRef.current = 0;
+ return;
+ }
+
+ let cancelled = false;
+ let timeoutId: ReturnType<typeof setTimeout> | null = null;
+
+ function schedule() {
+ // 2s, 4s, 6s, 8s, 10s, ... 30s (max)
+ const delay = Math.min((pollAttemptRef.current + 1) * 2000, 30000);
+ timeoutId = setTimeout(async () => {
+ if (cancelled) return;
+ pollAttemptRef.current += 1;
+ try {
+ await refetch();
+ } catch (err) {
+ console.error("[useChatSession] Poll failed:", err);
+ } finally {
+ if (!cancelled && hasPendingOperationsRef.current) {
+ schedule();
+ }
+ }
+ }, delay);
+ }
+
+ schedule();
+
+ return () => {
+ cancelled = true;
+ if (timeoutId) clearTimeout(timeoutId);
+ };
+ },
+ [sessionId, hasPendingOperations, refetch],
+ );
+
async function createSession() {
try {
setError(null);
@@ -227,11 +348,13 @@ export function useChatSession({
isCreating,
error,
isSessionNotFound: isNotFoundError(loadError),
+ hasPendingOperations,
createSession,
loadSession,
refreshSession,
claimSession,
clearSession,
+ startPollingForOperation,
};
}
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/useChatStream.ts b/autogpt_platform/frontend/src/components/contextual/Chat/useChatStream.ts
index 903c19cd30..5a9f637457 100644
--- a/autogpt_platform/frontend/src/components/contextual/Chat/useChatStream.ts
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/useChatStream.ts
@@ -1,543 +1,110 @@
-import type { ToolArguments, ToolResult } from "@/types/chat";
-import { useCallback, useEffect, useRef, useState } from "react";
+"use client";
+
+import { useEffect, useRef, useState } from "react";
import { toast } from "sonner";
+import { useChatStore } from "./chat-store";
+import type { StreamChunk } from "./chat-types";
-const MAX_RETRIES = 3;
-const INITIAL_RETRY_DELAY = 1000;
-
-export interface StreamChunk {
- type:
- | "text_chunk"
- | "text_ended"
- | "tool_call"
- | "tool_call_start"
- | "tool_response"
- | "login_needed"
- | "need_login"
- | "credentials_needed"
- | "error"
- | "usage"
- | "stream_end";
- timestamp?: string;
- content?: string;
- message?: string;
- code?: string;
- details?: Record<string, unknown>;
- tool_id?: string;
- tool_name?: string;
- arguments?: ToolArguments;
- result?: ToolResult;
- success?: boolean;
- idx?: number;
- session_id?: string;
- agent_info?: {
- graph_id: string;
- name: string;
- trigger_type: string;
- };
- provider?: string;
- provider_name?: string;
- credential_type?: string;
- scopes?: string[];
- title?: string;
- [key: string]: unknown;
-}
-
-type VercelStreamChunk =
- | { type: "start"; messageId: string }
- | { type: "finish" }
- | { type: "text-start"; id: string }
- | { type: "text-delta"; id: string; delta: string }
- | { type: "text-end"; id: string }
- | { type: "tool-input-start"; toolCallId: string; toolName: string }
- | {
- type: "tool-input-available";
- toolCallId: string;
- toolName: string;
- input: ToolArguments;
- }
- | {
- type: "tool-output-available";
- toolCallId: string;
- toolName?: string;
- output: ToolResult;
- success?: boolean;
- }
- | {
- type: "usage";
- promptTokens: number;
- completionTokens: number;
- totalTokens: number;
- }
- | {
- type: "error";
- errorText: string;
- code?: string;
- details?: Record<string, unknown>;
- };
-
-const LEGACY_STREAM_TYPES = new Set([
- "text_chunk",
- "text_ended",
- "tool_call",
- "tool_call_start",
- "tool_response",
- "login_needed",
- "need_login",
- "credentials_needed",
- "error",
- "usage",
- "stream_end",
-]);
-
-function isLegacyStreamChunk(
- chunk: StreamChunk | VercelStreamChunk,
-): chunk is StreamChunk {
- return LEGACY_STREAM_TYPES.has(chunk.type as StreamChunk["type"]);
-}
-
-function normalizeStreamChunk(
- chunk: StreamChunk | VercelStreamChunk,
-): StreamChunk | null {
- if (isLegacyStreamChunk(chunk)) {
- return chunk;
- }
- switch (chunk.type) {
- case "text-delta":
- return { type: "text_chunk", content: chunk.delta };
- case "text-end":
- return { type: "text_ended" };
- case "tool-input-available":
- return {
- type: "tool_call_start",
- tool_id: chunk.toolCallId,
- tool_name: chunk.toolName,
- arguments: chunk.input,
- };
- case "tool-output-available":
- return {
- type: "tool_response",
- tool_id: chunk.toolCallId,
- tool_name: chunk.toolName,
- result: chunk.output,
- success: chunk.success ?? true,
- };
- case "usage":
- return {
- type: "usage",
- promptTokens: chunk.promptTokens,
- completionTokens: chunk.completionTokens,
- totalTokens: chunk.totalTokens,
- };
- case "error":
- return {
- type: "error",
- message: chunk.errorText,
- code: chunk.code,
- details: chunk.details,
- };
- case "finish":
- return { type: "stream_end" };
- case "start":
- case "text-start":
- return null;
- case "tool-input-start":
- const toolInputStart = chunk as Extract<
- VercelStreamChunk,
- { type: "tool-input-start" }
- >;
- return {
- type: "tool_call_start",
- tool_id: toolInputStart.toolCallId,
- tool_name: toolInputStart.toolName,
- arguments: {},
- };
- }
-}
+export type { StreamChunk } from "./chat-types";
export function useChatStream() {
const [isStreaming, setIsStreaming] = useState(false);
const [error, setError] = useState<Error | null>(null);
- const retryCountRef = useRef(0);
- const retryTimeoutRef = useRef(null);
- const abortControllerRef = useRef(null);
const currentSessionIdRef = useRef<string | null>(null);
- const requestStartTimeRef = useRef(null);
-
- const stopStreaming = useCallback(
- (sessionId?: string, force: boolean = false) => {
- console.log("[useChatStream] stopStreaming called", {
- hasAbortController: !!abortControllerRef.current,
- isAborted: abortControllerRef.current?.signal.aborted,
- currentSessionId: currentSessionIdRef.current,
- requestedSessionId: sessionId,
- requestStartTime: requestStartTimeRef.current,
- timeSinceStart: requestStartTimeRef.current
- ? Date.now() - requestStartTimeRef.current
- : null,
- force,
- stack: new Error().stack,
- });
-
- if (
- sessionId &&
- currentSessionIdRef.current &&
- currentSessionIdRef.current !== sessionId
- ) {
- console.log(
- "[useChatStream] Session changed, aborting previous stream",
- {
- oldSessionId: currentSessionIdRef.current,
- newSessionId: sessionId,
- },
- );
- }
-
- const controller = abortControllerRef.current;
- if (controller) {
- const timeSinceStart = requestStartTimeRef.current
- ? Date.now() - requestStartTimeRef.current
- : null;
-
- if (!force && timeSinceStart !== null && timeSinceStart < 100) {
- console.log(
- "[useChatStream] Request just started (<100ms), skipping abort to prevent race condition",
- {
- timeSinceStart,
- },
- );
- return;
- }
-
- try {
- const signal = controller.signal;
-
- if (
- signal &&
- typeof signal.aborted === "boolean" &&
- !signal.aborted
- ) {
- console.log("[useChatStream] Aborting stream");
- controller.abort();
- } else {
- console.log(
- "[useChatStream] Stream already aborted or signal invalid",
- );
- }
- } catch (error) {
- if (error instanceof Error && error.name === "AbortError") {
- console.log(
- "[useChatStream] AbortError caught (expected during cleanup)",
- );
- } else {
- console.warn("[useChatStream] Error aborting stream:", error);
- }
- } finally {
- abortControllerRef.current = null;
- requestStartTimeRef.current = null;
- }
- }
- if (retryTimeoutRef.current) {
- clearTimeout(retryTimeoutRef.current);
- retryTimeoutRef.current = null;
- }
- setIsStreaming(false);
- },
- [],
+ const onChunkCallbackRef = useRef<((chunk: StreamChunk) => void) | null>(
+ null,
);
+ const stopStream = useChatStore((s) => s.stopStream);
+ const unregisterActiveSession = useChatStore(
+ (s) => s.unregisterActiveSession,
+ );
+ const isSessionActive = useChatStore((s) => s.isSessionActive);
+ const onStreamComplete = useChatStore((s) => s.onStreamComplete);
+ const getCompletedStream = useChatStore((s) => s.getCompletedStream);
+ const registerActiveSession = useChatStore((s) => s.registerActiveSession);
+ const startStream = useChatStore((s) => s.startStream);
+ const getStreamStatus = useChatStore((s) => s.getStreamStatus);
+
+ function stopStreaming(sessionId?: string) {
+ const targetSession = sessionId || currentSessionIdRef.current;
+ if (targetSession) {
+ stopStream(targetSession);
+ unregisterActiveSession(targetSession);
+ }
+ setIsStreaming(false);
+ }
+
useEffect(() => {
- console.log("[useChatStream] Component mounted");
- return () => {
- const sessionIdAtUnmount = currentSessionIdRef.current;
- console.log(
- "[useChatStream] Component unmounting, calling stopStreaming",
- {
- sessionIdAtUnmount,
- },
- );
- stopStreaming(undefined, false);
+ return function cleanup() {
+ const sessionId = currentSessionIdRef.current;
+ if (sessionId && !isSessionActive(sessionId)) {
+ stopStream(sessionId);
+ }
currentSessionIdRef.current = null;
+ onChunkCallbackRef.current = null;
};
- }, [stopStreaming]);
+ }, []);
- const sendMessage = useCallback(
- async (
- sessionId: string,
- message: string,
- onChunk: (chunk: StreamChunk) => void,
- isUserMessage: boolean = true,
- context?: { url: string; content: string },
- isRetry: boolean = false,
- ) => {
- console.log("[useChatStream] sendMessage called", {
- sessionId,
- message: message.substring(0, 50),
- isUserMessage,
- isRetry,
- stack: new Error().stack,
- });
+ useEffect(() => {
+ const unsubscribe = onStreamComplete(
+ function handleStreamComplete(completedSessionId) {
+ if (completedSessionId !== currentSessionIdRef.current) return;
- const previousSessionId = currentSessionIdRef.current;
- stopStreaming(sessionId, true);
- currentSessionIdRef.current = sessionId;
-
- const abortController = new AbortController();
- abortControllerRef.current = abortController;
- requestStartTimeRef.current = Date.now();
- console.log("[useChatStream] Created new AbortController", {
- sessionId,
- previousSessionId,
- requestStartTime: requestStartTimeRef.current,
- });
-
- if (abortController.signal.aborted) {
- console.warn(
- "[useChatStream] AbortController was aborted before request started",
- );
- requestStartTimeRef.current = null;
- return Promise.reject(new Error("Request aborted"));
- }
-
- if (!isRetry) {
- retryCountRef.current = 0;
- }
- setIsStreaming(true);
- setError(null);
-
- try {
- const url = `/api/chat/sessions/${sessionId}/stream`;
- const body = JSON.stringify({
- message,
- is_user_message: isUserMessage,
- context: context || null,
- });
-
- const response = await fetch(url, {
- method: "POST",
- headers: {
- "Content-Type": "application/json",
- Accept: "text/event-stream",
- },
- body,
- signal: abortController.signal,
- });
-
- console.info("[useChatStream] Stream response", {
- sessionId,
- status: response.status,
- ok: response.ok,
- contentType: response.headers.get("content-type"),
- });
-
- if (!response.ok) {
- const errorText = await response.text();
- console.warn("[useChatStream] Stream response error", {
- sessionId,
- status: response.status,
- errorText,
- });
- throw new Error(errorText || `HTTP ${response.status}`);
- }
-
- if (!response.body) {
- console.warn("[useChatStream] Response body is null", { sessionId });
- throw new Error("Response body is null");
- }
-
- const reader = response.body.getReader();
- const decoder = new TextDecoder();
- let buffer = "";
- let receivedChunkCount = 0;
- let firstChunkAt: number | null = null;
- let loggedLineCount = 0;
-
- return new Promise<void>((resolve, reject) => {
- let didDispatchStreamEnd = false;
-
- function dispatchStreamEnd() {
- if (didDispatchStreamEnd) return;
- didDispatchStreamEnd = true;
- onChunk({ type: "stream_end" });
- }
-
- const cleanup = () => {
- reader.cancel().catch(() => {
- // Ignore cancel errors
- });
- };
-
- async function readStream() {
- try {
- while (true) {
- const { done, value } = await reader.read();
-
- if (done) {
- cleanup();
- console.info("[useChatStream] Stream closed", {
- sessionId,
- receivedChunkCount,
- timeSinceStart: requestStartTimeRef.current
- ? Date.now() - requestStartTimeRef.current
- : null,
- });
- dispatchStreamEnd();
- retryCountRef.current = 0;
- stopStreaming();
- resolve();
- return;
- }
-
- buffer += decoder.decode(value, { stream: true });
- const lines = buffer.split("\n");
- buffer = lines.pop() || "";
-
- for (const line of lines) {
- if (line.startsWith("data: ")) {
- const data = line.slice(6);
- if (loggedLineCount < 3) {
- console.info("[useChatStream] Raw stream line", {
- sessionId,
- data:
- data.length > 300 ? `${data.slice(0, 300)}...` : data,
- });
- loggedLineCount += 1;
- }
- if (data === "[DONE]") {
- cleanup();
- console.info("[useChatStream] Stream done marker", {
- sessionId,
- receivedChunkCount,
- timeSinceStart: requestStartTimeRef.current
- ? Date.now() - requestStartTimeRef.current
- : null,
- });
- dispatchStreamEnd();
- retryCountRef.current = 0;
- stopStreaming();
- resolve();
- return;
- }
-
- try {
- const rawChunk = JSON.parse(data) as
- | StreamChunk
- | VercelStreamChunk;
- const chunk = normalizeStreamChunk(rawChunk);
- if (!chunk) {
- continue;
- }
-
- if (!firstChunkAt) {
- firstChunkAt = Date.now();
- console.info("[useChatStream] First stream chunk", {
- sessionId,
- chunkType: chunk.type,
- timeSinceStart: requestStartTimeRef.current
- ? firstChunkAt - requestStartTimeRef.current
- : null,
- });
- }
- receivedChunkCount += 1;
-
- // Call the chunk handler
- onChunk(chunk);
-
- // Handle stream lifecycle
- if (chunk.type === "stream_end") {
- didDispatchStreamEnd = true;
- cleanup();
- console.info("[useChatStream] Stream end chunk", {
- sessionId,
- receivedChunkCount,
- timeSinceStart: requestStartTimeRef.current
- ? Date.now() - requestStartTimeRef.current
- : null,
- });
- retryCountRef.current = 0;
- stopStreaming();
- resolve();
- return;
- } else if (chunk.type === "error") {
- cleanup();
- reject(
- new Error(
- chunk.message || chunk.content || "Stream error",
- ),
- );
- return;
- }
- } catch (err) {
- // Skip invalid JSON lines
- console.warn("Failed to parse SSE chunk:", err, data);
- }
- }
- }
- }
- } catch (err) {
- if (err instanceof Error && err.name === "AbortError") {
- cleanup();
- dispatchStreamEnd();
- stopStreaming();
- resolve();
- return;
- }
-
- const streamError =
- err instanceof Error ? err : new Error("Failed to read stream");
-
- if (retryCountRef.current < MAX_RETRIES) {
- retryCountRef.current += 1;
- const retryDelay =
- INITIAL_RETRY_DELAY * Math.pow(2, retryCountRef.current - 1);
-
- toast.info("Connection interrupted", {
- description: `Retrying in ${retryDelay / 1000} seconds...`,
- });
-
- retryTimeoutRef.current = setTimeout(() => {
- sendMessage(
- sessionId,
- message,
- onChunk,
- isUserMessage,
- context,
- true,
- ).catch((_err) => {
- // Retry failed
- });
- }, retryDelay);
- } else {
- setError(streamError);
- toast.error("Connection Failed", {
- description:
- "Unable to connect to chat service. Please try again.",
- });
- cleanup();
- dispatchStreamEnd();
- retryCountRef.current = 0;
- stopStreaming();
- reject(streamError);
- }
- }
- }
-
- readStream();
- });
- } catch (err) {
- if (err instanceof Error && err.name === "AbortError") {
- setIsStreaming(false);
- return Promise.resolve();
- }
- const streamError =
- err instanceof Error ? err : new Error("Failed to start stream");
- setError(streamError);
setIsStreaming(false);
- throw streamError;
+ const completed = getCompletedStream(completedSessionId);
+ if (completed?.error) {
+ setError(completed.error);
+ }
+ unregisterActiveSession(completedSessionId);
+ },
+ );
+
+ return unsubscribe;
+ }, []);
+
+ async function sendMessage(
+ sessionId: string,
+ message: string,
+ onChunk: (chunk: StreamChunk) => void,
+ isUserMessage: boolean = true,
+ context?: { url: string; content: string },
+ ) {
+ const previousSessionId = currentSessionIdRef.current;
+ if (previousSessionId && previousSessionId !== sessionId) {
+ stopStreaming(previousSessionId);
+ }
+
+ currentSessionIdRef.current = sessionId;
+ onChunkCallbackRef.current = onChunk;
+ setIsStreaming(true);
+ setError(null);
+
+ registerActiveSession(sessionId);
+
+ try {
+ await startStream(sessionId, message, isUserMessage, context, onChunk);
+
+ const status = getStreamStatus(sessionId);
+ if (status === "error") {
+ const completed = getCompletedStream(sessionId);
+ if (completed?.error) {
+ setError(completed.error);
+ toast.error("Connection Failed", {
+ description: "Unable to connect to chat service. Please try again.",
+ });
+ throw completed.error;
+ }
}
- },
- [stopStreaming],
- );
+ } catch (err) {
+ const streamError =
+ err instanceof Error ? err : new Error("Failed to start stream");
+ setError(streamError);
+ throw streamError;
+ } finally {
+ setIsStreaming(false);
+ }
+ }
return {
isStreaming,
diff --git a/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx b/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx
index 0a3c7de6c8..4a25c84f92 100644
--- a/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx
+++ b/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx
@@ -255,13 +255,18 @@ export function Wallet() {
(notification: WebSocketNotification) => {
if (
notification.type !== "onboarding" ||
- notification.event !== "step_completed" ||
- !walletRef.current
+ notification.event !== "step_completed"
) {
return;
}
- // Only trigger confetti for tasks that are in groups
+ // Always refresh credits when any onboarding step completes
+ fetchCredits();
+
+ // Only trigger confetti for tasks that are in displayed groups
+ if (!walletRef.current) {
+ return;
+ }
const taskIds = groups
.flatMap((group) => group.tasks)
.map((task) => task.id);
@@ -274,7 +279,6 @@ export function Wallet() {
return;
}
- fetchCredits();
party.confetti(walletRef.current, {
count: 30,
spread: 120,
@@ -284,7 +288,7 @@ export function Wallet() {
modules: [fadeOut],
});
},
- [fetchCredits, fadeOut],
+ [fetchCredits, fadeOut, groups],
);
// WebSocket setup for onboarding notifications
diff --git a/autogpt_platform/frontend/src/components/organisms/FloatingReviewsPanel/FloatingReviewsPanel.tsx b/autogpt_platform/frontend/src/components/organisms/FloatingReviewsPanel/FloatingReviewsPanel.tsx
index 2b04c0ed9a..4805508054 100644
--- a/autogpt_platform/frontend/src/components/organisms/FloatingReviewsPanel/FloatingReviewsPanel.tsx
+++ b/autogpt_platform/frontend/src/components/organisms/FloatingReviewsPanel/FloatingReviewsPanel.tsx
@@ -31,6 +31,29 @@ export function FloatingReviewsPanel({
query: {
enabled: !!(graphId && executionId),
select: okData,
+ // Poll while execution is in progress to detect status changes
+ refetchInterval: (q) => {
+ // Note: refetchInterval callback receives raw data before select transform
+ const rawData = q.state.data as
+ | { status: number; data?: { status?: string } }
+ | undefined;
+ if (rawData?.status !== 200) return false;
+
+ const status = rawData?.data?.status;
+ if (!status) return false;
+
+ // Poll every 2 seconds while running or in review
+ if (
+ status === AgentExecutionStatus.RUNNING ||
+ status === AgentExecutionStatus.QUEUED ||
+ status === AgentExecutionStatus.INCOMPLETE ||
+ status === AgentExecutionStatus.REVIEW
+ ) {
+ return 2000;
+ }
+ return false;
+ },
+ refetchIntervalInBackground: true,
},
},
);
@@ -40,28 +63,47 @@ export function FloatingReviewsPanel({
useShallow((state) => state.graphExecutionStatus),
);
+ // Determine if we should poll for pending reviews
+ const isInReviewStatus =
+ executionDetails?.status === AgentExecutionStatus.REVIEW ||
+ graphExecutionStatus === AgentExecutionStatus.REVIEW;
+
const { pendingReviews, isLoading, refetch } = usePendingReviewsForExecution(
executionId || "",
+ {
+ enabled: !!executionId,
+ // Poll every 2 seconds when in REVIEW status to catch new reviews
+ refetchInterval: isInReviewStatus ? 2000 : false,
+ },
);
+ // Refetch pending reviews when execution status changes
useEffect(() => {
- if (executionId) {
+ if (executionId && executionDetails?.status) {
refetch();
}
}, [executionDetails?.status, executionId, refetch]);
- // Refetch when graph execution status changes to REVIEW
- useEffect(() => {
- if (graphExecutionStatus === AgentExecutionStatus.REVIEW && executionId) {
- refetch();
- }
- }, [graphExecutionStatus, executionId, refetch]);
+ // Hide panel if:
+ // 1. No execution ID
+ // 2. No pending reviews and not in REVIEW status
+ // 3. Execution is RUNNING or QUEUED (hasn't paused for review yet)
+ if (!executionId) {
+ return null;
+ }
if (
- !executionId ||
- (!isLoading &&
- pendingReviews.length === 0 &&
- executionDetails?.status !== AgentExecutionStatus.REVIEW)
+ !isLoading &&
+ pendingReviews.length === 0 &&
+ executionDetails?.status !== AgentExecutionStatus.REVIEW
+ ) {
+ return null;
+ }
+
+ // Don't show panel while execution is still running/queued (not paused for review)
+ if (
+ executionDetails?.status === AgentExecutionStatus.RUNNING ||
+ executionDetails?.status === AgentExecutionStatus.QUEUED
) {
return null;
}
diff --git a/autogpt_platform/frontend/src/components/organisms/PendingReviewCard/PendingReviewCard.tsx b/autogpt_platform/frontend/src/components/organisms/PendingReviewCard/PendingReviewCard.tsx
index 3ac636060c..bd456ce771 100644
--- a/autogpt_platform/frontend/src/components/organisms/PendingReviewCard/PendingReviewCard.tsx
+++ b/autogpt_platform/frontend/src/components/organisms/PendingReviewCard/PendingReviewCard.tsx
@@ -1,10 +1,8 @@
import { PendingHumanReviewModel } from "@/app/api/__generated__/models/pendingHumanReviewModel";
import { Text } from "@/components/atoms/Text/Text";
-import { Button } from "@/components/atoms/Button/Button";
import { Input } from "@/components/atoms/Input/Input";
import { Switch } from "@/components/atoms/Switch/Switch";
-import { TrashIcon, EyeSlashIcon } from "@phosphor-icons/react";
-import { useState } from "react";
+import { useEffect, useState } from "react";
interface StructuredReviewPayload {
data: unknown;
@@ -40,37 +38,49 @@ function extractReviewData(payload: unknown): {
interface PendingReviewCardProps {
review: PendingHumanReviewModel;
onReviewDataChange: (nodeExecId: string, data: string) => void;
- reviewMessage?: string;
- onReviewMessageChange?: (nodeExecId: string, message: string) => void;
- isDisabled?: boolean;
- onToggleDisabled?: (nodeExecId: string) => void;
+ autoApproveFuture?: boolean;
+ onAutoApproveFutureChange?: (nodeExecId: string, enabled: boolean) => void;
+ externalDataValue?: string;
+ showAutoApprove?: boolean;
+ nodeId?: string;
}
export function PendingReviewCard({
review,
onReviewDataChange,
- reviewMessage = "",
- onReviewMessageChange,
- isDisabled = false,
- onToggleDisabled,
+ autoApproveFuture = false,
+ onAutoApproveFutureChange,
+ externalDataValue,
+ showAutoApprove = true,
+ nodeId,
}: PendingReviewCardProps) {
const extractedData = extractReviewData(review.payload);
const isDataEditable = review.editable;
- const instructions = extractedData.instructions || review.instructions;
+
+ let instructions = review.instructions;
+
+ const isHITLBlock = instructions && !instructions.includes("Block");
+
+ if (instructions && !isHITLBlock) {
+ instructions = undefined;
+ }
+
const [currentData, setCurrentData] = useState(extractedData.data);
+ useEffect(() => {
+ if (externalDataValue !== undefined) {
+ try {
+ const parsedData = JSON.parse(externalDataValue);
+ setCurrentData(parsedData);
+ } catch {}
+ }
+ }, [externalDataValue]);
+
const handleDataChange = (newValue: unknown) => {
setCurrentData(newValue);
onReviewDataChange(review.node_exec_id, JSON.stringify(newValue, null, 2));
};
- const handleMessageChange = (newMessage: string) => {
- onReviewMessageChange?.(review.node_exec_id, newMessage);
- };
-
- // Show simplified view when no toggle functionality is provided (Screenshot 1 mode)
- const showSimplified = !onToggleDisabled;
-
const renderDataInput = () => {
const data = currentData;
@@ -137,97 +147,59 @@ export function PendingReviewCard({
}
};
- // Helper function to get proper field label
- const getFieldLabel = (instructions?: string) => {
- if (instructions)
- return instructions.charAt(0).toUpperCase() + instructions.slice(1);
- return "Data to Review";
+ const getShortenedNodeId = (id: string) => {
+ if (id.length <= 8) return id;
+ return `${id.slice(0, 4)}...${id.slice(-4)}`;
};
- // Use the existing HITL review interface
return (
- {!showSimplified && (
-
-
- {isDisabled && (
-
- This item will be rejected
-
- )}
+ {nodeId && (
+
+ Node #{getShortenedNodeId(nodeId)}
+
+ )}
+
+
+ {instructions && (
+
+ {instructions}
+
+ )}
+
+ {isDataEditable && !autoApproveFuture ? (
+ renderDataInput()
+ ) : (
+
+
+ {JSON.stringify(currentData, null, 2)}
+
-
-
- )}
+ )}
+
- {/* Show instructions as field label */}
- {instructions && (
-
-
- {getFieldLabel(instructions)}
-
- {isDataEditable && !isDisabled ? (
- renderDataInput()
- ) : (
-
-
- {JSON.stringify(currentData, null, 2)}
-
-
+ {/* Auto-approve toggle for this review */}
+ {showAutoApprove && onAutoApproveFutureChange && (
+
+
+
+ onAutoApproveFutureChange(review.node_exec_id, enabled)
+ }
+ />
+
+ Auto-approve future executions of this block
+
+
+ {autoApproveFuture && (
+
+ Original data will be used for this and all future reviews from
+ this block.
+
)}
)}
-
- {/* If no instructions, show data directly */}
- {!instructions && (
-
-
- Data to Review
- {!isDataEditable && (
-
- (Read-only)
-
- )}
-
- {isDataEditable && !isDisabled ? (
- renderDataInput()
- ) : (
-
-
- {JSON.stringify(currentData, null, 2)}
-
-
- )}
-
- )}
-
- {!showSimplified && isDisabled && (
-
-
- Rejection Reason (Optional):
-
- handleMessageChange(e.target.value)}
- placeholder="Add any notes about why you're rejecting this..."
- />
-
- )}
);
}
diff --git a/autogpt_platform/frontend/src/components/organisms/PendingReviewsList/PendingReviewsList.tsx b/autogpt_platform/frontend/src/components/organisms/PendingReviewsList/PendingReviewsList.tsx
index 3253b0ee6d..5adb3919b6 100644
--- a/autogpt_platform/frontend/src/components/organisms/PendingReviewsList/PendingReviewsList.tsx
+++ b/autogpt_platform/frontend/src/components/organisms/PendingReviewsList/PendingReviewsList.tsx
@@ -1,10 +1,16 @@
-import { useState } from "react";
+import { useMemo, useState } from "react";
import { PendingHumanReviewModel } from "@/app/api/__generated__/models/pendingHumanReviewModel";
import { PendingReviewCard } from "@/components/organisms/PendingReviewCard/PendingReviewCard";
import { Text } from "@/components/atoms/Text/Text";
import { Button } from "@/components/atoms/Button/Button";
+import { Switch } from "@/components/atoms/Switch/Switch";
import { useToast } from "@/components/molecules/Toast/use-toast";
-import { ClockIcon, WarningIcon } from "@phosphor-icons/react";
+import {
+ ClockIcon,
+ WarningIcon,
+ CaretDownIcon,
+ CaretRightIcon,
+} from "@phosphor-icons/react";
import { usePostV2ProcessReviewAction } from "@/app/api/__generated__/endpoints/executions/executions";
interface PendingReviewsListProps {
@@ -32,16 +38,34 @@ export function PendingReviewsList({
},
);
- const [reviewMessageMap, setReviewMessageMap] = useState<
- Record
- >({});
-
const [pendingAction, setPendingAction] = useState<
"approve" | "reject" | null
>(null);
+ const [autoApproveFutureMap, setAutoApproveFutureMap] = useState<
+ Record
+ >({});
+
+ const [collapsedGroups, setCollapsedGroups] = useState<
+ Record
+ >({});
+
const { toast } = useToast();
+ const groupedReviews = useMemo(() => {
+ return reviews.reduce(
+ (acc, review) => {
+ const nodeId = review.node_id || "unknown";
+ if (!acc[nodeId]) {
+ acc[nodeId] = [];
+ }
+ acc[nodeId].push(review);
+ return acc;
+ },
+ {} as Record,
+ );
+ }, [reviews]);
+
const reviewActionMutation = usePostV2ProcessReviewAction({
mutation: {
onSuccess: (res) => {
@@ -88,8 +112,33 @@ export function PendingReviewsList({
setReviewDataMap((prev) => ({ ...prev, [nodeExecId]: data }));
}
- function handleReviewMessageChange(nodeExecId: string, message: string) {
- setReviewMessageMap((prev) => ({ ...prev, [nodeExecId]: message }));
+ function handleAutoApproveFutureToggle(nodeId: string, enabled: boolean) {
+ setAutoApproveFutureMap((prev) => ({
+ ...prev,
+ [nodeId]: enabled,
+ }));
+
+ if (enabled) {
+ const nodeReviews = groupedReviews[nodeId] || [];
+ setReviewDataMap((prev) => {
+ const updated = { ...prev };
+ nodeReviews.forEach((review) => {
+ updated[review.node_exec_id] = JSON.stringify(
+ review.payload,
+ null,
+ 2,
+ );
+ });
+ return updated;
+ });
+ }
+ }
+
+ function toggleGroupCollapse(nodeId: string) {
+ setCollapsedGroups((prev) => ({
+ ...prev,
+ [nodeId]: !prev[nodeId],
+ }));
}
function processReviews(approved: boolean) {
@@ -107,22 +156,25 @@ export function PendingReviewsList({
for (const review of reviews) {
const reviewData = reviewDataMap[review.node_exec_id];
- const reviewMessage = reviewMessageMap[review.node_exec_id];
+ const autoApproveThisNode = autoApproveFutureMap[review.node_id || ""];
- let parsedData: any = review.payload; // Default to original payload
+ let parsedData: any = undefined;
- // Parse edited data if available and editable
- if (review.editable && reviewData) {
- try {
- parsedData = JSON.parse(reviewData);
- } catch (error) {
- toast({
- title: "Invalid JSON",
- description: `Please fix the JSON format in review for node ${review.node_exec_id}: ${error instanceof Error ? error.message : "Invalid syntax"}`,
- variant: "destructive",
- });
- setPendingAction(null);
- return;
+ if (!autoApproveThisNode) {
+ if (review.editable && reviewData) {
+ try {
+ parsedData = JSON.parse(reviewData);
+ } catch (error) {
+ toast({
+ title: "Invalid JSON",
+ description: `Please fix the JSON format in review for node ${review.node_exec_id}: ${error instanceof Error ? error.message : "Invalid syntax"}`,
+ variant: "destructive",
+ });
+ setPendingAction(null);
+ return;
+ }
+ } else {
+ parsedData = review.payload;
}
}
@@ -130,7 +182,7 @@ export function PendingReviewsList({
node_exec_id: review.node_exec_id,
approved,
reviewed_data: parsedData,
- message: reviewMessage || undefined,
+ auto_approve_future: autoApproveThisNode && approved,
});
}
@@ -158,7 +210,6 @@ export function PendingReviewsList({
return (
- {/* Warning Box Header */}
- {reviews.map((review) => (
-
- ))}
+ {Object.entries(groupedReviews).map(([nodeId, nodeReviews]) => {
+ const isCollapsed = collapsedGroups[nodeId] ?? nodeReviews.length > 1;
+ const reviewCount = nodeReviews.length;
+
+ const firstReview = nodeReviews[0];
+ const blockName = firstReview?.instructions;
+ const reviewTitle = `Review required for ${blockName}`;
+
+ const getShortenedNodeId = (id: string) => {
+ if (id.length <= 8) return id;
+ return `${id.slice(0, 4)}...${id.slice(-4)}`;
+ };
+
+ return (
+
+
+
+ {!isCollapsed && (
+
+ {nodeReviews.map((review) => (
+
+ ))}
+
+
+
+ handleAutoApproveFutureToggle(nodeId, enabled)
+ }
+ />
+
+ Auto-approve future executions of this node
+
+
+
+ )}
+
+ );
+ })}
-
-
- Note: Changes you make here apply only to this task
-
-
-
+
+
+
+
+ You can turn auto-approval on or off using the toggle above for each
+ node.
+
);
diff --git a/autogpt_platform/frontend/src/hooks/usePendingReviews.ts b/autogpt_platform/frontend/src/hooks/usePendingReviews.ts
index 8257814fcf..b9d7d711a1 100644
--- a/autogpt_platform/frontend/src/hooks/usePendingReviews.ts
+++ b/autogpt_platform/frontend/src/hooks/usePendingReviews.ts
@@ -15,8 +15,22 @@ export function usePendingReviews() {
};
}
-export function usePendingReviewsForExecution(graphExecId: string) {
- const query = useGetV2GetPendingReviewsForExecution(graphExecId);
+interface UsePendingReviewsForExecutionOptions {
+ enabled?: boolean;
+ refetchInterval?: number | false;
+}
+
+export function usePendingReviewsForExecution(
+ graphExecId: string,
+ options?: UsePendingReviewsForExecutionOptions,
+) {
+ const query = useGetV2GetPendingReviewsForExecution(graphExecId, {
+ query: {
+ enabled: options?.enabled ?? !!graphExecId,
+ refetchInterval: options?.refetchInterval,
+ refetchIntervalInBackground: !!options?.refetchInterval,
+ },
+ });
return {
pendingReviews: okData(query.data) || [],
diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
index 82c03bc9f1..74855f5e28 100644
--- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
+++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
@@ -516,7 +516,7 @@ export type GraphValidationErrorResponse = {
/* *** LIBRARY *** */
-/* Mirror of backend/server/v2/library/model.py:LibraryAgent */
+/* Mirror of backend/api/features/library/model.py:LibraryAgent */
export type LibraryAgent = {
id: LibraryAgentID;
graph_id: GraphID;
@@ -616,7 +616,7 @@ export enum LibraryAgentSortEnum {
/* *** CREDENTIALS *** */
-/* Mirror of backend/server/integrations/router.py:CredentialsMetaResponse */
+/* Mirror of backend/api/features/integrations/router.py:CredentialsMetaResponse */
export type CredentialsMetaResponse = {
id: string;
provider: CredentialsProviderName;
@@ -628,13 +628,13 @@ export type CredentialsMetaResponse = {
is_system?: boolean;
};
-/* Mirror of backend/server/integrations/router.py:CredentialsDeletionResponse */
+/* Mirror of backend/api/features/integrations/router.py:CredentialsDeletionResponse */
export type CredentialsDeleteResponse = {
deleted: true;
revoked: boolean | null;
};
-/* Mirror of backend/server/integrations/router.py:CredentialsDeletionNeedsConfirmationResponse */
+/* Mirror of backend/api/features/integrations/router.py:CredentialsDeletionNeedsConfirmationResponse */
export type CredentialsDeleteNeedConfirmationResponse = {
deleted: false;
need_confirmation: true;
@@ -888,7 +888,7 @@ export type Schedule = {
export type ScheduleID = Brand
;
-/* Mirror of backend/server/routers/v1.py:ScheduleCreationRequest */
+/* Mirror of backend/api/features/v1.py:ScheduleCreationRequest */
export type ScheduleCreatable = {
graph_id: GraphID;
graph_version: number;
@@ -1003,6 +1003,7 @@ export type OnboardingStep =
| "AGENT_INPUT"
| "CONGRATS"
// First Wins
+ | "VISIT_COPILOT"
| "GET_RESULTS"
| "MARKETPLACE_VISIT"
| "MARKETPLACE_ADD_AGENT"
diff --git a/autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx b/autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx
new file mode 100644
index 0000000000..674f6c55eb
--- /dev/null
+++ b/autogpt_platform/frontend/src/providers/posthog/posthog-provider.tsx
@@ -0,0 +1,72 @@
+"use client";
+
+import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
+import { environment } from "@/services/environment";
+import { PostHogProvider as PHProvider } from "@posthog/react";
+import { usePathname, useSearchParams } from "next/navigation";
+import posthog from "posthog-js";
+import { ReactNode, useEffect, useRef } from "react";
+
+export function PostHogProvider({ children }: { children: ReactNode }) {
+ const isPostHogEnabled = environment.isPostHogEnabled();
+ const postHogCredentials = environment.getPostHogCredentials();
+
+ useEffect(() => {
+ if (postHogCredentials.key) {
+ posthog.init(postHogCredentials.key, {
+ api_host: postHogCredentials.host,
+ defaults: "2025-11-30",
+ capture_pageview: false,
+ capture_pageleave: true,
+ autocapture: true,
+ });
+ }
+ }, []);
+
+ if (!isPostHogEnabled) return <>{children}>;
+
+ return {children};
+}
+
+export function PostHogUserTracker() {
+ const { user, isUserLoading } = useSupabase();
+ const previousUserIdRef = useRef(null);
+ const isPostHogEnabled = environment.isPostHogEnabled();
+
+ useEffect(() => {
+ if (isUserLoading || !isPostHogEnabled) return;
+
+ if (user) {
+ if (previousUserIdRef.current !== user.id) {
+ posthog.identify(user.id, {
+ email: user.email,
+ ...(user.user_metadata?.name && { name: user.user_metadata.name }),
+ });
+ previousUserIdRef.current = user.id;
+ }
+ } else if (previousUserIdRef.current !== null) {
+ posthog.reset();
+ previousUserIdRef.current = null;
+ }
+ }, [user, isUserLoading, isPostHogEnabled]);
+
+ return null;
+}
+
+export function PostHogPageViewTracker() {
+ const pathname = usePathname();
+ const searchParams = useSearchParams();
+ const isPostHogEnabled = environment.isPostHogEnabled();
+
+ useEffect(() => {
+ if (pathname && isPostHogEnabled) {
+ let url = window.origin + pathname;
+ if (searchParams && searchParams.toString()) {
+ url = url + `?${searchParams.toString()}`;
+ }
+ posthog.capture("$pageview", { $current_url: url });
+ }
+ }, [pathname, searchParams, isPostHogEnabled]);
+
+ return null;
+}
diff --git a/autogpt_platform/frontend/src/services/environment/index.ts b/autogpt_platform/frontend/src/services/environment/index.ts
index cdd5b421b5..f19bc417e3 100644
--- a/autogpt_platform/frontend/src/services/environment/index.ts
+++ b/autogpt_platform/frontend/src/services/environment/index.ts
@@ -76,6 +76,13 @@ function getPreviewStealingDev() {
return branch;
}
+function getPostHogCredentials() {
+ return {
+ key: process.env.NEXT_PUBLIC_POSTHOG_KEY,
+ host: process.env.NEXT_PUBLIC_POSTHOG_HOST,
+ };
+}
+
function isProductionBuild() {
return process.env.NODE_ENV === "production";
}
@@ -116,6 +123,13 @@ function areFeatureFlagsEnabled() {
return process.env.NEXT_PUBLIC_LAUNCHDARKLY_ENABLED === "enabled";
}
+function isPostHogEnabled() {
+ const inCloud = isCloud();
+ const key = process.env.NEXT_PUBLIC_POSTHOG_KEY;
+ const host = process.env.NEXT_PUBLIC_POSTHOG_HOST;
+ return inCloud && key && host;
+}
+
export const environment = {
// Generic
getEnvironmentStr,
@@ -128,6 +142,7 @@ export const environment = {
getSupabaseUrl,
getSupabaseAnonKey,
getPreviewStealingDev,
+ getPostHogCredentials,
// Assertions
isServerSide,
isClientSide,
@@ -138,5 +153,6 @@ export const environment = {
isCloud,
isLocal,
isVercelPreview,
+ isPostHogEnabled,
areFeatureFlagsEnabled,
};
diff --git a/autogpt_platform/frontend/src/services/network-status/NetworkStatusMonitor.tsx b/autogpt_platform/frontend/src/services/network-status/NetworkStatusMonitor.tsx
new file mode 100644
index 0000000000..7552bbf78c
--- /dev/null
+++ b/autogpt_platform/frontend/src/services/network-status/NetworkStatusMonitor.tsx
@@ -0,0 +1,8 @@
+"use client";
+
+import { useNetworkStatus } from "./useNetworkStatus";
+
+export function NetworkStatusMonitor() {
+ useNetworkStatus();
+ return null;
+}
diff --git a/autogpt_platform/frontend/src/services/network-status/useNetworkStatus.ts b/autogpt_platform/frontend/src/services/network-status/useNetworkStatus.ts
new file mode 100644
index 0000000000..472a6e0e90
--- /dev/null
+++ b/autogpt_platform/frontend/src/services/network-status/useNetworkStatus.ts
@@ -0,0 +1,28 @@
+"use client";
+
+import { useEffect } from "react";
+import { toast } from "sonner";
+
+export function useNetworkStatus() {
+ useEffect(function monitorNetworkStatus() {
+ function handleOnline() {
+ toast.success("Connection restored", {
+ description: "You're back online",
+ });
+ }
+
+ function handleOffline() {
+ toast.error("You're offline", {
+ description: "Check your internet connection",
+ });
+ }
+
+ window.addEventListener("online", handleOnline);
+ window.addEventListener("offline", handleOffline);
+
+ return function cleanup() {
+ window.removeEventListener("online", handleOnline);
+ window.removeEventListener("offline", handleOffline);
+ };
+ }, []);
+}
diff --git a/autogpt_platform/frontend/src/services/storage/local-storage.ts b/autogpt_platform/frontend/src/services/storage/local-storage.ts
index 494ddc3ccc..a1aa63741a 100644
--- a/autogpt_platform/frontend/src/services/storage/local-storage.ts
+++ b/autogpt_platform/frontend/src/services/storage/local-storage.ts
@@ -10,6 +10,7 @@ export enum Key {
LIBRARY_AGENTS_CACHE = "library-agents-cache",
CHAT_SESSION_ID = "chat_session_id",
COOKIE_CONSENT = "autogpt_cookie_consent",
+ AI_AGENT_SAFETY_POPUP_SHOWN = "ai-agent-safety-popup-shown",
}
function get(key: Key) {
diff --git a/autogpt_platform/frontend/src/services/storage/session-storage.ts b/autogpt_platform/frontend/src/services/storage/session-storage.ts
index 8404da571c..1be82c98fb 100644
--- a/autogpt_platform/frontend/src/services/storage/session-storage.ts
+++ b/autogpt_platform/frontend/src/services/storage/session-storage.ts
@@ -3,6 +3,7 @@ import { environment } from "../environment";
export enum SessionKey {
CHAT_SENT_INITIAL_PROMPTS = "chat_sent_initial_prompts",
+ CHAT_INITIAL_PROMPTS = "chat_initial_prompts",
}
function get(key: SessionKey) {
diff --git a/autogpt_platform/frontend/src/tests/pages/login.page.ts b/autogpt_platform/frontend/src/tests/pages/login.page.ts
index 9082cc6219..adcb8d908b 100644
--- a/autogpt_platform/frontend/src/tests/pages/login.page.ts
+++ b/autogpt_platform/frontend/src/tests/pages/login.page.ts
@@ -37,9 +37,13 @@ export class LoginPage {
this.page.on("load", (page) => console.log(`ℹ️ Now at URL: ${page.url()}`));
// Start waiting for navigation before clicking
+ // Wait for redirect to marketplace, onboarding, library, or copilot (new landing pages)
const leaveLoginPage = this.page
.waitForURL(
- (url) => /^\/(marketplace|onboarding(\/.*)?)?$/.test(url.pathname),
+ (url: URL) =>
+ /^\/(marketplace|onboarding(\/.*)?|library|copilot)?$/.test(
+ url.pathname,
+ ),
{ timeout: 10_000 },
)
.catch((reason) => {
diff --git a/autogpt_platform/frontend/src/tests/utils/signup.ts b/autogpt_platform/frontend/src/tests/utils/signup.ts
index 7c8fdbe01b..192a9129b9 100644
--- a/autogpt_platform/frontend/src/tests/utils/signup.ts
+++ b/autogpt_platform/frontend/src/tests/utils/signup.ts
@@ -36,14 +36,16 @@ export async function signupTestUser(
const signupButton = getButton("Sign up");
await signupButton.click();
- // Wait for successful signup - could redirect to onboarding or marketplace
+ // Wait for successful signup - could redirect to various pages depending on onboarding state
try {
- // Wait for either onboarding or marketplace redirect
- await Promise.race([
- page.waitForURL(/\/onboarding/, { timeout: 15000 }),
- page.waitForURL(/\/marketplace/, { timeout: 15000 }),
- ]);
+      // Wait for redirect to onboarding, marketplace, copilot, or library.
+      // A single waitForURL with a URL-matching callback avoids the flakiness of racing two separate waitForURL promises
+ await page.waitForURL(
+ (url: URL) =>
+ /\/(onboarding|marketplace|copilot|library)/.test(url.pathname),
+ { timeout: 15000 },
+ );
} catch (error) {
console.error(
"❌ Timeout waiting for redirect, current URL:",
@@ -54,14 +56,19 @@ export async function signupTestUser(
const currentUrl = page.url();
- // Handle onboarding or marketplace redirect
+ // Handle onboarding redirect if needed
if (currentUrl.includes("/onboarding") && ignoreOnboarding) {
await page.goto("http://localhost:3000/marketplace");
await page.waitForLoadState("domcontentloaded", { timeout: 10000 });
}
- // Verify we're on the expected final page
- if (ignoreOnboarding || currentUrl.includes("/marketplace")) {
+ // Verify we're on an expected final page and user is authenticated
+ if (currentUrl.includes("/copilot") || currentUrl.includes("/library")) {
+ // For copilot/library landing pages, just verify user is authenticated
+ await page
+ .getByTestId("profile-popout-menu-trigger")
+ .waitFor({ state: "visible", timeout: 10000 });
+ } else if (ignoreOnboarding || currentUrl.includes("/marketplace")) {
// Verify we're on marketplace
await page
.getByText(
diff --git a/docs/integrations/README.md b/docs/integrations/README.md
index 023e4cbb45..7c0d0f474a 100644
--- a/docs/integrations/README.md
+++ b/docs/integrations/README.md
@@ -53,7 +53,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
| [Block Installation](block-integrations/basic.md#block-installation) | Given a code string, this block allows the verification and installation of a block code into the system |
| [Concatenate Lists](block-integrations/basic.md#concatenate-lists) | Concatenates multiple lists into a single list |
| [Dictionary Is Empty](block-integrations/basic.md#dictionary-is-empty) | Checks if a dictionary is empty |
-| [File Store](block-integrations/basic.md#file-store) | Stores the input file in the temporary directory |
+| [File Store](block-integrations/basic.md#file-store) | Downloads and stores a file from a URL, data URI, or local path |
| [Find In Dictionary](block-integrations/basic.md#find-in-dictionary) | A block that looks up a value in a dictionary, list, or object by key or index and returns the corresponding value |
| [Find In List](block-integrations/basic.md#find-in-list) | Finds the index of the value in the list |
| [Get All Memories](block-integrations/basic.md#get-all-memories) | Retrieve all memories from Mem0 with optional conversation filtering |
diff --git a/docs/integrations/block-integrations/basic.md b/docs/integrations/block-integrations/basic.md
index f92d19002f..5a73fd5a03 100644
--- a/docs/integrations/block-integrations/basic.md
+++ b/docs/integrations/block-integrations/basic.md
@@ -709,7 +709,7 @@ This is useful for conditional logic where you need to verify if data was return
## File Store
### What it is
-Stores the input file in the temporary directory.
+Downloads and stores a file from a URL, data URI, or local path. Use this to fetch images, documents, or other files for processing. In CoPilot: saves to workspace (use list_workspace_files to see it). In graphs: outputs a data URI to pass to other blocks.
### How it works
@@ -722,15 +722,15 @@ The block outputs a file path that other blocks can use to access the stored fil
| Input | Description | Type | Required |
|-------|-------------|------|----------|
-| file_in | The file to store in the temporary directory, it can be a URL, data URI, or local path. | str (file) | Yes |
-| base_64 | Whether produce an output in base64 format (not recommended, you can pass the string path just fine accross blocks). | bool | No |
+| file_in | The file to download and store. Can be a URL (https://...), data URI, or local path. | str (file) | Yes |
+| base_64 | Whether to produce output in base64 format (not recommended, you can pass the file reference across blocks). | bool | No |
### Outputs
| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
-| file_out | The relative path to the stored file in the temporary directory. | str (file) |
+| file_out | Reference to the stored file. In CoPilot: workspace:// URI (visible in list_workspace_files). In graphs: data URI for passing to other blocks. | str (file) |
### Possible use case
diff --git a/docs/integrations/block-integrations/multimedia.md b/docs/integrations/block-integrations/multimedia.md
new file mode 100644
index 0000000000..6b8f261346
--- /dev/null
+++ b/docs/integrations/block-integrations/multimedia.md
@@ -0,0 +1,117 @@
+# Multimedia
+
+Blocks for processing and manipulating video and audio files.
+
+
+## Add Audio To Video
+
+### What it is
+Block to attach an audio file to a video file using moviepy.
+
+### How it works
+
+This block combines a video file with an audio file using the moviepy library. The audio track is attached to the video, optionally with volume adjustment via the volume parameter (1.0 = original volume).
+
+Input files can be URLs, data URIs, or local paths. The output format is automatically determined: `workspace://` URLs in CoPilot, data URIs in graph executions.
+
+
+### Inputs
+
+| Input | Description | Type | Required |
+|-------|-------------|------|----------|
+| video_in | Video input (URL, data URI, or local path). | str (file) | Yes |
+| audio_in | Audio input (URL, data URI, or local path). | str (file) | Yes |
+| volume | Volume scale for the newly attached audio track (1.0 = original). | float | No |
+
+### Outputs
+
+| Output | Description | Type |
+|--------|-------------|------|
+| error | Error message if the operation failed | str |
+| video_out | Final video (with attached audio), as a path or data URI. | str (file) |
+
+### Possible use case
+
+**Add Voiceover**: Combine generated voiceover audio with video content for narrated videos.
+
+**Background Music**: Add music tracks to silent videos or replace existing audio.
+
+**Audio Replacement**: Swap the audio track of a video for localization or accessibility.
+
+
+---
+
+## Loop Video
+
+### What it is
+Block to loop a video to a given duration or number of repeats.
+
+### How it works
+
+This block extends a video by repeating it to reach a target duration or number of loops. Set duration to specify the total length in seconds, or use n_loops to repeat the video a specific number of times.
+
+The video is repeated end-to-end to produce a seamless loop. The output format is automatically determined: `workspace://` URLs in CoPilot, data URIs in graph executions.
+
+
+### Inputs
+
+| Input | Description | Type | Required |
+|-------|-------------|------|----------|
+| video_in | The input video (can be a URL, data URI, or local path). | str (file) | Yes |
+| duration | Target duration (in seconds) to loop the video to. If omitted, defaults to no looping. | float | No |
+| n_loops | Number of times to repeat the video. If omitted, defaults to 1 (no repeat). | int | No |
+
+### Outputs
+
+| Output | Description | Type |
+|--------|-------------|------|
+| error | Error message if the operation failed | str |
+| video_out | Looped video returned either as a relative path or a data URI. | str |
+
+### Possible use case
+
+**Background Videos**: Loop short clips to match the duration of longer audio or content.
+
+**GIF-Like Content**: Create seamlessly looping video content for social media.
+
+**Filler Content**: Extend short video clips to meet minimum duration requirements.
+
+
+---
+
+## Media Duration
+
+### What it is
+Block to get the duration of a media file.
+
+### How it works
+
+This block analyzes a media file and returns its duration in seconds. Set is_video to true for video files or false for audio files to ensure proper parsing.
+
+The input can be a URL, data URI, or local file path. The duration is returned as a float for precise timing calculations.
+
+
+### Inputs
+
+| Input | Description | Type | Required |
+|-------|-------------|------|----------|
+| media_in | Media input (URL, data URI, or local path). | str (file) | Yes |
+| is_video | Whether the media is a video (True) or audio (False). | bool | No |
+
+### Outputs
+
+| Output | Description | Type |
+|--------|-------------|------|
+| error | Error message if the operation failed | str |
+| duration | Duration of the media file (in seconds). | float |
+
+### Possible use case
+
+**Video Processing Prep**: Get video duration before deciding how to loop, trim, or synchronize it.
+
+**Audio Matching**: Determine audio length to generate matching-length video content.
+
+**Content Validation**: Verify that uploaded media meets duration requirements.
+
+
+---
diff --git a/docs/platform/block-sdk-guide.md b/docs/platform/block-sdk-guide.md
index 5b3eda5184..42fd883251 100644
--- a/docs/platform/block-sdk-guide.md
+++ b/docs/platform/block-sdk-guide.md
@@ -277,6 +277,50 @@ async def run(
token = credentials.api_key.get_secret_value()
```
+### Handling Files
+
+When your block works with files (images, videos, documents), use `store_media_file()`:
+
+```python
+from backend.data.execution import ExecutionContext
+from backend.util.file import store_media_file
+from backend.util.type import MediaFileType
+
+async def run(
+ self,
+ input_data: Input,
+ *,
+ execution_context: ExecutionContext,
+ **kwargs,
+):
+ # PROCESSING: Need local file path for tools like ffmpeg, MoviePy, PIL
+ local_path = await store_media_file(
+ file=input_data.video,
+ execution_context=execution_context,
+ return_format="for_local_processing",
+ )
+
+ # EXTERNAL API: Need base64 content for APIs like Replicate, OpenAI
+ image_b64 = await store_media_file(
+ file=input_data.image,
+ execution_context=execution_context,
+ return_format="for_external_api",
+ )
+
+ # OUTPUT: Return to user/next block (auto-adapts to context)
+ result = await store_media_file(
+ file=generated_url,
+ execution_context=execution_context,
+ return_format="for_block_output", # workspace:// in CoPilot, data URI in graphs
+ )
+ yield "image_url", result
+```
+
+**Return format options:**
+- `"for_local_processing"` - Local file path for processing tools
+- `"for_external_api"` - Data URI for external APIs needing base64
+- `"for_block_output"` - **Always use for outputs** - automatically picks best format
+
## Testing Your Block
```bash
diff --git a/docs/platform/contributing/oauth-integration-flow.md b/docs/platform/contributing/oauth-integration-flow.md
index dbc7a54be5..f6c3f7fd17 100644
--- a/docs/platform/contributing/oauth-integration-flow.md
+++ b/docs/platform/contributing/oauth-integration-flow.md
@@ -25,7 +25,7 @@ This document focuses on the **API Integration OAuth flow** used for connecting
### 2. Backend API Trust Boundary
- **Location**: Server-side FastAPI application
- **Components**:
- - Integration router (`/backend/backend/server/integrations/router.py`)
+ - Integration router (`/backend/backend/api/features/integrations/router.py`)
- OAuth handlers (`/backend/backend/integrations/oauth/`)
- Credentials store (`/backend/backend/integrations/credentials_store.py`)
- **Trust Level**: Trusted - server-controlled environment
diff --git a/docs/platform/new_blocks.md b/docs/platform/new_blocks.md
index d9d329ff51..114ff8d9a4 100644
--- a/docs/platform/new_blocks.md
+++ b/docs/platform/new_blocks.md
@@ -111,6 +111,71 @@ Follow these steps to create and test a new block:
- `graph_exec_id`: The ID of the execution of the agent. This changes every time the agent has a new "run"
- `node_exec_id`: The ID of the execution of the node. This changes every time the node is executed
- `node_id`: The ID of the node that is being executed. It changes every version of the graph, but not every time the node is executed.
+ - `execution_context`: An `ExecutionContext` object containing user_id, graph_exec_id, workspace_id, and session_id. Required for file handling.
+
+### Handling Files in Blocks
+
+When your block needs to work with files (images, videos, documents), use `store_media_file()` from `backend.util.file`. This function handles downloading, validation, virus scanning, and storage.
+
+**Import:**
+```python
+from backend.data.execution import ExecutionContext
+from backend.util.file import store_media_file
+from backend.util.type import MediaFileType
+```
+
+**The `return_format` parameter determines what you get back:**
+
+| Format | Use When | Returns |
+|--------|----------|---------|
+| `"for_local_processing"` | Processing with local tools (ffmpeg, MoviePy, PIL) | Local file path (e.g., `"image.png"`) |
+| `"for_external_api"` | Sending content to external APIs (Replicate, OpenAI) | Data URI (e.g., `"data:image/png;base64,..."`) |
+| `"for_block_output"` | Returning output from your block | Smart: `workspace://` in CoPilot, data URI in graphs |
+
+**Examples:**
+
+```python
+async def run(
+ self,
+ input_data: Input,
+ *,
+ execution_context: ExecutionContext,
+ **kwargs,
+) -> BlockOutput:
+ # PROCESSING: Need to work with file locally (ffmpeg, MoviePy, PIL)
+ local_path = await store_media_file(
+ file=input_data.video,
+ execution_context=execution_context,
+ return_format="for_local_processing",
+ )
+ # local_path = "video.mp4" - use with Path, ffmpeg, subprocess, etc.
+ full_path = get_exec_file_path(execution_context.graph_exec_id, local_path)
+
+ # EXTERNAL API: Need to send content to an API like Replicate
+ image_b64 = await store_media_file(
+ file=input_data.image,
+ execution_context=execution_context,
+ return_format="for_external_api",
+ )
+ # image_b64 = "data:image/png;base64,iVBORw0..." - send to external API
+
+ # OUTPUT: Returning result from block to user/next block
+ result_url = await store_media_file(
+ file=generated_image_url,
+ execution_context=execution_context,
+ return_format="for_block_output",
+ )
+ yield "image_url", result_url
+ # In CoPilot: result_url = "workspace://abc123" (persistent, context-efficient)
+ # In graphs: result_url = "data:image/png;base64,..." (for next block/display)
+```
+
+**Key points:**
+
+- `for_block_output` is the **only** format that auto-adapts to the execution context
+- Always use `for_block_output` for block outputs unless you have a specific reason not to
+- Never manually check for `workspace_id` - let `for_block_output` handle the logic
+- The function handles URLs, data URIs, `workspace://` references, and local paths as input
### Field Types
diff --git a/docs/platform/ollama.md b/docs/platform/ollama.md
index 392bfabfe8..ecab9b8ae1 100644
--- a/docs/platform/ollama.md
+++ b/docs/platform/ollama.md
@@ -246,7 +246,7 @@ If you encounter any issues, verify that:
```bash
ollama pull llama3.2
```
-- If using a custom model, ensure it's added to the model list in `backend/server/model.py`
+- If using a custom model, ensure it's added to the model list in `backend/api/model.py`
#### Docker Issues
- Ensure Docker daemon is running: