-
-
-
Require human approval
-
- The agent will pause and wait for your review before
- continuing
-
+ {shouldShowToggle ? (
+ <>
+ {showHITLToggle && (
+
+
+
+
+ Human-in-the-loop approval
+
+
+ The agent will pause at human-in-the-loop blocks and
+ wait for your review before continuing
+
+
+
+
-
-
-
+ )}
+ {showSensitiveActionToggle && (
+
+
+
+
+ Sensitive action approval
+
+
+ The agent will pause at sensitive action blocks and wait
+ for your review before continuing
+
+
+
+
+
+ )}
+ >
) : (
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx
index 53a7e8b860..0147c19a5c 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx
@@ -1,8 +1,15 @@
"use client";
-import React, { useCallback, useEffect, useMemo, useState } from "react";
+import React, {
+ useCallback,
+ useContext,
+ useEffect,
+ useMemo,
+ useState,
+} from "react";
import {
CredentialsMetaInput,
+ CredentialsType,
GraphExecutionID,
GraphMeta,
LibraryAgentPreset,
@@ -29,7 +36,11 @@ import {
} from "@/components/__legacy__/ui/icons";
import { Input } from "@/components/__legacy__/ui/input";
import { Button } from "@/components/atoms/Button/Button";
-import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
+import { CredentialsGroupedView } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView";
+import {
+ findSavedCredentialByProviderAndType,
+ findSavedUserCredentialByProviderAndType,
+} from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers";
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
import {
useToast,
@@ -37,6 +48,7 @@ import {
} from "@/components/molecules/Toast/use-toast";
import { humanizeCronExpression } from "@/lib/cron-expression-utils";
import { cn, isEmpty } from "@/lib/utils";
+import { CredentialsProvidersContext } from "@/providers/agent-credentials/credentials-provider";
import { ClockIcon, CopyIcon, InfoIcon } from "@phosphor-icons/react";
import { CalendarClockIcon, Trash2Icon } from "lucide-react";
@@ -90,6 +102,7 @@ export function AgentRunDraftView({
const api = useBackendAPI();
const { toast } = useToast();
const toastOnFail = useToastOnFail();
+ const allProviders = useContext(CredentialsProvidersContext);
const [inputValues, setInputValues] = useState>({});
const [inputCredentials, setInputCredentials] = useState<
@@ -128,6 +141,77 @@ export function AgentRunDraftView({
() => graph.credentials_input_schema.properties,
[graph],
);
+ const credentialFields = useMemo(
+ function getCredentialFields() {
+ return Object.entries(agentCredentialsInputFields);
+ },
+ [agentCredentialsInputFields],
+ );
+ const requiredCredentials = useMemo(
+ function getRequiredCredentials() {
+ return new Set(
+ (graph.credentials_input_schema?.required as string[]) || [],
+ );
+ },
+ [graph.credentials_input_schema?.required],
+ );
+
+ useEffect(
+ function initializeDefaultCredentials() {
+ if (!allProviders) return;
+ if (!graph.credentials_input_schema?.properties) return;
+ if (requiredCredentials.size === 0) return;
+
+ setInputCredentials(function updateCredentials(currentCreds) {
+ const next = { ...currentCreds };
+ let didAdd = false;
+
+ for (const key of requiredCredentials) {
+ if (next[key]) continue;
+ const schema = graph.credentials_input_schema.properties[key];
+ if (!schema) continue;
+
+ const providerNames = schema.credentials_provider || [];
+ const credentialTypes = schema.credentials_types || [];
+ const requiredScopes = schema.credentials_scopes;
+
+ const userCredential = findSavedUserCredentialByProviderAndType(
+ providerNames,
+ credentialTypes,
+ requiredScopes,
+ allProviders,
+ );
+
+ const savedCredential =
+ userCredential ||
+ findSavedCredentialByProviderAndType(
+ providerNames,
+ credentialTypes,
+ requiredScopes,
+ allProviders,
+ );
+
+ if (!savedCredential) continue;
+
+ next[key] = {
+ id: savedCredential.id,
+ provider: savedCredential.provider,
+ type: savedCredential.type as CredentialsType,
+ title: savedCredential.title,
+ };
+ didAdd = true;
+ }
+
+ if (!didAdd) return currentCreds;
+ return next;
+ });
+ },
+ [
+ allProviders,
+ graph.credentials_input_schema?.properties,
+ requiredCredentials,
+ ],
+ );
const [allRequiredInputsAreSet, missingInputs] = useMemo(() => {
const nonEmptyInputs = new Set(
@@ -145,18 +229,35 @@ export function AgentRunDraftView({
);
return [isSuperset, difference];
}, [agentInputSchema.required, inputValues]);
- const [allCredentialsAreSet, missingCredentials] = useMemo(() => {
- const availableCredentials = new Set(Object.keys(inputCredentials));
- const allCredentials = new Set(Object.keys(agentCredentialsInputFields));
- // Backwards-compatible implementation of isSupersetOf and difference
- const isSuperset = Array.from(allCredentials).every((item) =>
- availableCredentials.has(item),
- );
- const difference = Array.from(allCredentials).filter(
- (item) => !availableCredentials.has(item),
- );
- return [isSuperset, difference];
- }, [agentCredentialsInputFields, inputCredentials]);
+ const [allCredentialsAreSet, missingCredentials] = useMemo(
+ function getCredentialStatus() {
+ const missing = Array.from(requiredCredentials).filter((key) => {
+ const cred = inputCredentials[key];
+ return !cred || !cred.id;
+ });
+ return [missing.length === 0, missing];
+ },
+ [requiredCredentials, inputCredentials],
+ );
+ function addChangedCredentials(prev: Set) {
+ const next = new Set(prev);
+ next.add("credentials");
+ return next;
+ }
+
+ function handleCredentialChange(key: string, value?: CredentialsMetaInput) {
+ setInputCredentials(function updateInputCredentials(currentCreds) {
+ const next = { ...currentCreds };
+ if (value === undefined) {
+ delete next[key];
+ return next;
+ }
+ next[key] = value;
+ return next;
+ });
+ setChangedPresetAttributes(addChangedCredentials);
+ }
+
const notifyMissingInputs = useCallback(
(needPresetName: boolean = true) => {
const allMissingFields = (
@@ -649,35 +750,6 @@ export function AgentRunDraftView({
>
)}
- {/* Credentials inputs */}
- {Object.entries(agentCredentialsInputFields).map(
- ([key, inputSubSchema]) => (
- {
- setInputCredentials((obj) => {
- const newObj = { ...obj };
- if (value === undefined) {
- delete newObj[key];
- return newObj;
- }
- return {
- ...obj,
- [key]: value,
- };
- });
- setChangedPresetAttributes((prev) =>
- prev.add("credentials"),
- );
- }}
- />
- ),
- )}
-
{/* Regular inputs */}
{Object.entries(agentInputFields).map(([key, inputSubSchema]) => (
))}
+
+ {/* Credentials inputs */}
+ {credentialFields.length > 0 && (
+
+ )}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryAgentCard/useLibraryAgentCard.ts b/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryAgentCard/useLibraryAgentCard.ts
index 4232847226..87e9e9e9bc 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryAgentCard/useLibraryAgentCard.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryAgentCard/useLibraryAgentCard.ts
@@ -8,6 +8,8 @@ import { useGetV2GetUserProfile } from "@/app/api/__generated__/endpoints/store/
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { okData } from "@/app/api/helpers";
import { useToast } from "@/components/molecules/Toast/use-toast";
+import { isLogoutInProgress } from "@/lib/autogpt-server-api/helpers";
+import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { updateFavoriteInQueries } from "./helpers";
interface Props {
@@ -23,10 +25,14 @@ export function useLibraryAgentCard({ agent }: Props) {
const { toast } = useToast();
const queryClient = getQueryClient();
const { mutateAsync: updateLibraryAgent } = usePatchV2UpdateLibraryAgent();
+ const { user, isLoggedIn } = useSupabase();
+ const logoutInProgress = isLogoutInProgress();
const { data: profile } = useGetV2GetUserProfile({
query: {
select: okData,
+ enabled: isLoggedIn && !!user && !logoutInProgress,
+ queryKey: ["/api/store/profile", user?.id],
},
});
diff --git a/autogpt_platform/frontend/src/app/(platform)/login/actions.ts b/autogpt_platform/frontend/src/app/(platform)/login/actions.ts
index 936c879d69..c4867dd123 100644
--- a/autogpt_platform/frontend/src/app/(platform)/login/actions.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/login/actions.ts
@@ -4,7 +4,7 @@ import BackendAPI from "@/lib/autogpt-server-api";
import { getServerSupabase } from "@/lib/supabase/server/getServerSupabase";
import { loginFormSchema } from "@/types/auth";
import * as Sentry from "@sentry/nextjs";
-import { shouldShowOnboarding } from "../../api/helpers";
+import { getOnboardingStatus } from "../../api/helpers";
export async function login(email: string, password: string) {
try {
@@ -36,11 +36,13 @@ export async function login(email: string, password: string) {
const api = new BackendAPI();
await api.createUser();
- const onboarding = await shouldShowOnboarding();
+ // Get onboarding status from backend (includes chat flag evaluated for this user)
+ const { shouldShowOnboarding } = await getOnboardingStatus();
+ const next = shouldShowOnboarding ? "/onboarding" : "/";
return {
success: true,
- onboarding,
+ next,
};
} catch (err) {
Sentry.captureException(err);
diff --git a/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts b/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts
index 656e1febc2..9b81965c31 100644
--- a/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts
@@ -26,7 +26,7 @@ export function useLoginPage() {
useEffect(() => {
if (isLoggedIn && !isLoggingIn) {
- router.push(nextUrl || "/marketplace");
+ router.push(nextUrl || "/");
}
}, [isLoggedIn, isLoggingIn, nextUrl, router]);
@@ -93,13 +93,8 @@ export function useLoginPage() {
throw new Error(result.error || "Login failed");
}
- if (nextUrl) {
- router.replace(nextUrl);
- } else if (result.onboarding) {
- router.replace("/onboarding");
- } else {
- router.replace("/marketplace");
- }
+ // Prefer URL's next parameter, then use backend-determined route
+ router.replace(nextUrl || result.next || "/");
} catch (error) {
toast({
title:
diff --git a/autogpt_platform/frontend/src/app/(platform)/marketplace/components/MainMarketplacePage/__tests__/main.test.tsx b/autogpt_platform/frontend/src/app/(platform)/marketplace/components/MainMarketplacePage/__tests__/main.test.tsx
new file mode 100644
index 0000000000..bee227a7af
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/marketplace/components/MainMarketplacePage/__tests__/main.test.tsx
@@ -0,0 +1,15 @@
+import { expect, test } from "vitest";
+import { render, screen } from "@/tests/integrations/test-utils";
+import { MainMarkeplacePage } from "../MainMarketplacePage";
+import { server } from "@/mocks/mock-server";
+import { getDeleteV2DeleteStoreSubmissionMockHandler422 } from "@/app/api/__generated__/endpoints/store/store.msw";
+
+// Only for CI testing purposes; will be removed in a future PR
+test("MainMarketplacePage", async () => {
+ server.use(getDeleteV2DeleteStoreSubmissionMockHandler422());
+
+ render(
);
+ expect(
+ await screen.findByText("Featured agents", { exact: false }),
+ ).toBeDefined();
+});
diff --git a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/page.tsx b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/page.tsx
index 260fbc0b52..979b113f55 100644
--- a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/page.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/page.tsx
@@ -3,12 +3,14 @@
import { useGetV2GetUserProfile } from "@/app/api/__generated__/endpoints/store/store";
import { ProfileInfoForm } from "@/components/__legacy__/ProfileInfoForm";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
+import { isLogoutInProgress } from "@/lib/autogpt-server-api/helpers";
import { ProfileDetails } from "@/lib/autogpt-server-api/types";
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { ProfileLoading } from "./ProfileLoading";
export default function UserProfilePage() {
const { user } = useSupabase();
+ const logoutInProgress = isLogoutInProgress();
const {
data: profile,
@@ -18,7 +20,7 @@ export default function UserProfilePage() {
refetch,
} = useGetV2GetUserProfile
({
query: {
- enabled: !!user,
+ enabled: !!user && !logoutInProgress,
select: (res) => {
if (res.status === 200) {
return {
diff --git a/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts b/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts
index 68f7ae10ec..204482dbe9 100644
--- a/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts
@@ -4,7 +4,7 @@ import { getServerSupabase } from "@/lib/supabase/server/getServerSupabase";
import { signupFormSchema } from "@/types/auth";
import * as Sentry from "@sentry/nextjs";
import { isWaitlistError, logWaitlistError } from "../../api/auth/utils";
-import { shouldShowOnboarding } from "../../api/helpers";
+import { getOnboardingStatus } from "../../api/helpers";
export async function signup(
email: string,
@@ -57,8 +57,9 @@ export async function signup(
await supabase.auth.setSession(data.session);
}
- const isOnboardingEnabled = await shouldShowOnboarding();
- const next = isOnboardingEnabled ? "/onboarding" : "/";
+ // Get onboarding status from backend (includes chat flag evaluated for this user)
+ const { shouldShowOnboarding } = await getOnboardingStatus();
+ const next = shouldShowOnboarding ? "/onboarding" : "/";
return { success: true, next };
} catch (err) {
diff --git a/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts b/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts
index e6d7c68aef..fd78b48735 100644
--- a/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts
@@ -26,7 +26,7 @@ export function useSignupPage() {
useEffect(() => {
if (isLoggedIn && !isSigningUp) {
- router.push(nextUrl || "/marketplace");
+ router.push(nextUrl || "/");
}
}, [isLoggedIn, isSigningUp, nextUrl, router]);
diff --git a/autogpt_platform/frontend/src/app/api/chat/tasks/[taskId]/stream/route.ts b/autogpt_platform/frontend/src/app/api/chat/tasks/[taskId]/stream/route.ts
new file mode 100644
index 0000000000..336786bfdb
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/api/chat/tasks/[taskId]/stream/route.ts
@@ -0,0 +1,81 @@
+import { environment } from "@/services/environment";
+import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers";
+import { NextRequest } from "next/server";
+
+/**
+ * SSE Proxy for task stream reconnection.
+ *
+ * This endpoint allows clients to reconnect to an ongoing or recently completed
+ * background task's stream. It replays missed messages from Redis Streams and
+ * subscribes to live updates if the task is still running.
+ *
+ * Client contract:
+ * 1. When receiving an operation_started event, store the task_id
+ * 2. To reconnect: GET /api/chat/tasks/{taskId}/stream?last_message_id={idx}
+ * 3. Messages are replayed from the last_message_id position
+ * 4. Stream ends when "finish" event is received
+ */
+export async function GET(
+ request: NextRequest,
+ { params }: { params: Promise<{ taskId: string }> },
+) {
+ const { taskId } = await params;
+ const searchParams = request.nextUrl.searchParams;
+ const lastMessageId = searchParams.get("last_message_id") || "0-0";
+
+ try {
+ // Get auth token from server-side session
+ const token = await getServerAuthToken();
+
+ // Build backend URL
+ const backendUrl = environment.getAGPTServerBaseUrl();
+ const streamUrl = new URL(`/api/chat/tasks/${taskId}/stream`, backendUrl);
+ streamUrl.searchParams.set("last_message_id", lastMessageId);
+
+ // Forward request to backend with auth header
+ const headers: Record = {
+ Accept: "text/event-stream",
+ "Cache-Control": "no-cache",
+ Connection: "keep-alive",
+ };
+
+ if (token) {
+ headers["Authorization"] = `Bearer ${token}`;
+ }
+
+ const response = await fetch(streamUrl.toString(), {
+ method: "GET",
+ headers,
+ });
+
+ if (!response.ok) {
+ const error = await response.text();
+ return new Response(error, {
+ status: response.status,
+ headers: { "Content-Type": "application/json" },
+ });
+ }
+
+ // Return the SSE stream directly
+ return new Response(response.body, {
+ headers: {
+ "Content-Type": "text/event-stream",
+ "Cache-Control": "no-cache, no-transform",
+ Connection: "keep-alive",
+ "X-Accel-Buffering": "no",
+ },
+ });
+ } catch (error) {
+ console.error("Task stream proxy error:", error);
+ return new Response(
+ JSON.stringify({
+ error: "Failed to connect to task stream",
+ detail: error instanceof Error ? error.message : String(error),
+ }),
+ {
+ status: 500,
+ headers: { "Content-Type": "application/json" },
+ },
+ );
+ }
+}
diff --git a/autogpt_platform/frontend/src/app/api/helpers.ts b/autogpt_platform/frontend/src/app/api/helpers.ts
index e9a708ba4c..226f5fa786 100644
--- a/autogpt_platform/frontend/src/app/api/helpers.ts
+++ b/autogpt_platform/frontend/src/app/api/helpers.ts
@@ -175,9 +175,11 @@ export async function resolveResponse<
return res.data;
}
-export async function shouldShowOnboarding() {
- const isEnabled = await resolveResponse(getV1IsOnboardingEnabled());
+export async function getOnboardingStatus() {
+ const status = await resolveResponse(getV1IsOnboardingEnabled());
const onboarding = await resolveResponse(getV1OnboardingState());
const isCompleted = onboarding.completedSteps.includes("CONGRATS");
- return isEnabled && !isCompleted;
+ return {
+ shouldShowOnboarding: status.is_onboarding_enabled && !isCompleted,
+ };
}
diff --git a/autogpt_platform/frontend/src/app/api/mutators/custom-mutator.ts b/autogpt_platform/frontend/src/app/api/mutators/custom-mutator.ts
index 4578ac03fe..3c9eda7785 100644
--- a/autogpt_platform/frontend/src/app/api/mutators/custom-mutator.ts
+++ b/autogpt_platform/frontend/src/app/api/mutators/custom-mutator.ts
@@ -4,12 +4,12 @@ import {
getServerAuthToken,
} from "@/lib/autogpt-server-api/helpers";
-import { transformDates } from "./date-transformer";
-import { environment } from "@/services/environment";
import {
IMPERSONATION_HEADER_NAME,
IMPERSONATION_STORAGE_KEY,
} from "@/lib/constants";
+import { environment } from "@/services/environment";
+import { transformDates } from "./date-transformer";
const FRONTEND_BASE_URL =
process.env.NEXT_PUBLIC_FRONTEND_BASE_URL || "http://localhost:3000";
diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json
index 335ff008c4..e507f1b77f 100644
--- a/autogpt_platform/frontend/src/app/api/openapi.json
+++ b/autogpt_platform/frontend/src/app/api/openapi.json
@@ -917,6 +917,28 @@
"security": [{ "HTTPBearerJWT": [] }]
}
},
+ "/api/chat/config/ttl": {
+ "get": {
+ "tags": ["v2", "chat", "chat"],
+ "summary": "Get Ttl Config",
+ "description": "Get the stream TTL configuration.\n\nReturns the Time-To-Live settings for chat streams, which determines\nhow long clients can reconnect to an active stream.\n\nReturns:\n dict: TTL configuration with seconds and milliseconds values.",
+ "operationId": "getV2GetTtlConfig",
+ "responses": {
+ "200": {
+ "description": "Successful Response",
+ "content": {
+ "application/json": {
+ "schema": {
+ "additionalProperties": true,
+ "type": "object",
+ "title": "Response Getv2Getttlconfig"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
"/api/chat/health": {
"get": {
"tags": ["v2", "chat", "chat"],
@@ -939,6 +961,63 @@
}
}
},
+ "/api/chat/operations/{operation_id}/complete": {
+ "post": {
+ "tags": ["v2", "chat", "chat"],
+ "summary": "Complete Operation",
+ "description": "External completion webhook for long-running operations.\n\nCalled by Agent Generator (or other services) when an operation completes.\nThis triggers the stream registry to publish completion and continue LLM generation.\n\nArgs:\n operation_id: The operation ID to complete.\n request: Completion payload with success status and result/error.\n x_api_key: Internal API key for authentication.\n\nReturns:\n dict: Status of the completion.\n\nRaises:\n HTTPException: If API key is invalid or operation not found.",
+ "operationId": "postV2CompleteOperation",
+ "parameters": [
+ {
+ "name": "operation_id",
+ "in": "path",
+ "required": true,
+ "schema": { "type": "string", "title": "Operation Id" }
+ },
+ {
+ "name": "x-api-key",
+ "in": "header",
+ "required": false,
+ "schema": {
+ "anyOf": [{ "type": "string" }, { "type": "null" }],
+ "title": "X-Api-Key"
+ }
+ }
+ ],
+ "requestBody": {
+ "required": true,
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/OperationCompleteRequest"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "Successful Response",
+ "content": {
+ "application/json": {
+ "schema": {
+ "type": "object",
+ "additionalProperties": true,
+ "title": "Response Postv2Completeoperation"
+ }
+ }
+ }
+ },
+ "422": {
+ "description": "Validation Error",
+ "content": {
+ "application/json": {
+ "schema": { "$ref": "#/components/schemas/HTTPValidationError" }
+ }
+ }
+ }
+ }
+ }
+ },
"/api/chat/sessions": {
"get": {
"tags": ["v2", "chat", "chat"],
@@ -1022,7 +1101,7 @@
"get": {
"tags": ["v2", "chat", "chat"],
"summary": "Get Session",
- "description": "Retrieve the details of a specific chat session.\n\nLooks up a chat session by ID for the given user (if authenticated) and returns all session data including messages.\n\nArgs:\n session_id: The unique identifier for the desired chat session.\n user_id: The optional authenticated user ID, or None for anonymous access.\n\nReturns:\n SessionDetailResponse: Details for the requested session; raises NotFoundError if not found.",
+ "description": "Retrieve the details of a specific chat session.\n\nLooks up a chat session by ID for the given user (if authenticated) and returns all session data including messages.\nIf there's an active stream for this session, returns the task_id for reconnection.\n\nArgs:\n session_id: The unique identifier for the desired chat session.\n user_id: The optional authenticated user ID, or None for anonymous access.\n\nReturns:\n SessionDetailResponse: Details for the requested session, including active_stream info if applicable.",
"operationId": "getV2GetSession",
"security": [{ "HTTPBearerJWT": [] }],
"parameters": [
@@ -1157,7 +1236,7 @@
"post": {
"tags": ["v2", "chat", "chat"],
"summary": "Stream Chat Post",
- "description": "Stream chat responses for a session (POST with context support).\n\nStreams the AI/completion responses in real time over Server-Sent Events (SSE), including:\n - Text fragments as they are generated\n - Tool call UI elements (if invoked)\n - Tool execution results\n\nArgs:\n session_id: The chat session identifier to associate with the streamed messages.\n request: Request body containing message, is_user_message, and optional context.\n user_id: Optional authenticated user ID.\nReturns:\n StreamingResponse: SSE-formatted response chunks.",
+ "description": "Stream chat responses for a session (POST with context support).\n\nStreams the AI/completion responses in real time over Server-Sent Events (SSE), including:\n - Text fragments as they are generated\n - Tool call UI elements (if invoked)\n - Tool execution results\n\nThe AI generation runs in a background task that continues even if the client disconnects.\nAll chunks are written to Redis for reconnection support. If the client disconnects,\nthey can reconnect using GET /tasks/{task_id}/stream to resume from where they left off.\n\nArgs:\n session_id: The chat session identifier to associate with the streamed messages.\n request: Request body containing message, is_user_message, and optional context.\n user_id: Optional authenticated user ID.\nReturns:\n StreamingResponse: SSE-formatted response chunks. First chunk is a \"start\" event\n containing the task_id for reconnection.",
"operationId": "postV2StreamChatPost",
"security": [{ "HTTPBearerJWT": [] }],
"parameters": [
@@ -1195,6 +1274,94 @@
}
}
},
+ "/api/chat/tasks/{task_id}": {
+ "get": {
+ "tags": ["v2", "chat", "chat"],
+ "summary": "Get Task Status",
+ "description": "Get the status of a long-running task.\n\nArgs:\n task_id: The task ID to check.\n user_id: Authenticated user ID for ownership validation.\n\nReturns:\n dict: Task status including task_id, status, tool_name, and operation_id.\n\nRaises:\n NotFoundError: If task_id is not found or user doesn't have access.",
+ "operationId": "getV2GetTaskStatus",
+ "security": [{ "HTTPBearerJWT": [] }],
+ "parameters": [
+ {
+ "name": "task_id",
+ "in": "path",
+ "required": true,
+ "schema": { "type": "string", "title": "Task Id" }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Successful Response",
+ "content": {
+ "application/json": {
+ "schema": {
+ "type": "object",
+ "additionalProperties": true,
+ "title": "Response Getv2Gettaskstatus"
+ }
+ }
+ }
+ },
+ "401": {
+ "$ref": "#/components/responses/HTTP401NotAuthenticatedError"
+ },
+ "422": {
+ "description": "Validation Error",
+ "content": {
+ "application/json": {
+ "schema": { "$ref": "#/components/schemas/HTTPValidationError" }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/api/chat/tasks/{task_id}/stream": {
+ "get": {
+ "tags": ["v2", "chat", "chat"],
+ "summary": "Stream Task",
+ "description": "Reconnect to a long-running task's SSE stream.\n\nWhen a long-running operation (like agent generation) starts, the client\nreceives a task_id. If the connection drops, the client can reconnect\nusing this endpoint to resume receiving updates.\n\nArgs:\n task_id: The task ID from the operation_started response.\n user_id: Authenticated user ID for ownership validation.\n last_message_id: Last Redis Stream message ID received (\"0-0\" for full replay).\n\nReturns:\n StreamingResponse: SSE-formatted response chunks starting after last_message_id.\n\nRaises:\n HTTPException: 404 if task not found, 410 if task expired, 403 if access denied.",
+ "operationId": "getV2StreamTask",
+ "security": [{ "HTTPBearerJWT": [] }],
+ "parameters": [
+ {
+ "name": "task_id",
+ "in": "path",
+ "required": true,
+ "schema": { "type": "string", "title": "Task Id" }
+ },
+ {
+ "name": "last_message_id",
+ "in": "query",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "description": "Last Redis Stream message ID received (e.g., '1706540123456-0'). Use '0-0' for full replay.",
+ "default": "0-0",
+ "title": "Last Message Id"
+ },
+ "description": "Last Redis Stream message ID received (e.g., '1706540123456-0'). Use '0-0' for full replay."
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Successful Response",
+ "content": { "application/json": { "schema": {} } }
+ },
+ "401": {
+ "$ref": "#/components/responses/HTTP401NotAuthenticatedError"
+ },
+ "422": {
+ "description": "Validation Error",
+ "content": {
+ "application/json": {
+ "schema": { "$ref": "#/components/schemas/HTTPValidationError" }
+ }
+ }
+ }
+ }
+ }
+ },
"/api/credits": {
"get": {
"tags": ["v1", "credits"],
@@ -3339,7 +3506,7 @@
"get": {
"tags": ["v2", "library", "private"],
"summary": "List Library Agents",
- "description": "Get all agents in the user's library (both created and saved).\n\nArgs:\n user_id: ID of the authenticated user.\n search_term: Optional search term to filter agents by name/description.\n filter_by: List of filters to apply (favorites, created by user).\n sort_by: List of sorting criteria (created date, updated date).\n page: Page number to retrieve.\n page_size: Number of agents per page.\n\nReturns:\n A LibraryAgentResponse containing agents and pagination metadata.\n\nRaises:\n HTTPException: If a server/database error occurs.",
+ "description": "Get all agents in the user's library (both created and saved).",
"operationId": "getV2List library agents",
"security": [{ "HTTPBearerJWT": [] }],
"parameters": [
@@ -3394,7 +3561,7 @@
],
"responses": {
"200": {
- "description": "List of library agents",
+ "description": "Successful Response",
"content": {
"application/json": {
"schema": {
@@ -3413,17 +3580,13 @@
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
- },
- "500": {
- "description": "Server error",
- "content": { "application/json": {} }
}
}
},
"post": {
"tags": ["v2", "library", "private"],
"summary": "Add Marketplace Agent",
- "description": "Add an agent from the marketplace to the user's library.\n\nArgs:\n store_listing_version_id: ID of the store listing version to add.\n user_id: ID of the authenticated user.\n\nReturns:\n library_model.LibraryAgent: Agent added to the library\n\nRaises:\n HTTPException(404): If the listing version is not found.\n HTTPException(500): If a server/database error occurs.",
+ "description": "Add an agent from the marketplace to the user's library.",
"operationId": "postV2Add marketplace agent",
"security": [{ "HTTPBearerJWT": [] }],
"requestBody": {
@@ -3438,7 +3601,7 @@
},
"responses": {
"201": {
- "description": "Agent added successfully",
+ "description": "Successful Response",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/LibraryAgent" }
@@ -3448,7 +3611,6 @@
"401": {
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
},
- "404": { "description": "Store listing version not found" },
"422": {
"description": "Validation Error",
"content": {
@@ -3456,8 +3618,7 @@
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
- },
- "500": { "description": "Server error" }
+ }
}
}
},
@@ -3511,7 +3672,7 @@
"get": {
"tags": ["v2", "library", "private"],
"summary": "List Favorite Library Agents",
- "description": "Get all favorite agents in the user's library.\n\nArgs:\n user_id: ID of the authenticated user.\n page: Page number to retrieve.\n page_size: Number of agents per page.\n\nReturns:\n A LibraryAgentResponse containing favorite agents and pagination metadata.\n\nRaises:\n HTTPException: If a server/database error occurs.",
+ "description": "Get all favorite agents in the user's library.",
"operationId": "getV2List favorite library agents",
"security": [{ "HTTPBearerJWT": [] }],
"parameters": [
@@ -3563,10 +3724,6 @@
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
- },
- "500": {
- "description": "Server error",
- "content": { "application/json": {} }
}
}
}
@@ -3588,7 +3745,7 @@
],
"responses": {
"200": {
- "description": "Library agent found",
+ "description": "Successful Response",
"content": {
"application/json": {
"schema": {
@@ -3604,7 +3761,6 @@
"401": {
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
},
- "404": { "description": "Agent not found" },
"422": {
"description": "Validation Error",
"content": {
@@ -3620,7 +3776,7 @@
"delete": {
"tags": ["v2", "library", "private"],
"summary": "Delete Library Agent",
- "description": "Soft-delete the specified library agent.\n\nArgs:\n library_agent_id: ID of the library agent to delete.\n user_id: ID of the authenticated user.\n\nReturns:\n 204 No Content if successful.\n\nRaises:\n HTTPException(404): If the agent does not exist.\n HTTPException(500): If a server/database error occurs.",
+ "description": "Soft-delete the specified library agent.",
"operationId": "deleteV2Delete library agent",
"security": [{ "HTTPBearerJWT": [] }],
"parameters": [
@@ -3636,11 +3792,9 @@
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
- "204": { "description": "Agent deleted successfully" },
"401": {
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
},
- "404": { "description": "Agent not found" },
"422": {
"description": "Validation Error",
"content": {
@@ -3648,8 +3802,7 @@
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
- },
- "500": { "description": "Server error" }
+ }
}
},
"get": {
@@ -3690,7 +3843,7 @@
"patch": {
"tags": ["v2", "library", "private"],
"summary": "Update Library Agent",
- "description": "Update the library agent with the given fields.\n\nArgs:\n library_agent_id: ID of the library agent to update.\n payload: Fields to update (auto_update_version, is_favorite, etc.).\n user_id: ID of the authenticated user.\n\nRaises:\n HTTPException(500): If a server/database error occurs.",
+ "description": "Update the library agent with the given fields.",
"operationId": "patchV2Update library agent",
"security": [{ "HTTPBearerJWT": [] }],
"parameters": [
@@ -3713,7 +3866,7 @@
},
"responses": {
"200": {
- "description": "Agent updated successfully",
+ "description": "Successful Response",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/LibraryAgent" }
@@ -3730,8 +3883,7 @@
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
- },
- "500": { "description": "Server error" }
+ }
}
}
},
@@ -4540,8 +4692,7 @@
"content": {
"application/json": {
"schema": {
- "type": "boolean",
- "title": "Response Getv1Is Onboarding Enabled"
+ "$ref": "#/components/schemas/OnboardingStatusResponse"
}
}
}
@@ -4594,6 +4745,7 @@
"AGENT_NEW_RUN",
"AGENT_INPUT",
"CONGRATS",
+ "VISIT_COPILOT",
"MARKETPLACE_VISIT",
"BUILDER_OPEN"
],
@@ -6305,6 +6457,28 @@
"schema": { "$ref": "#/components/schemas/StoreWaitlistEntry" }
}
}
+ "/api/workspace/files/{file_id}/download": {
+ "get": {
+ "tags": ["workspace"],
+ "summary": "Download file by ID",
+ "description": "Download a file by its ID.\n\nReturns the file content directly or redirects to a signed URL for GCS.",
+ "operationId": "getWorkspaceDownload file by id",
+ "security": [{ "HTTPBearerJWT": [] }],
+ "parameters": [
+ {
+ "name": "file_id",
+ "in": "path",
+ "required": true,
+ "schema": { "type": "string", "title": "File Id" }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Successful Response",
+ "content": { "application/json": { "schema": {} } }
+ },
+ "401": {
+ "$ref": "#/components/responses/HTTP401NotAuthenticatedError"
},
"422": {
"description": "Validation Error",
@@ -6539,6 +6713,18 @@
"title": "AccuracyTrendsResponse",
"description": "Response model for accuracy trends and alerts."
},
+ "ActiveStreamInfo": {
+ "properties": {
+ "task_id": { "type": "string", "title": "Task Id" },
+ "last_message_id": { "type": "string", "title": "Last Message Id" },
+ "operation_id": { "type": "string", "title": "Operation Id" },
+ "tool_name": { "type": "string", "title": "Tool Name" }
+ },
+ "type": "object",
+ "required": ["task_id", "last_message_id", "operation_id", "tool_name"],
+ "title": "ActiveStreamInfo",
+ "description": "Information about an active stream for reconnection."
+ },
"AddUserCreditsResponse": {
"properties": {
"new_balance": { "type": "integer", "title": "New Balance" },
@@ -6773,6 +6959,11 @@
"title": "Has Human In The Loop",
"readOnly": true
},
+ "has_sensitive_action": {
+ "type": "boolean",
+ "title": "Has Sensitive Action",
+ "readOnly": true
+ },
"trigger_setup_info": {
"anyOf": [
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
@@ -6789,6 +6980,7 @@
"output_schema",
"has_external_trigger",
"has_human_in_the_loop",
+ "has_sensitive_action",
"trigger_setup_info"
],
"title": "BaseGraph"
@@ -8042,6 +8234,11 @@
"title": "Has Human In The Loop",
"readOnly": true
},
+ "has_sensitive_action": {
+ "type": "boolean",
+ "title": "Has Sensitive Action",
+ "readOnly": true
+ },
"trigger_setup_info": {
"anyOf": [
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
@@ -8065,6 +8262,7 @@
"output_schema",
"has_external_trigger",
"has_human_in_the_loop",
+ "has_sensitive_action",
"trigger_setup_info",
"credentials_input_schema"
],
@@ -8143,6 +8341,11 @@
"title": "Has Human In The Loop",
"readOnly": true
},
+ "has_sensitive_action": {
+ "type": "boolean",
+ "title": "Has Sensitive Action",
+ "readOnly": true
+ },
"trigger_setup_info": {
"anyOf": [
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
@@ -8167,6 +8370,7 @@
"output_schema",
"has_external_trigger",
"has_human_in_the_loop",
+ "has_sensitive_action",
"trigger_setup_info",
"credentials_input_schema"
],
@@ -8175,8 +8379,14 @@
"GraphSettings": {
"properties": {
"human_in_the_loop_safe_mode": {
- "anyOf": [{ "type": "boolean" }, { "type": "null" }],
- "title": "Human In The Loop Safe Mode"
+ "type": "boolean",
+ "title": "Human In The Loop Safe Mode",
+ "default": true
+ },
+ "sensitive_action_safe_mode": {
+ "type": "boolean",
+ "title": "Sensitive Action Safe Mode",
+ "default": false
}
},
"type": "object",
@@ -8334,6 +8544,16 @@
"title": "Has External Trigger",
"description": "Whether the agent has an external trigger (e.g. webhook) node"
},
+ "has_human_in_the_loop": {
+ "type": "boolean",
+ "title": "Has Human In The Loop",
+ "description": "Whether the agent has human-in-the-loop blocks"
+ },
+ "has_sensitive_action": {
+ "type": "boolean",
+ "title": "Has Sensitive Action",
+ "description": "Whether the agent has sensitive action blocks"
+ },
"trigger_setup_info": {
"anyOf": [
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
@@ -8341,6 +8561,25 @@
]
},
"new_output": { "type": "boolean", "title": "New Output" },
+ "execution_count": {
+ "type": "integer",
+ "title": "Execution Count",
+ "default": 0
+ },
+ "success_rate": {
+ "anyOf": [{ "type": "number" }, { "type": "null" }],
+ "title": "Success Rate"
+ },
+ "avg_correctness_score": {
+ "anyOf": [{ "type": "number" }, { "type": "null" }],
+ "title": "Avg Correctness Score"
+ },
+ "recent_executions": {
+ "items": { "$ref": "#/components/schemas/RecentExecution" },
+ "type": "array",
+ "title": "Recent Executions",
+ "description": "List of recent executions with status, score, and summary"
+ },
"can_access_graph": {
"type": "boolean",
"title": "Can Access Graph"
@@ -8380,6 +8619,8 @@
"output_schema",
"credentials_input_schema",
"has_external_trigger",
+ "has_human_in_the_loop",
+ "has_sensitive_action",
"new_output",
"can_access_graph",
"is_latest_version",
@@ -9122,6 +9363,19 @@
"title": "OAuthApplicationPublicInfo",
"description": "Public information about an OAuth application (for consent screen)"
},
+ "OnboardingStatusResponse": {
+ "properties": {
+ "is_onboarding_enabled": {
+ "type": "boolean",
+ "title": "Is Onboarding Enabled"
+ },
+ "is_chat_enabled": { "type": "boolean", "title": "Is Chat Enabled" }
+ },
+ "type": "object",
+ "required": ["is_onboarding_enabled", "is_chat_enabled"],
+ "title": "OnboardingStatusResponse",
+ "description": "Response for onboarding status check."
+ },
"OnboardingStep": {
"type": "string",
"enum": [
@@ -9132,6 +9386,7 @@
"AGENT_NEW_RUN",
"AGENT_INPUT",
"CONGRATS",
+ "VISIT_COPILOT",
"GET_RESULTS",
"MARKETPLACE_VISIT",
"MARKETPLACE_ADD_AGENT",
@@ -9149,6 +9404,27 @@
],
"title": "OnboardingStep"
},
+ "OperationCompleteRequest": {
+ "properties": {
+ "success": { "type": "boolean", "title": "Success" },
+ "result": {
+ "anyOf": [
+ { "additionalProperties": true, "type": "object" },
+ { "type": "string" },
+ { "type": "null" }
+ ],
+ "title": "Result"
+ },
+ "error": {
+ "anyOf": [{ "type": "string" }, { "type": "null" }],
+ "title": "Error"
+ }
+ },
+ "type": "object",
+ "required": ["success"],
+ "title": "OperationCompleteRequest",
+ "description": "Request model for external completion webhook."
+ },
"Pagination": {
"properties": {
"total_items": {
@@ -9187,6 +9463,12 @@
"title": "Node Exec Id",
"description": "Node execution ID (primary key)"
},
+ "node_id": {
+ "type": "string",
+ "title": "Node Id",
+ "description": "Node definition ID (for grouping)",
+ "default": ""
+ },
"user_id": {
"type": "string",
"title": "User Id",
@@ -9286,7 +9568,7 @@
"created_at"
],
"title": "PendingHumanReviewModel",
- "description": "Response model for pending human review data.\n\nRepresents a human review request that is awaiting user action.\nContains all necessary information for a user to review and approve\nor reject data from a Human-in-the-Loop block execution.\n\nAttributes:\n id: Unique identifier for the review record\n user_id: ID of the user who must perform the review\n node_exec_id: ID of the node execution that created this review\n graph_exec_id: ID of the graph execution containing the node\n graph_id: ID of the graph template being executed\n graph_version: Version number of the graph template\n payload: The actual data payload awaiting review\n instructions: Instructions or message for the reviewer\n editable: Whether the reviewer can edit the data\n status: Current review status (WAITING, APPROVED, or REJECTED)\n review_message: Optional message from the reviewer\n created_at: Timestamp when review was created\n updated_at: Timestamp when review was last modified\n reviewed_at: Timestamp when review was completed (if applicable)"
+ "description": "Response model for pending human review data.\n\nRepresents a human review request that is awaiting user action.\nContains all necessary information for a user to review and approve\nor reject data from a Human-in-the-Loop block execution.\n\nAttributes:\n id: Unique identifier for the review record\n user_id: ID of the user who must perform the review\n node_exec_id: ID of the node execution that created this review\n node_id: ID of the node definition (for grouping reviews from same node)\n graph_exec_id: ID of the graph execution containing the node\n graph_id: ID of the graph template being executed\n graph_version: Version number of the graph template\n payload: The actual data payload awaiting review\n instructions: Instructions or message for the reviewer\n editable: Whether the reviewer can edit the data\n status: Current review status (WAITING, APPROVED, or REJECTED)\n review_message: Optional message from the reviewer\n created_at: Timestamp when review was created\n updated_at: Timestamp when review was last modified\n reviewed_at: Timestamp when review was completed (if applicable)"
},
"PostmarkBounceEnum": {
"type": "integer",
@@ -9713,6 +9995,23 @@
"required": ["providers", "pagination"],
"title": "ProviderResponse"
},
+ "RecentExecution": {
+ "properties": {
+ "status": { "type": "string", "title": "Status" },
+ "correctness_score": {
+ "anyOf": [{ "type": "number" }, { "type": "null" }],
+ "title": "Correctness Score"
+ },
+ "activity_summary": {
+ "anyOf": [{ "type": "string" }, { "type": "null" }],
+ "title": "Activity Summary"
+ }
+ },
+ "type": "object",
+ "required": ["status"],
+ "title": "RecentExecution",
+ "description": "Summary of a recent execution for quality assessment.\n\nUsed by the LLM to understand the agent's recent performance with specific examples\nrather than just aggregate statistics."
+ },
"RefundRequest": {
"properties": {
"id": { "type": "string", "title": "Id" },
@@ -9789,6 +10088,12 @@
],
"title": "Reviewed Data",
"description": "Optional edited data (ignored if approved=False)"
+ },
+ "auto_approve_future": {
+ "type": "boolean",
+ "title": "Auto Approve Future",
+ "description": "If true and this review is approved, future executions of this same block (node) will be automatically approved. This only affects approved reviews.",
+ "default": false
}
},
"type": "object",
@@ -9808,7 +10113,7 @@
"type": "object",
"required": ["reviews"],
"title": "ReviewRequest",
- "description": "Request model for processing ALL pending reviews for an execution.\n\nThis request must include ALL pending reviews for a graph execution.\nEach review will be either approved (with optional data modifications)\nor rejected (data ignored). The execution will resume only after ALL reviews are processed."
+ "description": "Request model for processing ALL pending reviews for an execution.\n\nThis request must include ALL pending reviews for a graph execution.\nEach review will be either approved (with optional data modifications)\nor rejected (data ignored). The execution will resume only after ALL reviews are processed.\n\nEach review item can individually specify whether to auto-approve future executions\nof the same block via the `auto_approve_future` field on ReviewItem."
},
"ReviewResponse": {
"properties": {
@@ -9975,6 +10280,12 @@
"items": { "additionalProperties": true, "type": "object" },
"type": "array",
"title": "Messages"
+ },
+ "active_stream": {
+ "anyOf": [
+ { "$ref": "#/components/schemas/ActiveStreamInfo" },
+ { "type": "null" }
+ ]
}
},
"type": "object",
@@ -10130,7 +10441,8 @@
"sub_heading": { "type": "string", "title": "Sub Heading" },
"description": { "type": "string", "title": "Description" },
"runs": { "type": "integer", "title": "Runs" },
- "rating": { "type": "number", "title": "Rating" }
+ "rating": { "type": "number", "title": "Rating" },
+ "agent_graph_id": { "type": "string", "title": "Agent Graph Id" }
},
"type": "object",
"required": [
@@ -10142,7 +10454,8 @@
"sub_heading",
"description",
"runs",
- "rating"
+ "rating",
+ "agent_graph_id"
],
"title": "StoreAgent"
},
diff --git a/autogpt_platform/frontend/src/app/api/proxy/[...path]/route.ts b/autogpt_platform/frontend/src/app/api/proxy/[...path]/route.ts
index 293c406373..442bd77e0f 100644
--- a/autogpt_platform/frontend/src/app/api/proxy/[...path]/route.ts
+++ b/autogpt_platform/frontend/src/app/api/proxy/[...path]/route.ts
@@ -1,5 +1,6 @@
import {
ApiError,
+ getServerAuthToken,
makeAuthenticatedFileUpload,
makeAuthenticatedRequest,
} from "@/lib/autogpt-server-api/helpers";
@@ -15,6 +16,69 @@ function buildBackendUrl(path: string[], queryString: string): string {
return `${environment.getAGPTServerBaseUrl()}/${backendPath}${queryString}`;
}
+/**
+ * Check if this is a workspace file download request that needs binary response handling.
+ */
+function isWorkspaceDownloadRequest(path: string[]): boolean {
+ // Match pattern: api/workspace/files/{id}/download (5 segments)
+ return (
+ path.length === 5 &&
+ path[0] === "api" &&
+ path[1] === "workspace" &&
+ path[2] === "files" &&
+ path[path.length - 1] === "download"
+ );
+}
+
+/**
+ * Handle workspace file download requests with proper binary response streaming.
+ */
+async function handleWorkspaceDownload(
+ req: NextRequest,
+ backendUrl: string,
+): Promise<NextResponse> {
+ const token = await getServerAuthToken();
+
+ const headers: Record = {};
+ if (token && token !== "no-token-found") {
+ headers["Authorization"] = `Bearer ${token}`;
+ }
+
+ const response = await fetch(backendUrl, {
+ method: "GET",
+ headers,
+ redirect: "follow", // Follow redirects to signed URLs
+ });
+
+ if (!response.ok) {
+ return NextResponse.json(
+ { error: `Failed to download file: ${response.statusText}` },
+ { status: response.status },
+ );
+ }
+
+ // Get the content type from the backend response
+ const contentType =
+ response.headers.get("Content-Type") || "application/octet-stream";
+ const contentDisposition = response.headers.get("Content-Disposition");
+
+ // Stream the response body
+ const responseHeaders: Record<string, string> = {
+ "Content-Type": contentType,
+ };
+
+ if (contentDisposition) {
+ responseHeaders["Content-Disposition"] = contentDisposition;
+ }
+
+ // Return the binary content
+ const arrayBuffer = await response.arrayBuffer();
+ return new NextResponse(arrayBuffer, {
+ status: 200,
+ headers: responseHeaders,
+ });
+}
+
async function handleJsonRequest(
req: NextRequest,
method: string,
@@ -180,6 +244,11 @@ async function handler(
};
try {
+ // Handle workspace file downloads separately (binary response)
+ if (method === "GET" && isWorkspaceDownloadRequest(path)) {
+ return await handleWorkspaceDownload(req, backendUrl);
+ }
+
if (method === "GET" || method === "DELETE") {
responseBody = await handleGetDeleteRequest(method, backendUrl, req);
} else if (contentType?.includes("application/json")) {
diff --git a/autogpt_platform/frontend/src/app/api/transcribe/route.ts b/autogpt_platform/frontend/src/app/api/transcribe/route.ts
new file mode 100644
index 0000000000..10c182cdfa
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/api/transcribe/route.ts
@@ -0,0 +1,77 @@
+import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers";
+import { NextRequest, NextResponse } from "next/server";
+
+const WHISPER_API_URL = "https://api.openai.com/v1/audio/transcriptions";
+const MAX_FILE_SIZE = 25 * 1024 * 1024; // 25MB - Whisper's limit
+
+function getExtensionFromMimeType(mimeType: string): string {
+ const subtype = mimeType.split("/")[1]?.split(";")[0];
+ return subtype || "webm";
+}
+
+export async function POST(request: NextRequest) {
+ const token = await getServerAuthToken();
+
+ if (!token || token === "no-token-found") {
+ return NextResponse.json({ error: "Unauthorized" }, { status: 401 });
+ }
+
+ const apiKey = process.env.OPENAI_API_KEY;
+
+ if (!apiKey) {
+ return NextResponse.json(
+ { error: "OpenAI API key not configured" },
+ { status: 500 },
+ );
+ }
+
+ try {
+ const formData = await request.formData();
+ const audioFile = formData.get("audio");
+
+ if (!audioFile || !(audioFile instanceof Blob)) {
+ return NextResponse.json(
+ { error: "No audio file provided" },
+ { status: 400 },
+ );
+ }
+
+ if (audioFile.size > MAX_FILE_SIZE) {
+ return NextResponse.json(
+ { error: "File too large. Maximum size is 25MB." },
+ { status: 413 },
+ );
+ }
+
+ const ext = getExtensionFromMimeType(audioFile.type);
+ const whisperFormData = new FormData();
+ whisperFormData.append("file", audioFile, `recording.${ext}`);
+ whisperFormData.append("model", "whisper-1");
+
+ const response = await fetch(WHISPER_API_URL, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ },
+ body: whisperFormData,
+ });
+
+ if (!response.ok) {
+ const errorData = await response.json().catch(() => ({}));
+ console.error("Whisper API error:", errorData);
+ return NextResponse.json(
+ { error: errorData.error?.message || "Transcription failed" },
+ { status: response.status },
+ );
+ }
+
+ const result = await response.json();
+ return NextResponse.json({ text: result.text });
+ } catch (error) {
+ console.error("Transcription error:", error);
+ return NextResponse.json(
+ { error: "Failed to process audio" },
+ { status: 500 },
+ );
+ }
+}
diff --git a/autogpt_platform/frontend/src/app/globals.css b/autogpt_platform/frontend/src/app/globals.css
index 0625c26082..1f782f753b 100644
--- a/autogpt_platform/frontend/src/app/globals.css
+++ b/autogpt_platform/frontend/src/app/globals.css
@@ -141,52 +141,6 @@
}
}
-@keyframes shimmer {
- 0% {
- background-position: -200% 0;
- }
- 100% {
- background-position: 200% 0;
- }
-}
-
-@keyframes l3 {
- 25% {
- background-position:
- 0 0,
- 100% 100%,
- 100% calc(100% - 5px);
- }
- 50% {
- background-position:
- 0 100%,
- 100% 100%,
- 0 calc(100% - 5px);
- }
- 75% {
- background-position:
- 0 100%,
- 100% 0,
- 100% 5px;
- }
-}
-
-.loader {
- width: 80px;
- height: 70px;
- border: 5px solid rgb(241 245 249);
- padding: 0 8px;
- box-sizing: border-box;
- background:
- linear-gradient(rgb(15 23 42) 0 0) 0 0/8px 20px,
- linear-gradient(rgb(15 23 42) 0 0) 100% 0/8px 20px,
- radial-gradient(farthest-side, rgb(15 23 42) 90%, #0000) 0 5px/8px 8px
- content-box,
- transparent;
- background-repeat: no-repeat;
- animation: l3 2s infinite linear;
-}
-
input[type="number"]::-webkit-outer-spin-button,
input[type="number"]::-webkit-inner-spin-button {
-webkit-appearance: none;
diff --git a/autogpt_platform/frontend/src/app/page.tsx b/autogpt_platform/frontend/src/app/page.tsx
index b499a40d71..ce67760eda 100644
--- a/autogpt_platform/frontend/src/app/page.tsx
+++ b/autogpt_platform/frontend/src/app/page.tsx
@@ -1,5 +1,15 @@
-import { redirect } from "next/navigation";
+"use client";
+
+import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
+import { useRouter } from "next/navigation";
+import { useEffect } from "react";
export default function Page() {
- redirect("/marketplace");
+ const router = useRouter();
+
+ useEffect(() => {
+ router.replace("/copilot");
+ }, [router]);
+
+ return <LoadingSpinner />;
}
diff --git a/autogpt_platform/frontend/src/app/providers.tsx b/autogpt_platform/frontend/src/app/providers.tsx
index 8ea199abc8..267814e7c2 100644
--- a/autogpt_platform/frontend/src/app/providers.tsx
+++ b/autogpt_platform/frontend/src/app/providers.tsx
@@ -6,28 +6,40 @@ import { BackendAPIProvider } from "@/lib/autogpt-server-api/context";
import { getQueryClient } from "@/lib/react-query/queryClient";
import CredentialsProvider from "@/providers/agent-credentials/credentials-provider";
import OnboardingProvider from "@/providers/onboarding/onboarding-provider";
+import {
+ PostHogPageViewTracker,
+ PostHogProvider,
+ PostHogUserTracker,
+} from "@/providers/posthog/posthog-provider";
import { LaunchDarklyProvider } from "@/services/feature-flags/feature-flag-provider";
import { QueryClientProvider } from "@tanstack/react-query";
import { ThemeProvider, ThemeProviderProps } from "next-themes";
import { NuqsAdapter } from "nuqs/adapters/next/app";
+import { Suspense } from "react";
export function Providers({ children, ...props }: ThemeProviderProps) {
const queryClient = getQueryClient();
return (
-
-
-
-
-
-
- {children}
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+ {children}
+
+
+
+
+
+
);
diff --git a/autogpt_platform/frontend/src/components/atoms/Badge/Badge.test.tsx b/autogpt_platform/frontend/src/components/atoms/Badge/Badge.test.tsx
deleted file mode 100644
index cd8531375b..0000000000
--- a/autogpt_platform/frontend/src/components/atoms/Badge/Badge.test.tsx
+++ /dev/null
@@ -1,81 +0,0 @@
-// import { render, screen } from "@testing-library/react";
-// import { describe, expect, it } from "vitest";
-// import { Badge } from "./Badge";
-
-// describe("Badge Component", () => {
-// it("renders badge with content", () => {
-// render(Success);
-
-// expect(screen.getByText("Success")).toBeInTheDocument();
-// });
-
-// it("applies correct variant styles", () => {
-// const { rerender } = render(Success);
-// let badge = screen.getByText("Success");
-// expect(badge).toHaveClass("bg-green-100", "text-green-800");
-
-// rerender(Error);
-// badge = screen.getByText("Error");
-// expect(badge).toHaveClass("bg-red-100", "text-red-800");
-
-// rerender(Info);
-// badge = screen.getByText("Info");
-// expect(badge).toHaveClass("bg-slate-100", "text-slate-800");
-// });
-
-// it("applies custom className", () => {
-// render(
-//
-// Success
-// ,
-// );
-
-// const badge = screen.getByText("Success");
-// expect(badge).toHaveClass("custom-class");
-// });
-
-// it("renders as span element", () => {
-// render(Success);
-
-// const badge = screen.getByText("Success");
-// expect(badge.tagName).toBe("SPAN");
-// });
-
-// it("renders children correctly", () => {
-// render(
-//
-// Custom Content
-// ,
-// );
-
-// expect(screen.getByText("Custom")).toBeInTheDocument();
-// expect(screen.getByText("Content")).toBeInTheDocument();
-// });
-
-// it("supports all badge variants", () => {
-// const variants = ["success", "error", "info"] as const;
-
-// variants.forEach((variant) => {
-// const { unmount } = render(
-//
-// {variant}
-// ,
-// );
-
-// expect(screen.getByTestId(`badge-${variant}`)).toBeInTheDocument();
-// unmount();
-// });
-// });
-
-// it("handles long text content", () => {
-// render(
-//
-// Very long text that should be handled properly by the component
-// ,
-// );
-
-// const badge = screen.getByText(/Very long text/);
-// expect(badge).toBeInTheDocument();
-// expect(badge).toHaveClass("overflow-hidden", "text-ellipsis");
-// });
-// });
diff --git a/autogpt_platform/frontend/src/components/atoms/Skeleton/Skeleton.tsx b/autogpt_platform/frontend/src/components/atoms/Skeleton/Skeleton.tsx
new file mode 100644
index 0000000000..4789e281ce
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/atoms/Skeleton/Skeleton.tsx
@@ -0,0 +1,14 @@
+import { cn } from "@/lib/utils";
+
+interface Props extends React.HTMLAttributes<HTMLDivElement> {
+ className?: string;
+}
+
+export function Skeleton({ className, ...props }: Props) {
+ return (
+
+ );
+}
diff --git a/autogpt_platform/frontend/src/components/atoms/Skeleton/skeleton.stories.tsx b/autogpt_platform/frontend/src/components/atoms/Skeleton/skeleton.stories.tsx
index 04d87a6e0e..69bb7c3440 100644
--- a/autogpt_platform/frontend/src/components/atoms/Skeleton/skeleton.stories.tsx
+++ b/autogpt_platform/frontend/src/components/atoms/Skeleton/skeleton.stories.tsx
@@ -1,4 +1,4 @@
-import { Skeleton } from "@/components/__legacy__/ui/skeleton";
+import { Skeleton } from "./Skeleton";
import type { Meta, StoryObj } from "@storybook/nextjs";
const meta: Meta = {
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx
new file mode 100644
index 0000000000..da454150bf
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx
@@ -0,0 +1,114 @@
+"use client";
+
+import { useCopilotSessionId } from "@/app/(platform)/copilot/useCopilotSessionId";
+import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
+import { Text } from "@/components/atoms/Text/Text";
+import { cn } from "@/lib/utils";
+import { useEffect, useRef } from "react";
+import { ChatContainer } from "./components/ChatContainer/ChatContainer";
+import { ChatErrorState } from "./components/ChatErrorState/ChatErrorState";
+import { useChat } from "./useChat";
+
+export interface ChatProps {
+ className?: string;
+ initialPrompt?: string;
+ onSessionNotFound?: () => void;
+ onStreamingChange?: (isStreaming: boolean) => void;
+}
+
+export function Chat({
+ className,
+ initialPrompt,
+ onSessionNotFound,
+ onStreamingChange,
+}: ChatProps) {
+ const { urlSessionId } = useCopilotSessionId();
+ const hasHandledNotFoundRef = useRef(false);
+ const {
+ session,
+ messages,
+ isLoading,
+ isCreating,
+ error,
+ isSessionNotFound,
+ sessionId,
+ createSession,
+ showLoader,
+ startPollingForOperation,
+ } = useChat({ urlSessionId });
+
+ // Extract active stream info for reconnection
+ const activeStream = (
+ session as {
+ active_stream?: {
+ task_id: string;
+ last_message_id: string;
+ operation_id: string;
+ tool_name: string;
+ };
+ }
+ )?.active_stream;
+
+ useEffect(() => {
+ if (!onSessionNotFound) return;
+ if (!urlSessionId) return;
+ if (!isSessionNotFound || isLoading || isCreating) return;
+ if (hasHandledNotFoundRef.current) return;
+ hasHandledNotFoundRef.current = true;
+ onSessionNotFound();
+ }, [
+ onSessionNotFound,
+ urlSessionId,
+ isSessionNotFound,
+ isLoading,
+ isCreating,
+ ]);
+
+ const shouldShowLoader = showLoader && (isLoading || isCreating);
+
+ return (
+
+ {/* Main Content */}
+
+ {/* Loading State */}
+ {shouldShowLoader && (
+
+
+
+
+ Loading your chat...
+
+
+
+ )}
+
+ {/* Error State */}
+ {error && !isLoading && (
+
+ )}
+
+ {/* Session Content */}
+ {sessionId && !isLoading && !error && (
+
+ )}
+
+
+ );
+}
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/SSE_RECONNECTION.md b/autogpt_platform/frontend/src/components/contextual/Chat/SSE_RECONNECTION.md
new file mode 100644
index 0000000000..9e78679f4e
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/SSE_RECONNECTION.md
@@ -0,0 +1,159 @@
+# SSE Reconnection Contract for Long-Running Operations
+
+This document describes the client-side contract for handling SSE (Server-Sent Events) disconnections and reconnecting to long-running background tasks.
+
+## Overview
+
+When a user triggers a long-running operation (like agent generation), the backend:
+
+1. Spawns a background task that survives SSE disconnections
+2. Returns an `operation_started` response with a `task_id`
+3. Stores stream messages in Redis Streams for replay
+
+Clients can reconnect to the task stream at any time to receive missed messages.
+
+## Client-Side Flow
+
+### 1. Receiving Operation Started
+
+When you receive an `operation_started` tool response:
+
+```typescript
+// The response includes a task_id for reconnection
+{
+ type: "operation_started",
+ tool_name: "generate_agent",
+ operation_id: "uuid-...",
+ task_id: "task-uuid-...", // <-- Store this for reconnection
+ message: "Operation started. You can close this tab."
+}
+```
+
+### 2. Storing Task Info
+
+Use the chat store to track the active task:
+
+```typescript
+import { useChatStore } from "./chat-store";
+
+// When operation_started is received:
+useChatStore.getState().setActiveTask(sessionId, {
+ taskId: response.task_id,
+ operationId: response.operation_id,
+ toolName: response.tool_name,
+ lastMessageId: "0",
+});
+```
+
+### 3. Reconnecting to a Task
+
+To reconnect (e.g., after page refresh or tab reopen):
+
+```typescript
+const { reconnectToTask, getActiveTask } = useChatStore.getState();
+
+// Check if there's an active task for this session
+const activeTask = getActiveTask(sessionId);
+
+if (activeTask) {
+ // Reconnect to the task stream
+ await reconnectToTask(
+ sessionId,
+ activeTask.taskId,
+ activeTask.lastMessageId, // Resume from last position
+ (chunk) => {
+ // Handle incoming chunks
+ console.log("Received chunk:", chunk);
+ },
+ );
+}
+```
+
+### 4. Tracking Message Position
+
+To enable precise replay, update the last message ID as chunks arrive:
+
+```typescript
+const { updateTaskLastMessageId } = useChatStore.getState();
+
+function handleChunk(chunk: StreamChunk) {
+ // If chunk has an index/id, track it
+ if (chunk.idx !== undefined) {
+ updateTaskLastMessageId(sessionId, String(chunk.idx));
+ }
+}
+```
+
+## API Endpoints
+
+### Task Stream Reconnection
+
+```
+GET /api/chat/tasks/{taskId}/stream?last_message_id={idx}
+```
+
+- `taskId`: The task ID from `operation_started`
+- `last_message_id`: Last received message index (default: "0" for full replay)
+
+Returns: SSE stream of missed messages + live updates
+
+## Chunk Types
+
+The reconnected stream follows the same Vercel AI SDK protocol:
+
+| Type | Description |
+| ----------------------- | ----------------------- |
+| `start` | Message lifecycle start |
+| `text-delta` | Streaming text content |
+| `text-end` | Text block completed |
+| `tool-output-available` | Tool result available |
+| `finish` | Stream completed |
+| `error` | Error occurred |
+
+## Error Handling
+
+If reconnection fails:
+
+1. Check if task still exists (may have expired - default TTL: 1 hour)
+2. Fall back to polling the session for final state
+3. Show appropriate UI message to user
+
+## Persistence Considerations
+
+For robust reconnection across browser restarts:
+
+```typescript
+// Store in localStorage/sessionStorage
+const ACTIVE_TASKS_KEY = "chat_active_tasks";
+
+function persistActiveTask(sessionId: string, task: ActiveTaskInfo) {
+ const tasks = JSON.parse(localStorage.getItem(ACTIVE_TASKS_KEY) || "{}");
+ tasks[sessionId] = task;
+ localStorage.setItem(ACTIVE_TASKS_KEY, JSON.stringify(tasks));
+}
+
+function loadPersistedTasks(): Record<string, ActiveTaskInfo> {
+ return JSON.parse(localStorage.getItem(ACTIVE_TASKS_KEY) || "{}");
+}
+```
+
+## Backend Configuration
+
+The following backend settings affect reconnection behavior:
+
+| Setting | Default | Description |
+| ------------------- | ------- | ---------------------------------- |
+| `stream_ttl` | 3600s | How long streams are kept in Redis |
+| `stream_max_length` | 1000 | Max messages per stream |
+
+## Testing
+
+To test reconnection locally:
+
+1. Start a long-running operation (e.g., agent generation)
+2. Note the `task_id` from the `operation_started` response
+3. Close the browser tab
+4. Reopen and call `reconnectToTask` with the saved `task_id`
+5. Verify that missed messages are replayed
+
+See the main README for full local development setup.
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/chat-constants.ts b/autogpt_platform/frontend/src/components/contextual/Chat/chat-constants.ts
new file mode 100644
index 0000000000..8802de2155
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/chat-constants.ts
@@ -0,0 +1,16 @@
+/**
+ * Constants for the chat system.
+ *
+ * Centralizes magic strings and values used across chat components.
+ */
+
+// LocalStorage keys
+export const STORAGE_KEY_ACTIVE_TASKS = "chat_active_tasks";
+
+// Redis Stream IDs
+export const INITIAL_MESSAGE_ID = "0";
+export const INITIAL_STREAM_ID = "0-0";
+
+// TTL values (in milliseconds)
+export const COMPLETED_STREAM_TTL_MS = 5 * 60 * 1000; // 5 minutes
+export const ACTIVE_TASK_TTL_MS = 60 * 60 * 1000; // 1 hour
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/chat-store.ts b/autogpt_platform/frontend/src/components/contextual/Chat/chat-store.ts
new file mode 100644
index 0000000000..3083f65d2c
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/chat-store.ts
@@ -0,0 +1,501 @@
+"use client";
+
+import { create } from "zustand";
+import {
+ ACTIVE_TASK_TTL_MS,
+ COMPLETED_STREAM_TTL_MS,
+ INITIAL_STREAM_ID,
+ STORAGE_KEY_ACTIVE_TASKS,
+} from "./chat-constants";
+import type {
+ ActiveStream,
+ StreamChunk,
+ StreamCompleteCallback,
+ StreamResult,
+ StreamStatus,
+} from "./chat-types";
+import { executeStream, executeTaskReconnect } from "./stream-executor";
+
+export interface ActiveTaskInfo {
+ taskId: string;
+ sessionId: string;
+ operationId: string;
+ toolName: string;
+ lastMessageId: string;
+ startedAt: number;
+}
+
+/** Load active tasks from localStorage */
+function loadPersistedTasks(): Map<string, ActiveTaskInfo> {
+ if (typeof window === "undefined") return new Map();
+ try {
+ const stored = localStorage.getItem(STORAGE_KEY_ACTIVE_TASKS);
+ if (!stored) return new Map();
+    const parsed = JSON.parse(stored) as Record<string, ActiveTaskInfo>;
+    const now = Date.now();
+    const tasks = new Map<string, ActiveTaskInfo>();
+ // Filter out expired tasks
+ for (const [sessionId, task] of Object.entries(parsed)) {
+ if (now - task.startedAt < ACTIVE_TASK_TTL_MS) {
+ tasks.set(sessionId, task);
+ }
+ }
+ return tasks;
+ } catch {
+ return new Map();
+ }
+}
+
+/** Save active tasks to localStorage */
+function persistTasks(tasks: Map<string, ActiveTaskInfo>): void {
+ if (typeof window === "undefined") return;
+ try {
+    const obj: Record<string, ActiveTaskInfo> = {};
+ for (const [sessionId, task] of tasks) {
+ obj[sessionId] = task;
+ }
+ localStorage.setItem(STORAGE_KEY_ACTIVE_TASKS, JSON.stringify(obj));
+ } catch {
+ // Ignore storage errors
+ }
+}
+
+interface ChatStoreState {
+  activeStreams: Map<string, ActiveStream>;
+  completedStreams: Map<string, StreamResult>;
+  activeSessions: Set<string>;
+  streamCompleteCallbacks: Set<StreamCompleteCallback>;
+  /** Active tasks for SSE reconnection - keyed by sessionId */
+  activeTasks: Map<string, ActiveTaskInfo>;
+}
+
+interface ChatStoreActions {
+ startStream: (
+ sessionId: string,
+ message: string,
+ isUserMessage: boolean,
+ context?: { url: string; content: string },
+ onChunk?: (chunk: StreamChunk) => void,
+  ) => Promise<void>;
+ stopStream: (sessionId: string) => void;
+ subscribeToStream: (
+ sessionId: string,
+ onChunk: (chunk: StreamChunk) => void,
+ skipReplay?: boolean,
+ ) => () => void;
+ getStreamStatus: (sessionId: string) => StreamStatus;
+ getCompletedStream: (sessionId: string) => StreamResult | undefined;
+ clearCompletedStream: (sessionId: string) => void;
+ isStreaming: (sessionId: string) => boolean;
+ registerActiveSession: (sessionId: string) => void;
+ unregisterActiveSession: (sessionId: string) => void;
+ isSessionActive: (sessionId: string) => boolean;
+ onStreamComplete: (callback: StreamCompleteCallback) => () => void;
+ /** Track active task for SSE reconnection */
+ setActiveTask: (
+ sessionId: string,
+    taskInfo: Omit<ActiveTaskInfo, "sessionId" | "startedAt">,
+ ) => void;
+ /** Get active task for a session */
+ getActiveTask: (sessionId: string) => ActiveTaskInfo | undefined;
+ /** Clear active task when operation completes */
+ clearActiveTask: (sessionId: string) => void;
+ /** Reconnect to an existing task stream */
+ reconnectToTask: (
+ sessionId: string,
+ taskId: string,
+ lastMessageId?: string,
+ onChunk?: (chunk: StreamChunk) => void,
+  ) => Promise<void>;
+ /** Update last message ID for a task (for tracking replay position) */
+ updateTaskLastMessageId: (sessionId: string, lastMessageId: string) => void;
+}
+
+type ChatStore = ChatStoreState & ChatStoreActions;
+
+function notifyStreamComplete(
+  callbacks: Set<StreamCompleteCallback>,
+ sessionId: string,
+) {
+ for (const callback of callbacks) {
+ try {
+ callback(sessionId);
+ } catch (err) {
+ console.warn("[ChatStore] Stream complete callback error:", err);
+ }
+ }
+}
+
+function cleanupExpiredStreams(
+  completedStreams: Map<string, StreamResult>,
+): Map<string, StreamResult> {
+ const now = Date.now();
+ const cleaned = new Map(completedStreams);
+ for (const [sessionId, result] of cleaned) {
+ if (now - result.completedAt > COMPLETED_STREAM_TTL_MS) {
+ cleaned.delete(sessionId);
+ }
+ }
+ return cleaned;
+}
+
+/**
+ * Finalize a stream by moving it from activeStreams to completedStreams.
+ * Also handles cleanup and notifications.
+ */
+function finalizeStream(
+ sessionId: string,
+ stream: ActiveStream,
+ onChunk: ((chunk: StreamChunk) => void) | undefined,
+ get: () => ChatStoreState & ChatStoreActions,
+  set: (state: Partial<ChatStoreState>) => void,
+): void {
+ if (onChunk) stream.onChunkCallbacks.delete(onChunk);
+
+ if (stream.status !== "streaming") {
+ const currentState = get();
+ const finalActiveStreams = new Map(currentState.activeStreams);
+ let finalCompletedStreams = new Map(currentState.completedStreams);
+
+ const storedStream = finalActiveStreams.get(sessionId);
+ if (storedStream === stream) {
+ const result: StreamResult = {
+ sessionId,
+ status: stream.status,
+ chunks: stream.chunks,
+ completedAt: Date.now(),
+ error: stream.error,
+ };
+ finalCompletedStreams.set(sessionId, result);
+ finalActiveStreams.delete(sessionId);
+ finalCompletedStreams = cleanupExpiredStreams(finalCompletedStreams);
+ set({
+ activeStreams: finalActiveStreams,
+ completedStreams: finalCompletedStreams,
+ });
+
+ if (stream.status === "completed" || stream.status === "error") {
+ notifyStreamComplete(currentState.streamCompleteCallbacks, sessionId);
+ }
+ }
+ }
+}
+
+/**
+ * Clean up an existing stream for a session and move it to completed streams.
+ * Returns updated maps for both active and completed streams.
+ */
+function cleanupExistingStream(
+ sessionId: string,
+  activeStreams: Map<string, ActiveStream>,
+  completedStreams: Map<string, StreamResult>,
+  callbacks: Set<StreamCompleteCallback>,
+): {
+  activeStreams: Map<string, ActiveStream>;
+  completedStreams: Map<string, StreamResult>;
+} {
+ const newActiveStreams = new Map(activeStreams);
+ let newCompletedStreams = new Map(completedStreams);
+
+ const existingStream = newActiveStreams.get(sessionId);
+ if (existingStream) {
+ existingStream.abortController.abort();
+ const normalizedStatus =
+ existingStream.status === "streaming"
+ ? "completed"
+ : existingStream.status;
+ const result: StreamResult = {
+ sessionId,
+ status: normalizedStatus,
+ chunks: existingStream.chunks,
+ completedAt: Date.now(),
+ error: existingStream.error,
+ };
+ newCompletedStreams.set(sessionId, result);
+ newActiveStreams.delete(sessionId);
+ newCompletedStreams = cleanupExpiredStreams(newCompletedStreams);
+ if (normalizedStatus === "completed" || normalizedStatus === "error") {
+ notifyStreamComplete(callbacks, sessionId);
+ }
+ }
+
+ return {
+ activeStreams: newActiveStreams,
+ completedStreams: newCompletedStreams,
+ };
+}
+
+/**
+ * Create a new active stream with initial state.
+ */
+function createActiveStream(
+ sessionId: string,
+ onChunk?: (chunk: StreamChunk) => void,
+): ActiveStream {
+ const abortController = new AbortController();
+ const initialCallbacks = new Set<(chunk: StreamChunk) => void>();
+ if (onChunk) initialCallbacks.add(onChunk);
+
+ return {
+ sessionId,
+ abortController,
+ status: "streaming",
+ startedAt: Date.now(),
+ chunks: [],
+ onChunkCallbacks: initialCallbacks,
+ };
+}
+
+export const useChatStore = create<ChatStore>((set, get) => ({
+ activeStreams: new Map(),
+ completedStreams: new Map(),
+ activeSessions: new Set(),
+ streamCompleteCallbacks: new Set(),
+ activeTasks: loadPersistedTasks(),
+
+ startStream: async function startStream(
+ sessionId,
+ message,
+ isUserMessage,
+ context,
+ onChunk,
+ ) {
+ const state = get();
+ const callbacks = state.streamCompleteCallbacks;
+
+ // Clean up any existing stream for this session
+ const {
+ activeStreams: newActiveStreams,
+ completedStreams: newCompletedStreams,
+ } = cleanupExistingStream(
+ sessionId,
+ state.activeStreams,
+ state.completedStreams,
+ callbacks,
+ );
+
+ // Create new stream
+ const stream = createActiveStream(sessionId, onChunk);
+ newActiveStreams.set(sessionId, stream);
+ set({
+ activeStreams: newActiveStreams,
+ completedStreams: newCompletedStreams,
+ });
+
+ try {
+ await executeStream(stream, message, isUserMessage, context);
+ } finally {
+ finalizeStream(sessionId, stream, onChunk, get, set);
+ }
+ },
+
+ stopStream: function stopStream(sessionId) {
+ const state = get();
+ const stream = state.activeStreams.get(sessionId);
+ if (!stream) return;
+
+ stream.abortController.abort();
+ stream.status = "completed";
+
+ const newActiveStreams = new Map(state.activeStreams);
+ let newCompletedStreams = new Map(state.completedStreams);
+
+ const result: StreamResult = {
+ sessionId,
+ status: stream.status,
+ chunks: stream.chunks,
+ completedAt: Date.now(),
+ error: stream.error,
+ };
+ newCompletedStreams.set(sessionId, result);
+ newActiveStreams.delete(sessionId);
+ newCompletedStreams = cleanupExpiredStreams(newCompletedStreams);
+
+ set({
+ activeStreams: newActiveStreams,
+ completedStreams: newCompletedStreams,
+ });
+
+ notifyStreamComplete(state.streamCompleteCallbacks, sessionId);
+ },
+
+ subscribeToStream: function subscribeToStream(
+ sessionId,
+ onChunk,
+ skipReplay = false,
+ ) {
+ const state = get();
+ const stream = state.activeStreams.get(sessionId);
+
+ if (stream) {
+ if (!skipReplay) {
+ for (const chunk of stream.chunks) {
+ onChunk(chunk);
+ }
+ }
+
+ stream.onChunkCallbacks.add(onChunk);
+
+ return function unsubscribe() {
+ stream.onChunkCallbacks.delete(onChunk);
+ };
+ }
+
+ return function noop() {};
+ },
+
+ getStreamStatus: function getStreamStatus(sessionId) {
+ const { activeStreams, completedStreams } = get();
+
+ const active = activeStreams.get(sessionId);
+ if (active) return active.status;
+
+ const completed = completedStreams.get(sessionId);
+ if (completed) return completed.status;
+
+ return "idle";
+ },
+
+ getCompletedStream: function getCompletedStream(sessionId) {
+ return get().completedStreams.get(sessionId);
+ },
+
+ clearCompletedStream: function clearCompletedStream(sessionId) {
+ const state = get();
+ if (!state.completedStreams.has(sessionId)) return;
+
+ const newCompletedStreams = new Map(state.completedStreams);
+ newCompletedStreams.delete(sessionId);
+ set({ completedStreams: newCompletedStreams });
+ },
+
+ isStreaming: function isStreaming(sessionId) {
+ const stream = get().activeStreams.get(sessionId);
+ return stream?.status === "streaming";
+ },
+
+ registerActiveSession: function registerActiveSession(sessionId) {
+ const state = get();
+ if (state.activeSessions.has(sessionId)) return;
+
+ const newActiveSessions = new Set(state.activeSessions);
+ newActiveSessions.add(sessionId);
+ set({ activeSessions: newActiveSessions });
+ },
+
+ unregisterActiveSession: function unregisterActiveSession(sessionId) {
+ const state = get();
+ if (!state.activeSessions.has(sessionId)) return;
+
+ const newActiveSessions = new Set(state.activeSessions);
+ newActiveSessions.delete(sessionId);
+ set({ activeSessions: newActiveSessions });
+ },
+
+ isSessionActive: function isSessionActive(sessionId) {
+ return get().activeSessions.has(sessionId);
+ },
+
+ onStreamComplete: function onStreamComplete(callback) {
+ const state = get();
+ const newCallbacks = new Set(state.streamCompleteCallbacks);
+ newCallbacks.add(callback);
+ set({ streamCompleteCallbacks: newCallbacks });
+
+ return function unsubscribe() {
+ const currentState = get();
+ const cleanedCallbacks = new Set(currentState.streamCompleteCallbacks);
+ cleanedCallbacks.delete(callback);
+ set({ streamCompleteCallbacks: cleanedCallbacks });
+ };
+ },
+
+ setActiveTask: function setActiveTask(sessionId, taskInfo) {
+ const state = get();
+ const newActiveTasks = new Map(state.activeTasks);
+ newActiveTasks.set(sessionId, {
+ ...taskInfo,
+ sessionId,
+ startedAt: Date.now(),
+ });
+ set({ activeTasks: newActiveTasks });
+ persistTasks(newActiveTasks);
+ },
+
+ getActiveTask: function getActiveTask(sessionId) {
+ return get().activeTasks.get(sessionId);
+ },
+
+ clearActiveTask: function clearActiveTask(sessionId) {
+ const state = get();
+ if (!state.activeTasks.has(sessionId)) return;
+
+ const newActiveTasks = new Map(state.activeTasks);
+ newActiveTasks.delete(sessionId);
+ set({ activeTasks: newActiveTasks });
+ persistTasks(newActiveTasks);
+ },
+
+ reconnectToTask: async function reconnectToTask(
+ sessionId,
+ taskId,
+ lastMessageId = INITIAL_STREAM_ID,
+ onChunk,
+ ) {
+ const state = get();
+ const callbacks = state.streamCompleteCallbacks;
+
+ // Clean up any existing stream for this session
+ const {
+ activeStreams: newActiveStreams,
+ completedStreams: newCompletedStreams,
+ } = cleanupExistingStream(
+ sessionId,
+ state.activeStreams,
+ state.completedStreams,
+ callbacks,
+ );
+
+ // Create new stream for reconnection
+ const stream = createActiveStream(sessionId, onChunk);
+ newActiveStreams.set(sessionId, stream);
+ set({
+ activeStreams: newActiveStreams,
+ completedStreams: newCompletedStreams,
+ });
+
+ try {
+ await executeTaskReconnect(stream, taskId, lastMessageId);
+ } finally {
+ finalizeStream(sessionId, stream, onChunk, get, set);
+
+ // Clear active task on completion
+ if (stream.status === "completed" || stream.status === "error") {
+ const taskState = get();
+ if (taskState.activeTasks.has(sessionId)) {
+ const newActiveTasks = new Map(taskState.activeTasks);
+ newActiveTasks.delete(sessionId);
+ set({ activeTasks: newActiveTasks });
+ persistTasks(newActiveTasks);
+ }
+ }
+ }
+ },
+
+ updateTaskLastMessageId: function updateTaskLastMessageId(
+ sessionId,
+ lastMessageId,
+ ) {
+ const state = get();
+ const task = state.activeTasks.get(sessionId);
+ if (!task) return;
+
+ const newActiveTasks = new Map(state.activeTasks);
+ newActiveTasks.set(sessionId, {
+ ...task,
+ lastMessageId,
+ });
+ set({ activeTasks: newActiveTasks });
+ persistTasks(newActiveTasks);
+ },
+}));
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/chat-types.ts b/autogpt_platform/frontend/src/components/contextual/Chat/chat-types.ts
new file mode 100644
index 0000000000..34813e17fe
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/chat-types.ts
@@ -0,0 +1,163 @@
+import type { ToolArguments, ToolResult } from "@/types/chat";
+
+export type StreamStatus = "idle" | "streaming" | "completed" | "error";
+
+export interface StreamChunk {
+ type:
+ | "stream_start"
+ | "text_chunk"
+ | "text_ended"
+ | "tool_call"
+ | "tool_call_start"
+ | "tool_response"
+ | "login_needed"
+ | "need_login"
+ | "credentials_needed"
+ | "error"
+ | "usage"
+ | "stream_end";
+ taskId?: string;
+ timestamp?: string;
+ content?: string;
+ message?: string;
+ code?: string;
+  details?: Record<string, unknown>;
+ tool_id?: string;
+ tool_name?: string;
+ arguments?: ToolArguments;
+ result?: ToolResult;
+ success?: boolean;
+ idx?: number;
+ session_id?: string;
+ agent_info?: {
+ graph_id: string;
+ name: string;
+ trigger_type: string;
+ };
+ provider?: string;
+ provider_name?: string;
+ credential_type?: string;
+ scopes?: string[];
+ title?: string;
+ [key: string]: unknown;
+}
+
+export type VercelStreamChunk =
+ | { type: "start"; messageId: string; taskId?: string }
+ | { type: "finish" }
+ | { type: "text-start"; id: string }
+ | { type: "text-delta"; id: string; delta: string }
+ | { type: "text-end"; id: string }
+ | { type: "tool-input-start"; toolCallId: string; toolName: string }
+ | {
+ type: "tool-input-available";
+ toolCallId: string;
+ toolName: string;
+      input: Record<string, unknown>;
+ }
+ | {
+ type: "tool-output-available";
+ toolCallId: string;
+ toolName?: string;
+ output: unknown;
+ success?: boolean;
+ }
+ | {
+ type: "usage";
+ promptTokens: number;
+ completionTokens: number;
+ totalTokens: number;
+ }
+ | {
+ type: "error";
+ errorText: string;
+ code?: string;
+      details?: Record<string, unknown>;
+ };
+
+export interface ActiveStream {
+ sessionId: string;
+ abortController: AbortController;
+ status: StreamStatus;
+ startedAt: number;
+ chunks: StreamChunk[];
+ error?: Error;
+ onChunkCallbacks: Set<(chunk: StreamChunk) => void>;
+}
+
+export interface StreamResult {
+ sessionId: string;
+ status: StreamStatus;
+ chunks: StreamChunk[];
+ completedAt: number;
+ error?: Error;
+}
+
+export type StreamCompleteCallback = (sessionId: string) => void;
+
+// Type guards for message types
+
+/**
+ * Check if a message has a toolId property.
+ */
+export function hasToolId<T extends object>(
+ msg: T,
+): msg is T & { toolId: string } {
+ return (
+ "toolId" in msg &&
+    typeof (msg as Record<string, unknown>).toolId === "string"
+ );
+}
+
+/**
+ * Check if a message has an operationId property.
+ */
+export function hasOperationId<T extends object>(
+ msg: T,
+): msg is T & { operationId: string } {
+ return (
+ "operationId" in msg &&
+    typeof (msg as Record<string, unknown>).operationId === "string"
+ );
+}
+
+/**
+ * Check if a message has a toolCallId property.
+ */
+export function hasToolCallId<T extends object>(
+ msg: T,
+): msg is T & { toolCallId: string } {
+ return (
+ "toolCallId" in msg &&
+    typeof (msg as Record<string, unknown>).toolCallId === "string"
+ );
+}
+
+/**
+ * Check if a message is an operation message type.
+ */
+export function isOperationMessage<T extends { type: string }>(
+ msg: T,
+): msg is T & {
+ type: "operation_started" | "operation_pending" | "operation_in_progress";
+} {
+ return (
+ msg.type === "operation_started" ||
+ msg.type === "operation_pending" ||
+ msg.type === "operation_in_progress"
+ );
+}
+
+/**
+ * Get the tool ID from a message if available.
+ * Checks toolId, operationId, and toolCallId properties.
+ */
+export function getToolIdFromMessage<T extends object>(
+ msg: T,
+): string | undefined {
+  const record = msg as Record<string, unknown>;
+ if (typeof record.toolId === "string") return record.toolId;
+ if (typeof record.operationId === "string") return record.operationId;
+ if (typeof record.toolCallId === "string") return record.toolCallId;
+ return undefined;
+}
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/AIChatBubble/AIChatBubble.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/AIChatBubble/AIChatBubble.tsx
new file mode 100644
index 0000000000..f5d56fcb15
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/AIChatBubble/AIChatBubble.tsx
@@ -0,0 +1,15 @@
+import { cn } from "@/lib/utils";
+import { ReactNode } from "react";
+
+export interface AIChatBubbleProps {
+ children: ReactNode;
+ className?: string;
+}
+
+export function AIChatBubble({ children, className }: AIChatBubbleProps) {
+ return (
+
+ {children}
+
+ );
+}
diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentCarouselMessage/AgentCarouselMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentCarouselMessage/AgentCarouselMessage.tsx
similarity index 100%
rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentCarouselMessage/AgentCarouselMessage.tsx
rename to autogpt_platform/frontend/src/components/contextual/Chat/components/AgentCarouselMessage/AgentCarouselMessage.tsx
diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentInputsSetup/AgentInputsSetup.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/AgentInputsSetup.tsx
similarity index 100%
rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentInputsSetup/AgentInputsSetup.tsx
rename to autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/AgentInputsSetup.tsx
diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentInputsSetup/useAgentInputsSetup.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/useAgentInputsSetup.ts
similarity index 100%
rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentInputsSetup/useAgentInputsSetup.ts
rename to autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/useAgentInputsSetup.ts
diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx
similarity index 99%
rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx
rename to autogpt_platform/frontend/src/components/contextual/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx
index 33f02e660f..b2cf92ec56 100644
--- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx
@@ -21,7 +21,7 @@ export function AuthPromptWidget({
message,
sessionId,
agentInfo,
- returnUrl = "/chat",
+ returnUrl = "/copilot/chat",
className,
}: AuthPromptWidgetProps) {
const router = useRouter();
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx
new file mode 100644
index 0000000000..fbf2d5d143
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx
@@ -0,0 +1,130 @@
+import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse";
+import { Button } from "@/components/atoms/Button/Button";
+import { Text } from "@/components/atoms/Text/Text";
+import { Dialog } from "@/components/molecules/Dialog/Dialog";
+import { cn } from "@/lib/utils";
+import { GlobeHemisphereEastIcon } from "@phosphor-icons/react";
+import { useEffect } from "react";
+import { ChatInput } from "../ChatInput/ChatInput";
+import { MessageList } from "../MessageList/MessageList";
+import { useChatContainer } from "./useChatContainer";
+
+export interface ChatContainerProps {
+ sessionId: string | null;
+ initialMessages: SessionDetailResponse["messages"];
+ initialPrompt?: string;
+ className?: string;
+ onStreamingChange?: (isStreaming: boolean) => void;
+ onOperationStarted?: () => void;
+ /** Active stream info from the server for reconnection */
+ activeStream?: {
+ taskId: string;
+ lastMessageId: string;
+ operationId: string;
+ toolName: string;
+ };
+}
+
+export function ChatContainer({
+ sessionId,
+ initialMessages,
+ initialPrompt,
+ className,
+ onStreamingChange,
+ onOperationStarted,
+ activeStream,
+}: ChatContainerProps) {
+ const {
+ messages,
+ streamingChunks,
+ isStreaming,
+ stopStreaming,
+ isRegionBlockedModalOpen,
+ sendMessageWithContext,
+ handleRegionModalOpenChange,
+ handleRegionModalClose,
+ } = useChatContainer({
+ sessionId,
+ initialMessages,
+ initialPrompt,
+ onOperationStarted,
+ activeStream,
+ });
+
+ useEffect(() => {
+ onStreamingChange?.(isStreaming);
+ }, [isStreaming, onStreamingChange]);
+
+ return (
+
+
+ }
+ controlled={{
+ isOpen: isRegionBlockedModalOpen,
+ set: handleRegionModalOpenChange,
+ }}
+ onClose={handleRegionModalClose}
+ styling={{ maxWidth: 550, width: "100%", minWidth: "auto" }}
+ >
+
+
+
+ The Autogpt AI model is not available in your region or your
+ connection is blocking it. Please try again with a different
+ connection.
+
+
+
+
+
+
+
+ {/* Messages - Scrollable */}
+
+
+ {/* Input - Fixed at bottom */}
+
+
+ );
+}
diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/createStreamEventDispatcher.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts
similarity index 51%
rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/createStreamEventDispatcher.ts
rename to autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts
index 844f126d49..af3b3329b7 100644
--- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/createStreamEventDispatcher.ts
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts
@@ -1,7 +1,8 @@
import { toast } from "sonner";
-import { StreamChunk } from "../../useChatStream";
-import type { HandlerDependencies } from "./useChatContainer.handlers";
+import type { StreamChunk } from "../../chat-types";
+import type { HandlerDependencies } from "./handlers";
import {
+ getErrorDisplayMessage,
handleError,
handleLoginNeeded,
handleStreamEnd,
@@ -9,13 +10,37 @@ import {
handleTextEnded,
handleToolCallStart,
handleToolResponse,
-} from "./useChatContainer.handlers";
+ isRegionBlockedError,
+} from "./handlers";
export function createStreamEventDispatcher(
deps: HandlerDependencies,
): (chunk: StreamChunk) => void {
return function dispatchStreamEvent(chunk: StreamChunk): void {
+ if (
+ chunk.type === "text_chunk" ||
+ chunk.type === "tool_call_start" ||
+ chunk.type === "tool_response" ||
+ chunk.type === "login_needed" ||
+ chunk.type === "need_login" ||
+ chunk.type === "error"
+ ) {
+ deps.hasResponseRef.current = true;
+ }
+
switch (chunk.type) {
+ case "stream_start":
+ // Store task ID for SSE reconnection
+ if (chunk.taskId && deps.onActiveTaskStarted) {
+ deps.onActiveTaskStarted({
+ taskId: chunk.taskId,
+ operationId: chunk.taskId,
+ toolName: "chat",
+ toolCallId: "chat_stream",
+ });
+ }
+ break;
+
case "text_chunk":
handleTextChunk(chunk, deps);
break;
@@ -38,15 +63,19 @@ export function createStreamEventDispatcher(
break;
case "stream_end":
+ // Note: "finish" type from backend gets normalized to "stream_end" by normalizeStreamChunk
handleStreamEnd(chunk, deps);
break;
case "error":
+ const isRegionBlocked = isRegionBlockedError(chunk);
handleError(chunk, deps);
// Show toast at dispatcher level to avoid circular dependencies
- toast.error("Chat Error", {
- description: chunk.message || chunk.content || "An error occurred",
- });
+ if (!isRegionBlocked) {
+ toast.error("Chat Error", {
+ description: getErrorDisplayMessage(chunk),
+ });
+ }
break;
case "usage":
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts
new file mode 100644
index 0000000000..5aec5b9818
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts
@@ -0,0 +1,362 @@
+import type { Dispatch, MutableRefObject, SetStateAction } from "react";
+import { StreamChunk } from "../../useChatStream";
+import type { ChatMessageData } from "../ChatMessage/useChatMessage";
+import {
+ extractCredentialsNeeded,
+ extractInputsNeeded,
+ parseToolResponse,
+} from "./helpers";
+
+function isToolCallMessage(
+ message: ChatMessageData,
+): message is Extract