- Error: {error.message}
+
+
Something went wrong
+
+ The assistant encountered an error. Please try sending your
+ message again.
+
)}
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx
index 0d023d0529..88b1c491d7 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx
@@ -4,7 +4,6 @@ import { WarningDiamondIcon } from "@phosphor-icons/react";
import type { ToolUIPart } from "ai";
import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions";
import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
-import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
import { ProgressBar } from "../../components/ProgressBar/ProgressBar";
import {
ContentCardDescription,
@@ -77,7 +76,7 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
isOperationInProgressOutput(output)
) {
return {
- icon:
,
+ icon,
title: "Creating agent, this may take a few minutes. Sit back and relax.",
};
}
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/helpers.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/helpers.tsx
index 816c661230..2b75ed9c97 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/helpers.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/helpers.tsx
@@ -203,7 +203,7 @@ export function getAccordionMeta(output: RunAgentToolOutput): {
? output.status.trim()
: "started";
return {
- icon:
,
+ icon,
title: output.graph_name,
description: `Status: ${statusText}`,
};
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/helpers.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/helpers.tsx
index c9b903876a..b8625988cd 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/helpers.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/helpers.tsx
@@ -149,7 +149,7 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
if (isRunBlockBlockOutput(output)) {
const keys = Object.keys(output.outputs ?? {});
return {
- icon:
,
+ icon,
title: output.block_name,
description:
keys.length > 0
diff --git a/autogpt_platform/frontend/src/app/api/chat/sessions/[sessionId]/stream/route.ts b/autogpt_platform/frontend/src/app/api/chat/sessions/[sessionId]/stream/route.ts
index 6facf80c58..bd27c77963 100644
--- a/autogpt_platform/frontend/src/app/api/chat/sessions/[sessionId]/stream/route.ts
+++ b/autogpt_platform/frontend/src/app/api/chat/sessions/[sessionId]/stream/route.ts
@@ -1,11 +1,8 @@
import { environment } from "@/services/environment";
import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers";
import { NextRequest } from "next/server";
+import { normalizeSSEStream, SSE_HEADERS } from "../../../sse-helpers";
-/**
- * SSE Proxy for chat streaming.
- * Supports POST with context (page content + URL) in the request body.
- */
export async function POST(
request: NextRequest,
{ params }: { params: Promise<{ sessionId: string }> },
@@ -23,17 +20,14 @@ export async function POST(
);
}
- // Get auth token from server-side session
const token = await getServerAuthToken();
- // Build backend URL
const backendUrl = environment.getAGPTServerBaseUrl();
const streamUrl = new URL(
`/api/chat/sessions/${sessionId}/stream`,
backendUrl,
);
- // Forward request to backend with auth header
const headers: Record
= {
"Content-Type": "application/json",
Accept: "text/event-stream",
@@ -63,14 +57,15 @@ export async function POST(
});
}
- // Return the SSE stream directly
- return new Response(response.body, {
- headers: {
- "Content-Type": "text/event-stream",
- "Cache-Control": "no-cache, no-transform",
- Connection: "keep-alive",
- "X-Accel-Buffering": "no",
- },
+ if (!response.body) {
+ return new Response(
+ JSON.stringify({ error: "Empty response from chat service" }),
+ { status: 502, headers: { "Content-Type": "application/json" } },
+ );
+ }
+
+ return new Response(normalizeSSEStream(response.body), {
+ headers: SSE_HEADERS,
});
} catch (error) {
console.error("SSE proxy error:", error);
@@ -87,13 +82,6 @@ export async function POST(
}
}
-/**
- * Resume an active stream for a session.
- *
- * Called by the AI SDK's `useChat(resume: true)` on page load.
- * Proxies to the backend which checks for an active stream and either
- * replays it (200 + SSE) or returns 204 No Content.
- */
export async function GET(
_request: NextRequest,
{ params }: { params: Promise<{ sessionId: string }> },
@@ -124,7 +112,6 @@ export async function GET(
headers,
});
- // 204 = no active stream to resume
if (response.status === 204) {
return new Response(null, { status: 204 });
}
@@ -137,12 +124,13 @@ export async function GET(
});
}
- return new Response(response.body, {
+ if (!response.body) {
+ return new Response(null, { status: 204 });
+ }
+
+ return new Response(normalizeSSEStream(response.body), {
headers: {
- "Content-Type": "text/event-stream",
- "Cache-Control": "no-cache, no-transform",
- Connection: "keep-alive",
- "X-Accel-Buffering": "no",
+ ...SSE_HEADERS,
"x-vercel-ai-ui-message-stream": "v1",
},
});
diff --git a/autogpt_platform/frontend/src/app/api/chat/sse-helpers.ts b/autogpt_platform/frontend/src/app/api/chat/sse-helpers.ts
new file mode 100644
index 0000000000..a5c76cf872
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/api/chat/sse-helpers.ts
@@ -0,0 +1,72 @@
+export const SSE_HEADERS = {
+ "Content-Type": "text/event-stream",
+ "Cache-Control": "no-cache, no-transform",
+ Connection: "keep-alive",
+ "X-Accel-Buffering": "no",
+} as const;
+
+export function normalizeSSEStream(
+ input: ReadableStream,
+): ReadableStream {
+ const decoder = new TextDecoder();
+ const encoder = new TextEncoder();
+ let buffer = "";
+
+ return input.pipeThrough(
+ new TransformStream({
+ transform(chunk, controller) {
+ buffer += decoder.decode(chunk, { stream: true });
+
+ const parts = buffer.split("\n\n");
+ buffer = parts.pop() ?? "";
+
+ for (const part of parts) {
+ const normalized = normalizeSSEEvent(part);
+ controller.enqueue(encoder.encode(normalized + "\n\n"));
+ }
+ },
+ flush(controller) {
+ if (buffer.trim()) {
+ const normalized = normalizeSSEEvent(buffer);
+ controller.enqueue(encoder.encode(normalized + "\n\n"));
+ }
+ },
+ }),
+ );
+}
+
+function normalizeSSEEvent(event: string): string {
+ const lines = event.split("\n");
+ const dataLines: string[] = [];
+ const otherLines: string[] = [];
+
+ for (const line of lines) {
+ if (line.startsWith("data: ")) {
+ dataLines.push(line.slice(6));
+ } else {
+ otherLines.push(line);
+ }
+ }
+
+ if (dataLines.length === 0) return event;
+
+ const dataStr = dataLines.join("\n");
+ try {
+ const parsed = JSON.parse(dataStr) as Record;
+ if (parsed.type === "error") {
+ const normalized = {
+ type: "error",
+ errorText:
+ typeof parsed.errorText === "string"
+ ? parsed.errorText
+ : "An unexpected error occurred",
+ };
+ const newData = `data: ${JSON.stringify(normalized)}`;
+ return [...otherLines.filter((l) => l.length > 0), newData].join("\n");
+ }
+ } catch {
+ // Not valid JSON — pass through as-is
+ }
+
+ return event;
+}
diff --git a/autogpt_platform/frontend/src/app/api/chat/tasks/[taskId]/stream/route.ts b/autogpt_platform/frontend/src/app/api/chat/tasks/[taskId]/stream/route.ts
index 336786bfdb..238fdebb06 100644
--- a/autogpt_platform/frontend/src/app/api/chat/tasks/[taskId]/stream/route.ts
+++ b/autogpt_platform/frontend/src/app/api/chat/tasks/[taskId]/stream/route.ts
@@ -1,20 +1,8 @@
import { environment } from "@/services/environment";
import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers";
import { NextRequest } from "next/server";
+import { normalizeSSEStream, SSE_HEADERS } from "../../../sse-helpers";
-/**
- * SSE Proxy for task stream reconnection.
- *
- * This endpoint allows clients to reconnect to an ongoing or recently completed
- * background task's stream. It replays missed messages from Redis Streams and
- * subscribes to live updates if the task is still running.
- *
- * Client contract:
- * 1. When receiving an operation_started event, store the task_id
- * 2. To reconnect: GET /api/chat/tasks/{taskId}/stream?last_message_id={idx}
- * 3. Messages are replayed from the last_message_id position
- * 4. Stream ends when "finish" event is received
- */
export async function GET(
request: NextRequest,
{ params }: { params: Promise<{ taskId: string }> },
@@ -24,15 +12,12 @@ export async function GET(
const lastMessageId = searchParams.get("last_message_id") || "0-0";
try {
- // Get auth token from server-side session
const token = await getServerAuthToken();
- // Build backend URL
const backendUrl = environment.getAGPTServerBaseUrl();
const streamUrl = new URL(`/api/chat/tasks/${taskId}/stream`, backendUrl);
streamUrl.searchParams.set("last_message_id", lastMessageId);
- // Forward request to backend with auth header
const headers: Record = {
Accept: "text/event-stream",
"Cache-Control": "no-cache",
@@ -56,14 +41,12 @@ export async function GET(
});
}
- // Return the SSE stream directly
- return new Response(response.body, {
- headers: {
- "Content-Type": "text/event-stream",
- "Cache-Control": "no-cache, no-transform",
- Connection: "keep-alive",
- "X-Accel-Buffering": "no",
- },
+ if (!response.body) {
+ return new Response(null, { status: 204 });
+ }
+
+ return new Response(normalizeSSEStream(response.body), {
+ headers: SSE_HEADERS,
});
} catch (error) {
console.error("Task stream proxy error:", error);
From 36aeb0b2b3d0ee0e8f23236771108040425a6cd5 Mon Sep 17 00:00:00 2001
From: Otto
Date: Wed, 11 Feb 2026 15:43:58 +0000
Subject: [PATCH 3/3] docs(blocks): clarify HumanInTheLoop output descriptions
for agent builder (#12069)
## Problem
The agent builder (LLM) misinterprets the HumanInTheLoop block outputs.
It thinks `approved_data` and `rejected_data` will yield status strings
like "APPROVED" or "REJECTED" instead of understanding that the actual
input data passes through.
This leads to unnecessary complexity — the agent builder adds comparison
blocks to check for status strings that don't exist.
## Solution
Enriched the block docstring and all input/output field descriptions to
make it explicit that:
1. The output is the actual data itself, not a status string
2. The routing is determined by which output pin fires
3. How to use the block correctly (connect downstream blocks to
appropriate output pins)
## Changes
- Updated block docstring with clear "How it works" and "Example usage"
sections
- Enhanced `data` input description to explain data flow
- Enhanced `name` input description for reviewer context
- Enhanced `approved_data` output to explicitly state it's NOT a status
string
- Enhanced `rejected_data` output to explicitly state it's NOT a status
string
- Enhanced `review_message` output for clarity
## Testing
Documentation-only change to schema descriptions. No functional changes.
Fixes SECRT-1930
Greptile Overview
Greptile Summary
Enhanced documentation for the `HumanInTheLoopBlock` to clarify how
output pins work. The key improvement explicitly states that output pins
(`approved_data` and `rejected_data`) yield the actual input data, not
status strings like "APPROVED" or "REJECTED". This prevents the agent
builder (LLM) from misinterpreting the block's behavior and adding
unnecessary comparison blocks.
**Key changes:**
- Added "How it works" and "Example usage" sections to the block
docstring
- Clarified that routing is determined by which output pin fires, not by
comparing output values
- Enhanced all input/output field descriptions with explicit data flow
explanations
- Emphasized that downstream blocks should be connected to the
appropriate output pin based on desired workflow path
This is a documentation-only change with no functional modifications to
the code logic.
Confidence Score: 5/5
- This PR is safe to merge with no risk
- Documentation-only change that accurately reflects the existing code
behavior. No functional changes, no runtime impact, and the enhanced
descriptions correctly explain how the block outputs work based on
verification of the implementation code.
- No files require special attention
Co-authored-by: Zamil Majdy
---
.../backend/blocks/human_in_the_loop.py | 56 ++++++++++++++-----
docs/integrations/README.md | 2 +-
docs/integrations/block-integrations/basic.md | 14 ++---
3 files changed, 50 insertions(+), 22 deletions(-)
diff --git a/autogpt_platform/backend/backend/blocks/human_in_the_loop.py b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py
index 568ac4b33f..d31f90ec81 100644
--- a/autogpt_platform/backend/backend/blocks/human_in_the_loop.py
+++ b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py
@@ -21,43 +21,71 @@ logger = logging.getLogger(__name__)
class HumanInTheLoopBlock(Block):
"""
- This block pauses execution and waits for human approval or modification of the data.
+ Pauses execution and waits for human approval or rejection of the data.
- When executed, it creates a pending review entry and sets the node execution status
- to REVIEW. The execution will remain paused until a human user either:
- - Approves the data (with or without modifications)
- - Rejects the data
+ When executed, this block creates a pending review entry and sets the node execution
+ status to REVIEW. The execution remains paused until a human user either approves
+ or rejects the data.
- This is useful for workflows that require human validation or intervention before
- proceeding to the next steps.
+ **How it works:**
+ - The input data is presented to a human reviewer
+ - The reviewer can approve or reject (and optionally modify the data if editable)
+ - On approval: the data flows out through the `approved_data` output pin
+ - On rejection: the data flows out through the `rejected_data` output pin
+
+ **Important:** The output pins yield the actual data itself, NOT status strings.
+ The approval/rejection decision determines WHICH output pin fires, not the value.
+ You do NOT need to compare the output to "APPROVED" or "REJECTED" - simply connect
+ downstream blocks to the appropriate output pin for each case.
+
+ **Example usage:**
+ - Connect `approved_data` → next step in your workflow (data was approved)
+ - Connect `rejected_data` → error handling or notification (data was rejected)
"""
class Input(BlockSchemaInput):
- data: Any = SchemaField(description="The data to be reviewed by a human user")
+ data: Any = SchemaField(
+ description="The data to be reviewed by a human user. "
+ "This exact data will be passed through to either approved_data or "
+ "rejected_data output based on the reviewer's decision."
+ )
name: str = SchemaField(
- description="A descriptive name for what this data represents",
+ description="A descriptive name for what this data represents. "
+ "This helps the reviewer understand what they are reviewing.",
)
editable: bool = SchemaField(
- description="Whether the human reviewer can edit the data",
+ description="Whether the human reviewer can edit the data before "
+ "approving or rejecting it",
default=True,
advanced=True,
)
class Output(BlockSchemaOutput):
approved_data: Any = SchemaField(
- description="The data when approved (may be modified by reviewer)"
+ description="Outputs the input data when the reviewer APPROVES it. "
+ "The value is the actual data itself (not a status string like 'APPROVED'). "
+ "If the reviewer edited the data, this contains the modified version. "
+ "Connect downstream blocks here for the 'approved' workflow path."
)
rejected_data: Any = SchemaField(
- description="The data when rejected (may be modified by reviewer)"
+ description="Outputs the input data when the reviewer REJECTS it. "
+ "The value is the actual data itself (not a status string like 'REJECTED'). "
+ "If the reviewer edited the data, this contains the modified version. "
+ "Connect downstream blocks here for the 'rejected' workflow path."
)
review_message: str = SchemaField(
- description="Any message provided by the reviewer", default=""
+ description="Optional message provided by the reviewer explaining their "
+ "decision. Only outputs when the reviewer provides a message; "
+ "this pin does not fire if no message was given.",
+ default="",
)
def __init__(self):
super().__init__(
id="8b2a7b3c-6e9d-4a5f-8c1b-2e3f4a5b6c7d",
- description="Pause execution and wait for human approval or modification of data",
+ description="Pause execution for human review. Data flows through "
+ "approved_data or rejected_data output based on the reviewer's decision. "
+ "Outputs contain the actual data, not status strings.",
categories={BlockCategory.BASIC},
input_schema=HumanInTheLoopBlock.Input,
output_schema=HumanInTheLoopBlock.Output,
diff --git a/docs/integrations/README.md b/docs/integrations/README.md
index 97a4d98709..a471ef3533 100644
--- a/docs/integrations/README.md
+++ b/docs/integrations/README.md
@@ -61,7 +61,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
| [Get List Item](block-integrations/basic.md#get-list-item) | Returns the element at the given index |
| [Get Store Agent Details](block-integrations/system/store_operations.md#get-store-agent-details) | Get detailed information about an agent from the store |
| [Get Weather Information](block-integrations/basic.md#get-weather-information) | Retrieves weather information for a specified location using OpenWeatherMap API |
-| [Human In The Loop](block-integrations/basic.md#human-in-the-loop) | Pause execution and wait for human approval or modification of data |
+| [Human In The Loop](block-integrations/basic.md#human-in-the-loop) | Pause execution for human review |
| [List Is Empty](block-integrations/basic.md#list-is-empty) | Checks if a list is empty |
| [List Library Agents](block-integrations/system/library_operations.md#list-library-agents) | List all agents in your personal library |
| [Note](block-integrations/basic.md#note) | A visual annotation block that displays a sticky note in the workflow editor for documentation and organization purposes |
diff --git a/docs/integrations/block-integrations/basic.md b/docs/integrations/block-integrations/basic.md
index 5a73fd5a03..08def38ede 100644
--- a/docs/integrations/block-integrations/basic.md
+++ b/docs/integrations/block-integrations/basic.md
@@ -975,7 +975,7 @@ A travel planning application could use this block to provide users with current
## Human In The Loop
### What it is
-Pause execution and wait for human approval or modification of data
+Pause execution for human review. Data flows through approved_data or rejected_data output based on the reviewer's decision. Outputs contain the actual data, not status strings.
### How it works
@@ -988,18 +988,18 @@ This enables human oversight at critical points in automated workflows, ensuring
| Input | Description | Type | Required |
|-------|-------------|------|----------|
-| data | The data to be reviewed by a human user | Data | Yes |
-| name | A descriptive name for what this data represents | str | Yes |
-| editable | Whether the human reviewer can edit the data | bool | No |
+| data | The data to be reviewed by a human user. This exact data will be passed through to either approved_data or rejected_data output based on the reviewer's decision. | Data | Yes |
+| name | A descriptive name for what this data represents. This helps the reviewer understand what they are reviewing. | str | Yes |
+| editable | Whether the human reviewer can edit the data before approving or rejecting it | bool | No |
### Outputs
| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
-| approved_data | The data when approved (may be modified by reviewer) | Approved Data |
-| rejected_data | The data when rejected (may be modified by reviewer) | Rejected Data |
-| review_message | Any message provided by the reviewer | str |
+| approved_data | Outputs the input data when the reviewer APPROVES it. The value is the actual data itself (not a status string like 'APPROVED'). If the reviewer edited the data, this contains the modified version. Connect downstream blocks here for the 'approved' workflow path. | Approved Data |
+| rejected_data | Outputs the input data when the reviewer REJECTS it. The value is the actual data itself (not a status string like 'REJECTED'). If the reviewer edited the data, this contains the modified version. Connect downstream blocks here for the 'rejected' workflow path. | Rejected Data |
+| review_message | Optional message provided by the reviewer explaining their decision. Only outputs when the reviewer provides a message; this pin does not fire if no message was given. | str |
### Possible use case