Compare commits


1 Commit

Author: dependabot[bot]
SHA1: 8d70ca8502
Message: chore(backend/deps): bump e2b-code-interpreter
Bumps [e2b-code-interpreter](https://github.com/e2b-dev/code-interpreter) from 1.5.2 to 2.4.1.
- [Release notes](https://github.com/e2b-dev/code-interpreter/releases)
- [Commits](https://github.com/e2b-dev/code-interpreter/compare/@e2b/code-interpreter-python@1.5.2...@e2b/code-interpreter-python@2.4.1)

---
updated-dependencies:
- dependency-name: e2b-code-interpreter
  dependency-version: 2.4.1
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Date: 2026-02-10 23:52:58 +00:00
37 changed files with 211 additions and 714 deletions
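
Since this is a semver-major bump, caller code may need migrating. A minimal sketch of running code through the 2.x Python SDK, assuming the `Sandbox.create()` factory that the 2.x line introduces (1.x constructed `Sandbox()` directly) and an `E2B_API_KEY` in the environment:

```python
from e2b_code_interpreter import Sandbox

# Assumption: 2.x exposes a create() factory; the 1.x equivalent was Sandbox().
sbx = Sandbox.create()
try:
    execution = sbx.run_code("print(40 + 2)")
    print(execution.logs.stdout)  # ["42\n"]
finally:
    sbx.kill()  # release the cloud sandbox
```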

View File

@@ -22,7 +22,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
ref: ${{ github.event.workflow_run.head_branch }}
fetch-depth: 0

View File

@@ -30,7 +30,7 @@ jobs:
actions: read # Required for CI access
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
fetch-depth: 1

View File

@@ -40,7 +40,7 @@ jobs:
actions: read # Required for CI access
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
fetch-depth: 1

View File

@@ -58,7 +58,7 @@ jobs:
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL

View File

@@ -27,7 +27,7 @@ jobs:
# If you do not check out your code, Copilot will do this for you.
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true

View File

@@ -23,7 +23,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
fetch-depth: 1

View File

@@ -23,7 +23,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
fetch-depth: 0

View File

@@ -28,7 +28,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
fetch-depth: 1

View File

@@ -25,7 +25,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
ref: ${{ github.event.inputs.git_ref || github.ref_name }}

View File

@@ -17,7 +17,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
ref: ${{ github.ref_name || 'master' }}

View File

@@ -68,7 +68,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true

View File

@@ -31,7 +31,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
- name: Check for component changes
uses: dorny/paths-filter@v3
@@ -71,7 +71,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v6
@@ -107,7 +107,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -148,7 +148,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
submodules: recursive
@@ -277,7 +277,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
submodules: recursive

View File

@@ -29,7 +29,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v6
@@ -63,7 +63,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
submodules: recursive

View File

@@ -11,7 +11,7 @@ jobs:
steps:
# - name: Wait some time for all actions to start
# run: sleep 30
- uses: actions/checkout@v6
- uses: actions/checkout@v4
# with:
# fetch-depth: 0
- name: Set up Python

View File

@@ -2,7 +2,7 @@ import asyncio
import logging
import uuid
from datetime import UTC, datetime
from typing import Any, cast
from typing import Any
from weakref import WeakValueDictionary
from openai.types.chat import (
@@ -104,26 +104,6 @@ class ChatSession(BaseModel):
successful_agent_runs: dict[str, int] = {}
successful_agent_schedules: dict[str, int] = {}
def add_tool_call_to_current_turn(self, tool_call: dict) -> None:
"""Attach a tool_call to the current turn's assistant message.
Searches backwards for the most recent assistant message (stopping at
any user message boundary). If found, appends the tool_call to it.
Otherwise creates a new assistant message with the tool_call.
"""
for msg in reversed(self.messages):
if msg.role == "user":
break
if msg.role == "assistant":
if not msg.tool_calls:
msg.tool_calls = []
msg.tool_calls.append(tool_call)
return
self.messages.append(
ChatMessage(role="assistant", content="", tool_calls=[tool_call])
)
@staticmethod
def new(user_id: str) -> "ChatSession":
return ChatSession(
@@ -192,47 +172,6 @@ class ChatSession(BaseModel):
successful_agent_schedules=successful_agent_schedules,
)
@staticmethod
def _merge_consecutive_assistant_messages(
messages: list[ChatCompletionMessageParam],
) -> list[ChatCompletionMessageParam]:
"""Merge consecutive assistant messages into single messages.
Long-running tool flows can create split assistant messages: one with
text content and another with tool_calls. Anthropic's API requires
tool_result blocks to reference a tool_use in the immediately preceding
assistant message, so these splits cause 400 errors via OpenRouter.
"""
if len(messages) < 2:
return messages
result: list[ChatCompletionMessageParam] = [messages[0]]
for msg in messages[1:]:
prev = result[-1]
if prev.get("role") != "assistant" or msg.get("role") != "assistant":
result.append(msg)
continue
prev = cast(ChatCompletionAssistantMessageParam, prev)
curr = cast(ChatCompletionAssistantMessageParam, msg)
curr_content = curr.get("content") or ""
if curr_content:
prev_content = prev.get("content") or ""
prev["content"] = (
f"{prev_content}\n{curr_content}" if prev_content else curr_content
)
curr_tool_calls = curr.get("tool_calls")
if curr_tool_calls:
prev_tool_calls = prev.get("tool_calls")
prev["tool_calls"] = (
list(prev_tool_calls) + list(curr_tool_calls)
if prev_tool_calls
else list(curr_tool_calls)
)
return result
def to_openai_messages(self) -> list[ChatCompletionMessageParam]:
messages = []
for message in self.messages:
@@ -319,7 +258,7 @@ class ChatSession(BaseModel):
name=message.name or "",
)
)
return self._merge_consecutive_assistant_messages(messages)
return messages
async def _get_session_from_cache(session_id: str) -> ChatSession | None:

View File

@@ -1,16 +1,4 @@
from typing import cast
import pytest
from openai.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionMessageParam,
ChatCompletionToolMessageParam,
ChatCompletionUserMessageParam,
)
from openai.types.chat.chat_completion_message_tool_call_param import (
ChatCompletionMessageToolCallParam,
Function,
)
from .model import (
ChatMessage,
@@ -129,205 +117,3 @@ async def test_chatsession_db_storage(setup_test_user, test_user_id):
loaded.tool_calls is not None
), f"Tool calls missing for {orig.role} message"
assert len(orig.tool_calls) == len(loaded.tool_calls)
# --------------------------------------------------------------------------- #
# _merge_consecutive_assistant_messages #
# --------------------------------------------------------------------------- #
_tc = ChatCompletionMessageToolCallParam(
id="tc1", type="function", function=Function(name="do_stuff", arguments="{}")
)
_tc2 = ChatCompletionMessageToolCallParam(
id="tc2", type="function", function=Function(name="other", arguments="{}")
)
def test_merge_noop_when_no_consecutive_assistants():
"""Messages without consecutive assistants are returned unchanged."""
msgs = [
ChatCompletionUserMessageParam(role="user", content="hi"),
ChatCompletionAssistantMessageParam(role="assistant", content="hello"),
ChatCompletionUserMessageParam(role="user", content="bye"),
]
merged = ChatSession._merge_consecutive_assistant_messages(msgs)
assert len(merged) == 3
assert [m["role"] for m in merged] == ["user", "assistant", "user"]
def test_merge_splits_text_and_tool_calls():
"""The exact bug scenario: text-only assistant followed by tool_calls-only assistant."""
msgs = [
ChatCompletionUserMessageParam(role="user", content="build agent"),
ChatCompletionAssistantMessageParam(
role="assistant", content="Let me build that"
),
ChatCompletionAssistantMessageParam(
role="assistant", content="", tool_calls=[_tc]
),
ChatCompletionToolMessageParam(role="tool", content="ok", tool_call_id="tc1"),
]
merged = ChatSession._merge_consecutive_assistant_messages(msgs)
assert len(merged) == 3
assert merged[0]["role"] == "user"
assert merged[2]["role"] == "tool"
a = cast(ChatCompletionAssistantMessageParam, merged[1])
assert a["role"] == "assistant"
assert a.get("content") == "Let me build that"
assert a.get("tool_calls") == [_tc]
def test_merge_combines_tool_calls_from_both():
"""Both consecutive assistants have tool_calls — they get merged."""
msgs: list[ChatCompletionAssistantMessageParam] = [
ChatCompletionAssistantMessageParam(
role="assistant", content="text", tool_calls=[_tc]
),
ChatCompletionAssistantMessageParam(
role="assistant", content="", tool_calls=[_tc2]
),
]
merged = ChatSession._merge_consecutive_assistant_messages(msgs) # type: ignore[arg-type]
assert len(merged) == 1
a = cast(ChatCompletionAssistantMessageParam, merged[0])
assert a.get("tool_calls") == [_tc, _tc2]
assert a.get("content") == "text"
def test_merge_three_consecutive_assistants():
"""Three consecutive assistants collapse into one."""
msgs: list[ChatCompletionAssistantMessageParam] = [
ChatCompletionAssistantMessageParam(role="assistant", content="a"),
ChatCompletionAssistantMessageParam(role="assistant", content="b"),
ChatCompletionAssistantMessageParam(
role="assistant", content="", tool_calls=[_tc]
),
]
merged = ChatSession._merge_consecutive_assistant_messages(msgs) # type: ignore[arg-type]
assert len(merged) == 1
a = cast(ChatCompletionAssistantMessageParam, merged[0])
assert a.get("content") == "a\nb"
assert a.get("tool_calls") == [_tc]
def test_merge_empty_and_single_message():
"""Edge cases: empty list and single message."""
assert ChatSession._merge_consecutive_assistant_messages([]) == []
single: list[ChatCompletionMessageParam] = [
ChatCompletionUserMessageParam(role="user", content="hi")
]
assert ChatSession._merge_consecutive_assistant_messages(single) == single
# --------------------------------------------------------------------------- #
# add_tool_call_to_current_turn #
# --------------------------------------------------------------------------- #
_raw_tc = {
"id": "tc1",
"type": "function",
"function": {"name": "f", "arguments": "{}"},
}
_raw_tc2 = {
"id": "tc2",
"type": "function",
"function": {"name": "g", "arguments": "{}"},
}
def test_add_tool_call_appends_to_existing_assistant():
"""When the last assistant is from the current turn, tool_call is added to it."""
session = ChatSession.new(user_id="u")
session.messages = [
ChatMessage(role="user", content="hi"),
ChatMessage(role="assistant", content="working on it"),
]
session.add_tool_call_to_current_turn(_raw_tc)
assert len(session.messages) == 2 # no new message created
assert session.messages[1].tool_calls == [_raw_tc]
def test_add_tool_call_creates_assistant_when_none_exists():
"""When there's no current-turn assistant, a new one is created."""
session = ChatSession.new(user_id="u")
session.messages = [
ChatMessage(role="user", content="hi"),
]
session.add_tool_call_to_current_turn(_raw_tc)
assert len(session.messages) == 2
assert session.messages[1].role == "assistant"
assert session.messages[1].tool_calls == [_raw_tc]
def test_add_tool_call_does_not_cross_user_boundary():
"""A user message acts as a boundary — previous assistant is not modified."""
session = ChatSession.new(user_id="u")
session.messages = [
ChatMessage(role="assistant", content="old turn"),
ChatMessage(role="user", content="new message"),
]
session.add_tool_call_to_current_turn(_raw_tc)
assert len(session.messages) == 3 # new assistant was created
assert session.messages[0].tool_calls is None # old assistant untouched
assert session.messages[2].role == "assistant"
assert session.messages[2].tool_calls == [_raw_tc]
def test_add_tool_call_multiple_times():
"""Multiple long-running tool calls accumulate on the same assistant."""
session = ChatSession.new(user_id="u")
session.messages = [
ChatMessage(role="user", content="hi"),
ChatMessage(role="assistant", content="doing stuff"),
]
session.add_tool_call_to_current_turn(_raw_tc)
# Simulate a pending tool result in between (like _yield_tool_call does)
session.messages.append(
ChatMessage(role="tool", content="pending", tool_call_id="tc1")
)
session.add_tool_call_to_current_turn(_raw_tc2)
assert len(session.messages) == 3 # user, assistant, tool — no extra assistant
assert session.messages[1].tool_calls == [_raw_tc, _raw_tc2]
def test_to_openai_messages_merges_split_assistants():
"""End-to-end: session with split assistants produces valid OpenAI messages."""
session = ChatSession.new(user_id="u")
session.messages = [
ChatMessage(role="user", content="build agent"),
ChatMessage(role="assistant", content="Let me build that"),
ChatMessage(
role="assistant",
content="",
tool_calls=[
{
"id": "tc1",
"type": "function",
"function": {"name": "create_agent", "arguments": "{}"},
}
],
),
ChatMessage(role="tool", content="done", tool_call_id="tc1"),
ChatMessage(role="assistant", content="Saved!"),
ChatMessage(role="user", content="show me an example run"),
]
openai_msgs = session.to_openai_messages()
# The two consecutive assistants at index 1,2 should be merged
roles = [m["role"] for m in openai_msgs]
assert roles == ["user", "assistant", "tool", "assistant", "user"]
# The merged assistant should have both content and tool_calls
merged = cast(ChatCompletionAssistantMessageParam, openai_msgs[1])
assert merged.get("content") == "Let me build that"
tc_list = merged.get("tool_calls")
assert tc_list is not None and len(list(tc_list)) == 1
assert list(tc_list)[0]["id"] == "tc1"

View File

@@ -10,8 +10,6 @@ from typing import Any
from pydantic import BaseModel, Field
from backend.util.json import dumps as json_dumps
class ResponseType(str, Enum):
"""Types of streaming responses following AI SDK protocol."""
@@ -195,18 +193,6 @@ class StreamError(StreamBaseResponse):
default=None, description="Additional error details"
)
def to_sse(self) -> str:
"""Convert to SSE format, only emitting fields required by AI SDK protocol.
The AI SDK uses z.strictObject({type, errorText}) which rejects
any extra fields like `code` or `details`.
"""
data = {
"type": self.type.value,
"errorText": self.errorText,
}
return f"data: {json_dumps(data)}\n\n"
class StreamHeartbeat(StreamBaseResponse):
"""Heartbeat to keep SSE connection alive during long-running operations.

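The removed `to_sse` override is small enough to restate. A minimal sketch of the strict error framing it produced, per the docstring's note that the AI SDK validates with `z.strictObject({type, errorText})`:

```python
import json

def error_event(error_text: str) -> str:
    # Only {"type", "errorText"} pass the AI SDK's strict schema; extra keys
    # such as "code" or "details" would make the client reject the event.
    payload = {"type": "error", "errorText": error_text}
    return f"data: {json.dumps(payload)}\n\n"

print(error_event("Upstream model timed out"), end="")
# data: {"type": "error", "errorText": "Upstream model timed out"}
```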
View File

@@ -800,13 +800,9 @@ async def stream_chat_completion(
# Build the messages list in the correct order
messages_to_save: list[ChatMessage] = []
# Add assistant message with tool_calls if any.
# Use extend (not assign) to preserve tool_calls already added by
# _yield_tool_call for long-running tools.
# Add assistant message with tool_calls if any
if accumulated_tool_calls:
if not assistant_response.tool_calls:
assistant_response.tool_calls = []
assistant_response.tool_calls.extend(accumulated_tool_calls)
assistant_response.tool_calls = accumulated_tool_calls
logger.info(
f"Added {len(accumulated_tool_calls)} tool calls to assistant message"
)
@@ -1408,9 +1404,13 @@ async def _yield_tool_call(
operation_id=operation_id,
)
# Attach the tool_call to the current turn's assistant message
# (or create one if this is a tool-only response with no text).
session.add_tool_call_to_current_turn(tool_calls[yield_idx])
# Save assistant message with tool_call FIRST (required by LLM)
assistant_message = ChatMessage(
role="assistant",
content="",
tool_calls=[tool_calls[yield_idx]],
)
session.messages.append(assistant_message)
# Then save pending tool result
pending_message = ChatMessage(

View File

@@ -1,6 +1,4 @@
import base64
import json
import logging
import shlex
import uuid
from typing import Literal, Optional
@@ -23,11 +21,6 @@ from backend.data.model import (
)
from backend.integrations.providers import ProviderName
logger = logging.getLogger(__name__)
# Maximum size for binary files to extract (50MB)
MAX_BINARY_FILE_SIZE = 50 * 1024 * 1024
class ClaudeCodeExecutionError(Exception):
"""Exception raised when Claude Code execution fails.
@@ -187,9 +180,7 @@ class ClaudeCodeBlock(Block):
path: str
relative_path: str # Path relative to working directory (for GitHub, etc.)
name: str
content: str # Text content for text files, empty string for binary files
is_binary: bool = False # True if this is a binary file
content_base64: Optional[str] = None # Base64-encoded content for binary files
content: str
class Output(BlockSchemaOutput):
response: str = SchemaField(
@@ -197,11 +188,8 @@ class ClaudeCodeBlock(Block):
)
files: list["ClaudeCodeBlock.FileOutput"] = SchemaField(
description=(
"List of files created/modified by Claude Code during this execution. "
"Each file has 'path', 'relative_path', 'name', 'content', 'is_binary', "
"and 'content_base64' fields. For text files, 'content' contains the text "
"and 'is_binary' is False. For binary files (PDFs, images, etc.), "
"'is_binary' is True and 'content_base64' contains the base64-encoded data."
"List of text files created/modified by Claude Code during this execution. "
"Each file has 'path', 'relative_path', 'name', and 'content' fields."
)
)
conversation_history: str = SchemaField(
@@ -264,8 +252,6 @@ class ClaudeCodeBlock(Block):
"relative_path": "index.html",
"name": "index.html",
"content": "<html>Hello World</html>",
"is_binary": False,
"content_base64": None,
}
],
),
@@ -286,8 +272,6 @@ class ClaudeCodeBlock(Block):
relative_path="index.html",
name="index.html",
content="<html>Hello World</html>",
is_binary=False,
content_base64=None,
)
], # files
"User: Create a hello world HTML file\n"
@@ -547,6 +531,7 @@ class ClaudeCodeBlock(Block):
".env",
".gitignore",
".dockerfile",
"Dockerfile",
".vue",
".svelte",
".astro",
@@ -555,44 +540,6 @@ class ClaudeCodeBlock(Block):
".tex",
".csv",
".log",
".svg", # SVG is XML-based text
}
# Binary file extensions we can read and base64-encode
binary_extensions = {
# Images
".png",
".jpg",
".jpeg",
".gif",
".webp",
".ico",
".bmp",
".tiff",
".tif",
# Documents
".pdf",
# Archives (useful for downloads)
".zip",
".tar",
".gz",
".7z",
# Audio/Video (if small enough)
".mp3",
".wav",
".mp4",
".webm",
# Other binary formats
".woff",
".woff2",
".ttf",
".otf",
".eot",
".bin",
".exe",
".dll",
".so",
".dylib",
}
try:
@@ -617,26 +564,10 @@ class ClaudeCodeBlock(Block):
if not file_path:
continue
# Check if it's a text file we can read (case-insensitive)
file_path_lower = file_path.lower()
# Check if it's a text file we can read
is_text = any(
file_path_lower.endswith(ext) for ext in text_extensions
) or file_path_lower.endswith("dockerfile")
# Check if it's a binary file we should extract
is_binary = any(
file_path_lower.endswith(ext) for ext in binary_extensions
)
# Helper to extract filename and relative path
def get_file_info(path: str, work_dir: str) -> tuple[str, str]:
name = path.split("/")[-1]
rel_path = path
if path.startswith(work_dir):
rel_path = path[len(work_dir) :]
if rel_path.startswith("/"):
rel_path = rel_path[1:]
return name, rel_path
file_path.endswith(ext) for ext in text_extensions
) or file_path.endswith("Dockerfile")
if is_text:
try:
@@ -645,75 +576,32 @@ class ClaudeCodeBlock(Block):
if isinstance(content, bytes):
content = content.decode("utf-8", errors="replace")
file_name, relative_path = get_file_info(
file_path, working_directory
)
# Extract filename from path
file_name = file_path.split("/")[-1]
# Calculate relative path by stripping working directory
relative_path = file_path
if file_path.startswith(working_directory):
relative_path = file_path[len(working_directory) :]
# Remove leading slash if present
if relative_path.startswith("/"):
relative_path = relative_path[1:]
files.append(
ClaudeCodeBlock.FileOutput(
path=file_path,
relative_path=relative_path,
name=file_name,
content=content,
is_binary=False,
content_base64=None,
)
)
except Exception as e:
logger.warning(f"Failed to read text file {file_path}: {e}")
elif is_binary:
try:
# Check file size before reading to avoid OOM
stat_result = await sandbox.commands.run(
f"stat -c %s {shlex.quote(file_path)} 2>/dev/null"
)
if (
stat_result.exit_code != 0
or not stat_result.stdout
):
logger.warning(
f"Skipping binary file {file_path}: "
f"could not determine file size"
)
continue
file_size = int(stat_result.stdout.strip())
if file_size > MAX_BINARY_FILE_SIZE:
logger.warning(
f"Skipping binary file {file_path}: "
f"size {file_size} exceeds limit "
f"{MAX_BINARY_FILE_SIZE}"
)
continue
except Exception:
# Skip files that can't be read
pass
# Read binary file as bytes using format="bytes"
content_bytes = await sandbox.files.read(
file_path, format="bytes"
)
# Base64 encode the binary content
content_b64 = base64.b64encode(content_bytes).decode(
"ascii"
)
file_name, relative_path = get_file_info(
file_path, working_directory
)
files.append(
ClaudeCodeBlock.FileOutput(
path=file_path,
relative_path=relative_path,
name=file_name,
content="", # Empty for binary files
is_binary=True,
content_base64=content_b64,
)
)
except Exception as e:
logger.warning(
f"Failed to read binary file {file_path}: {e}"
)
except Exception as e:
logger.warning(f"File extraction failed: {e}")
except Exception:
# If file extraction fails, return empty results
pass
return files

View File

@@ -21,71 +21,43 @@ logger = logging.getLogger(__name__)
class HumanInTheLoopBlock(Block):
"""
Pauses execution and waits for human approval or rejection of the data.
This block pauses execution and waits for human approval or modification of the data.
When executed, this block creates a pending review entry and sets the node execution
status to REVIEW. The execution remains paused until a human user either approves
or rejects the data.
When executed, it creates a pending review entry and sets the node execution status
to REVIEW. The execution will remain paused until a human user either:
- Approves the data (with or without modifications)
- Rejects the data
**How it works:**
- The input data is presented to a human reviewer
- The reviewer can approve or reject (and optionally modify the data if editable)
- On approval: the data flows out through the `approved_data` output pin
- On rejection: the data flows out through the `rejected_data` output pin
**Important:** The output pins yield the actual data itself, NOT status strings.
The approval/rejection decision determines WHICH output pin fires, not the value.
You do NOT need to compare the output to "APPROVED" or "REJECTED" - simply connect
downstream blocks to the appropriate output pin for each case.
**Example usage:**
- Connect `approved_data` → next step in your workflow (data was approved)
- Connect `rejected_data` → error handling or notification (data was rejected)
This is useful for workflows that require human validation or intervention before
proceeding to the next steps.
"""
class Input(BlockSchemaInput):
data: Any = SchemaField(
description="The data to be reviewed by a human user. "
"This exact data will be passed through to either approved_data or "
"rejected_data output based on the reviewer's decision."
)
data: Any = SchemaField(description="The data to be reviewed by a human user")
name: str = SchemaField(
description="A descriptive name for what this data represents. "
"This helps the reviewer understand what they are reviewing.",
description="A descriptive name for what this data represents",
)
editable: bool = SchemaField(
description="Whether the human reviewer can edit the data before "
"approving or rejecting it",
description="Whether the human reviewer can edit the data",
default=True,
advanced=True,
)
class Output(BlockSchemaOutput):
approved_data: Any = SchemaField(
description="Outputs the input data when the reviewer APPROVES it. "
"The value is the actual data itself (not a status string like 'APPROVED'). "
"If the reviewer edited the data, this contains the modified version. "
"Connect downstream blocks here for the 'approved' workflow path."
description="The data when approved (may be modified by reviewer)"
)
rejected_data: Any = SchemaField(
description="Outputs the input data when the reviewer REJECTS it. "
"The value is the actual data itself (not a status string like 'REJECTED'). "
"If the reviewer edited the data, this contains the modified version. "
"Connect downstream blocks here for the 'rejected' workflow path."
description="The data when rejected (may be modified by reviewer)"
)
review_message: str = SchemaField(
description="Optional message provided by the reviewer explaining their "
"decision. Only outputs when the reviewer provides a message; "
"this pin does not fire if no message was given.",
default="",
description="Any message provided by the reviewer", default=""
)
def __init__(self):
super().__init__(
id="8b2a7b3c-6e9d-4a5f-8c1b-2e3f4a5b6c7d",
description="Pause execution for human review. Data flows through "
"approved_data or rejected_data output based on the reviewer's decision. "
"Outputs contain the actual data, not status strings.",
description="Pause execution and wait for human approval or modification of data",
categories={BlockCategory.BASIC},
input_schema=HumanInTheLoopBlock.Input,
output_schema=HumanInTheLoopBlock.Output,

View File

@@ -743,11 +743,6 @@ class GraphModel(Graph, GraphMeta):
# For invalid blocks, we still raise immediately as this is a structural issue
raise ValueError(f"Invalid block {node.block_id} for node #{node.id}")
if block.disabled:
raise ValueError(
f"Block {node.block_id} is disabled and cannot be used in graphs"
)
node_input_mask = (
nodes_input_masks.get(node.id, {}) if nodes_input_masks else {}
)

View File

@@ -213,9 +213,6 @@ async def execute_node(
block_name=node_block.name,
)
if node_block.disabled:
raise ValueError(f"Block {node_block.id} is disabled and cannot be executed")
# Sanity check: validate the execution input.
input_data, error = validate_exec(node, data.inputs, resolve_input=False)
if input_data is None:

View File

@@ -364,44 +364,6 @@ def _remove_orphan_tool_responses(
return result
def validate_and_remove_orphan_tool_responses(
messages: list[dict],
log_warning: bool = True,
) -> list[dict]:
"""
Validate tool_call/tool_response pairs and remove orphaned responses.
Scans messages in order, tracking all tool_call IDs. Any tool response
referencing an ID not seen in a preceding message is considered orphaned
and removed. This prevents API errors like Anthropic's "unexpected tool_use_id".
Args:
messages: List of messages to validate (OpenAI or Anthropic format)
log_warning: Whether to log a warning when orphans are found
Returns:
A new list with orphaned tool responses removed
"""
available_ids: set[str] = set()
orphan_ids: set[str] = set()
for msg in messages:
available_ids |= _extract_tool_call_ids_from_message(msg)
for resp_id in _extract_tool_response_ids_from_message(msg):
if resp_id not in available_ids:
orphan_ids.add(resp_id)
if not orphan_ids:
return messages
if log_warning:
logger.warning(
f"Removing {len(orphan_ids)} orphan tool response(s): {orphan_ids}"
)
return _remove_orphan_tool_responses(messages, orphan_ids)
def _ensure_tool_pairs_intact(
recent_messages: list[dict],
all_messages: list[dict],
@@ -761,13 +723,6 @@ async def compress_context(
# Filter out any None values that may have been introduced
final_msgs: list[dict] = [m for m in msgs if m is not None]
# ---- STEP 6: Final tool-pair validation ---------------------------------
# After all compression steps, verify that every tool response has a
# matching tool_call in a preceding assistant message. Remove orphans
# to prevent API errors (e.g., Anthropic's "unexpected tool_use_id").
final_msgs = validate_and_remove_orphan_tool_responses(final_msgs)
final_count = sum(_msg_tokens(m, enc) for m in final_msgs)
error = None
if final_count + reserve > target_tokens:

View File

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand.
[[package]]
name = "aio-pika"
@@ -374,7 +374,7 @@ description = "LTS Port of Python audioop"
optional = false
python-versions = ">=3.13"
groups = ["main"]
markers = "python_version >= \"3.13\""
markers = "python_version == \"3.13\""
files = [
{file = "audioop_lts-0.2.2-cp313-abi3-macosx_10_13_universal2.whl", hash = "sha256:fd3d4602dc64914d462924a08c1a9816435a2155d74f325853c1f1ac3b2d9800"},
{file = "audioop_lts-0.2.2-cp313-abi3-macosx_10_13_x86_64.whl", hash = "sha256:550c114a8df0aafe9a05442a1162dfc8fec37e9af1d625ae6060fed6e756f303"},
@@ -474,7 +474,7 @@ description = "Backport of asyncio.Runner, a context manager that controls event
optional = false
python-versions = "<3.11,>=3.8"
groups = ["main"]
markers = "python_version < \"3.11\""
markers = "python_version == \"3.10\""
files = [
{file = "backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5"},
{file = "backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162"},
@@ -487,7 +487,7 @@ description = "Backport of CPython tarfile module"
optional = false
python-versions = ">=3.8"
groups = ["main"]
markers = "python_version <= \"3.11\""
markers = "python_version < \"3.12\""
files = [
{file = "backports.tarfile-1.2.0-py3-none-any.whl", hash = "sha256:77e284d754527b01fb1e6fa8a1afe577858ebe4e9dad8919e34c862cb399bc34"},
{file = "backports_tarfile-1.2.0.tar.gz", hash = "sha256:d75e02c268746e1b8144c278978b6e98e85de6ad16f8e4b0844a154557eca991"},
@@ -563,6 +563,18 @@ webencodings = "*"
[package.extras]
css = ["tinycss2 (>=1.1.0,<1.5)"]
[[package]]
name = "bracex"
version = "2.6"
description = "Bash style brace expander."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "bracex-2.6-py3-none-any.whl", hash = "sha256:0b0049264e7340b3ec782b5cb99beb325f36c3782a32e36e876452fd49a09952"},
{file = "bracex-2.6.tar.gz", hash = "sha256:98f1347cd77e22ee8d967a30ad4e310b233f7754dbf31ff3fceb76145ba47dc7"},
]
[[package]]
name = "browserbase"
version = "1.4.0"
@@ -659,7 +671,6 @@ description = "Foreign Function Interface for Python calling C code."
optional = false
python-versions = ">=3.9"
groups = ["main"]
markers = "platform_python_implementation != \"PyPy\" or sys_platform == \"darwin\""
files = [
{file = "cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44"},
{file = "cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49"},
@@ -1149,6 +1160,18 @@ idna = ["idna (>=3.10)"]
trio = ["trio (>=0.30)"]
wmi = ["wmi (>=1.5.1) ; platform_system == \"Windows\""]
[[package]]
name = "dockerfile-parse"
version = "2.0.1"
description = "Python library for Dockerfile manipulation"
optional = false
python-versions = ">=3.6"
groups = ["main"]
files = [
{file = "dockerfile-parse-2.0.1.tar.gz", hash = "sha256:3184ccdc513221983e503ac00e1aa504a2aa8f84e5de673c46b0b6eee99ec7bc"},
{file = "dockerfile_parse-2.0.1-py2.py3-none-any.whl", hash = "sha256:bdffd126d2eb26acf1066acb54cb2e336682e1d72b974a40894fac76a4df17f6"},
]
[[package]]
name = "docstring-parser"
version = "0.17.0"
@@ -1235,40 +1258,43 @@ pgp = ["gpg"]
[[package]]
name = "e2b"
version = "1.11.1"
version = "2.13.2"
description = "E2B SDK that give agents cloud environments"
optional = false
python-versions = "<4.0,>=3.9"
python-versions = "<4.0,>=3.10"
groups = ["main"]
files = [
{file = "e2b-1.11.1-py3-none-any.whl", hash = "sha256:1ecb123873788472731c101939a494ab852cbcce0f913df6f7ecb194ae932130"},
{file = "e2b-1.11.1.tar.gz", hash = "sha256:7f7b6f238208d0a23353bb0da01f91a924321b57c61b176506862cbc1493ce8c"},
{file = "e2b-2.13.2-py3-none-any.whl", hash = "sha256:d91d5293bc0dd1917c72a6e6b35e86513607be2666a14ae18c57b921e7864de4"},
{file = "e2b-2.13.2.tar.gz", hash = "sha256:c0e81a3920091874fdf73c0b8f376b28766212db9f1cea5d8bd56a2e95d2436c"},
]
[package.dependencies]
attrs = ">=23.2.0"
dockerfile-parse = ">=2.0.1,<3.0.0"
httpcore = ">=1.0.5,<2.0.0"
httpx = ">=0.27.0,<1.0.0"
packaging = ">=24.1"
protobuf = ">=4.21.0"
python-dateutil = ">=2.8.2"
rich = ">=14.0.0"
typing-extensions = ">=4.1.0"
wcmatch = ">=10.1,<11.0"
[[package]]
name = "e2b-code-interpreter"
version = "1.5.2"
version = "2.4.1"
description = "E2B Code Interpreter - Stateful code execution"
optional = false
python-versions = "<4.0,>=3.9"
groups = ["main"]
files = [
{file = "e2b_code_interpreter-1.5.2-py3-none-any.whl", hash = "sha256:5c3188d8f25226b28fef4b255447cc6a4c36afb748bdd5180b45be486d5169f3"},
{file = "e2b_code_interpreter-1.5.2.tar.gz", hash = "sha256:3bd6ea70596290e85aaf0a2f19f28bf37a5e73d13086f5e6a0080bb591c5a547"},
{file = "e2b_code_interpreter-2.4.1-py3-none-any.whl", hash = "sha256:15d35f025b4a15033e119f2e12e7ac65657ad2b5a013fa9149e74581fbee778a"},
{file = "e2b_code_interpreter-2.4.1.tar.gz", hash = "sha256:4b15014ee0d0dfcdc3072e1f409cbb87ca48f48d53d75629b7257e5513b9e7dd"},
]
[package.dependencies]
attrs = ">=21.3.0"
e2b = ">=1.5.4,<2.0.0"
e2b = ">=2.7.0,<3.0.0"
httpx = ">=0.20.0,<1.0.0"
[[package]]
@@ -1338,7 +1364,7 @@ description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
groups = ["main", "dev"]
markers = "python_version < \"3.11\""
markers = "python_version == \"3.10\""
files = [
{file = "exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598"},
{file = "exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219"},
@@ -1820,16 +1846,16 @@ files = [
google-auth = ">=2.14.1,<3.0.0"
googleapis-common-protos = ">=1.56.2,<2.0.0"
grpcio = [
{version = ">=1.33.2,<2.0.0", optional = true, markers = "extra == \"grpc\""},
{version = ">=1.49.1,<2.0.0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
{version = ">=1.33.2,<2.0.0", optional = true, markers = "extra == \"grpc\""},
]
grpcio-status = [
{version = ">=1.33.2,<2.0.0", optional = true, markers = "extra == \"grpc\""},
{version = ">=1.49.1,<2.0.0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
{version = ">=1.33.2,<2.0.0", optional = true, markers = "extra == \"grpc\""},
]
proto-plus = [
{version = ">=1.22.3,<2.0.0"},
{version = ">=1.25.0,<2.0.0", markers = "python_version >= \"3.13\""},
{version = ">=1.22.3,<2.0.0"},
]
protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0"
requests = ">=2.18.0,<3.0.0"
@@ -1940,8 +1966,8 @@ google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0", extras
google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0"
grpcio = ">=1.33.2,<2.0.0"
proto-plus = [
{version = ">=1.22.3,<2.0.0"},
{version = ">=1.25.0,<2.0.0", markers = "python_version >= \"3.13\""},
{version = ">=1.22.3,<2.0.0"},
]
protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0"
@@ -2001,9 +2027,9 @@ google-cloud-core = ">=2.0.0,<3.0.0"
grpc-google-iam-v1 = ">=0.12.4,<1.0.0"
opentelemetry-api = ">=1.9.0"
proto-plus = [
{version = ">=1.22.0,<2.0.0"},
{version = ">=1.22.2,<2.0.0", markers = "python_version >= \"3.11\""},
{version = ">=1.25.0,<2.0.0", markers = "python_version >= \"3.13\""},
{version = ">=1.22.2,<2.0.0", markers = "python_version >= \"3.11\" and python_version < \"3.13\""},
{version = ">=1.22.0,<2.0.0", markers = "python_version < \"3.11\""},
]
protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0"
@@ -3802,7 +3828,7 @@ description = "Fundamental package for array computing in Python"
optional = false
python-versions = ">=3.10"
groups = ["main"]
markers = "python_version < \"3.11\""
markers = "python_version == \"3.10\""
files = [
{file = "numpy-2.2.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b412caa66f72040e6d268491a59f2c43bf03eb6c96dd8f0307829feb7fa2b6fb"},
{file = "numpy-2.2.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e41fd67c52b86603a91c1a505ebaef50b3314de0213461c7a6e99c9a3beff90"},
@@ -4287,9 +4313,9 @@ files = [
[package.dependencies]
numpy = [
{version = ">=1.22.4", markers = "python_version < \"3.11\""},
{version = ">=1.23.2", markers = "python_version == \"3.11\""},
{version = ">=1.26.0", markers = "python_version >= \"3.12\""},
{version = ">=1.23.2", markers = "python_version == \"3.11\""},
{version = ">=1.22.4", markers = "python_version < \"3.11\""},
]
python-dateutil = ">=2.8.2"
pytz = ">=2020.1"
@@ -4532,8 +4558,8 @@ pinecone-plugin-interface = ">=0.0.7,<0.0.8"
python-dateutil = ">=2.5.3"
typing-extensions = ">=3.7.4"
urllib3 = [
{version = ">=1.26.0", markers = "python_version >= \"3.8\" and python_version < \"3.12\""},
{version = ">=1.26.5", markers = "python_version >= \"3.12\" and python_version < \"4.0\""},
{version = ">=1.26.0", markers = "python_version >= \"3.8\" and python_version < \"3.12\""},
]
[package.extras]
@@ -5361,7 +5387,7 @@ description = "C parser in Python"
optional = false
python-versions = ">=3.10"
groups = ["main"]
markers = "(platform_python_implementation != \"PyPy\" or sys_platform == \"darwin\") and implementation_name != \"PyPy\""
markers = "implementation_name != \"PyPy\""
files = [
{file = "pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992"},
{file = "pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29"},
@@ -6130,10 +6156,10 @@ files = [
grpcio = ">=1.41.0"
httpx = {version = ">=0.20.0", extras = ["http2"]}
numpy = [
{version = ">=1.21,<2.3.0", markers = "python_version == \"3.10\""},
{version = ">=1.21", markers = "python_version == \"3.11\""},
{version = ">=2.1.0", markers = "python_version == \"3.13\""},
{version = ">=1.21", markers = "python_version == \"3.11\""},
{version = ">=1.26", markers = "python_version == \"3.12\""},
{version = ">=1.21,<2.3.0", markers = "python_version == \"3.10\""},
]
portalocker = ">=2.7.0,<4.0"
protobuf = ">=3.20.0"
@@ -7317,7 +7343,7 @@ description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
groups = ["main", "dev"]
markers = "python_version < \"3.11\""
markers = "python_version == \"3.10\""
files = [
{file = "tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867"},
{file = "tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9"},
@@ -7841,6 +7867,21 @@ files = [
[package.dependencies]
anyio = ">=3.0.0"
[[package]]
name = "wcmatch"
version = "10.1"
description = "Wildcard/glob file name matcher."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "wcmatch-10.1-py3-none-any.whl", hash = "sha256:5848ace7dbb0476e5e55ab63c6bbd529745089343427caa5537f230cc01beb8a"},
{file = "wcmatch-10.1.tar.gz", hash = "sha256:f11f94208c8c8484a16f4f48638a85d771d9513f4ab3f37595978801cb9465af"},
]
[package.dependencies]
bracex = ">=2.1.1"
[[package]]
name = "webencodings"
version = "0.5.1"
@@ -8440,4 +8481,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af"
content-hash = "e2416f607a681b3863b6e0f5595b88daf70aa851b973a938a5b79ece445d65d3"

View File

@@ -19,7 +19,7 @@ bleach = { extras = ["css"], version = "^6.2.0" }
click = "^8.2.0"
cryptography = "^46.0"
discord-py = "^2.5.2"
e2b-code-interpreter = "^1.5.2"
e2b-code-interpreter = "^2.4.1"
elevenlabs = "^1.50.0"
fastapi = "^0.128.6"
feedparser = "^6.0.11"

View File

@@ -10,9 +10,8 @@ import {
MessageResponse,
} from "@/components/ai-elements/message";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { toast } from "@/components/molecules/Toast/use-toast";
import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai";
import { useEffect, useRef, useState } from "react";
import { useEffect, useState } from "react";
import { CreateAgentTool } from "../../tools/CreateAgent/CreateAgent";
import { EditAgentTool } from "../../tools/EditAgent/EditAgent";
import { FindAgentsTool } from "../../tools/FindAgents/FindAgents";
@@ -122,7 +121,6 @@ export const ChatMessagesContainer = ({
isLoading,
}: ChatMessagesContainerProps) => {
const [thinkingPhrase, setThinkingPhrase] = useState(getRandomPhrase);
const lastToastTimeRef = useRef(0);
useEffect(() => {
if (status === "submitted") {
@@ -130,20 +128,6 @@ export const ChatMessagesContainer = ({
}
}, [status]);
// Show a toast when a new error occurs, debounced to avoid spam
useEffect(() => {
if (!error) return;
const now = Date.now();
if (now - lastToastTimeRef.current < 3_000) return;
lastToastTimeRef.current = now;
toast({
variant: "destructive",
title: "Something went wrong",
description:
"The assistant encountered an error. Please try sending your message again.",
});
}, [error]);
const lastMessage = messages[messages.length - 1];
const lastAssistantHasVisibleContent =
lastMessage?.role === "assistant" &&
@@ -279,12 +263,8 @@ export const ChatMessagesContainer = ({
</Message>
)}
{error && (
<div className="rounded-lg bg-red-50 p-4 text-sm text-red-700">
<p className="font-medium">Something went wrong</p>
<p className="mt-1 text-red-600">
The assistant encountered an error. Please try sending your
message again.
</p>
<div className="rounded-lg bg-red-50 p-3 text-red-600">
Error: {error.message}
</div>
)}
</ConversationContent>

View File

@@ -30,7 +30,7 @@ export function ContentCard({
return (
<div
className={cn(
"min-w-0 rounded-lg bg-gradient-to-r from-purple-500/30 to-blue-500/30 p-[1px]",
"rounded-lg bg-gradient-to-r from-purple-500/30 to-blue-500/30 p-[1px]",
className,
)}
>

View File

@@ -4,6 +4,7 @@ import { WarningDiamondIcon } from "@phosphor-icons/react";
import type { ToolUIPart } from "ai";
import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions";
import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
import { ProgressBar } from "../../components/ProgressBar/ProgressBar";
import {
ContentCardDescription,
@@ -76,7 +77,7 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
isOperationInProgressOutput(output)
) {
return {
icon,
icon: <OrbitLoader size={32} />,
title: "Creating agent, this may take a few minutes. Sit back and relax.",
};
}

View File

@@ -203,7 +203,7 @@ export function getAccordionMeta(output: RunAgentToolOutput): {
? output.status.trim()
: "started";
return {
icon,
icon: <OrbitLoader size={28} className="text-neutral-700" />,
title: output.graph_name,
description: `Status: ${statusText}`,
};

View File

@@ -149,7 +149,7 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
if (isRunBlockBlockOutput(output)) {
const keys = Object.keys(output.outputs ?? {});
return {
icon,
icon: <OrbitLoader size={24} className="text-neutral-700" />,
title: output.block_name,
description:
keys.length > 0

View File

@@ -1,8 +1,11 @@
import { environment } from "@/services/environment";
import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers";
import { NextRequest } from "next/server";
import { normalizeSSEStream, SSE_HEADERS } from "../../../sse-helpers";
/**
* SSE Proxy for chat streaming.
* Supports POST with context (page content + URL) in the request body.
*/
export async function POST(
request: NextRequest,
{ params }: { params: Promise<{ sessionId: string }> },
@@ -20,14 +23,17 @@ export async function POST(
);
}
// Get auth token from server-side session
const token = await getServerAuthToken();
// Build backend URL
const backendUrl = environment.getAGPTServerBaseUrl();
const streamUrl = new URL(
`/api/chat/sessions/${sessionId}/stream`,
backendUrl,
);
// Forward request to backend with auth header
const headers: Record<string, string> = {
"Content-Type": "application/json",
Accept: "text/event-stream",
@@ -57,15 +63,14 @@ export async function POST(
});
}
if (!response.body) {
return new Response(
JSON.stringify({ error: "Empty response from chat service" }),
{ status: 502, headers: { "Content-Type": "application/json" } },
);
}
return new Response(normalizeSSEStream(response.body), {
headers: SSE_HEADERS,
// Return the SSE stream directly
return new Response(response.body, {
headers: {
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache, no-transform",
Connection: "keep-alive",
"X-Accel-Buffering": "no",
},
});
} catch (error) {
console.error("SSE proxy error:", error);
@@ -82,6 +87,13 @@ export async function POST(
}
}
/**
* Resume an active stream for a session.
*
* Called by the AI SDK's `useChat(resume: true)` on page load.
* Proxies to the backend which checks for an active stream and either
* replays it (200 + SSE) or returns 204 No Content.
*/
export async function GET(
_request: NextRequest,
{ params }: { params: Promise<{ sessionId: string }> },
@@ -112,6 +124,7 @@ export async function GET(
headers,
});
// 204 = no active stream to resume
if (response.status === 204) {
return new Response(null, { status: 204 });
}
@@ -124,13 +137,12 @@ export async function GET(
});
}
if (!response.body) {
return new Response(null, { status: 204 });
}
return new Response(normalizeSSEStream(response.body), {
return new Response(response.body, {
headers: {
...SSE_HEADERS,
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache, no-transform",
Connection: "keep-alive",
"X-Accel-Buffering": "no",
"x-vercel-ai-ui-message-stream": "v1",
},
});

View File

@@ -1,72 +0,0 @@
export const SSE_HEADERS = {
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache, no-transform",
Connection: "keep-alive",
"X-Accel-Buffering": "no",
} as const;
export function normalizeSSEStream(
input: ReadableStream<Uint8Array>,
): ReadableStream<Uint8Array> {
const decoder = new TextDecoder();
const encoder = new TextEncoder();
let buffer = "";
return input.pipeThrough(
new TransformStream<Uint8Array, Uint8Array>({
transform(chunk, controller) {
buffer += decoder.decode(chunk, { stream: true });
const parts = buffer.split("\n\n");
buffer = parts.pop() ?? "";
for (const part of parts) {
const normalized = normalizeSSEEvent(part);
controller.enqueue(encoder.encode(normalized + "\n\n"));
}
},
flush(controller) {
if (buffer.trim()) {
const normalized = normalizeSSEEvent(buffer);
controller.enqueue(encoder.encode(normalized + "\n\n"));
}
},
}),
);
}
function normalizeSSEEvent(event: string): string {
const lines = event.split("\n");
const dataLines: string[] = [];
const otherLines: string[] = [];
for (const line of lines) {
if (line.startsWith("data: ")) {
dataLines.push(line.slice(6));
} else {
otherLines.push(line);
}
}
if (dataLines.length === 0) return event;
const dataStr = dataLines.join("\n");
try {
const parsed = JSON.parse(dataStr) as Record<string, unknown>;
if (parsed.type === "error") {
const normalized = {
type: "error",
errorText:
typeof parsed.errorText === "string"
? parsed.errorText
: "An unexpected error occurred",
};
const newData = `data: ${JSON.stringify(normalized)}`;
return [...otherLines.filter((l) => l.length > 0), newData].join("\n");
}
} catch {
// Not valid JSON — pass through as-is
}
return event;
}

View File

@@ -1,8 +1,20 @@
import { environment } from "@/services/environment";
import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers";
import { NextRequest } from "next/server";
import { normalizeSSEStream, SSE_HEADERS } from "../../../sse-helpers";
/**
* SSE Proxy for task stream reconnection.
*
* This endpoint allows clients to reconnect to an ongoing or recently completed
* background task's stream. It replays missed messages from Redis Streams and
* subscribes to live updates if the task is still running.
*
* Client contract:
* 1. When receiving an operation_started event, store the task_id
* 2. To reconnect: GET /api/chat/tasks/{taskId}/stream?last_message_id={idx}
* 3. Messages are replayed from the last_message_id position
* 4. Stream ends when "finish" event is received
*/
export async function GET(
request: NextRequest,
{ params }: { params: Promise<{ taskId: string }> },
@@ -12,12 +24,15 @@ export async function GET(
const lastMessageId = searchParams.get("last_message_id") || "0-0";
try {
// Get auth token from server-side session
const token = await getServerAuthToken();
// Build backend URL
const backendUrl = environment.getAGPTServerBaseUrl();
const streamUrl = new URL(`/api/chat/tasks/${taskId}/stream`, backendUrl);
streamUrl.searchParams.set("last_message_id", lastMessageId);
// Forward request to backend with auth header
const headers: Record<string, string> = {
Accept: "text/event-stream",
"Cache-Control": "no-cache",
@@ -41,12 +56,14 @@ export async function GET(
});
}
if (!response.body) {
return new Response(null, { status: 204 });
}
return new Response(normalizeSSEStream(response.body), {
headers: SSE_HEADERS,
// Return the SSE stream directly
return new Response(response.body, {
headers: {
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache, no-transform",
Connection: "keep-alive",
"X-Accel-Buffering": "no",
},
});
} catch (error) {
console.error("Task stream proxy error:", error);

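For reference, a hedged client-side sketch of the reconnection contract described in the doc comment above. `httpx` and the `localhost:3000` base URL are assumptions here; any SSE-capable HTTP client works:

```python
import httpx

def resume_task_stream(task_id: str, last_message_id: str = "0-0") -> None:
    # last_message_id is the Redis Streams cursor; "0-0" replays from the start.
    with httpx.Client(base_url="http://localhost:3000") as client:  # assumed host
        with client.stream(
            "GET",
            f"/api/chat/tasks/{task_id}/stream",
            params={"last_message_id": last_message_id},
        ) as response:
            for line in response.iter_lines():
                if line.startswith("data: "):
                    print(line[len("data: "):])  # replayed, then live events
            # Per the contract, the stream closes after the "finish" event.
```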
View File

@@ -61,7 +61,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
| [Get List Item](block-integrations/basic.md#get-list-item) | Returns the element at the given index |
| [Get Store Agent Details](block-integrations/system/store_operations.md#get-store-agent-details) | Get detailed information about an agent from the store |
| [Get Weather Information](block-integrations/basic.md#get-weather-information) | Retrieves weather information for a specified location using OpenWeatherMap API |
| [Human In The Loop](block-integrations/basic.md#human-in-the-loop) | Pause execution for human review |
| [Human In The Loop](block-integrations/basic.md#human-in-the-loop) | Pause execution and wait for human approval or modification of data |
| [List Is Empty](block-integrations/basic.md#list-is-empty) | Checks if a list is empty |
| [List Library Agents](block-integrations/system/library_operations.md#list-library-agents) | List all agents in your personal library |
| [Note](block-integrations/basic.md#note) | A visual annotation block that displays a sticky note in the workflow editor for documentation and organization purposes |

View File

@@ -975,7 +975,7 @@ A travel planning application could use this block to provide users with current
## Human In The Loop
### What it is
Pause execution for human review. Data flows through approved_data or rejected_data output based on the reviewer's decision. Outputs contain the actual data, not status strings.
Pause execution and wait for human approval or modification of data
### How it works
<!-- MANUAL: how_it_works -->
@@ -988,18 +988,18 @@ This enables human oversight at critical points in automated workflows, ensuring
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| data | The data to be reviewed by a human user. This exact data will be passed through to either approved_data or rejected_data output based on the reviewer's decision. | Data | Yes |
| name | A descriptive name for what this data represents. This helps the reviewer understand what they are reviewing. | str | Yes |
| editable | Whether the human reviewer can edit the data before approving or rejecting it | bool | No |
| data | The data to be reviewed by a human user | Data | Yes |
| name | A descriptive name for what this data represents | str | Yes |
| editable | Whether the human reviewer can edit the data | bool | No |
### Outputs
| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| approved_data | Outputs the input data when the reviewer APPROVES it. The value is the actual data itself (not a status string like 'APPROVED'). If the reviewer edited the data, this contains the modified version. Connect downstream blocks here for the 'approved' workflow path. | Approved Data |
| rejected_data | Outputs the input data when the reviewer REJECTS it. The value is the actual data itself (not a status string like 'REJECTED'). If the reviewer edited the data, this contains the modified version. Connect downstream blocks here for the 'rejected' workflow path. | Rejected Data |
| review_message | Optional message provided by the reviewer explaining their decision. Only outputs when the reviewer provides a message; this pin does not fire if no message was given. | str |
| approved_data | The data when approved (may be modified by reviewer) | Approved Data |
| rejected_data | The data when rejected (may be modified by reviewer) | Rejected Data |
| review_message | Any message provided by the reviewer | str |
### Possible use case
<!-- MANUAL: use_case -->

View File

@@ -16,7 +16,7 @@ When activated, the block:
- Install dependencies (npm, pip, etc.)
- Run terminal commands
- Build and test applications
5. Extracts all text and binary files created/modified during execution
5. Extracts all text files created/modified during execution
6. Returns the response and files, optionally keeping the sandbox alive for follow-up tasks
The block supports conversation continuation through three mechanisms:
@@ -42,7 +42,7 @@ The block supports conversation continuation through three mechanisms:
| Output | Description |
|--------|-------------|
| Response | The output/response from Claude Code execution |
| Files | List of files created/modified during execution. Each file includes path, relative_path, name, content, is_binary, and content_base64 fields. For text files, content contains the text and is_binary is False. For binary files (PDFs, images, etc.), is_binary is True and content_base64 contains the base64-encoded data |
| Files | List of text files created/modified during execution. Each file includes path, relative_path, name, and content fields |
| Conversation History | Full conversation history including this turn. Use to restore context on a fresh sandbox |
| Session ID | Session ID for this conversation. Pass back with sandbox_id to continue the conversation |
| Sandbox ID | ID of the sandbox instance (null if disposed). Pass back with session_id to continue the conversation |

View File

@@ -535,7 +535,7 @@ When activated, the block:
2. Installs the latest version of Claude Code in the sandbox
3. Optionally runs setup commands to prepare the environment
4. Executes your prompt using Claude Code, which can create/edit files, install dependencies, run terminal commands, and build applications
5. Extracts all text and binary files created/modified during execution
5. Extracts all text files created/modified during execution
6. Returns the response and files, optionally keeping the sandbox alive for follow-up tasks
The block supports conversation continuation through three mechanisms:
@@ -563,7 +563,7 @@ The block supports conversation continuation through three mechanisms:
|--------|-------------|------|
| error | Error message if execution failed | str |
| response | The output/response from Claude Code execution | str |
| files | List of files created/modified by Claude Code during this execution. Each file has 'path', 'relative_path', 'name', 'content', 'is_binary', and 'content_base64' fields. For text files, 'content' contains the text and 'is_binary' is False. For binary files (PDFs, images, etc.), 'is_binary' is True and 'content_base64' contains the base64-encoded data. | List[FileOutput] |
| files | List of text files created/modified by Claude Code during this execution. Each file has 'path', 'relative_path', 'name', and 'content' fields. | List[FileOutput] |
| conversation_history | Full conversation history including this turn. Pass this to conversation_history input to continue on a fresh sandbox if the previous sandbox timed out. | str |
| session_id | Session ID for this conversation. Pass this back along with sandbox_id to continue the conversation. | str |
| sandbox_id | ID of the sandbox instance. Pass this back along with session_id to continue the conversation. This is None if dispose_sandbox was True (sandbox was disposed). | str |