Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-02-12 07:45:14 -05:00)

Compare commits: 17 commits, master...feat/sandb
| SHA1 |
|---|
| 343854c0cf |
| 1026f437a9 |
| a2856c1863 |
| d9daf3e6db |
| 66f9f3a12a |
| 9c4c29b096 |
| 2f2a031b2c |
| e72c6681d8 |
| 8bed3aee27 |
| 488ba642c6 |
| c839fee53d |
| 931c1c2fcd |
| 3f36be2d7a |
| b98fbc40ee |
| 36aeb0b2b3 |
| 2a189c44c4 |
| 508759610f |
@@ -10,6 +10,8 @@ from typing import Any
 
 from pydantic import BaseModel, Field
 
+from backend.util.json import dumps as json_dumps
+
 
 class ResponseType(str, Enum):
     """Types of streaming responses following AI SDK protocol."""
@@ -193,6 +195,18 @@ class StreamError(StreamBaseResponse):
         default=None, description="Additional error details"
     )
 
+    def to_sse(self) -> str:
+        """Convert to SSE format, only emitting fields required by AI SDK protocol.
+
+        The AI SDK uses z.strictObject({type, errorText}) which rejects
+        any extra fields like `code` or `details`.
+        """
+        data = {
+            "type": self.type.value,
+            "errorText": self.errorText,
+        }
+        return f"data: {json_dumps(data)}\n\n"
+
 
 class StreamHeartbeat(StreamBaseResponse):
     """Heartbeat to keep SSE connection alive during long-running operations.
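The strictness noted in the `to_sse` docstring is easy to reproduce: the AI SDK validates each error event against `z.strictObject({type, errorText})`, so any extra key fails validation on the client. A minimal sketch of the producer side, using the standard `json` module in place of `backend.util.json`:

```python
import json

def error_to_sse(error_text: str) -> str:
    # Only "type" and "errorText" may appear; an extra key such as "code"
    # would be rejected by the AI SDK's strict schema on the client.
    data = {"type": "error", "errorText": error_text}
    return f"data: {json.dumps(data)}\n\n"

print(error_to_sse("upstream timeout"), end="")
# data: {"type": "error", "errorText": "upstream timeout"}
```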
@@ -1,10 +1,10 @@
 import json
 import shlex
 import uuid
-from typing import Literal, Optional
+from typing import TYPE_CHECKING, Literal, Optional
 
 from e2b import AsyncSandbox as BaseAsyncSandbox
-from pydantic import BaseModel, SecretStr
+from pydantic import SecretStr
 
 from backend.data.block import (
     Block,
@@ -20,6 +20,13 @@ from backend.data.model import (
     SchemaField,
 )
 from backend.integrations.providers import ProviderName
+from backend.util.sandbox_files import (
+    SandboxFileOutput,
+    extract_and_store_sandbox_files,
+)
+
+if TYPE_CHECKING:
+    from backend.executor.utils import ExecutionContext
 
 
 class ClaudeCodeExecutionError(Exception):
@@ -174,22 +181,15 @@ class ClaudeCodeBlock(Block):
         advanced=True,
     )
 
-    class FileOutput(BaseModel):
-        """A file extracted from the sandbox."""
-
-        path: str
-        relative_path: str  # Path relative to working directory (for GitHub, etc.)
-        name: str
-        content: str
-
     class Output(BlockSchemaOutput):
         response: str = SchemaField(
             description="The output/response from Claude Code execution"
         )
-        files: list["ClaudeCodeBlock.FileOutput"] = SchemaField(
+        files: list[SandboxFileOutput] = SchemaField(
             description=(
                 "List of text files created/modified by Claude Code during this execution. "
-                "Each file has 'path', 'relative_path', 'name', and 'content' fields."
+                "Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. "
+                "workspace_ref contains a workspace:// URI if the file was stored to workspace."
             )
         )
         conversation_history: str = SchemaField(
@@ -252,6 +252,7 @@ class ClaudeCodeBlock(Block):
                         "relative_path": "index.html",
                         "name": "index.html",
                         "content": "<html>Hello World</html>",
+                        "workspace_ref": None,
                     }
                 ],
             ),
@@ -267,11 +268,12 @@ class ClaudeCodeBlock(Block):
                 "execute_claude_code": lambda *args, **kwargs: (
                     "Created index.html with hello world content",  # response
                     [
-                        ClaudeCodeBlock.FileOutput(
+                        SandboxFileOutput(
                             path="/home/user/index.html",
                             relative_path="index.html",
                             name="index.html",
                             content="<html>Hello World</html>",
+                            workspace_ref=None,
                         )
                     ],  # files
                     "User: Create a hello world HTML file\n"
@@ -294,7 +296,8 @@ class ClaudeCodeBlock(Block):
         existing_sandbox_id: str,
         conversation_history: str,
         dispose_sandbox: bool,
-    ) -> tuple[str, list["ClaudeCodeBlock.FileOutput"], str, str, str]:
+        execution_context: "ExecutionContext",
+    ) -> tuple[str, list[SandboxFileOutput], str, str, str]:
         """
         Execute Claude Code in an E2B sandbox.
 
@@ -449,14 +452,18 @@ class ClaudeCodeBlock(Block):
         else:
             new_conversation_history = turn_entry
 
-        # Extract files created/modified during this run
-        files = await self._extract_files(
-            sandbox, working_directory, start_timestamp
+        # Extract files created/modified during this run and store to workspace
+        sandbox_files = await extract_and_store_sandbox_files(
+            sandbox=sandbox,
+            working_directory=working_directory,
+            execution_context=execution_context,
+            since_timestamp=start_timestamp,
+            text_only=True,
         )
 
         return (
             response,
-            files,
+            sandbox_files,  # Already SandboxFileOutput objects
            new_conversation_history,
             current_session_id,
             sandbox_id,
@@ -471,140 +478,6 @@ class ClaudeCodeBlock(Block):
         if dispose_sandbox and sandbox:
             await sandbox.kill()
 
-    async def _extract_files(
-        self,
-        sandbox: BaseAsyncSandbox,
-        working_directory: str,
-        since_timestamp: str | None = None,
-    ) -> list["ClaudeCodeBlock.FileOutput"]:
-        """
-        Extract text files created/modified during this Claude Code execution.
-
-        Args:
-            sandbox: The E2B sandbox instance
-            working_directory: Directory to search for files
-            since_timestamp: ISO timestamp - only return files modified after this time
-
-        Returns:
-            List of FileOutput objects with path, relative_path, name, and content
-        """
-        files: list[ClaudeCodeBlock.FileOutput] = []
-
-        # Text file extensions we can safely read as text
-        text_extensions = {
-            ".txt", ".md", ".html", ".htm", ".css", ".js", ".ts", ".jsx", ".tsx",
-            ".json", ".xml", ".yaml", ".yml", ".toml", ".ini", ".cfg", ".conf",
-            ".py", ".rb", ".php", ".java", ".c", ".cpp", ".h", ".hpp", ".cs",
-            ".go", ".rs", ".swift", ".kt", ".scala", ".sh", ".bash", ".zsh",
-            ".sql", ".graphql", ".env", ".gitignore", ".dockerfile", "Dockerfile",
-            ".vue", ".svelte", ".astro", ".mdx", ".rst", ".tex", ".csv", ".log",
-        }
-
-        try:
-            # List files recursively using find command
-            # Exclude node_modules and .git directories, but allow hidden files
-            # like .env and .gitignore (they're filtered by text_extensions later)
-            # Filter by timestamp to only get files created/modified during this run
-            safe_working_dir = shlex.quote(working_directory)
-            timestamp_filter = ""
-            if since_timestamp:
-                timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} "
-            find_result = await sandbox.commands.run(
-                f"find {safe_working_dir} -type f "
-                f"{timestamp_filter}"
-                f"-not -path '*/node_modules/*' "
-                f"-not -path '*/.git/*' "
-                f"2>/dev/null"
-            )
-
-            if find_result.stdout:
-                for file_path in find_result.stdout.strip().split("\n"):
-                    if not file_path:
-                        continue
-
-                    # Check if it's a text file we can read
-                    is_text = any(
-                        file_path.endswith(ext) for ext in text_extensions
-                    ) or file_path.endswith("Dockerfile")
-
-                    if is_text:
-                        try:
-                            content = await sandbox.files.read(file_path)
-                            # Handle bytes or string
-                            if isinstance(content, bytes):
-                                content = content.decode("utf-8", errors="replace")
-
-                            # Extract filename from path
-                            file_name = file_path.split("/")[-1]
-
-                            # Calculate relative path by stripping working directory
-                            relative_path = file_path
-                            if file_path.startswith(working_directory):
-                                relative_path = file_path[len(working_directory) :]
-                                # Remove leading slash if present
-                                if relative_path.startswith("/"):
-                                    relative_path = relative_path[1:]
-
-                            files.append(
-                                ClaudeCodeBlock.FileOutput(
-                                    path=file_path,
-                                    relative_path=relative_path,
-                                    name=file_name,
-                                    content=content,
-                                )
-                            )
-                        except Exception:
-                            # Skip files that can't be read
-                            pass
-
-        except Exception:
-            # If file extraction fails, return empty results
-            pass
-
-        return files
-
     def _escape_prompt(self, prompt: str) -> str:
         """Escape the prompt for safe shell execution."""
         # Use single quotes and escape any single quotes in the prompt
@@ -617,6 +490,7 @@ class ClaudeCodeBlock(Block):
         *,
         e2b_credentials: APIKeyCredentials,
         anthropic_credentials: APIKeyCredentials,
+        execution_context: "ExecutionContext",
         **kwargs,
     ) -> BlockOutput:
         try:
@@ -637,6 +511,7 @@ class ClaudeCodeBlock(Block):
                 existing_sandbox_id=input_data.sandbox_id,
                 conversation_history=input_data.conversation_history,
                 dispose_sandbox=input_data.dispose_sandbox,
+                execution_context=execution_context,
             )
 
             yield "response", response
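For downstream consumers, the practical change is the shape of each item on the block's `files` output. A sketch of the record shape as the diff defines it (sample values are illustrative):

```python
from pydantic import BaseModel

class SandboxFileOutput(BaseModel):
    """Mirror of the model introduced in backend.util.sandbox_files."""
    path: str                         # full path inside the sandbox
    relative_path: str                # path relative to the working directory
    name: str                         # filename only
    content: str                      # text content (or a binary placeholder)
    workspace_ref: str | None = None  # workspace://{id}#mime when stored

f = SandboxFileOutput(
    path="/home/user/index.html",
    relative_path="index.html",
    name="index.html",
    content="<html>Hello World</html>",
    workspace_ref=None,
)
# Prefer the workspace reference when present; fall back to inline content.
print(f.workspace_ref or f.content)
```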
@@ -1,5 +1,5 @@
 from enum import Enum
-from typing import Any, Literal, Optional
+from typing import TYPE_CHECKING, Any, Literal, Optional
 
 from e2b_code_interpreter import AsyncSandbox
 from e2b_code_interpreter import Result as E2BExecutionResult
@@ -20,6 +20,13 @@ from backend.data.model import (
     SchemaField,
 )
 from backend.integrations.providers import ProviderName
+from backend.util.sandbox_files import (
+    SandboxFileOutput,
+    extract_and_store_sandbox_files,
+)
+
+if TYPE_CHECKING:
+    from backend.executor.utils import ExecutionContext
 
 TEST_CREDENTIALS = APIKeyCredentials(
     id="01234567-89ab-cdef-0123-456789abcdef",
@@ -85,6 +92,9 @@ class CodeExecutionResult(MainCodeExecutionResult):
 class BaseE2BExecutorMixin:
     """Shared implementation methods for E2B executor blocks."""
 
+    # Default working directory in E2B sandboxes
+    WORKING_DIR = "/home/user"
+
     async def execute_code(
         self,
         api_key: str,
@@ -95,14 +105,21 @@ class BaseE2BExecutorMixin:
         timeout: Optional[int] = None,
         sandbox_id: Optional[str] = None,
         dispose_sandbox: bool = False,
+        execution_context: Optional["ExecutionContext"] = None,
+        extract_files: bool = False,
     ):
         """
         Unified code execution method that handles all three use cases:
         1. Create new sandbox and execute (ExecuteCodeBlock)
         2. Create new sandbox, execute, and return sandbox_id (InstantiateCodeSandboxBlock)
         3. Connect to existing sandbox and execute (ExecuteCodeStepBlock)
+
+        Args:
+            extract_files: If True and execution_context provided, extract files
+                created/modified during execution and store to workspace.
         """  # noqa
         sandbox = None
+        files: list[SandboxFileOutput] = []
         try:
             if sandbox_id:
                 # Connect to existing sandbox (ExecuteCodeStepBlock case)
@@ -118,6 +135,12 @@ class BaseE2BExecutorMixin:
             for cmd in setup_commands:
                 await sandbox.commands.run(cmd)
 
+            # Capture timestamp before execution to scope file extraction
+            start_timestamp = None
+            if extract_files:
+                ts_result = await sandbox.commands.run("date -u +%Y-%m-%dT%H:%M:%S")
+                start_timestamp = ts_result.stdout.strip() if ts_result.stdout else None
+
             # Execute the code
             execution = await sandbox.run_code(
                 code,
@@ -133,7 +156,24 @@ class BaseE2BExecutorMixin:
             stdout_logs = "".join(execution.logs.stdout)
             stderr_logs = "".join(execution.logs.stderr)
 
-            return results, text_output, stdout_logs, stderr_logs, sandbox.sandbox_id
+            # Extract files created/modified during this execution
+            if extract_files and execution_context:
+                files = await extract_and_store_sandbox_files(
+                    sandbox=sandbox,
+                    working_directory=self.WORKING_DIR,
+                    execution_context=execution_context,
+                    since_timestamp=start_timestamp,
+                    text_only=False,  # Include binary files too
+                )
+
+            return (
+                results,
+                text_output,
+                stdout_logs,
+                stderr_logs,
+                sandbox.sandbox_id,
+                files,
+            )
         finally:
             # Dispose of sandbox if requested to reduce usage costs
             if dispose_sandbox and sandbox:
@@ -238,6 +278,12 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
             description="Standard output logs from execution"
         )
         stderr_logs: str = SchemaField(description="Standard error logs from execution")
+        files: list[SandboxFileOutput] = SchemaField(
+            description=(
+                "Files created or modified during execution. "
+                "Each file has path, name, content, and workspace_ref (if stored)."
+            ),
+        )
 
     def __init__(self):
         super().__init__(
@@ -259,23 +305,30 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
                 ("results", []),
                 ("response", "Hello World"),
                 ("stdout_logs", "Hello World\n"),
+                ("files", []),
             ],
             test_mock={
-                "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox: (  # noqa
+                "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox, execution_context, extract_files: (  # noqa
                     [],  # results
                     "Hello World",  # text_output
                     "Hello World\n",  # stdout_logs
                     "",  # stderr_logs
                     "sandbox_id",  # sandbox_id
+                    [],  # files
                 ),
             },
         )
 
     async def run(
-        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
+        self,
+        input_data: Input,
+        *,
+        credentials: APIKeyCredentials,
+        execution_context: "ExecutionContext",
+        **kwargs,
     ) -> BlockOutput:
         try:
-            results, text_output, stdout, stderr, _ = await self.execute_code(
+            results, text_output, stdout, stderr, _, files = await self.execute_code(
                 api_key=credentials.api_key.get_secret_value(),
                 code=input_data.code,
                 language=input_data.language,
@@ -283,6 +336,8 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
                 setup_commands=input_data.setup_commands,
                 timeout=input_data.timeout,
                 dispose_sandbox=input_data.dispose_sandbox,
+                execution_context=execution_context,
+                extract_files=True,
             )
 
             # Determine result object shape & filter out empty formats
@@ -296,6 +351,8 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
                 yield "stdout_logs", stdout
             if stderr:
                 yield "stderr_logs", stderr
+            # Always yield files (empty list if none)
+            yield "files", [f.model_dump() for f in files]
         except Exception as e:
             yield "error", str(e)
 
@@ -393,6 +450,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
                     "Hello World\n",  # stdout_logs
                     "",  # stderr_logs
                     "sandbox_id",  # sandbox_id
+                    [],  # files
                 ),
             },
         )
@@ -401,7 +459,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            _, text_output, stdout, stderr, sandbox_id = await self.execute_code(
+            _, text_output, stdout, stderr, sandbox_id, _ = await self.execute_code(
                 api_key=credentials.api_key.get_secret_value(),
                 code=input_data.setup_code,
                 language=input_data.language,
@@ -500,6 +558,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
                     "Hello World\n",  # stdout_logs
                     "",  # stderr_logs
                     sandbox_id,  # sandbox_id
+                    [],  # files
                ),
             },
         )
@@ -508,7 +567,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            results, text_output, stdout, stderr, _ = await self.execute_code(
+            results, text_output, stdout, stderr, _, _ = await self.execute_code(
                 api_key=credentials.api_key.get_secret_value(),
                 code=input_data.step_code,
                 language=input_data.language,
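The timestamp capture above is what scopes extraction to files the run actually touched: grab a UTC timestamp before executing, then let `find -newermt` filter to files modified after it. A standalone sketch of the same pipeline using `subprocess` locally instead of the sandbox command API (paths are illustrative):

```python
import shlex
import subprocess

working_dir = "/tmp/demo"  # illustrative working directory

# Capture a UTC timestamp before running anything
start = subprocess.run(
    ["date", "-u", "+%Y-%m-%dT%H:%M:%S"], capture_output=True, text=True
).stdout.strip()

# ... run code that creates or modifies files under working_dir ...

# List only files modified after `start`, skipping vendored directories
find_cmd = (
    f"find {shlex.quote(working_dir)} -type f "
    f"-newermt {shlex.quote(start)} "
    f"-not -path '*/node_modules/*' -not -path '*/.git/*'"
)
result = subprocess.run(find_cmd, shell=True, capture_output=True, text=True)
print(result.stdout.splitlines())
```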
@@ -21,43 +21,71 @@ logger = logging.getLogger(__name__)
 
 class HumanInTheLoopBlock(Block):
     """
-    This block pauses execution and waits for human approval or modification of the data.
+    Pauses execution and waits for human approval or rejection of the data.
 
-    When executed, it creates a pending review entry and sets the node execution status
-    to REVIEW. The execution will remain paused until a human user either:
-    - Approves the data (with or without modifications)
-    - Rejects the data
+    When executed, this block creates a pending review entry and sets the node execution
+    status to REVIEW. The execution remains paused until a human user either approves
+    or rejects the data.
 
-    This is useful for workflows that require human validation or intervention before
-    proceeding to the next steps.
+    **How it works:**
+    - The input data is presented to a human reviewer
+    - The reviewer can approve or reject (and optionally modify the data if editable)
+    - On approval: the data flows out through the `approved_data` output pin
+    - On rejection: the data flows out through the `rejected_data` output pin
+
+    **Important:** The output pins yield the actual data itself, NOT status strings.
+    The approval/rejection decision determines WHICH output pin fires, not the value.
+    You do NOT need to compare the output to "APPROVED" or "REJECTED" - simply connect
+    downstream blocks to the appropriate output pin for each case.
+
+    **Example usage:**
+    - Connect `approved_data` → next step in your workflow (data was approved)
+    - Connect `rejected_data` → error handling or notification (data was rejected)
     """
 
     class Input(BlockSchemaInput):
-        data: Any = SchemaField(description="The data to be reviewed by a human user")
+        data: Any = SchemaField(
+            description="The data to be reviewed by a human user. "
+            "This exact data will be passed through to either approved_data or "
+            "rejected_data output based on the reviewer's decision."
+        )
         name: str = SchemaField(
-            description="A descriptive name for what this data represents",
+            description="A descriptive name for what this data represents. "
+            "This helps the reviewer understand what they are reviewing.",
         )
         editable: bool = SchemaField(
-            description="Whether the human reviewer can edit the data",
+            description="Whether the human reviewer can edit the data before "
+            "approving or rejecting it",
             default=True,
             advanced=True,
         )
 
     class Output(BlockSchemaOutput):
         approved_data: Any = SchemaField(
-            description="The data when approved (may be modified by reviewer)"
+            description="Outputs the input data when the reviewer APPROVES it. "
+            "The value is the actual data itself (not a status string like 'APPROVED'). "
+            "If the reviewer edited the data, this contains the modified version. "
+            "Connect downstream blocks here for the 'approved' workflow path."
         )
         rejected_data: Any = SchemaField(
-            description="The data when rejected (may be modified by reviewer)"
+            description="Outputs the input data when the reviewer REJECTS it. "
+            "The value is the actual data itself (not a status string like 'REJECTED'). "
+            "If the reviewer edited the data, this contains the modified version. "
+            "Connect downstream blocks here for the 'rejected' workflow path."
         )
         review_message: str = SchemaField(
-            description="Any message provided by the reviewer", default=""
+            description="Optional message provided by the reviewer explaining their "
+            "decision. Only outputs when the reviewer provides a message; "
+            "this pin does not fire if no message was given.",
+            default="",
        )
 
     def __init__(self):
         super().__init__(
             id="8b2a7b3c-6e9d-4a5f-8c1b-2e3f4a5b6c7d",
-            description="Pause execution and wait for human approval or modification of data",
+            description="Pause execution for human review. Data flows through "
+            "approved_data or rejected_data output based on the reviewer's decision. "
+            "Outputs contain the actual data, not status strings.",
             categories={BlockCategory.BASIC},
             input_schema=HumanInTheLoopBlock.Input,
             output_schema=HumanInTheLoopBlock.Output,
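The pin semantics the docstring stresses can be restated in a few lines: a block yields `(pin_name, value)` pairs, so the reviewer's decision selects which pin fires while the payload stays the data itself. A hedged sketch of that yield protocol (the `approved` and `message` inputs stand in for the real review result):

```python
from typing import Any, AsyncIterator

async def review_outputs(
    data: Any, approved: bool, message: str = ""
) -> AsyncIterator[tuple[str, Any]]:
    # The decision picks the pin; the value is always the data, never "APPROVED".
    if approved:
        yield "approved_data", data
    else:
        yield "rejected_data", data
    # review_message only fires when the reviewer actually left a message.
    if message:
        yield "review_message", message
```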
@@ -364,6 +364,44 @@ def _remove_orphan_tool_responses(
     return result
 
 
+def validate_and_remove_orphan_tool_responses(
+    messages: list[dict],
+    log_warning: bool = True,
+) -> list[dict]:
+    """
+    Validate tool_call/tool_response pairs and remove orphaned responses.
+
+    Scans messages in order, tracking all tool_call IDs. Any tool response
+    referencing an ID not seen in a preceding message is considered orphaned
+    and removed. This prevents API errors like Anthropic's "unexpected tool_use_id".
+
+    Args:
+        messages: List of messages to validate (OpenAI or Anthropic format)
+        log_warning: Whether to log a warning when orphans are found
+
+    Returns:
+        A new list with orphaned tool responses removed
+    """
+    available_ids: set[str] = set()
+    orphan_ids: set[str] = set()
+
+    for msg in messages:
+        available_ids |= _extract_tool_call_ids_from_message(msg)
+        for resp_id in _extract_tool_response_ids_from_message(msg):
+            if resp_id not in available_ids:
+                orphan_ids.add(resp_id)
+
+    if not orphan_ids:
+        return messages
+
+    if log_warning:
+        logger.warning(
+            f"Removing {len(orphan_ids)} orphan tool response(s): {orphan_ids}"
+        )
+
+    return _remove_orphan_tool_responses(messages, orphan_ids)
+
+
 def _ensure_tool_pairs_intact(
     recent_messages: list[dict],
     all_messages: list[dict],
@@ -723,6 +761,13 @@ async def compress_context(
 
     # Filter out any None values that may have been introduced
     final_msgs: list[dict] = [m for m in msgs if m is not None]
+
+    # ---- STEP 6: Final tool-pair validation ---------------------------------
+    # After all compression steps, verify that every tool response has a
+    # matching tool_call in a preceding assistant message. Remove orphans
+    # to prevent API errors (e.g., Anthropic's "unexpected tool_use_id").
+    final_msgs = validate_and_remove_orphan_tool_responses(final_msgs)
+
     final_count = sum(_msg_tokens(m, enc) for m in final_msgs)
     error = None
     if final_count + reserve > target_tokens:
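The orphan scan is a single forward pass: collect every tool_call ID seen so far, and flag any response whose ID was never issued. A self-contained restatement for OpenAI-style messages (the inline extractors stand in for the private `_extract_*` helpers):

```python
def find_orphan_tool_responses(messages: list[dict]) -> set[str]:
    """Return IDs of tool responses with no preceding tool_call (OpenAI format)."""
    available: set[str] = set()
    orphans: set[str] = set()
    for msg in messages:
        for call in msg.get("tool_calls") or []:
            available.add(call["id"])
        if msg.get("role") == "tool" and msg.get("tool_call_id") not in available:
            orphans.add(msg["tool_call_id"])
    return orphans

messages = [
    {"role": "assistant", "tool_calls": [{"id": "call_1"}]},
    {"role": "tool", "tool_call_id": "call_1", "content": "ok"},
    {"role": "tool", "tool_call_id": "call_2", "content": "stale"},  # never issued
]
assert find_orphan_tool_responses(messages) == {"call_2"}
```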
autogpt_platform/backend/backend/util/sandbox_files.py (new file, 288 lines)
@@ -0,0 +1,288 @@
+"""
+Shared utilities for extracting and storing files from E2B sandboxes.
+
+This module provides common file extraction and workspace storage functionality
+for blocks that run code in E2B sandboxes (Claude Code, Code Executor, etc.).
+"""
+
+import base64
+import logging
+import mimetypes
+import shlex
+from dataclasses import dataclass
+from typing import TYPE_CHECKING
+
+from pydantic import BaseModel
+
+from backend.util.file import store_media_file
+from backend.util.type import MediaFileType
+
+if TYPE_CHECKING:
+    from e2b import AsyncSandbox as BaseAsyncSandbox
+
+    from backend.executor.utils import ExecutionContext
+
+logger = logging.getLogger(__name__)
+
+# Text file extensions that can be safely read and stored as text
+TEXT_EXTENSIONS = {
+    ".txt", ".md", ".html", ".htm", ".css", ".js", ".ts", ".jsx", ".tsx",
+    ".json", ".xml", ".yaml", ".yml", ".toml", ".ini", ".cfg", ".conf",
+    ".py", ".rb", ".php", ".java", ".c", ".cpp", ".h", ".hpp", ".cs",
+    ".go", ".rs", ".swift", ".kt", ".scala", ".sh", ".bash", ".zsh",
+    ".sql", ".graphql", ".env", ".gitignore", ".dockerfile", "Dockerfile",
+    ".vue", ".svelte", ".astro", ".mdx", ".rst", ".tex", ".csv", ".log",
+}
+
+
+class SandboxFileOutput(BaseModel):
+    """A file extracted from a sandbox and optionally stored in workspace."""
+
+    path: str
+    """Full path in the sandbox."""
+
+    relative_path: str
+    """Path relative to the working directory."""
+
+    name: str
+    """Filename only."""
+
+    content: str
+    """File content as text (for backward compatibility)."""
+
+    workspace_ref: str | None = None
+    """Workspace reference (workspace://{id}#mime) if stored, None otherwise."""
+
+
+@dataclass
+class ExtractedFile:
+    """Internal representation of an extracted file before storage."""
+
+    path: str
+    relative_path: str
+    name: str
+    content: bytes
+    is_text: bool
+
+
+async def extract_sandbox_files(
+    sandbox: "BaseAsyncSandbox",
+    working_directory: str,
+    since_timestamp: str | None = None,
+    text_only: bool = True,
+) -> list[ExtractedFile]:
+    """
+    Extract files from an E2B sandbox.
+
+    Args:
+        sandbox: The E2B sandbox instance
+        working_directory: Directory to search for files
+        since_timestamp: ISO timestamp - only return files modified after this time
+        text_only: If True, only extract text files (default). If False, extract all files.
+
+    Returns:
+        List of ExtractedFile objects with path, content, and metadata
+    """
+    files: list[ExtractedFile] = []
+
+    try:
+        # Build find command
+        safe_working_dir = shlex.quote(working_directory)
+        timestamp_filter = ""
+        if since_timestamp:
+            timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} "
+
+        find_result = await sandbox.commands.run(
+            f"find {safe_working_dir} -type f "
+            f"{timestamp_filter}"
+            f"-not -path '*/node_modules/*' "
+            f"-not -path '*/.git/*' "
+            f"2>/dev/null"
+        )
+
+        if not find_result.stdout:
+            return files
+
+        for file_path in find_result.stdout.strip().split("\n"):
+            if not file_path:
+                continue
+
+            # Check if it's a text file
+            is_text = any(file_path.endswith(ext) for ext in TEXT_EXTENSIONS)
+
+            # Skip non-text files if text_only mode
+            if text_only and not is_text:
+                continue
+
+            try:
+                # Read file content as bytes
+                content = await sandbox.files.read(file_path, format="bytes")
+                if isinstance(content, str):
+                    content = content.encode("utf-8")
+                elif isinstance(content, bytearray):
+                    content = bytes(content)
+
+                # Extract filename from path
+                file_name = file_path.split("/")[-1]
+
+                # Calculate relative path
+                relative_path = file_path
+                if file_path.startswith(working_directory):
+                    relative_path = file_path[len(working_directory) :]
+                    if relative_path.startswith("/"):
+                        relative_path = relative_path[1:]
+
+                files.append(
+                    ExtractedFile(
+                        path=file_path,
+                        relative_path=relative_path,
+                        name=file_name,
+                        content=content,
+                        is_text=is_text,
+                    )
+                )
+            except Exception as e:
+                logger.debug(f"Failed to read file {file_path}: {e}")
+                continue
+
+    except Exception as e:
+        logger.warning(f"File extraction failed: {e}")
+
+    return files
+
+
+async def store_sandbox_files(
+    extracted_files: list[ExtractedFile],
+    execution_context: "ExecutionContext",
+) -> list[SandboxFileOutput]:
+    """
+    Store extracted sandbox files to workspace and return output objects.
+
+    Args:
+        extracted_files: List of files extracted from sandbox
+        execution_context: Execution context for workspace storage
+
+    Returns:
+        List of SandboxFileOutput objects with workspace refs
+    """
+    outputs: list[SandboxFileOutput] = []
+
+    for file in extracted_files:
+        # Decode content for text files (for backward compat content field)
+        if file.is_text:
+            try:
+                content_str = file.content.decode("utf-8", errors="replace")
+            except Exception:
+                content_str = ""
+        else:
+            content_str = f"[Binary file: {len(file.content)} bytes]"
+
+        # Build data URI (needed for storage and as binary fallback)
+        mime_type = mimetypes.guess_type(file.name)[0] or "application/octet-stream"
+        data_uri = f"data:{mime_type};base64,{base64.b64encode(file.content).decode()}"
+
+        # Try to store in workspace
+        workspace_ref: str | None = None
+        try:
+            result = await store_media_file(
+                file=MediaFileType(data_uri),
+                execution_context=execution_context,
+                return_format="for_block_output",
+            )
+            if result.startswith("workspace://"):
+                workspace_ref = result
+            elif not file.is_text:
+                # Non-workspace context (graph execution): store_media_file
+                # returned a data URI — use it as content so binary data isn't lost.
+                content_str = result
+        except Exception as e:
+            logger.warning(f"Failed to store file {file.name} to workspace: {e}")
+            # For binary files, fall back to data URI to prevent data loss
+            if not file.is_text:
+                content_str = data_uri
+
+        outputs.append(
+            SandboxFileOutput(
+                path=file.path,
+                relative_path=file.relative_path,
+                name=file.name,
+                content=content_str,
+                workspace_ref=workspace_ref,
+            )
+        )
+
+    return outputs
+
+
+async def extract_and_store_sandbox_files(
+    sandbox: "BaseAsyncSandbox",
+    working_directory: str,
+    execution_context: "ExecutionContext",
+    since_timestamp: str | None = None,
+    text_only: bool = True,
+) -> list[SandboxFileOutput]:
+    """
+    Extract files from sandbox and store them in workspace.
+
+    This is the main entry point combining extraction and storage.
+
+    Args:
+        sandbox: The E2B sandbox instance
+        working_directory: Directory to search for files
+        execution_context: Execution context for workspace storage
+        since_timestamp: ISO timestamp - only return files modified after this time
+        text_only: If True, only extract text files
+
+    Returns:
+        List of SandboxFileOutput objects with content and workspace refs
+    """
+    extracted = await extract_sandbox_files(
+        sandbox=sandbox,
+        working_directory=working_directory,
+        since_timestamp=since_timestamp,
+        text_only=text_only,
+    )
+
+    return await store_sandbox_files(extracted, execution_context)
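The storage path in `store_sandbox_files` hinges on packing the raw bytes into a data URI before calling `store_media_file`. That encoding step in isolation (the sample file contents are illustrative):

```python
import base64
import mimetypes

name = "report.csv"
content = b"id,score\n1,0.97\n"

# Guess the MIME type from the filename, defaulting to a generic binary type
mime = mimetypes.guess_type(name)[0] or "application/octet-stream"
data_uri = f"data:{mime};base64,{base64.b64encode(content).decode()}"
print(data_uri)
# data:text/csv;base64,aWQsc2NvcmUKMSwwLjk3Cg==
```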
@@ -10,8 +10,9 @@ import {
   MessageResponse,
 } from "@/components/ai-elements/message";
 import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
+import { toast } from "@/components/molecules/Toast/use-toast";
 import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai";
-import { useEffect, useState } from "react";
+import { useEffect, useRef, useState } from "react";
 import { CreateAgentTool } from "../../tools/CreateAgent/CreateAgent";
 import { EditAgentTool } from "../../tools/EditAgent/EditAgent";
 import { FindAgentsTool } from "../../tools/FindAgents/FindAgents";
@@ -121,6 +122,7 @@ export const ChatMessagesContainer = ({
   isLoading,
 }: ChatMessagesContainerProps) => {
   const [thinkingPhrase, setThinkingPhrase] = useState(getRandomPhrase);
+  const lastToastTimeRef = useRef(0);
 
   useEffect(() => {
     if (status === "submitted") {
@@ -128,6 +130,20 @@ export const ChatMessagesContainer = ({
     }
   }, [status]);
 
+  // Show a toast when a new error occurs, debounced to avoid spam
+  useEffect(() => {
+    if (!error) return;
+    const now = Date.now();
+    if (now - lastToastTimeRef.current < 3_000) return;
+    lastToastTimeRef.current = now;
+    toast({
+      variant: "destructive",
+      title: "Something went wrong",
+      description:
+        "The assistant encountered an error. Please try sending your message again.",
+    });
+  }, [error]);
+
   const lastMessage = messages[messages.length - 1];
   const lastAssistantHasVisibleContent =
     lastMessage?.role === "assistant" &&
@@ -263,8 +279,12 @@ export const ChatMessagesContainer = ({
           </Message>
         )}
         {error && (
-          <div className="rounded-lg bg-red-50 p-3 text-red-600">
-            Error: {error.message}
+          <div className="rounded-lg bg-red-50 p-4 text-sm text-red-700">
+            <p className="font-medium">Something went wrong</p>
+            <p className="mt-1 text-red-600">
+              The assistant encountered an error. Please try sending your
+              message again.
+            </p>
           </div>
         )}
       </ConversationContent>
@@ -30,7 +30,7 @@ export function ContentCard({
   return (
     <div
       className={cn(
-        "rounded-lg bg-gradient-to-r from-purple-500/30 to-blue-500/30 p-[1px]",
+        "min-w-0 rounded-lg bg-gradient-to-r from-purple-500/30 to-blue-500/30 p-[1px]",
         className,
       )}
     >
@@ -4,7 +4,6 @@ import { WarningDiamondIcon } from "@phosphor-icons/react";
 import type { ToolUIPart } from "ai";
 import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions";
 import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
-import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
 import { ProgressBar } from "../../components/ProgressBar/ProgressBar";
 import {
   ContentCardDescription,
@@ -77,7 +76,7 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
     isOperationInProgressOutput(output)
   ) {
     return {
-      icon: <OrbitLoader size={32} />,
+      icon,
       title: "Creating agent, this may take a few minutes. Sit back and relax.",
     };
   }
@@ -203,7 +203,7 @@ export function getAccordionMeta(output: RunAgentToolOutput): {
       ? output.status.trim()
       : "started";
     return {
-      icon: <OrbitLoader size={28} className="text-neutral-700" />,
+      icon,
       title: output.graph_name,
       description: `Status: ${statusText}`,
     };
@@ -149,7 +149,7 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
   if (isRunBlockBlockOutput(output)) {
     const keys = Object.keys(output.outputs ?? {});
     return {
-      icon: <OrbitLoader size={24} className="text-neutral-700" />,
+      icon,
       title: output.block_name,
       description:
         keys.length > 0
@@ -1,11 +1,8 @@
 import { environment } from "@/services/environment";
 import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers";
 import { NextRequest } from "next/server";
+import { normalizeSSEStream, SSE_HEADERS } from "../../../sse-helpers";
 
-/**
- * SSE Proxy for chat streaming.
- * Supports POST with context (page content + URL) in the request body.
- */
 export async function POST(
   request: NextRequest,
   { params }: { params: Promise<{ sessionId: string }> },
@@ -23,17 +20,14 @@ export async function POST(
     );
   }
 
-  // Get auth token from server-side session
   const token = await getServerAuthToken();
 
-  // Build backend URL
   const backendUrl = environment.getAGPTServerBaseUrl();
   const streamUrl = new URL(
     `/api/chat/sessions/${sessionId}/stream`,
     backendUrl,
   );
 
-  // Forward request to backend with auth header
   const headers: Record<string, string> = {
     "Content-Type": "application/json",
     Accept: "text/event-stream",
@@ -63,14 +57,15 @@ export async function POST(
      });
     }
 
-    // Return the SSE stream directly
-    return new Response(response.body, {
-      headers: {
-        "Content-Type": "text/event-stream",
-        "Cache-Control": "no-cache, no-transform",
-        Connection: "keep-alive",
-        "X-Accel-Buffering": "no",
-      },
+    if (!response.body) {
+      return new Response(
+        JSON.stringify({ error: "Empty response from chat service" }),
+        { status: 502, headers: { "Content-Type": "application/json" } },
+      );
+    }
+
+    return new Response(normalizeSSEStream(response.body), {
+      headers: SSE_HEADERS,
     });
   } catch (error) {
     console.error("SSE proxy error:", error);
@@ -87,13 +82,6 @@ export async function POST(
   }
 }
 
-/**
- * Resume an active stream for a session.
- *
- * Called by the AI SDK's `useChat(resume: true)` on page load.
- * Proxies to the backend which checks for an active stream and either
- * replays it (200 + SSE) or returns 204 No Content.
- */
 export async function GET(
   _request: NextRequest,
   { params }: { params: Promise<{ sessionId: string }> },
@@ -124,7 +112,6 @@ export async function GET(
       headers,
     });
 
-    // 204 = no active stream to resume
    if (response.status === 204) {
      return new Response(null, { status: 204 });
    }
@@ -137,12 +124,13 @@ export async function GET(
      });
    }
 
-    return new Response(response.body, {
+    if (!response.body) {
+      return new Response(null, { status: 204 });
+    }
+
+    return new Response(normalizeSSEStream(response.body), {
      headers: {
-        "Content-Type": "text/event-stream",
-        "Cache-Control": "no-cache, no-transform",
-        Connection: "keep-alive",
-        "X-Accel-Buffering": "no",
+        ...SSE_HEADERS,
        "x-vercel-ai-ui-message-stream": "v1",
      },
    });
autogpt_platform/frontend/src/app/api/chat/sse-helpers.ts (new file, 72 lines)
@@ -0,0 +1,72 @@
+export const SSE_HEADERS = {
+  "Content-Type": "text/event-stream",
+  "Cache-Control": "no-cache, no-transform",
+  Connection: "keep-alive",
+  "X-Accel-Buffering": "no",
+} as const;
+
+export function normalizeSSEStream(
+  input: ReadableStream<Uint8Array>,
+): ReadableStream<Uint8Array> {
+  const decoder = new TextDecoder();
+  const encoder = new TextEncoder();
+  let buffer = "";
+
+  return input.pipeThrough(
+    new TransformStream<Uint8Array, Uint8Array>({
+      transform(chunk, controller) {
+        buffer += decoder.decode(chunk, { stream: true });
+
+        const parts = buffer.split("\n\n");
+        buffer = parts.pop() ?? "";
+
+        for (const part of parts) {
+          const normalized = normalizeSSEEvent(part);
+          controller.enqueue(encoder.encode(normalized + "\n\n"));
+        }
+      },
+      flush(controller) {
+        if (buffer.trim()) {
+          const normalized = normalizeSSEEvent(buffer);
+          controller.enqueue(encoder.encode(normalized + "\n\n"));
+        }
+      },
+    }),
+  );
+}
+
+function normalizeSSEEvent(event: string): string {
+  const lines = event.split("\n");
+  const dataLines: string[] = [];
+  const otherLines: string[] = [];
+
+  for (const line of lines) {
+    if (line.startsWith("data: ")) {
+      dataLines.push(line.slice(6));
+    } else {
+      otherLines.push(line);
+    }
+  }
+
+  if (dataLines.length === 0) return event;
+
+  const dataStr = dataLines.join("\n");
+  try {
+    const parsed = JSON.parse(dataStr) as Record<string, unknown>;
+    if (parsed.type === "error") {
+      const normalized = {
+        type: "error",
+        errorText:
+          typeof parsed.errorText === "string"
+            ? parsed.errorText
+            : "An unexpected error occurred",
+      };
+      const newData = `data: ${JSON.stringify(normalized)}`;
+      return [...otherLines.filter((l) => l.length > 0), newData].join("\n");
+    }
+  } catch {
+    // Not valid JSON — pass through as-is
+  }
+
+  return event;
+}
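The helper's core logic is language-agnostic: buffer the byte stream, split on blank lines into discrete SSE events, and rewrite any `type: "error"` payload down to the strict `{type, errorText}` shape the AI SDK accepts. The same algorithm restated in Python (the sample event is illustrative):

```python
import json

def normalize_sse_event(event: str) -> str:
    """Strip error events down to the strict {type, errorText} shape."""
    lines = event.split("\n")
    data_lines = [l[6:] for l in lines if l.startswith("data: ")]
    other = [l for l in lines if not l.startswith("data: ") and l]
    if not data_lines:
        return event
    try:
        parsed = json.loads("\n".join(data_lines))
    except json.JSONDecodeError:
        return event  # not JSON; pass through unchanged
    if not isinstance(parsed, dict) or parsed.get("type") != "error":
        return event
    normalized = {
        "type": "error",
        "errorText": parsed.get("errorText") or "An unexpected error occurred",
    }
    return "\n".join(other + [f"data: {json.dumps(normalized)}"])

raw = 'data: {"type": "error", "errorText": "boom", "code": 500}'
print(normalize_sse_event(raw))
# data: {"type": "error", "errorText": "boom"}
```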
@@ -1,20 +1,8 @@
 import { environment } from "@/services/environment";
 import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers";
 import { NextRequest } from "next/server";
+import { normalizeSSEStream, SSE_HEADERS } from "../../../sse-helpers";
 
-/**
- * SSE Proxy for task stream reconnection.
- *
- * This endpoint allows clients to reconnect to an ongoing or recently completed
- * background task's stream. It replays missed messages from Redis Streams and
- * subscribes to live updates if the task is still running.
- *
- * Client contract:
- * 1. When receiving an operation_started event, store the task_id
- * 2. To reconnect: GET /api/chat/tasks/{taskId}/stream?last_message_id={idx}
- * 3. Messages are replayed from the last_message_id position
- * 4. Stream ends when "finish" event is received
- */
 export async function GET(
   request: NextRequest,
   { params }: { params: Promise<{ taskId: string }> },
@@ -24,15 +12,12 @@ export async function GET(
   const lastMessageId = searchParams.get("last_message_id") || "0-0";
 
   try {
-    // Get auth token from server-side session
     const token = await getServerAuthToken();
 
-    // Build backend URL
     const backendUrl = environment.getAGPTServerBaseUrl();
     const streamUrl = new URL(`/api/chat/tasks/${taskId}/stream`, backendUrl);
     streamUrl.searchParams.set("last_message_id", lastMessageId);
 
-    // Forward request to backend with auth header
     const headers: Record<string, string> = {
       Accept: "text/event-stream",
       "Cache-Control": "no-cache",
@@ -56,14 +41,12 @@ export async function GET(
       });
     }
 
-    // Return the SSE stream directly
-    return new Response(response.body, {
-      headers: {
-        "Content-Type": "text/event-stream",
-        "Cache-Control": "no-cache, no-transform",
-        Connection: "keep-alive",
-        "X-Accel-Buffering": "no",
-      },
+    if (!response.body) {
+      return new Response(null, { status: 204 });
+    }
+
+    return new Response(normalizeSSEStream(response.body), {
+      headers: SSE_HEADERS,
     });
   } catch (error) {
     console.error("Task stream proxy error:", error);
|
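A hedged sketch of how a client might consume this proxy to resume a task stream, following the reconnection contract from the removed doc comment (GET with last_message_id, stop on a "finish" event). The parsing below is simplified, and resumeTaskStream is an illustrative name, not an export of this codebase.

// Assumed client-side reconnection sketch for the proxy route above.
async function resumeTaskStream(taskId: string, lastSeenId = "0-0") {
  const url = `/api/chat/tasks/${taskId}/stream?last_message_id=${encodeURIComponent(lastSeenId)}`;
  const response = await fetch(url, {
    headers: { Accept: "text/event-stream" },
  });
  if (response.status === 204 || !response.body) return; // nothing to replay

  const reader = response.body
    .pipeThrough(new TextDecoderStream())
    .getReader();
  let buffer = "";
  for (;;) {
    const chunk = await reader.read();
    if (chunk.done) break;
    buffer += chunk.value;
    // SSE events are separated by a blank line.
    const events = buffer.split("\n\n");
    buffer = events.pop() ?? "";
    for (const event of events) {
      const data = event
        .split("\n")
        .filter((l) => l.startsWith("data: "))
        .map((l) => l.slice("data: ".length))
        .join("\n");
      if (!data) continue;
      const message = JSON.parse(data);
      if (message.type === "finish") return; // stream is complete
      // ...handle replayed and live messages here...
    }
  }
}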
@@ -61,7 +61,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
 | [Get List Item](block-integrations/basic.md#get-list-item) | Returns the element at the given index |
 | [Get Store Agent Details](block-integrations/system/store_operations.md#get-store-agent-details) | Get detailed information about an agent from the store |
 | [Get Weather Information](block-integrations/basic.md#get-weather-information) | Retrieves weather information for a specified location using OpenWeatherMap API |
-| [Human In The Loop](block-integrations/basic.md#human-in-the-loop) | Pause execution and wait for human approval or modification of data |
+| [Human In The Loop](block-integrations/basic.md#human-in-the-loop) | Pause execution for human review |
 | [List Is Empty](block-integrations/basic.md#list-is-empty) | Checks if a list is empty |
 | [List Library Agents](block-integrations/system/library_operations.md#list-library-agents) | List all agents in your personal library |
 | [Note](block-integrations/basic.md#note) | A visual annotation block that displays a sticky note in the workflow editor for documentation and organization purposes |
@@ -975,7 +975,7 @@ A travel planning application could use this block to provide users with current
 ## Human In The Loop

 ### What it is
-Pause execution and wait for human approval or modification of data
+Pause execution for human review. Data flows through approved_data or rejected_data output based on the reviewer's decision. Outputs contain the actual data, not status strings.

 ### How it works
 <!-- MANUAL: how_it_works -->
@@ -988,18 +988,18 @@ This enables human oversight at critical points in automated workflows, ensuring

 | Input | Description | Type | Required |
 |-------|-------------|------|----------|
-| data | The data to be reviewed by a human user | Data | Yes |
-| name | A descriptive name for what this data represents | str | Yes |
-| editable | Whether the human reviewer can edit the data | bool | No |
+| data | The data to be reviewed by a human user. This exact data will be passed through to either approved_data or rejected_data output based on the reviewer's decision. | Data | Yes |
+| name | A descriptive name for what this data represents. This helps the reviewer understand what they are reviewing. | str | Yes |
+| editable | Whether the human reviewer can edit the data before approving or rejecting it | bool | No |

 ### Outputs

 | Output | Description | Type |
 |--------|-------------|------|
 | error | Error message if the operation failed | str |
-| approved_data | The data when approved (may be modified by reviewer) | Approved Data |
-| rejected_data | The data when rejected (may be modified by reviewer) | Rejected Data |
-| review_message | Any message provided by the reviewer | str |
+| approved_data | Outputs the input data when the reviewer APPROVES it. The value is the actual data itself (not a status string like 'APPROVED'). If the reviewer edited the data, this contains the modified version. Connect downstream blocks here for the 'approved' workflow path. | Approved Data |
+| rejected_data | Outputs the input data when the reviewer REJECTS it. The value is the actual data itself (not a status string like 'REJECTED'). If the reviewer edited the data, this contains the modified version. Connect downstream blocks here for the 'rejected' workflow path. | Rejected Data |
+| review_message | Optional message provided by the reviewer explaining their decision. Only outputs when the reviewer provides a message; this pin does not fire if no message was given. | str |

 ### Possible use case
 <!-- MANUAL: use_case -->
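A minimal illustration of the routing contract these descriptions document, not the block's actual implementation: exactly one of the two data outputs fires per review, each carrying the (possibly edited) data itself, and review_message fires only when the reviewer left a message. All names below other than the documented pin names are hypothetical.

// Illustrative model of the Human In The Loop output routing.
type ReviewDecision<T> = {
  approved: boolean;
  data: T; // the reviewed data, possibly edited when `editable` is true
  message?: string; // only present if the reviewer left a message
};

function routeReview<T>(decision: ReviewDecision<T>): Record<string, unknown> {
  const outputs: Record<string, unknown> = {};
  if (decision.approved) {
    outputs.approved_data = decision.data; // "approved" path downstream
  } else {
    outputs.rejected_data = decision.data; // "rejected" path downstream
  }
  if (decision.message !== undefined) {
    outputs.review_message = decision.message; // pin fires only with a message
  }
  return outputs;
}

// Example: approval passes the (edited) data through, never a status string.
routeReview({ approved: true, data: { price: 42 }, message: "Looks good" });
// -> { approved_data: { price: 42 }, review_message: "Looks good" }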
@@ -563,7 +563,7 @@ The block supports conversation continuation through three mechanisms:
 |--------|-------------|------|
 | error | Error message if execution failed | str |
 | response | The output/response from Claude Code execution | str |
-| files | List of text files created/modified by Claude Code during this execution. Each file has 'path', 'relative_path', 'name', and 'content' fields. | List[FileOutput] |
+| files | List of text files created/modified by Claude Code during this execution. Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. workspace_ref contains a workspace:// URI if the file was stored to workspace. | List[SandboxFileOutput] |
 | conversation_history | Full conversation history including this turn. Pass this to conversation_history input to continue on a fresh sandbox if the previous sandbox timed out. | str |
 | session_id | Session ID for this conversation. Pass this back along with sandbox_id to continue the conversation. | str |
 | sandbox_id | ID of the sandbox instance. Pass this back along with session_id to continue the conversation. This is None if dispose_sandbox was True (sandbox was disposed). | str |
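A sketch of the documented file shape from a consumer's point of view. The field list comes from the table above; the TypeScript interface and the summarizeFiles helper are illustrative only, since the actual SandboxFileOutput model lives in the backend.

// Assumed consumer-side view of one entry in the `files` output.
interface SandboxFileOutput {
  path: string; // absolute path inside the sandbox
  relative_path: string; // path relative to the working directory
  name: string; // file name only
  content: string; // text content of the file
  workspace_ref?: string; // workspace:// URI if the file was stored to workspace
}

// Hypothetical helper: list files, noting which were persisted to workspace.
function summarizeFiles(files: SandboxFileOutput[]): string[] {
  return files.map((f) =>
    f.workspace_ref
      ? `${f.relative_path} (stored at ${f.workspace_ref})`
      : f.relative_path,
  );
}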
@@ -215,6 +215,7 @@ The sandbox includes pip and npm pre-installed. Set timeout to limit execution t
 | response | Text output (if any) of the main execution result | str |
 | stdout_logs | Standard output logs from execution | str |
 | stderr_logs | Standard error logs from execution | str |
+| files | Files created or modified during execution. Each file has path, name, content, and workspace_ref (if stored). | List[SandboxFileOutput] |

 ### Possible use case
 <!-- MANUAL: use_case -->