Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-02-11 15:25:16 -05:00)

Compare commits: 1 commit, feat/sandb... vs dependabot...

Commit f1a4bb8cc2
@@ -22,7 +22,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.workflow_run.head_branch }}
          fetch-depth: 0
.github/workflows/claude-dependabot.yml (vendored) · 2 changes
@@ -30,7 +30,7 @@ jobs:
       actions: read  # Required for CI access
     steps:
      - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          fetch-depth: 1
.github/workflows/claude.yml (vendored) · 2 changes
@@ -40,7 +40,7 @@ jobs:
       actions: read  # Required for CI access
     steps:
      - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          fetch-depth: 1
.github/workflows/codeql.yml (vendored) · 2 changes
@@ -58,7 +58,7 @@ jobs:
     # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
     steps:
      - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
.github/workflows/copilot-setup-steps.yml (vendored) · 2 changes
@@ -27,7 +27,7 @@ jobs:
     # If you do not check out your code, Copilot will do this for you.
     steps:
      - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true
.github/workflows/docs-block-sync.yml (vendored) · 2 changes
@@ -23,7 +23,7 @@ jobs:

     steps:
      - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          fetch-depth: 1
.github/workflows/docs-claude-review.yml (vendored) · 2 changes
@@ -23,7 +23,7 @@ jobs:

     steps:
      - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
.github/workflows/docs-enhance.yml (vendored) · 2 changes
@@ -28,7 +28,7 @@ jobs:

     steps:
      - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          fetch-depth: 1

@@ -25,7 +25,7 @@ jobs:

     steps:
      - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.inputs.git_ref || github.ref_name }}

@@ -17,7 +17,7 @@ jobs:

     steps:
      - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          ref: ${{ github.ref_name || 'master' }}
.github/workflows/platform-backend-ci.yml (vendored) · 2 changes
@@ -68,7 +68,7 @@ jobs:

     steps:
      - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true
.github/workflows/platform-frontend-ci.yml (vendored) · 10 changes
@@ -31,7 +31,7 @@ jobs:

     steps:
      - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4

      - name: Check for component changes
        uses: dorny/paths-filter@v3

@@ -71,7 +71,7 @@ jobs:

     steps:
      - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v6

@@ -107,7 +107,7 @@ jobs:

     steps:
      - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0

@@ -148,7 +148,7 @@ jobs:

     steps:
      - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          submodules: recursive

@@ -277,7 +277,7 @@ jobs:

     steps:
      - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          submodules: recursive
.github/workflows/platform-fullstack-ci.yml (vendored) · 4 changes
@@ -29,7 +29,7 @@ jobs:

     steps:
      - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v6

@@ -63,7 +63,7 @@ jobs:

     steps:
      - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          submodules: recursive
.github/workflows/repo-workflow-checker.yml (vendored) · 2 changes
@@ -11,7 +11,7 @@ jobs:
     steps:
      # - name: Wait some time for all actions to start
      #   run: sleep 30
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v4
      #   with:
      #     fetch-depth: 0
      - name: Set up Python
@@ -10,8 +10,6 @@ from typing import Any

 from pydantic import BaseModel, Field

-from backend.util.json import dumps as json_dumps
-

 class ResponseType(str, Enum):
     """Types of streaming responses following AI SDK protocol."""

@@ -195,18 +193,6 @@ class StreamError(StreamBaseResponse):
         default=None, description="Additional error details"
     )

-    def to_sse(self) -> str:
-        """Convert to SSE format, only emitting fields required by AI SDK protocol.
-
-        The AI SDK uses z.strictObject({type, errorText}) which rejects
-        any extra fields like `code` or `details`.
-        """
-        data = {
-            "type": self.type.value,
-            "errorText": self.errorText,
-        }
-        return f"data: {json_dumps(data)}\n\n"
-

 class StreamHeartbeat(StreamBaseResponse):
     """Heartbeat to keep SSE connection alive during long-running operations.
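The removed `to_sse` override documents the wire shape the AI SDK accepts for error parts: a strict object carrying only `type` and `errorText`. A minimal standalone sketch of that framing, using the standard-library `json` in place of the project's `backend.util.json` wrapper:

import json

def error_frame(error_text: str) -> str:
    # AI SDK error parts are validated with z.strictObject({type, errorText}),
    # so any extra key (e.g. "code" or "details") would be rejected.
    data = {"type": "error", "errorText": error_text}
    return f"data: {json.dumps(data)}\n\n"

print(error_frame("sandbox timed out"))
# data: {"type": "error", "errorText": "sandbox timed out"}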
@@ -1,10 +1,10 @@
 import json
 import shlex
 import uuid
-from typing import TYPE_CHECKING, Literal, Optional
+from typing import Literal, Optional

 from e2b import AsyncSandbox as BaseAsyncSandbox
-from pydantic import SecretStr
+from pydantic import BaseModel, SecretStr

 from backend.data.block import (
     Block,

@@ -20,13 +20,6 @@ from backend.data.model import (
     SchemaField,
 )
 from backend.integrations.providers import ProviderName
-from backend.util.sandbox_files import (
-    SandboxFileOutput,
-    extract_and_store_sandbox_files,
-)
-
-if TYPE_CHECKING:
-    from backend.executor.utils import ExecutionContext


 class ClaudeCodeExecutionError(Exception):
@@ -181,15 +174,22 @@ class ClaudeCodeBlock(Block):
             advanced=True,
         )

+    class FileOutput(BaseModel):
+        """A file extracted from the sandbox."""
+
+        path: str
+        relative_path: str  # Path relative to working directory (for GitHub, etc.)
+        name: str
+        content: str
+
     class Output(BlockSchemaOutput):
         response: str = SchemaField(
             description="The output/response from Claude Code execution"
         )
-        files: list[SandboxFileOutput] = SchemaField(
+        files: list["ClaudeCodeBlock.FileOutput"] = SchemaField(
             description=(
                 "List of text files created/modified by Claude Code during this execution. "
-                "Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. "
-                "workspace_ref contains a workspace:// URI if the file was stored to workspace."
+                "Each file has 'path', 'relative_path', 'name', and 'content' fields."
             )
         )
         conversation_history: str = SchemaField(

@@ -252,7 +252,6 @@ class ClaudeCodeBlock(Block):
                         "relative_path": "index.html",
                         "name": "index.html",
                         "content": "<html>Hello World</html>",
-                        "workspace_ref": None,
                     }
                 ],
             ),
@@ -268,12 +267,11 @@ class ClaudeCodeBlock(Block):
                 "execute_claude_code": lambda *args, **kwargs: (
                     "Created index.html with hello world content",  # response
                     [
-                        SandboxFileOutput(
+                        ClaudeCodeBlock.FileOutput(
                             path="/home/user/index.html",
                             relative_path="index.html",
                             name="index.html",
                             content="<html>Hello World</html>",
-                            workspace_ref=None,
                         )
                     ],  # files
                     "User: Create a hello world HTML file\n"

@@ -296,8 +294,7 @@ class ClaudeCodeBlock(Block):
         existing_sandbox_id: str,
         conversation_history: str,
         dispose_sandbox: bool,
-        execution_context: "ExecutionContext",
-    ) -> tuple[str, list[SandboxFileOutput], str, str, str]:
+    ) -> tuple[str, list["ClaudeCodeBlock.FileOutput"], str, str, str]:
         """
         Execute Claude Code in an E2B sandbox.
@@ -452,18 +449,14 @@ class ClaudeCodeBlock(Block):
             else:
                 new_conversation_history = turn_entry

-            # Extract files created/modified during this run and store to workspace
-            sandbox_files = await extract_and_store_sandbox_files(
-                sandbox=sandbox,
-                working_directory=working_directory,
-                execution_context=execution_context,
-                since_timestamp=start_timestamp,
-                text_only=True,
+            # Extract files created/modified during this run
+            files = await self._extract_files(
+                sandbox, working_directory, start_timestamp
             )

             return (
                 response,
-                sandbox_files,  # Already SandboxFileOutput objects
+                files,
                 new_conversation_history,
                 current_session_id,
                 sandbox_id,
@@ -478,6 +471,140 @@ class ClaudeCodeBlock(Block):
             if dispose_sandbox and sandbox:
                 await sandbox.kill()

+    async def _extract_files(
+        self,
+        sandbox: BaseAsyncSandbox,
+        working_directory: str,
+        since_timestamp: str | None = None,
+    ) -> list["ClaudeCodeBlock.FileOutput"]:
+        """
+        Extract text files created/modified during this Claude Code execution.
+
+        Args:
+            sandbox: The E2B sandbox instance
+            working_directory: Directory to search for files
+            since_timestamp: ISO timestamp - only return files modified after this time
+
+        Returns:
+            List of FileOutput objects with path, relative_path, name, and content
+        """
+        files: list[ClaudeCodeBlock.FileOutput] = []
+
+        # Text file extensions we can safely read as text
+        text_extensions = {
+            ".txt", ".md", ".html", ".htm", ".css", ".js", ".ts", ".jsx", ".tsx",
+            ".json", ".xml", ".yaml", ".yml", ".toml", ".ini", ".cfg", ".conf",
+            ".py", ".rb", ".php", ".java", ".c", ".cpp", ".h", ".hpp", ".cs",
+            ".go", ".rs", ".swift", ".kt", ".scala", ".sh", ".bash", ".zsh",
+            ".sql", ".graphql", ".env", ".gitignore", ".dockerfile", "Dockerfile",
+            ".vue", ".svelte", ".astro", ".mdx", ".rst", ".tex", ".csv", ".log",
+        }
+
+        try:
+            # List files recursively using find command
+            # Exclude node_modules and .git directories, but allow hidden files
+            # like .env and .gitignore (they're filtered by text_extensions later)
+            # Filter by timestamp to only get files created/modified during this run
+            safe_working_dir = shlex.quote(working_directory)
+            timestamp_filter = ""
+            if since_timestamp:
+                timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} "
+            find_result = await sandbox.commands.run(
+                f"find {safe_working_dir} -type f "
+                f"{timestamp_filter}"
+                f"-not -path '*/node_modules/*' "
+                f"-not -path '*/.git/*' "
+                f"2>/dev/null"
+            )
+
+            if find_result.stdout:
+                for file_path in find_result.stdout.strip().split("\n"):
+                    if not file_path:
+                        continue
+
+                    # Check if it's a text file we can read
+                    is_text = any(
+                        file_path.endswith(ext) for ext in text_extensions
+                    ) or file_path.endswith("Dockerfile")
+
+                    if is_text:
+                        try:
+                            content = await sandbox.files.read(file_path)
+                            # Handle bytes or string
+                            if isinstance(content, bytes):
+                                content = content.decode("utf-8", errors="replace")
+
+                            # Extract filename from path
+                            file_name = file_path.split("/")[-1]
+
+                            # Calculate relative path by stripping working directory
+                            relative_path = file_path
+                            if file_path.startswith(working_directory):
+                                relative_path = file_path[len(working_directory) :]
+                                # Remove leading slash if present
+                                if relative_path.startswith("/"):
+                                    relative_path = relative_path[1:]
+
+                            files.append(
+                                ClaudeCodeBlock.FileOutput(
+                                    path=file_path,
+                                    relative_path=relative_path,
+                                    name=file_name,
+                                    content=content,
+                                )
+                            )
+                        except Exception:
+                            # Skip files that can't be read
+                            pass
+
+        except Exception:
+            # If file extraction fails, return empty results
+            pass
+
+        return files
+
     def _escape_prompt(self, prompt: str) -> str:
         """Escape the prompt for safe shell execution."""
         # Use single quotes and escape any single quotes in the prompt
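For reference, the `find` invocation assembled above reduces to a single shell command. A standalone sketch of the same construction, with hypothetical values for the working directory and timestamp:

import shlex

working_directory = "/home/user"          # hypothetical working dir
since_timestamp = "2026-02-11T15:25:16"   # hypothetical ISO timestamp

timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} " if since_timestamp else ""
cmd = (
    f"find {shlex.quote(working_directory)} -type f "
    f"{timestamp_filter}"
    f"-not -path '*/node_modules/*' "
    f"-not -path '*/.git/*' "
    f"2>/dev/null"
)
print(cmd)
# find /home/user -type f -newermt 2026-02-11T15:25:16 -not -path '*/node_modules/*' -not -path '*/.git/*' 2>/dev/null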
@@ -490,7 +617,6 @@ class ClaudeCodeBlock(Block):
         *,
         e2b_credentials: APIKeyCredentials,
         anthropic_credentials: APIKeyCredentials,
-        execution_context: "ExecutionContext",
         **kwargs,
     ) -> BlockOutput:
         try:

@@ -511,7 +637,6 @@ class ClaudeCodeBlock(Block):
                 existing_sandbox_id=input_data.sandbox_id,
                 conversation_history=input_data.conversation_history,
                 dispose_sandbox=input_data.dispose_sandbox,
-                execution_context=execution_context,
             )

             yield "response", response
@@ -1,5 +1,5 @@
 from enum import Enum
-from typing import TYPE_CHECKING, Any, Literal, Optional
+from typing import Any, Literal, Optional

 from e2b_code_interpreter import AsyncSandbox
 from e2b_code_interpreter import Result as E2BExecutionResult

@@ -20,13 +20,6 @@ from backend.data.model import (
     SchemaField,
 )
 from backend.integrations.providers import ProviderName
-from backend.util.sandbox_files import (
-    SandboxFileOutput,
-    extract_and_store_sandbox_files,
-)
-
-if TYPE_CHECKING:
-    from backend.executor.utils import ExecutionContext

 TEST_CREDENTIALS = APIKeyCredentials(
     id="01234567-89ab-cdef-0123-456789abcdef",

@@ -92,9 +85,6 @@ class CodeExecutionResult(MainCodeExecutionResult):
 class BaseE2BExecutorMixin:
     """Shared implementation methods for E2B executor blocks."""

-    # Default output directory for file extraction
-    OUTPUT_DIR = "/output"
-
     async def execute_code(
         self,
         api_key: str,
self,
|
||||
api_key: str,
|
||||
@@ -105,21 +95,14 @@ class BaseE2BExecutorMixin:
|
||||
timeout: Optional[int] = None,
|
||||
sandbox_id: Optional[str] = None,
|
||||
dispose_sandbox: bool = False,
|
||||
execution_context: Optional["ExecutionContext"] = None,
|
||||
extract_files: bool = False,
|
||||
):
|
||||
"""
|
||||
Unified code execution method that handles all three use cases:
|
||||
1. Create new sandbox and execute (ExecuteCodeBlock)
|
||||
2. Create new sandbox, execute, and return sandbox_id (InstantiateCodeSandboxBlock)
|
||||
3. Connect to existing sandbox and execute (ExecuteCodeStepBlock)
|
||||
|
||||
Args:
|
||||
extract_files: If True and execution_context provided, extract files from
|
||||
/output directory and store to workspace.
|
||||
""" # noqa
|
||||
sandbox = None
|
||||
files: list[SandboxFileOutput] = []
|
||||
try:
|
||||
if sandbox_id:
|
||||
# Connect to existing sandbox (ExecuteCodeStepBlock case)
|
||||
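A rough sketch of the three call shapes the docstring lists, with placeholder credentials and arguments rather than the project's real test values:

async def demo(block, language, setup_code, step_code):
    # 1. One-shot: new sandbox, run, dispose (ExecuteCodeBlock)
    results, text, out, err, _ = await block.execute_code(
        api_key="E2B_KEY", code="print('hi')", language=language,
        template_id="", setup_commands=[], timeout=60, dispose_sandbox=True,
    )
    # 2. Instantiate: keep the sandbox alive and return its id (InstantiateCodeSandboxBlock)
    _, _, _, _, sandbox_id = await block.execute_code(
        api_key="E2B_KEY", code=setup_code, language=language,
        template_id="", setup_commands=[], timeout=300,
    )
    # 3. Step: reconnect by id and run more code (ExecuteCodeStepBlock)
    results, text, out, err, _ = await block.execute_code(
        api_key="E2B_KEY", code=step_code, language=language,
        template_id="", setup_commands=[], timeout=60,
        sandbox_id=sandbox_id,
    )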
@@ -131,9 +114,6 @@ class BaseE2BExecutorMixin:
                 sandbox = await AsyncSandbox.create(
                     api_key=api_key, template=template_id, timeout=timeout
                 )
-                # Create /output directory for file extraction
-                if extract_files:
-                    await sandbox.commands.run(f"mkdir -p {self.OUTPUT_DIR}")
                 if setup_commands:
                     for cmd in setup_commands:
                         await sandbox.commands.run(cmd)
@@ -153,24 +133,7 @@ class BaseE2BExecutorMixin:
             stdout_logs = "".join(execution.logs.stdout)
             stderr_logs = "".join(execution.logs.stderr)

-            # Extract files from /output if requested
-            if extract_files and execution_context:
-                files = await extract_and_store_sandbox_files(
-                    sandbox=sandbox,
-                    working_directory=self.OUTPUT_DIR,
-                    execution_context=execution_context,
-                    since_timestamp=None,  # Get all files in /output
-                    text_only=False,  # Include binary files too
-                )
-
-            return (
-                results,
-                text_output,
-                stdout_logs,
-                stderr_logs,
-                sandbox.sandbox_id,
-                files,
-            )
+            return results, text_output, stdout_logs, stderr_logs, sandbox.sandbox_id
         finally:
             # Dispose of sandbox if requested to reduce usage costs
             if dispose_sandbox and sandbox:
@@ -275,12 +238,6 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
             description="Standard output logs from execution"
         )
         stderr_logs: str = SchemaField(description="Standard error logs from execution")
-        files: list[SandboxFileOutput] = SchemaField(
-            description=(
-                "Files written to /output directory during execution. "
-                "Each file has path, name, content, and workspace_ref (if stored)."
-            ),
-        )

     def __init__(self):
         super().__init__(

@@ -302,30 +259,23 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
                 ("results", []),
                 ("response", "Hello World"),
                 ("stdout_logs", "Hello World\n"),
-                ("files", []),
             ],
             test_mock={
-                "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox, execution_context, extract_files: (  # noqa
+                "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox: (  # noqa
                     [],  # results
                     "Hello World",  # text_output
                     "Hello World\n",  # stdout_logs
                     "",  # stderr_logs
                     "sandbox_id",  # sandbox_id
-                    [],  # files
                 ),
             },
         )

     async def run(
-        self,
-        input_data: Input,
-        *,
-        credentials: APIKeyCredentials,
-        execution_context: "ExecutionContext",
-        **kwargs,
+        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            results, text_output, stdout, stderr, _, files = await self.execute_code(
+            results, text_output, stdout, stderr, _ = await self.execute_code(
                 api_key=credentials.api_key.get_secret_value(),
                 code=input_data.code,
                 language=input_data.language,

@@ -333,8 +283,6 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
                 setup_commands=input_data.setup_commands,
                 timeout=input_data.timeout,
                 dispose_sandbox=input_data.dispose_sandbox,
-                execution_context=execution_context,
-                extract_files=True,
             )

             # Determine result object shape & filter out empty formats

@@ -348,8 +296,6 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
                 yield "stdout_logs", stdout
             if stderr:
                 yield "stderr_logs", stderr
-            # Always yield files (empty list if none)
-            yield "files", [f.model_dump() for f in files]
         except Exception as e:
             yield "error", str(e)
@@ -447,7 +393,6 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
                     "Hello World\n",  # stdout_logs
                     "",  # stderr_logs
                     "sandbox_id",  # sandbox_id
-                    [],  # files
                 ),
             },
         )

@@ -456,7 +401,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            _, text_output, stdout, stderr, sandbox_id, _ = await self.execute_code(
+            _, text_output, stdout, stderr, sandbox_id = await self.execute_code(
                 api_key=credentials.api_key.get_secret_value(),
                 code=input_data.setup_code,
                 language=input_data.language,

@@ -555,7 +500,6 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
                     "Hello World\n",  # stdout_logs
                     "",  # stderr_logs
                     sandbox_id,  # sandbox_id
-                    [],  # files
                 ),
             },
         )

@@ -564,7 +508,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            results, text_output, stdout, stderr, _, _ = await self.execute_code(
+            results, text_output, stdout, stderr, _ = await self.execute_code(
                 api_key=credentials.api_key.get_secret_value(),
                 code=input_data.step_code,
                 language=input_data.language,
@@ -21,71 +21,43 @@ logger = logging.getLogger(__name__)

 class HumanInTheLoopBlock(Block):
     """
-    Pauses execution and waits for human approval or rejection of the data.
+    This block pauses execution and waits for human approval or modification of the data.

-    When executed, this block creates a pending review entry and sets the node execution
-    status to REVIEW. The execution remains paused until a human user either approves
-    or rejects the data.
+    When executed, it creates a pending review entry and sets the node execution status
+    to REVIEW. The execution will remain paused until a human user either:
+    - Approves the data (with or without modifications)
+    - Rejects the data

-    **How it works:**
-    - The input data is presented to a human reviewer
-    - The reviewer can approve or reject (and optionally modify the data if editable)
-    - On approval: the data flows out through the `approved_data` output pin
-    - On rejection: the data flows out through the `rejected_data` output pin
-
-    **Important:** The output pins yield the actual data itself, NOT status strings.
-    The approval/rejection decision determines WHICH output pin fires, not the value.
-    You do NOT need to compare the output to "APPROVED" or "REJECTED" - simply connect
-    downstream blocks to the appropriate output pin for each case.
-
-    **Example usage:**
-    - Connect `approved_data` → next step in your workflow (data was approved)
-    - Connect `rejected_data` → error handling or notification (data was rejected)
+    This is useful for workflows that require human validation or intervention before
+    proceeding to the next steps.
     """

     class Input(BlockSchemaInput):
-        data: Any = SchemaField(
-            description="The data to be reviewed by a human user. "
-            "This exact data will be passed through to either approved_data or "
-            "rejected_data output based on the reviewer's decision."
-        )
+        data: Any = SchemaField(description="The data to be reviewed by a human user")
         name: str = SchemaField(
-            description="A descriptive name for what this data represents. "
-            "This helps the reviewer understand what they are reviewing.",
+            description="A descriptive name for what this data represents",
         )
         editable: bool = SchemaField(
-            description="Whether the human reviewer can edit the data before "
-            "approving or rejecting it",
+            description="Whether the human reviewer can edit the data",
             default=True,
             advanced=True,
         )

     class Output(BlockSchemaOutput):
         approved_data: Any = SchemaField(
-            description="Outputs the input data when the reviewer APPROVES it. "
-            "The value is the actual data itself (not a status string like 'APPROVED'). "
-            "If the reviewer edited the data, this contains the modified version. "
-            "Connect downstream blocks here for the 'approved' workflow path."
+            description="The data when approved (may be modified by reviewer)"
         )
         rejected_data: Any = SchemaField(
-            description="Outputs the input data when the reviewer REJECTS it. "
-            "The value is the actual data itself (not a status string like 'REJECTED'). "
-            "If the reviewer edited the data, this contains the modified version. "
-            "Connect downstream blocks here for the 'rejected' workflow path."
+            description="The data when rejected (may be modified by reviewer)"
         )
         review_message: str = SchemaField(
-            description="Optional message provided by the reviewer explaining their "
-            "decision. Only outputs when the reviewer provides a message; "
-            "this pin does not fire if no message was given.",
-            default="",
+            description="Any message provided by the reviewer", default=""
         )

     def __init__(self):
         super().__init__(
             id="8b2a7b3c-6e9d-4a5f-8c1b-2e3f4a5b6c7d",
-            description="Pause execution for human review. Data flows through "
-            "approved_data or rejected_data output based on the reviewer's decision. "
-            "Outputs contain the actual data, not status strings.",
+            description="Pause execution and wait for human approval or modification of data",
             categories={BlockCategory.BASIC},
             input_schema=HumanInTheLoopBlock.Input,
             output_schema=HumanInTheLoopBlock.Output,
||||
@@ -743,11 +743,6 @@ class GraphModel(Graph, GraphMeta):
|
||||
# For invalid blocks, we still raise immediately as this is a structural issue
|
||||
raise ValueError(f"Invalid block {node.block_id} for node #{node.id}")
|
||||
|
||||
if block.disabled:
|
||||
raise ValueError(
|
||||
f"Block {node.block_id} is disabled and cannot be used in graphs"
|
||||
)
|
||||
|
||||
node_input_mask = (
|
||||
nodes_input_masks.get(node.id, {}) if nodes_input_masks else {}
|
||||
)
|
||||
|
||||
@@ -213,9 +213,6 @@ async def execute_node(
|
||||
block_name=node_block.name,
|
||||
)
|
||||
|
||||
if node_block.disabled:
|
||||
raise ValueError(f"Block {node_block.id} is disabled and cannot be executed")
|
||||
|
||||
# Sanity check: validate the execution input.
|
||||
input_data, error = validate_exec(node, data.inputs, resolve_input=False)
|
||||
if input_data is None:
|
||||
|
||||
@@ -364,44 +364,6 @@ def _remove_orphan_tool_responses(
     return result


-def validate_and_remove_orphan_tool_responses(
-    messages: list[dict],
-    log_warning: bool = True,
-) -> list[dict]:
-    """
-    Validate tool_call/tool_response pairs and remove orphaned responses.
-
-    Scans messages in order, tracking all tool_call IDs. Any tool response
-    referencing an ID not seen in a preceding message is considered orphaned
-    and removed. This prevents API errors like Anthropic's "unexpected tool_use_id".
-
-    Args:
-        messages: List of messages to validate (OpenAI or Anthropic format)
-        log_warning: Whether to log a warning when orphans are found
-
-    Returns:
-        A new list with orphaned tool responses removed
-    """
-    available_ids: set[str] = set()
-    orphan_ids: set[str] = set()
-
-    for msg in messages:
-        available_ids |= _extract_tool_call_ids_from_message(msg)
-        for resp_id in _extract_tool_response_ids_from_message(msg):
-            if resp_id not in available_ids:
-                orphan_ids.add(resp_id)
-
-    if not orphan_ids:
-        return messages
-
-    if log_warning:
-        logger.warning(
-            f"Removing {len(orphan_ids)} orphan tool response(s): {orphan_ids}"
-        )
-
-    return _remove_orphan_tool_responses(messages, orphan_ids)
-
-
 def _ensure_tool_pairs_intact(
     recent_messages: list[dict],
     all_messages: list[dict],
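The removed validator's core is a single ordered pass that collects tool_call IDs as they appear and flags any response whose ID has not yet been seen. A self-contained sketch of that pass on OpenAI-style messages, with simplified stand-ins for the module's private `_extract_*` helpers:

def find_orphan_tool_responses(messages: list[dict]) -> set[str]:
    available_ids: set[str] = set()
    orphan_ids: set[str] = set()
    for msg in messages:
        # Assistant messages contribute tool_call IDs.
        for call in msg.get("tool_calls") or []:
            available_ids.add(call["id"])
        # Tool messages reference a tool_call_id; unseen IDs are orphans.
        if msg.get("role") == "tool":
            resp_id = msg["tool_call_id"]
            if resp_id not in available_ids:
                orphan_ids.add(resp_id)
    return orphan_ids

msgs = [
    {"role": "assistant", "tool_calls": [{"id": "call_1"}]},
    {"role": "tool", "tool_call_id": "call_1", "content": "ok"},
    {"role": "tool", "tool_call_id": "call_9", "content": "stale"},  # no matching call
]
print(find_orphan_tool_responses(msgs))  # {'call_9'}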
@@ -761,13 +723,6 @@ async def compress_context(

     # Filter out any None values that may have been introduced
     final_msgs: list[dict] = [m for m in msgs if m is not None]

-    # ---- STEP 6: Final tool-pair validation ---------------------------------
-    # After all compression steps, verify that every tool response has a
-    # matching tool_call in a preceding assistant message. Remove orphans
-    # to prevent API errors (e.g., Anthropic's "unexpected tool_use_id").
-    final_msgs = validate_and_remove_orphan_tool_responses(final_msgs)
-
     final_count = sum(_msg_tokens(m, enc) for m in final_msgs)
     error = None
     if final_count + reserve > target_tokens:
backend/util/sandbox_files.py (deleted)
@@ -1,288 +0,0 @@
-"""
-Shared utilities for extracting and storing files from E2B sandboxes.
-
-This module provides common file extraction and workspace storage functionality
-for blocks that run code in E2B sandboxes (Claude Code, Code Executor, etc.).
-"""
-
-import logging
-import shlex
-from dataclasses import dataclass
-from typing import TYPE_CHECKING
-
-from pydantic import BaseModel
-
-from backend.util.file import store_media_file
-from backend.util.type import MediaFileType
-
-if TYPE_CHECKING:
-    from e2b import AsyncSandbox as BaseAsyncSandbox
-
-    from backend.executor.utils import ExecutionContext
-
-logger = logging.getLogger(__name__)
-
-# Text file extensions that can be safely read and stored as text
-TEXT_EXTENSIONS = {
-    ".txt", ".md", ".html", ".htm", ".css", ".js", ".ts", ".jsx", ".tsx",
-    ".json", ".xml", ".yaml", ".yml", ".toml", ".ini", ".cfg", ".conf",
-    ".py", ".rb", ".php", ".java", ".c", ".cpp", ".h", ".hpp", ".cs",
-    ".go", ".rs", ".swift", ".kt", ".scala", ".sh", ".bash", ".zsh",
-    ".sql", ".graphql", ".env", ".gitignore", ".dockerfile", "Dockerfile",
-    ".vue", ".svelte", ".astro", ".mdx", ".rst", ".tex", ".csv", ".log",
-}
-
-
-class SandboxFileOutput(BaseModel):
-    """A file extracted from a sandbox and optionally stored in workspace."""
-
-    path: str
-    """Full path in the sandbox."""
-
-    relative_path: str
-    """Path relative to the working directory."""
-
-    name: str
-    """Filename only."""
-
-    content: str
-    """File content as text (for backward compatibility)."""
-
-    workspace_ref: str | None = None
-    """Workspace reference (workspace://{id}#mime) if stored, None otherwise."""
-
-
-@dataclass
-class ExtractedFile:
-    """Internal representation of an extracted file before storage."""
-
-    path: str
-    relative_path: str
-    name: str
-    content: bytes
-    is_text: bool
-
-
-async def extract_sandbox_files(
-    sandbox: "BaseAsyncSandbox",
-    working_directory: str,
-    since_timestamp: str | None = None,
-    text_only: bool = True,
-) -> list[ExtractedFile]:
-    """
-    Extract files from an E2B sandbox.
-
-    Args:
-        sandbox: The E2B sandbox instance
-        working_directory: Directory to search for files
-        since_timestamp: ISO timestamp - only return files modified after this time
-        text_only: If True, only extract text files (default). If False, extract all files.
-
-    Returns:
-        List of ExtractedFile objects with path, content, and metadata
-    """
-    files: list[ExtractedFile] = []
-
-    try:
-        # Build find command
-        safe_working_dir = shlex.quote(working_directory)
-        timestamp_filter = ""
-        if since_timestamp:
-            timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} "
-
-        find_result = await sandbox.commands.run(
-            f"find {safe_working_dir} -type f "
-            f"{timestamp_filter}"
-            f"-not -path '*/node_modules/*' "
-            f"-not -path '*/.git/*' "
-            f"2>/dev/null"
-        )
-
-        if not find_result.stdout:
-            return files
-
-        for file_path in find_result.stdout.strip().split("\n"):
-            if not file_path:
-                continue
-
-            # Check if it's a text file
-            is_text = any(file_path.endswith(ext) for ext in TEXT_EXTENSIONS)
-
-            # Skip non-text files if text_only mode
-            if text_only and not is_text:
-                continue
-
-            try:
-                # Read file content as bytes
-                content = await sandbox.files.read(file_path, format="bytes")
-                if isinstance(content, str):
-                    content = content.encode("utf-8")
-                elif isinstance(content, bytearray):
-                    content = bytes(content)
-
-                # Extract filename from path
-                file_name = file_path.split("/")[-1]
-
-                # Calculate relative path
-                relative_path = file_path
-                if file_path.startswith(working_directory):
-                    relative_path = file_path[len(working_directory) :]
-                    if relative_path.startswith("/"):
-                        relative_path = relative_path[1:]
-
-                files.append(
-                    ExtractedFile(
-                        path=file_path,
-                        relative_path=relative_path,
-                        name=file_name,
-                        content=content,
-                        is_text=is_text,
-                    )
-                )
-            except Exception as e:
-                logger.debug(f"Failed to read file {file_path}: {e}")
-                continue
-
-    except Exception as e:
-        logger.warning(f"File extraction failed: {e}")
-
-    return files
-
-
-async def store_sandbox_files(
-    extracted_files: list[ExtractedFile],
-    execution_context: "ExecutionContext",
-) -> list[SandboxFileOutput]:
-    """
-    Store extracted sandbox files to workspace and return output objects.
-
-    Args:
-        extracted_files: List of files extracted from sandbox
-        execution_context: Execution context for workspace storage
-
-    Returns:
-        List of SandboxFileOutput objects with workspace refs
-    """
-    outputs: list[SandboxFileOutput] = []
-
-    for file in extracted_files:
-        # Decode content for text files (for backward compat content field)
-        if file.is_text:
-            try:
-                content_str = file.content.decode("utf-8", errors="replace")
-            except Exception:
-                content_str = ""
-        else:
-            content_str = f"[Binary file: {len(file.content)} bytes]"
-
-        # Try to store in workspace
-        workspace_ref: str | None = None
-        try:
-            # Convert bytes to data URI for store_media_file
-            import base64
-            import mimetypes
-
-            mime_type = mimetypes.guess_type(file.name)[0] or "application/octet-stream"
-            data_uri = (
-                f"data:{mime_type};base64,{base64.b64encode(file.content).decode()}"
-            )
-
-            result = await store_media_file(
-                file=MediaFileType(data_uri),
-                execution_context=execution_context,
-                return_format="for_block_output",
-            )
-            # Result is workspace://... or data:... depending on context
-            if result.startswith("workspace://"):
-                workspace_ref = result
-        except Exception as e:
-            logger.warning(f"Failed to store file {file.name} to workspace: {e}")
-            # For binary files, fall back to data URI to prevent data loss
-            if not file.is_text:
-                content_str = data_uri
-
-        outputs.append(
-            SandboxFileOutput(
-                path=file.path,
-                relative_path=file.relative_path,
-                name=file.name,
-                content=content_str,
-                workspace_ref=workspace_ref,
-            )
-        )
-
-    return outputs
-
-
-async def extract_and_store_sandbox_files(
-    sandbox: "BaseAsyncSandbox",
-    working_directory: str,
-    execution_context: "ExecutionContext",
-    since_timestamp: str | None = None,
-    text_only: bool = True,
-) -> list[SandboxFileOutput]:
-    """
-    Extract files from sandbox and store them in workspace.
-
-    This is the main entry point combining extraction and storage.
-
-    Args:
-        sandbox: The E2B sandbox instance
-        working_directory: Directory to search for files
-        execution_context: Execution context for workspace storage
-        since_timestamp: ISO timestamp - only return files modified after this time
-        text_only: If True, only extract text files
-
-    Returns:
-        List of SandboxFileOutput objects with content and workspace refs
-    """
-    extracted = await extract_sandbox_files(
-        sandbox=sandbox,
-        working_directory=working_directory,
-        since_timestamp=since_timestamp,
-        text_only=text_only,
-    )
-
-    return await store_sandbox_files(extracted, execution_context)
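The deleted `store_sandbox_files` wraps raw bytes in a data URI before handing them to `store_media_file`. That conversion is standard-library only; a standalone sketch:

import base64
import mimetypes

def to_data_uri(name: str, content: bytes) -> str:
    # Guess the MIME type from the filename; fall back to a generic binary type.
    mime_type = mimetypes.guess_type(name)[0] or "application/octet-stream"
    return f"data:{mime_type};base64,{base64.b64encode(content).decode()}"

print(to_data_uri("index.html", b"<html>Hello World</html>"))
# data:text/html;base64,PGh0bWw+SGVsbG8gV29ybGQ8L2h0bWw+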
autogpt_platform/backend/poetry.lock (generated) · 82 changes
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand.

 [[package]]
 name = "aio-pika"

@@ -374,7 +374,7 @@ description = "LTS Port of Python audioop"
 optional = false
 python-versions = ">=3.13"
 groups = ["main"]
-markers = "python_version >= \"3.13\""
+markers = "python_version == \"3.13\""
 files = [
     {file = "audioop_lts-0.2.2-cp313-abi3-macosx_10_13_universal2.whl", hash = "sha256:fd3d4602dc64914d462924a08c1a9816435a2155d74f325853c1f1ac3b2d9800"},
     {file = "audioop_lts-0.2.2-cp313-abi3-macosx_10_13_x86_64.whl", hash = "sha256:550c114a8df0aafe9a05442a1162dfc8fec37e9af1d625ae6060fed6e756f303"},

@@ -474,7 +474,7 @@ description = "Backport of asyncio.Runner, a context manager that controls event
 optional = false
 python-versions = "<3.11,>=3.8"
 groups = ["main"]
-markers = "python_version < \"3.11\""
+markers = "python_version == \"3.10\""
 files = [
     {file = "backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5"},
     {file = "backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162"},

@@ -487,7 +487,7 @@ description = "Backport of CPython tarfile module"
 optional = false
 python-versions = ">=3.8"
 groups = ["main"]
-markers = "python_version <= \"3.11\""
+markers = "python_version < \"3.12\""
 files = [
     {file = "backports.tarfile-1.2.0-py3-none-any.whl", hash = "sha256:77e284d754527b01fb1e6fa8a1afe577858ebe4e9dad8919e34c862cb399bc34"},
     {file = "backports_tarfile-1.2.0.tar.gz", hash = "sha256:d75e02c268746e1b8144c278978b6e98e85de6ad16f8e4b0844a154557eca991"},

@@ -659,7 +659,6 @@
 description = "Foreign Function Interface for Python calling C code."
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
-markers = "platform_python_implementation != \"PyPy\" or sys_platform == \"darwin\""
 files = [
     {file = "cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44"},
     {file = "cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49"},

@@ -1042,6 +1041,33 @@ ssh = ["bcrypt (>=3.1.5)"]
 test = ["certifi (>=2024)", "cryptography-vectors (==46.0.4)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
 test-randomorder = ["pytest-randomly"]

+[[package]]
+name = "dataclass-wizard"
+version = "0.39.1"
+description = "A wizard-like JSON serialization library for Python dataclasses"
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+    {file = "dataclass_wizard-0.39.1-py3-none-any.whl", hash = "sha256:3324e59eca705882eb34e2b3989b2beadd8c2b523e6269d4002cf1a4a5bf703b"},
+    {file = "dataclass_wizard-0.39.1.tar.gz", hash = "sha256:1679948ed7c62103f40b34df97d03b35e6b2ad50f58173fdbe30074e2e4730f2"},
+]
+
+[package.dependencies]
+typing-extensions = {version = ">=4.13.0", markers = "python_version <= \"3.12\""}
+
+[package.extras]
+all = ["PyYAML (>=6,<7)", "Sphinx (==7.4.7) ; python_version == \"3.9\"", "Sphinx (==8.1.3) ; python_version >= \"3.10\"", "attrs (==25.4.0)", "bump-my-version (==1.2.5)", "coverage (>=6.2)", "dacite (==1.9.2)", "dataclass-factory (==2.16)", "dataclasses-json (==0.6.7)", "flake8 (>=3)", "jsons (==1.6.3)", "mashumaro (==3.17)", "matplotlib", "mypy (>=1.19,<2)", "pydantic (==2.12.5)", "pytest (==8.3.4)", "pytest-benchmark[histogram]", "pytest-cov (==6.0.0)", "pytest-mock (>=3.6.1)", "python-dotenv (>=1,<2)", "pytimeparse (==1.1.8)", "pytimeparse (>=1.1.7)", "sphinx-autodoc-typehints (==2.5.0) ; python_version >= \"3.10\"", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.0)", "tomli (>=2,<3) ; python_version == \"3.10\"", "tomli (>=2,<3) ; python_version == \"3.9\"", "tomli-w (>=1,<2)", "tox (==4.23.2)", "twine (==6.0.1)", "typing-extensions (>=4.9.0)", "tzdata (>=2024.1) ; platform_system == \"Windows\"", "watchdog[watchmedo] (==6.0.0)", "wheel (==0.45.1)"]
+bench = ["attrs (==25.4.0)", "dacite (==1.9.2)", "dataclass-factory (==2.16)", "dataclasses-json (==0.6.7)", "jsons (==1.6.3)", "mashumaro (==3.17)", "matplotlib", "pydantic (==2.12.5)", "pytest-benchmark[histogram]"]
+dev = ["Sphinx (==7.4.7) ; python_version == \"3.9\"", "Sphinx (==8.1.3) ; python_version >= \"3.10\"", "bump-my-version (==1.2.5)", "coverage (>=6.2)", "dataclass-wizard[toml]", "flake8 (>=3)", "mypy (>=1.19,<2)", "pip (>=21.3.1)", "python-dotenv (>=1,<2)", "pytimeparse (==1.1.8)", "tomli (>=2,<3) ; python_version == \"3.10\"", "tomli (>=2,<3) ; python_version == \"3.9\"", "tomli-w (>=1,<2)", "tox (==4.23.2)", "twine (==6.0.1)", "tzdata (>=2024.1) ; platform_system == \"Windows\"", "watchdog[watchmedo] (==6.0.0)", "wheel (==0.45.1)"]
+docs = ["sphinx-autodoc-typehints (==2.5.0) ; python_version >= \"3.10\"", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.0)", "typing-extensions (>=4.9.0)"]
+dotenv = ["python-dotenv (>=1,<2)"]
+test = ["pytest (==8.3.4)", "pytest-cov (==6.0.0)", "pytest-mock (>=3.6.1)", "tzdata (>=2024.1) ; platform_system == \"Windows\""]
+timedelta = ["pytimeparse (>=1.1.7)"]
+toml = ["tomli (>=2,<3) ; python_version == \"3.10\"", "tomli (>=2,<3) ; python_version == \"3.9\"", "tomli-w (>=1,<2)"]
+tz = ["tzdata (>=2024.1)"]
+yaml = ["PyYAML (>=6,<7)"]
+
 [[package]]
 name = "decorator"
 version = "5.2.1"

@@ -1338,7 +1364,7 @@ description = "Backport of PEP 654 (exception groups)"
 optional = false
 python-versions = ">=3.7"
 groups = ["main", "dev"]
-markers = "python_version < \"3.11\""
+markers = "python_version == \"3.10\""
 files = [
     {file = "exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598"},
     {file = "exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219"},

@@ -1820,16 +1846,16 @@ files = [
 google-auth = ">=2.14.1,<3.0.0"
 googleapis-common-protos = ">=1.56.2,<2.0.0"
 grpcio = [
-    {version = ">=1.33.2,<2.0.0", optional = true, markers = "extra == \"grpc\""},
     {version = ">=1.49.1,<2.0.0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
+    {version = ">=1.33.2,<2.0.0", optional = true, markers = "extra == \"grpc\""},
 ]
 grpcio-status = [
-    {version = ">=1.33.2,<2.0.0", optional = true, markers = "extra == \"grpc\""},
     {version = ">=1.49.1,<2.0.0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
+    {version = ">=1.33.2,<2.0.0", optional = true, markers = "extra == \"grpc\""},
 ]
 proto-plus = [
-    {version = ">=1.22.3,<2.0.0"},
     {version = ">=1.25.0,<2.0.0", markers = "python_version >= \"3.13\""},
+    {version = ">=1.22.3,<2.0.0"},
 ]
 protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0"
 requests = ">=2.18.0,<3.0.0"

@@ -1940,8 +1966,8 @@ google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0", extras
 google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0"
 grpcio = ">=1.33.2,<2.0.0"
 proto-plus = [
-    {version = ">=1.22.3,<2.0.0"},
     {version = ">=1.25.0,<2.0.0", markers = "python_version >= \"3.13\""},
+    {version = ">=1.22.3,<2.0.0"},
 ]
 protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0"

@@ -2001,9 +2027,9 @@ google-cloud-core = ">=2.0.0,<3.0.0"
 grpc-google-iam-v1 = ">=0.12.4,<1.0.0"
 opentelemetry-api = ">=1.9.0"
 proto-plus = [
-    {version = ">=1.22.0,<2.0.0"},
-    {version = ">=1.22.2,<2.0.0", markers = "python_version >= \"3.11\""},
     {version = ">=1.25.0,<2.0.0", markers = "python_version >= \"3.13\""},
+    {version = ">=1.22.2,<2.0.0", markers = "python_version >= \"3.11\" and python_version < \"3.13\""},
+    {version = ">=1.22.0,<2.0.0", markers = "python_version < \"3.11\""},
 ]
 protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0"

@@ -3802,7 +3828,7 @@ description = "Fundamental package for array computing in Python"
 optional = false
 python-versions = ">=3.10"
 groups = ["main"]
-markers = "python_version < \"3.11\""
+markers = "python_version == \"3.10\""
 files = [
     {file = "numpy-2.2.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b412caa66f72040e6d268491a59f2c43bf03eb6c96dd8f0307829feb7fa2b6fb"},
     {file = "numpy-2.2.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e41fd67c52b86603a91c1a505ebaef50b3314de0213461c7a6e99c9a3beff90"},

@@ -4287,9 +4313,9 @@ files = [

 [package.dependencies]
 numpy = [
-    {version = ">=1.22.4", markers = "python_version < \"3.11\""},
-    {version = ">=1.23.2", markers = "python_version == \"3.11\""},
     {version = ">=1.26.0", markers = "python_version >= \"3.12\""},
+    {version = ">=1.23.2", markers = "python_version == \"3.11\""},
+    {version = ">=1.22.4", markers = "python_version < \"3.11\""},
 ]
 python-dateutil = ">=2.8.2"
 pytz = ">=2020.1"

@@ -4532,8 +4558,8 @@ pinecone-plugin-interface = ">=0.0.7,<0.0.8"
 python-dateutil = ">=2.5.3"
 typing-extensions = ">=3.7.4"
 urllib3 = [
-    {version = ">=1.26.0", markers = "python_version >= \"3.8\" and python_version < \"3.12\""},
     {version = ">=1.26.5", markers = "python_version >= \"3.12\" and python_version < \"4.0\""},
+    {version = ">=1.26.0", markers = "python_version >= \"3.8\" and python_version < \"3.12\""},
 ]

 [package.extras]

@@ -5361,7 +5387,7 @@ description = "C parser in Python"
 optional = false
 python-versions = ">=3.10"
 groups = ["main"]
-markers = "(platform_python_implementation != \"PyPy\" or sys_platform == \"darwin\") and implementation_name != \"PyPy\""
+markers = "implementation_name != \"PyPy\""
 files = [
     {file = "pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992"},
     {file = "pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29"},

@@ -6130,10 +6156,10 @@ files = [
 grpcio = ">=1.41.0"
 httpx = {version = ">=0.20.0", extras = ["http2"]}
 numpy = [
-    {version = ">=1.21,<2.3.0", markers = "python_version == \"3.10\""},
-    {version = ">=1.21", markers = "python_version == \"3.11\""},
     {version = ">=2.1.0", markers = "python_version == \"3.13\""},
+    {version = ">=1.21", markers = "python_version == \"3.11\""},
     {version = ">=1.26", markers = "python_version == \"3.12\""},
+    {version = ">=1.21,<2.3.0", markers = "python_version == \"3.10\""},
 ]
 portalocker = ">=2.7.0,<4.0"
 protobuf = ">=3.20.0"

@@ -7255,18 +7281,20 @@ test = ["pytest", "ruff"]

 [[package]]
 name = "todoist-api-python"
-version = "2.1.7"
-description = "Official Python SDK for the Todoist REST API."
+version = "3.2.1"
+description = "Official Python SDK for the Todoist API."
 optional = false
-python-versions = "<4.0,>=3.9"
+python-versions = "~=3.9"
 groups = ["main"]
 files = [
-    {file = "todoist_api_python-2.1.7-py3-none-any.whl", hash = "sha256:278bfe851b9bd19bde5ff5de09d813d671ef7310ba55e1962131fca5b59bb735"},
-    {file = "todoist_api_python-2.1.7.tar.gz", hash = "sha256:84934a19ccd83fb61010a8126362a5d7d6486c92454c111307ba55bc74903f5c"},
+    {file = "todoist_api_python-3.2.1-py3-none-any.whl", hash = "sha256:1091e9b557291380abd79245efe961b0f2261c7b4eb16e52beccfc6357320e95"},
+    {file = "todoist_api_python-3.2.1.tar.gz", hash = "sha256:22c60230cc616437846f9a6469dddd97548545ef916d2bea98b2ca2f522d92f9"},
 ]

 [package.dependencies]
-requests = ">=2.32.3,<3.0.0"
+annotated-types = "*"
+dataclass-wizard = ">=0.35.0,<1.0"
+requests = ">=2.32.3,<3"

 [[package]]
 name = "tokenizers"

@@ -7317,7 +7345,7 @@ description = "A lil' TOML parser"
 optional = false
 python-versions = ">=3.8"
 groups = ["main", "dev"]
-markers = "python_version < \"3.11\""
+markers = "python_version == \"3.10\""
 files = [
     {file = "tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867"},
     {file = "tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9"},

@@ -8440,4 +8468,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.10,<3.14"
-content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af"
+content-hash = "13affcbd2e6912d60a5298c968ca473aa9ba4b98be8fbed43ada026ff49987b7"
@@ -67,7 +67,7 @@ strenum = "^0.4.9"
 stripe = "^11.5.0"
 supabase = "2.27.3"
 tenacity = "^9.1.4"
-todoist-api-python = "^2.1.7"
+todoist-api-python = "^3.2.1"
 tweepy = "^4.16.0"
 uvicorn = { extras = ["standard"], version = "^0.40.0" }
 websockets = "^15.0"
@@ -10,9 +10,8 @@ import {
   MessageResponse,
 } from "@/components/ai-elements/message";
 import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
-import { toast } from "@/components/molecules/Toast/use-toast";
 import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai";
-import { useEffect, useRef, useState } from "react";
+import { useEffect, useState } from "react";
 import { CreateAgentTool } from "../../tools/CreateAgent/CreateAgent";
 import { EditAgentTool } from "../../tools/EditAgent/EditAgent";
 import { FindAgentsTool } from "../../tools/FindAgents/FindAgents";

@@ -122,7 +121,6 @@ export const ChatMessagesContainer = ({
   isLoading,
 }: ChatMessagesContainerProps) => {
   const [thinkingPhrase, setThinkingPhrase] = useState(getRandomPhrase);
-  const lastToastTimeRef = useRef(0);

   useEffect(() => {
     if (status === "submitted") {

@@ -130,20 +128,6 @@ export const ChatMessagesContainer = ({
     }
   }, [status]);

-  // Show a toast when a new error occurs, debounced to avoid spam
-  useEffect(() => {
-    if (!error) return;
-    const now = Date.now();
-    if (now - lastToastTimeRef.current < 3_000) return;
-    lastToastTimeRef.current = now;
-    toast({
-      variant: "destructive",
-      title: "Something went wrong",
-      description:
-        "The assistant encountered an error. Please try sending your message again.",
-    });
-  }, [error]);
-
   const lastMessage = messages[messages.length - 1];
   const lastAssistantHasVisibleContent =
     lastMessage?.role === "assistant" &&

@@ -279,12 +263,8 @@ export const ChatMessagesContainer = ({
             </Message>
           )}
           {error && (
-            <div className="rounded-lg bg-red-50 p-4 text-sm text-red-700">
-              <p className="font-medium">Something went wrong</p>
-              <p className="mt-1 text-red-600">
-                The assistant encountered an error. Please try sending your
-                message again.
-              </p>
+            <div className="rounded-lg bg-red-50 p-3 text-red-600">
+              Error: {error.message}
             </div>
           )}
         </ConversationContent>
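The removed effect rate-limits error toasts with a last-fired timestamp and a 3-second window. The same guard pattern, sketched in Python purely for illustration:

import time

class ToastDebouncer:
    """Suppress repeat notifications that arrive within `interval` seconds."""

    def __init__(self, interval: float = 3.0):
        self.interval = interval
        self._last_fired = 0.0

    def should_fire(self) -> bool:
        now = time.monotonic()
        if now - self._last_fired < self.interval:
            return False
        self._last_fired = now
        return True

debounce = ToastDebouncer()
print(debounce.should_fire())  # True  - first error shows a toast
print(debounce.should_fire())  # False - a second error within 3s is suppressed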
@@ -30,7 +30,7 @@ export function ContentCard({
   return (
     <div
       className={cn(
-        "min-w-0 rounded-lg bg-gradient-to-r from-purple-500/30 to-blue-500/30 p-[1px]",
+        "rounded-lg bg-gradient-to-r from-purple-500/30 to-blue-500/30 p-[1px]",
         className,
       )}
    >
@@ -4,6 +4,7 @@ import { WarningDiamondIcon } from "@phosphor-icons/react";
 import type { ToolUIPart } from "ai";
 import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions";
 import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
+import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
 import { ProgressBar } from "../../components/ProgressBar/ProgressBar";
 import {
   ContentCardDescription,

@@ -76,7 +77,7 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
     isOperationInProgressOutput(output)
   ) {
     return {
-      icon,
+      icon: <OrbitLoader size={32} />,
       title: "Creating agent, this may take a few minutes. Sit back and relax.",
     };
   }

@@ -203,7 +203,7 @@ export function getAccordionMeta(output: RunAgentToolOutput): {
     ? output.status.trim()
     : "started";
   return {
-    icon,
+    icon: <OrbitLoader size={28} className="text-neutral-700" />,
     title: output.graph_name,
     description: `Status: ${statusText}`,
   };

@@ -149,7 +149,7 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
   if (isRunBlockBlockOutput(output)) {
     const keys = Object.keys(output.outputs ?? {});
     return {
-      icon,
+      icon: <OrbitLoader size={24} className="text-neutral-700" />,
      title: output.block_name,
      description:
        keys.length > 0
@@ -1,8 +1,11 @@
|
||||
import { environment } from "@/services/environment";
|
||||
import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers";
|
||||
import { NextRequest } from "next/server";
|
||||
import { normalizeSSEStream, SSE_HEADERS } from "../../../sse-helpers";
|
||||
|
||||
/**
|
||||
* SSE Proxy for chat streaming.
|
||||
* Supports POST with context (page content + URL) in the request body.
|
||||
*/
|
||||
export async function POST(
|
||||
request: NextRequest,
|
||||
{ params }: { params: Promise<{ sessionId: string }> },
|
||||
@@ -20,14 +23,17 @@ export async function POST(
|
||||
);
|
||||
}
|
||||
|
||||
// Get auth token from server-side session
|
||||
const token = await getServerAuthToken();
|
||||
|
||||
// Build backend URL
|
||||
const backendUrl = environment.getAGPTServerBaseUrl();
|
||||
const streamUrl = new URL(
|
||||
`/api/chat/sessions/${sessionId}/stream`,
|
||||
backendUrl,
|
||||
);
|
||||
|
||||
// Forward request to backend with auth header
|
||||
const headers: Record<string, string> = {
|
||||
"Content-Type": "application/json",
|
||||
Accept: "text/event-stream",
|
||||
@@ -57,15 +63,14 @@ export async function POST(
|
||||
});
|
||||
}
|
||||
|
||||
if (!response.body) {
|
||||
return new Response(
|
||||
JSON.stringify({ error: "Empty response from chat service" }),
|
||||
{ status: 502, headers: { "Content-Type": "application/json" } },
|
||||
);
|
||||
}
|
||||
|
||||
return new Response(normalizeSSEStream(response.body), {
|
||||
headers: SSE_HEADERS,
|
||||
// Return the SSE stream directly
|
||||
return new Response(response.body, {
|
||||
headers: {
|
||||
"Content-Type": "text/event-stream",
|
||||
"Cache-Control": "no-cache, no-transform",
|
||||
Connection: "keep-alive",
|
||||
"X-Accel-Buffering": "no",
|
||||
},
|
||||
});
|
||||
} catch (error) {
|
||||
console.error("SSE proxy error:", error);
|
||||
@@ -82,6 +87,13 @@ export async function POST(
|
||||
}
|
||||
}
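
// Illustrative only — not part of the diff: a minimal client-side sketch of
// calling the POST proxy above. The payload shape (message plus context) is an
// assumption based on the doc comment ("context (page content + URL)"); only
// the route path and SSE content type come from the code shown.
async function startChatStreamExample(sessionId: string) {
  const res = await fetch(`/api/chat/sessions/${sessionId}/stream`, {
    method: "POST",
    headers: { "Content-Type": "application/json", Accept: "text/event-stream" },
    body: JSON.stringify({
      message: "Hello",
      context: { url: window.location.href, content: document.title }, // assumed shape
    }),
  });
  if (!res.ok || !res.body) throw new Error(`Stream failed: ${res.status}`);
  // Read the SSE stream as decoded text chunks; a real client parses events.
  const reader = res.body.pipeThrough(new TextDecoderStream()).getReader();
  for (;;) {
    const { value, done } = await reader.read();
    if (done) break;
    console.log(value);
  }
}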

/**
 * Resume an active stream for a session.
 *
 * Called by the AI SDK's `useChat(resume: true)` on page load.
 * Proxies to the backend which checks for an active stream and either
 * replays it (200 + SSE) or returns 204 No Content.
 */
export async function GET(
  _request: NextRequest,
  { params }: { params: Promise<{ sessionId: string }> },
@@ -112,6 +124,7 @@ export async function GET(
    headers,
  });

  // 204 = no active stream to resume
  if (response.status === 204) {
    return new Response(null, { status: 204 });
  }
@@ -124,13 +137,12 @@ export async function GET(
    });
  }

  if (!response.body) {
    return new Response(null, { status: 204 });
  }

  return new Response(normalizeSSEStream(response.body), {
  return new Response(response.body, {
    headers: {
      ...SSE_HEADERS,
      "Content-Type": "text/event-stream",
      "Cache-Control": "no-cache, no-transform",
      Connection: "keep-alive",
      "X-Accel-Buffering": "no",
      "x-vercel-ai-ui-message-stream": "v1",
    },
  });
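
// Illustrative only — not part of the diff: a sketch of the client side of the
// resume contract, assuming the AI SDK's useChat hook named in the doc comment
// above. On mount it issues a GET to this route; a 200 replays the active
// stream, a 204 means there is nothing to resume.
import { useChat } from "@ai-sdk/react";

export function useResumedChat(sessionId: string) {
  return useChat({
    id: sessionId,
    resume: true, // triggers the GET handler above on page load
  });
}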

@@ -1,72 +0,0 @@
export const SSE_HEADERS = {
  "Content-Type": "text/event-stream",
  "Cache-Control": "no-cache, no-transform",
  Connection: "keep-alive",
  "X-Accel-Buffering": "no",
} as const;

export function normalizeSSEStream(
  input: ReadableStream<Uint8Array>,
): ReadableStream<Uint8Array> {
  const decoder = new TextDecoder();
  const encoder = new TextEncoder();
  let buffer = "";

  return input.pipeThrough(
    new TransformStream<Uint8Array, Uint8Array>({
      transform(chunk, controller) {
        buffer += decoder.decode(chunk, { stream: true });

        const parts = buffer.split("\n\n");
        buffer = parts.pop() ?? "";

        for (const part of parts) {
          const normalized = normalizeSSEEvent(part);
          controller.enqueue(encoder.encode(normalized + "\n\n"));
        }
      },
      flush(controller) {
        if (buffer.trim()) {
          const normalized = normalizeSSEEvent(buffer);
          controller.enqueue(encoder.encode(normalized + "\n\n"));
        }
      },
    }),
  );
}

function normalizeSSEEvent(event: string): string {
  const lines = event.split("\n");
  const dataLines: string[] = [];
  const otherLines: string[] = [];

  for (const line of lines) {
    if (line.startsWith("data: ")) {
      dataLines.push(line.slice(6));
    } else {
      otherLines.push(line);
    }
  }

  if (dataLines.length === 0) return event;

  const dataStr = dataLines.join("\n");
  try {
    const parsed = JSON.parse(dataStr) as Record<string, unknown>;
    if (parsed.type === "error") {
      const normalized = {
        type: "error",
        errorText:
          typeof parsed.errorText === "string"
            ? parsed.errorText
            : "An unexpected error occurred",
      };
      const newData = `data: ${JSON.stringify(normalized)}`;
      return [...otherLines.filter((l) => l.length > 0), newData].join("\n");
    }
  } catch {
    // Not valid JSON — pass through as-is
  }

  return event;
}
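
// Illustrative only — not part of the diff: a worked example of what the
// normalizer above does, derived directly from the code shown. Given this raw
// SSE event from the backend, where errorText is not a string:
//
//   data: {"type":"error","errorText":123}
//
// normalizeSSEEvent replaces the malformed errorText with the fallback:
//
//   data: {"type":"error","errorText":"An unexpected error occurred"}
//
// Events without a data line, non-JSON payloads, and JSON events whose type is
// not "error" all pass through unchanged.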

@@ -1,8 +1,20 @@
import { environment } from "@/services/environment";
import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers";
import { NextRequest } from "next/server";
import { normalizeSSEStream, SSE_HEADERS } from "../../../sse-helpers";

/**
 * SSE Proxy for task stream reconnection.
 *
 * This endpoint allows clients to reconnect to an ongoing or recently completed
 * background task's stream. It replays missed messages from Redis Streams and
 * subscribes to live updates if the task is still running.
 *
 * Client contract:
 * 1. When receiving an operation_started event, store the task_id
 * 2. To reconnect: GET /api/chat/tasks/{taskId}/stream?last_message_id={idx}
 * 3. Messages are replayed from the last_message_id position
 * 4. Stream ends when "finish" event is received
 */
export async function GET(
  request: NextRequest,
  { params }: { params: Promise<{ taskId: string }> },
@@ -12,12 +24,15 @@ export async function GET(
  const lastMessageId = searchParams.get("last_message_id") || "0-0";

  try {
    // Get auth token from server-side session
    const token = await getServerAuthToken();

    // Build backend URL
    const backendUrl = environment.getAGPTServerBaseUrl();
    const streamUrl = new URL(`/api/chat/tasks/${taskId}/stream`, backendUrl);
    streamUrl.searchParams.set("last_message_id", lastMessageId);

    // Forward request to backend with auth header
    const headers: Record<string, string> = {
      Accept: "text/event-stream",
      "Cache-Control": "no-cache",
@@ -41,12 +56,14 @@ export async function GET(
    });
  }

  if (!response.body) {
    return new Response(null, { status: 204 });
  }

  return new Response(normalizeSSEStream(response.body), {
    headers: SSE_HEADERS,
  // Return the SSE stream directly
  return new Response(response.body, {
    headers: {
      "Content-Type": "text/event-stream",
      "Cache-Control": "no-cache, no-transform",
      Connection: "keep-alive",
      "X-Accel-Buffering": "no",
    },
  });
} catch (error) {
  console.error("Task stream proxy error:", error);
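
// Illustrative only — not part of the diff: a sketch of the client contract
// described in the doc comment above, assuming EventSource can reach this
// same-origin proxy (auth is handled server-side by the route). The query
// parameter name comes from the code shown; the per-message "id" and "type"
// fields are assumptions for illustration.
function resumeTaskStreamExample(taskId: string, lastMessageId = "0-0") {
  const url = `/api/chat/tasks/${taskId}/stream?last_message_id=${encodeURIComponent(lastMessageId)}`;
  const source = new EventSource(url);
  source.onmessage = (e) => {
    const msg = JSON.parse(e.data); // event shape assumed
    if (msg.id) lastMessageId = msg.id; // remember replay position (steps 1–3)
    if (msg.type === "finish") source.close(); // stream ends (step 4)
  };
  source.onerror = () => {
    source.close();
    // On disconnect, reconnect from the last seen position
    resumeTaskStreamExample(taskId, lastMessageId);
  };
}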

@@ -61,7 +61,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
| [Get List Item](block-integrations/basic.md#get-list-item) | Returns the element at the given index |
| [Get Store Agent Details](block-integrations/system/store_operations.md#get-store-agent-details) | Get detailed information about an agent from the store |
| [Get Weather Information](block-integrations/basic.md#get-weather-information) | Retrieves weather information for a specified location using OpenWeatherMap API |
| [Human In The Loop](block-integrations/basic.md#human-in-the-loop) | Pause execution for human review |
| [Human In The Loop](block-integrations/basic.md#human-in-the-loop) | Pause execution and wait for human approval or modification of data |
| [List Is Empty](block-integrations/basic.md#list-is-empty) | Checks if a list is empty |
| [List Library Agents](block-integrations/system/library_operations.md#list-library-agents) | List all agents in your personal library |
| [Note](block-integrations/basic.md#note) | A visual annotation block that displays a sticky note in the workflow editor for documentation and organization purposes |

@@ -975,7 +975,7 @@ A travel planning application could use this block to provide users with current
## Human In The Loop

### What it is
Pause execution for human review. Data flows through approved_data or rejected_data output based on the reviewer's decision. Outputs contain the actual data, not status strings.
Pause execution and wait for human approval or modification of data

### How it works
<!-- MANUAL: how_it_works -->
@@ -988,18 +988,18 @@ This enables human oversight at critical points in automated workflows, ensuring

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| data | The data to be reviewed by a human user. This exact data will be passed through to either approved_data or rejected_data output based on the reviewer's decision. | Data | Yes |
| name | A descriptive name for what this data represents. This helps the reviewer understand what they are reviewing. | str | Yes |
| editable | Whether the human reviewer can edit the data before approving or rejecting it | bool | No |
| data | The data to be reviewed by a human user | Data | Yes |
| name | A descriptive name for what this data represents | str | Yes |
| editable | Whether the human reviewer can edit the data | bool | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| approved_data | Outputs the input data when the reviewer APPROVES it. The value is the actual data itself (not a status string like 'APPROVED'). If the reviewer edited the data, this contains the modified version. Connect downstream blocks here for the 'approved' workflow path. | Approved Data |
| rejected_data | Outputs the input data when the reviewer REJECTS it. The value is the actual data itself (not a status string like 'REJECTED'). If the reviewer edited the data, this contains the modified version. Connect downstream blocks here for the 'rejected' workflow path. | Rejected Data |
| review_message | Optional message provided by the reviewer explaining their decision. Only outputs when the reviewer provides a message; this pin does not fire if no message was given. | str |
| approved_data | The data when approved (may be modified by reviewer) | Approved Data |
| rejected_data | The data when rejected (may be modified by reviewer) | Rejected Data |
| review_message | Any message provided by the reviewer | str |
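
To make the pass-through semantics concrete (hypothetical values): if `data` is `{"email": "hi@example.com"}` and the reviewer approves it unchanged, `approved_data` emits `{"email": "hi@example.com"}` itself rather than a status string such as `"APPROVED"`, and `rejected_data` stays silent; if the reviewer edits the data and then rejects, `rejected_data` carries the edited value.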

### Possible use case
<!-- MANUAL: use_case -->

@@ -563,7 +563,7 @@ The block supports conversation continuation through three mechanisms:
|--------|-------------|------|
| error | Error message if execution failed | str |
| response | The output/response from Claude Code execution | str |
| files | List of text files created/modified by Claude Code during this execution. Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. workspace_ref contains a workspace:// URI if the file was stored to workspace. | List[SandboxFileOutput] |
| files | List of text files created/modified by Claude Code during this execution. Each file has 'path', 'relative_path', 'name', and 'content' fields. | List[FileOutput] |
| conversation_history | Full conversation history including this turn. Pass this to conversation_history input to continue on a fresh sandbox if the previous sandbox timed out. | str |
| session_id | Session ID for this conversation. Pass this back along with sandbox_id to continue the conversation. | str |
| sandbox_id | ID of the sandbox instance. Pass this back along with session_id to continue the conversation. This is None if dispose_sandbox was True (sandbox was disposed). | str |
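
As a concrete (hypothetical) continuation flow: a first execution returns, say, `session_id: "sess-1"` and `sandbox_id: "sbx-9"`; passing both back into the next execution continues the conversation on the same sandbox, while passing only the returned `conversation_history` restarts it on a fresh sandbox if the original one timed out or was disposed.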

@@ -215,7 +215,6 @@ The sandbox includes pip and npm pre-installed. Set timeout to limit execution t
| response | Text output (if any) of the main execution result | str |
| stdout_logs | Standard output logs from execution | str |
| stderr_logs | Standard error logs from execution | str |
| files | Files written to /output directory during execution. Each file has path, name, content, and workspace_ref (if stored). | List[SandboxFileOutput] |

### Possible use case
<!-- MANUAL: use_case -->