Compare commits
3 Commits
abhi/marke... → dev

| Author | SHA1 | Date |
|---|---|---|
|  | 82d7134fc6 |  |
|  | 90466908a8 |  |
|  | f9f984a8f4 |  |
659
autogpt_platform/backend/backend/blocks/claude_code.py
Normal file
@@ -0,0 +1,659 @@
import json
import shlex
import uuid
from typing import Literal, Optional

from e2b import AsyncSandbox as BaseAsyncSandbox
from pydantic import BaseModel, SecretStr

from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.model import (
    APIKeyCredentials,
    CredentialsField,
    CredentialsMetaInput,
    SchemaField,
)
from backend.integrations.providers import ProviderName


class ClaudeCodeExecutionError(Exception):
    """Exception raised when Claude Code execution fails.

    Carries the sandbox_id so it can be returned to the user for cleanup
    when dispose_sandbox=False.
    """

    def __init__(self, message: str, sandbox_id: str = ""):
        super().__init__(message)
        self.sandbox_id = sandbox_id


# Test credentials for E2B
TEST_E2B_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="e2b",
    api_key=SecretStr("mock-e2b-api-key"),
    title="Mock E2B API key",
    expires_at=None,
)
TEST_E2B_CREDENTIALS_INPUT = {
    "provider": TEST_E2B_CREDENTIALS.provider,
    "id": TEST_E2B_CREDENTIALS.id,
    "type": TEST_E2B_CREDENTIALS.type,
    "title": TEST_E2B_CREDENTIALS.title,
}

# Test credentials for Anthropic
TEST_ANTHROPIC_CREDENTIALS = APIKeyCredentials(
    id="2e568a2b-b2ea-475a-8564-9a676bf31c56",
    provider="anthropic",
    api_key=SecretStr("mock-anthropic-api-key"),
    title="Mock Anthropic API key",
    expires_at=None,
)
TEST_ANTHROPIC_CREDENTIALS_INPUT = {
    "provider": TEST_ANTHROPIC_CREDENTIALS.provider,
    "id": TEST_ANTHROPIC_CREDENTIALS.id,
    "type": TEST_ANTHROPIC_CREDENTIALS.type,
    "title": TEST_ANTHROPIC_CREDENTIALS.title,
}


class ClaudeCodeBlock(Block):
    """
    Execute tasks using Claude Code (Anthropic's AI coding assistant) in an E2B sandbox.

    Claude Code can create files, install tools, run commands, and perform complex
    coding tasks autonomously within a secure sandbox environment.
    """

    # Use base template - we'll install Claude Code ourselves for latest version
    DEFAULT_TEMPLATE = "base"

    class Input(BlockSchemaInput):
        e2b_credentials: CredentialsMetaInput[
            Literal[ProviderName.E2B], Literal["api_key"]
        ] = CredentialsField(
            description=(
                "API key for the E2B platform to create the sandbox. "
                "Get one on the [e2b website](https://e2b.dev/docs)"
            ),
        )

        anthropic_credentials: CredentialsMetaInput[
            Literal[ProviderName.ANTHROPIC], Literal["api_key"]
        ] = CredentialsField(
            description=(
                "API key for Anthropic to power Claude Code. "
                "Get one at [Anthropic's website](https://console.anthropic.com)"
            ),
        )

        prompt: str = SchemaField(
            description=(
                "The task or instruction for Claude Code to execute. "
                "Claude Code can create files, install packages, run commands, "
                "and perform complex coding tasks."
            ),
            placeholder="Create a hello world index.html file",
            default="",
            advanced=False,
        )

        timeout: int = SchemaField(
            description=(
                "Sandbox timeout in seconds. Claude Code tasks can take "
                "a while, so set this appropriately for your task complexity. "
                "Note: This only applies when creating a new sandbox. "
                "When reconnecting to an existing sandbox via sandbox_id, "
                "the original timeout is retained."
            ),
            default=300,  # 5 minutes default
            advanced=True,
        )

        setup_commands: list[str] = SchemaField(
            description=(
                "Optional shell commands to run before executing Claude Code. "
                "Useful for installing dependencies or setting up the environment."
            ),
            default_factory=list,
            advanced=True,
        )

        working_directory: str = SchemaField(
            description="Working directory for Claude Code to operate in.",
            default="/home/user",
            advanced=True,
        )

        # Session/continuation support
        session_id: str = SchemaField(
            description=(
                "Session ID to resume a previous conversation. "
                "Leave empty for a new conversation. "
                "Use the session_id from a previous run to continue that conversation."
            ),
            default="",
            advanced=True,
        )

        sandbox_id: str = SchemaField(
            description=(
                "Sandbox ID to reconnect to an existing sandbox. "
                "Required when resuming a session (along with session_id). "
                "Use the sandbox_id from a previous run where dispose_sandbox was False."
            ),
            default="",
            advanced=True,
        )

        conversation_history: str = SchemaField(
            description=(
                "Previous conversation history to continue from. "
                "Use this to restore context on a fresh sandbox if the previous one timed out. "
                "Pass the conversation_history output from a previous run."
            ),
            default="",
            advanced=True,
        )

        dispose_sandbox: bool = SchemaField(
            description=(
                "Whether to dispose of the sandbox immediately after execution. "
                "Set to False if you want to continue the conversation later "
                "(you'll need both sandbox_id and session_id from the output)."
            ),
            default=True,
            advanced=True,
        )

    class FileOutput(BaseModel):
        """A file extracted from the sandbox."""

        path: str
        relative_path: str  # Path relative to working directory (for GitHub, etc.)
        name: str
        content: str

    class Output(BlockSchemaOutput):
        response: str = SchemaField(
            description="The output/response from Claude Code execution"
        )
        files: list["ClaudeCodeBlock.FileOutput"] = SchemaField(
            description=(
                "List of text files created/modified by Claude Code during this execution. "
                "Each file has 'path', 'relative_path', 'name', and 'content' fields."
            )
        )
        conversation_history: str = SchemaField(
            description=(
                "Full conversation history including this turn. "
                "Pass this to conversation_history input to continue on a fresh sandbox "
                "if the previous sandbox timed out."
            )
        )
        session_id: str = SchemaField(
            description=(
                "Session ID for this conversation. "
                "Pass this back along with sandbox_id to continue the conversation."
            )
        )
        sandbox_id: Optional[str] = SchemaField(
            description=(
                "ID of the sandbox instance. "
                "Pass this back along with session_id to continue the conversation. "
                "This is None if dispose_sandbox was True (sandbox was disposed)."
            ),
            default=None,
        )
        error: str = SchemaField(description="Error message if execution failed")

    def __init__(self):
        super().__init__(
            id="4e34f4a5-9b89-4326-ba77-2dd6750b7194",
            description=(
                "Execute tasks using Claude Code in an E2B sandbox. "
                "Claude Code can create files, install tools, run commands, "
                "and perform complex coding tasks autonomously."
            ),
            categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.AI},
            input_schema=ClaudeCodeBlock.Input,
            output_schema=ClaudeCodeBlock.Output,
            test_credentials={
                "e2b_credentials": TEST_E2B_CREDENTIALS,
                "anthropic_credentials": TEST_ANTHROPIC_CREDENTIALS,
            },
            test_input={
                "e2b_credentials": TEST_E2B_CREDENTIALS_INPUT,
                "anthropic_credentials": TEST_ANTHROPIC_CREDENTIALS_INPUT,
                "prompt": "Create a hello world HTML file",
                "timeout": 300,
                "setup_commands": [],
                "working_directory": "/home/user",
                "session_id": "",
                "sandbox_id": "",
                "conversation_history": "",
                "dispose_sandbox": True,
            },
            test_output=[
                ("response", "Created index.html with hello world content"),
                (
                    "files",
                    [
                        {
                            "path": "/home/user/index.html",
                            "relative_path": "index.html",
                            "name": "index.html",
                            "content": "<html>Hello World</html>",
                        }
                    ],
                ),
                (
                    "conversation_history",
                    "User: Create a hello world HTML file\n"
                    "Claude: Created index.html with hello world content",
                ),
                ("session_id", str),
                ("sandbox_id", None),  # None because dispose_sandbox=True in test_input
            ],
            test_mock={
                "execute_claude_code": lambda *args, **kwargs: (
                    "Created index.html with hello world content",  # response
                    [
                        ClaudeCodeBlock.FileOutput(
                            path="/home/user/index.html",
                            relative_path="index.html",
                            name="index.html",
                            content="<html>Hello World</html>",
                        )
                    ],  # files
                    "User: Create a hello world HTML file\n"
                    "Claude: Created index.html with hello world content",  # conversation_history
                    "test-session-id",  # session_id
                    "sandbox_id",  # sandbox_id
                ),
            },
        )

    async def execute_claude_code(
        self,
        e2b_api_key: str,
        anthropic_api_key: str,
        prompt: str,
        timeout: int,
        setup_commands: list[str],
        working_directory: str,
        session_id: str,
        existing_sandbox_id: str,
        conversation_history: str,
        dispose_sandbox: bool,
    ) -> tuple[str, list["ClaudeCodeBlock.FileOutput"], str, str, str]:
        """
        Execute Claude Code in an E2B sandbox.

        Returns:
            Tuple of (response, files, conversation_history, session_id, sandbox_id)
        """

        # Validate that sandbox_id is provided when resuming a session
        if session_id and not existing_sandbox_id:
            raise ValueError(
                "sandbox_id is required when resuming a session with session_id. "
                "The session state is stored in the original sandbox. "
                "If the sandbox has timed out, use conversation_history instead "
                "to restore context on a fresh sandbox."
            )

        sandbox = None
        sandbox_id = ""

        try:
            # Either reconnect to existing sandbox or create a new one
            if existing_sandbox_id:
                # Reconnect to existing sandbox for conversation continuation
                sandbox = await BaseAsyncSandbox.connect(
                    sandbox_id=existing_sandbox_id,
                    api_key=e2b_api_key,
                )
            else:
                # Create new sandbox
                sandbox = await BaseAsyncSandbox.create(
                    template=self.DEFAULT_TEMPLATE,
                    api_key=e2b_api_key,
                    timeout=timeout,
                    envs={"ANTHROPIC_API_KEY": anthropic_api_key},
                )

                # Install Claude Code from npm (ensures we get the latest version)
                install_result = await sandbox.commands.run(
                    "npm install -g @anthropic-ai/claude-code@latest",
                    timeout=120,  # 2 min timeout for install
                )
                if install_result.exit_code != 0:
                    raise Exception(
                        f"Failed to install Claude Code: {install_result.stderr}"
                    )

                # Run any user-provided setup commands
                for cmd in setup_commands:
                    setup_result = await sandbox.commands.run(cmd)
                    if setup_result.exit_code != 0:
                        raise Exception(
                            f"Setup command failed: {cmd}\n"
                            f"Exit code: {setup_result.exit_code}\n"
                            f"Stdout: {setup_result.stdout}\n"
                            f"Stderr: {setup_result.stderr}"
                        )

            # Capture sandbox_id immediately after creation/connection
            # so it's available for error recovery if dispose_sandbox=False
            sandbox_id = sandbox.sandbox_id

            # Generate or use provided session ID
            current_session_id = session_id if session_id else str(uuid.uuid4())

            # Build base Claude flags
            base_flags = "-p --dangerously-skip-permissions --output-format json"

            # Add conversation history context if provided (for fresh sandbox continuation)
            history_flag = ""
            if conversation_history and not session_id:
                # Inject previous conversation as context via system prompt
                # Use consistent escaping via _escape_prompt helper
                escaped_history = self._escape_prompt(
                    f"Previous conversation context: {conversation_history}"
                )
                history_flag = f" --append-system-prompt {escaped_history}"

            # Build Claude command based on whether we're resuming or starting new
            # Use shlex.quote for working_directory and session IDs to prevent injection
            safe_working_dir = shlex.quote(working_directory)
            if session_id:
                # Resuming existing session (sandbox still alive)
                safe_session_id = shlex.quote(session_id)
                claude_command = (
                    f"cd {safe_working_dir} && "
                    f"echo {self._escape_prompt(prompt)} | "
                    f"claude --resume {safe_session_id} {base_flags}"
                )
            else:
                # New session with specific ID
                safe_current_session_id = shlex.quote(current_session_id)
                claude_command = (
                    f"cd {safe_working_dir} && "
                    f"echo {self._escape_prompt(prompt)} | "
                    f"claude --session-id {safe_current_session_id} {base_flags}{history_flag}"
                )

            # Capture timestamp before running Claude Code to filter files later
            # Capture timestamp 1 second in the past to avoid race condition with file creation
            timestamp_result = await sandbox.commands.run(
                "date -u -d '1 second ago' +%Y-%m-%dT%H:%M:%S"
            )
            if timestamp_result.exit_code != 0:
                raise RuntimeError(
                    f"Failed to capture timestamp: {timestamp_result.stderr}"
                )
            start_timestamp = (
                timestamp_result.stdout.strip() if timestamp_result.stdout else None
            )

            result = await sandbox.commands.run(
                claude_command,
                timeout=0,  # No command timeout - let sandbox timeout handle it
            )

            # Check for command failure
            if result.exit_code != 0:
                error_msg = result.stderr or result.stdout or "Unknown error"
                raise Exception(
                    f"Claude Code command failed with exit code {result.exit_code}:\n"
                    f"{error_msg}"
                )

            raw_output = result.stdout or ""

            # Parse JSON output to extract response and build conversation history
            response = ""
            new_conversation_history = conversation_history or ""

            try:
                # The JSON output contains the result
                output_data = json.loads(raw_output)
                response = output_data.get("result", raw_output)

                # Build conversation history entry
                turn_entry = f"User: {prompt}\nClaude: {response}"
                if new_conversation_history:
                    new_conversation_history = (
                        f"{new_conversation_history}\n\n{turn_entry}"
                    )
                else:
                    new_conversation_history = turn_entry

            except json.JSONDecodeError:
                # If not valid JSON, use raw output
                response = raw_output
                turn_entry = f"User: {prompt}\nClaude: {response}"
                if new_conversation_history:
                    new_conversation_history = (
                        f"{new_conversation_history}\n\n{turn_entry}"
                    )
                else:
                    new_conversation_history = turn_entry

            # Extract files created/modified during this run
            files = await self._extract_files(
                sandbox, working_directory, start_timestamp
            )

            return (
                response,
                files,
                new_conversation_history,
                current_session_id,
                sandbox_id,
            )

        except Exception as e:
            # Wrap exception with sandbox_id so caller can access/cleanup
            # the preserved sandbox when dispose_sandbox=False
            raise ClaudeCodeExecutionError(str(e), sandbox_id) from e

        finally:
            if dispose_sandbox and sandbox:
                await sandbox.kill()

    async def _extract_files(
        self,
        sandbox: BaseAsyncSandbox,
        working_directory: str,
        since_timestamp: str | None = None,
    ) -> list["ClaudeCodeBlock.FileOutput"]:
        """
        Extract text files created/modified during this Claude Code execution.

        Args:
            sandbox: The E2B sandbox instance
            working_directory: Directory to search for files
            since_timestamp: ISO timestamp - only return files modified after this time

        Returns:
            List of FileOutput objects with path, relative_path, name, and content
        """
        files: list[ClaudeCodeBlock.FileOutput] = []

        # Text file extensions we can safely read as text
        text_extensions = {
            ".txt",
            ".md",
            ".html",
            ".htm",
            ".css",
            ".js",
            ".ts",
            ".jsx",
            ".tsx",
            ".json",
            ".xml",
            ".yaml",
            ".yml",
            ".toml",
            ".ini",
            ".cfg",
            ".conf",
            ".py",
            ".rb",
            ".php",
            ".java",
            ".c",
            ".cpp",
            ".h",
            ".hpp",
            ".cs",
            ".go",
            ".rs",
            ".swift",
            ".kt",
            ".scala",
            ".sh",
            ".bash",
            ".zsh",
            ".sql",
            ".graphql",
            ".env",
            ".gitignore",
            ".dockerfile",
            "Dockerfile",
            ".vue",
            ".svelte",
            ".astro",
            ".mdx",
            ".rst",
            ".tex",
            ".csv",
            ".log",
        }

        try:
            # List files recursively using find command
            # Exclude node_modules and .git directories, but allow hidden files
            # like .env and .gitignore (they're filtered by text_extensions later)
            # Filter by timestamp to only get files created/modified during this run
            safe_working_dir = shlex.quote(working_directory)
            timestamp_filter = ""
            if since_timestamp:
                timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} "
            find_result = await sandbox.commands.run(
                f"find {safe_working_dir} -type f "
                f"{timestamp_filter}"
                f"-not -path '*/node_modules/*' "
                f"-not -path '*/.git/*' "
                f"2>/dev/null"
            )

            if find_result.stdout:
                for file_path in find_result.stdout.strip().split("\n"):
                    if not file_path:
                        continue

                    # Check if it's a text file we can read
                    is_text = any(
                        file_path.endswith(ext) for ext in text_extensions
                    ) or file_path.endswith("Dockerfile")

                    if is_text:
                        try:
                            content = await sandbox.files.read(file_path)
                            # Handle bytes or string
                            if isinstance(content, bytes):
                                content = content.decode("utf-8", errors="replace")

                            # Extract filename from path
                            file_name = file_path.split("/")[-1]

                            # Calculate relative path by stripping working directory
                            relative_path = file_path
                            if file_path.startswith(working_directory):
                                relative_path = file_path[len(working_directory) :]
                                # Remove leading slash if present
                                if relative_path.startswith("/"):
                                    relative_path = relative_path[1:]

                            files.append(
                                ClaudeCodeBlock.FileOutput(
                                    path=file_path,
                                    relative_path=relative_path,
                                    name=file_name,
                                    content=content,
                                )
                            )
                        except Exception:
                            # Skip files that can't be read
                            pass

        except Exception:
            # If file extraction fails, return empty results
            pass

        return files

    def _escape_prompt(self, prompt: str) -> str:
        """Escape the prompt for safe shell execution."""
        # Use single quotes and escape any single quotes in the prompt
        escaped = prompt.replace("'", "'\"'\"'")
        return f"'{escaped}'"

    async def run(
        self,
        input_data: Input,
        *,
        e2b_credentials: APIKeyCredentials,
        anthropic_credentials: APIKeyCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            (
                response,
                files,
                conversation_history,
                session_id,
                sandbox_id,
            ) = await self.execute_claude_code(
                e2b_api_key=e2b_credentials.api_key.get_secret_value(),
                anthropic_api_key=anthropic_credentials.api_key.get_secret_value(),
                prompt=input_data.prompt,
                timeout=input_data.timeout,
                setup_commands=input_data.setup_commands,
                working_directory=input_data.working_directory,
                session_id=input_data.session_id,
                existing_sandbox_id=input_data.sandbox_id,
                conversation_history=input_data.conversation_history,
                dispose_sandbox=input_data.dispose_sandbox,
            )

            yield "response", response
            # Always yield files (empty list if none) to match Output schema
            yield "files", [f.model_dump() for f in files]
            # Always yield conversation_history so user can restore context on fresh sandbox
            yield "conversation_history", conversation_history
            # Always yield session_id so user can continue conversation
            yield "session_id", session_id
            # Always yield sandbox_id (None if disposed) to match Output schema
            yield "sandbox_id", sandbox_id if not input_data.dispose_sandbox else None

        except ClaudeCodeExecutionError as e:
            yield "error", str(e)
            # If sandbox was preserved (dispose_sandbox=False), yield sandbox_id
            # so user can reconnect to or clean up the orphaned sandbox
            if not input_data.dispose_sandbox and e.sandbox_id:
                yield "sandbox_id", e.sandbox_id
        except Exception as e:
            yield "error", str(e)
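As a rough illustration of the command the block assembles for a brand-new session, the following sketch (not part of the diff) mirrors the escaping logic above with hypothetical example values:

```python
# Sketch only: example values are hypothetical, the escaping mirrors ClaudeCodeBlock._escape_prompt.
import shlex

def escape_prompt(prompt: str) -> str:
    # Wrap in single quotes, escaping embedded single quotes for the shell.
    return "'" + prompt.replace("'", "'\"'\"'") + "'"

working_directory = "/home/user"                     # example value
session_id = "3f2c9a4e-0000-0000-0000-000000000000"  # hypothetical UUID
prompt = "Create a hello world index.html file"
base_flags = "-p --dangerously-skip-permissions --output-format json"

claude_command = (
    f"cd {shlex.quote(working_directory)} && "
    f"echo {escape_prompt(prompt)} | "
    f"claude --session-id {shlex.quote(session_id)} {base_flags}"
)
print(claude_command)
# cd /home/user && echo 'Create a hello world index.html file' | claude --session-id 3f2c9a4e-0000-0000-0000-000000000000 -p --dangerously-skip-permissions --output-format json
```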
@@ -1,12 +1,37 @@
 -- CreateExtension
--- Supabase: pgvector must be enabled via Dashboard → Database → Extensions first
--- Creates extension in current schema (determined by search_path from DATABASE_URL ?schema= param)
+-- Ensures vector extension is in the current schema (from DATABASE_URL ?schema= param)
+-- If it exists in a different schema (e.g., public), we drop and recreate it in the current schema
+-- This ensures vector type is in the same schema as tables, making ::vector work without explicit qualification
 DO $$
+DECLARE
+    current_schema_name text;
+    vector_schema text;
 BEGIN
-    CREATE EXTENSION IF NOT EXISTS "vector";
-EXCEPTION WHEN OTHERS THEN
-    RAISE NOTICE 'vector extension not available or already exists, skipping';
+    -- Get the current schema from search_path
+    SELECT current_schema() INTO current_schema_name;
+
+    -- Check if vector extension exists and which schema it's in
+    SELECT n.nspname INTO vector_schema
+    FROM pg_extension e
+    JOIN pg_namespace n ON e.extnamespace = n.oid
+    WHERE e.extname = 'vector';
+
+    -- Handle removal if in wrong schema
+    IF vector_schema IS NOT NULL AND vector_schema != current_schema_name THEN
+        BEGIN
+            -- Vector exists in a different schema, drop it first
+            RAISE WARNING 'pgvector found in schema "%" but need it in "%". Dropping and reinstalling...',
+                vector_schema, current_schema_name;
+            EXECUTE 'DROP EXTENSION IF EXISTS vector CASCADE';
+        EXCEPTION WHEN OTHERS THEN
+            RAISE EXCEPTION 'Failed to drop pgvector from schema "%": %. You may need to drop it manually.',
+                vector_schema, SQLERRM;
+        END;
+    END IF;
+
+    -- Create extension in current schema (let it fail naturally if not available)
+    EXECUTE format('CREATE EXTENSION IF NOT EXISTS vector SCHEMA %I', current_schema_name);
 END $$;
 
 -- CreateEnum
@@ -1,71 +0,0 @@
-- Acknowledge Supabase-managed extensions to prevent drift warnings
-- These extensions are pre-installed by Supabase in specific schemas
-- This migration ensures they exist where available (Supabase) or skips gracefully (CI)

-- Create schemas (safe in both CI and Supabase)
CREATE SCHEMA IF NOT EXISTS "extensions";

-- Extensions that exist in both CI and Supabase
DO $$
BEGIN
    CREATE EXTENSION IF NOT EXISTS "pgcrypto" WITH SCHEMA "extensions";
EXCEPTION WHEN OTHERS THEN
    RAISE NOTICE 'pgcrypto extension not available, skipping';
END $$;

DO $$
BEGIN
    CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA "extensions";
EXCEPTION WHEN OTHERS THEN
    RAISE NOTICE 'uuid-ossp extension not available, skipping';
END $$;

-- Supabase-specific extensions (skip gracefully in CI)
DO $$
BEGIN
    CREATE EXTENSION IF NOT EXISTS "pg_stat_statements" WITH SCHEMA "extensions";
EXCEPTION WHEN OTHERS THEN
    RAISE NOTICE 'pg_stat_statements extension not available, skipping';
END $$;

DO $$
BEGIN
    CREATE EXTENSION IF NOT EXISTS "pg_net" WITH SCHEMA "extensions";
EXCEPTION WHEN OTHERS THEN
    RAISE NOTICE 'pg_net extension not available, skipping';
END $$;

DO $$
BEGIN
    CREATE EXTENSION IF NOT EXISTS "pgjwt" WITH SCHEMA "extensions";
EXCEPTION WHEN OTHERS THEN
    RAISE NOTICE 'pgjwt extension not available, skipping';
END $$;

DO $$
BEGIN
    CREATE SCHEMA IF NOT EXISTS "graphql";
    CREATE EXTENSION IF NOT EXISTS "pg_graphql" WITH SCHEMA "graphql";
EXCEPTION WHEN OTHERS THEN
    RAISE NOTICE 'pg_graphql extension not available, skipping';
END $$;

DO $$
BEGIN
    CREATE SCHEMA IF NOT EXISTS "pgsodium";
    CREATE EXTENSION IF NOT EXISTS "pgsodium" WITH SCHEMA "pgsodium";
EXCEPTION WHEN OTHERS THEN
    RAISE NOTICE 'pgsodium extension not available, skipping';
END $$;

DO $$
BEGIN
    CREATE SCHEMA IF NOT EXISTS "vault";
    CREATE EXTENSION IF NOT EXISTS "supabase_vault" WITH SCHEMA "vault";
EXCEPTION WHEN OTHERS THEN
    RAISE NOTICE 'supabase_vault extension not available, skipping';
END $$;


-- Return to platform
CREATE SCHEMA IF NOT EXISTS "platform";
@@ -34,7 +34,10 @@ logger = logging.getLogger(__name__)
 
 # Default output directory relative to repo root
 DEFAULT_OUTPUT_DIR = (
-    Path(__file__).parent.parent.parent.parent / "docs" / "integrations"
+    Path(__file__).parent.parent.parent.parent
+    / "docs"
+    / "integrations"
+    / "block-integrations"
 )
 
 
@@ -421,6 +424,14 @@ def generate_block_markdown(
     lines.append("<!-- END MANUAL -->")
     lines.append("")
 
+    # Optional per-block extras (only include if has content)
+    extras = manual_content.get("extras", "")
+    if extras:
+        lines.append("<!-- MANUAL: extras -->")
+        lines.append(extras)
+        lines.append("<!-- END MANUAL -->")
+        lines.append("")
+
     lines.append("---")
     lines.append("")
 
@@ -456,25 +467,52 @@ def get_block_file_mapping(blocks: list[BlockDoc]) -> dict[str, list[BlockDoc]]:
     return dict(file_mapping)
 
 
-def generate_overview_table(blocks: list[BlockDoc]) -> str:
-    """Generate the overview table markdown (blocks.md)."""
+def generate_overview_table(blocks: list[BlockDoc], block_dir_prefix: str = "") -> str:
+    """Generate the overview table markdown (blocks.md).
+
+    Args:
+        blocks: List of block documentation objects
+        block_dir_prefix: Prefix for block file links (e.g., "block-integrations/")
+    """
     lines = []
 
+    # GitBook YAML frontmatter
+    lines.append("---")
+    lines.append("layout:")
+    lines.append("  width: default")
+    lines.append("  title:")
+    lines.append("    visible: true")
+    lines.append("  description:")
+    lines.append("    visible: true")
+    lines.append("  tableOfContents:")
+    lines.append("    visible: false")
+    lines.append("  outline:")
+    lines.append("    visible: true")
+    lines.append("  pagination:")
+    lines.append("    visible: true")
+    lines.append("  metadata:")
+    lines.append("    visible: true")
+    lines.append("---")
+    lines.append("")
+
     lines.append("# AutoGPT Blocks Overview")
     lines.append("")
     lines.append(
         'AutoGPT uses a modular approach with various "blocks" to handle different tasks. These blocks are the building blocks of AutoGPT workflows, allowing users to create complex automations by combining simple, specialized components.'
     )
     lines.append("")
-    lines.append('!!! info "Creating Your Own Blocks"')
-    lines.append("    Want to create your own custom blocks? Check out our guides:")
-    lines.append("    ")
+    lines.append('{% hint style="info" %}')
+    lines.append("**Creating Your Own Blocks**")
+    lines.append("")
+    lines.append("Want to create your own custom blocks? Check out our guides:")
+    lines.append("")
     lines.append(
-        "    - [Build your own Blocks](https://docs.agpt.co/platform/new_blocks/) - Step-by-step tutorial with examples"
+        "* [Build your own Blocks](https://docs.agpt.co/platform/new_blocks/) - Step-by-step tutorial with examples"
     )
     lines.append(
-        "    - [Block SDK Guide](https://docs.agpt.co/platform/block-sdk-guide/) - Advanced SDK patterns with OAuth, webhooks, and provider configuration"
+        "* [Block SDK Guide](https://docs.agpt.co/platform/block-sdk-guide/) - Advanced SDK patterns with OAuth, webhooks, and provider configuration"
     )
+    lines.append("{% endhint %}")
     lines.append("")
     lines.append(
         "Below is a comprehensive list of all available blocks, categorized by their primary function. Click on any block name to view its detailed documentation."
@@ -537,7 +575,8 @@ def generate_overview_table(blocks: list[BlockDoc]) -> str:
                 else "No description"
             )
             short_desc = short_desc.replace("\n", " ").replace("|", "\\|")
-            lines.append(f"| [{block.name}]({file_path}#{anchor}) | {short_desc} |")
+            link_path = f"{block_dir_prefix}{file_path}"
+            lines.append(f"| [{block.name}]({link_path}#{anchor}) | {short_desc} |")
             lines.append("")
             continue
 
@@ -563,13 +602,55 @@ def generate_overview_table(blocks: list[BlockDoc]) -> str:
             )
             short_desc = short_desc.replace("\n", " ").replace("|", "\\|")
 
-            lines.append(f"| [{block.name}]({file_path}#{anchor}) | {short_desc} |")
+            link_path = f"{block_dir_prefix}{file_path}"
+            lines.append(f"| [{block.name}]({link_path}#{anchor}) | {short_desc} |")
 
         lines.append("")
 
     return "\n".join(lines)
 
 
+def generate_summary_md(
+    blocks: list[BlockDoc], root_dir: Path, block_dir_prefix: str = ""
+) -> str:
+    """Generate SUMMARY.md for GitBook navigation.
+
+    Args:
+        blocks: List of block documentation objects
+        root_dir: The root docs directory (e.g., docs/integrations/)
+        block_dir_prefix: Prefix for block file links (e.g., "block-integrations/")
+    """
+    lines = []
+    lines.append("# Table of contents")
+    lines.append("")
+    lines.append("* [AutoGPT Blocks Overview](README.md)")
+    lines.append("")
+
+    # Check for guides/ directory at the root level (docs/integrations/guides/)
+    guides_dir = root_dir / "guides"
+    if guides_dir.exists():
+        lines.append("## Guides")
+        lines.append("")
+        for guide_file in sorted(guides_dir.glob("*.md")):
+            # Use just the file name for title (replace hyphens/underscores with spaces)
+            title = file_path_to_title(guide_file.stem.replace("-", "_") + ".md")
+            lines.append(f"* [{title}](guides/{guide_file.name})")
+        lines.append("")
+
+    lines.append("## Block Integrations")
+    lines.append("")
+
+    file_mapping = get_block_file_mapping(blocks)
+    for file_path in sorted(file_mapping.keys()):
+        title = file_path_to_title(file_path)
+        link_path = f"{block_dir_prefix}{file_path}"
+        lines.append(f"* [{title}]({link_path})")
+
+    lines.append("")
+
+    return "\n".join(lines)
+
+
 def load_all_blocks_for_docs() -> list[BlockDoc]:
     """Load all blocks and extract documentation."""
     from backend.blocks import load_all_blocks
@@ -653,6 +734,16 @@ def write_block_docs(
             )
         )
 
+        # Add file-level additional_content section if present
+        file_additional = extract_manual_content(existing_content).get(
+            "additional_content", ""
+        )
+        if file_additional:
+            content_parts.append("<!-- MANUAL: additional_content -->")
+            content_parts.append(file_additional)
+            content_parts.append("<!-- END MANUAL -->")
+            content_parts.append("")
+
         full_content = file_header + "\n" + "\n".join(content_parts)
         generated_files[str(file_path)] = full_content
 
@@ -661,14 +752,28 @@
 
         full_path.write_text(full_content)
 
-    # Generate overview file
-    overview_content = generate_overview_table(blocks)
-    overview_path = output_dir / "README.md"
+    # Generate overview file at the parent directory (docs/integrations/)
+    # with links prefixed to point into block-integrations/
+    root_dir = output_dir.parent
+    block_dir_name = output_dir.name  # "block-integrations"
+    block_dir_prefix = f"{block_dir_name}/"
+
+    overview_content = generate_overview_table(blocks, block_dir_prefix)
+    overview_path = root_dir / "README.md"
     generated_files["README.md"] = overview_content
    overview_path.write_text(overview_content)
 
     if verbose:
-        print("  Writing README.md (overview)")
+        print("  Writing README.md (overview) to parent directory")
+
+    # Generate SUMMARY.md for GitBook navigation at the parent directory
+    summary_content = generate_summary_md(blocks, root_dir, block_dir_prefix)
+    summary_path = root_dir / "SUMMARY.md"
+    generated_files["SUMMARY.md"] = summary_content
+    summary_path.write_text(summary_content)
+
+    if verbose:
+        print("  Writing SUMMARY.md (navigation) to parent directory")
 
     return generated_files
 
@@ -748,6 +853,16 @@ def check_docs_in_sync(output_dir: Path, blocks: list[BlockDoc]) -> bool:
             elif block_match.group(1).strip() != expected_block_content.strip():
                 mismatched_blocks.append(block.name)
 
+        # Add file-level additional_content to expected content (matches write_block_docs)
+        file_additional = extract_manual_content(existing_content).get(
+            "additional_content", ""
+        )
+        if file_additional:
+            content_parts.append("<!-- MANUAL: additional_content -->")
+            content_parts.append(file_additional)
+            content_parts.append("<!-- END MANUAL -->")
+            content_parts.append("")
+
         expected_content = file_header + "\n" + "\n".join(content_parts)
 
         if existing_content.strip() != expected_content.strip():
@@ -757,11 +872,15 @@ def check_docs_in_sync(output_dir: Path, blocks: list[BlockDoc]) -> bool:
             out_of_sync_details.append((file_path, mismatched_blocks))
             all_match = False
 
-    # Check overview
-    overview_path = output_dir / "README.md"
+    # Check overview at the parent directory (docs/integrations/)
+    root_dir = output_dir.parent
+    block_dir_name = output_dir.name  # "block-integrations"
+    block_dir_prefix = f"{block_dir_name}/"
+
+    overview_path = root_dir / "README.md"
     if overview_path.exists():
         existing_overview = overview_path.read_text()
-        expected_overview = generate_overview_table(blocks)
+        expected_overview = generate_overview_table(blocks, block_dir_prefix)
         if existing_overview.strip() != expected_overview.strip():
             print("OUT OF SYNC: README.md (overview)")
             print("  The blocks overview table needs regeneration")
@@ -772,6 +891,21 @@ def check_docs_in_sync(output_dir: Path, blocks: list[BlockDoc]) -> bool:
             out_of_sync_details.append(("README.md", ["overview table"]))
             all_match = False
 
+    # Check SUMMARY.md at the parent directory
+    summary_path = root_dir / "SUMMARY.md"
+    if summary_path.exists():
+        existing_summary = summary_path.read_text()
+        expected_summary = generate_summary_md(blocks, root_dir, block_dir_prefix)
+        if existing_summary.strip() != expected_summary.strip():
+            print("OUT OF SYNC: SUMMARY.md (navigation)")
+            print("  The GitBook navigation needs regeneration")
+            out_of_sync_details.append(("SUMMARY.md", ["navigation"]))
+            all_match = False
+    else:
+        print("MISSING: SUMMARY.md (navigation)")
+        out_of_sync_details.append(("SUMMARY.md", ["navigation"]))
+        all_match = False
+
     # Check for unfilled manual sections
     unfilled_patterns = [
         "_Add a description of this category of blocks._",
BIN  docs/integrations/.gitbook/assets/Ollama-Add-Prompts.png  (new file, 115 KiB)
BIN  docs/integrations/.gitbook/assets/Ollama-Output.png  (new file, 29 KiB)
BIN  docs/integrations/.gitbook/assets/Ollama-Remote-Host.png  (new file, 6.0 KiB)
BIN  docs/integrations/.gitbook/assets/Ollama-Select-Llama32.png  (new file, 81 KiB)
BIN  docs/integrations/.gitbook/assets/Select-AI-block.png  (new file, 116 KiB)
BIN  docs/integrations/.gitbook/assets/e2b-dashboard.png  (new file, 504 KiB)
BIN  docs/integrations/.gitbook/assets/e2b-log-url.png  (new file, 43 KiB)
BIN  docs/integrations/.gitbook/assets/e2b-new-tag.png  (new file, 47 KiB)
BIN  docs/integrations/.gitbook/assets/e2b-tag-button.png  (new file, 20 KiB)
BIN  docs/integrations/.gitbook/assets/get-repo-dialog.png  (new file, 68 KiB)
133
docs/integrations/SUMMARY.md
Normal file
@@ -0,0 +1,133 @@
# Table of contents

* [AutoGPT Blocks Overview](README.md)

## Guides

* [LLM Providers](guides/llm-providers.md)
* [Voice Providers](guides/voice-providers.md)

## Block Integrations

* [Airtable Bases](block-integrations/airtable/bases.md)
* [Airtable Records](block-integrations/airtable/records.md)
* [Airtable Schema](block-integrations/airtable/schema.md)
* [Airtable Triggers](block-integrations/airtable/triggers.md)
* [Apollo Organization](block-integrations/apollo/organization.md)
* [Apollo People](block-integrations/apollo/people.md)
* [Apollo Person](block-integrations/apollo/person.md)
* [Ayrshare Post To Bluesky](block-integrations/ayrshare/post_to_bluesky.md)
* [Ayrshare Post To Facebook](block-integrations/ayrshare/post_to_facebook.md)
* [Ayrshare Post To GMB](block-integrations/ayrshare/post_to_gmb.md)
* [Ayrshare Post To Instagram](block-integrations/ayrshare/post_to_instagram.md)
* [Ayrshare Post To LinkedIn](block-integrations/ayrshare/post_to_linkedin.md)
* [Ayrshare Post To Pinterest](block-integrations/ayrshare/post_to_pinterest.md)
* [Ayrshare Post To Reddit](block-integrations/ayrshare/post_to_reddit.md)
* [Ayrshare Post To Snapchat](block-integrations/ayrshare/post_to_snapchat.md)
* [Ayrshare Post To Telegram](block-integrations/ayrshare/post_to_telegram.md)
* [Ayrshare Post To Threads](block-integrations/ayrshare/post_to_threads.md)
* [Ayrshare Post To TikTok](block-integrations/ayrshare/post_to_tiktok.md)
* [Ayrshare Post To X](block-integrations/ayrshare/post_to_x.md)
* [Ayrshare Post To YouTube](block-integrations/ayrshare/post_to_youtube.md)
* [Baas Bots](block-integrations/baas/bots.md)
* [Bannerbear Text Overlay](block-integrations/bannerbear/text_overlay.md)
* [Basic](block-integrations/basic.md)
* [Compass Triggers](block-integrations/compass/triggers.md)
* [Data](block-integrations/data.md)
* [Dataforseo Keyword Suggestions](block-integrations/dataforseo/keyword_suggestions.md)
* [Dataforseo Related Keywords](block-integrations/dataforseo/related_keywords.md)
* [Discord Bot Blocks](block-integrations/discord/bot_blocks.md)
* [Discord OAuth Blocks](block-integrations/discord/oauth_blocks.md)
* [Enrichlayer LinkedIn](block-integrations/enrichlayer/linkedin.md)
* [Exa Answers](block-integrations/exa/answers.md)
* [Exa Code Context](block-integrations/exa/code_context.md)
* [Exa Contents](block-integrations/exa/contents.md)
* [Exa Research](block-integrations/exa/research.md)
* [Exa Search](block-integrations/exa/search.md)
* [Exa Similar](block-integrations/exa/similar.md)
* [Exa Webhook Blocks](block-integrations/exa/webhook_blocks.md)
* [Exa Websets](block-integrations/exa/websets.md)
* [Exa Websets Enrichment](block-integrations/exa/websets_enrichment.md)
* [Exa Websets Import Export](block-integrations/exa/websets_import_export.md)
* [Exa Websets Items](block-integrations/exa/websets_items.md)
* [Exa Websets Monitor](block-integrations/exa/websets_monitor.md)
* [Exa Websets Polling](block-integrations/exa/websets_polling.md)
* [Exa Websets Search](block-integrations/exa/websets_search.md)
* [Fal AI Video Generator](block-integrations/fal/ai_video_generator.md)
* [Firecrawl Crawl](block-integrations/firecrawl/crawl.md)
* [Firecrawl Extract](block-integrations/firecrawl/extract.md)
* [Firecrawl Map](block-integrations/firecrawl/map.md)
* [Firecrawl Scrape](block-integrations/firecrawl/scrape.md)
* [Firecrawl Search](block-integrations/firecrawl/search.md)
* [Generic Webhook Triggers](block-integrations/generic_webhook/triggers.md)
* [GitHub Checks](block-integrations/github/checks.md)
* [GitHub CI](block-integrations/github/ci.md)
* [GitHub Issues](block-integrations/github/issues.md)
* [GitHub Pull Requests](block-integrations/github/pull_requests.md)
* [GitHub Repo](block-integrations/github/repo.md)
* [GitHub Reviews](block-integrations/github/reviews.md)
* [GitHub Statuses](block-integrations/github/statuses.md)
* [GitHub Triggers](block-integrations/github/triggers.md)
* [Google Calendar](block-integrations/google/calendar.md)
* [Google Docs](block-integrations/google/docs.md)
* [Google Gmail](block-integrations/google/gmail.md)
* [Google Sheets](block-integrations/google/sheets.md)
* [HubSpot Company](block-integrations/hubspot/company.md)
* [HubSpot Contact](block-integrations/hubspot/contact.md)
* [HubSpot Engagement](block-integrations/hubspot/engagement.md)
* [Jina Chunking](block-integrations/jina/chunking.md)
* [Jina Embeddings](block-integrations/jina/embeddings.md)
* [Jina Fact Checker](block-integrations/jina/fact_checker.md)
* [Jina Search](block-integrations/jina/search.md)
* [Linear Comment](block-integrations/linear/comment.md)
* [Linear Issues](block-integrations/linear/issues.md)
* [Linear Projects](block-integrations/linear/projects.md)
* [LLM](block-integrations/llm.md)
* [Logic](block-integrations/logic.md)
* [Misc](block-integrations/misc.md)
* [Multimedia](block-integrations/multimedia.md)
* [Notion Create Page](block-integrations/notion/create_page.md)
* [Notion Read Database](block-integrations/notion/read_database.md)
* [Notion Read Page](block-integrations/notion/read_page.md)
* [Notion Read Page Markdown](block-integrations/notion/read_page_markdown.md)
* [Notion Search](block-integrations/notion/search.md)
* [Nvidia Deepfake](block-integrations/nvidia/deepfake.md)
* [Replicate Flux Advanced](block-integrations/replicate/flux_advanced.md)
* [Replicate Replicate Block](block-integrations/replicate/replicate_block.md)
* [Search](block-integrations/search.md)
* [Slant3D Filament](block-integrations/slant3d/filament.md)
* [Slant3D Order](block-integrations/slant3d/order.md)
* [Slant3D Slicing](block-integrations/slant3d/slicing.md)
* [Slant3D Webhook](block-integrations/slant3d/webhook.md)
* [Smartlead Campaign](block-integrations/smartlead/campaign.md)
* [Stagehand Blocks](block-integrations/stagehand/blocks.md)
* [System Library Operations](block-integrations/system/library_operations.md)
* [System Store Operations](block-integrations/system/store_operations.md)
* [Text](block-integrations/text.md)
* [Todoist Comments](block-integrations/todoist/comments.md)
* [Todoist Labels](block-integrations/todoist/labels.md)
* [Todoist Projects](block-integrations/todoist/projects.md)
* [Todoist Sections](block-integrations/todoist/sections.md)
* [Todoist Tasks](block-integrations/todoist/tasks.md)
* [Twitter Blocks](block-integrations/twitter/blocks.md)
* [Twitter Bookmark](block-integrations/twitter/bookmark.md)
* [Twitter Follows](block-integrations/twitter/follows.md)
* [Twitter Hide](block-integrations/twitter/hide.md)
* [Twitter Like](block-integrations/twitter/like.md)
* [Twitter List Follows](block-integrations/twitter/list_follows.md)
* [Twitter List Lookup](block-integrations/twitter/list_lookup.md)
* [Twitter List Members](block-integrations/twitter/list_members.md)
* [Twitter List Tweets Lookup](block-integrations/twitter/list_tweets_lookup.md)
* [Twitter Manage](block-integrations/twitter/manage.md)
* [Twitter Manage Lists](block-integrations/twitter/manage_lists.md)
* [Twitter Mutes](block-integrations/twitter/mutes.md)
* [Twitter Pinned Lists](block-integrations/twitter/pinned_lists.md)
* [Twitter Quote](block-integrations/twitter/quote.md)
* [Twitter Retweet](block-integrations/twitter/retweet.md)
* [Twitter Search Spaces](block-integrations/twitter/search_spaces.md)
* [Twitter Spaces Lookup](block-integrations/twitter/spaces_lookup.md)
* [Twitter Timeline](block-integrations/twitter/timeline.md)
* [Twitter Tweet Lookup](block-integrations/twitter/tweet_lookup.md)
* [Twitter User Lookup](block-integrations/twitter/user_lookup.md)
* [Wolfram LLM API](block-integrations/wolfram/llm_api.md)
* [Zerobounce Validate Emails](block-integrations/zerobounce/validate_emails.md)
67
docs/integrations/block-integrations/claude_code.md
Normal file
@@ -0,0 +1,67 @@
# Claude Code Execution

## What it is
The Claude Code block executes complex coding tasks using Anthropic's Claude Code AI assistant in a secure E2B sandbox environment.

## What it does
This block allows you to delegate coding tasks to Claude Code, which can autonomously create files, install packages, run commands, and build complete applications within a sandboxed environment. Claude Code can handle multi-step development tasks and maintain conversation context across multiple turns.

## How it works
When activated, the block:
1. Creates or connects to an E2B sandbox (a secure, isolated Linux environment)
2. Installs the latest version of Claude Code in the sandbox
3. Optionally runs setup commands to prepare the environment
4. Executes your prompt using Claude Code, which can:
   - Create and edit files
   - Install dependencies (npm, pip, etc.)
   - Run terminal commands
   - Build and test applications
5. Extracts all text files created/modified during execution
6. Returns the response and files, optionally keeping the sandbox alive for follow-up tasks

The block supports conversation continuation through three mechanisms (see the sketch after this list):
- **Same sandbox continuation** (via `session_id` + `sandbox_id`): Resume on the same live sandbox
- **Fresh sandbox continuation** (via `conversation_history`): Restore context on a new sandbox if the previous one timed out
- **Dispose control** (`dispose_sandbox` flag): Keep sandbox alive for multi-turn conversations
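As a rough sketch of how these mechanisms compose, the following plain-Python helper decides which continuation inputs to set for the next turn from a previous run's outputs (the `previous` dict and its example values are hypothetical; the field names follow the tables below):

```python
# Hypothetical illustration of the continuation contract; not a platform API.
def next_run_inputs(previous: dict, follow_up_prompt: str) -> dict:
    """Build inputs for a follow-up turn from a previous run's outputs."""
    inputs = {"prompt": follow_up_prompt, "dispose_sandbox": False}

    if previous.get("sandbox_id"):
        # Sandbox is still alive: resume the same session in the same sandbox.
        inputs["sandbox_id"] = previous["sandbox_id"]
        inputs["session_id"] = previous["session_id"]
    else:
        # Sandbox was disposed or timed out: replay context on a fresh sandbox.
        inputs["conversation_history"] = previous["conversation_history"]

    return inputs


print(next_run_inputs(
    {"sandbox_id": "sbx_123", "session_id": "a1b2", "conversation_history": "..."},
    "Add a dark mode toggle",
))
```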
## Inputs

| Input | Description |
|-------|-------------|
| E2B Credentials | API key for the E2B platform to create the sandbox. Get one at [e2b.dev](https://e2b.dev/docs) |
| Anthropic Credentials | API key for Anthropic to power Claude Code. Get one at [Anthropic's website](https://console.anthropic.com) |
| Prompt | The task or instruction for Claude Code to execute. Claude Code can create files, install packages, run commands, and perform complex coding tasks |
| Timeout | Sandbox timeout in seconds (default: 300). Set higher for complex tasks. Note: Only applies when creating a new sandbox |
| Setup Commands | Optional shell commands to run before executing Claude Code (e.g., installing dependencies) |
| Working Directory | Working directory for Claude Code to operate in (default: /home/user) |
| Session ID | Session ID to resume a previous conversation. Leave empty for new conversations |
| Sandbox ID | Sandbox ID to reconnect to an existing sandbox. Required when resuming a session |
| Conversation History | Previous conversation history to restore context on a fresh sandbox if the previous one timed out |
| Dispose Sandbox | Whether to dispose of the sandbox after execution (default: true). Set to false to continue conversations later |

## Outputs

| Output | Description |
|--------|-------------|
| Response | The output/response from Claude Code execution |
| Files | List of text files created/modified during execution. Each file includes path, relative_path, name, and content fields |
| Conversation History | Full conversation history including this turn. Use to restore context on a fresh sandbox |
| Session ID | Session ID for this conversation. Pass back with sandbox_id to continue the conversation |
| Sandbox ID | ID of the sandbox instance (null if disposed). Pass back with session_id to continue the conversation |
| Error | Error message if execution failed |

## Possible use case
**API Documentation to Full Application:**
A product team wants to quickly prototype applications based on API documentation. They create an agent that:
1. Uses Firecrawl to fetch API documentation from a URL
2. Passes the docs to Claude Code with a prompt like "Create a web app that demonstrates all the key features of this API"
3. Claude Code builds a complete application with HTML/CSS/JS frontend, proper error handling, and example API calls
4. The Files output is used with GitHub blocks to push the generated code to a new repository

The team can then iterate on the application by passing the sandbox_id and session_id back to Claude Code with refinement requests like "Add authentication" or "Improve the UI", and Claude Code will modify the existing files in the same sandbox.

**Multi-turn Development:**
A developer uses Claude Code to scaffold a new project:
- Turn 1: "Create a Python FastAPI project with user authentication" (dispose_sandbox=false)
- Turn 2: Uses the returned session_id + sandbox_id to ask "Add rate limiting middleware"
- Turn 3: Continues with "Add comprehensive tests"

Each turn builds on the previous work in the same sandbox environment.
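A minimal sketch of that multi-turn loop, assuming a stand-in `run_claude_code_block` function in place of actually executing the block on the platform (the function and its return values are hypothetical):

```python
# Sketch of the three-turn flow above; run_claude_code_block is a stand-in, not a real API.
def run_claude_code_block(**inputs) -> dict:
    # Stand-in: pretend every turn succeeds and keeps the same sandbox/session.
    return {
        "response": f"done: {inputs['prompt']}",
        "sandbox_id": inputs.get("sandbox_id") or "sbx_demo",
        "session_id": inputs.get("session_id") or "sess_demo",
    }


turns = [
    "Create a Python FastAPI project with user authentication",
    "Add rate limiting middleware",
    "Add comprehensive tests",
]

state: dict = {}
for prompt in turns:
    outputs = run_claude_code_block(
        prompt=prompt,
        dispose_sandbox=False,  # keep the sandbox alive between turns
        sandbox_id=state.get("sandbox_id", ""),
        session_id=state.get("session_id", ""),
    )
    state = outputs  # carry sandbox_id / session_id forward to the next turn
    print(outputs["response"])
```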