Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-02-05 20:35:10 -05:00)

Compare commits: fix/code-r... → fix/secrt- (19 commits)
| SHA1 |
|---|
| d12a929472 |
| 85b6520710 |
| 5c0a30e728 |
| bfa942e032 |
| 11256076d8 |
| 3ca2387631 |
| ed07f02738 |
| b121030c94 |
| c22c18374d |
| e40233a3ac |
| 3ae5eabf9d |
| a077ba9f03 |
| 5401d54eaa |
| 5ac89d7c0b |
| 4f908d5cb3 |
| c1aa684743 |
| 7e5b84cc5c |
| 09cb313211 |
| c026485023 |
```diff
@@ -152,6 +152,7 @@ REPLICATE_API_KEY=
 REVID_API_KEY=
 SCREENSHOTONE_API_KEY=
 UNREAL_SPEECH_API_KEY=
+ELEVENLABS_API_KEY=
 
 # Data & Search Services
 E2B_API_KEY=
```
autogpt_platform/backend/.gitignore (vendored, 3 changes)
```diff
@@ -19,3 +19,6 @@ load-tests/*.json
 load-tests/*.log
 load-tests/node_modules/*
 migrations/*/rollback*.sql
+
+# Workspace files
+workspaces/
```
```diff
@@ -62,10 +62,12 @@ ENV POETRY_HOME=/opt/poetry \
     DEBIAN_FRONTEND=noninteractive
 ENV PATH=/opt/poetry/bin:$PATH
 
-# Install Python without upgrading system-managed packages
+# Install Python, FFmpeg, and ImageMagick (required for video processing blocks)
 RUN apt-get update && apt-get install -y \
     python3.13 \
     python3-pip \
+    ffmpeg \
+    imagemagick \
     && rm -rf /var/lib/apt/lists/*
 
 # Copy only necessary files from builder
```
```diff
@@ -11,7 +11,7 @@ class ChatConfig(BaseSettings):
 
     # OpenAI API Configuration
     model: str = Field(
-        default="anthropic/claude-opus-4.5", description="Default model to use"
+        default="anthropic/claude-opus-4.6", description="Default model to use"
     )
     title_model: str = Field(
         default="openai/gpt-4o-mini",
```
```diff
@@ -1,15 +1,12 @@
 import asyncio
 import logging
 import time
-import uuid as uuid_module
 from asyncio import CancelledError
 from collections.abc import AsyncGenerator
 from typing import TYPE_CHECKING, Any, cast
 
-import openai
-
-from backend.util.prompt import compress_context
-
 if TYPE_CHECKING:
     from backend.util.prompt import CompressResult
```
```diff
@@ -36,7 +33,7 @@ from backend.data.understanding import (
     get_business_understanding,
 )
 from backend.util.exceptions import NotFoundError
-from backend.util.settings import Settings
+from backend.util.settings import AppEnvironment, Settings
 
 from . import db as chat_db
 from . import stream_registry
```
```diff
@@ -225,8 +222,18 @@ async def _get_system_prompt_template(context: str) -> str:
     try:
         # cache_ttl_seconds=0 disables SDK caching to always get the latest prompt
         # Use asyncio.to_thread to avoid blocking the event loop
+        # In non-production environments, fetch the latest prompt version
+        # instead of the production-labeled version for easier testing
+        label = (
+            None
+            if settings.config.app_env == AppEnvironment.PRODUCTION
+            else "latest"
+        )
         prompt = await asyncio.to_thread(
-            langfuse.get_prompt, config.langfuse_prompt_name, cache_ttl_seconds=0
+            langfuse.get_prompt,
+            config.langfuse_prompt_name,
+            label=label,
+            cache_ttl_seconds=0,
         )
         return prompt.compile(users_information=context)
     except Exception as e:
```
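The hunk above makes prompt fetching environment-aware: production keeps the default (production-labeled) Langfuse prompt, while other environments pull the `latest` label. A minimal runnable sketch of the pattern, with a stub standing in for the Langfuse client (assumption: `get_prompt(name, label=..., cache_ttl_seconds=...)` matches the call shape shown in the diff):

```python
import asyncio
from enum import Enum


class AppEnvironment(str, Enum):
    # Assumed shape of backend.util.settings.AppEnvironment
    DEV = "dev"
    PRODUCTION = "production"


class FakeLangfuse:
    def get_prompt(self, name, label=None, cache_ttl_seconds=0):
        # Stub for the blocking Langfuse SDK call; the real one returns a
        # prompt object with a .compile() method.
        return f"{name}@{label or 'production'}"


async def fetch_prompt(client, name: str, env: AppEnvironment) -> str:
    # Non-production fetches the freshest version for easier testing;
    # cache_ttl_seconds=0 disables SDK-side caching, and to_thread keeps
    # the blocking SDK call off the event loop.
    label = None if env == AppEnvironment.PRODUCTION else "latest"
    return await asyncio.to_thread(
        client.get_prompt, name, label=label, cache_ttl_seconds=0
    )


print(asyncio.run(fetch_prompt(FakeLangfuse(), "copilot-system", AppEnvironment.DEV)))
```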
```diff
@@ -470,6 +477,8 @@ async def stream_chat_completion(
         should_retry = False
 
         # Generate unique IDs for AI SDK protocol
+        import uuid as uuid_module
+
         message_id = str(uuid_module.uuid4())
         text_block_id = str(uuid_module.uuid4())
 
```
```diff
@@ -619,6 +628,9 @@ async def stream_chat_completion(
                     total_tokens=chunk.totalTokens,
                 )
             )
+        elif isinstance(chunk, StreamHeartbeat):
+            # Pass through heartbeat to keep SSE connection alive
+            yield chunk
         else:
             logger.error(f"Unknown chunk type: {type(chunk)}", exc_info=True)
```
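The new `StreamHeartbeat` branch exists because idle SSE connections tend to be dropped by proxies during long LLM or tool calls. A sketch of where such heartbeat frames typically come from, assuming nothing about the real chunk types beyond what the diff shows:

```python
import asyncio
from collections.abc import AsyncGenerator, AsyncIterator

HEARTBEAT_INTERVAL = 15.0  # assumed keep-alive interval, in seconds


async def with_heartbeats(source: AsyncIterator) -> AsyncGenerator[str, None]:
    """Yield SSE frames from `source`, emitting comment frames while it is idle."""
    it = source.__aiter__()
    pending = asyncio.ensure_future(it.__anext__())
    while True:
        done, _ = await asyncio.wait({pending}, timeout=HEARTBEAT_INTERVAL)
        if not done:
            # Nothing arrived in time: an SSE comment line is ignored by
            # clients but keeps intermediaries from closing the connection.
            yield ": heartbeat\n\n"
            continue
        try:
            chunk = pending.result()
        except StopAsyncIteration:
            return
        yield f"data: {chunk}\n\n"
        pending = asyncio.ensure_future(it.__anext__())
```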
```diff
@@ -827,6 +839,10 @@ async def _manage_context_window(
     Returns:
         CompressResult with compacted messages and metadata
     """
+    import openai
+
+    from backend.util.prompt import compress_context
+
     # Convert messages to dict format
     messages_dict = []
     for msg in messages:
```
```diff
@@ -1137,6 +1153,8 @@ async def _yield_tool_call(
         KeyError: If expected tool call fields are missing
         TypeError: If tool call structure is invalid
     """
+    import uuid as uuid_module
+
     tool_name = tool_calls[yield_idx]["function"]["name"]
     tool_call_id = tool_calls[yield_idx]["id"]
 
```
```diff
@@ -1757,6 +1775,8 @@ async def _generate_llm_continuation_with_streaming(
     after a tool result is saved. Chunks are published to the stream registry
     so reconnecting clients can receive them.
     """
+    import uuid as uuid_module
+
     try:
         # Load fresh session from DB (bypass cache to get the updated tool result)
         await invalidate_session_cache(session_id)
```
```diff
@@ -1792,6 +1812,10 @@ async def _generate_llm_continuation_with_streaming(
             extra_body["session_id"] = session_id[:128]
 
         # Make streaming LLM call (no tools - just text response)
+        from typing import cast
+
+        from openai.types.chat import ChatCompletionMessageParam
+
         # Generate unique IDs for AI SDK protocol
         message_id = str(uuid_module.uuid4())
         text_block_id = str(uuid_module.uuid4())
```
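Several hunks in this file move imports (`uuid`, `openai`, `compress_context`, `cast`, `ChatCompletionMessageParam`) from module scope into the functions that use them. A common motive, assumed here, is cutting import-time cost and breaking circular-import chains; the mechanics in miniature:

```python
def lazy_dump(items: list[str]) -> str:
    # The import is resolved on first call and cached in sys.modules
    # afterwards, so module import stays cheap and any circular-import
    # chain through this module is broken.
    import json

    return json.dumps(items)


print(lazy_dump(["a", "b"]))  # ["a", "b"]
```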
```diff
@@ -7,15 +7,7 @@ from typing import Any, NotRequired, TypedDict
 
 from backend.api.features.library import db as library_db
 from backend.api.features.store import db as store_db
-from backend.data.graph import (
-    Graph,
-    Link,
-    Node,
-    create_graph,
-    get_graph,
-    get_graph_all_versions,
-    get_store_listed_graphs,
-)
+from backend.data.graph import Graph, Link, Node, get_graph, get_store_listed_graphs
 from backend.util.exceptions import DatabaseError, NotFoundError
 
 from .service import (
```
```diff
@@ -28,8 +20,6 @@ from .service import (
 
 logger = logging.getLogger(__name__)
 
-AGENT_EXECUTOR_BLOCK_ID = "e189baac-8c20-45a1-94a7-55177ea42565"
-
 
 class ExecutionSummary(TypedDict):
     """Summary of a single execution for quality assessment."""
```
```diff
@@ -669,45 +659,6 @@ def json_to_graph(agent_json: dict[str, Any]) -> Graph:
     )
 
 
-def _reassign_node_ids(graph: Graph) -> None:
-    """Reassign all node and link IDs to new UUIDs.
-
-    This is needed when creating a new version to avoid unique constraint violations.
-    """
-    id_map = {node.id: str(uuid.uuid4()) for node in graph.nodes}
-
-    for node in graph.nodes:
-        node.id = id_map[node.id]
-
-    for link in graph.links:
-        link.id = str(uuid.uuid4())
-        if link.source_id in id_map:
-            link.source_id = id_map[link.source_id]
-        if link.sink_id in id_map:
-            link.sink_id = id_map[link.sink_id]
-
-
-def _populate_agent_executor_user_ids(agent_json: dict[str, Any], user_id: str) -> None:
-    """Populate user_id in AgentExecutorBlock nodes.
-
-    The external agent generator creates AgentExecutorBlock nodes with empty user_id.
-    This function fills in the actual user_id so sub-agents run with correct permissions.
-
-    Args:
-        agent_json: Agent JSON dict (modified in place)
-        user_id: User ID to set
-    """
-    for node in agent_json.get("nodes", []):
-        if node.get("block_id") == AGENT_EXECUTOR_BLOCK_ID:
-            input_default = node.get("input_default") or {}
-            if not input_default.get("user_id"):
-                input_default["user_id"] = user_id
-                node["input_default"] = input_default
-                logger.debug(
-                    f"Set user_id for AgentExecutorBlock node {node.get('id')}"
-                )
-
-
 async def save_agent_to_library(
     agent_json: dict[str, Any], user_id: str, is_update: bool = False
 ) -> tuple[Graph, Any]:
```
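For reference, the core of the removed `_reassign_node_ids` helper is a consistent ID remap: nodes get fresh UUIDs, and links are rewritten through the same mapping so edges still point at the right nodes. A standalone sketch with plain dicts:

```python
import uuid

# Standalone illustration of the removed _reassign_node_ids logic.
nodes = [{"id": "a"}, {"id": "b"}]
links = [{"id": "x", "source_id": "a", "sink_id": "b"}]

# Build one mapping first, then apply it everywhere, so both endpoints of
# every link translate to the same new node IDs.
id_map = {node["id"]: str(uuid.uuid4()) for node in nodes}
for node in nodes:
    node["id"] = id_map[node["id"]]
for link in links:
    link["id"] = str(uuid.uuid4())
    link["source_id"] = id_map.get(link["source_id"], link["source_id"])
    link["sink_id"] = id_map.get(link["sink_id"], link["sink_id"])

assert links[0]["source_id"] == nodes[0]["id"]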
```diff
@@ -721,35 +672,10 @@ async def save_agent_to_library
     Returns:
         Tuple of (created Graph, LibraryAgent)
     """
-    # Populate user_id in AgentExecutorBlock nodes before conversion
-    _populate_agent_executor_user_ids(agent_json, user_id)
-
     graph = json_to_graph(agent_json)
 
     if is_update:
-        if graph.id:
-            existing_versions = await get_graph_all_versions(graph.id, user_id)
-            if existing_versions:
-                latest_version = max(v.version for v in existing_versions)
-                graph.version = latest_version + 1
-        _reassign_node_ids(graph)
-        logger.info(f"Updating agent {graph.id} to version {graph.version}")
-    else:
-        graph.id = str(uuid.uuid4())
-        graph.version = 1
-        _reassign_node_ids(graph)
-        logger.info(f"Creating new agent with ID {graph.id}")
-
-    created_graph = await create_graph(graph, user_id)
-
-    library_agents = await library_db.create_library_agent(
-        graph=created_graph,
-        user_id=user_id,
-        sensitive_action_safe_mode=True,
-        create_library_agents_for_sub_graphs=False,
-    )
-
-    return created_graph, library_agents[0]
+        return await library_db.update_graph_in_library(graph, user_id)
+    return await library_db.create_graph_in_library(graph, user_id)
 
 
 def graph_to_json(graph: Graph) -> dict[str, Any]:
```
```diff
@@ -206,9 +206,9 @@ async def search_agents(
             ]
         )
         no_results_msg = (
-            f"No agents found matching '{query}'. Try different keywords or browse the marketplace."
+            f"No agents found matching '{query}'. Let the user know they can try different keywords or browse the marketplace. Also let them know you can create a custom agent for them based on their needs."
            if source == "marketplace"
-            else f"No agents matching '{query}' found in your library."
+            else f"No agents matching '{query}' found in your library. Let the user know you can create a custom agent for them based on their needs."
        )
        return NoResultsResponse(
            message=no_results_msg, session_id=session_id, suggestions=suggestions
```
```diff
@@ -224,10 +224,10 @@
     message = (
         "Now you have found some options for the user to choose from. "
         "You can add a link to a recommended agent at: /marketplace/agent/agent_id "
-        "Please ask the user if they would like to use any of these agents."
+        "Please ask the user if they would like to use any of these agents. Let the user know we can create a custom agent for them based on their needs."
         if source == "marketplace"
         else "Found agents in the user's library. You can provide a link to view an agent at: "
-        "/library/agents/{agent_id}. Use agent_output to get execution results, or run_agent to execute."
+        "/library/agents/{agent_id}. Use agent_output to get execution results, or run_agent to execute. Let the user know we can create a custom agent for them based on their needs."
     )
 
     return AgentsFoundResponse(
```
```diff
@@ -3,8 +3,6 @@
 import logging
 from typing import Any
 
-from pydantic import BaseModel, field_validator
-
 from backend.api.features.chat.model import ChatSession
 
 from .agent_generator import (
```
```diff
@@ -30,26 +28,6 @@ from .models import (
 logger = logging.getLogger(__name__)
 
 
-class CreateAgentInput(BaseModel):
-    """Input parameters for the create_agent tool."""
-
-    description: str = ""
-    context: str = ""
-    save: bool = True
-    # Internal async processing params (passed by long-running tool handler)
-    _operation_id: str | None = None
-    _task_id: str | None = None
-
-    @field_validator("description", "context", mode="before")
-    @classmethod
-    def strip_strings(cls, v: Any) -> str:
-        """Strip whitespace from string fields."""
-        return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-    class Config:
-        extra = "allow"  # Allow _operation_id, _task_id from kwargs
-
-
 class CreateAgentTool(BaseTool):
     """Tool for creating agents from natural language descriptions."""
```
```diff
@@ -107,7 +85,7 @@ class CreateAgentTool(BaseTool):
         self,
         user_id: str | None,
         session: ChatSession,
-        **kwargs: Any,
+        **kwargs,
     ) -> ToolResponseBase:
         """Execute the create_agent tool.
 
```
```diff
@@ -116,14 +94,16 @@ class CreateAgentTool(BaseTool):
         2. Generate agent JSON (external service handles fixing and validation)
         3. Preview or save based on the save parameter
         """
-        params = CreateAgentInput(**kwargs)
+        description = kwargs.get("description", "").strip()
+        context = kwargs.get("context", "")
+        save = kwargs.get("save", True)
         session_id = session.session_id if session else None
 
-        # Extract async processing params
+        # Extract async processing params (passed by long-running tool handler)
         operation_id = kwargs.get("_operation_id")
         task_id = kwargs.get("_task_id")
 
-        if not params.description:
+        if not description:
             return ErrorResponse(
                 message="Please provide a description of what the agent should do.",
                 error="Missing description parameter",
```
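This hunk sets the pattern repeated through the rest of the tool files: the per-tool Pydantic input models are dropped in favor of inline `kwargs.get` extraction. A small sketch contrasting the two styles, with a hypothetical `description` field:

```python
from pydantic import BaseModel, field_validator


class ExampleInput(BaseModel):
    """Old style: a model validates and normalizes tool kwargs."""

    description: str = ""

    @field_validator("description", mode="before")
    @classmethod
    def strip(cls, v):
        return v.strip() if isinstance(v, str) else ""


def old_style(**kwargs) -> str:
    return ExampleInput(**kwargs).description


def new_style(**kwargs) -> str:
    # New style: extract and normalize inline. Fewer moving parts, but
    # per-field rules now live in the function body, and a non-string
    # value would surface as an AttributeError at call time.
    return kwargs.get("description", "").strip()


assert old_style(description="  hi ") == new_style(description="  hi ") == "hi"
```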
```diff
@@ -135,7 +115,7 @@ class CreateAgentTool(BaseTool):
         try:
             library_agents = await get_all_relevant_agents_for_generation(
                 user_id=user_id,
-                search_query=params.description,
+                search_query=description,
                 include_marketplace=True,
             )
             logger.debug(
```
```diff
@@ -146,7 +126,7 @@ class CreateAgentTool(BaseTool):
 
         try:
             decomposition_result = await decompose_goal(
-                params.description, params.context, library_agents
+                description, context, library_agents
             )
         except AgentGeneratorNotConfiguredError:
             return ErrorResponse(
```
```diff
@@ -162,7 +142,7 @@ class CreateAgentTool(BaseTool):
             return ErrorResponse(
                 message="Failed to analyze the goal. The agent generation service may be unavailable. Please try again.",
                 error="decomposition_failed",
-                details={"description": params.description[:100]},
+                details={"description": description[:100]},
                 session_id=session_id,
             )
 
```
```diff
@@ -178,7 +158,7 @@ class CreateAgentTool(BaseTool):
                 message=user_message,
                 error=f"decomposition_failed:{error_type}",
                 details={
-                    "description": params.description[:100],
+                    "description": description[:100],
                     "service_error": error_msg,
                     "error_type": error_type,
                 },
```
```diff
@@ -264,7 +244,7 @@ class CreateAgentTool(BaseTool):
             return ErrorResponse(
                 message="Failed to generate the agent. The agent generation service may be unavailable. Please try again.",
                 error="generation_failed",
-                details={"description": params.description[:100]},
+                details={"description": description[:100]},
                 session_id=session_id,
             )
 
```
```diff
@@ -286,7 +266,7 @@ class CreateAgentTool(BaseTool):
                 message=user_message,
                 error=f"generation_failed:{error_type}",
                 details={
-                    "description": params.description[:100],
+                    "description": description[:100],
                     "service_error": error_msg,
                     "error_type": error_type,
                 },
```
```diff
@@ -311,7 +291,7 @@ class CreateAgentTool(BaseTool):
         node_count = len(agent_json.get("nodes", []))
         link_count = len(agent_json.get("links", []))
 
-        if not params.save:
+        if not save:
             return AgentPreviewResponse(
                 message=(
                     f"I've generated an agent called '{agent_name}' with {node_count} blocks. "
```
```diff
@@ -3,8 +3,6 @@
 import logging
 from typing import Any
 
-from pydantic import BaseModel, field_validator
-
 from backend.api.features.chat.model import ChatSession
 from backend.api.features.store import db as store_db
 from backend.api.features.store.exceptions import AgentNotFoundError
```
```diff
@@ -29,23 +27,6 @@ from .models import (
 logger = logging.getLogger(__name__)
 
 
-class CustomizeAgentInput(BaseModel):
-    """Input parameters for the customize_agent tool."""
-
-    agent_id: str = ""
-    modifications: str = ""
-    context: str = ""
-    save: bool = True
-
-    @field_validator("agent_id", "modifications", "context", mode="before")
-    @classmethod
-    def strip_strings(cls, v: Any) -> str:
-        """Strip whitespace from string fields."""
-        if isinstance(v, str):
-            return v.strip()
-        return v if v is not None else ""
-
-
 class CustomizeAgentTool(BaseTool):
     """Tool for customizing marketplace/template agents using natural language."""
```
```diff
@@ -111,7 +92,7 @@ class CustomizeAgentTool(BaseTool):
         self,
         user_id: str | None,
         session: ChatSession,
-        **kwargs: Any,
+        **kwargs,
     ) -> ToolResponseBase:
         """Execute the customize_agent tool.
 
```
```diff
@@ -121,17 +102,20 @@ class CustomizeAgentTool(BaseTool):
         3. Call customize_template with the modification request
         4. Preview or save based on the save parameter
         """
-        params = CustomizeAgentInput(**kwargs)
+        agent_id = kwargs.get("agent_id", "").strip()
+        modifications = kwargs.get("modifications", "").strip()
+        context = kwargs.get("context", "")
+        save = kwargs.get("save", True)
         session_id = session.session_id if session else None
 
-        if not params.agent_id:
+        if not agent_id:
             return ErrorResponse(
                 message="Please provide the marketplace agent ID (e.g., 'creator/agent-name').",
                 error="missing_agent_id",
                 session_id=session_id,
             )
 
-        if not params.modifications:
+        if not modifications:
             return ErrorResponse(
                 message="Please describe how you want to customize this agent.",
                 error="missing_modifications",
```
```diff
@@ -139,11 +123,11 @@ class CustomizeAgentTool(BaseTool):
             )
 
         # Parse agent_id in format "creator/slug"
-        parts = [p.strip() for p in params.agent_id.split("/")]
+        parts = [p.strip() for p in agent_id.split("/")]
         if len(parts) != 2 or not parts[0] or not parts[1]:
             return ErrorResponse(
                 message=(
-                    f"Invalid agent ID format: '{params.agent_id}'. "
+                    f"Invalid agent ID format: '{agent_id}'. "
                     "Expected format is 'creator/agent-name' "
                     "(e.g., 'autogpt/newsletter-writer')."
                 ),
```
```diff
@@ -161,14 +145,14 @@ class CustomizeAgentTool(BaseTool):
         except AgentNotFoundError:
             return ErrorResponse(
                 message=(
-                    f"Could not find marketplace agent '{params.agent_id}'. "
+                    f"Could not find marketplace agent '{agent_id}'. "
                     "Please check the agent ID and try again."
                 ),
                 error="agent_not_found",
                 session_id=session_id,
             )
         except Exception as e:
-            logger.error(f"Error fetching marketplace agent {params.agent_id}: {e}")
+            logger.error(f"Error fetching marketplace agent {agent_id}: {e}")
             return ErrorResponse(
                 message="Failed to fetch the marketplace agent. Please try again.",
                 error="fetch_error",
```
```diff
@@ -178,7 +162,7 @@ class CustomizeAgentTool(BaseTool):
         if not agent_details.store_listing_version_id:
             return ErrorResponse(
                 message=(
-                    f"The agent '{params.agent_id}' does not have an available version. "
+                    f"The agent '{agent_id}' does not have an available version. "
                     "Please try a different agent."
                 ),
                 error="no_version_available",
```
```diff
@@ -190,7 +174,7 @@ class CustomizeAgentTool(BaseTool):
         try:
             graph = await store_db.get_agent(agent_details.store_listing_version_id)
             template_agent = graph_to_json(graph)
         except Exception as e:
-            logger.error(f"Error fetching agent graph for {params.agent_id}: {e}")
+            logger.error(f"Error fetching agent graph for {agent_id}: {e}")
             return ErrorResponse(
                 message="Failed to fetch the agent configuration. Please try again.",
                 error="graph_fetch_error",
```
```diff
@@ -201,8 +185,8 @@ class CustomizeAgentTool(BaseTool):
         try:
             result = await customize_template(
                 template_agent=template_agent,
-                modification_request=params.modifications,
-                context=params.context,
+                modification_request=modifications,
+                context=context,
             )
         except AgentGeneratorNotConfiguredError:
             return ErrorResponse(
```
```diff
@@ -214,7 +198,7 @@ class CustomizeAgentTool(BaseTool):
                 session_id=session_id,
             )
         except Exception as e:
-            logger.error(f"Error calling customize_template for {params.agent_id}: {e}")
+            logger.error(f"Error calling customize_template for {agent_id}: {e}")
             return ErrorResponse(
                 message=(
                     "Failed to customize the agent due to a service error. "
```
```diff
@@ -235,25 +219,55 @@ class CustomizeAgentTool(BaseTool):
                 session_id=session_id,
             )
 
-        # Handle response using match/case for cleaner pattern matching
-        return await self._handle_customization_result(
-            result=result,
-            params=params,
-            agent_details=agent_details,
-            user_id=user_id,
-            session_id=session_id,
-        )
-
-    async def _handle_customization_result(
-        self,
-        result: dict[str, Any],
-        params: CustomizeAgentInput,
-        agent_details: Any,
-        user_id: str | None,
-        session_id: str | None,
-    ) -> ToolResponseBase:
-        """Handle the result from customize_template using pattern matching."""
-        # Ensure result is a dict
+        # Handle error response
+        if isinstance(result, dict) and result.get("type") == "error":
+            error_msg = result.get("error", "Unknown error")
+            error_type = result.get("error_type", "unknown")
+            user_message = get_user_message_for_error(
+                error_type,
+                operation="customize the agent",
+                llm_parse_message=(
+                    "The AI had trouble customizing the agent. "
+                    "Please try again or simplify your request."
+                ),
+                validation_message=(
+                    "The customized agent failed validation. "
+                    "Please try rephrasing your request."
+                ),
+                error_details=error_msg,
+            )
+            return ErrorResponse(
+                message=user_message,
+                error=f"customization_failed:{error_type}",
+                session_id=session_id,
+            )
+
+        # Handle clarifying questions
+        if isinstance(result, dict) and result.get("type") == "clarifying_questions":
+            questions = result.get("questions") or []
+            if not isinstance(questions, list):
+                logger.error(
+                    f"Unexpected clarifying questions format: {type(questions)}"
+                )
+                questions = []
+            return ClarificationNeededResponse(
+                message=(
+                    "I need some more information to customize this agent. "
+                    "Please answer the following questions:"
+                ),
+                questions=[
+                    ClarifyingQuestion(
+                        question=q.get("question", ""),
+                        keyword=q.get("keyword", ""),
+                        example=q.get("example"),
+                    )
+                    for q in questions
+                    if isinstance(q, dict)
+                ],
+                session_id=session_id,
+            )
+
+        # Result should be the customized agent JSON
         if not isinstance(result, dict):
             logger.error(f"Unexpected customize_template response type: {type(result)}")
             return ErrorResponse(
```
```diff
@@ -262,77 +276,8 @@ class CustomizeAgentTool(BaseTool):
                 session_id=session_id,
             )
 
-        result_type = result.get("type")
-
-        match result_type:
-            case "error":
-                error_msg = result.get("error", "Unknown error")
-                error_type = result.get("error_type", "unknown")
-                user_message = get_user_message_for_error(
-                    error_type,
-                    operation="customize the agent",
-                    llm_parse_message=(
-                        "The AI had trouble customizing the agent. "
-                        "Please try again or simplify your request."
-                    ),
-                    validation_message=(
-                        "The customized agent failed validation. "
-                        "Please try rephrasing your request."
-                    ),
-                    error_details=error_msg,
-                )
-                return ErrorResponse(
-                    message=user_message,
-                    error=f"customization_failed:{error_type}",
-                    session_id=session_id,
-                )
-
-            case "clarifying_questions":
-                questions_data = result.get("questions") or []
-                if not isinstance(questions_data, list):
-                    logger.error(
-                        f"Unexpected clarifying questions format: {type(questions_data)}"
-                    )
-                    questions_data = []
-
-                questions = [
-                    ClarifyingQuestion(
-                        question=q.get("question", "") if isinstance(q, dict) else "",
-                        keyword=q.get("keyword", "") if isinstance(q, dict) else "",
-                        example=q.get("example") if isinstance(q, dict) else None,
-                    )
-                    for q in questions_data
-                    if isinstance(q, dict)
-                ]
-
-                return ClarificationNeededResponse(
-                    message=(
-                        "I need some more information to customize this agent. "
-                        "Please answer the following questions:"
-                    ),
-                    questions=questions,
-                    session_id=session_id,
-                )
-
-            case _:
-                # Default case: result is the customized agent JSON
-                return await self._save_or_preview_agent(
-                    customized_agent=result,
-                    params=params,
-                    agent_details=agent_details,
-                    user_id=user_id,
-                    session_id=session_id,
-                )
-
-    async def _save_or_preview_agent(
-        self,
-        customized_agent: dict[str, Any],
-        params: CustomizeAgentInput,
-        agent_details: Any,
-        user_id: str | None,
-        session_id: str | None,
-    ) -> ToolResponseBase:
-        """Save or preview the customized agent based on params.save."""
+        customized_agent = result
+
         agent_name = customized_agent.get(
             "name", f"Customized {agent_details.agent_name}"
         )
```
```diff
@@ -342,7 +287,7 @@ class CustomizeAgentTool(BaseTool):
         node_count = len(nodes) if isinstance(nodes, list) else 0
         link_count = len(links) if isinstance(links, list) else 0
 
-        if not params.save:
+        if not save:
             return AgentPreviewResponse(
                 message=(
                     f"I've customized the agent '{agent_details.agent_name}'. "
```
```diff
@@ -3,8 +3,6 @@
 import logging
 from typing import Any
 
-from pydantic import BaseModel, ConfigDict, field_validator
-
 from backend.api.features.chat.model import ChatSession
 
 from .agent_generator import (
```
```diff
@@ -29,20 +27,6 @@ from .models import (
 logger = logging.getLogger(__name__)
 
 
-class EditAgentInput(BaseModel):
-    model_config = ConfigDict(extra="allow")
-
-    agent_id: str = ""
-    changes: str = ""
-    context: str = ""
-    save: bool = True
-
-    @field_validator("agent_id", "changes", "context", mode="before")
-    @classmethod
-    def strip_strings(cls, v: Any) -> str:
-        return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-
 class EditAgentTool(BaseTool):
     """Tool for editing existing agents using natural language."""
```
```diff
@@ -106,7 +90,7 @@ class EditAgentTool(BaseTool):
         self,
         user_id: str | None,
         session: ChatSession,
-        **kwargs: Any,
+        **kwargs,
     ) -> ToolResponseBase:
         """Execute the edit_agent tool.
 
```
```diff
@@ -115,32 +99,35 @@ class EditAgentTool(BaseTool):
         2. Generate updated agent (external service handles fixing and validation)
         3. Preview or save based on the save parameter
         """
-        params = EditAgentInput(**kwargs)
+        agent_id = kwargs.get("agent_id", "").strip()
+        changes = kwargs.get("changes", "").strip()
+        context = kwargs.get("context", "")
+        save = kwargs.get("save", True)
         session_id = session.session_id if session else None
 
         # Extract async processing params (passed by long-running tool handler)
         operation_id = kwargs.get("_operation_id")
         task_id = kwargs.get("_task_id")
 
-        if not params.agent_id:
+        if not agent_id:
             return ErrorResponse(
                 message="Please provide the agent ID to edit.",
                 error="Missing agent_id parameter",
                 session_id=session_id,
             )
 
-        if not params.changes:
+        if not changes:
             return ErrorResponse(
                 message="Please describe what changes you want to make.",
                 error="Missing changes parameter",
                 session_id=session_id,
             )
 
-        current_agent = await get_agent_as_json(params.agent_id, user_id)
+        current_agent = await get_agent_as_json(agent_id, user_id)
 
         if current_agent is None:
             return ErrorResponse(
-                message=f"Could not find agent '{params.agent_id}' in your library.",
+                message=f"Could not find agent with ID '{agent_id}' in your library.",
                 error="agent_not_found",
                 session_id=session_id,
             )
```
```diff
@@ -151,7 +138,7 @@ class EditAgentTool(BaseTool):
             graph_id = current_agent.get("id")
             library_agents = await get_all_relevant_agents_for_generation(
                 user_id=user_id,
-                search_query=params.changes,
+                search_query=changes,
                 exclude_graph_id=graph_id,
                 include_marketplace=True,
             )
```
```diff
@@ -161,11 +148,9 @@ class EditAgentTool(BaseTool):
         except Exception as e:
             logger.warning(f"Failed to fetch library agents: {e}")
 
-        update_request = params.changes
-        if params.context:
-            update_request = (
-                f"{params.changes}\n\nAdditional context:\n{params.context}"
-            )
+        update_request = changes
+        if context:
+            update_request = f"{changes}\n\nAdditional context:\n{context}"
 
         try:
             result = await generate_agent_patch(
```
```diff
@@ -189,7 +174,7 @@ class EditAgentTool(BaseTool):
             return ErrorResponse(
                 message="Failed to generate changes. The agent generation service may be unavailable or timed out. Please try again.",
                 error="update_generation_failed",
-                details={"agent_id": params.agent_id, "changes": params.changes[:100]},
+                details={"agent_id": agent_id, "changes": changes[:100]},
                 session_id=session_id,
             )
 
```
```diff
@@ -221,8 +206,8 @@ class EditAgentTool(BaseTool):
                 message=user_message,
                 error=f"update_generation_failed:{error_type}",
                 details={
-                    "agent_id": params.agent_id,
-                    "changes": params.changes[:100],
+                    "agent_id": agent_id,
+                    "changes": changes[:100],
                     "service_error": error_msg,
                     "error_type": error_type,
                 },
```
```diff
@@ -254,7 +239,7 @@ class EditAgentTool(BaseTool):
         node_count = len(updated_agent.get("nodes", []))
         link_count = len(updated_agent.get("links", []))
 
-        if not params.save:
+        if not save:
             return AgentPreviewResponse(
                 message=(
                     f"I've updated the agent. "
```
```diff
@@ -2,8 +2,6 @@
 
 from typing import Any
 
-from pydantic import BaseModel, field_validator
-
 from backend.api.features.chat.model import ChatSession
 
 from .agent_search import search_agents
```
```diff
@@ -11,18 +9,6 @@ from .base import BaseTool
 from .models import ToolResponseBase
 
 
-class FindAgentInput(BaseModel):
-    """Input parameters for the find_agent tool."""
-
-    query: str = ""
-
-    @field_validator("query", mode="before")
-    @classmethod
-    def strip_string(cls, v: Any) -> str:
-        """Strip whitespace from query."""
-        return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-
 class FindAgentTool(BaseTool):
     """Tool for discovering agents from the marketplace."""
```
```diff
@@ -50,11 +36,10 @@ class FindAgentTool(BaseTool):
         }
 
     async def _execute(
-        self, user_id: str | None, session: ChatSession, **kwargs: Any
+        self, user_id: str | None, session: ChatSession, **kwargs
     ) -> ToolResponseBase:
-        params = FindAgentInput(**kwargs)
         return await search_agents(
-            query=params.query,
+            query=kwargs.get("query", "").strip(),
             source="marketplace",
             session_id=session.session_id,
             user_id=user_id,
```
```diff
@@ -2,7 +2,6 @@ import logging
 from typing import Any
 
 from prisma.enums import ContentType
-from pydantic import BaseModel, field_validator
 
 from backend.api.features.chat.model import ChatSession
 from backend.api.features.chat.tools.base import BaseTool, ToolResponseBase
```
```diff
@@ -19,18 +18,6 @@ from backend.data.block import get_block
 logger = logging.getLogger(__name__)
 
 
-class FindBlockInput(BaseModel):
-    """Input parameters for the find_block tool."""
-
-    query: str = ""
-
-    @field_validator("query", mode="before")
-    @classmethod
-    def strip_string(cls, v: Any) -> str:
-        """Strip whitespace from query."""
-        return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-
 class FindBlockTool(BaseTool):
     """Tool for searching available blocks."""
```
```diff
@@ -72,24 +59,24 @@ class FindBlockTool(BaseTool):
         self,
         user_id: str | None,
         session: ChatSession,
-        **kwargs: Any,
+        **kwargs,
     ) -> ToolResponseBase:
         """Search for blocks matching the query.
 
         Args:
             user_id: User ID (required)
             session: Chat session
             **kwargs: Tool parameters
                 query: Search query
 
         Returns:
             BlockListResponse: List of matching blocks
             NoResultsResponse: No blocks found
             ErrorResponse: Error message
         """
-        params = FindBlockInput(**kwargs)
+        query = kwargs.get("query", "").strip()
         session_id = session.session_id
 
-        if not params.query:
+        if not query:
             return ErrorResponse(
                 message="Please provide a search query",
                 session_id=session_id,
```
```diff
@@ -98,7 +85,7 @@ class FindBlockTool(BaseTool):
         try:
             # Search for blocks using hybrid search
             results, total = await unified_hybrid_search(
-                query=params.query,
+                query=query,
                 content_types=[ContentType.BLOCK],
                 page=1,
                 page_size=10,
```
```diff
@@ -106,7 +93,7 @@ class FindBlockTool(BaseTool):
 
         if not results:
             return NoResultsResponse(
-                message=f"No blocks found for '{params.query}'",
+                message=f"No blocks found for '{query}'",
                 suggestions=[
                     "Try broader keywords like 'email', 'http', 'text', 'ai'",
                     "Check spelling of technical terms",
```
```diff
@@ -178,7 +165,7 @@ class FindBlockTool(BaseTool):
 
         if not blocks:
             return NoResultsResponse(
-                message=f"No blocks found for '{params.query}'",
+                message=f"No blocks found for '{query}'",
                 suggestions=[
                     "Try broader keywords like 'email', 'http', 'text', 'ai'",
                 ],
```
```diff
@@ -187,13 +174,13 @@ class FindBlockTool(BaseTool):
 
         return BlockListResponse(
             message=(
-                f"Found {len(blocks)} block(s) matching '{params.query}'. "
+                f"Found {len(blocks)} block(s) matching '{query}'. "
                 "To execute a block, use run_block with the block's 'id' field "
                 "and provide 'input_data' matching the block's input_schema."
             ),
             blocks=blocks,
             count=len(blocks),
-            query=params.query,
+            query=query,
             session_id=session_id,
         )
```
```diff
@@ -2,8 +2,6 @@
 
 from typing import Any
 
-from pydantic import BaseModel, field_validator
-
 from backend.api.features.chat.model import ChatSession
 
 from .agent_search import search_agents
```
```diff
@@ -11,15 +9,6 @@ from .base import BaseTool
 from .models import ToolResponseBase
 
 
-class FindLibraryAgentInput(BaseModel):
-    query: str = ""
-
-    @field_validator("query", mode="before")
-    @classmethod
-    def strip_string(cls, v: Any) -> str:
-        return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-
 class FindLibraryAgentTool(BaseTool):
     """Tool for searching agents in the user's library."""
```
```diff
@@ -53,11 +42,10 @@ class FindLibraryAgentTool(BaseTool):
         return True
 
     async def _execute(
-        self, user_id: str | None, session: ChatSession, **kwargs: Any
+        self, user_id: str | None, session: ChatSession, **kwargs
    ) -> ToolResponseBase:
-        params = FindLibraryAgentInput(**kwargs)
         return await search_agents(
-            query=params.query,
+            query=kwargs.get("query", "").strip(),
             source="library",
             session_id=session.session_id,
             user_id=user_id,
```
```diff
@@ -4,8 +4,6 @@ import logging
 from pathlib import Path
 from typing import Any
 
-from pydantic import BaseModel, field_validator
-
 from backend.api.features.chat.model import ChatSession
 from backend.api.features.chat.tools.base import BaseTool
 from backend.api.features.chat.tools.models import (
```
```diff
@@ -20,18 +18,6 @@ logger = logging.getLogger(__name__)
 DOCS_BASE_URL = "https://docs.agpt.co"
 
 
-class GetDocPageInput(BaseModel):
-    """Input parameters for the get_doc_page tool."""
-
-    path: str = ""
-
-    @field_validator("path", mode="before")
-    @classmethod
-    def strip_string(cls, v: Any) -> str:
-        """Strip whitespace from path."""
-        return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-
 class GetDocPageTool(BaseTool):
     """Tool for fetching full content of a documentation page."""
```
```diff
@@ -89,23 +75,23 @@ class GetDocPageTool(BaseTool):
         self,
         user_id: str | None,
         session: ChatSession,
-        **kwargs: Any,
+        **kwargs,
     ) -> ToolResponseBase:
         """Fetch full content of a documentation page.
 
         Args:
             user_id: User ID (not required for docs)
             session: Chat session
             **kwargs: Tool parameters
                 path: Path to the documentation file
 
         Returns:
             DocPageResponse: Full document content
             ErrorResponse: Error message
         """
-        params = GetDocPageInput(**kwargs)
+        path = kwargs.get("path", "").strip()
         session_id = session.session_id if session else None
 
-        if not params.path:
+        if not path:
             return ErrorResponse(
                 message="Please provide a documentation path.",
                 error="Missing path parameter",
```
```diff
@@ -113,7 +99,7 @@ class GetDocPageTool(BaseTool):
             )
 
         # Sanitize path to prevent directory traversal
-        if ".." in params.path or params.path.startswith("/"):
+        if ".." in path or path.startswith("/"):
             return ErrorResponse(
                 message="Invalid documentation path.",
                 error="invalid_path",
```
```diff
@@ -121,11 +107,11 @@ class GetDocPageTool(BaseTool):
             )
 
         docs_root = self._get_docs_root()
-        full_path = docs_root / params.path
+        full_path = docs_root / path
 
         if not full_path.exists():
             return ErrorResponse(
-                message=f"Documentation page not found: {params.path}",
+                message=f"Documentation page not found: {path}",
                 error="not_found",
                 session_id=session_id,
             )
```
```diff
@@ -142,19 +128,19 @@ class GetDocPageTool(BaseTool):
 
         try:
             content = full_path.read_text(encoding="utf-8")
-            title = self._extract_title(content, params.path)
+            title = self._extract_title(content, path)
 
             return DocPageResponse(
                 message=f"Retrieved documentation page: {title}",
                 title=title,
-                path=params.path,
+                path=path,
                 content=content,
-                doc_url=self._make_doc_url(params.path),
+                doc_url=self._make_doc_url(path),
                 session_id=session_id,
             )
 
         except Exception as e:
-            logger.error(f"Failed to read documentation page {params.path}: {e}")
+            logger.error(f"Failed to read documentation page {path}: {e}")
             return ErrorResponse(
                 message=f"Failed to read documentation page: {str(e)}",
                 error="read_failed",
```
```diff
@@ -5,7 +5,6 @@ import uuid
 from collections import defaultdict
 from typing import Any
 
-from pydantic import BaseModel, field_validator
 from pydantic_core import PydanticUndefined
 
 from backend.api.features.chat.model import ChatSession
```
```diff
@@ -30,25 +29,6 @@ from .utils import build_missing_credentials_from_field_info
 logger = logging.getLogger(__name__)
 
 
-class RunBlockInput(BaseModel):
-    """Input parameters for the run_block tool."""
-
-    block_id: str = ""
-    input_data: dict[str, Any] = {}
-
-    @field_validator("block_id", mode="before")
-    @classmethod
-    def strip_block_id(cls, v: Any) -> str:
-        """Strip whitespace from block_id."""
-        return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-    @field_validator("input_data", mode="before")
-    @classmethod
-    def ensure_dict(cls, v: Any) -> dict[str, Any]:
-        """Ensure input_data is a dict."""
-        return v if isinstance(v, dict) else {}
-
-
 class RunBlockTool(BaseTool):
     """Tool for executing a block and returning its outputs."""
```
```diff
@@ -182,29 +162,37 @@ class RunBlockTool(BaseTool):
         self,
         user_id: str | None,
         session: ChatSession,
-        **kwargs: Any,
+        **kwargs,
     ) -> ToolResponseBase:
         """Execute a block with the given input data.
 
         Args:
             user_id: User ID (required)
             session: Chat session
             **kwargs: Tool parameters
                 block_id: Block UUID to execute
                 input_data: Input values for the block
 
         Returns:
             BlockOutputResponse: Block execution outputs
             SetupRequirementsResponse: Missing credentials
             ErrorResponse: Error message
         """
-        params = RunBlockInput(**kwargs)
+        block_id = kwargs.get("block_id", "").strip()
+        input_data = kwargs.get("input_data", {})
         session_id = session.session_id
 
-        if not params.block_id:
+        if not block_id:
             return ErrorResponse(
                 message="Please provide a block_id",
                 session_id=session_id,
             )
 
+        if not isinstance(input_data, dict):
+            return ErrorResponse(
+                message="input_data must be an object",
+                session_id=session_id,
+            )
+
         if not user_id:
             return ErrorResponse(
                 message="Authentication required",
```
```diff
@@ -212,25 +200,23 @@ class RunBlockTool(BaseTool):
             )
 
         # Get the block
-        block = get_block(params.block_id)
+        block = get_block(block_id)
         if not block:
             return ErrorResponse(
-                message=f"Block '{params.block_id}' not found",
+                message=f"Block '{block_id}' not found",
                 session_id=session_id,
             )
         if block.disabled:
             return ErrorResponse(
-                message=f"Block '{params.block_id}' is disabled",
+                message=f"Block '{block_id}' is disabled",
                 session_id=session_id,
             )
 
-        logger.info(
-            f"Executing block {block.name} ({params.block_id}) for user {user_id}"
-        )
+        logger.info(f"Executing block {block.name} ({block_id}) for user {user_id}")
 
         creds_manager = IntegrationCredentialsManager()
         matched_credentials, missing_credentials = await self._check_block_credentials(
-            user_id, block, params.input_data
+            user_id, block, input_data
         )
 
         if missing_credentials:
```
```diff
@@ -248,7 +234,7 @@ class RunBlockTool(BaseTool):
                 ),
                 session_id=session_id,
                 setup_info=SetupInfo(
-                    agent_id=params.block_id,
+                    agent_id=block_id,
                     agent_name=block.name,
                     user_readiness=UserReadiness(
                         has_all_credentials=False,
```
```diff
@@ -277,7 +263,7 @@ class RunBlockTool(BaseTool):
         # - node_exec_id = unique per block execution
         synthetic_graph_id = f"copilot-session-{session.session_id}"
         synthetic_graph_exec_id = f"copilot-session-{session.session_id}"
-        synthetic_node_id = f"copilot-node-{params.block_id}"
+        synthetic_node_id = f"copilot-node-{block_id}"
         synthetic_node_exec_id = (
             f"copilot-{session.session_id}-{uuid.uuid4().hex[:8]}"
         )
```
```diff
@@ -312,8 +298,8 @@ class RunBlockTool(BaseTool):
 
         for field_name, cred_meta in matched_credentials.items():
             # Inject metadata into input_data (for validation)
-            if field_name not in params.input_data:
-                params.input_data[field_name] = cred_meta.model_dump()
+            if field_name not in input_data:
+                input_data[field_name] = cred_meta.model_dump()
 
             # Fetch actual credentials and pass as kwargs (for execution)
             actual_credentials = await creds_manager.get(
```
```diff
@@ -330,14 +316,14 @@ class RunBlockTool(BaseTool):
         # Execute the block and collect outputs
         outputs: dict[str, list[Any]] = defaultdict(list)
         async for output_name, output_data in block.execute(
-            params.input_data,
+            input_data,
             **exec_kwargs,
         ):
             outputs[output_name].append(output_data)
 
         return BlockOutputResponse(
             message=f"Block '{block.name}' executed successfully",
-            block_id=params.block_id,
+            block_id=block_id,
             block_name=block.name,
             outputs=dict(outputs),
             success=True,
```
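The execution loop in this hunk drains a block's async generator into a dict of output lists, since a block may emit several values on the same output pin. A runnable sketch with a fake block standing in for `block.execute`:

```python
import asyncio
from collections import defaultdict
from collections.abc import AsyncGenerator
from typing import Any


async def fake_block(input_data: dict) -> AsyncGenerator[tuple[str, Any], None]:
    # Stand-in for block.execute(): blocks emit (output_name, value) pairs,
    # possibly several values per output pin.
    yield "result", input_data["x"] * 2
    yield "log", "doubled"
    yield "result", input_data["x"] * 3


async def main() -> None:
    outputs: dict[str, list[Any]] = defaultdict(list)
    async for name, data in fake_block({"x": 2}):
        outputs[name].append(data)
    assert dict(outputs) == {"result": [4, 6], "log": ["doubled"]}


asyncio.run(main())
```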
```diff
@@ -4,7 +4,6 @@ import logging
 from typing import Any
 
 from prisma.enums import ContentType
-from pydantic import BaseModel, field_validator
 
 from backend.api.features.chat.model import ChatSession
 from backend.api.features.chat.tools.base import BaseTool
```
```diff
@@ -29,18 +28,6 @@ MAX_RESULTS = 5
 SNIPPET_LENGTH = 200
 
 
-class SearchDocsInput(BaseModel):
-    """Input parameters for the search_docs tool."""
-
-    query: str = ""
-
-    @field_validator("query", mode="before")
-    @classmethod
-    def strip_string(cls, v: Any) -> str:
-        """Strip whitespace from query."""
-        return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-
 class SearchDocsTool(BaseTool):
     """Tool for searching AutoGPT platform documentation."""
```
```diff
@@ -104,24 +91,24 @@ class SearchDocsTool(BaseTool):
         self,
         user_id: str | None,
         session: ChatSession,
-        **kwargs: Any,
+        **kwargs,
     ) -> ToolResponseBase:
         """Search documentation and return relevant sections.
 
         Args:
             user_id: User ID (not required for docs)
             session: Chat session
             **kwargs: Tool parameters
                 query: Search query
 
         Returns:
             DocSearchResultsResponse: List of matching documentation sections
             NoResultsResponse: No results found
             ErrorResponse: Error message
         """
-        params = SearchDocsInput(**kwargs)
+        query = kwargs.get("query", "").strip()
         session_id = session.session_id if session else None
 
-        if not params.query:
+        if not query:
             return ErrorResponse(
                 message="Please provide a search query.",
                 error="Missing query parameter",
```
```diff
@@ -131,7 +118,7 @@ class SearchDocsTool(BaseTool):
         try:
             # Search using hybrid search for DOCUMENTATION content type only
             results, total = await unified_hybrid_search(
-                query=params.query,
+                query=query,
                 content_types=[ContentType.DOCUMENTATION],
                 page=1,
                 page_size=MAX_RESULTS * 2,  # Fetch extra for deduplication
```
```diff
@@ -140,7 +127,7 @@ class SearchDocsTool(BaseTool):
 
         if not results:
             return NoResultsResponse(
-                message=f"No documentation found for '{params.query}'.",
+                message=f"No documentation found for '{query}'.",
                 suggestions=[
                     "Try different keywords",
                     "Use more general terms",
```
```diff
@@ -175,7 +162,7 @@ class SearchDocsTool(BaseTool):
 
         if not deduplicated:
             return NoResultsResponse(
-                message=f"No documentation found for '{params.query}'.",
+                message=f"No documentation found for '{query}'.",
                 suggestions=[
                     "Try different keywords",
                     "Use more general terms",
```
```diff
@@ -208,7 +195,7 @@ class SearchDocsTool(BaseTool):
             message=f"Found {len(doc_results)} relevant documentation sections.",
             results=doc_results,
             count=len(doc_results),
-            query=params.query,
+            query=query,
             session_id=session_id,
         )
```
```diff
@@ -8,7 +8,12 @@ from backend.api.features.library import model as library_model
 from backend.api.features.store import db as store_db
 from backend.data import graph as graph_db
 from backend.data.graph import GraphModel
-from backend.data.model import Credentials, CredentialsFieldInfo, CredentialsMetaInput
+from backend.data.model import (
+    CredentialsFieldInfo,
+    CredentialsMetaInput,
+    HostScopedCredentials,
+    OAuth2Credentials,
+)
 from backend.integrations.creds_manager import IntegrationCredentialsManager
 from backend.util.exceptions import NotFoundError
```
```diff
@@ -273,7 +278,14 @@ async def match_user_credentials_to_graph(
                 for cred in available_creds
                 if cred.provider in credential_requirements.provider
                 and cred.type in credential_requirements.supported_types
-                and _credential_has_required_scopes(cred, credential_requirements)
+                and (
+                    cred.type != "oauth2"
+                    or _credential_has_required_scopes(cred, credential_requirements)
+                )
+                and (
+                    cred.type != "host_scoped"
+                    or _credential_is_for_host(cred, credential_requirements)
+                )
             ),
             None,
         )
```
```diff
@@ -318,19 +330,10 @@ async def match_user_credentials_to_graph(
 
 
 def _credential_has_required_scopes(
-    credential: Credentials,
+    credential: OAuth2Credentials,
     requirements: CredentialsFieldInfo,
 ) -> bool:
-    """
-    Check if a credential has all the scopes required by the block.
-
-    For OAuth2 credentials, verifies that the credential's scopes are a superset
-    of the required scopes. For other credential types, returns True (no scope check).
-    """
-    # Only OAuth2 credentials have scopes to check
-    if credential.type != "oauth2":
-        return True
-
+    """Check if an OAuth2 credential has all the scopes required by the input."""
     # If no scopes are required, any credential matches
     if not requirements.required_scopes:
         return True
```
```diff
@@ -339,6 +342,22 @@ def _credential_has_required_scopes(
     return set(credential.scopes).issuperset(requirements.required_scopes)
 
 
+def _credential_is_for_host(
+    credential: HostScopedCredentials,
+    requirements: CredentialsFieldInfo,
+) -> bool:
+    """Check if a host-scoped credential matches the host required by the input."""
+    # We need to know the host to match host-scoped credentials to.
+    # Graph.aggregate_credentials_inputs() adds the node's set URL value (if any)
+    # to discriminator_values. No discriminator_values -> no host to match against.
+    if not requirements.discriminator_values:
+        return True
+
+    # Check that credential host matches required host.
+    # Host-scoped credential inputs are grouped by host, so any item from the set works.
+    return credential.matches_url(list(requirements.discriminator_values)[0])
+
+
 async def check_user_has_required_credentials(
     user_id: str,
     required_credentials: list[CredentialsMetaInput],
```
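Taken together, these two hunks split credential matching into type-specific checks: a scope-superset test for OAuth2, a host-match test for host-scoped credentials, and everything else matched on provider and type alone. A runnable sketch of the predicate shape, with simplified stand-in types (the real `CredentialsFieldInfo` and credential classes live in `backend.data.model`):

```python
from dataclasses import dataclass, field


@dataclass
class Requirements:
    # Simplified stand-in for CredentialsFieldInfo
    required_scopes: set[str] = field(default_factory=set)
    discriminator_values: set[str] = field(default_factory=set)


@dataclass
class Cred:
    type: str  # "oauth2", "host_scoped", "api_key", ...
    scopes: set[str] = field(default_factory=set)
    host: str = ""


def matches(cred: Cred, req: Requirements) -> bool:
    # Each guard only fires for its own credential type, so an api_key
    # credential is never rejected for lacking OAuth scopes.
    if cred.type == "oauth2" and req.required_scopes:
        return cred.scopes.issuperset(req.required_scopes)
    if cred.type == "host_scoped" and req.discriminator_values:
        return cred.host in req.discriminator_values  # real code uses matches_url()
    return True


assert matches(Cred("api_key"), Requirements(required_scopes={"read"}))
assert not matches(Cred("oauth2", scopes={"read"}), Requirements(required_scopes={"read", "write"}))
```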
```diff
@@ -2,9 +2,9 @@
 
 import base64
 import logging
-from typing import Any
+from typing import Any, Optional
 
-from pydantic import BaseModel, field_validator
+from pydantic import BaseModel
 
 from backend.api.features.chat.model import ChatSession
 from backend.data.workspace import get_or_create_workspace
```
```diff
@@ -78,65 +78,6 @@ class WorkspaceDeleteResponse(ToolResponseBase):
     success: bool
 
 
-# Input models for workspace tools
-class ListWorkspaceFilesInput(BaseModel):
-    """Input parameters for list_workspace_files tool."""
-
-    path_prefix: str | None = None
-    limit: int = 50
-    include_all_sessions: bool = False
-
-    @field_validator("path_prefix", mode="before")
-    @classmethod
-    def strip_path(cls, v: Any) -> str | None:
-        return v.strip() if isinstance(v, str) else None
-
-
-class ReadWorkspaceFileInput(BaseModel):
-    """Input parameters for read_workspace_file tool."""
-
-    file_id: str | None = None
-    path: str | None = None
-    force_download_url: bool = False
-
-    @field_validator("file_id", "path", mode="before")
-    @classmethod
-    def strip_strings(cls, v: Any) -> str | None:
-        return v.strip() if isinstance(v, str) else None
-
-
-class WriteWorkspaceFileInput(BaseModel):
-    """Input parameters for write_workspace_file tool."""
-
-    filename: str = ""
-    content_base64: str = ""
-    path: str | None = None
-    mime_type: str | None = None
-    overwrite: bool = False
-
-    @field_validator("filename", "content_base64", mode="before")
-    @classmethod
-    def strip_required(cls, v: Any) -> str:
-        return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-    @field_validator("path", "mime_type", mode="before")
-    @classmethod
-    def strip_optional(cls, v: Any) -> str | None:
-        return v.strip() if isinstance(v, str) else None
-
-
-class DeleteWorkspaceFileInput(BaseModel):
-    """Input parameters for delete_workspace_file tool."""
-
-    file_id: str | None = None
-    path: str | None = None
-
-    @field_validator("file_id", "path", mode="before")
-    @classmethod
-    def strip_strings(cls, v: Any) -> str | None:
-        return v.strip() if isinstance(v, str) else None
-
-
 class ListWorkspaceFilesTool(BaseTool):
     """Tool for listing files in user's workspace."""
```
```diff
@@ -190,9 +131,8 @@ class ListWorkspaceFilesTool(BaseTool):
         self,
         user_id: str | None,
         session: ChatSession,
-        **kwargs: Any,
+        **kwargs,
     ) -> ToolResponseBase:
-        params = ListWorkspaceFilesInput(**kwargs)
         session_id = session.session_id
 
         if not user_id:
```
```diff
@@ -201,7 +141,9 @@ class ListWorkspaceFilesTool(BaseTool):
                 session_id=session_id,
             )
 
-        limit = min(params.limit, 100)
+        path_prefix: Optional[str] = kwargs.get("path_prefix")
+        limit = min(kwargs.get("limit", 50), 100)
+        include_all_sessions: bool = kwargs.get("include_all_sessions", False)
 
         try:
             workspace = await get_or_create_workspace(user_id)
```
```diff
@@ -209,13 +151,13 @@ class ListWorkspaceFilesTool(BaseTool):
             manager = WorkspaceManager(user_id, workspace.id, session_id)
 
             files = await manager.list_files(
-                path=params.path_prefix,
+                path=path_prefix,
                 limit=limit,
-                include_all_sessions=params.include_all_sessions,
+                include_all_sessions=include_all_sessions,
             )
             total = await manager.get_file_count(
-                path=params.path_prefix,
-                include_all_sessions=params.include_all_sessions,
+                path=path_prefix,
+                include_all_sessions=include_all_sessions,
             )
 
             file_infos = [
```
```diff
@@ -229,9 +171,7 @@ class ListWorkspaceFilesTool(BaseTool):
                 for f in files
             ]
 
-            scope_msg = (
-                "all sessions" if params.include_all_sessions else "current session"
-            )
+            scope_msg = "all sessions" if include_all_sessions else "current session"
             return WorkspaceFileListResponse(
                 files=file_infos,
                 total_count=total,
```
@@ -319,9 +259,8 @@ class ReadWorkspaceFileTool(BaseTool):
|
||||
self,
|
||||
user_id: str | None,
|
||||
session: ChatSession,
|
||||
**kwargs: Any,
|
||||
**kwargs,
|
||||
) -> ToolResponseBase:
|
||||
params = ReadWorkspaceFileInput(**kwargs)
|
||||
session_id = session.session_id
|
||||
|
||||
if not user_id:
|
||||
@@ -330,7 +269,11 @@ class ReadWorkspaceFileTool(BaseTool):
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
if not params.file_id and not params.path:
|
||||
file_id: Optional[str] = kwargs.get("file_id")
|
||||
path: Optional[str] = kwargs.get("path")
|
||||
force_download_url: bool = kwargs.get("force_download_url", False)
|
||||
|
||||
if not file_id and not path:
|
||||
return ErrorResponse(
|
||||
message="Please provide either file_id or path",
|
||||
session_id=session_id,
|
||||
@@ -342,21 +285,21 @@ class ReadWorkspaceFileTool(BaseTool):
|
||||
manager = WorkspaceManager(user_id, workspace.id, session_id)
|
||||
|
||||
# Get file info
|
||||
if params.file_id:
|
||||
file_info = await manager.get_file_info(params.file_id)
|
||||
if file_id:
|
||||
file_info = await manager.get_file_info(file_id)
|
||||
if file_info is None:
|
||||
return ErrorResponse(
|
||||
message=f"File not found: {params.file_id}",
|
||||
message=f"File not found: {file_id}",
|
||||
session_id=session_id,
|
||||
)
|
||||
target_file_id = params.file_id
|
||||
target_file_id = file_id
|
||||
else:
|
||||
# path is guaranteed to be non-None here due to the check above
|
||||
assert params.path is not None
|
||||
file_info = await manager.get_file_info_by_path(params.path)
|
||||
assert path is not None
|
||||
file_info = await manager.get_file_info_by_path(path)
|
||||
if file_info is None:
|
||||
return ErrorResponse(
|
||||
message=f"File not found at path: {params.path}",
|
||||
message=f"File not found at path: {path}",
|
||||
session_id=session_id,
|
||||
)
|
||||
target_file_id = file_info.id
|
||||
@@ -366,7 +309,7 @@ class ReadWorkspaceFileTool(BaseTool):
|
||||
is_text_file = self._is_text_mime_type(file_info.mimeType)
|
||||
|
||||
# Return inline content for small text files (unless force_download_url)
|
||||
if is_small_file and is_text_file and not params.force_download_url:
|
||||
if is_small_file and is_text_file and not force_download_url:
|
||||
content = await manager.read_file_by_id(target_file_id)
|
||||
content_b64 = base64.b64encode(content).decode("utf-8")
|
||||
|
||||
@@ -486,9 +429,8 @@ class WriteWorkspaceFileTool(BaseTool):
|
||||
self,
|
||||
user_id: str | None,
|
||||
session: ChatSession,
|
||||
**kwargs: Any,
|
||||
**kwargs,
|
||||
) -> ToolResponseBase:
|
||||
params = WriteWorkspaceFileInput(**kwargs)
|
||||
session_id = session.session_id
|
||||
|
||||
if not user_id:
|
||||
@@ -497,13 +439,19 @@ class WriteWorkspaceFileTool(BaseTool):
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
if not params.filename:
|
||||
filename: str = kwargs.get("filename", "")
|
||||
content_b64: str = kwargs.get("content_base64", "")
|
||||
path: Optional[str] = kwargs.get("path")
|
||||
mime_type: Optional[str] = kwargs.get("mime_type")
|
||||
overwrite: bool = kwargs.get("overwrite", False)
|
||||
|
||||
if not filename:
|
||||
return ErrorResponse(
|
||||
message="Please provide a filename",
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
if not params.content_base64:
|
||||
if not content_b64:
|
||||
return ErrorResponse(
|
||||
message="Please provide content_base64",
|
||||
session_id=session_id,
|
||||
@@ -511,7 +459,7 @@ class WriteWorkspaceFileTool(BaseTool):
|
||||
|
||||
# Decode content
|
||||
try:
|
||||
content = base64.b64decode(params.content_base64)
|
||||
content = base64.b64decode(content_b64)
|
||||
except Exception:
|
||||
return ErrorResponse(
|
||||
message="Invalid base64-encoded content",
|
||||
@@ -528,7 +476,7 @@ class WriteWorkspaceFileTool(BaseTool):
|
||||
|
||||
try:
|
||||
# Virus scan
|
||||
await scan_content_safe(content, filename=params.filename)
|
||||
await scan_content_safe(content, filename=filename)
|
||||
|
||||
workspace = await get_or_create_workspace(user_id)
|
||||
# Pass session_id for session-scoped file access
|
||||
@@ -536,10 +484,10 @@ class WriteWorkspaceFileTool(BaseTool):
|
||||
|
||||
file_record = await manager.write_file(
|
||||
content=content,
|
||||
filename=params.filename,
|
||||
path=params.path,
|
||||
mime_type=params.mime_type,
|
||||
overwrite=params.overwrite,
|
||||
filename=filename,
|
||||
path=path,
|
||||
mime_type=mime_type,
|
||||
overwrite=overwrite,
|
||||
)
|
||||
|
||||
return WorkspaceWriteResponse(
|
||||
@@ -609,9 +557,8 @@ class DeleteWorkspaceFileTool(BaseTool):
|
||||
self,
|
||||
user_id: str | None,
|
||||
session: ChatSession,
|
||||
**kwargs: Any,
|
||||
**kwargs,
|
||||
) -> ToolResponseBase:
|
||||
params = DeleteWorkspaceFileInput(**kwargs)
|
||||
session_id = session.session_id
|
||||
|
||||
if not user_id:
|
||||
@@ -620,7 +567,10 @@ class DeleteWorkspaceFileTool(BaseTool):
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
if not params.file_id and not params.path:
|
||||
file_id: Optional[str] = kwargs.get("file_id")
|
||||
path: Optional[str] = kwargs.get("path")
|
||||
|
||||
if not file_id and not path:
|
||||
return ErrorResponse(
|
||||
message="Please provide either file_id or path",
|
||||
session_id=session_id,
|
||||
@@ -633,15 +583,15 @@ class DeleteWorkspaceFileTool(BaseTool):
|
||||
|
||||
# Determine the file_id to delete
|
||||
target_file_id: str
|
||||
if params.file_id:
|
||||
target_file_id = params.file_id
|
||||
if file_id:
|
||||
target_file_id = file_id
|
||||
else:
|
||||
# path is guaranteed to be non-None here due to the check above
|
||||
assert params.path is not None
|
||||
file_info = await manager.get_file_info_by_path(params.path)
|
||||
assert path is not None
|
||||
file_info = await manager.get_file_info_by_path(path)
|
||||
if file_info is None:
|
||||
return ErrorResponse(
|
||||
message=f"File not found at path: {params.path}",
|
||||
message=f"File not found at path: {path}",
|
||||
session_id=session_id,
|
||||
)
|
||||
target_file_id = file_info.id
|
||||
|
||||
@@ -19,7 +19,10 @@ from backend.data.graph import GraphSettings
from backend.data.includes import AGENT_PRESET_INCLUDE, library_agent_include
from backend.data.model import CredentialsMetaInput
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.webhooks.graph_lifecycle_hooks import on_graph_activate
from backend.integrations.webhooks.graph_lifecycle_hooks import (
    on_graph_activate,
    on_graph_deactivate,
)
from backend.util.clients import get_scheduler_client
from backend.util.exceptions import DatabaseError, InvalidInputError, NotFoundError
from backend.util.json import SafeJson
@@ -537,6 +540,92 @@ async def update_agent_version_in_library(
    return library_model.LibraryAgent.from_db(lib)


async def create_graph_in_library(
    graph: graph_db.Graph,
    user_id: str,
) -> tuple[graph_db.GraphModel, library_model.LibraryAgent]:
    """Create a new graph and add it to the user's library."""
    graph.version = 1
    graph_model = graph_db.make_graph_model(graph, user_id)
    graph_model.reassign_ids(user_id=user_id, reassign_graph_id=True)

    created_graph = await graph_db.create_graph(graph_model, user_id)

    library_agents = await create_library_agent(
        graph=created_graph,
        user_id=user_id,
        sensitive_action_safe_mode=True,
        create_library_agents_for_sub_graphs=False,
    )

    if created_graph.is_active:
        created_graph = await on_graph_activate(created_graph, user_id=user_id)

    return created_graph, library_agents[0]


async def update_graph_in_library(
    graph: graph_db.Graph,
    user_id: str,
) -> tuple[graph_db.GraphModel, library_model.LibraryAgent]:
    """Create a new version of an existing graph and update the library entry."""
    existing_versions = await graph_db.get_graph_all_versions(graph.id, user_id)
    current_active_version = (
        next((v for v in existing_versions if v.is_active), None)
        if existing_versions
        else None
    )
    graph.version = (
        max(v.version for v in existing_versions) + 1 if existing_versions else 1
    )

    graph_model = graph_db.make_graph_model(graph, user_id)
    graph_model.reassign_ids(user_id=user_id, reassign_graph_id=False)

    created_graph = await graph_db.create_graph(graph_model, user_id)

    library_agent = await get_library_agent_by_graph_id(user_id, created_graph.id)
    if not library_agent:
        raise NotFoundError(f"Library agent not found for graph {created_graph.id}")

    library_agent = await update_library_agent_version_and_settings(
        user_id, created_graph
    )

    if created_graph.is_active:
        created_graph = await on_graph_activate(created_graph, user_id=user_id)
        await graph_db.set_graph_active_version(
            graph_id=created_graph.id,
            version=created_graph.version,
            user_id=user_id,
        )
        if current_active_version:
            await on_graph_deactivate(current_active_version, user_id=user_id)

    return created_graph, library_agent


async def update_library_agent_version_and_settings(
    user_id: str, agent_graph: graph_db.GraphModel
) -> library_model.LibraryAgent:
    """Update library agent to point to new graph version and sync settings."""
    library = await update_agent_version_in_library(
        user_id, agent_graph.id, agent_graph.version
    )
    updated_settings = GraphSettings.from_graph(
        graph=agent_graph,
        hitl_safe_mode=library.settings.human_in_the_loop_safe_mode,
        sensitive_action_safe_mode=library.settings.sensitive_action_safe_mode,
    )
    if updated_settings != library.settings:
        library = await update_library_agent(
            library_agent_id=library.id,
            user_id=user_id,
            settings=updated_settings,
        )
    return library


async def update_library_agent(
    library_agent_id: str,
    user_id: str,

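For reference — an editor's sketch, not part of the diff — the version-numbering rule update_graph_in_library applies is a plain monotonic bump over all prior versions, active or not:

# Sketch only: monotonic version bump as used above.
def next_version(existing_versions: list[int]) -> int:
    # First save -> version 1; otherwise one past the highest existing version.
    return max(existing_versions) + 1 if existing_versions else 1

assert next_version([]) == 1
assert next_version([1, 2, 5]) == 6  # gaps are fine; only the max matters
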
@@ -101,7 +101,6 @@ from backend.util.timezone_utils import (
from backend.util.virus_scanner import scan_content_safe

from .library import db as library_db
from .library import model as library_model
from .store.model import StoreAgentDetails


@@ -823,18 +822,16 @@ async def update_graph(
    graph: graph_db.Graph,
    user_id: Annotated[str, Security(get_user_id)],
) -> graph_db.GraphModel:
    # Sanity check
    if graph.id and graph.id != graph_id:
        raise HTTPException(400, detail="Graph ID does not match ID in URI")

    # Determine new version
    existing_versions = await graph_db.get_graph_all_versions(graph_id, user_id=user_id)
    if not existing_versions:
        raise HTTPException(404, detail=f"Graph #{graph_id} not found")
    latest_version_number = max(g.version for g in existing_versions)
    graph.version = latest_version_number + 1

    graph.version = max(g.version for g in existing_versions) + 1
    current_active_version = next((v for v in existing_versions if v.is_active), None)

    graph = graph_db.make_graph_model(graph, user_id)
    graph.reassign_ids(user_id=user_id, reassign_graph_id=False)
    graph.validate_graph(for_run=False)
@@ -842,27 +839,23 @@ async def update_graph(
    new_graph_version = await graph_db.create_graph(graph, user_id=user_id)

    if new_graph_version.is_active:
        # Keep the library agent up to date with the new active version
        await _update_library_agent_version_and_settings(user_id, new_graph_version)

        # Handle activation of the new graph first to ensure continuity
        await library_db.update_library_agent_version_and_settings(
            user_id, new_graph_version
        )
        new_graph_version = await on_graph_activate(new_graph_version, user_id=user_id)
        # Ensure new version is the only active version
        await graph_db.set_graph_active_version(
            graph_id=graph_id, version=new_graph_version.version, user_id=user_id
        )
        if current_active_version:
            # Handle deactivation of the previously active version
            await on_graph_deactivate(current_active_version, user_id=user_id)

    # Fetch new graph version *with sub-graphs* (needed for credentials input schema)
    new_graph_version_with_subgraphs = await graph_db.get_graph(
        graph_id,
        new_graph_version.version,
        user_id=user_id,
        include_subgraphs=True,
    )
    assert new_graph_version_with_subgraphs  # make type checker happy
    assert new_graph_version_with_subgraphs
    return new_graph_version_with_subgraphs


@@ -900,33 +893,15 @@ async def set_graph_active_version(
    )

    # Keep the library agent up to date with the new active version
    await _update_library_agent_version_and_settings(user_id, new_active_graph)
    await library_db.update_library_agent_version_and_settings(
        user_id, new_active_graph
    )

    if current_active_graph and current_active_graph.version != new_active_version:
        # Handle deactivation of the previously active version
        await on_graph_deactivate(current_active_graph, user_id=user_id)


async def _update_library_agent_version_and_settings(
    user_id: str, agent_graph: graph_db.GraphModel
) -> library_model.LibraryAgent:
    library = await library_db.update_agent_version_in_library(
        user_id, agent_graph.id, agent_graph.version
    )
    updated_settings = GraphSettings.from_graph(
        graph=agent_graph,
        hitl_safe_mode=library.settings.human_in_the_loop_safe_mode,
        sensitive_action_safe_mode=library.settings.sensitive_action_safe_mode,
    )
    if updated_settings != library.settings:
        library = await library_db.update_library_agent(
            library_agent_id=library.id,
            user_id=user_id,
            settings=updated_settings,
        )
    return library


@v1_router.patch(
    path="/graphs/{graph_id}/settings",
    summary="Update graph settings",

28
autogpt_platform/backend/backend/blocks/elevenlabs/_auth.py
Normal file
@@ -0,0 +1,28 @@
"""ElevenLabs integration blocks - test credentials and shared utilities."""

from typing import Literal

from pydantic import SecretStr

from backend.data.model import APIKeyCredentials, CredentialsMetaInput
from backend.integrations.providers import ProviderName

TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="elevenlabs",
    api_key=SecretStr("mock-elevenlabs-api-key"),
    title="Mock ElevenLabs API key",
    expires_at=None,
)

TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}

ElevenLabsCredentials = APIKeyCredentials
ElevenLabsCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.ELEVENLABS], Literal["api_key"]
]

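A hedged sketch — not part of the diff — of how a block's input schema would typically consume these aliases; CredentialsField is assumed to be the platform's usual helper from backend.data.model:

# Sketch only; assumes CredentialsField exists alongside the imports above.
class Input(BlockSchemaInput):
    credentials: ElevenLabsCredentialsInput = CredentialsField(
        description="ElevenLabs API key used by this block",
    )
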
77
autogpt_platform/backend/backend/blocks/encoder_block.py
Normal file
@@ -0,0 +1,77 @@
"""Text encoding block for converting special characters to escape sequences."""

import codecs

from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.model import SchemaField


class TextEncoderBlock(Block):
    """
    Encodes a string by converting special characters into escape sequences.

    This block is the inverse of TextDecoderBlock. It takes text containing
    special characters (like newlines, tabs, etc.) and converts them into
    their escape sequence representations (e.g., newline becomes \\n).
    """

    class Input(BlockSchemaInput):
        """Input schema for TextEncoderBlock."""

        text: str = SchemaField(
            description="A string containing special characters to be encoded",
            placeholder="Your text with newlines and quotes to encode",
        )

    class Output(BlockSchemaOutput):
        """Output schema for TextEncoderBlock."""

        encoded_text: str = SchemaField(
            description="The encoded text with special characters converted to escape sequences"
        )
        error: str = SchemaField(description="Error message if encoding fails")

    def __init__(self):
        super().__init__(
            id="5185f32e-4b65-4ecf-8fbb-873f003f09d6",
            description="Encodes a string by converting special characters into escape sequences",
            categories={BlockCategory.TEXT},
            input_schema=TextEncoderBlock.Input,
            output_schema=TextEncoderBlock.Output,
            test_input={
                "text": """Hello
World!
This is a "quoted" string."""
            },
            test_output=[
                (
                    "encoded_text",
                    """Hello\\nWorld!\\nThis is a "quoted" string.""",
                )
            ],
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        """
        Encode the input text by converting special characters to escape sequences.

        Args:
            input_data: The input containing the text to encode.
            **kwargs: Additional keyword arguments (unused).

        Yields:
            The encoded text with escape sequences, or an error message if encoding fails.
        """
        try:
            encoded_text = codecs.encode(input_data.text, "unicode_escape").decode(
                "utf-8"
            )
            yield "encoded_text", encoded_text
        except Exception as e:
            yield "error", f"Encoding error: {str(e)}"
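A quick illustration — not part of the diff — of what the unicode_escape codec above actually produces: newlines become a literal backslash-n, and non-ASCII characters become \uXXXX escapes:

import codecs

encoded = codecs.encode("Hello 世界\n", "unicode_escape").decode("utf-8")
print(encoded)  # Hello \u4e16\u754c\n  -- a literal backslash-n, not a newline
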
@@ -162,8 +162,16 @@ class LinearClient:
                "searchTerm": team_name,
            }

            team_id = await self.query(query, variables)
            return team_id["teams"]["nodes"][0]["id"]
            result = await self.query(query, variables)
            nodes = result["teams"]["nodes"]

            if not nodes:
                raise LinearAPIException(
                    f"Team '{team_name}' not found. Check the team name or key and try again.",
                    status_code=404,
                )

            return nodes[0]["id"]
        except LinearAPIException as e:
            raise e

@@ -240,17 +248,44 @@ class LinearClient:
        except LinearAPIException as e:
            raise e

    async def try_search_issues(self, term: str) -> list[Issue]:
    async def try_search_issues(
        self,
        term: str,
        max_results: int = 10,
        team_id: str | None = None,
    ) -> list[Issue]:
        try:
            query = """
            query SearchIssues($term: String!, $includeComments: Boolean!) {
                searchIssues(term: $term, includeComments: $includeComments) {
            query SearchIssues(
                $term: String!,
                $first: Int,
                $teamId: String
            ) {
                searchIssues(
                    term: $term,
                    first: $first,
                    teamId: $teamId
                ) {
                    nodes {
                        id
                        identifier
                        title
                        description
                        priority
                        createdAt
                        state {
                            id
                            name
                            type
                        }
                        project {
                            id
                            name
                        }
                        assignee {
                            id
                            name
                        }
                    }
                }
            }
@@ -258,7 +293,8 @@ class LinearClient:

            variables: dict[str, Any] = {
                "term": term,
                "includeComments": True,
                "first": max_results,
                "teamId": team_id,
            }

            issues = await self.query(query, variables)

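A small usage sketch — not part of the diff — of the extended search signature; passing team_id=None sends a null $teamId, which (given the nullable variable declaration above) is presumably treated as "no team filter":

# Sketch only; client construction elided.
issues = await client.try_search_issues(term="payment bug", max_results=25)
team_id = await client.try_get_team_by_name(team_name="Internal")
team_issues = await client.try_search_issues(term="payment bug", team_id=team_id)
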
@@ -17,7 +17,7 @@ from ._config import (
    LinearScope,
    linear,
)
from .models import CreateIssueResponse, Issue
from .models import CreateIssueResponse, Issue, State


class LinearCreateIssueBlock(Block):
@@ -135,9 +135,20 @@ class LinearSearchIssuesBlock(Block):
            description="Linear credentials with read permissions",
            required_scopes={LinearScope.READ},
        )
        max_results: int = SchemaField(
            description="Maximum number of results to return",
            default=10,
            ge=1,
            le=100,
        )
        team_name: str | None = SchemaField(
            description="Optional team name to filter results (e.g., 'Internal', 'Open Source')",
            default=None,
        )

    class Output(BlockSchemaOutput):
        issues: list[Issue] = SchemaField(description="List of issues")
        error: str = SchemaField(description="Error message if the search failed")

    def __init__(self):
        super().__init__(
@@ -145,8 +156,11 @@ class LinearSearchIssuesBlock(Block):
            description="Searches for issues on Linear",
            input_schema=self.Input,
            output_schema=self.Output,
            categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING},
            test_input={
                "term": "Test issue",
                "max_results": 10,
                "team_name": None,
                "credentials": TEST_CREDENTIALS_INPUT_OAUTH,
            },
            test_credentials=TEST_CREDENTIALS_OAUTH,
@@ -156,10 +170,14 @@ class LinearSearchIssuesBlock(Block):
                [
                    Issue(
                        id="abc123",
                        identifier="abc123",
                        identifier="TST-123",
                        title="Test issue",
                        description="Test description",
                        priority=1,
                        state=State(
                            id="state1", name="In Progress", type="started"
                        ),
                        createdAt="2026-01-15T10:00:00.000Z",
                    )
                ],
            )
@@ -168,10 +186,12 @@ class LinearSearchIssuesBlock(Block):
                "search_issues": lambda *args, **kwargs: [
                    Issue(
                        id="abc123",
                        identifier="abc123",
                        identifier="TST-123",
                        title="Test issue",
                        description="Test description",
                        priority=1,
                        state=State(id="state1", name="In Progress", type="started"),
                        createdAt="2026-01-15T10:00:00.000Z",
                    )
                ]
            },
@@ -181,10 +201,22 @@ class LinearSearchIssuesBlock(Block):
    async def search_issues(
        credentials: OAuth2Credentials | APIKeyCredentials,
        term: str,
        max_results: int = 10,
        team_name: str | None = None,
    ) -> list[Issue]:
        client = LinearClient(credentials=credentials)
        response: list[Issue] = await client.try_search_issues(term=term)
        return response

        # Resolve team name to ID if provided
        # Raises LinearAPIException with descriptive message if team not found
        team_id: str | None = None
        if team_name:
            team_id = await client.try_get_team_by_name(team_name=team_name)

        return await client.try_search_issues(
            term=term,
            max_results=max_results,
            team_id=team_id,
        )

    async def run(
        self,
@@ -196,7 +228,10 @@ class LinearSearchIssuesBlock(Block):
        """Execute the issue search"""
        try:
            issues = await self.search_issues(
                credentials=credentials, term=input_data.term
                credentials=credentials,
                term=input_data.term,
                max_results=input_data.max_results,
                team_name=input_data.team_name,
            )
            yield "issues", issues
        except LinearAPIException as e:

@@ -36,12 +36,21 @@ class Project(BaseModel):
    content: str | None = None


class State(BaseModel):
    id: str
    name: str
    type: str | None = (
        None  # Workflow state type (e.g., "triage", "backlog", "started", "completed", "canceled")
    )


class Issue(BaseModel):
    id: str
    identifier: str
    title: str
    description: str | None
    priority: int
    state: State | None = None
    project: Project | None = None
    createdAt: str | None = None
    comments: list[Comment] | None = None

@@ -115,6 +115,7 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
    CLAUDE_4_5_OPUS = "claude-opus-4-5-20251101"
    CLAUDE_4_5_SONNET = "claude-sonnet-4-5-20250929"
    CLAUDE_4_5_HAIKU = "claude-haiku-4-5-20251001"
    CLAUDE_4_6_OPUS = "claude-opus-4-6"
    CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
    # AI/ML API models
    AIML_API_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo"
@@ -270,6 +271,9 @@ MODEL_METADATA = {
    LlmModel.CLAUDE_4_SONNET: ModelMetadata(
        "anthropic", 200000, 64000, "Claude Sonnet 4", "Anthropic", "Anthropic", 2
    ),  # claude-4-sonnet-20250514
    LlmModel.CLAUDE_4_6_OPUS: ModelMetadata(
        "anthropic", 200000, 128000, "Claude Opus 4.6", "Anthropic", "Anthropic", 3
    ),  # claude-opus-4-6
    LlmModel.CLAUDE_4_5_OPUS: ModelMetadata(
        "anthropic", 200000, 64000, "Claude Opus 4.5", "Anthropic", "Anthropic", 3
    ),  # claude-opus-4-5-20251101

@@ -1,246 +0,0 @@
import os
import tempfile
from typing import Optional

from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.fx.Loop import Loop
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class MediaDurationBlock(Block):

    class Input(BlockSchemaInput):
        media_in: MediaFileType = SchemaField(
            description="Media input (URL, data URI, or local path)."
        )
        is_video: bool = SchemaField(
            description="Whether the media is a video (True) or audio (False).",
            default=True,
        )

    class Output(BlockSchemaOutput):
        duration: float = SchemaField(
            description="Duration of the media file (in seconds)."
        )

    def __init__(self):
        super().__init__(
            id="d8b91fd4-da26-42d4-8ecb-8b196c6d84b6",
            description="Block to get the duration of a media file.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=MediaDurationBlock.Input,
            output_schema=MediaDurationBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        # 1) Store the input media locally
        local_media_path = await store_media_file(
            file=input_data.media_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        assert execution_context.graph_exec_id is not None
        media_abspath = get_exec_file_path(
            execution_context.graph_exec_id, local_media_path
        )

        # 2) Load the clip
        if input_data.is_video:
            clip = VideoFileClip(media_abspath)
        else:
            clip = AudioFileClip(media_abspath)

        yield "duration", clip.duration


class LoopVideoBlock(Block):
    """
    Block for looping (repeating) a video clip until a given duration or number of loops.
    """

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="The input video (can be a URL, data URI, or local path)."
        )
        # Provide EITHER a `duration` or `n_loops` or both. We'll demonstrate `duration`.
        duration: Optional[float] = SchemaField(
            description="Target duration (in seconds) to loop the video to. If omitted, defaults to no looping.",
            default=None,
            ge=0.0,
        )
        n_loops: Optional[int] = SchemaField(
            description="Number of times to repeat the video. If omitted, defaults to 1 (no repeat).",
            default=None,
            ge=1,
        )

    class Output(BlockSchemaOutput):
        video_out: str = SchemaField(
            description="Looped video returned either as a relative path or a data URI."
        )

    def __init__(self):
        super().__init__(
            id="8bf9eef6-5451-4213-b265-25306446e94b",
            description="Block to loop a video to a given duration or number of repeats.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=LoopVideoBlock.Input,
            output_schema=LoopVideoBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        assert execution_context.graph_exec_id is not None
        assert execution_context.node_exec_id is not None
        graph_exec_id = execution_context.graph_exec_id
        node_exec_id = execution_context.node_exec_id

        # 1) Store the input video locally
        local_video_path = await store_media_file(
            file=input_data.video_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        input_abspath = get_exec_file_path(graph_exec_id, local_video_path)

        # 2) Load the clip
        clip = VideoFileClip(input_abspath)

        # 3) Apply the loop effect
        looped_clip = clip
        if input_data.duration:
            # Loop until we reach the specified duration
            looped_clip = looped_clip.with_effects([Loop(duration=input_data.duration)])
        elif input_data.n_loops:
            looped_clip = looped_clip.with_effects([Loop(n=input_data.n_loops)])
        else:
            raise ValueError("Either 'duration' or 'n_loops' must be provided.")

        assert isinstance(looped_clip, VideoFileClip)

        # 4) Save the looped output
        output_filename = MediaFileType(
            f"{node_exec_id}_looped_{os.path.basename(local_video_path)}"
        )
        output_abspath = get_exec_file_path(graph_exec_id, output_filename)

        looped_clip = looped_clip.with_audio(clip.audio)
        looped_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac")

        # Return output - for_block_output returns workspace:// if available, else data URI
        video_out = await store_media_file(
            file=output_filename,
            execution_context=execution_context,
            return_format="for_block_output",
        )

        yield "video_out", video_out


class AddAudioToVideoBlock(Block):
    """
    Block that adds (attaches) an audio track to an existing video.
    Optionally scale the volume of the new track.
    """

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="Video input (URL, data URI, or local path)."
        )
        audio_in: MediaFileType = SchemaField(
            description="Audio input (URL, data URI, or local path)."
        )
        volume: float = SchemaField(
            description="Volume scale for the newly attached audio track (1.0 = original).",
            default=1.0,
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Final video (with attached audio), as a path or data URI."
        )

    def __init__(self):
        super().__init__(
            id="3503748d-62b6-4425-91d6-725b064af509",
            description="Block to attach an audio file to a video file using moviepy.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=AddAudioToVideoBlock.Input,
            output_schema=AddAudioToVideoBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        assert execution_context.graph_exec_id is not None
        assert execution_context.node_exec_id is not None
        graph_exec_id = execution_context.graph_exec_id
        node_exec_id = execution_context.node_exec_id

        # 1) Store the inputs locally
        local_video_path = await store_media_file(
            file=input_data.video_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        local_audio_path = await store_media_file(
            file=input_data.audio_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

        abs_temp_dir = os.path.join(tempfile.gettempdir(), "exec_file", graph_exec_id)
        video_abspath = os.path.join(abs_temp_dir, local_video_path)
        audio_abspath = os.path.join(abs_temp_dir, local_audio_path)

        # 2) Load video + audio with moviepy
        video_clip = VideoFileClip(video_abspath)
        audio_clip = AudioFileClip(audio_abspath)
        # Optionally scale volume
        if input_data.volume != 1.0:
            audio_clip = audio_clip.with_volume_scaled(input_data.volume)

        # 3) Attach the new audio track
        final_clip = video_clip.with_audio(audio_clip)

        # 4) Write to output file
        output_filename = MediaFileType(
            f"{node_exec_id}_audio_attached_{os.path.basename(local_video_path)}"
        )
        output_abspath = os.path.join(abs_temp_dir, output_filename)
        final_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac")

        # 5) Return output - for_block_output returns workspace:// if available, else data URI
        video_out = await store_media_file(
            file=output_filename,
            execution_context=execution_context,
            return_format="for_block_output",
        )

        yield "video_out", video_out
@@ -0,0 +1,77 @@
import pytest

from backend.blocks.encoder_block import TextEncoderBlock


@pytest.mark.asyncio
async def test_text_encoder_basic():
    """Test basic encoding of newlines and special characters."""
    block = TextEncoderBlock()
    result = []
    async for output in block.run(TextEncoderBlock.Input(text="Hello\nWorld")):
        result.append(output)

    assert len(result) == 1
    assert result[0][0] == "encoded_text"
    assert result[0][1] == "Hello\\nWorld"


@pytest.mark.asyncio
async def test_text_encoder_multiple_escapes():
    """Test encoding of multiple escape sequences."""
    block = TextEncoderBlock()
    result = []
    async for output in block.run(
        TextEncoderBlock.Input(text="Line1\nLine2\tTabbed\rCarriage")
    ):
        result.append(output)

    assert len(result) == 1
    assert result[0][0] == "encoded_text"
    assert "\\n" in result[0][1]
    assert "\\t" in result[0][1]
    assert "\\r" in result[0][1]


@pytest.mark.asyncio
async def test_text_encoder_unicode():
    """Test that unicode characters are handled correctly."""
    block = TextEncoderBlock()
    result = []
    async for output in block.run(TextEncoderBlock.Input(text="Hello 世界\n")):
        result.append(output)

    assert len(result) == 1
    assert result[0][0] == "encoded_text"
    # Unicode characters should be escaped as \uXXXX sequences
    assert "\\n" in result[0][1]


@pytest.mark.asyncio
async def test_text_encoder_empty_string():
    """Test encoding of an empty string."""
    block = TextEncoderBlock()
    result = []
    async for output in block.run(TextEncoderBlock.Input(text="")):
        result.append(output)

    assert len(result) == 1
    assert result[0][0] == "encoded_text"
    assert result[0][1] == ""


@pytest.mark.asyncio
async def test_text_encoder_error_handling():
    """Test that encoding errors are handled gracefully."""
    from unittest.mock import patch

    block = TextEncoderBlock()
    result = []

    with patch("codecs.encode", side_effect=Exception("Mocked encoding error")):
        async for output in block.run(TextEncoderBlock.Input(text="test")):
            result.append(output)

    assert len(result) == 1
    assert result[0][0] == "error"
    assert "Mocked encoding error" in result[0][1]
37
autogpt_platform/backend/backend/blocks/video/__init__.py
Normal file
@@ -0,0 +1,37 @@
"""Video editing blocks for AutoGPT Platform.

This module provides blocks for:
- Downloading videos from URLs (YouTube, Vimeo, news sites, direct links)
- Clipping/trimming video segments
- Concatenating multiple videos
- Adding text overlays
- Adding AI-generated narration
- Getting media duration
- Looping videos
- Adding audio to videos

Dependencies:
- yt-dlp: For video downloading
- moviepy: For video editing operations
- elevenlabs: For AI narration (optional)
"""

from backend.blocks.video.add_audio import AddAudioToVideoBlock
from backend.blocks.video.clip import VideoClipBlock
from backend.blocks.video.concat import VideoConcatBlock
from backend.blocks.video.download import VideoDownloadBlock
from backend.blocks.video.duration import MediaDurationBlock
from backend.blocks.video.loop import LoopVideoBlock
from backend.blocks.video.narration import VideoNarrationBlock
from backend.blocks.video.text_overlay import VideoTextOverlayBlock

__all__ = [
    "AddAudioToVideoBlock",
    "LoopVideoBlock",
    "MediaDurationBlock",
    "VideoClipBlock",
    "VideoConcatBlock",
    "VideoDownloadBlock",
    "VideoNarrationBlock",
    "VideoTextOverlayBlock",
]
131
autogpt_platform/backend/backend/blocks/video/_utils.py
Normal file
@@ -0,0 +1,131 @@
"""Shared utilities for video blocks."""

from __future__ import annotations

import logging
import os
import re
import subprocess
from pathlib import Path

logger = logging.getLogger(__name__)

# Known operation tags added by video blocks
_VIDEO_OPS = (
    r"(?:clip|overlay|narrated|looped|concat|audio_attached|with_audio|narration)"
)

# Matches: {node_exec_id}_{operation}_ where node_exec_id contains a UUID
_BLOCK_PREFIX_RE = re.compile(
    r"^[a-zA-Z0-9_-]*"
    r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
    r"[a-zA-Z0-9_-]*"
    r"_" + _VIDEO_OPS + r"_"
)

# Matches: a lone {node_exec_id}_ prefix (no operation keyword, e.g. download output)
_UUID_PREFIX_RE = re.compile(
    r"^[a-zA-Z0-9_-]*"
    r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
    r"[a-zA-Z0-9_-]*_"
)


def extract_source_name(input_path: str, max_length: int = 50) -> str:
    """Extract the original source filename by stripping block-generated prefixes.

    Iteratively removes {node_exec_id}_{operation}_ prefixes that accumulate
    when chaining video blocks, recovering the original human-readable name.

    Safe for plain filenames (no UUID -> no stripping).
    Falls back to "video" if everything is stripped.
    """
    stem = Path(input_path).stem

    # Pass 1: strip {node_exec_id}_{operation}_ prefixes iteratively
    while _BLOCK_PREFIX_RE.match(stem):
        stem = _BLOCK_PREFIX_RE.sub("", stem, count=1)

    # Pass 2: strip a lone {node_exec_id}_ prefix (e.g. from download block)
    if _UUID_PREFIX_RE.match(stem):
        stem = _UUID_PREFIX_RE.sub("", stem, count=1)

    if not stem:
        return "video"

    return stem[:max_length]


def get_video_codecs(output_path: str) -> tuple[str, str]:
    """Get appropriate video and audio codecs based on output file extension.

    Args:
        output_path: Path to the output file (used to determine extension)

    Returns:
        Tuple of (video_codec, audio_codec)

    Codec mappings:
    - .mp4: H.264 + AAC (universal compatibility)
    - .webm: VP8 + Vorbis (web streaming)
    - .mkv: H.264 + AAC (container supports many codecs)
    - .mov: H.264 + AAC (Apple QuickTime, widely compatible)
    - .m4v: H.264 + AAC (Apple iTunes/devices)
    - .avi: MPEG-4 + MP3 (legacy Windows)
    """
    ext = os.path.splitext(output_path)[1].lower()

    codec_map: dict[str, tuple[str, str]] = {
        ".mp4": ("libx264", "aac"),
        ".webm": ("libvpx", "libvorbis"),
        ".mkv": ("libx264", "aac"),
        ".mov": ("libx264", "aac"),
        ".m4v": ("libx264", "aac"),
        ".avi": ("mpeg4", "libmp3lame"),
    }

    return codec_map.get(ext, ("libx264", "aac"))


def strip_chapters_inplace(video_path: str) -> None:
    """Strip chapter metadata from a media file in-place using ffmpeg.

    MoviePy 2.x crashes with IndexError when parsing files with embedded
    chapter metadata (https://github.com/Zulko/moviepy/issues/2419).
    This strips chapters without re-encoding.

    Args:
        video_path: Absolute path to the media file to strip chapters from.
    """
    base, ext = os.path.splitext(video_path)
    tmp_path = base + ".tmp" + ext
    try:
        result = subprocess.run(
            [
                "ffmpeg",
                "-y",
                "-i",
                video_path,
                "-map_chapters",
                "-1",
                "-codec",
                "copy",
                tmp_path,
            ],
            capture_output=True,
            text=True,
            timeout=300,
        )
        if result.returncode != 0:
            logger.warning(
                "ffmpeg chapter strip failed (rc=%d): %s",
                result.returncode,
                result.stderr,
            )
            return
        os.replace(tmp_path, video_path)
    except FileNotFoundError:
        logger.warning("ffmpeg not found; skipping chapter strip")
    finally:
        if os.path.exists(tmp_path):
            os.unlink(tmp_path)
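Two quick illustrations — not part of the diff — of the helpers above. First, the prefix stripping in extract_source_name (the UUIDs here are made up):

# Sketch only: a clip prefix plus a download prefix collapse back to the source name.
name = extract_source_name(
    "9d3a1b2c-0000-4000-8000-0123456789ab_clip_"
    "8c2f0e1d-0000-4000-8000-0123456789ab_interview.mp4"
)
assert name == "interview"

Second, the shell equivalent of the strip_chapters_inplace subprocess call — a stream copy with chapter metadata dropped, no re-encode:

ffmpeg -y -i input.mp4 -map_chapters -1 -codec copy output.mp4
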
113
autogpt_platform/backend/backend/blocks/video/add_audio.py
Normal file
@@ -0,0 +1,113 @@
"""AddAudioToVideoBlock - Attach an audio track to a video file."""

from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import extract_source_name, strip_chapters_inplace
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class AddAudioToVideoBlock(Block):
    """Add (attach) an audio track to an existing video."""

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="Video input (URL, data URI, or local path)."
        )
        audio_in: MediaFileType = SchemaField(
            description="Audio input (URL, data URI, or local path)."
        )
        volume: float = SchemaField(
            description="Volume scale for the newly attached audio track (1.0 = original).",
            default=1.0,
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Final video (with attached audio), as a path or data URI."
        )

    def __init__(self):
        super().__init__(
            id="3503748d-62b6-4425-91d6-725b064af509",
            description="Block to attach an audio file to a video file using moviepy.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=AddAudioToVideoBlock.Input,
            output_schema=AddAudioToVideoBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        assert execution_context.graph_exec_id is not None
        assert execution_context.node_exec_id is not None
        graph_exec_id = execution_context.graph_exec_id
        node_exec_id = execution_context.node_exec_id

        # 1) Store the inputs locally
        local_video_path = await store_media_file(
            file=input_data.video_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        local_audio_path = await store_media_file(
            file=input_data.audio_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

        video_abspath = get_exec_file_path(graph_exec_id, local_video_path)
        audio_abspath = get_exec_file_path(graph_exec_id, local_audio_path)

        # 2) Load video + audio with moviepy
        strip_chapters_inplace(video_abspath)
        strip_chapters_inplace(audio_abspath)
        video_clip = None
        audio_clip = None
        final_clip = None
        try:
            video_clip = VideoFileClip(video_abspath)
            audio_clip = AudioFileClip(audio_abspath)
            # Optionally scale volume
            if input_data.volume != 1.0:
                audio_clip = audio_clip.with_volume_scaled(input_data.volume)

            # 3) Attach the new audio track
            final_clip = video_clip.with_audio(audio_clip)

            # 4) Write to output file
            source = extract_source_name(local_video_path)
            output_filename = MediaFileType(f"{node_exec_id}_with_audio_{source}.mp4")
            output_abspath = get_exec_file_path(graph_exec_id, output_filename)
            final_clip.write_videofile(
                output_abspath, codec="libx264", audio_codec="aac"
            )
        finally:
            if final_clip:
                final_clip.close()
            if audio_clip:
                audio_clip.close()
            if video_clip:
                video_clip.close()

        # 5) Return output - for_block_output returns workspace:// if available, else data URI
        video_out = await store_media_file(
            file=output_filename,
            execution_context=execution_context,
            return_format="for_block_output",
        )

        yield "video_out", video_out
167
autogpt_platform/backend/backend/blocks/video/clip.py
Normal file
@@ -0,0 +1,167 @@
"""VideoClipBlock - Extract a segment from a video file."""

from typing import Literal

from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import (
    extract_source_name,
    get_video_codecs,
    strip_chapters_inplace,
)
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class VideoClipBlock(Block):
    """Extract a time segment from a video."""

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="Input video (URL, data URI, or local path)"
        )
        start_time: float = SchemaField(description="Start time in seconds", ge=0.0)
        end_time: float = SchemaField(description="End time in seconds", ge=0.0)
        output_format: Literal["mp4", "webm", "mkv", "mov"] = SchemaField(
            description="Output format", default="mp4", advanced=True
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Clipped video file (path or data URI)"
        )
        duration: float = SchemaField(description="Clip duration in seconds")

    def __init__(self):
        super().__init__(
            id="8f539119-e580-4d86-ad41-86fbcb22abb1",
            description="Extract a time segment from a video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={
                "video_in": "/tmp/test.mp4",
                "start_time": 0.0,
                "end_time": 10.0,
            },
            test_output=[("video_out", str), ("duration", float)],
            test_mock={
                "_clip_video": lambda *args: 10.0,
                "_store_input_video": lambda *args, **kwargs: "test.mp4",
                "_store_output_video": lambda *args, **kwargs: "clip_test.mp4",
            },
        )

    async def _store_input_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store input video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

    async def _store_output_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store output video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_block_output",
        )

    def _clip_video(
        self,
        video_abspath: str,
        output_abspath: str,
        start_time: float,
        end_time: float,
    ) -> float:
        """Extract a clip from a video. Extracted for testability."""
        clip = None
        subclip = None
        try:
            strip_chapters_inplace(video_abspath)
            clip = VideoFileClip(video_abspath)
            subclip = clip.subclipped(start_time, end_time)
            video_codec, audio_codec = get_video_codecs(output_abspath)
            subclip.write_videofile(
                output_abspath, codec=video_codec, audio_codec=audio_codec
            )
            return subclip.duration
        finally:
            if subclip:
                subclip.close()
            if clip:
                clip.close()

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        node_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        # Validate time range
        if input_data.end_time <= input_data.start_time:
            raise BlockExecutionError(
                message=f"end_time ({input_data.end_time}) must be greater than start_time ({input_data.start_time})",
                block_name=self.name,
                block_id=str(self.id),
            )

        try:
            assert execution_context.graph_exec_id is not None

            # Store the input video locally
            local_video_path = await self._store_input_video(
                execution_context, input_data.video_in
            )
            video_abspath = get_exec_file_path(
                execution_context.graph_exec_id, local_video_path
            )

            # Build output path
            source = extract_source_name(local_video_path)
            output_filename = MediaFileType(
                f"{node_exec_id}_clip_{source}.{input_data.output_format}"
            )
            output_abspath = get_exec_file_path(
                execution_context.graph_exec_id, output_filename
            )

            duration = self._clip_video(
                video_abspath,
                output_abspath,
                input_data.start_time,
                input_data.end_time,
            )

            # Return as workspace path or data URI based on context
            video_out = await self._store_output_video(
                execution_context, output_filename
            )

            yield "video_out", video_out
            yield "duration", duration

        except BlockExecutionError:
            raise
        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to clip video: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
227
autogpt_platform/backend/backend/blocks/video/concat.py
Normal file
@@ -0,0 +1,227 @@
|
||||
"""VideoConcatBlock - Concatenate multiple video clips into one."""
|
||||
|
||||
from typing import Literal
|
||||
|
||||
from moviepy import concatenate_videoclips
|
||||
from moviepy.video.fx import CrossFadeIn, CrossFadeOut, FadeIn, FadeOut
|
||||
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import (
    extract_source_name,
    get_video_codecs,
    strip_chapters_inplace,
)
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class VideoConcatBlock(Block):
    """Merge multiple video clips into one continuous video."""

    class Input(BlockSchemaInput):
        videos: list[MediaFileType] = SchemaField(
            description="List of video files to concatenate (in order)"
        )
        transition: Literal["none", "crossfade", "fade_black"] = SchemaField(
            description="Transition between clips", default="none"
        )
        transition_duration: int = SchemaField(
            description="Transition duration in seconds",
            default=1,
            ge=0,
            advanced=True,
        )
        output_format: Literal["mp4", "webm", "mkv", "mov"] = SchemaField(
            description="Output format", default="mp4", advanced=True
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Concatenated video file (path or data URI)"
        )
        total_duration: float = SchemaField(description="Total duration in seconds")

    def __init__(self):
        super().__init__(
            id="9b0f531a-1118-487f-aeec-3fa63ea8900a",
            description="Merge multiple video clips into one continuous video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={
                "videos": ["/tmp/a.mp4", "/tmp/b.mp4"],
            },
            test_output=[
                ("video_out", str),
                ("total_duration", float),
            ],
            test_mock={
                "_concat_videos": lambda *args: 20.0,
                "_store_input_video": lambda *args, **kwargs: "test.mp4",
                "_store_output_video": lambda *args, **kwargs: "concat_test.mp4",
            },
        )

    async def _store_input_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store input video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

    async def _store_output_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store output video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_block_output",
        )

    def _concat_videos(
        self,
        video_abspaths: list[str],
        output_abspath: str,
        transition: str,
        transition_duration: int,
    ) -> float:
        """Concatenate videos. Extracted for testability.

        Returns:
            Total duration of the concatenated video.
        """
        clips = []
        faded_clips = []
        final = None
        try:
            # Load clips
            for v in video_abspaths:
                strip_chapters_inplace(v)
                clips.append(VideoFileClip(v))

            # Validate transition_duration against shortest clip
            if transition in {"crossfade", "fade_black"} and transition_duration > 0:
                min_duration = min(c.duration for c in clips)
                if transition_duration >= min_duration:
                    raise BlockExecutionError(
                        message=(
                            f"transition_duration ({transition_duration}s) must be "
                            f"shorter than the shortest clip ({min_duration:.2f}s)"
                        ),
                        block_name=self.name,
                        block_id=str(self.id),
                    )

            if transition == "crossfade":
                for i, clip in enumerate(clips):
                    effects = []
                    if i > 0:
                        effects.append(CrossFadeIn(transition_duration))
                    if i < len(clips) - 1:
                        effects.append(CrossFadeOut(transition_duration))
                    if effects:
                        clip = clip.with_effects(effects)
                    faded_clips.append(clip)
                final = concatenate_videoclips(
                    faded_clips,
                    method="compose",
                    padding=-transition_duration,
                )
            elif transition == "fade_black":
                for clip in clips:
                    faded = clip.with_effects(
                        [FadeIn(transition_duration), FadeOut(transition_duration)]
                    )
                    faded_clips.append(faded)
                final = concatenate_videoclips(faded_clips)
            else:
                final = concatenate_videoclips(clips)

            video_codec, audio_codec = get_video_codecs(output_abspath)
            final.write_videofile(
                output_abspath, codec=video_codec, audio_codec=audio_codec
            )

            return final.duration
        finally:
            if final:
                final.close()
            for clip in faded_clips:
                clip.close()
            for clip in clips:
                clip.close()

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        node_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        # Validate minimum clips
        if len(input_data.videos) < 2:
            raise BlockExecutionError(
                message="At least 2 videos are required for concatenation",
                block_name=self.name,
                block_id=str(self.id),
            )

        try:
            assert execution_context.graph_exec_id is not None

            # Store all input videos locally
            video_abspaths = []
            for video in input_data.videos:
                local_path = await self._store_input_video(execution_context, video)
                video_abspaths.append(
                    get_exec_file_path(execution_context.graph_exec_id, local_path)
                )

            # Build output path
            source = (
                extract_source_name(video_abspaths[0]) if video_abspaths else "video"
            )
            output_filename = MediaFileType(
                f"{node_exec_id}_concat_{source}.{input_data.output_format}"
            )
            output_abspath = get_exec_file_path(
                execution_context.graph_exec_id, output_filename
            )

            total_duration = self._concat_videos(
                video_abspaths,
                output_abspath,
                input_data.transition,
                input_data.transition_duration,
            )

            # Return as workspace path or data URI based on context
            video_out = await self._store_output_video(
                execution_context, output_filename
            )

            yield "video_out", video_out
            yield "total_duration", total_duration

        except BlockExecutionError:
            raise
        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to concatenate videos: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
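Aside: the crossfade branch relies on concatenate_videoclips(method="compose", padding=-t), which starts each clip t seconds before the previous one ends so the CrossFadeIn/CrossFadeOut effects overlap rather than play back-to-back. A minimal sketch, assuming MoviePy v2 and two hypothetical local files a.mp4 and b.mp4:

# Minimal crossfade timing sketch (assumes MoviePy v2; a.mp4/b.mp4 are
# hypothetical). With negative padding, clip b begins t seconds before clip a
# ends, so the total duration is a.duration + b.duration - t.
from moviepy import concatenate_videoclips
from moviepy.video.fx.CrossFadeIn import CrossFadeIn
from moviepy.video.fx.CrossFadeOut import CrossFadeOut
from moviepy.video.io.VideoFileClip import VideoFileClip

t = 1  # transition duration in seconds
a = VideoFileClip("a.mp4").with_effects([CrossFadeOut(t)])
b = VideoFileClip("b.mp4").with_effects([CrossFadeIn(t)])
merged = concatenate_videoclips([a, b], method="compose", padding=-t)
merged.write_videofile("merged.mp4", codec="libx264", audio_codec="aac")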
172
autogpt_platform/backend/backend/blocks/video/download.py
Normal file
@@ -0,0 +1,172 @@
"""VideoDownloadBlock - Download video from URL (YouTube, Vimeo, news sites, direct links)."""

import os
import typing
from typing import Literal

import yt_dlp

if typing.TYPE_CHECKING:
    from yt_dlp import _Params

from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class VideoDownloadBlock(Block):
    """Download video from URL using yt-dlp."""

    class Input(BlockSchemaInput):
        url: str = SchemaField(
            description="URL of the video to download (YouTube, Vimeo, direct link, etc.)",
            placeholder="https://www.youtube.com/watch?v=...",
        )
        quality: Literal["best", "1080p", "720p", "480p", "audio_only"] = SchemaField(
            description="Video quality preference", default="720p"
        )
        output_format: Literal["mp4", "webm", "mkv"] = SchemaField(
            description="Output video format", default="mp4", advanced=True
        )

    class Output(BlockSchemaOutput):
        video_file: MediaFileType = SchemaField(
            description="Downloaded video (path or data URI)"
        )
        duration: float = SchemaField(description="Video duration in seconds")
        title: str = SchemaField(description="Video title from source")
        source_url: str = SchemaField(description="Original source URL")

    def __init__(self):
        super().__init__(
            id="c35daabb-cd60-493b-b9ad-51f1fe4b50c4",
            description="Download video from URL (YouTube, Vimeo, news sites, direct links)",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            disabled=True,  # Disable until we can sandbox yt-dlp and handle security implications
            test_input={
                "url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
                "quality": "480p",
            },
            test_output=[
                ("video_file", str),
                ("duration", float),
                ("title", str),
                ("source_url", str),
            ],
            test_mock={
                "_download_video": lambda *args: (
                    "video.mp4",
                    212.0,
                    "Test Video",
                ),
                "_store_output_video": lambda *args, **kwargs: "video.mp4",
            },
        )

    async def _store_output_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store output video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_block_output",
        )

    def _get_format_string(self, quality: str) -> str:
        formats = {
            "best": "bestvideo+bestaudio/best",
            "1080p": "bestvideo[height<=1080]+bestaudio/best[height<=1080]",
            "720p": "bestvideo[height<=720]+bestaudio/best[height<=720]",
            "480p": "bestvideo[height<=480]+bestaudio/best[height<=480]",
            "audio_only": "bestaudio/best",
        }
        return formats.get(quality, formats["720p"])

    def _download_video(
        self,
        url: str,
        quality: str,
        output_format: str,
        output_dir: str,
        node_exec_id: str,
    ) -> tuple[str, float, str]:
        """Download video. Extracted for testability."""
        output_template = os.path.join(
            output_dir, f"{node_exec_id}_%(title).50s.%(ext)s"
        )

        ydl_opts: "_Params" = {
            "format": f"{self._get_format_string(quality)}/best",
            "outtmpl": output_template,
            "merge_output_format": output_format,
            "quiet": True,
            "no_warnings": True,
        }

        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            info = ydl.extract_info(url, download=True)
            video_path = ydl.prepare_filename(info)

            # Handle format conversion in filename
            if not video_path.endswith(f".{output_format}"):
                video_path = video_path.rsplit(".", 1)[0] + f".{output_format}"

            # Return just the filename, not the full path
            filename = os.path.basename(video_path)

            return (
                filename,
                info.get("duration") or 0.0,
                info.get("title") or "Unknown",
            )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        node_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        try:
            assert execution_context.graph_exec_id is not None

            # Get the exec file directory
            output_dir = get_exec_file_path(execution_context.graph_exec_id, "")
            os.makedirs(output_dir, exist_ok=True)

            filename, duration, title = self._download_video(
                input_data.url,
                input_data.quality,
                input_data.output_format,
                output_dir,
                node_exec_id,
            )

            # Return as workspace path or data URI based on context
            video_out = await self._store_output_video(
                execution_context, MediaFileType(filename)
            )

            yield "video_file", video_out
            yield "duration", duration
            yield "title", title
            yield "source_url", input_data.url

        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to download video: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
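Aside: the strings built by _get_format_string use yt-dlp's standard format-selector syntax, where "bestvideo[height<=720]+bestaudio" means "best video stream capped at 720p, merged with the best audio stream" and each "/" introduces a fallback selector. A small standalone sketch (real yt-dlp API; the URL is only an example):

# Preview what yt-dlp would select for a 720p request without downloading.
import yt_dlp

ydl_opts = {
    "format": "bestvideo[height<=720]+bestaudio/best[height<=720]/best",
    "outtmpl": "%(title).50s.%(ext)s",  # truncate the title to 50 characters
    "merge_output_format": "mp4",
    "quiet": True,
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    # download=False resolves metadata only, useful to inspect the selection
    info = ydl.extract_info(
        "https://www.youtube.com/watch?v=dQw4w9WgXcQ", download=False
    )
    print(info.get("title"), info.get("duration"))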
77
autogpt_platform/backend/backend/blocks/video/duration.py
Normal file
@@ -0,0 +1,77 @@
"""MediaDurationBlock - Get the duration of a media file."""

from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import strip_chapters_inplace
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class MediaDurationBlock(Block):
    """Get the duration of a media file (video or audio)."""

    class Input(BlockSchemaInput):
        media_in: MediaFileType = SchemaField(
            description="Media input (URL, data URI, or local path)."
        )
        is_video: bool = SchemaField(
            description="Whether the media is a video (True) or audio (False).",
            default=True,
        )

    class Output(BlockSchemaOutput):
        duration: float = SchemaField(
            description="Duration of the media file (in seconds)."
        )

    def __init__(self):
        super().__init__(
            id="d8b91fd4-da26-42d4-8ecb-8b196c6d84b6",
            description="Block to get the duration of a media file.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=MediaDurationBlock.Input,
            output_schema=MediaDurationBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        # 1) Store the input media locally
        local_media_path = await store_media_file(
            file=input_data.media_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        assert execution_context.graph_exec_id is not None
        media_abspath = get_exec_file_path(
            execution_context.graph_exec_id, local_media_path
        )

        # 2) Strip chapters to avoid MoviePy crash, then load the clip
        strip_chapters_inplace(media_abspath)
        clip = None
        try:
            if input_data.is_video:
                clip = VideoFileClip(media_abspath)
            else:
                clip = AudioFileClip(media_abspath)

            duration = clip.duration
        finally:
            if clip:
                clip.close()

        yield "duration", duration
115
autogpt_platform/backend/backend/blocks/video/loop.py
Normal file
@@ -0,0 +1,115 @@
"""LoopVideoBlock - Loop a video to a given duration or number of repeats."""

from typing import Optional

from moviepy.video.fx.Loop import Loop
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import extract_source_name, strip_chapters_inplace
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class LoopVideoBlock(Block):
    """Loop (repeat) a video clip until a given duration or number of loops."""

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="The input video (can be a URL, data URI, or local path)."
        )
        duration: Optional[float] = SchemaField(
            description="Target duration (in seconds) to loop the video to. Either duration or n_loops must be provided.",
            default=None,
            ge=0.0,
            le=3600.0,  # Max 1 hour to prevent disk exhaustion
        )
        n_loops: Optional[int] = SchemaField(
            description="Number of times to repeat the video. Either n_loops or duration must be provided.",
            default=None,
            ge=1,
            le=10,  # Max 10 loops to prevent disk exhaustion
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Looped video returned either as a relative path or a data URI."
        )

    def __init__(self):
        super().__init__(
            id="8bf9eef6-5451-4213-b265-25306446e94b",
            description="Block to loop a video to a given duration or number of repeats.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=LoopVideoBlock.Input,
            output_schema=LoopVideoBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        assert execution_context.graph_exec_id is not None
        assert execution_context.node_exec_id is not None
        graph_exec_id = execution_context.graph_exec_id
        node_exec_id = execution_context.node_exec_id

        # 1) Store the input video locally
        local_video_path = await store_media_file(
            file=input_data.video_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        input_abspath = get_exec_file_path(graph_exec_id, local_video_path)

        # 2) Load the clip
        strip_chapters_inplace(input_abspath)
        clip = None
        looped_clip = None
        try:
            clip = VideoFileClip(input_abspath)

            # 3) Apply the loop effect
            if input_data.duration:
                # Loop until we reach the specified duration
                looped_clip = clip.with_effects([Loop(duration=input_data.duration)])
            elif input_data.n_loops:
                looped_clip = clip.with_effects([Loop(n=input_data.n_loops)])
            else:
                raise ValueError("Either 'duration' or 'n_loops' must be provided.")

            assert isinstance(looped_clip, VideoFileClip)

            # 4) Save the looped output
            source = extract_source_name(local_video_path)
            output_filename = MediaFileType(f"{node_exec_id}_looped_{source}.mp4")
            output_abspath = get_exec_file_path(graph_exec_id, output_filename)

            looped_clip = looped_clip.with_audio(clip.audio)
            looped_clip.write_videofile(
                output_abspath, codec="libx264", audio_codec="aac"
            )
        finally:
            if looped_clip:
                looped_clip.close()
            if clip:
                clip.close()

        # Return output - for_block_output returns workspace:// if available, else data URI
        video_out = await store_media_file(
            file=output_filename,
            execution_context=execution_context,
            return_format="for_block_output",
        )

        yield "video_out", video_out
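Aside: the two Loop modes behave differently, which is easy to miss. A quick sketch (assumes MoviePy v2; input.mp4 is a hypothetical local file):

# Loop(duration=...) repeats the clip until the target duration, truncating
# the final repeat; Loop(n=...) plays the clip exactly n times, so the result
# is n * clip.duration seconds long.
from moviepy.video.fx.Loop import Loop
from moviepy.video.io.VideoFileClip import VideoFileClip

clip = VideoFileClip("input.mp4")  # suppose clip.duration == 4.0

by_duration = clip.with_effects([Loop(duration=10.0)])  # 2.5 repeats -> 10.0s
by_count = clip.with_effects([Loop(n=3)])               # 3 repeats   -> 12.0s
print(by_duration.duration, by_count.duration)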
267
autogpt_platform/backend/backend/blocks/video/narration.py
Normal file
@@ -0,0 +1,267 @@
"""VideoNarrationBlock - Generate AI voice narration and add to video."""

import os
from typing import Literal

from elevenlabs import ElevenLabs
from moviepy import CompositeAudioClip
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.elevenlabs._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    ElevenLabsCredentials,
    ElevenLabsCredentialsInput,
)
from backend.blocks.video._utils import (
    extract_source_name,
    get_video_codecs,
    strip_chapters_inplace,
)
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import CredentialsField, SchemaField
from backend.util.exceptions import BlockExecutionError
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class VideoNarrationBlock(Block):
    """Generate AI narration and add to video."""

    class Input(BlockSchemaInput):
        credentials: ElevenLabsCredentialsInput = CredentialsField(
            description="ElevenLabs API key for voice synthesis"
        )
        video_in: MediaFileType = SchemaField(
            description="Input video (URL, data URI, or local path)"
        )
        script: str = SchemaField(description="Narration script text")
        voice_id: str = SchemaField(
            description="ElevenLabs voice ID", default="21m00Tcm4TlvDq8ikWAM"  # Rachel
        )
        model_id: Literal[
            "eleven_multilingual_v2",
            "eleven_flash_v2_5",
            "eleven_turbo_v2_5",
            "eleven_turbo_v2",
        ] = SchemaField(
            description="ElevenLabs TTS model",
            default="eleven_multilingual_v2",
        )
        mix_mode: Literal["replace", "mix", "ducking"] = SchemaField(
            description="How to combine with original audio. 'ducking' applies stronger attenuation than 'mix'.",
            default="ducking",
        )
        narration_volume: float = SchemaField(
            description="Narration volume (0.0 to 2.0)",
            default=1.0,
            ge=0.0,
            le=2.0,
            advanced=True,
        )
        original_volume: float = SchemaField(
            description="Original audio volume when mixing (0.0 to 1.0)",
            default=0.3,
            ge=0.0,
            le=1.0,
            advanced=True,
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Video with narration (path or data URI)"
        )
        audio_file: MediaFileType = SchemaField(
            description="Generated audio file (path or data URI)"
        )

    def __init__(self):
        super().__init__(
            id="3d036b53-859c-4b17-9826-ca340f736e0e",
            description="Generate AI narration and add to video",
            categories={BlockCategory.MULTIMEDIA, BlockCategory.AI},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={
                "video_in": "/tmp/test.mp4",
                "script": "Hello world",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[("video_out", str), ("audio_file", str)],
            test_mock={
                "_generate_narration_audio": lambda *args: b"mock audio content",
                "_add_narration_to_video": lambda *args: None,
                "_store_input_video": lambda *args, **kwargs: "test.mp4",
                "_store_output_video": lambda *args, **kwargs: "narrated_test.mp4",
            },
        )

    async def _store_input_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store input video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

    async def _store_output_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store output video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_block_output",
        )

    def _generate_narration_audio(
        self, api_key: str, script: str, voice_id: str, model_id: str
    ) -> bytes:
        """Generate narration audio via ElevenLabs API."""
        client = ElevenLabs(api_key=api_key)
        audio_generator = client.text_to_speech.convert(
            voice_id=voice_id,
            text=script,
            model_id=model_id,
        )
        # The SDK returns a generator, collect all chunks
        return b"".join(audio_generator)

    def _add_narration_to_video(
        self,
        video_abspath: str,
        audio_abspath: str,
        output_abspath: str,
        mix_mode: str,
        narration_volume: float,
        original_volume: float,
    ) -> None:
        """Add narration audio to video. Extracted for testability."""
        video = None
        final = None
        narration_original = None
        narration_scaled = None
        original = None

        try:
            strip_chapters_inplace(video_abspath)
            video = VideoFileClip(video_abspath)
            narration_original = AudioFileClip(audio_abspath)
            narration_scaled = narration_original.with_volume_scaled(narration_volume)
            narration = narration_scaled

            if mix_mode == "replace":
                final_audio = narration
            elif mix_mode == "mix":
                if video.audio:
                    original = video.audio.with_volume_scaled(original_volume)
                    final_audio = CompositeAudioClip([original, narration])
                else:
                    final_audio = narration
            else:  # ducking - apply stronger attenuation
                if video.audio:
                    # Ducking uses a much lower volume for original audio
                    ducking_volume = original_volume * 0.3
                    original = video.audio.with_volume_scaled(ducking_volume)
                    final_audio = CompositeAudioClip([original, narration])
                else:
                    final_audio = narration

            final = video.with_audio(final_audio)
            video_codec, audio_codec = get_video_codecs(output_abspath)
            final.write_videofile(
                output_abspath, codec=video_codec, audio_codec=audio_codec
            )

        finally:
            if original:
                original.close()
            if narration_scaled:
                narration_scaled.close()
            if narration_original:
                narration_original.close()
            if final:
                final.close()
            if video:
                video.close()

    async def run(
        self,
        input_data: Input,
        *,
        credentials: ElevenLabsCredentials,
        execution_context: ExecutionContext,
        node_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        try:
            assert execution_context.graph_exec_id is not None

            # Store the input video locally
            local_video_path = await self._store_input_video(
                execution_context, input_data.video_in
            )
            video_abspath = get_exec_file_path(
                execution_context.graph_exec_id, local_video_path
            )

            # Generate narration audio via ElevenLabs
            audio_content = self._generate_narration_audio(
                credentials.api_key.get_secret_value(),
                input_data.script,
                input_data.voice_id,
                input_data.model_id,
            )

            # Save audio to exec file path
            audio_filename = MediaFileType(f"{node_exec_id}_narration.mp3")
            audio_abspath = get_exec_file_path(
                execution_context.graph_exec_id, audio_filename
            )
            os.makedirs(os.path.dirname(audio_abspath), exist_ok=True)
            with open(audio_abspath, "wb") as f:
                f.write(audio_content)

            # Add narration to video
            source = extract_source_name(local_video_path)
            output_filename = MediaFileType(f"{node_exec_id}_narrated_{source}.mp4")
            output_abspath = get_exec_file_path(
                execution_context.graph_exec_id, output_filename
            )

            self._add_narration_to_video(
                video_abspath,
                audio_abspath,
                output_abspath,
                input_data.mix_mode,
                input_data.narration_volume,
                input_data.original_volume,
            )

            # Return as workspace path or data URI based on context
            video_out = await self._store_output_video(
                execution_context, output_filename
            )
            audio_out = await self._store_output_video(
                execution_context, audio_filename
            )

            yield "video_out", video_out
            yield "audio_file", audio_out

        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to add narration: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
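Aside, on the mixing gains above: "mix" scales the original track by original_volume, while "ducking" multiplies it by a further fixed 0.3, so with the default original_volume=0.3 the original audio sits at 9% of its source level underneath the narration. In arithmetic:

# Back-of-envelope check on the gain values used by _add_narration_to_video.
original_volume = 0.3

mix_gain = original_volume            # "mix":     0.30
ducking_gain = original_volume * 0.3  # "ducking": 0.09
print(f"mix keeps {mix_gain:.0%}, ducking keeps {ducking_gain:.0%} of the original")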
231
autogpt_platform/backend/backend/blocks/video/text_overlay.py
Normal file
@@ -0,0 +1,231 @@
"""VideoTextOverlayBlock - Add text overlay to video."""

from typing import Literal

from moviepy import CompositeVideoClip, TextClip
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import (
    extract_source_name,
    get_video_codecs,
    strip_chapters_inplace,
)
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class VideoTextOverlayBlock(Block):
    """Add text overlay/caption to video."""

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="Input video (URL, data URI, or local path)"
        )
        text: str = SchemaField(description="Text to overlay on video")
        position: Literal[
            "top",
            "center",
            "bottom",
            "top-left",
            "top-right",
            "bottom-left",
            "bottom-right",
        ] = SchemaField(description="Position of text on screen", default="bottom")
        start_time: float | None = SchemaField(
            description="When to show text (seconds). None = entire video",
            default=None,
            advanced=True,
        )
        end_time: float | None = SchemaField(
            description="When to hide text (seconds). None = until end",
            default=None,
            advanced=True,
        )
        font_size: int = SchemaField(
            description="Font size", default=48, ge=12, le=200, advanced=True
        )
        font_color: str = SchemaField(
            description="Font color (hex or name)", default="white", advanced=True
        )
        bg_color: str | None = SchemaField(
            description="Background color behind text (None for transparent)",
            default=None,
            advanced=True,
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Video with text overlay (path or data URI)"
        )

    def __init__(self):
        super().__init__(
            id="8ef14de6-cc90-430a-8cfa-3a003be92454",
            description="Add text overlay/caption to video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            disabled=True,  # Disable until we can lockdown imagemagick security policy
            test_input={"video_in": "/tmp/test.mp4", "text": "Hello World"},
            test_output=[("video_out", str)],
            test_mock={
                "_add_text_overlay": lambda *args: None,
                "_store_input_video": lambda *args, **kwargs: "test.mp4",
                "_store_output_video": lambda *args, **kwargs: "overlay_test.mp4",
            },
        )

    async def _store_input_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store input video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

    async def _store_output_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store output video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_block_output",
        )

    def _add_text_overlay(
        self,
        video_abspath: str,
        output_abspath: str,
        text: str,
        position: str,
        start_time: float | None,
        end_time: float | None,
        font_size: int,
        font_color: str,
        bg_color: str | None,
    ) -> None:
        """Add text overlay to video. Extracted for testability."""
        video = None
        final = None
        txt_clip = None
        try:
            strip_chapters_inplace(video_abspath)
            video = VideoFileClip(video_abspath)

            txt_clip = TextClip(
                text=text,
                font_size=font_size,
                color=font_color,
                bg_color=bg_color,
            )

            # Position mapping
            pos_map = {
                "top": ("center", "top"),
                "center": ("center", "center"),
                "bottom": ("center", "bottom"),
                "top-left": ("left", "top"),
                "top-right": ("right", "top"),
                "bottom-left": ("left", "bottom"),
                "bottom-right": ("right", "bottom"),
            }

            txt_clip = txt_clip.with_position(pos_map[position])

            # Set timing
            start = start_time or 0
            end = end_time or video.duration
            duration = max(0, end - start)
            txt_clip = txt_clip.with_start(start).with_end(end).with_duration(duration)

            final = CompositeVideoClip([video, txt_clip])
            video_codec, audio_codec = get_video_codecs(output_abspath)
            final.write_videofile(
                output_abspath, codec=video_codec, audio_codec=audio_codec
            )

        finally:
            if txt_clip:
                txt_clip.close()
            if final:
                final.close()
            if video:
                video.close()

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        node_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        # Validate time range if both are provided
        if (
            input_data.start_time is not None
            and input_data.end_time is not None
            and input_data.end_time <= input_data.start_time
        ):
            raise BlockExecutionError(
                message=f"end_time ({input_data.end_time}) must be greater than start_time ({input_data.start_time})",
                block_name=self.name,
                block_id=str(self.id),
            )

        try:
            assert execution_context.graph_exec_id is not None

            # Store the input video locally
            local_video_path = await self._store_input_video(
                execution_context, input_data.video_in
            )
            video_abspath = get_exec_file_path(
                execution_context.graph_exec_id, local_video_path
            )

            # Build output path
            source = extract_source_name(local_video_path)
            output_filename = MediaFileType(f"{node_exec_id}_overlay_{source}.mp4")
            output_abspath = get_exec_file_path(
                execution_context.graph_exec_id, output_filename
            )

            self._add_text_overlay(
                video_abspath,
                output_abspath,
                input_data.text,
                input_data.position,
                input_data.start_time,
                input_data.end_time,
                input_data.font_size,
                input_data.font_color,
                input_data.bg_color,
            )

            # Return as workspace path or data URI based on context
            video_out = await self._store_output_video(
                execution_context, output_filename
            )

            yield "video_out", video_out

        except BlockExecutionError:
            raise
        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to add text overlay: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
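Aside: a minimal sketch of the composition performed above, assuming MoviePy v2 with ImageMagick available for TextClip rendering (input.mp4 is hypothetical). The (x, y) position tuple mirrors a pos_map entry, and with_start/with_duration bound when the caption is visible in the composite:

# Overlay a bottom-centered caption visible from t=1s to t=4s.
from moviepy import CompositeVideoClip, TextClip
from moviepy.video.io.VideoFileClip import VideoFileClip

video = VideoFileClip("input.mp4")
caption = (
    TextClip(text="Hello World", font_size=48, color="white")
    .with_position(("center", "bottom"))  # pos_map["bottom"]
    .with_start(1.0)
    .with_duration(3.0)
)
CompositeVideoClip([video, caption]).write_videofile(
    "captioned.mp4", codec="libx264", audio_codec="aac"
)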
@@ -165,10 +165,13 @@ class TranscribeYoutubeVideoBlock(Block):
         credentials: WebshareProxyCredentials,
         **kwargs,
     ) -> BlockOutput:
-        video_id = self.extract_video_id(input_data.youtube_url)
-        yield "video_id", video_id
-
-        transcript = self.get_transcript(video_id, credentials)
-        transcript_text = self.format_transcript(transcript=transcript)
-
-        yield "transcript", transcript_text
+        try:
+            video_id = self.extract_video_id(input_data.youtube_url)
+            transcript = self.get_transcript(video_id, credentials)
+            transcript_text = self.format_transcript(transcript=transcript)
+
+            # Only yield after all operations succeed
+            yield "video_id", video_id
+            yield "transcript", transcript_text
+        except Exception as e:
+            yield "error", str(e)
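Why the move matters: a block is an incremental generator, so yielding "video_id" before the transcript fetch could hand consumers a partial result when the fetch later fails. A sketch with hypothetical stand-in helpers (names are illustrative, not the block's real internals):

def extract_video_id(url: str) -> str:
    # stand-in for TranscribeYoutubeVideoBlock.extract_video_id
    return url.rsplit("v=", 1)[-1]


def fetch_transcript(video_id: str) -> str:
    # stand-in for get_transcript; may raise on network/proxy errors
    if not video_id:
        raise ValueError("no video id")
    return f"transcript for {video_id}"


def run_block(url: str):
    try:
        video_id = extract_video_id(url)
        transcript = fetch_transcript(video_id)
        # Only yield after all fallible operations succeed
        yield "video_id", video_id
        yield "transcript", transcript
    except Exception as e:
        yield "error", str(e)


print(dict(run_block("https://www.youtube.com/watch?v=dQw4w9WgXcQ")))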
@@ -36,12 +36,14 @@ from backend.blocks.replicate.replicate_block import ReplicateModelBlock
 from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
 from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock
 from backend.blocks.text_to_speech_block import UnrealTextToSpeechBlock
+from backend.blocks.video.narration import VideoNarrationBlock
 from backend.data.block import Block, BlockCost, BlockCostType
 from backend.integrations.credentials_store import (
     aiml_api_credentials,
     anthropic_credentials,
     apollo_credentials,
     did_credentials,
+    elevenlabs_credentials,
     enrichlayer_credentials,
     groq_credentials,
     ideogram_credentials,

@@ -78,6 +80,7 @@ MODEL_COST: dict[LlmModel, int] = {
     LlmModel.CLAUDE_4_1_OPUS: 21,
     LlmModel.CLAUDE_4_OPUS: 21,
     LlmModel.CLAUDE_4_SONNET: 5,
+    LlmModel.CLAUDE_4_6_OPUS: 14,
     LlmModel.CLAUDE_4_5_HAIKU: 4,
     LlmModel.CLAUDE_4_5_OPUS: 14,
     LlmModel.CLAUDE_4_5_SONNET: 9,

@@ -639,4 +642,16 @@ BLOCK_COSTS: dict[Type[Block], list[BlockCost]] = {
             },
         ),
     ],
+    VideoNarrationBlock: [
+        BlockCost(
+            cost_amount=5,  # ElevenLabs TTS cost
+            cost_filter={
+                "credentials": {
+                    "id": elevenlabs_credentials.id,
+                    "provider": elevenlabs_credentials.provider,
+                    "type": elevenlabs_credentials.type,
+                }
+            },
+        )
+    ],
 }
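Aside: a loose sketch of how a cost_filter entry like the one above might be matched, assuming (as the nested structure suggests) a recursive dict-subset comparison against the block's input data. This helper is illustrative only, not the platform's actual matcher:

def filter_matches(cost_filter: dict, input_data: dict) -> bool:
    # Every key in the filter must be present in the inputs with an equal
    # value; nested dicts are compared recursively as subsets.
    for key, expected in cost_filter.items():
        actual = input_data.get(key)
        if isinstance(expected, dict):
            if not isinstance(actual, dict) or not filter_matches(expected, actual):
                return False
        elif actual != expected:
            return False
    return True


cost_filter = {"credentials": {"id": "f4a8b6c2-...", "provider": "elevenlabs"}}
inputs = {"credentials": {"id": "f4a8b6c2-...", "provider": "elevenlabs", "type": "api_key"}}
assert filter_matches(cost_filter, inputs)  # charge the 5-credit ElevenLabs cost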
@@ -134,6 +134,16 @@ async def test_block_credit_reset(server: SpinTestServer):
     month1 = datetime.now(timezone.utc).replace(month=1, day=1)
     user_credit.time_now = lambda: month1
 
+    # IMPORTANT: Set updatedAt to December of previous year to ensure it's
+    # in a different month than month1 (January). This fixes a timing bug
+    # where if the test runs in early February, 35 days ago would be January,
+    # matching the mocked month1 and preventing the refill from triggering.
+    dec_previous_year = month1.replace(year=month1.year - 1, month=12, day=15)
+    await UserBalance.prisma().update(
+        where={"userId": DEFAULT_USER_ID},
+        data={"updatedAt": dec_previous_year},
+    )
+
     # First call in month 1 should trigger refill
     balance = await user_credit.get_credits(DEFAULT_USER_ID)
     assert balance == REFILL_VALUE  # Should get 1000 credits
@@ -19,7 +19,6 @@ from typing import (
     cast,
     get_args,
 )
-from urllib.parse import urlparse
 from uuid import uuid4
 
 from prisma.enums import CreditTransactionType, OnboardingStep

@@ -42,6 +41,7 @@ from typing_extensions import TypedDict
 
 from backend.integrations.providers import ProviderName
 from backend.util.json import loads as json_loads
+from backend.util.request import parse_url
 from backend.util.settings import Secrets
 
 # Type alias for any provider name (including custom ones)

@@ -397,19 +397,25 @@ class HostScopedCredentials(_BaseCredentials):
     def matches_url(self, url: str) -> bool:
         """Check if this credential should be applied to the given URL."""
 
-        parsed_url = urlparse(url)
-        # Extract hostname without port
-        request_host = parsed_url.hostname
+        request_host, request_port = _extract_host_from_url(url)
+        cred_scope_host, cred_scope_port = _extract_host_from_url(self.host)
         if not request_host:
             return False
 
-        # Simple host matching - exact match or wildcard subdomain match
-        if self.host == request_host:
+        # If a port is specified in credential host, the request host port must match
+        if cred_scope_port is not None and request_port != cred_scope_port:
+            return False
+        # Non-standard ports are only allowed if explicitly specified in credential host
+        elif cred_scope_port is None and request_port not in (80, 443, None):
+            return False
+
+        # Simple host matching
+        if cred_scope_host == request_host:
             return True
 
         # Support wildcard matching (e.g., "*.example.com" matches "api.example.com")
-        if self.host.startswith("*."):
-            domain = self.host[2:]  # Remove "*."
+        if cred_scope_host.startswith("*."):
+            domain = cred_scope_host[2:]  # Remove "*."
             return request_host.endswith(f".{domain}") or request_host == domain
 
         return False

@@ -551,13 +557,13 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
     )
 
 
-def _extract_host_from_url(url: str) -> str:
-    """Extract host from URL for grouping host-scoped credentials."""
+def _extract_host_from_url(url: str) -> tuple[str, int | None]:
+    """Extract host and port from URL for grouping host-scoped credentials."""
     try:
-        parsed = urlparse(url)
-        return parsed.hostname or url
+        parsed = parse_url(url)
+        return parsed.hostname or url, parsed.port
     except Exception:
-        return ""
+        return "", None
 
 
 class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):

@@ -606,7 +612,7 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
             providers = frozenset(
                 [cast(CP, "http")]
                 + [
-                    cast(CP, _extract_host_from_url(str(value)))
+                    cast(CP, parse_url(str(value)).netloc)
                     for value in field.discriminator_values
                 ]
             )
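Aside: a condensed, standalone restatement of the port rules introduced above, assuming the same semantics as HostScopedCredentials.matches_url (wildcards omitted): a bare credential host matches only default ports (80/443/none), while an explicit ":port" in the credential host must match exactly.

from urllib.parse import urlparse


def host_port(value: str) -> tuple[str | None, int | None]:
    # Accept both bare "host[:port]" values and full URLs.
    parsed = urlparse(value if "://" in value else f"http://{value}")
    return parsed.hostname, parsed.port


def matches(cred_host: str, url: str) -> bool:
    req_host, req_port = host_port(url)
    cred_h, cred_p = host_port(cred_host)
    if cred_p is not None and req_port != cred_p:
        return False  # explicit port in credential host must match
    if cred_p is None and req_port not in (80, 443, None):
        return False  # non-standard ports need an explicit credential port
    return cred_h == req_host


assert not matches("localhost", "http://localhost:8080/api")   # non-standard port
assert matches("localhost:8080", "http://localhost:8080/api")  # explicit port
assert matches("localhost", "http://localhost/simple")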
@@ -79,10 +79,23 @@ class TestHostScopedCredentials:
             headers={"Authorization": SecretStr("Bearer token")},
         )
 
-        assert creds.matches_url("http://localhost:8080/api/v1")
+        # Non-standard ports require explicit port in credential host
+        assert not creds.matches_url("http://localhost:8080/api/v1")
         assert creds.matches_url("https://localhost:443/secure/endpoint")
         assert creds.matches_url("http://localhost/simple")
 
+    def test_matches_url_with_explicit_port(self):
+        """Test URL matching with explicit port in credential host."""
+        creds = HostScopedCredentials(
+            provider="custom",
+            host="localhost:8080",
+            headers={"Authorization": SecretStr("Bearer token")},
+        )
+
+        assert creds.matches_url("http://localhost:8080/api/v1")
+        assert not creds.matches_url("http://localhost:3000/api/v1")
+        assert not creds.matches_url("http://localhost/simple")
+
     def test_empty_headers_dict(self):
         """Test HostScopedCredentials with empty headers."""
         creds = HostScopedCredentials(

@@ -128,8 +141,20 @@ class TestHostScopedCredentials:
         ("*.example.com", "https://sub.api.example.com/test", True),
         ("*.example.com", "https://example.com/test", True),
         ("*.example.com", "https://example.org/test", False),
-        ("localhost", "http://localhost:3000/test", True),
+        # Non-standard ports require explicit port in credential host
+        ("localhost", "http://localhost:3000/test", False),
+        ("localhost:3000", "http://localhost:3000/test", True),
         ("localhost", "http://127.0.0.1:3000/test", False),
+        # IPv6 addresses (frontend stores with brackets via URL.hostname)
+        ("[::1]", "http://[::1]/test", True),
+        ("[::1]", "http://[::1]:80/test", True),
+        ("[::1]", "https://[::1]:443/test", True),
+        ("[::1]", "http://[::1]:8080/test", False),  # Non-standard port
+        ("[::1]:8080", "http://[::1]:8080/test", True),
+        ("[::1]:8080", "http://[::1]:9090/test", False),
+        ("[2001:db8::1]", "http://[2001:db8::1]/path", True),
+        ("[2001:db8::1]", "https://[2001:db8::1]:443/path", True),
+        ("[2001:db8::1]", "http://[2001:db8::ff]/path", False),
     ],
 )
 def test_url_matching_parametrized(self, host: str, test_url: str, expected: bool):
@@ -224,6 +224,14 @@ openweathermap_credentials = APIKeyCredentials(
     expires_at=None,
 )
 
+elevenlabs_credentials = APIKeyCredentials(
+    id="f4a8b6c2-3d1e-4f5a-9b8c-7d6e5f4a3b2c",
+    provider="elevenlabs",
+    api_key=SecretStr(settings.secrets.elevenlabs_api_key),
+    title="Use Credits for ElevenLabs",
+    expires_at=None,
+)
+
 DEFAULT_CREDENTIALS = [
     ollama_credentials,
     revid_credentials,

@@ -252,6 +260,7 @@ DEFAULT_CREDENTIALS = [
     v0_credentials,
     webshare_proxy_credentials,
     openweathermap_credentials,
+    elevenlabs_credentials,
 ]
 
 SYSTEM_CREDENTIAL_IDS = {cred.id for cred in DEFAULT_CREDENTIALS}

@@ -366,6 +375,8 @@ class IntegrationCredentialsStore:
             all_credentials.append(webshare_proxy_credentials)
         if settings.secrets.openweathermap_api_key:
             all_credentials.append(openweathermap_credentials)
+        if settings.secrets.elevenlabs_api_key:
+            all_credentials.append(elevenlabs_credentials)
         return all_credentials
 
     async def get_creds_by_id(
@@ -18,6 +18,7 @@ class ProviderName(str, Enum):
     DISCORD = "discord"
     D_ID = "d_id"
     E2B = "e2b"
+    ELEVENLABS = "elevenlabs"
     FAL = "fal"
     GITHUB = "github"
     GOOGLE = "google"
@@ -8,6 +8,8 @@ from pathlib import Path
 from typing import TYPE_CHECKING, Literal
 from urllib.parse import urlparse
 
+from pydantic import BaseModel
+
 from backend.util.cloud_storage import get_cloud_storage_handler
 from backend.util.request import Requests
 from backend.util.settings import Config

@@ -17,6 +19,35 @@ from backend.util.virus_scanner import scan_content_safe
 if TYPE_CHECKING:
     from backend.data.execution import ExecutionContext
 
+
+class WorkspaceUri(BaseModel):
+    """Parsed workspace:// URI."""
+
+    file_ref: str  # File ID or path (e.g. "abc123" or "/path/to/file.txt")
+    mime_type: str | None = None  # MIME type from fragment (e.g. "video/mp4")
+    is_path: bool = False  # True if file_ref is a path (starts with "/")
+
+
+def parse_workspace_uri(uri: str) -> WorkspaceUri:
+    """Parse a workspace:// URI into its components.
+
+    Examples:
+        "workspace://abc123" → WorkspaceUri(file_ref="abc123", mime_type=None, is_path=False)
+        "workspace://abc123#video/mp4" → WorkspaceUri(file_ref="abc123", mime_type="video/mp4", is_path=False)
+        "workspace:///path/to/file.txt" → WorkspaceUri(file_ref="/path/to/file.txt", mime_type=None, is_path=True)
+    """
+    raw = uri.removeprefix("workspace://")
+    mime_type: str | None = None
+    if "#" in raw:
+        raw, fragment = raw.split("#", 1)
+        mime_type = fragment or None
+    return WorkspaceUri(
+        file_ref=raw,
+        mime_type=mime_type,
+        is_path=raw.startswith("/"),
+    )
+
+
 # Return format options for store_media_file
 # - "for_local_processing": Returns local file path - use with ffmpeg, MoviePy, PIL, etc.
 # - "for_external_api": Returns data URI (base64) - use when sending content to external APIs

@@ -183,22 +214,20 @@ async def store_media_file(
                 "This file type is only available in CoPilot sessions."
             )
 
-        # Parse workspace reference
-        # workspace://abc123 - by file ID
-        # workspace:///path/to/file.txt - by virtual path
-        file_ref = file[12:]  # Remove "workspace://"
+        # Parse workspace reference (strips #mimeType fragment from file ID)
+        ws = parse_workspace_uri(file)
 
-        if file_ref.startswith("/"):
-            # Path reference
-            workspace_content = await workspace_manager.read_file(file_ref)
-            file_info = await workspace_manager.get_file_info_by_path(file_ref)
+        if ws.is_path:
+            # Path reference: workspace:///path/to/file.txt
+            workspace_content = await workspace_manager.read_file(ws.file_ref)
+            file_info = await workspace_manager.get_file_info_by_path(ws.file_ref)
             filename = sanitize_filename(
                 file_info.name if file_info else f"{uuid.uuid4()}.bin"
             )
         else:
-            # ID reference
-            workspace_content = await workspace_manager.read_file_by_id(file_ref)
-            file_info = await workspace_manager.get_file_info(file_ref)
+            # ID reference: workspace://abc123 or workspace://abc123#video/mp4
+            workspace_content = await workspace_manager.read_file_by_id(ws.file_ref)
+            file_info = await workspace_manager.get_file_info(ws.file_ref)
             filename = sanitize_filename(
                 file_info.name if file_info else f"{uuid.uuid4()}.bin"
             )

@@ -334,7 +363,21 @@ async def store_media_file(
 
         # Don't re-save if input was already from workspace
         if is_from_workspace:
-            # Return original workspace reference
+            # Return original workspace reference, ensuring MIME type fragment
+            ws = parse_workspace_uri(file)
+            if not ws.mime_type:
+                # Add MIME type fragment if missing (older refs without it)
+                try:
+                    if ws.is_path:
+                        info = await workspace_manager.get_file_info_by_path(
+                            ws.file_ref
+                        )
+                    else:
+                        info = await workspace_manager.get_file_info(ws.file_ref)
+                    if info:
+                        return MediaFileType(f"{file}#{info.mimeType}")
+                except Exception:
+                    pass
             return MediaFileType(file)
 
         # Save new content to workspace

@@ -346,7 +389,7 @@ async def store_media_file(
             filename=filename,
             overwrite=True,
         )
-        return MediaFileType(f"workspace://{file_record.id}")
+        return MediaFileType(f"workspace://{file_record.id}#{file_record.mimeType}")
 
     else:
         raise ValueError(f"Invalid return_format: {return_format}")
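Aside: a round-trip sketch of the workspace URI convention introduced here — the MIME type rides in the URI fragment so downstream consumers can pick a handler without a metadata lookup. This uses the parse_workspace_uri helper from this diff; "abc123" is a made-up file ID.

from backend.util.file import parse_workspace_uri

ws = parse_workspace_uri("workspace://abc123#video/mp4")
assert (ws.file_ref, ws.mime_type, ws.is_path) == ("abc123", "video/mp4", False)

# Path-style references keep their leading slash and set is_path
ws_path = parse_workspace_uri("workspace:///clips/intro.mp4")
assert (ws_path.file_ref, ws_path.is_path) == ("/clips/intro.mp4", True)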
@@ -157,12 +157,7 @@ async def validate_url(
         is_trusted: Boolean indicating if the hostname is in trusted_origins
         ip_addresses: List of IP addresses for the host; empty if the host is trusted
     """
-    # Canonicalize URL
-    url = url.strip("/ ").replace("\\", "/")
-    parsed = urlparse(url)
-    if not parsed.scheme:
-        url = f"http://{url}"
-        parsed = urlparse(url)
+    parsed = parse_url(url)
 
     # Check scheme
     if parsed.scheme not in ALLOWED_SCHEMES:

@@ -220,6 +215,17 @@ async def validate_url(
     )
 
 
+def parse_url(url: str) -> URL:
+    """Canonicalizes and parses a URL string."""
+    url = url.strip("/ ").replace("\\", "/")
+
+    # Ensure scheme is present for proper parsing
+    if not re.match(r"[a-z0-9+.\-]+://", url):
+        url = f"http://{url}"
+
+    return urlparse(url)
+
+
 def pin_url(url: URL, ip_addresses: Optional[list[str]] = None) -> URL:
     """
     Pins a URL to a specific IP address to prevent DNS rebinding attacks.

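Aside: a behavior sketch of parse_url's canonicalization (a self-contained copy of the function above, on top of the stdlib urlparse): backslashes are normalized, surrounding "/ " is stripped, and http:// is prepended only when no scheme matching [a-z0-9+.-]+:// is already present.

import re
from urllib.parse import urlparse


def parse_url(url: str):
    url = url.strip("/ ").replace("\\", "/")
    if not re.match(r"[a-z0-9+.\-]+://", url):
        url = f"http://{url}"
    return urlparse(url)


assert parse_url("example.com\\path").geturl() == "http://example.com/path"
assert parse_url("https://example.com/").scheme == "https"
assert parse_url("ftp://example.com").scheme == "ftp"  # existing schemes kept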
@@ -656,6 +656,7 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
     e2b_api_key: str = Field(default="", description="E2B API key")
     nvidia_api_key: str = Field(default="", description="Nvidia API key")
     mem0_api_key: str = Field(default="", description="Mem0 API key")
+    elevenlabs_api_key: str = Field(default="", description="ElevenLabs API key")
 
     linear_client_id: str = Field(default="", description="Linear client ID")
     linear_client_secret: str = Field(default="", description="Linear client secret")
47
autogpt_platform/backend/poetry.lock
generated
@@ -1169,6 +1169,29 @@ attrs = ">=21.3.0"
 e2b = ">=1.5.4,<2.0.0"
 httpx = ">=0.20.0,<1.0.0"
 
+[[package]]
+name = "elevenlabs"
+version = "1.59.0"
+description = ""
+optional = false
+python-versions = "<4.0,>=3.8"
+groups = ["main"]
+files = [
+    {file = "elevenlabs-1.59.0-py3-none-any.whl", hash = "sha256:468145db81a0bc867708b4a8619699f75583e9481b395ec1339d0b443da771ed"},
+    {file = "elevenlabs-1.59.0.tar.gz", hash = "sha256:16e735bd594e86d415dd445d249c8cc28b09996cfd627fbc10102c0a84698859"},
+]
+
+[package.dependencies]
+httpx = ">=0.21.2"
+pydantic = ">=1.9.2"
+pydantic-core = ">=2.18.2,<3.0.0"
+requests = ">=2.20"
+typing_extensions = ">=4.0.0"
+websockets = ">=11.0"
+
+[package.extras]
+pyaudio = ["pyaudio (>=0.2.14)"]
+
 [[package]]
 name = "email-validator"
 version = "2.2.0"

@@ -7361,6 +7384,28 @@ files = [
 defusedxml = ">=0.7.1,<0.8.0"
 requests = "*"
 
+[[package]]
+name = "yt-dlp"
+version = "2025.12.8"
+description = "A feature-rich command-line audio/video downloader"
+optional = false
+python-versions = ">=3.10"
+groups = ["main"]
+files = [
+    {file = "yt_dlp-2025.12.8-py3-none-any.whl", hash = "sha256:36e2584342e409cfbfa0b5e61448a1c5189e345cf4564294456ee509e7d3e065"},
+    {file = "yt_dlp-2025.12.8.tar.gz", hash = "sha256:b773c81bb6b71cb2c111cfb859f453c7a71cf2ef44eff234ff155877184c3e4f"},
+]
+
+[package.extras]
+build = ["build", "hatchling (>=1.27.0)", "pip", "setuptools (>=71.0.2)", "wheel"]
+curl-cffi = ["curl-cffi (>=0.5.10,<0.6.dev0 || >=0.10.dev0,<0.14) ; implementation_name == \"cpython\""]
+default = ["brotli ; implementation_name == \"cpython\"", "brotlicffi ; implementation_name != \"cpython\"", "certifi", "mutagen", "pycryptodomex", "requests (>=2.32.2,<3)", "urllib3 (>=2.0.2,<3)", "websockets (>=13.0)", "yt-dlp-ejs (==0.3.2)"]
+dev = ["autopep8 (>=2.0,<3.0)", "pre-commit", "pytest (>=8.1,<9.0)", "pytest-rerunfailures (>=14.0,<15.0)", "ruff (>=0.14.0,<0.15.0)"]
+pyinstaller = ["pyinstaller (>=6.17.0)"]
+secretstorage = ["cffi", "secretstorage"]
+static-analysis = ["autopep8 (>=2.0,<3.0)", "ruff (>=0.14.0,<0.15.0)"]
+test = ["pytest (>=8.1,<9.0)", "pytest-rerunfailures (>=14.0,<15.0)"]
+
 [[package]]
 name = "zerobouncesdk"
 version = "1.1.2"

@@ -7512,4 +7557,4 @@ cffi = ["cffi (>=1.11)"]
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.10,<3.14"
-content-hash = "ee5742dc1a9df50dfc06d4b26a1682cbb2b25cab6b79ce5625ec272f93e4f4bf"
+content-hash = "8239323f9ae6713224dffd1fe8ba8b449fe88b6c3c7a90940294a74f43a0387a"
@@ -20,6 +20,7 @@ click = "^8.2.0"
 cryptography = "^45.0"
 discord-py = "^2.5.2"
 e2b-code-interpreter = "^1.5.2"
+elevenlabs = "^1.50.0"
 fastapi = "^0.116.1"
 feedparser = "^6.0.11"
 flake8 = "^7.3.0"

@@ -71,6 +72,7 @@ tweepy = "^4.16.0"
 uvicorn = { extras = ["standard"], version = "^0.35.0" }
 websockets = "^15.0"
 youtube-transcript-api = "^1.2.1"
+yt-dlp = "2025.12.08"
 zerobouncesdk = "^1.1.2"
 # NOTE: please insert new dependencies in their alphabetical location
 pytest-snapshot = "^0.9.0"
@@ -1,6 +1,17 @@
 import { OAuthPopupResultMessage } from "./types";
 import { NextResponse } from "next/server";
 
+/**
+ * Safely encode a value as JSON for embedding in a script tag.
+ * Escapes characters that could break out of the script context to prevent XSS.
+ */
+function safeJsonStringify(value: unknown): string {
+  return JSON.stringify(value)
+    .replace(/</g, "\\u003c")
+    .replace(/>/g, "\\u003e")
+    .replace(/&/g, "\\u0026");
+}
+
 // This route is intended to be used as the callback for integration OAuth flows,
 // controlled by the CredentialsInput component. The CredentialsInput opens the login
 // page in a pop-up window, which then redirects to this route to close the loop.

@@ -23,12 +34,13 @@ export async function GET(request: Request) {
   console.debug("Sending message to opener:", message);
 
   // Return a response with the message as JSON and a script to close the window
+  // Use safeJsonStringify to prevent XSS by escaping <, >, and & characters
   return new NextResponse(
     `
     <html>
       <body>
         <script>
-          window.opener.postMessage(${JSON.stringify(message)});
+          window.opener.postMessage(${safeJsonStringify(message)});
          window.close();
        </script>
      </body>
@@ -1,6 +1,6 @@
|
||||
import { beautifyString } from "@/lib/utils";
|
||||
import { Clipboard, Maximize2 } from "lucide-react";
|
||||
import React, { useState } from "react";
|
||||
import React, { useMemo, useState } from "react";
|
||||
import { Button } from "../../../../../components/__legacy__/ui/button";
|
||||
import { ContentRenderer } from "../../../../../components/__legacy__/ui/render";
|
||||
import {
|
||||
@@ -11,6 +11,12 @@ import {
|
||||
TableHeader,
|
||||
TableRow,
|
||||
} from "../../../../../components/__legacy__/ui/table";
|
||||
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
|
||||
import {
|
||||
globalRegistry,
|
||||
OutputItem,
|
||||
} from "@/components/contextual/OutputRenderers";
|
||||
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
|
||||
import { useToast } from "../../../../../components/molecules/Toast/use-toast";
|
||||
import ExpandableOutputDialog from "./ExpandableOutputDialog";
|
||||
|
||||
@@ -26,6 +32,9 @@ export default function DataTable({
|
||||
data,
|
||||
}: DataTableProps) {
|
||||
const { toast } = useToast();
|
||||
const enableEnhancedOutputHandling = useGetFlag(
|
||||
Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
|
||||
);
|
||||
const [expandedDialog, setExpandedDialog] = useState<{
|
||||
isOpen: boolean;
|
||||
execId: string;
|
||||
@@ -33,6 +42,15 @@ export default function DataTable({
|
||||
data: any[];
|
||||
} | null>(null);
|
||||
|
||||
// Prepare renderers for each item when enhanced mode is enabled
|
||||
const getItemRenderer = useMemo(() => {
|
||||
if (!enableEnhancedOutputHandling) return null;
|
||||
return (item: unknown) => {
|
||||
const metadata: OutputMetadata = {};
|
||||
return globalRegistry.getRenderer(item, metadata);
|
||||
};
|
||||
}, [enableEnhancedOutputHandling]);
|
||||
|
||||
const copyData = (pin: string, data: string) => {
|
||||
navigator.clipboard.writeText(data).then(() => {
|
||||
toast({
|
||||
@@ -102,15 +120,31 @@ export default function DataTable({
              <Clipboard size={18} />
            </Button>
          </div>
          {value.map((item, index) => (
            <React.Fragment key={index}>
              <ContentRenderer
                value={item}
                truncateLongData={truncateLongData}
              />
              {index < value.length - 1 && ", "}
            </React.Fragment>
          ))}
          {value.map((item, index) => {
            const renderer = getItemRenderer?.(item);
            if (enableEnhancedOutputHandling && renderer) {
              const metadata: OutputMetadata = {};
              return (
                <React.Fragment key={index}>
                  <OutputItem
                    value={item}
                    metadata={metadata}
                    renderer={renderer}
                  />
                  {index < value.length - 1 && ", "}
                </React.Fragment>
              );
            }
            return (
              <React.Fragment key={index}>
                <ContentRenderer
                  value={item}
                  truncateLongData={truncateLongData}
                />
                {index < value.length - 1 && ", "}
              </React.Fragment>
            );
          })}
        </div>
      </TableCell>
    </TableRow>

@@ -1,8 +1,14 @@
import React, { useContext, useState } from "react";
import React, { useContext, useMemo, useState } from "react";
import { Button } from "@/components/__legacy__/ui/button";
import { Maximize2 } from "lucide-react";
import * as Separator from "@radix-ui/react-separator";
import { ContentRenderer } from "@/components/__legacy__/ui/render";
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import {
  globalRegistry,
  OutputItem,
} from "@/components/contextual/OutputRenderers";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";

import { beautifyString } from "@/lib/utils";

@@ -21,6 +27,9 @@ export default function NodeOutputs({
  data,
}: NodeOutputsProps) {
  const builderContext = useContext(BuilderContext);
  const enableEnhancedOutputHandling = useGetFlag(
    Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
  );

  const [expandedDialog, setExpandedDialog] = useState<{
    isOpen: boolean;
@@ -37,6 +46,15 @@ export default function NodeOutputs({

  const { getNodeTitle } = builderContext;

  // Prepare renderers for each item when enhanced mode is enabled
  const getItemRenderer = useMemo(() => {
    if (!enableEnhancedOutputHandling) return null;
    return (item: unknown) => {
      const metadata: OutputMetadata = {};
      return globalRegistry.getRenderer(item, metadata);
    };
  }, [enableEnhancedOutputHandling]);

  const getBeautifiedPinName = (pin: string) => {
    if (!pin.startsWith("tools_^_")) {
      return beautifyString(pin);
@@ -87,15 +105,31 @@ export default function NodeOutputs({
        <div className="mt-2">
          <strong className="mr-2">Data:</strong>
          <div className="mt-1">
            {dataArray.slice(0, 10).map((item, index) => (
              <React.Fragment key={index}>
                <ContentRenderer
                  value={item}
                  truncateLongData={truncateLongData}
                />
                {index < Math.min(dataArray.length, 10) - 1 && ", "}
              </React.Fragment>
            ))}
            {dataArray.slice(0, 10).map((item, index) => {
              const renderer = getItemRenderer?.(item);
              if (enableEnhancedOutputHandling && renderer) {
                const metadata: OutputMetadata = {};
                return (
                  <React.Fragment key={index}>
                    <OutputItem
                      value={item}
                      metadata={metadata}
                      renderer={renderer}
                    />
                    {index < Math.min(dataArray.length, 10) - 1 && ", "}
                  </React.Fragment>
                );
              }
              return (
                <React.Fragment key={index}>
                  <ContentRenderer
                    value={item}
                    truncateLongData={truncateLongData}
                  />
                  {index < Math.min(dataArray.length, 10) - 1 && ", "}
                </React.Fragment>
              );
            })}
            {dataArray.length > 10 && (
              <span style={{ color: "#888" }}>
                <br />

@@ -26,8 +26,20 @@ export function buildCopilotChatUrl(prompt: string): string {

export function getQuickActions(): string[] {
  return [
    "Show me what I can automate",
    "Design a custom workflow",
    "Help me with content creation",
    "I don't know where to start, just ask me stuff",
    "I do the same thing every week and it's killing me",
    "Help me find where I'm wasting my time",
  ];
}

export function getInputPlaceholder(width?: number) {
  if (!width) return "What's your role and what eats up most of your day?";

  if (width < 500) {
    return "I'm a chef and I hate...";
  }
  if (width <= 1080) {
    return "What's your role and what eats up most of your day?";
  }
  return "What's your role and what eats up most of your day? e.g. 'I'm a recruiter and I hate...'";
}

@@ -6,7 +6,9 @@ import { Text } from "@/components/atoms/Text/Text";
import { Chat } from "@/components/contextual/Chat/Chat";
import { ChatInput } from "@/components/contextual/Chat/components/ChatInput/ChatInput";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { useEffect, useState } from "react";
import { useCopilotStore } from "./copilot-page-store";
import { getInputPlaceholder } from "./helpers";
import { useCopilotPage } from "./useCopilotPage";

export default function CopilotPage() {
@@ -14,8 +16,25 @@ export default function CopilotPage() {
  const isInterruptModalOpen = useCopilotStore((s) => s.isInterruptModalOpen);
  const confirmInterrupt = useCopilotStore((s) => s.confirmInterrupt);
  const cancelInterrupt = useCopilotStore((s) => s.cancelInterrupt);

  const [inputPlaceholder, setInputPlaceholder] = useState(
    getInputPlaceholder(),
  );

  useEffect(() => {
    const handleResize = () => {
      setInputPlaceholder(getInputPlaceholder(window.innerWidth));
    };

    handleResize();

    window.addEventListener("resize", handleResize);
    return () => window.removeEventListener("resize", handleResize);
  }, []);

  const { greetingName, quickActions, isLoading, hasSession, initialPrompt } =
    state;

  const {
    handleQuickAction,
    startChatWithPrompt,
@@ -73,7 +92,7 @@ export default function CopilotPage() {
  }

  return (
    <div className="flex h-full flex-1 items-center justify-center overflow-y-auto bg-[#f8f8f9] px-6 py-10">
    <div className="flex h-full flex-1 items-center justify-center overflow-y-auto bg-[#f8f8f9] px-3 py-5 md:px-6 md:py-10">
      <div className="w-full text-center">
        {isLoading ? (
          <div className="mx-auto max-w-2xl">
@@ -90,25 +109,25 @@ export default function CopilotPage() {
          </div>
        ) : (
          <>
            <div className="mx-auto max-w-2xl">
            <div className="mx-auto max-w-3xl">
              <Text
                variant="h3"
                className="mb-3 !text-[1.375rem] text-zinc-700"
                className="mb-1 !text-[1.375rem] text-zinc-700"
              >
                Hey, <span className="text-violet-600">{greetingName}</span>
              </Text>
              <Text variant="h3" className="mb-8 !font-normal">
                What do you want to automate?
                Tell me about your work — I'll find what to automate.
              </Text>

              <div className="mb-6">
                <ChatInput
                  onSend={startChatWithPrompt}
                  placeholder='You can search or just ask - e.g. "create a blog post outline"'
                  placeholder={inputPlaceholder}
                />
              </div>
            </div>
            <div className="flex flex-nowrap items-center justify-center gap-3 overflow-x-auto [-ms-overflow-style:none] [scrollbar-width:none] [&::-webkit-scrollbar]:hidden">
            <div className="flex flex-wrap items-center justify-center gap-3 overflow-x-auto [-ms-overflow-style:none] [scrollbar-width:none] [&::-webkit-scrollbar]:hidden">
              {quickActions.map((action) => (
                <Button
                  key={action}
@@ -116,7 +135,7 @@ export default function CopilotPage() {
                  variant="outline"
                  size="small"
                  onClick={() => handleQuickAction(action)}
                  className="h-auto shrink-0 border-zinc-600 !px-4 !py-2 text-[1rem] text-zinc-600"
                  className="h-auto shrink-0 border-zinc-300 px-3 py-2 text-[.9rem] text-zinc-600"
                >
                  {action}
                </Button>

@@ -22,7 +22,7 @@ const isValidVideoUrl = (url: string): boolean => {
  if (url.startsWith("data:video")) {
    return true;
  }
  const videoExtensions = /\.(mp4|webm|ogg)$/i;
  const videoExtensions = /\.(mp4|webm|ogg|mov|avi|mkv|m4v)$/i;
  const youtubeRegex = /^(https?:\/\/)?(www\.)?(youtube\.com|youtu\.?be)\/.+$/;
  const cleanedUrl = url.split("?")[0];
  return (
@@ -44,11 +44,29 @@ const isValidAudioUrl = (url: string): boolean => {
  if (url.startsWith("data:audio")) {
    return true;
  }
  const audioExtensions = /\.(mp3|wav)$/i;
  const audioExtensions = /\.(mp3|wav|ogg|m4a|aac|flac)$/i;
  const cleanedUrl = url.split("?")[0];
  return isValidMediaUri(url) && audioExtensions.test(cleanedUrl);
};
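Both validators strip the query string before testing the extension, so signed or cache-busted URLs still match the `$`-anchored regex. A small standalone check (URLs are hypothetical):

```ts
const audioExtensions = /\.(mp3|wav|ogg|m4a|aac|flac)$/i;

const signedUrl = "https://cdn.example.com/track.m4a?token=abc123";
audioExtensions.test(signedUrl);               // false: "?token=..." defeats the $ anchor
audioExtensions.test(signedUrl.split("?")[0]); // true: extension test runs on the clean path
```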

const getVideoMimeType = (url: string): string => {
  if (url.startsWith("data:video/")) {
    const match = url.match(/^data:(video\/[^;]+)/);
    return match?.[1] || "video/mp4";
  }
  const extension = url.split("?")[0].split(".").pop()?.toLowerCase();
  const mimeMap: Record<string, string> = {
    mp4: "video/mp4",
    webm: "video/webm",
    ogg: "video/ogg",
    mov: "video/quicktime",
    avi: "video/x-msvideo",
    mkv: "video/x-matroska",
    m4v: "video/mp4",
  };
  return mimeMap[extension || ""] || "video/mp4";
};
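Expected behavior of the lookup above, sketched with hypothetical URLs; unknown extensions deliberately fall back to `video/mp4` rather than failing:

```ts
getVideoMimeType("https://cdn.example.com/demo.mov?sig=xyz"); // "video/quicktime"
getVideoMimeType("data:video/webm;base64,AAAA");              // "video/webm" (from the data URI itself)
getVideoMimeType("https://cdn.example.com/clip.unknown");     // "video/mp4" (fallback)
```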

const VideoRenderer: React.FC<{ videoUrl: string }> = ({ videoUrl }) => {
  const videoId = getYouTubeVideoId(videoUrl);
  return (
@@ -63,7 +81,7 @@ const VideoRenderer: React.FC<{ videoUrl: string }> = ({ videoUrl }) => {
      ></iframe>
    ) : (
      <video controls width="100%" height="315">
        <source src={videoUrl} type="video/mp4" />
        <source src={videoUrl} type={getVideoMimeType(videoUrl)} />
        Your browser does not support the video tag.
      </video>
    )}

@@ -2,7 +2,6 @@ import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessi
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { useBreakpoint } from "@/lib/hooks/useBreakpoint";
import { cn } from "@/lib/utils";
import { GlobeHemisphereEastIcon } from "@phosphor-icons/react";
import { useEffect } from "react";
@@ -56,10 +55,6 @@ export function ChatContainer({
    onStreamingChange?.(isStreaming);
  }, [isStreaming, onStreamingChange]);

  const breakpoint = useBreakpoint();
  const isMobile =
    breakpoint === "base" || breakpoint === "sm" || breakpoint === "md";

  return (
    <div
      className={cn(
@@ -127,11 +122,7 @@ export function ChatContainer({
          disabled={isStreaming || !sessionId}
          isStreaming={isStreaming}
          onStop={stopStreaming}
          placeholder={
            isMobile
              ? "You can search or just ask"
              : 'You can search or just ask — e.g. "create a blog post outline"'
          }
          placeholder="What else can I help with?"
        />
      </div>
    </div>

@@ -74,19 +74,20 @@ export function ChatInput({
          hasMultipleLines ? "rounded-xlarge" : "rounded-full",
        )}
      >
        {!value && !isRecording && (
          <div
            className="pointer-events-none absolute inset-0 top-0.5 flex items-center justify-start pl-14 text-[1rem] text-zinc-400"
            aria-hidden="true"
          >
            {isTranscribing ? "Transcribing..." : placeholder}
          </div>
        )}
        <textarea
          id={inputId}
          aria-label="Chat message input"
          value={value}
          onChange={handleChange}
          onKeyDown={handleKeyDown}
          placeholder={
            isTranscribing
              ? "Transcribing..."
              : isRecording
                ? ""
                : placeholder
          }
          disabled={isInputDisabled}
          rows={1}
          className={cn(
@@ -122,13 +123,14 @@ export function ChatInput({
            size="icon"
            aria-label={isRecording ? "Stop recording" : "Start recording"}
            onClick={toggleRecording}
            disabled={disabled || isTranscribing}
            disabled={disabled || isTranscribing || isStreaming}
            className={cn(
              isRecording
                ? "animate-pulse border-red-500 bg-red-500 text-white hover:border-red-600 hover:bg-red-600"
                : isTranscribing
                  ? "border-zinc-300 bg-zinc-100 text-zinc-400"
                  : "border-zinc-300 bg-white text-zinc-500 hover:border-zinc-400 hover:bg-zinc-50 hover:text-zinc-700",
              isStreaming && "opacity-40",
            )}
          >
            {isTranscribing ? (

@@ -38,8 +38,8 @@ export function AudioWaveform({
    // Create audio context and analyser
    const audioContext = new AudioContext();
    const analyser = audioContext.createAnalyser();
    analyser.fftSize = 512;
    analyser.smoothingTimeConstant = 0.8;
    analyser.fftSize = 256;
    analyser.smoothingTimeConstant = 0.3;

    // Connect the stream to the analyser
    const source = audioContext.createMediaStreamSource(stream);
@@ -73,10 +73,11 @@ export function AudioWaveform({
        maxAmplitude = Math.max(maxAmplitude, amplitude);
      }

      // Map amplitude (0-128) to bar height
      const normalized = (maxAmplitude / 128) * 255;
      const height =
        minBarHeight + (normalized / 255) * (maxBarHeight - minBarHeight);
      // Normalize amplitude (0-128 range) to 0-1
      const normalized = maxAmplitude / 128;
      // Apply sensitivity boost (multiply by 4) and use sqrt curve to amplify quiet sounds
      const boosted = Math.min(1, Math.sqrt(normalized) * 4);
      const height = minBarHeight + boosted * (maxBarHeight - minBarHeight);
      newBars.push(height);
    }
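The square-root curve plus the 4x gain is what makes quiet speech visible; a few worked values for the boost under the new formula (standalone sketch):

```ts
const boost = (maxAmplitude: number) =>
  Math.min(1, Math.sqrt(maxAmplitude / 128) * 4);

boost(2);   // 0.5: a whisper already fills half the bar range
boost(8);   // 1:   clipped to full height from here on
boost(128); // 1:   loud input still saturates, as before
```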

@@ -224,7 +224,7 @@ export function useVoiceRecording({
    [value, isTranscribing, toggleRecording, baseHandleKeyDown],
  );

  const showMicButton = isSupported && !isStreaming;
  const showMicButton = isSupported;
  const isInputDisabled = disabled || isStreaming || isTranscribing;

  // Cleanup on unmount

@@ -102,18 +102,6 @@ export function ChatMessage({
    }
  }

  function handleClarificationAnswers(answers: Record<string, string>) {
    if (onSendMessage) {
      const contextMessage = Object.entries(answers)
        .map(([keyword, answer]) => `${keyword}: ${answer}`)
        .join("\n");

      onSendMessage(
        `I have the answers to your questions:\n\n${contextMessage}\n\nPlease proceed with creating the agent.`,
      );
    }
  }

  const handleCopy = useCallback(
    async function handleCopy() {
      if (message.type !== "message") return;
@@ -162,6 +150,22 @@ export function ChatMessage({
      .slice(index + 1)
      .some((m) => m.type === "message" && m.role === "user");

    const handleClarificationAnswers = (answers: Record<string, string>) => {
      if (onSendMessage) {
        // Iterate over questions (preserves original order) instead of answers
        const contextMessage = message.questions
          .map((q) => {
            const answer = answers[q.keyword] || "";
            return `> ${q.question}\n\n${answer}`;
          })
          .join("\n\n");

        onSendMessage(
          `**Here are my answers:**\n\n${contextMessage}\n\nPlease proceed with creating the agent.`,
        );
      }
    };

    return (
      <ClarificationQuestionsWidget
        questions={message.questions}
@@ -346,6 +350,7 @@ export function ChatMessage({
          toolId={message.toolId}
          toolName={message.toolName}
          result={message.result}
          onSendMessage={onSendMessage}
        />
      </div>
    );

@@ -3,7 +3,7 @@
import { getGetWorkspaceDownloadFileByIdUrl } from "@/app/api/__generated__/endpoints/workspace/workspace";
import { cn } from "@/lib/utils";
import { EyeSlash } from "@phosphor-icons/react";
import React from "react";
import React, { useState } from "react";
import ReactMarkdown from "react-markdown";
import remarkGfm from "remark-gfm";

@@ -48,7 +48,9 @@ interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {
 */
function resolveWorkspaceUrl(src: string): string {
  if (src.startsWith("workspace://")) {
    const fileId = src.replace("workspace://", "");
    // Strip MIME type fragment if present (e.g., workspace://abc123#video/mp4 → abc123)
    const withoutPrefix = src.replace("workspace://", "");
    const fileId = withoutPrefix.split("#")[0];
    // Use the generated API URL helper to get the correct path
    const apiPath = getGetWorkspaceDownloadFileByIdUrl(fileId);
    // Route through the Next.js proxy (same pattern as customMutator for client-side)
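The fragment has to be dropped before the download URL is built, or the file ID sent to the API would include `#video/mp4`. The split in isolation (IDs are hypothetical):

```ts
const src = "workspace://abc123#video/mp4";
const withoutPrefix = src.replace("workspace://", ""); // "abc123#video/mp4"
const fileId = withoutPrefix.split("#")[0];            // "abc123"
```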

@@ -65,13 +67,49 @@ function isWorkspaceImage(src: string | undefined): boolean {
  return src?.includes("/workspace/files/") ?? false;
}

/**
 * Renders a workspace video with controls and an optional "AI cannot see" badge.
 */
function WorkspaceVideo({
  src,
  aiCannotSee,
}: {
  src: string;
  aiCannotSee: boolean;
}) {
  return (
    <span className="relative my-2 inline-block">
      <video
        controls
        className="h-auto max-w-full rounded-md border border-zinc-200"
        preload="metadata"
      >
        <source src={src} />
        Your browser does not support the video tag.
      </video>
      {aiCannotSee && (
        <span
          className="absolute bottom-2 right-2 flex items-center gap-1 rounded bg-black/70 px-2 py-1 text-xs text-white"
          title="The AI cannot see this video"
        >
          <EyeSlash size={14} />
          <span>AI cannot see this video</span>
        </span>
      )}
    </span>
  );
}

/**
 * Custom image component that shows an indicator when the AI cannot see the image.
 * Also handles the "video:" alt-text prefix convention to render <video> elements.
 * For workspace files with unknown types, falls back to <video> if <img> fails.
 * Note: src is already transformed by urlTransform, so workspace:// is now /api/workspace/...
 */
function MarkdownImage(props: Record<string, unknown>) {
  const src = props.src as string | undefined;
  const alt = props.alt as string | undefined;
  const [imgFailed, setImgFailed] = useState(false);

  const aiCannotSee = isWorkspaceImage(src);

@@ -84,6 +122,18 @@ function MarkdownImage(props: Record<string, unknown>) {
    );
  }

  // Detect video: prefix in alt text (set by formatOutputValue in helpers.ts)
  if (alt?.startsWith("video:")) {
    return <WorkspaceVideo src={src} aiCannotSee={aiCannotSee} />;
  }

  // If the <img> failed to load and this is a workspace file, try as video.
  // This handles generic output keys like "file_out" where the MIME type
  // isn't known from the key name alone.
  if (imgFailed && aiCannotSee) {
    return <WorkspaceVideo src={src} aiCannotSee={aiCannotSee} />;
  }

  return (
    <span className="relative my-2 inline-block">
      {/* eslint-disable-next-line @next/next/no-img-element */}
@@ -92,6 +142,9 @@ function MarkdownImage(props: Record<string, unknown>) {
        alt={alt || "Image"}
        className="h-auto max-w-full rounded-md border border-zinc-200"
        loading="lazy"
        onError={() => {
          if (aiCannotSee) setImgFailed(true);
        }}
      />
      {aiCannotSee && (
        <span

@@ -73,6 +73,7 @@ export function MessageList({
            key={index}
            message={message}
            prevMessage={messages[index - 1]}
            onSendMessage={onSendMessage}
          />
        );
      }

@@ -5,11 +5,13 @@ import { shouldSkipAgentOutput } from "../../helpers";
export interface LastToolResponseProps {
  message: ChatMessageData;
  prevMessage: ChatMessageData | undefined;
  onSendMessage?: (content: string) => void;
}

export function LastToolResponse({
  message,
  prevMessage,
  onSendMessage,
}: LastToolResponseProps) {
  if (message.type !== "tool_response") return null;

@@ -21,6 +23,7 @@ export function LastToolResponse({
        toolId={message.toolId}
        toolName={message.toolName}
        result={message.result}
        onSendMessage={onSendMessage}
      />
    </div>
  );

@@ -1,6 +1,8 @@
import { Progress } from "@/components/atoms/Progress/Progress";
import { cn } from "@/lib/utils";
import { useEffect, useRef, useState } from "react";
import { AIChatBubble } from "../AIChatBubble/AIChatBubble";
import { useAsymptoticProgress } from "../ToolCallMessage/useAsymptoticProgress";

export interface ThinkingMessageProps {
  className?: string;
@@ -11,18 +13,19 @@ export function ThinkingMessage({ className }: ThinkingMessageProps) {
  const [showCoffeeMessage, setShowCoffeeMessage] = useState(false);
  const timerRef = useRef<NodeJS.Timeout | null>(null);
  const coffeeTimerRef = useRef<NodeJS.Timeout | null>(null);
  const progress = useAsymptoticProgress(showCoffeeMessage);

  useEffect(() => {
    if (timerRef.current === null) {
      timerRef.current = setTimeout(() => {
        setShowSlowLoader(true);
      }, 8000);
      }, 3000);
    }

    if (coffeeTimerRef.current === null) {
      coffeeTimerRef.current = setTimeout(() => {
        setShowCoffeeMessage(true);
      }, 10000);
      }, 8000);
    }

    return () => {
@@ -49,9 +52,18 @@ export function ThinkingMessage({ className }: ThinkingMessageProps) {
    <AIChatBubble>
      <div className="transition-all duration-500 ease-in-out">
        {showCoffeeMessage ? (
          <span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
            This could take a few minutes, grab a coffee ☕️
          </span>
          <div className="flex flex-col items-center gap-3">
            <div className="flex w-full max-w-[280px] flex-col gap-1.5">
              <div className="flex items-center justify-between text-xs text-neutral-500">
                <span>Working on it...</span>
                <span>{Math.round(progress)}%</span>
              </div>
              <Progress value={progress} className="h-2 w-full" />
            </div>
            <span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
              This could take a few minutes, grab a coffee ☕️
            </span>
          </div>
        ) : showSlowLoader ? (
          <span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
            Taking a bit more time...

@@ -0,0 +1,50 @@
import { useEffect, useRef, useState } from "react";

/**
 * Hook that returns a progress value that starts fast and slows down,
 * asymptotically approaching but never reaching the max value.
 *
 * Uses a half-life formula: progress = max * (1 - 0.5^(time/halfLife))
 * This creates the "game loading bar" effect where:
 * - 50% is reached at halfLifeSeconds
 * - 75% is reached at 2 * halfLifeSeconds
 * - 87.5% is reached at 3 * halfLifeSeconds
 * - and so on...
 *
 * @param isActive - Whether the progress should be animating
 * @param halfLifeSeconds - Time in seconds to reach 50% progress (default: 30)
 * @param maxProgress - Maximum progress value to approach (default: 100)
 * @param intervalMs - Update interval in milliseconds (default: 100)
 * @returns Current progress value (0-maxProgress)
 */
export function useAsymptoticProgress(
  isActive: boolean,
  halfLifeSeconds = 30,
  maxProgress = 100,
  intervalMs = 100,
) {
  const [progress, setProgress] = useState(0);
  const elapsedTimeRef = useRef(0);

  useEffect(() => {
    if (!isActive) {
      setProgress(0);
      elapsedTimeRef.current = 0;
      return;
    }

    const interval = setInterval(() => {
      elapsedTimeRef.current += intervalMs / 1000;
      // Half-life approach: progress = max * (1 - 0.5^(time/halfLife))
      // At t=halfLife: 50%, at t=2*halfLife: 75%, at t=3*halfLife: 87.5%, etc.
      const newProgress =
        maxProgress *
        (1 - Math.pow(0.5, elapsedTimeRef.current / halfLifeSeconds));
      setProgress(newProgress);
    }, intervalMs);

    return () => clearInterval(interval);
  }, [isActive, halfLifeSeconds, maxProgress, intervalMs]);

  return progress;
}
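A quick check of the milestones the docstring promises, using the same formula outside the hook:

```ts
const progressAt = (t: number, halfLife = 30, max = 100) =>
  max * (1 - Math.pow(0.5, t / halfLife));

progressAt(30);  // 50
progressAt(60);  // 75
progressAt(90);  // 87.5
progressAt(300); // ~99.9: approaches but never reaches 100
```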

@@ -0,0 +1,128 @@
"use client";

import { useGetV2GetLibraryAgent } from "@/app/api/__generated__/endpoints/library/library";
import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta";
import { RunAgentModal } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import {
  CheckCircleIcon,
  PencilLineIcon,
  PlayIcon,
} from "@phosphor-icons/react";
import { AIChatBubble } from "../AIChatBubble/AIChatBubble";

interface Props {
  agentName: string;
  libraryAgentId: string;
  onSendMessage?: (content: string) => void;
}

export function AgentCreatedPrompt({
  agentName,
  libraryAgentId,
  onSendMessage,
}: Props) {
  // Fetch library agent eagerly so modal is ready when user clicks
  const { data: libraryAgentResponse, isLoading } = useGetV2GetLibraryAgent(
    libraryAgentId,
    {
      query: {
        enabled: !!libraryAgentId,
      },
    },
  );

  const libraryAgent =
    libraryAgentResponse?.status === 200 ? libraryAgentResponse.data : null;

  function handleRunWithPlaceholders() {
    onSendMessage?.(
      `Run the agent "${agentName}" with placeholder/example values so I can test it.`,
    );
  }

  function handleRunCreated(execution: GraphExecutionMeta) {
    onSendMessage?.(
      `I've started the agent "${agentName}". The execution ID is ${execution.id}. Please monitor its progress and let me know when it completes.`,
    );
  }

  function handleScheduleCreated(schedule: GraphExecutionJobInfo) {
    const scheduleInfo = schedule.cron
      ? `with cron schedule "${schedule.cron}"`
      : "to run on the specified schedule";
    onSendMessage?.(
      `I've scheduled the agent "${agentName}" ${scheduleInfo}. The schedule ID is ${schedule.id}.`,
    );
  }

  return (
    <AIChatBubble>
      <div className="flex flex-col gap-4">
        <div className="flex items-center gap-2">
          <div className="flex h-8 w-8 items-center justify-center rounded-full bg-green-100">
            <CheckCircleIcon
              size={18}
              weight="fill"
              className="text-green-600"
            />
          </div>
          <div>
            <Text variant="body-medium" className="text-neutral-900">
              Agent Created Successfully
            </Text>
            <Text variant="small" className="text-neutral-500">
              "{agentName}" is ready to test
            </Text>
          </div>
        </div>

        <div className="flex flex-col gap-2">
          <Text variant="small-medium" className="text-neutral-700">
            Ready to test?
          </Text>
          <div className="flex flex-wrap gap-2">
            <Button
              variant="outline"
              size="small"
              onClick={handleRunWithPlaceholders}
              className="gap-2"
            >
              <PlayIcon size={16} />
              Run with example values
            </Button>
            {libraryAgent ? (
              <RunAgentModal
                triggerSlot={
                  <Button variant="outline" size="small" className="gap-2">
                    <PencilLineIcon size={16} />
                    Run with my inputs
                  </Button>
                }
                agent={libraryAgent}
                onRunCreated={handleRunCreated}
                onScheduleCreated={handleScheduleCreated}
              />
            ) : (
              <Button
                variant="outline"
                size="small"
                loading={isLoading}
                disabled
                className="gap-2"
              >
                <PencilLineIcon size={16} />
                Run with my inputs
              </Button>
            )}
          </div>
          <Text variant="small" className="text-neutral-500">
            or just ask me
          </Text>
        </div>
      </div>
    </AIChatBubble>
  );
}

@@ -2,11 +2,13 @@ import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";
import type { ToolResult } from "@/types/chat";
import { WarningCircleIcon } from "@phosphor-icons/react";
import { AgentCreatedPrompt } from "./AgentCreatedPrompt";
import { AIChatBubble } from "../AIChatBubble/AIChatBubble";
import { MarkdownContent } from "../MarkdownContent/MarkdownContent";
import {
  formatToolResponse,
  getErrorMessage,
  isAgentSavedResponse,
  isErrorResponse,
} from "./helpers";

@@ -16,6 +18,7 @@ export interface ToolResponseMessageProps {
  result?: ToolResult;
  success?: boolean;
  className?: string;
  onSendMessage?: (content: string) => void;
}

export function ToolResponseMessage({
@@ -24,6 +27,7 @@ export function ToolResponseMessage({
  result,
  success: _success,
  className,
  onSendMessage,
}: ToolResponseMessageProps) {
  if (isErrorResponse(result)) {
    const errorMessage = getErrorMessage(result);
@@ -43,6 +47,18 @@ export function ToolResponseMessage({
    );
  }

  // Check for agent_saved response - show special prompt
  const agentSavedData = isAgentSavedResponse(result);
  if (agentSavedData.isSaved) {
    return (
      <AgentCreatedPrompt
        agentName={agentSavedData.agentName}
        libraryAgentId={agentSavedData.libraryAgentId}
        onSendMessage={onSendMessage}
      />
    );
  }

  const formattedText = formatToolResponse(result, toolName);

  return (

@@ -6,6 +6,43 @@ function stripInternalReasoning(content: string): string {
    .trim();
}

export interface AgentSavedData {
  isSaved: boolean;
  agentName: string;
  agentId: string;
  libraryAgentId: string;
  libraryAgentLink: string;
}

export function isAgentSavedResponse(result: unknown): AgentSavedData {
  if (typeof result !== "object" || result === null) {
    return {
      isSaved: false,
      agentName: "",
      agentId: "",
      libraryAgentId: "",
      libraryAgentLink: "",
    };
  }
  const response = result as Record<string, unknown>;
  if (response.type === "agent_saved") {
    return {
      isSaved: true,
      agentName: (response.agent_name as string) || "Agent",
      agentId: (response.agent_id as string) || "",
      libraryAgentId: (response.library_agent_id as string) || "",
      libraryAgentLink: (response.library_agent_link as string) || "",
    };
  }
  return {
    isSaved: false,
    agentName: "",
    agentId: "",
    libraryAgentId: "",
    libraryAgentLink: "",
  };
}
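The helper only treats a payload as saved when `type === "agent_saved"`; everything else, including strings and null, falls through to the empty default. A sketch of a payload that would trigger the prompt (field values are hypothetical):

```ts
const result = {
  type: "agent_saved",
  agent_name: "Weekly Report Bot",
  agent_id: "graph-123",
  library_agent_id: "lib-456",
  library_agent_link: "/library/agents/lib-456",
};

isAgentSavedResponse(result).isSaved; // true
isAgentSavedResponse("done").isSaved; // false: non-objects short-circuit
```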

export function isErrorResponse(result: unknown): boolean {
  if (typeof result === "string") {
    const lower = result.toLowerCase();
@@ -39,69 +76,101 @@ export function getErrorMessage(result: unknown): string {

/**
 * Check if a value is a workspace file reference.
 * Format: workspace://{fileId} or workspace://{fileId}#{mimeType}
 */
function isWorkspaceRef(value: unknown): value is string {
  return typeof value === "string" && value.startsWith("workspace://");
}

/**
 * Check if a workspace reference appears to be an image based on common patterns.
 * Since workspace refs don't have extensions, we check the context or assume image
 * for certain block types.
 *
 * TODO: Replace keyword matching with MIME type encoded in workspace ref.
 * e.g., workspace://abc123#image/png or workspace://abc123#video/mp4
 * This would let frontend render correctly without fragile keyword matching.
 * Extract MIME type from a workspace reference fragment.
 * e.g., "workspace://abc123#video/mp4" → "video/mp4"
 * Returns undefined if no fragment is present.
 */
function isLikelyImageRef(value: string, outputKey?: string): boolean {
  if (!isWorkspaceRef(value)) return false;

  // Check output key name for video-related hints (these are NOT images)
  const videoKeywords = ["video", "mp4", "mov", "avi", "webm", "movie", "clip"];
  if (outputKey) {
    const lowerKey = outputKey.toLowerCase();
    if (videoKeywords.some((kw) => lowerKey.includes(kw))) {
      return false;
    }
  }

  // Check output key name for image-related hints
  const imageKeywords = [
    "image",
    "img",
    "photo",
    "picture",
    "thumbnail",
    "avatar",
    "icon",
    "screenshot",
  ];
  if (outputKey) {
    const lowerKey = outputKey.toLowerCase();
    if (imageKeywords.some((kw) => lowerKey.includes(kw))) {
      return true;
    }
  }

  // Default to treating workspace refs as potential images
  // since that's the most common case for generated content
  return true;
function getWorkspaceMimeType(value: string): string | undefined {
  const hashIndex = value.indexOf("#");
  if (hashIndex === -1) return undefined;
  return value.slice(hashIndex + 1) || undefined;
}

/**
 * Format a single output value, converting workspace refs to markdown images.
 * Determine the media category of a workspace ref or data URI.
 * Uses the MIME type fragment on workspace refs when available,
 * falls back to output key keyword matching for older refs without it.
 */
function formatOutputValue(value: unknown, outputKey?: string): string {
  if (isWorkspaceRef(value) && isLikelyImageRef(value, outputKey)) {
    // Format as markdown image
    return `![${outputKey || "image"}](${value})`;
function getMediaCategory(
  value: string,
  outputKey?: string,
): "video" | "image" | "audio" | "unknown" {
  // Data URIs carry their own MIME type
  if (value.startsWith("data:video/")) return "video";
  if (value.startsWith("data:image/")) return "image";
  if (value.startsWith("data:audio/")) return "audio";

  // Workspace refs: prefer MIME type fragment
  if (isWorkspaceRef(value)) {
    const mime = getWorkspaceMimeType(value);
    if (mime) {
      if (mime.startsWith("video/")) return "video";
      if (mime.startsWith("image/")) return "image";
      if (mime.startsWith("audio/")) return "audio";
      return "unknown";
    }

    // Fallback: keyword matching on output key for older refs without fragment
    if (outputKey) {
      const lowerKey = outputKey.toLowerCase();

      const videoKeywords = [
        "video",
        "mp4",
        "mov",
        "avi",
        "webm",
        "movie",
        "clip",
      ];
      if (videoKeywords.some((kw) => lowerKey.includes(kw))) return "video";

      const imageKeywords = [
        "image",
        "img",
        "photo",
        "picture",
        "thumbnail",
        "avatar",
        "icon",
        "screenshot",
      ];
      if (imageKeywords.some((kw) => lowerKey.includes(kw))) return "image";
    }

    // Default to image for backward compatibility
    return "image";
  }

  return "unknown";
}

/**
 * Format a single output value, converting workspace refs to markdown images/videos.
 * Videos use a "video:" alt-text prefix so the MarkdownContent renderer can
 * distinguish them from images and render a <video> element.
 */
function formatOutputValue(value: unknown, outputKey?: string): string {
  if (typeof value === "string") {
    // Check for data URIs (images)
    if (value.startsWith("data:image/")) {
    const category = getMediaCategory(value, outputKey);

    if (category === "video") {
      // Format with "video:" prefix so MarkdownContent renders <video>
      return `![video:${outputKey || "video"}](${value})`;
    }

    if (category === "image") {
      return `![${outputKey || "image"}](${value})`;
    }

    // For audio, unknown workspace refs, data URIs, etc. - return as-is
    return value;
  }
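End to end, a workspace ref with a MIME fragment now routes on the fragment rather than on the output key name. How `getMediaCategory` dispatches a few representative values (all hypothetical):

```ts
getMediaCategory("workspace://abc123#video/mp4");        // "video": fragment wins
getMediaCategory("workspace://abc123", "thumbnail_url"); // "image": keyword fallback for older refs
getMediaCategory("data:audio/mpeg;base64,AAAA");         // "audio": data URIs carry their own type
getMediaCategory("workspace://abc123#application/pdf");  // "unknown": passed through as-is
```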

@@ -41,7 +41,17 @@ export function HostScopedCredentialsModal({
  const currentHost = currentUrl ? getHostFromUrl(currentUrl) : "";

  const formSchema = z.object({
    host: z.string().min(1, "Host is required"),
    host: z
      .string()
      .min(1, "Host is required")
      .refine((val) => !/^[a-zA-Z][a-zA-Z\d+\-.]*:\/\//.test(val), {
        message: "Enter only the host (e.g. api.example.com), not a full URL",
      })
      .refine((val) => !val.includes("/"), {
        message:
          "Enter only the host (e.g. api.example.com), without a trailing path. " +
          "You may specify a port (e.g. api.example.com:8080) if needed.",
      }),
    title: z.string().optional(),
    headers: z.record(z.string()).optional(),
  });
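The two refinements reject full URLs and paths while still allowing a port. A sketch of what the refined `host` schema accepts and rejects, assuming the schema above is in scope:

```ts
formSchema.shape.host.safeParse("api.example.com").success;         // true
formSchema.shape.host.safeParse("api.example.com:8080").success;    // true: port is allowed
formSchema.shape.host.safeParse("https://api.example.com").success; // false: scheme detected
formSchema.shape.host.safeParse("api.example.com/v1").success;      // false: path detected
```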

@@ -26,6 +26,7 @@ export const providerIcons: Partial<
  nvidia: fallbackIcon,
  discord: FaDiscord,
  d_id: fallbackIcon,
  elevenlabs: fallbackIcon,
  google_maps: FaGoogle,
  jina: fallbackIcon,
  ideogram: fallbackIcon,

@@ -47,7 +47,7 @@ export function Navbar() {

  const actualLoggedInLinks = [
    { name: "Home", href: homeHref },
    ...(isChatEnabled === true ? [{ name: "Tasks", href: "/library" }] : []),
    ...(isChatEnabled === true ? [{ name: "Agents", href: "/library" }] : []),
    ...loggedInLinks,
  ];

@@ -15,7 +15,6 @@ import {
import { cn } from "@/lib/utils";
import { useOnboarding } from "@/providers/onboarding/onboarding-provider";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
import { storage, Key as StorageKey } from "@/services/storage/local-storage";
import { WalletIcon } from "@phosphor-icons/react";
import { PopoverClose } from "@radix-ui/react-popover";
import { X } from "lucide-react";
@@ -175,7 +174,6 @@ export function Wallet() {
  const [prevCredits, setPrevCredits] = useState<number | null>(credits);
  const [flash, setFlash] = useState(false);
  const [walletOpen, setWalletOpen] = useState(false);
  const [lastSeenCredits, setLastSeenCredits] = useState<number | null>(null);

  const totalCount = useMemo(() => {
    return groups.reduce((acc, group) => acc + group.tasks.length, 0);
@@ -200,38 +198,6 @@ export function Wallet() {
    setCompletedCount(completed);
  }, [groups, state?.completedSteps]);

  // Load last seen credits from localStorage once on mount
  useEffect(() => {
    const stored = storage.get(StorageKey.WALLET_LAST_SEEN_CREDITS);
    if (stored !== undefined && stored !== null) {
      const parsed = parseFloat(stored);
      if (!Number.isNaN(parsed)) setLastSeenCredits(parsed);
      else setLastSeenCredits(0);
    } else {
      setLastSeenCredits(0);
    }
  }, []);

  // Auto-open once if never shown, otherwise open only when credits increase beyond last seen
  useEffect(() => {
    if (typeof credits !== "number") return;
    // Open once for first-time users
    if (state && state.walletShown === false) {
      requestAnimationFrame(() => setWalletOpen(true));
      // Mark as shown so it won't reopen on every reload
      updateState({ walletShown: true });
      return;
    }
    // Open if user gained more credits than last acknowledged
    if (
      lastSeenCredits !== null &&
      credits > lastSeenCredits &&
      walletOpen === false
    ) {
      requestAnimationFrame(() => setWalletOpen(true));
    }
  }, [credits, lastSeenCredits, state?.walletShown, updateState, walletOpen]);

  const onWalletOpen = useCallback(async () => {
    if (!state?.walletShown) {
      updateState({ walletShown: true });
@@ -324,19 +290,7 @@ export function Wallet() {
  if (credits === null || !state) return null;

  return (
    <Popover
      open={walletOpen}
      onOpenChange={(open) => {
        setWalletOpen(open);
        if (!open) {
          // Persist the latest acknowledged credits so we only auto-open on future gains
          if (typeof credits === "number") {
            storage.set(StorageKey.WALLET_LAST_SEEN_CREDITS, String(credits));
            setLastSeenCredits(credits);
          }
        }
      }}
    >
    <Popover open={walletOpen} onOpenChange={(open) => setWalletOpen(open)}>
      <PopoverTrigger asChild>
        <div className="relative inline-block">
          <button

@@ -62,7 +62,6 @@ Below is a comprehensive list of all available blocks, categorized by their prim
| [Get Store Agent Details](block-integrations/system/store_operations.md#get-store-agent-details) | Get detailed information about an agent from the store |
| [Get Weather Information](block-integrations/basic.md#get-weather-information) | Retrieves weather information for a specified location using OpenWeatherMap API |
| [Human In The Loop](block-integrations/basic.md#human-in-the-loop) | Pause execution and wait for human approval or modification of data |
| [Linear Search Issues](block-integrations/linear/issues.md#linear-search-issues) | Searches for issues on Linear |
| [List Is Empty](block-integrations/basic.md#list-is-empty) | Checks if a list is empty |
| [List Library Agents](block-integrations/system/library_operations.md#list-library-agents) | List all agents in your personal library |
| [Note](block-integrations/basic.md#note) | A visual annotation block that displays a sticky note in the workflow editor for documentation and organization purposes |
@@ -193,6 +192,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
| [Get Current Time](block-integrations/text.md#get-current-time) | This block outputs the current time |
| [Match Text Pattern](block-integrations/text.md#match-text-pattern) | Matches text against a regex pattern and forwards data to positive or negative output based on the match |
| [Text Decoder](block-integrations/text.md#text-decoder) | Decodes a string containing escape sequences into actual text |
| [Text Encoder](block-integrations/text.md#text-encoder) | Encodes a string by converting special characters into escape sequences |
| [Text Replace](block-integrations/text.md#text-replace) | This block is used to replace a text with a new text |
| [Text Split](block-integrations/text.md#text-split) | This block is used to split a text into a list of strings |
| [Word Character Count](block-integrations/text.md#word-character-count) | Counts the number of words and characters in a given text |
@@ -233,6 +233,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
| [Stagehand Extract](block-integrations/stagehand/blocks.md#stagehand-extract) | Extract structured data from a webpage |
| [Stagehand Observe](block-integrations/stagehand/blocks.md#stagehand-observe) | Find suggested actions for your workflows |
| [Unreal Text To Speech](block-integrations/llm.md#unreal-text-to-speech) | Converts text to speech using the Unreal Speech API |
| [Video Narration](block-integrations/video/narration.md#video-narration) | Generate AI narration and add to video |

## Search and Information Retrieval

@@ -472,9 +473,13 @@ Below is a comprehensive list of all available blocks, categorized by their prim

| Block Name | Description |
|------------|-------------|
| [Add Audio To Video](block-integrations/multimedia.md#add-audio-to-video) | Block to attach an audio file to a video file using moviepy |
| [Loop Video](block-integrations/multimedia.md#loop-video) | Block to loop a video to a given duration or number of repeats |
| [Media Duration](block-integrations/multimedia.md#media-duration) | Block to get the duration of a media file |
| [Add Audio To Video](block-integrations/video/add_audio.md#add-audio-to-video) | Block to attach an audio file to a video file using moviepy |
| [Loop Video](block-integrations/video/loop.md#loop-video) | Block to loop a video to a given duration or number of repeats |
| [Media Duration](block-integrations/video/duration.md#media-duration) | Block to get the duration of a media file |
| [Video Clip](block-integrations/video/clip.md#video-clip) | Extract a time segment from a video |
| [Video Concat](block-integrations/video/concat.md#video-concat) | Merge multiple video clips into one continuous video |
| [Video Download](block-integrations/video/download.md#video-download) | Download video from URL (YouTube, Vimeo, news sites, direct links) |
| [Video Text Overlay](block-integrations/video/text_overlay.md#video-text-overlay) | Add text overlay/caption to video |

## Productivity

@@ -571,6 +576,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
| [Linear Create Comment](block-integrations/linear/comment.md#linear-create-comment) | Creates a new comment on a Linear issue |
| [Linear Create Issue](block-integrations/linear/issues.md#linear-create-issue) | Creates a new issue on Linear |
| [Linear Get Project Issues](block-integrations/linear/issues.md#linear-get-project-issues) | Gets issues from a Linear project filtered by status and assignee |
| [Linear Search Issues](block-integrations/linear/issues.md#linear-search-issues) | Searches for issues on Linear |
| [Linear Search Projects](block-integrations/linear/projects.md#linear-search-projects) | Searches for projects on Linear |

## Hardware

@@ -85,7 +85,6 @@
* [LLM](block-integrations/llm.md)
* [Logic](block-integrations/logic.md)
* [Misc](block-integrations/misc.md)
* [Multimedia](block-integrations/multimedia.md)
* [Notion Create Page](block-integrations/notion/create_page.md)
* [Notion Read Database](block-integrations/notion/read_database.md)
* [Notion Read Page](block-integrations/notion/read_page.md)
@@ -129,5 +128,13 @@
* [Twitter Timeline](block-integrations/twitter/timeline.md)
* [Twitter Tweet Lookup](block-integrations/twitter/tweet_lookup.md)
* [Twitter User Lookup](block-integrations/twitter/user_lookup.md)
* [Video Add Audio](block-integrations/video/add_audio.md)
* [Video Clip](block-integrations/video/clip.md)
* [Video Concat](block-integrations/video/concat.md)
* [Video Download](block-integrations/video/download.md)
* [Video Duration](block-integrations/video/duration.md)
* [Video Loop](block-integrations/video/loop.md)
* [Video Narration](block-integrations/video/narration.md)
* [Video Text Overlay](block-integrations/video/text_overlay.md)
* [Wolfram LLM API](block-integrations/wolfram/llm_api.md)
* [Zerobounce Validate Emails](block-integrations/zerobounce/validate_emails.md)

@@ -90,9 +90,9 @@ Searches for issues on Linear

### How it works
<!-- MANUAL: how_it_works -->
This block searches for issues in Linear using a text query. It searches across issue titles, descriptions, and other fields to find matching issues.
This block searches for issues in Linear using a text query. It searches across issue titles, descriptions, and other fields to find matching issues. You can limit the number of results returned using the `max_results` parameter (default: 10, max: 100) to control token consumption and response size.

Returns a list of issues matching the search term.
Optionally filter results by team name to narrow searches to specific workspaces. If a team name is provided, the block resolves it to a team ID before searching. Returns matching issues with their state, creation date, project, and assignee information. If the search or team resolution fails, an error message is returned.
<!-- END MANUAL -->

### Inputs

@@ -100,12 +100,14 @@ Returns a list of issues matching the search term.
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| term | Term to search for issues | str | Yes |
| max_results | Maximum number of results to return | int | No |
| team_name | Optional team name to filter results (e.g., 'Internal', 'Open Source') | str | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| error | Error message if the search failed | str |
| issues | List of issues | List[Issue] |

### Possible use case

@@ -65,7 +65,7 @@ The result routes data to yes_output or no_output, enabling intelligent branchin
| condition | A plaintext English description of the condition to evaluate | str | Yes |
| yes_value | (Optional) Value to output if the condition is true. If not provided, input_value will be used. | Yes Value | No |
| no_value | (Optional) Value to output if the condition is false. If not provided, input_value will be used. | No Value | No |
| model | The language model to use for evaluating the condition. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for evaluating the condition. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |

### Outputs

@@ -103,7 +103,7 @@ The block sends the entire conversation history to the chosen LLM, including sys
|-------|-------------|------|----------|
| prompt | The prompt to send to the language model. | str | No |
| messages | List of messages in the conversation. | List[Any] | Yes |
| model | The language model to use for the conversation. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for the conversation. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
| ollama_host | Ollama host for local models | str | No |

@@ -257,7 +257,7 @@ The block formulates a prompt based on the given focus or source data, sends it
|-------|-------------|------|----------|
| focus | The focus of the list to generate. | str | No |
| source_data | The data to generate the list from. | str | No |
| model | The language model to use for generating the list. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for generating the list. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| max_retries | Maximum number of retries for generating a valid list. | int | No |
| force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No |
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |

@@ -424,7 +424,7 @@ The block sends the input prompt to a chosen LLM, along with any system prompts
| prompt | The prompt to send to the language model. | str | Yes |
| expected_format | Expected format of the response. If provided, the response will be validated against this format. The keys should be the expected fields in the response, and the values should be the description of the field. | Dict[str, str] | Yes |
| list_result | Whether the response should be a list of objects in the expected format. | bool | No |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No |
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
| conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No |

@@ -464,7 +464,7 @@ The block sends the input prompt to a chosen LLM, processes the response, and re
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| prompt | The prompt to send to the language model. You can use any of the {keys} from Prompt Values to fill in the prompt with values from the prompt values dictionary by putting them in curly braces. | str | Yes |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
| retry | Number of times to retry the LLM call if the response does not match the expected format. | int | No |
| prompt_values | Values used to fill in the prompt. The values can be used in the prompt by putting them in double curly braces, e.g. {{variable_name}}. | Dict[str, str] | No |

@@ -501,7 +501,7 @@ The block splits the input text into smaller chunks, sends each chunk to an LLM
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| text | The text to summarize. | str | Yes |
| model | The language model to use for summarizing the text. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for summarizing the text. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| focus | The topic to focus on in the summary | str | No |
| style | The style of the summary to generate. | "concise" \| "detailed" \| "bullet points" \| "numbered list" | No |
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |

@@ -763,7 +763,7 @@ Configure agent_mode_max_iterations to control loop behavior: 0 for single decis
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| prompt | The prompt to send to the language model. | str | Yes |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| multiple_tool_calls | Whether to allow multiple tool calls in a single response. | bool | No |
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
| conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No |

@@ -380,6 +380,42 @@ This is useful when working with data from APIs or files where escape sequences

---

## Text Encoder

### What it is
Encodes a string by converting special characters into escape sequences

### How it works
<!-- MANUAL: how_it_works -->
The Text Encoder takes the input string and applies Python's `unicode_escape` encoding (equivalent to `codecs.encode(text, "unicode_escape").decode("utf-8")`) to transform special characters like newlines, tabs, and backslashes into their escaped forms.

The block relies on the input schema to ensure the value is a string; non-string inputs are rejected by validation, and any encoding failures surface as block errors. Non-ASCII characters are emitted as `\uXXXX` sequences, which is useful for ASCII-only payloads.
<!-- END MANUAL -->
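
A minimal sketch of the core transformation described above (the block itself wraps this with schema validation and error reporting):

```python
import codecs

def encode_text(text: str) -> str:
    # Convert special characters (newlines, tabs, backslashes, non-ASCII)
    # into their escape-sequence form via unicode_escape.
    return codecs.encode(text, "unicode_escape").decode("utf-8")

# Prints "Hello\tworld\n" with literal backslashes instead of a tab/newline.
print(encode_text("Hello\tworld\n"))
```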

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| text | A string containing special characters to be encoded | str | Yes |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if encoding fails | str |
| encoded_text | The encoded text with special characters converted to escape sequences | str |

### Possible use case
<!-- MANUAL: use_case -->
**JSON Payload Preparation**: Encode multiline or quoted text before embedding it in JSON string fields to ensure proper escaping.

**Config/ENV Generation**: Convert template text into escaped strings for `.env` or YAML values that require special character handling.

**Snapshot Fixtures**: Produce stable escaped strings for golden files or API tests where consistent text representation is needed.
<!-- END MANUAL -->

---

## Text Replace

### What it is

docs/integrations/block-integrations/video/add_audio.md
@@ -0,0 +1,39 @@
# Video Add Audio
<!-- MANUAL: file_description -->
This block allows you to attach a separate audio track to a video file, replacing or combining with the original audio.
<!-- END MANUAL -->

## Add Audio To Video

### What it is
Block to attach an audio file to a video file using moviepy.

### How it works
<!-- MANUAL: how_it_works -->
The block uses MoviePy to combine video and audio files. It loads the video and audio inputs (which can be URLs, data URIs, or local paths), optionally scales the audio volume, then writes the combined result to a new video file using H.264 video codec and AAC audio codec.
<!-- END MANUAL -->
<!-- END MANUAL -->
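
A minimal sketch of the combine step, assuming the MoviePy v2 API (`with_audio`, `with_volume_scaled`); file names are placeholders:

```python
from moviepy import AudioFileClip, VideoFileClip

video = VideoFileClip("input.mp4")
# Scale the new track's volume (the block's `volume` input, 1.0 = original).
audio = AudioFileClip("music.mp3").with_volume_scaled(1.0)

# Attach the audio and encode with the codecs named above.
video.with_audio(audio).write_videofile(
    "output.mp4", codec="libx264", audio_codec="aac"
)
```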

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| video_in | Video input (URL, data URI, or local path). | str (file) | Yes |
| audio_in | Audio input (URL, data URI, or local path). | str (file) | Yes |
| volume | Volume scale for the newly attached audio track (1.0 = original). | float | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| video_out | Final video (with attached audio), as a path or data URI. | str (file) |

### Possible use case
<!-- MANUAL: use_case -->
- Adding background music to a silent screen recording
- Replacing original audio with a voiceover or translated audio track
- Combining AI-generated speech with stock footage
- Adding sound effects to video content
<!-- END MANUAL -->

---

docs/integrations/block-integrations/video/clip.md
@@ -0,0 +1,41 @@
# Video Clip
<!-- MANUAL: file_description -->
This block extracts a specific time segment from a video file, allowing you to trim videos to precise start and end times.
<!-- END MANUAL -->

## Video Clip

### What it is
Extract a time segment from a video

### How it works
<!-- MANUAL: how_it_works -->
The block uses MoviePy's `subclipped` function to extract a portion of the video between specified start and end times. It validates that end time is greater than start time, then creates a new video file containing only the selected segment. The output is encoded with H.264 video codec and AAC audio codec, preserving both video and audio from the original clip.
<!-- END MANUAL -->
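
A minimal sketch of the trim step, assuming MoviePy v2's `subclipped`; the times and paths are placeholders:

```python
from moviepy import VideoFileClip

start_time, end_time = 5.0, 12.5
if end_time <= start_time:
    raise ValueError("end_time must be greater than start_time")

# Extract the [start_time, end_time] segment and re-encode it.
clip = VideoFileClip("input.mp4").subclipped(start_time, end_time)
clip.write_videofile("clip.mp4", codec="libx264", audio_codec="aac")
print(f"duration: {clip.duration:.2f}s")  # the block's `duration` output
```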

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| video_in | Input video (URL, data URI, or local path) | str (file) | Yes |
| start_time | Start time in seconds | float | Yes |
| end_time | End time in seconds | float | Yes |
| output_format | Output format | "mp4" \| "webm" \| "mkv" \| "mov" | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| video_out | Clipped video file (path or data URI) | str (file) |
| duration | Clip duration in seconds | float |

### Possible use case
<!-- MANUAL: use_case -->
- Extracting highlights from a longer video
- Trimming intro/outro from recorded content
- Creating short clips for social media from longer videos
- Isolating specific segments for further processing in a workflow
<!-- END MANUAL -->

---

docs/integrations/block-integrations/video/concat.md
@@ -0,0 +1,41 @@
# Video Concat
<!-- MANUAL: file_description -->
This block merges multiple video clips into a single continuous video, with optional transitions between clips.
<!-- END MANUAL -->

## Video Concat

### What it is
Merge multiple video clips into one continuous video

### How it works
<!-- MANUAL: how_it_works -->
The block uses MoviePy's `concatenate_videoclips` function to join multiple videos in sequence. It supports three transition modes: **none** (direct concatenation), **crossfade** (smooth blending where clips overlap), and **fade_black** (each clip fades out to black and the next fades in). At least 2 videos are required. The output is encoded with H.264 video codec and AAC audio codec.
<!-- END MANUAL -->
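
A minimal sketch of the "none" and "fade_black" modes, assuming MoviePy v2 (`concatenate_videoclips`, `vfx.FadeIn`/`vfx.FadeOut`); file names are placeholders:

```python
from moviepy import VideoFileClip, concatenate_videoclips, vfx

clips = [VideoFileClip(p) for p in ["a.mp4", "b.mp4", "c.mp4"]]

# transition="none": join the clips back to back.
joined = concatenate_videoclips(clips, method="compose")

# transition="fade_black": fade each clip out, and the next one in.
d = 1.0  # transition_duration
faded = [c.with_effects([vfx.FadeIn(d), vfx.FadeOut(d)]) for c in clips]
joined = concatenate_videoclips(faded, method="compose")

joined.write_videofile("merged.mp4", codec="libx264", audio_codec="aac")
print(f"total duration: {joined.duration:.1f}s")  # `total_duration` output
```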

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| videos | List of video files to concatenate (in order) | List[str (file)] | Yes |
| transition | Transition between clips | "none" \| "crossfade" \| "fade_black" | No |
| transition_duration | Transition duration in seconds | int | No |
| output_format | Output format | "mp4" \| "webm" \| "mkv" \| "mov" | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| video_out | Concatenated video file (path or data URI) | str (file) |
| total_duration | Total duration in seconds | float |

### Possible use case
<!-- MANUAL: use_case -->
- Combining multiple clips into a compilation video
- Assembling intro, main content, and outro segments
- Creating montages from multiple source videos
- Building video playlists or slideshows with transitions
<!-- END MANUAL -->

---

docs/integrations/block-integrations/video/download.md
@@ -0,0 +1,42 @@
# Video Download
<!-- MANUAL: file_description -->
This block downloads videos from URLs, supporting a wide range of video platforms and direct links.
<!-- END MANUAL -->

## Video Download

### What it is
Download video from URL (YouTube, Vimeo, news sites, direct links)

### How it works
<!-- MANUAL: how_it_works -->
The block uses yt-dlp, a powerful video downloading library that supports over 1000 websites. It accepts a URL, quality preference, and output format, then downloads the video while merging the best available video and audio streams for the selected quality. Quality options: **best** (highest available), **1080p/720p/480p** (maximum resolution at that height), **audio_only** (extracts just the audio track).
<!-- END MANUAL -->
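
A minimal sketch of a quality-capped download via the yt-dlp Python API; the format string mirrors a "720p" preference merged into mp4, and the URL is a placeholder:

```python
from yt_dlp import YoutubeDL

opts = {
    # Best video at <=720p plus best audio, merged into a single mp4.
    "format": "bestvideo[height<=720]+bestaudio/best[height<=720]",
    "merge_output_format": "mp4",
    "outtmpl": "downloads/%(title)s.%(ext)s",
}
with YoutubeDL(opts) as ydl:
    info = ydl.extract_info("https://example.com/watch?v=abc", download=True)
    print(info.get("title"), info.get("duration"))  # title + length in seconds
```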

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| url | URL of the video to download (YouTube, Vimeo, direct link, etc.) | str | Yes |
| quality | Video quality preference | "best" \| "1080p" \| "720p" \| "480p" \| "audio_only" | No |
| output_format | Output video format | "mp4" \| "webm" \| "mkv" | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| video_file | Downloaded video (path or data URI) | str (file) |
| duration | Video duration in seconds | float |
| title | Video title from source | str |
| source_url | Original source URL | str |

### Possible use case
<!-- MANUAL: use_case -->
- Downloading source videos for editing or remixing
- Archiving video content for offline processing
- Extracting audio from videos for transcription or podcast creation
- Gathering video content for automated content pipelines
<!-- END MANUAL -->

---

docs/integrations/block-integrations/video/duration.md
@@ -0,0 +1,38 @@
# Video Duration
<!-- MANUAL: file_description -->
This block retrieves the duration of video or audio files, useful for planning and conditional logic in media workflows.
<!-- END MANUAL -->

## Media Duration

### What it is
Block to get the duration of a media file.

### How it works
<!-- MANUAL: how_it_works -->
The block uses MoviePy to load the media file and extract its duration property. It supports both video files (using VideoFileClip) and audio files (using AudioFileClip), determined by the `is_video` flag. The media can be provided as a URL, data URI, or local file path. The duration is returned in seconds as a floating-point number.
<!-- END MANUAL -->
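
A minimal sketch of the lookup, assuming MoviePy v2; the path and flag are placeholders:

```python
from moviepy import AudioFileClip, VideoFileClip

def media_duration(path: str, is_video: bool = True) -> float:
    # VideoFileClip for video inputs, AudioFileClip for audio-only inputs.
    clip = VideoFileClip(path) if is_video else AudioFileClip(path)
    try:
        return float(clip.duration)  # seconds
    finally:
        clip.close()

print(media_duration("input.mp4"))
```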

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| media_in | Media input (URL, data URI, or local path). | str (file) | Yes |
| is_video | Whether the media is a video (True) or audio (False). | bool | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| duration | Duration of the media file (in seconds). | float |

### Possible use case
<!-- MANUAL: use_case -->
- Checking video length before processing to avoid timeout issues
- Calculating how many times to loop a video to reach a target duration
- Validating that uploaded content meets length requirements
- Building conditional workflows based on media duration
<!-- END MANUAL -->

---

docs/integrations/block-integrations/video/loop.md
@@ -0,0 +1,39 @@
# Video Loop
<!-- MANUAL: file_description -->
This block repeats a video to extend its duration, either to a specific length or a set number of repetitions.
<!-- END MANUAL -->

## Loop Video

### What it is
Block to loop a video to a given duration or number of repeats.

### How it works
<!-- MANUAL: how_it_works -->
The block uses MoviePy's Loop effect to repeat a video clip. You can specify either a target duration (the video will repeat until reaching that length) or a number of loops (the video will repeat that many times). The Loop effect handles both video and audio looping automatically, maintaining sync. Either `duration` or `n_loops` must be provided. The output is encoded with H.264 video codec and AAC audio codec.
<!-- END MANUAL -->
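
A minimal sketch of both modes, assuming MoviePy v2's `vfx.Loop` effect and its `duration`/`n` parameters; paths and values are placeholders:

```python
from moviepy import VideoFileClip, vfx

clip = VideoFileClip("short.mp4")

looped_to_time = clip.with_effects([vfx.Loop(duration=60)])  # `duration` input
looped_by_count = clip.with_effects([vfx.Loop(n=3)])         # `n_loops` input

looped_to_time.write_videofile("looped.mp4", codec="libx264", audio_codec="aac")
```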

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| video_in | The input video (can be a URL, data URI, or local path). | str (file) | Yes |
| duration | Target duration (in seconds) to loop the video to. Either duration or n_loops must be provided. | float | No |
| n_loops | Number of times to repeat the video. Either n_loops or duration must be provided. | int | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| video_out | Looped video returned either as a relative path or a data URI. | str (file) |

### Possible use case
<!-- MANUAL: use_case -->
- Extending a short background video to match the length of narration audio
- Creating seamless looping content for digital signage
- Repeating a product demo video multiple times for emphasis
- Extending short clips to meet minimum duration requirements for platforms
<!-- END MANUAL -->

---

docs/integrations/block-integrations/video/narration.md
@@ -0,0 +1,44 @@
# Video Narration
<!-- MANUAL: file_description -->
This block generates AI voiceover narration using ElevenLabs and adds it to a video, with flexible audio mixing options.
<!-- END MANUAL -->

## Video Narration

### What it is
Generate AI narration and add to video

### How it works
<!-- MANUAL: how_it_works -->
The block uses ElevenLabs text-to-speech API to generate natural-sounding narration from your script. It then combines the narration with the video using MoviePy. Three audio mixing modes are available: **replace** (completely replaces original audio), **mix** (blends narration with original audio at configurable volumes), and **ducking** (similar to mix but applies stronger attenuation to original audio, making narration more prominent). The block outputs both the final video and the generated audio file separately.
<!-- END MANUAL -->
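
A minimal sketch of the two steps, assuming ElevenLabs' REST text-to-speech endpoint and a MoviePy v2 "mix"-style blend; the API key, voice ID, volumes, and paths are placeholders:

```python
import requests
from moviepy import AudioFileClip, CompositeAudioClip, VideoFileClip

# 1. Generate the narration audio with ElevenLabs TTS.
resp = requests.post(
    "https://api.elevenlabs.io/v1/text-to-speech/VOICE_ID",
    headers={"xi-api-key": "ELEVENLABS_API_KEY"},
    json={"text": "Welcome to the demo.", "model_id": "eleven_multilingual_v2"},
)
resp.raise_for_status()
with open("narration.mp3", "wb") as f:
    f.write(resp.content)

# 2. Blend the narration over the attenuated original track
#    (assumes the input video actually has an audio track).
video = VideoFileClip("input.mp4")
narration = AudioFileClip("narration.mp3").with_volume_scaled(1.0)
original = video.audio.with_volume_scaled(0.3)  # lower for 'mix'/'ducking'
mixed = CompositeAudioClip([original, narration])
video.with_audio(mixed).write_videofile(
    "narrated.mp4", codec="libx264", audio_codec="aac"
)
```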

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| video_in | Input video (URL, data URI, or local path) | str (file) | Yes |
| script | Narration script text | str | Yes |
| voice_id | ElevenLabs voice ID | str | No |
| model_id | ElevenLabs TTS model | "eleven_multilingual_v2" \| "eleven_flash_v2_5" \| "eleven_turbo_v2_5" \| "eleven_turbo_v2" | No |
| mix_mode | How to combine with original audio. 'ducking' applies stronger attenuation than 'mix'. | "replace" \| "mix" \| "ducking" | No |
| narration_volume | Narration volume (0.0 to 2.0) | float | No |
| original_volume | Original audio volume when mixing (0.0 to 1.0) | float | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| video_out | Video with narration (path or data URI) | str (file) |
| audio_file | Generated audio file (path or data URI) | str (file) |

### Possible use case
<!-- MANUAL: use_case -->
- Adding professional voiceover to product demos or tutorials
- Creating narrated explainer videos from screen recordings
- Generating multi-language versions of video content
- Adding commentary to gameplay or walkthrough videos
<!-- END MANUAL -->

---

docs/integrations/block-integrations/video/text_overlay.md
@@ -0,0 +1,44 @@
# Video Text Overlay
<!-- MANUAL: file_description -->
This block adds customizable text captions or titles to videos, with control over positioning, timing, and styling.
<!-- END MANUAL -->

## Video Text Overlay

### What it is
Add text overlay/caption to video

### How it works
<!-- MANUAL: how_it_works -->
The block uses MoviePy's TextClip and CompositeVideoClip to render text onto video frames. The text is created as a separate clip with configurable font size, color, and optional background color, then composited over the video at the specified position. Timing can be controlled to show text only during specific portions of the video. Position options include center alignments (top, center, bottom) and corner positions (top-left, top-right, bottom-left, bottom-right). The output is encoded with H.264 video codec and AAC audio codec.
<!-- END MANUAL -->
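
A minimal sketch of the overlay step, assuming MoviePy v2; note that depending on your MoviePy version, `TextClip` may require an explicit font file path. The text, timing, and paths are placeholders:

```python
from moviepy import CompositeVideoClip, TextClip, VideoFileClip

video = VideoFileClip("input.mp4")
caption = (
    TextClip(text="Subscribe now!", font_size=48, color="white",
             bg_color="black")
    .with_position(("center", "bottom"))  # the block's `position` input
    .with_start(2.0)                      # start_time
    .with_duration(3.0)                   # visible until t=5.0 (end_time)
)
CompositeVideoClip([video, caption]).write_videofile(
    "overlaid.mp4", codec="libx264", audio_codec="aac"
)
```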

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| video_in | Input video (URL, data URI, or local path) | str (file) | Yes |
| text | Text to overlay on video | str | Yes |
| position | Position of text on screen | "top" \| "center" \| "bottom" \| "top-left" \| "top-right" \| "bottom-left" \| "bottom-right" | No |
| start_time | When to show text (seconds). None = entire video | float | No |
| end_time | When to hide text (seconds). None = until end | float | No |
| font_size | Font size | int | No |
| font_color | Font color (hex or name) | str | No |
| bg_color | Background color behind text (None for transparent) | str | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| video_out | Video with text overlay (path or data URI) | str (file) |

### Possible use case
<!-- MANUAL: use_case -->
- Adding titles or chapter headings to video content
- Creating lower-thirds with speaker names or captions
- Watermarking videos with branding text
- Adding call-to-action text at specific moments in a video
<!-- END MANUAL -->

---