mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-01-19 20:18:22 -05:00
Compare commits
11 Commits
feature/vi
...
feat/ai-ge
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5b86afaaed | ||
|
|
3485d2ae85 | ||
|
|
31d0ae19e0 | ||
|
|
d00e6efe9b | ||
|
|
610a659c52 | ||
|
|
4eac081cf9 | ||
|
|
eef7cf8cf8 | ||
|
|
7c90b16209 | ||
|
|
3ec2f1953f | ||
|
|
8d3dfe2cf8 | ||
|
|
7133580d43 |
@@ -28,7 +28,6 @@ from backend.executor.manager import get_db_async_client
|
||||
from backend.util.settings import Settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
settings = Settings()
|
||||
|
||||
|
||||
class ExecutionAnalyticsRequest(BaseModel):
|
||||
@@ -64,8 +63,6 @@ class ExecutionAnalyticsResult(BaseModel):
|
||||
score: Optional[float]
|
||||
status: str # "success", "failed", "skipped"
|
||||
error_message: Optional[str] = None
|
||||
started_at: Optional[datetime] = None
|
||||
ended_at: Optional[datetime] = None
|
||||
|
||||
|
||||
class ExecutionAnalyticsResponse(BaseModel):
|
||||
@@ -227,6 +224,11 @@ async def generate_execution_analytics(
|
||||
)
|
||||
|
||||
try:
|
||||
# Validate model configuration
|
||||
settings = Settings()
|
||||
if not settings.secrets.openai_internal_api_key:
|
||||
raise HTTPException(status_code=500, detail="OpenAI API key not configured")
|
||||
|
||||
# Get database client
|
||||
db_client = get_db_async_client()
|
||||
|
||||
@@ -318,8 +320,6 @@ async def generate_execution_analytics(
|
||||
),
|
||||
status="skipped",
|
||||
error_message=None, # Not an error - just already processed
|
||||
started_at=execution.started_at,
|
||||
ended_at=execution.ended_at,
|
||||
)
|
||||
)
|
||||
|
||||
@@ -349,9 +349,6 @@ async def _process_batch(
|
||||
) -> list[ExecutionAnalyticsResult]:
|
||||
"""Process a batch of executions concurrently."""
|
||||
|
||||
if not settings.secrets.openai_internal_api_key:
|
||||
raise HTTPException(status_code=500, detail="OpenAI API key not configured")
|
||||
|
||||
async def process_single_execution(execution) -> ExecutionAnalyticsResult:
|
||||
try:
|
||||
# Generate activity status and score using the specified model
|
||||
@@ -390,8 +387,6 @@ async def _process_batch(
|
||||
score=None,
|
||||
status="skipped",
|
||||
error_message="Activity generation returned None",
|
||||
started_at=execution.started_at,
|
||||
ended_at=execution.ended_at,
|
||||
)
|
||||
|
||||
# Update the execution stats
|
||||
@@ -421,8 +416,6 @@ async def _process_batch(
|
||||
summary_text=activity_response["activity_status"],
|
||||
score=activity_response["correctness_score"],
|
||||
status="success",
|
||||
started_at=execution.started_at,
|
||||
ended_at=execution.ended_at,
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
@@ -436,8 +429,6 @@ async def _process_batch(
|
||||
score=None,
|
||||
status="failed",
|
||||
error_message=str(e),
|
||||
started_at=execution.started_at,
|
||||
ended_at=execution.ended_at,
|
||||
)
|
||||
|
||||
# Process all executions in the batch concurrently
|
||||
|
||||
@@ -218,6 +218,7 @@ async def save_agent_to_library(
|
||||
library_agents = await library_db.create_library_agent(
|
||||
graph=created_graph,
|
||||
user_id=user_id,
|
||||
sensitive_action_safe_mode=True,
|
||||
create_library_agents_for_sub_graphs=False,
|
||||
)
|
||||
|
||||
|
||||
@@ -401,27 +401,10 @@ async def add_generated_agent_image(
|
||||
)
|
||||
|
||||
|
||||
def _initialize_graph_settings(graph: graph_db.GraphModel) -> GraphSettings:
|
||||
"""
|
||||
Initialize GraphSettings based on graph content.
|
||||
|
||||
Args:
|
||||
graph: The graph to analyze
|
||||
|
||||
Returns:
|
||||
GraphSettings with appropriate human_in_the_loop_safe_mode value
|
||||
"""
|
||||
if graph.has_human_in_the_loop:
|
||||
# Graph has HITL blocks - set safe mode to True by default
|
||||
return GraphSettings(human_in_the_loop_safe_mode=True)
|
||||
else:
|
||||
# Graph has no HITL blocks - keep None
|
||||
return GraphSettings(human_in_the_loop_safe_mode=None)
|
||||
|
||||
|
||||
async def create_library_agent(
|
||||
graph: graph_db.GraphModel,
|
||||
user_id: str,
|
||||
sensitive_action_safe_mode: bool = False,
|
||||
create_library_agents_for_sub_graphs: bool = True,
|
||||
) -> list[library_model.LibraryAgent]:
|
||||
"""
|
||||
@@ -430,6 +413,7 @@ async def create_library_agent(
|
||||
Args:
|
||||
agent: The agent/Graph to add to the library.
|
||||
user_id: The user to whom the agent will be added.
|
||||
sensitive_action_safe_mode: Whether sensitive action blocks require review.
|
||||
create_library_agents_for_sub_graphs: If True, creates LibraryAgent records for sub-graphs as well.
|
||||
|
||||
Returns:
|
||||
@@ -465,7 +449,10 @@ async def create_library_agent(
|
||||
}
|
||||
},
|
||||
settings=SafeJson(
|
||||
_initialize_graph_settings(graph_entry).model_dump()
|
||||
GraphSettings.from_graph(
|
||||
graph_entry,
|
||||
sensitive_action_safe_mode=sensitive_action_safe_mode,
|
||||
).model_dump()
|
||||
),
|
||||
),
|
||||
include=library_agent_include(
|
||||
@@ -627,33 +614,6 @@ async def update_library_agent(
|
||||
raise DatabaseError("Failed to update library agent") from e
|
||||
|
||||
|
||||
async def update_library_agent_settings(
|
||||
user_id: str,
|
||||
agent_id: str,
|
||||
settings: GraphSettings,
|
||||
) -> library_model.LibraryAgent:
|
||||
"""
|
||||
Updates the settings for a specific LibraryAgent.
|
||||
|
||||
Args:
|
||||
user_id: The owner of the LibraryAgent.
|
||||
agent_id: The ID of the LibraryAgent to update.
|
||||
settings: New GraphSettings to apply.
|
||||
|
||||
Returns:
|
||||
The updated LibraryAgent.
|
||||
|
||||
Raises:
|
||||
NotFoundError: If the specified LibraryAgent does not exist.
|
||||
DatabaseError: If there's an error in the update operation.
|
||||
"""
|
||||
return await update_library_agent(
|
||||
library_agent_id=agent_id,
|
||||
user_id=user_id,
|
||||
settings=settings,
|
||||
)
|
||||
|
||||
|
||||
async def delete_library_agent(
|
||||
library_agent_id: str, user_id: str, soft_delete: bool = True
|
||||
) -> None:
|
||||
@@ -838,7 +798,7 @@ async def add_store_agent_to_library(
|
||||
"isCreatedByUser": False,
|
||||
"useGraphIsActiveVersion": False,
|
||||
"settings": SafeJson(
|
||||
_initialize_graph_settings(graph_model).model_dump()
|
||||
GraphSettings.from_graph(graph_model).model_dump()
|
||||
),
|
||||
},
|
||||
include=library_agent_include(
|
||||
@@ -1228,8 +1188,14 @@ async def fork_library_agent(
|
||||
)
|
||||
new_graph = await on_graph_activate(new_graph, user_id=user_id)
|
||||
|
||||
# Create a library agent for the new graph
|
||||
return (await create_library_agent(new_graph, user_id))[0]
|
||||
# Create a library agent for the new graph, preserving sensitive_action_safe_mode
|
||||
return (
|
||||
await create_library_agent(
|
||||
new_graph,
|
||||
user_id,
|
||||
sensitive_action_safe_mode=original_agent.settings.sensitive_action_safe_mode,
|
||||
)
|
||||
)[0]
|
||||
except prisma.errors.PrismaError as e:
|
||||
logger.error(f"Database error cloning library agent: {e}")
|
||||
raise DatabaseError("Failed to fork library agent") from e
|
||||
|
||||
@@ -73,6 +73,12 @@ class LibraryAgent(pydantic.BaseModel):
|
||||
has_external_trigger: bool = pydantic.Field(
|
||||
description="Whether the agent has an external trigger (e.g. webhook) node"
|
||||
)
|
||||
has_human_in_the_loop: bool = pydantic.Field(
|
||||
description="Whether the agent has human-in-the-loop blocks"
|
||||
)
|
||||
has_sensitive_action: bool = pydantic.Field(
|
||||
description="Whether the agent has sensitive action blocks"
|
||||
)
|
||||
trigger_setup_info: Optional[GraphTriggerInfo] = None
|
||||
|
||||
# Indicates whether there's a new output (based on recent runs)
|
||||
@@ -180,6 +186,8 @@ class LibraryAgent(pydantic.BaseModel):
|
||||
graph.credentials_input_schema if sub_graphs is not None else None
|
||||
),
|
||||
has_external_trigger=graph.has_external_trigger,
|
||||
has_human_in_the_loop=graph.has_human_in_the_loop,
|
||||
has_sensitive_action=graph.has_sensitive_action,
|
||||
trigger_setup_info=graph.trigger_setup_info,
|
||||
new_output=new_output,
|
||||
can_access_graph=can_access_graph,
|
||||
|
||||
@@ -52,6 +52,8 @@ async def test_get_library_agents_success(
|
||||
output_schema={"type": "object", "properties": {}},
|
||||
credentials_input_schema={"type": "object", "properties": {}},
|
||||
has_external_trigger=False,
|
||||
has_human_in_the_loop=False,
|
||||
has_sensitive_action=False,
|
||||
status=library_model.LibraryAgentStatus.COMPLETED,
|
||||
recommended_schedule_cron=None,
|
||||
new_output=False,
|
||||
@@ -75,6 +77,8 @@ async def test_get_library_agents_success(
|
||||
output_schema={"type": "object", "properties": {}},
|
||||
credentials_input_schema={"type": "object", "properties": {}},
|
||||
has_external_trigger=False,
|
||||
has_human_in_the_loop=False,
|
||||
has_sensitive_action=False,
|
||||
status=library_model.LibraryAgentStatus.COMPLETED,
|
||||
recommended_schedule_cron=None,
|
||||
new_output=False,
|
||||
@@ -150,6 +154,8 @@ async def test_get_favorite_library_agents_success(
|
||||
output_schema={"type": "object", "properties": {}},
|
||||
credentials_input_schema={"type": "object", "properties": {}},
|
||||
has_external_trigger=False,
|
||||
has_human_in_the_loop=False,
|
||||
has_sensitive_action=False,
|
||||
status=library_model.LibraryAgentStatus.COMPLETED,
|
||||
recommended_schedule_cron=None,
|
||||
new_output=False,
|
||||
@@ -218,6 +224,8 @@ def test_add_agent_to_library_success(
|
||||
output_schema={"type": "object", "properties": {}},
|
||||
credentials_input_schema={"type": "object", "properties": {}},
|
||||
has_external_trigger=False,
|
||||
has_human_in_the_loop=False,
|
||||
has_sensitive_action=False,
|
||||
status=library_model.LibraryAgentStatus.COMPLETED,
|
||||
new_output=False,
|
||||
can_access_graph=True,
|
||||
|
||||
@@ -761,10 +761,8 @@ async def create_new_graph(
|
||||
graph.reassign_ids(user_id=user_id, reassign_graph_id=True)
|
||||
graph.validate_graph(for_run=False)
|
||||
|
||||
# The return value of the create graph & library function is intentionally not used here,
|
||||
# as the graph already valid and no sub-graphs are returned back.
|
||||
await graph_db.create_graph(graph, user_id=user_id)
|
||||
await library_db.create_library_agent(graph, user_id=user_id)
|
||||
await library_db.create_library_agent(graph, user_id)
|
||||
activated_graph = await on_graph_activate(graph, user_id=user_id)
|
||||
|
||||
if create_graph.source == "builder":
|
||||
@@ -888,21 +886,19 @@ async def set_graph_active_version(
|
||||
async def _update_library_agent_version_and_settings(
|
||||
user_id: str, agent_graph: graph_db.GraphModel
|
||||
) -> library_model.LibraryAgent:
|
||||
# Keep the library agent up to date with the new active version
|
||||
library = await library_db.update_agent_version_in_library(
|
||||
user_id, agent_graph.id, agent_graph.version
|
||||
)
|
||||
# If the graph has HITL node, initialize the setting if it's not already set.
|
||||
if (
|
||||
agent_graph.has_human_in_the_loop
|
||||
and library.settings.human_in_the_loop_safe_mode is None
|
||||
):
|
||||
await library_db.update_library_agent_settings(
|
||||
updated_settings = GraphSettings.from_graph(
|
||||
graph=agent_graph,
|
||||
hitl_safe_mode=library.settings.human_in_the_loop_safe_mode,
|
||||
sensitive_action_safe_mode=library.settings.sensitive_action_safe_mode,
|
||||
)
|
||||
if updated_settings != library.settings:
|
||||
library = await library_db.update_library_agent(
|
||||
library_agent_id=library.id,
|
||||
user_id=user_id,
|
||||
agent_id=library.id,
|
||||
settings=library.settings.model_copy(
|
||||
update={"human_in_the_loop_safe_mode": True}
|
||||
),
|
||||
settings=updated_settings,
|
||||
)
|
||||
return library
|
||||
|
||||
@@ -919,21 +915,18 @@ async def update_graph_settings(
|
||||
user_id: Annotated[str, Security(get_user_id)],
|
||||
) -> GraphSettings:
|
||||
"""Update graph settings for the user's library agent."""
|
||||
# Get the library agent for this graph
|
||||
library_agent = await library_db.get_library_agent_by_graph_id(
|
||||
graph_id=graph_id, user_id=user_id
|
||||
)
|
||||
if not library_agent:
|
||||
raise HTTPException(404, f"Graph #{graph_id} not found in user's library")
|
||||
|
||||
# Update the library agent settings
|
||||
updated_agent = await library_db.update_library_agent_settings(
|
||||
updated_agent = await library_db.update_library_agent(
|
||||
library_agent_id=library_agent.id,
|
||||
user_id=user_id,
|
||||
agent_id=library_agent.id,
|
||||
settings=settings,
|
||||
)
|
||||
|
||||
# Return the updated settings
|
||||
return GraphSettings.model_validate(updated_agent.settings)
|
||||
|
||||
|
||||
|
||||
@@ -84,7 +84,7 @@ class HITLReviewHelper:
|
||||
Exception: If review creation or status update fails
|
||||
"""
|
||||
# Skip review if safe mode is disabled - return auto-approved result
|
||||
if not execution_context.safe_mode:
|
||||
if not execution_context.human_in_the_loop_safe_mode:
|
||||
logger.info(
|
||||
f"Block {block_name} skipping review for node {node_exec_id} - safe mode disabled"
|
||||
)
|
||||
|
||||
@@ -104,7 +104,7 @@ class HumanInTheLoopBlock(Block):
|
||||
execution_context: ExecutionContext,
|
||||
**_kwargs,
|
||||
) -> BlockOutput:
|
||||
if not execution_context.safe_mode:
|
||||
if not execution_context.human_in_the_loop_safe_mode:
|
||||
logger.info(
|
||||
f"HITL block skipping review for node {node_exec_id} - safe mode disabled"
|
||||
)
|
||||
|
||||
@@ -242,7 +242,7 @@ async def test_smart_decision_maker_tracks_llm_stats():
|
||||
outputs = {}
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
@@ -343,7 +343,7 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
@@ -409,7 +409,7 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
@@ -471,7 +471,7 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
outputs = {}
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
@@ -535,7 +535,7 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
outputs = {}
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
@@ -658,7 +658,7 @@ async def test_smart_decision_maker_raw_response_conversion():
|
||||
outputs = {}
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
@@ -730,7 +730,7 @@ async def test_smart_decision_maker_raw_response_conversion():
|
||||
outputs = {}
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
@@ -786,7 +786,7 @@ async def test_smart_decision_maker_raw_response_conversion():
|
||||
outputs = {}
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
@@ -905,7 +905,7 @@ async def test_smart_decision_maker_agent_mode():
|
||||
# Create a mock execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(
|
||||
safe_mode=False,
|
||||
human_in_the_loop_safe_mode=False,
|
||||
)
|
||||
|
||||
# Create a mock execution processor for agent mode tests
|
||||
@@ -1027,7 +1027,7 @@ async def test_smart_decision_maker_traditional_mode_default():
|
||||
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
|
||||
@@ -386,7 +386,7 @@ async def test_output_yielding_with_dynamic_fields():
|
||||
outputs = {}
|
||||
from backend.data.execution import ExecutionContext
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||
mock_execution_processor = MagicMock()
|
||||
|
||||
async for output_name, output_value in block.run(
|
||||
@@ -609,7 +609,9 @@ async def test_validation_errors_dont_pollute_conversation():
|
||||
outputs = {}
|
||||
from backend.data.execution import ExecutionContext
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
mock_execution_context = ExecutionContext(
|
||||
human_in_the_loop_safe_mode=False
|
||||
)
|
||||
|
||||
# Create a proper mock execution processor for agent mode
|
||||
from collections import defaultdict
|
||||
|
||||
@@ -104,7 +104,7 @@ async def get_accuracy_trends_and_alerts(
|
||||
AND e."executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED')
|
||||
{user_filter}
|
||||
GROUP BY DATE(e."createdAt")
|
||||
HAVING COUNT(*) >= 1 -- Include all days with at least 1 execution
|
||||
HAVING COUNT(*) >= 3 -- Need at least 3 executions per day
|
||||
),
|
||||
trends AS (
|
||||
SELECT
|
||||
|
||||
@@ -474,7 +474,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
|
||||
self.block_type = block_type
|
||||
self.webhook_config = webhook_config
|
||||
self.execution_stats: NodeExecutionStats = NodeExecutionStats()
|
||||
self.requires_human_review: bool = False
|
||||
self.is_sensitive_action: bool = False
|
||||
|
||||
if self.webhook_config:
|
||||
if isinstance(self.webhook_config, BlockWebhookConfig):
|
||||
@@ -637,8 +637,9 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
|
||||
- should_pause: True if execution should be paused for review
|
||||
- input_data_to_use: The input data to use (may be modified by reviewer)
|
||||
"""
|
||||
# Skip review if not required or safe mode is disabled
|
||||
if not self.requires_human_review or not execution_context.safe_mode:
|
||||
if not (
|
||||
self.is_sensitive_action and execution_context.sensitive_action_safe_mode
|
||||
):
|
||||
return False, input_data
|
||||
|
||||
from backend.blocks.helpers.review import HITLReviewHelper
|
||||
|
||||
@@ -81,7 +81,8 @@ class ExecutionContext(BaseModel):
|
||||
This includes information needed by blocks, sub-graphs, and execution management.
|
||||
"""
|
||||
|
||||
safe_mode: bool = True
|
||||
human_in_the_loop_safe_mode: bool = True
|
||||
sensitive_action_safe_mode: bool = False
|
||||
user_timezone: str = "UTC"
|
||||
root_execution_id: Optional[str] = None
|
||||
parent_execution_id: Optional[str] = None
|
||||
@@ -153,14 +154,8 @@ class GraphExecutionMeta(BaseDbModel):
|
||||
nodes_input_masks: Optional[dict[str, BlockInput]]
|
||||
preset_id: Optional[str]
|
||||
status: ExecutionStatus
|
||||
started_at: Optional[datetime] = Field(
|
||||
None,
|
||||
description="When execution started running. Null if not yet started (QUEUED).",
|
||||
)
|
||||
ended_at: Optional[datetime] = Field(
|
||||
None,
|
||||
description="When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW).",
|
||||
)
|
||||
started_at: datetime
|
||||
ended_at: datetime
|
||||
is_shared: bool = False
|
||||
share_token: Optional[str] = None
|
||||
|
||||
@@ -235,8 +230,10 @@ class GraphExecutionMeta(BaseDbModel):
|
||||
|
||||
@staticmethod
|
||||
def from_db(_graph_exec: AgentGraphExecution):
|
||||
start_time = _graph_exec.startedAt
|
||||
end_time = _graph_exec.endedAt
|
||||
now = datetime.now(timezone.utc)
|
||||
# TODO: make started_at and ended_at optional
|
||||
start_time = _graph_exec.startedAt or _graph_exec.createdAt
|
||||
end_time = _graph_exec.updatedAt or now
|
||||
|
||||
try:
|
||||
stats = GraphExecutionStats.model_validate(_graph_exec.stats)
|
||||
@@ -906,14 +903,6 @@ async def update_graph_execution_stats(
|
||||
|
||||
if status:
|
||||
update_data["executionStatus"] = status
|
||||
# Set endedAt when execution reaches a terminal status
|
||||
terminal_statuses = [
|
||||
ExecutionStatus.COMPLETED,
|
||||
ExecutionStatus.FAILED,
|
||||
ExecutionStatus.TERMINATED,
|
||||
]
|
||||
if status in terminal_statuses:
|
||||
update_data["endedAt"] = datetime.now(tz=timezone.utc)
|
||||
|
||||
where_clause: AgentGraphExecutionWhereInput = {"id": graph_exec_id}
|
||||
|
||||
|
||||
@@ -62,7 +62,23 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class GraphSettings(BaseModel):
|
||||
human_in_the_loop_safe_mode: bool | None = None
|
||||
human_in_the_loop_safe_mode: bool = True
|
||||
sensitive_action_safe_mode: bool = False
|
||||
|
||||
@classmethod
|
||||
def from_graph(
|
||||
cls,
|
||||
graph: "GraphModel",
|
||||
hitl_safe_mode: bool | None = None,
|
||||
sensitive_action_safe_mode: bool = False,
|
||||
) -> "GraphSettings":
|
||||
# Default to True if not explicitly set
|
||||
if hitl_safe_mode is None:
|
||||
hitl_safe_mode = True
|
||||
return cls(
|
||||
human_in_the_loop_safe_mode=hitl_safe_mode,
|
||||
sensitive_action_safe_mode=sensitive_action_safe_mode,
|
||||
)
|
||||
|
||||
|
||||
class Link(BaseDbModel):
|
||||
@@ -244,10 +260,14 @@ class BaseGraph(BaseDbModel):
|
||||
return any(
|
||||
node.block_id
|
||||
for node in self.nodes
|
||||
if (
|
||||
node.block.block_type == BlockType.HUMAN_IN_THE_LOOP
|
||||
or node.block.requires_human_review
|
||||
)
|
||||
if node.block.block_type == BlockType.HUMAN_IN_THE_LOOP
|
||||
)
|
||||
|
||||
@computed_field
|
||||
@property
|
||||
def has_sensitive_action(self) -> bool:
|
||||
return any(
|
||||
node.block_id for node in self.nodes if node.block.is_sensitive_action
|
||||
)
|
||||
|
||||
@property
|
||||
|
||||
@@ -873,11 +873,8 @@ async def add_graph_execution(
|
||||
settings = await gdb.get_graph_settings(user_id=user_id, graph_id=graph_id)
|
||||
|
||||
execution_context = ExecutionContext(
|
||||
safe_mode=(
|
||||
settings.human_in_the_loop_safe_mode
|
||||
if settings.human_in_the_loop_safe_mode is not None
|
||||
else True
|
||||
),
|
||||
human_in_the_loop_safe_mode=settings.human_in_the_loop_safe_mode,
|
||||
sensitive_action_safe_mode=settings.sensitive_action_safe_mode,
|
||||
user_timezone=(
|
||||
user.timezone if user.timezone != USER_TIMEZONE_NOT_SET else "UTC"
|
||||
),
|
||||
|
||||
@@ -386,6 +386,7 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture):
|
||||
mock_user.timezone = "UTC"
|
||||
mock_settings = mocker.MagicMock()
|
||||
mock_settings.human_in_the_loop_safe_mode = True
|
||||
mock_settings.sensitive_action_safe_mode = False
|
||||
|
||||
mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user)
|
||||
mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings)
|
||||
@@ -651,6 +652,7 @@ async def test_add_graph_execution_with_nodes_to_skip(mocker: MockerFixture):
|
||||
mock_user.timezone = "UTC"
|
||||
mock_settings = mocker.MagicMock()
|
||||
mock_settings.human_in_the_loop_safe_mode = True
|
||||
mock_settings.sensitive_action_safe_mode = False
|
||||
|
||||
mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user)
|
||||
mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings)
|
||||
|
||||
@@ -96,9 +96,9 @@ jina_credentials = APIKeyCredentials(
|
||||
)
|
||||
unreal_credentials = APIKeyCredentials(
|
||||
id="66f20754-1b81-48e4-91d0-f4f0dd82145f",
|
||||
provider="unreal_speech",
|
||||
provider="unreal",
|
||||
api_key=SecretStr(settings.secrets.unreal_speech_api_key),
|
||||
title="Use Credits for Unreal Speech",
|
||||
title="Use Credits for Unreal",
|
||||
expires_at=None,
|
||||
)
|
||||
open_router_credentials = APIKeyCredentials(
|
||||
@@ -216,14 +216,6 @@ webshare_proxy_credentials = UserPasswordCredentials(
|
||||
title="Use Credits for Webshare Proxy",
|
||||
)
|
||||
|
||||
openweathermap_credentials = APIKeyCredentials(
|
||||
id="8b3d4e5f-6a7b-8c9d-0e1f-2a3b4c5d6e7f",
|
||||
provider="openweathermap",
|
||||
api_key=SecretStr(settings.secrets.openweathermap_api_key),
|
||||
title="Use Credits for OpenWeatherMap",
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
DEFAULT_CREDENTIALS = [
|
||||
ollama_credentials,
|
||||
revid_credentials,
|
||||
@@ -251,7 +243,6 @@ DEFAULT_CREDENTIALS = [
|
||||
llama_api_credentials,
|
||||
v0_credentials,
|
||||
webshare_proxy_credentials,
|
||||
openweathermap_credentials,
|
||||
]
|
||||
|
||||
SYSTEM_CREDENTIAL_IDS = {cred.id for cred in DEFAULT_CREDENTIALS}
|
||||
@@ -355,17 +346,11 @@ class IntegrationCredentialsStore:
|
||||
all_credentials.append(zerobounce_credentials)
|
||||
if settings.secrets.google_maps_api_key:
|
||||
all_credentials.append(google_maps_credentials)
|
||||
if settings.secrets.llama_api_key:
|
||||
all_credentials.append(llama_api_credentials)
|
||||
if settings.secrets.v0_api_key:
|
||||
all_credentials.append(v0_credentials)
|
||||
if (
|
||||
settings.secrets.webshare_proxy_username
|
||||
and settings.secrets.webshare_proxy_password
|
||||
):
|
||||
all_credentials.append(webshare_proxy_credentials)
|
||||
if settings.secrets.openweathermap_api_key:
|
||||
all_credentials.append(openweathermap_credentials)
|
||||
return all_credentials
|
||||
|
||||
async def get_creds_by_id(
|
||||
|
||||
@@ -60,10 +60,8 @@ class LateExecutionMonitor:
|
||||
if not all_late_executions:
|
||||
return "No late executions detected."
|
||||
|
||||
# Sort by started time (oldest first), with None values (unstarted) first
|
||||
all_late_executions.sort(
|
||||
key=lambda x: x.started_at or datetime.min.replace(tzinfo=timezone.utc)
|
||||
)
|
||||
# Sort by created time (oldest first)
|
||||
all_late_executions.sort(key=lambda x: x.started_at)
|
||||
|
||||
num_total_late = len(all_late_executions)
|
||||
num_queued = len(queued_late_executions)
|
||||
@@ -76,7 +74,7 @@ class LateExecutionMonitor:
|
||||
was_truncated = num_total_late > tuncate_size
|
||||
|
||||
late_execution_details = [
|
||||
f"* `Execution ID: {exec.id}, Graph ID: {exec.graph_id}v{exec.graph_version}, User ID: {exec.user_id}, Status: {exec.status}, Started At: {exec.started_at.isoformat() if exec.started_at else 'Not started'}`"
|
||||
f"* `Execution ID: {exec.id}, Graph ID: {exec.graph_id}v{exec.graph_version}, User ID: {exec.user_id}, Status: {exec.status}, Created At: {exec.started_at.isoformat()}`"
|
||||
for exec in truncated_executions
|
||||
]
|
||||
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
-- AlterTable
|
||||
ALTER TABLE "AgentGraphExecution" ADD COLUMN "endedAt" TIMESTAMP(3);
|
||||
|
||||
-- Set endedAt to updatedAt for existing records with terminal status only
|
||||
UPDATE "AgentGraphExecution"
|
||||
SET "endedAt" = "updatedAt"
|
||||
WHERE "endedAt" IS NULL
|
||||
AND "executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED');
|
||||
@@ -450,7 +450,6 @@ model AgentGraphExecution {
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime? @updatedAt
|
||||
startedAt DateTime?
|
||||
endedAt DateTime?
|
||||
|
||||
isDeleted Boolean @default(false)
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
"forked_from_version": null,
|
||||
"has_external_trigger": false,
|
||||
"has_human_in_the_loop": false,
|
||||
"has_sensitive_action": false,
|
||||
"id": "graph-123",
|
||||
"input_schema": {
|
||||
"properties": {},
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
"forked_from_version": null,
|
||||
"has_external_trigger": false,
|
||||
"has_human_in_the_loop": false,
|
||||
"has_sensitive_action": false,
|
||||
"id": "graph-123",
|
||||
"input_schema": {
|
||||
"properties": {},
|
||||
|
||||
@@ -27,6 +27,8 @@
|
||||
"properties": {}
|
||||
},
|
||||
"has_external_trigger": false,
|
||||
"has_human_in_the_loop": false,
|
||||
"has_sensitive_action": false,
|
||||
"trigger_setup_info": null,
|
||||
"new_output": false,
|
||||
"can_access_graph": true,
|
||||
@@ -34,7 +36,8 @@
|
||||
"is_favorite": false,
|
||||
"recommended_schedule_cron": null,
|
||||
"settings": {
|
||||
"human_in_the_loop_safe_mode": null
|
||||
"human_in_the_loop_safe_mode": true,
|
||||
"sensitive_action_safe_mode": false
|
||||
},
|
||||
"marketplace_listing": null
|
||||
},
|
||||
@@ -65,6 +68,8 @@
|
||||
"properties": {}
|
||||
},
|
||||
"has_external_trigger": false,
|
||||
"has_human_in_the_loop": false,
|
||||
"has_sensitive_action": false,
|
||||
"trigger_setup_info": null,
|
||||
"new_output": false,
|
||||
"can_access_graph": false,
|
||||
@@ -72,7 +77,8 @@
|
||||
"is_favorite": false,
|
||||
"recommended_schedule_cron": null,
|
||||
"settings": {
|
||||
"human_in_the_loop_safe_mode": null
|
||||
"human_in_the_loop_safe_mode": true,
|
||||
"sensitive_action_safe_mode": false
|
||||
},
|
||||
"marketplace_listing": null
|
||||
}
|
||||
|
||||
@@ -51,8 +51,6 @@ export function AnalyticsResultsTable({ results }: Props) {
|
||||
"Execution ID",
|
||||
"Status",
|
||||
"Score",
|
||||
"Started At",
|
||||
"Ended At",
|
||||
"Summary Text",
|
||||
"Error Message",
|
||||
];
|
||||
@@ -64,8 +62,6 @@ export function AnalyticsResultsTable({ results }: Props) {
|
||||
result.exec_id,
|
||||
result.status,
|
||||
result.score?.toString() || "",
|
||||
result.started_at ? new Date(result.started_at).toLocaleString() : "",
|
||||
result.ended_at ? new Date(result.ended_at).toLocaleString() : "",
|
||||
`"${(result.summary_text || "").replace(/"/g, '""')}"`, // Escape quotes in summary
|
||||
`"${(result.error_message || "").replace(/"/g, '""')}"`, // Escape quotes in error
|
||||
]);
|
||||
@@ -252,13 +248,15 @@ export function AnalyticsResultsTable({ results }: Props) {
|
||||
)}
|
||||
</td>
|
||||
<td className="px-4 py-3">
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
onClick={() => toggleRowExpansion(result.exec_id)}
|
||||
>
|
||||
<EyeIcon size={16} />
|
||||
</Button>
|
||||
{(result.summary_text || result.error_message) && (
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
onClick={() => toggleRowExpansion(result.exec_id)}
|
||||
>
|
||||
<EyeIcon size={16} />
|
||||
</Button>
|
||||
)}
|
||||
</td>
|
||||
</tr>
|
||||
|
||||
@@ -266,44 +264,6 @@ export function AnalyticsResultsTable({ results }: Props) {
|
||||
<tr>
|
||||
<td colSpan={7} className="bg-gray-50 px-4 py-3">
|
||||
<div className="space-y-3">
|
||||
{/* Timestamps section */}
|
||||
<div className="grid grid-cols-2 gap-4 border-b border-gray-200 pb-3">
|
||||
<div>
|
||||
<Text
|
||||
variant="body"
|
||||
className="text-xs font-medium text-gray-600"
|
||||
>
|
||||
Started At:
|
||||
</Text>
|
||||
<Text
|
||||
variant="body"
|
||||
className="text-sm text-gray-700"
|
||||
>
|
||||
{result.started_at
|
||||
? new Date(
|
||||
result.started_at,
|
||||
).toLocaleString()
|
||||
: "—"}
|
||||
</Text>
|
||||
</div>
|
||||
<div>
|
||||
<Text
|
||||
variant="body"
|
||||
className="text-xs font-medium text-gray-600"
|
||||
>
|
||||
Ended At:
|
||||
</Text>
|
||||
<Text
|
||||
variant="body"
|
||||
className="text-sm text-gray-700"
|
||||
>
|
||||
{result.ended_at
|
||||
? new Date(result.ended_at).toLocaleString()
|
||||
: "—"}
|
||||
</Text>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{result.summary_text && (
|
||||
<div>
|
||||
<Text
|
||||
|
||||
@@ -541,19 +541,7 @@ export function ExecutionAnalyticsForm() {
|
||||
{/* Accuracy Trends Display */}
|
||||
{trendsData && (
|
||||
<div className="space-y-4">
|
||||
<div className="flex items-start justify-between">
|
||||
<h3 className="text-lg font-semibold">Execution Accuracy Trends</h3>
|
||||
<div className="rounded-md bg-blue-50 px-3 py-2 text-xs text-blue-700">
|
||||
<p className="font-medium">
|
||||
Chart Filters (matches monitoring system):
|
||||
</p>
|
||||
<ul className="mt-1 list-inside list-disc space-y-1">
|
||||
<li>Only days with ≥1 execution with correctness score</li>
|
||||
<li>Last 30 days</li>
|
||||
<li>Averages calculated from scored executions only</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
<h3 className="text-lg font-semibold">Execution Accuracy Trends</h3>
|
||||
|
||||
{/* Alert Section */}
|
||||
{trendsData.alert && (
|
||||
|
||||
@@ -31,10 +31,18 @@ export function AgentSettingsModal({
|
||||
}
|
||||
}
|
||||
|
||||
const { currentSafeMode, isPending, hasHITLBlocks, handleToggle } =
|
||||
useAgentSafeMode(agent);
|
||||
const {
|
||||
currentHITLSafeMode,
|
||||
showHITLToggle,
|
||||
handleHITLToggle,
|
||||
currentSensitiveActionSafeMode,
|
||||
showSensitiveActionToggle,
|
||||
handleSensitiveActionToggle,
|
||||
isPending,
|
||||
shouldShowToggle,
|
||||
} = useAgentSafeMode(agent);
|
||||
|
||||
if (!hasHITLBlocks) return null;
|
||||
if (!shouldShowToggle) return null;
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
@@ -57,23 +65,48 @@ export function AgentSettingsModal({
|
||||
)}
|
||||
<Dialog.Content>
|
||||
<div className="space-y-6">
|
||||
<div className="flex w-full flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
|
||||
<div className="flex w-full items-start justify-between gap-4">
|
||||
<div className="flex-1">
|
||||
<Text variant="large-semibold">Require human approval</Text>
|
||||
<Text variant="large" className="mt-1 text-zinc-900">
|
||||
The agent will pause and wait for your review before
|
||||
continuing
|
||||
</Text>
|
||||
{showHITLToggle && (
|
||||
<div className="flex w-full flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
|
||||
<div className="flex w-full items-start justify-between gap-4">
|
||||
<div className="flex-1">
|
||||
<Text variant="large-semibold">
|
||||
Human-in-the-loop approval
|
||||
</Text>
|
||||
<Text variant="large" className="mt-1 text-zinc-900">
|
||||
The agent will pause at human-in-the-loop blocks and wait
|
||||
for your review before continuing
|
||||
</Text>
|
||||
</div>
|
||||
<Switch
|
||||
checked={currentHITLSafeMode || false}
|
||||
onCheckedChange={handleHITLToggle}
|
||||
disabled={isPending}
|
||||
className="mt-1"
|
||||
/>
|
||||
</div>
|
||||
<Switch
|
||||
checked={currentSafeMode || false}
|
||||
onCheckedChange={handleToggle}
|
||||
disabled={isPending}
|
||||
className="mt-1"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
{showSensitiveActionToggle && (
|
||||
<div className="flex w-full flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
|
||||
<div className="flex w-full items-start justify-between gap-4">
|
||||
<div className="flex-1">
|
||||
<Text variant="large-semibold">
|
||||
Sensitive action approval
|
||||
</Text>
|
||||
<Text variant="large" className="mt-1 text-zinc-900">
|
||||
The agent will pause at sensitive action blocks and wait for
|
||||
your review before continuing
|
||||
</Text>
|
||||
</div>
|
||||
<Switch
|
||||
checked={currentSensitiveActionSafeMode}
|
||||
onCheckedChange={handleSensitiveActionToggle}
|
||||
disabled={isPending}
|
||||
className="mt-1"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
|
||||
@@ -13,8 +13,16 @@ interface Props {
|
||||
}
|
||||
|
||||
export function SelectedSettingsView({ agent, onClearSelectedRun }: Props) {
|
||||
const { currentSafeMode, isPending, hasHITLBlocks, handleToggle } =
|
||||
useAgentSafeMode(agent);
|
||||
const {
|
||||
currentHITLSafeMode,
|
||||
showHITLToggle,
|
||||
handleHITLToggle,
|
||||
currentSensitiveActionSafeMode,
|
||||
showSensitiveActionToggle,
|
||||
handleSensitiveActionToggle,
|
||||
isPending,
|
||||
shouldShowToggle,
|
||||
} = useAgentSafeMode(agent);
|
||||
|
||||
return (
|
||||
<SelectedViewLayout agent={agent}>
|
||||
@@ -34,24 +42,51 @@ export function SelectedSettingsView({ agent, onClearSelectedRun }: Props) {
|
||||
</div>
|
||||
|
||||
<div className={`${AGENT_LIBRARY_SECTION_PADDING_X} space-y-6`}>
|
||||
{hasHITLBlocks ? (
|
||||
<div className="flex w-full max-w-2xl flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
|
||||
<div className="flex w-full items-start justify-between gap-4">
|
||||
<div className="flex-1">
|
||||
<Text variant="large-semibold">Require human approval</Text>
|
||||
<Text variant="large" className="mt-1 text-zinc-900">
|
||||
The agent will pause and wait for your review before
|
||||
continuing
|
||||
</Text>
|
||||
{shouldShowToggle ? (
|
||||
<>
|
||||
{showHITLToggle && (
|
||||
<div className="flex w-full max-w-2xl flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
|
||||
<div className="flex w-full items-start justify-between gap-4">
|
||||
<div className="flex-1">
|
||||
<Text variant="large-semibold">
|
||||
Human-in-the-loop approval
|
||||
</Text>
|
||||
<Text variant="large" className="mt-1 text-zinc-900">
|
||||
The agent will pause at human-in-the-loop blocks and
|
||||
wait for your review before continuing
|
||||
</Text>
|
||||
</div>
|
||||
<Switch
|
||||
checked={currentHITLSafeMode || false}
|
||||
onCheckedChange={handleHITLToggle}
|
||||
disabled={isPending}
|
||||
className="mt-1"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
<Switch
|
||||
checked={currentSafeMode || false}
|
||||
onCheckedChange={handleToggle}
|
||||
disabled={isPending}
|
||||
className="mt-1"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
{showSensitiveActionToggle && (
|
||||
<div className="flex w-full max-w-2xl flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
|
||||
<div className="flex w-full items-start justify-between gap-4">
|
||||
<div className="flex-1">
|
||||
<Text variant="large-semibold">
|
||||
Sensitive action approval
|
||||
</Text>
|
||||
<Text variant="large" className="mt-1 text-zinc-900">
|
||||
The agent will pause at sensitive action blocks and wait
|
||||
for your review before continuing
|
||||
</Text>
|
||||
</div>
|
||||
<Switch
|
||||
checked={currentSensitiveActionSafeMode}
|
||||
onCheckedChange={handleSensitiveActionToggle}
|
||||
disabled={isPending}
|
||||
className="mt-1"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
) : (
|
||||
<div className="rounded-xl border border-zinc-100 bg-white p-6">
|
||||
<Text variant="body" className="text-muted-foreground">
|
||||
|
||||
@@ -173,9 +173,8 @@ export function OldAgentLibraryView() {
|
||||
if (agentRuns.length > 0) {
|
||||
// select latest run
|
||||
const latestRun = agentRuns.reduce((latest, current) => {
|
||||
if (!latest.started_at && !current.started_at) return latest;
|
||||
if (!latest.started_at) return current;
|
||||
if (!current.started_at) return latest;
|
||||
if (latest.started_at && !current.started_at) return current;
|
||||
else if (!latest.started_at) return latest;
|
||||
return latest.started_at > current.started_at ? latest : current;
|
||||
}, agentRuns[0]);
|
||||
selectRun(latestRun.id as GraphExecutionID);
|
||||
|
||||
@@ -184,11 +184,9 @@ export function AgentRunsSelectorList({
|
||||
))}
|
||||
{agentPresets.length > 0 && <Separator className="my-1" />}
|
||||
{agentRuns
|
||||
.toSorted((a, b) => {
|
||||
const aTime = a.started_at?.getTime() ?? 0;
|
||||
const bTime = b.started_at?.getTime() ?? 0;
|
||||
return bTime - aTime;
|
||||
})
|
||||
.toSorted(
|
||||
(a, b) => b.started_at.getTime() - a.started_at.getTime(),
|
||||
)
|
||||
.map((run) => (
|
||||
<AgentRunSummaryCard
|
||||
className={listItemClasses}
|
||||
@@ -201,7 +199,7 @@ export function AgentRunsSelectorList({
|
||||
?.name
|
||||
: null) ?? agent.name
|
||||
}
|
||||
timestamp={run.started_at ?? undefined}
|
||||
timestamp={run.started_at}
|
||||
selected={selectedView.id === run.id}
|
||||
onClick={() => onSelectRun(run.id)}
|
||||
onDelete={() => doDeleteRun(run as GraphExecutionMeta)}
|
||||
|
||||
@@ -120,11 +120,9 @@ export const AgentFlowList = ({
|
||||
lastRun =
|
||||
runCount == 0
|
||||
? null
|
||||
: _flowRuns.reduce((a, c) => {
|
||||
const aTime = a.started_at?.getTime() ?? 0;
|
||||
const cTime = c.started_at?.getTime() ?? 0;
|
||||
return aTime > cTime ? a : c;
|
||||
});
|
||||
: _flowRuns.reduce((a, c) =>
|
||||
a.started_at > c.started_at ? a : c,
|
||||
);
|
||||
}
|
||||
return { flow, runCount, lastRun };
|
||||
})
|
||||
@@ -132,9 +130,10 @@ export const AgentFlowList = ({
|
||||
if (!a.lastRun && !b.lastRun) return 0;
|
||||
if (!a.lastRun) return 1;
|
||||
if (!b.lastRun) return -1;
|
||||
const bTime = b.lastRun.started_at?.getTime() ?? 0;
|
||||
const aTime = a.lastRun.started_at?.getTime() ?? 0;
|
||||
return bTime - aTime;
|
||||
return (
|
||||
b.lastRun.started_at.getTime() -
|
||||
a.lastRun.started_at.getTime()
|
||||
);
|
||||
})
|
||||
.map(({ flow, runCount, lastRun }) => (
|
||||
<TableRow
|
||||
|
||||
@@ -29,10 +29,7 @@ export const FlowRunsStatus: React.FC<{
|
||||
: statsSince;
|
||||
const filteredFlowRuns =
|
||||
statsSinceTimestamp != null
|
||||
? executions.filter(
|
||||
(fr) =>
|
||||
fr.started_at && fr.started_at.getTime() > statsSinceTimestamp,
|
||||
)
|
||||
? executions.filter((fr) => fr.started_at.getTime() > statsSinceTimestamp)
|
||||
: executions;
|
||||
|
||||
return (
|
||||
|
||||
@@ -98,43 +98,40 @@ export const FlowRunsTimeline = ({
|
||||
<Scatter
|
||||
key={flow.id}
|
||||
data={executions
|
||||
.filter((e) => e.graph_id == flow.graph_id && e.started_at)
|
||||
.filter((e) => e.graph_id == flow.graph_id)
|
||||
.map((e) => ({
|
||||
...e,
|
||||
time:
|
||||
(e.started_at?.getTime() ?? 0) +
|
||||
(e.stats?.node_exec_time ?? 0) * 1000,
|
||||
e.started_at.getTime() + (e.stats?.node_exec_time ?? 0) * 1000,
|
||||
_duration: e.stats?.node_exec_time ?? 0,
|
||||
}))}
|
||||
name={flow.name}
|
||||
fill={`hsl(${(hashString(flow.id) * 137.5) % 360}, 70%, 50%)`}
|
||||
/>
|
||||
))}
|
||||
{executions
|
||||
.filter((e) => e.started_at && e.ended_at)
|
||||
.map((execution) => (
|
||||
<Line
|
||||
key={execution.id}
|
||||
type="linear"
|
||||
dataKey="_duration"
|
||||
data={[
|
||||
{
|
||||
...execution,
|
||||
time: execution.started_at!.getTime(),
|
||||
_duration: 0,
|
||||
},
|
||||
{
|
||||
...execution,
|
||||
time: execution.ended_at!.getTime(),
|
||||
_duration: execution.stats?.node_exec_time ?? 0,
|
||||
},
|
||||
]}
|
||||
stroke={`hsl(${(hashString(execution.graph_id) * 137.5) % 360}, 70%, 50%)`}
|
||||
strokeWidth={2}
|
||||
dot={false}
|
||||
legendType="none"
|
||||
/>
|
||||
))}
|
||||
{executions.map((execution) => (
|
||||
<Line
|
||||
key={execution.id}
|
||||
type="linear"
|
||||
dataKey="_duration"
|
||||
data={[
|
||||
{
|
||||
...execution,
|
||||
time: execution.started_at.getTime(),
|
||||
_duration: 0,
|
||||
},
|
||||
{
|
||||
...execution,
|
||||
time: execution.ended_at.getTime(),
|
||||
_duration: execution.stats?.node_exec_time ?? 0,
|
||||
},
|
||||
]}
|
||||
stroke={`hsl(${(hashString(execution.graph_id) * 137.5) % 360}, 70%, 50%)`}
|
||||
strokeWidth={2}
|
||||
dot={false}
|
||||
legendType="none"
|
||||
/>
|
||||
))}
|
||||
<Legend
|
||||
content={<ScrollableLegend />}
|
||||
wrapperStyle={{
|
||||
|
||||
@@ -98,11 +98,7 @@ const Monitor = () => {
|
||||
...(selectedFlow
|
||||
? executions.filter((v) => v.graph_id == selectedFlow.graph_id)
|
||||
: executions),
|
||||
].sort((a, b) => {
|
||||
const aTime = a.started_at?.getTime() ?? 0;
|
||||
const bTime = b.started_at?.getTime() ?? 0;
|
||||
return bTime - aTime;
|
||||
})}
|
||||
].sort((a, b) => b.started_at.getTime() - a.started_at.getTime())}
|
||||
selectedRun={selectedRun}
|
||||
onSelectRun={(r) => setSelectedRun(r.id == selectedRun?.id ? null : r)}
|
||||
/>
|
||||
|
||||
@@ -116,9 +116,6 @@ export default function UserIntegrationsPage() {
|
||||
"63a6e279-2dc2-448e-bf57-85776f7176dc", // ZeroBounce
|
||||
"9aa1bde0-4947-4a70-a20c-84daa3850d52", // Google Maps
|
||||
"d44045af-1c33-4833-9e19-752313214de2", // Llama API
|
||||
"c4e6d1a0-3b5f-4789-a8e2-9b123456789f", // V0 by Vercel
|
||||
"a5b3c7d9-2e4f-4a6b-8c1d-9e0f1a2b3c4d", // Webshare Proxy
|
||||
"8b3d4e5f-6a7b-8c9d-0e1f-2a3b4c5d6e7f", // OpenWeatherMap
|
||||
],
|
||||
[],
|
||||
);
|
||||
|
||||
@@ -6383,6 +6383,11 @@
|
||||
"title": "Has Human In The Loop",
|
||||
"readOnly": true
|
||||
},
|
||||
"has_sensitive_action": {
|
||||
"type": "boolean",
|
||||
"title": "Has Sensitive Action",
|
||||
"readOnly": true
|
||||
},
|
||||
"trigger_setup_info": {
|
||||
"anyOf": [
|
||||
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
|
||||
@@ -6399,6 +6404,7 @@
|
||||
"output_schema",
|
||||
"has_external_trigger",
|
||||
"has_human_in_the_loop",
|
||||
"has_sensitive_action",
|
||||
"trigger_setup_info"
|
||||
],
|
||||
"title": "BaseGraph"
|
||||
@@ -7148,20 +7154,6 @@
|
||||
"error_message": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Error Message"
|
||||
},
|
||||
"started_at": {
|
||||
"anyOf": [
|
||||
{ "type": "string", "format": "date-time" },
|
||||
{ "type": "null" }
|
||||
],
|
||||
"title": "Started At"
|
||||
},
|
||||
"ended_at": {
|
||||
"anyOf": [
|
||||
{ "type": "string", "format": "date-time" },
|
||||
{ "type": "null" }
|
||||
],
|
||||
"title": "Ended At"
|
||||
}
|
||||
},
|
||||
"type": "object",
|
||||
@@ -7268,20 +7260,14 @@
|
||||
},
|
||||
"status": { "$ref": "#/components/schemas/AgentExecutionStatus" },
|
||||
"started_at": {
|
||||
"anyOf": [
|
||||
{ "type": "string", "format": "date-time" },
|
||||
{ "type": "null" }
|
||||
],
|
||||
"title": "Started At",
|
||||
"description": "When execution started running. Null if not yet started (QUEUED)."
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"title": "Started At"
|
||||
},
|
||||
"ended_at": {
|
||||
"anyOf": [
|
||||
{ "type": "string", "format": "date-time" },
|
||||
{ "type": "null" }
|
||||
],
|
||||
"title": "Ended At",
|
||||
"description": "When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW)."
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"title": "Ended At"
|
||||
},
|
||||
"is_shared": {
|
||||
"type": "boolean",
|
||||
@@ -7315,6 +7301,8 @@
|
||||
"nodes_input_masks",
|
||||
"preset_id",
|
||||
"status",
|
||||
"started_at",
|
||||
"ended_at",
|
||||
"stats",
|
||||
"outputs"
|
||||
],
|
||||
@@ -7413,20 +7401,14 @@
|
||||
},
|
||||
"status": { "$ref": "#/components/schemas/AgentExecutionStatus" },
|
||||
"started_at": {
|
||||
"anyOf": [
|
||||
{ "type": "string", "format": "date-time" },
|
||||
{ "type": "null" }
|
||||
],
|
||||
"title": "Started At",
|
||||
"description": "When execution started running. Null if not yet started (QUEUED)."
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"title": "Started At"
|
||||
},
|
||||
"ended_at": {
|
||||
"anyOf": [
|
||||
{ "type": "string", "format": "date-time" },
|
||||
{ "type": "null" }
|
||||
],
|
||||
"title": "Ended At",
|
||||
"description": "When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW)."
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"title": "Ended At"
|
||||
},
|
||||
"is_shared": {
|
||||
"type": "boolean",
|
||||
@@ -7455,6 +7437,8 @@
|
||||
"nodes_input_masks",
|
||||
"preset_id",
|
||||
"status",
|
||||
"started_at",
|
||||
"ended_at",
|
||||
"stats"
|
||||
],
|
||||
"title": "GraphExecutionMeta"
|
||||
@@ -7501,20 +7485,14 @@
|
||||
},
|
||||
"status": { "$ref": "#/components/schemas/AgentExecutionStatus" },
|
||||
"started_at": {
|
||||
"anyOf": [
|
||||
{ "type": "string", "format": "date-time" },
|
||||
{ "type": "null" }
|
||||
],
|
||||
"title": "Started At",
|
||||
"description": "When execution started running. Null if not yet started (QUEUED)."
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"title": "Started At"
|
||||
},
|
||||
"ended_at": {
|
||||
"anyOf": [
|
||||
{ "type": "string", "format": "date-time" },
|
||||
{ "type": "null" }
|
||||
],
|
||||
"title": "Ended At",
|
||||
"description": "When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW)."
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"title": "Ended At"
|
||||
},
|
||||
"is_shared": {
|
||||
"type": "boolean",
|
||||
@@ -7553,6 +7531,8 @@
|
||||
"nodes_input_masks",
|
||||
"preset_id",
|
||||
"status",
|
||||
"started_at",
|
||||
"ended_at",
|
||||
"stats",
|
||||
"outputs",
|
||||
"node_executions"
|
||||
@@ -7629,6 +7609,11 @@
|
||||
"title": "Has Human In The Loop",
|
||||
"readOnly": true
|
||||
},
|
||||
"has_sensitive_action": {
|
||||
"type": "boolean",
|
||||
"title": "Has Sensitive Action",
|
||||
"readOnly": true
|
||||
},
|
||||
"trigger_setup_info": {
|
||||
"anyOf": [
|
||||
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
|
||||
@@ -7652,6 +7637,7 @@
|
||||
"output_schema",
|
||||
"has_external_trigger",
|
||||
"has_human_in_the_loop",
|
||||
"has_sensitive_action",
|
||||
"trigger_setup_info",
|
||||
"credentials_input_schema"
|
||||
],
|
||||
@@ -7730,6 +7716,11 @@
|
||||
"title": "Has Human In The Loop",
|
||||
"readOnly": true
|
||||
},
|
||||
"has_sensitive_action": {
|
||||
"type": "boolean",
|
||||
"title": "Has Sensitive Action",
|
||||
"readOnly": true
|
||||
},
|
||||
"trigger_setup_info": {
|
||||
"anyOf": [
|
||||
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
|
||||
@@ -7754,6 +7745,7 @@
|
||||
"output_schema",
|
||||
"has_external_trigger",
|
||||
"has_human_in_the_loop",
|
||||
"has_sensitive_action",
|
||||
"trigger_setup_info",
|
||||
"credentials_input_schema"
|
||||
],
|
||||
@@ -7762,8 +7754,14 @@
|
||||
"GraphSettings": {
|
||||
"properties": {
|
||||
"human_in_the_loop_safe_mode": {
|
||||
"anyOf": [{ "type": "boolean" }, { "type": "null" }],
|
||||
"title": "Human In The Loop Safe Mode"
|
||||
"type": "boolean",
|
||||
"title": "Human In The Loop Safe Mode",
|
||||
"default": true
|
||||
},
|
||||
"sensitive_action_safe_mode": {
|
||||
"type": "boolean",
|
||||
"title": "Sensitive Action Safe Mode",
|
||||
"default": false
|
||||
}
|
||||
},
|
||||
"type": "object",
|
||||
@@ -7921,6 +7919,16 @@
|
||||
"title": "Has External Trigger",
|
||||
"description": "Whether the agent has an external trigger (e.g. webhook) node"
|
||||
},
|
||||
"has_human_in_the_loop": {
|
||||
"type": "boolean",
|
||||
"title": "Has Human In The Loop",
|
||||
"description": "Whether the agent has human-in-the-loop blocks"
|
||||
},
|
||||
"has_sensitive_action": {
|
||||
"type": "boolean",
|
||||
"title": "Has Sensitive Action",
|
||||
"description": "Whether the agent has sensitive action blocks"
|
||||
},
|
||||
"trigger_setup_info": {
|
||||
"anyOf": [
|
||||
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
|
||||
@@ -7967,6 +7975,8 @@
|
||||
"output_schema",
|
||||
"credentials_input_schema",
|
||||
"has_external_trigger",
|
||||
"has_human_in_the_loop",
|
||||
"has_sensitive_action",
|
||||
"new_output",
|
||||
"can_access_graph",
|
||||
"is_latest_version",
|
||||
|
||||
@@ -50,9 +50,7 @@ export function ActivityItem({ execution }: Props) {
|
||||
execution.status === AgentExecutionStatus.QUEUED;
|
||||
|
||||
if (isActiveStatus) {
|
||||
const timeAgo = execution.started_at
|
||||
? formatTimeAgo(execution.started_at.toString())
|
||||
: "recently";
|
||||
const timeAgo = formatTimeAgo(execution.started_at.toString());
|
||||
const statusText =
|
||||
execution.status === AgentExecutionStatus.QUEUED ? "queued" : "running";
|
||||
return [
|
||||
@@ -63,9 +61,7 @@ export function ActivityItem({ execution }: Props) {
|
||||
// Handle all other statuses with time display
|
||||
const timeAgo = execution.ended_at
|
||||
? formatTimeAgo(execution.ended_at.toString())
|
||||
: execution.started_at
|
||||
? formatTimeAgo(execution.started_at.toString())
|
||||
: "recently";
|
||||
: formatTimeAgo(execution.started_at.toString());
|
||||
|
||||
let statusText = "ended";
|
||||
switch (execution.status) {
|
||||
|
||||
@@ -20,11 +20,15 @@ function hasHITLBlocks(graph: GraphModel | LibraryAgent | Graph): boolean {
|
||||
if ("has_human_in_the_loop" in graph) {
|
||||
return !!graph.has_human_in_the_loop;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
if (isLibraryAgent(graph)) {
|
||||
return graph.settings?.human_in_the_loop_safe_mode !== null;
|
||||
function hasSensitiveActionBlocks(
|
||||
graph: GraphModel | LibraryAgent | Graph,
|
||||
): boolean {
|
||||
if ("has_sensitive_action" in graph) {
|
||||
return !!graph.has_sensitive_action;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -40,7 +44,9 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) {
|
||||
|
||||
const graphId = getGraphId(graph);
|
||||
const isAgent = isLibraryAgent(graph);
|
||||
const shouldShowToggle = hasHITLBlocks(graph);
|
||||
const showHITLToggle = hasHITLBlocks(graph);
|
||||
const showSensitiveActionToggle = hasSensitiveActionBlocks(graph);
|
||||
const shouldShowToggle = showHITLToggle || showSensitiveActionToggle;
|
||||
|
||||
const { mutateAsync: updateGraphSettings, isPending } =
|
||||
usePatchV1UpdateGraphSettings();
|
||||
@@ -56,27 +62,37 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) {
|
||||
},
|
||||
);
|
||||
|
||||
const [localSafeMode, setLocalSafeMode] = useState<boolean | null>(null);
|
||||
const [localHITLSafeMode, setLocalHITLSafeMode] = useState<boolean>(true);
|
||||
const [localSensitiveActionSafeMode, setLocalSensitiveActionSafeMode] =
|
||||
useState<boolean>(false);
|
||||
const [isLocalStateLoaded, setIsLocalStateLoaded] = useState<boolean>(false);
|
||||
|
||||
useEffect(() => {
|
||||
if (!isAgent && libraryAgent) {
|
||||
const backendValue = libraryAgent.settings?.human_in_the_loop_safe_mode;
|
||||
if (backendValue !== undefined) {
|
||||
setLocalSafeMode(backendValue);
|
||||
}
|
||||
setLocalHITLSafeMode(
|
||||
libraryAgent.settings?.human_in_the_loop_safe_mode ?? true,
|
||||
);
|
||||
setLocalSensitiveActionSafeMode(
|
||||
libraryAgent.settings?.sensitive_action_safe_mode ?? false,
|
||||
);
|
||||
setIsLocalStateLoaded(true);
|
||||
}
|
||||
}, [isAgent, libraryAgent]);
|
||||
|
||||
const currentSafeMode = isAgent
|
||||
? graph.settings?.human_in_the_loop_safe_mode
|
||||
: localSafeMode;
|
||||
const currentHITLSafeMode = isAgent
|
||||
? (graph.settings?.human_in_the_loop_safe_mode ?? true)
|
||||
: localHITLSafeMode;
|
||||
|
||||
const isStateUndetermined = isAgent
|
||||
? graph.settings?.human_in_the_loop_safe_mode == null
|
||||
: isLoading || localSafeMode === null;
|
||||
const currentSensitiveActionSafeMode = isAgent
|
||||
? (graph.settings?.sensitive_action_safe_mode ?? false)
|
||||
: localSensitiveActionSafeMode;
|
||||
|
||||
const handleToggle = useCallback(async () => {
|
||||
const newSafeMode = !currentSafeMode;
|
||||
const isHITLStateUndetermined = isAgent
|
||||
? false
|
||||
: isLoading || !isLocalStateLoaded;
|
||||
|
||||
const handleHITLToggle = useCallback(async () => {
|
||||
const newSafeMode = !currentHITLSafeMode;
|
||||
|
||||
try {
|
||||
await updateGraphSettings({
|
||||
@@ -85,7 +101,7 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) {
|
||||
});
|
||||
|
||||
if (!isAgent) {
|
||||
setLocalSafeMode(newSafeMode);
|
||||
setLocalHITLSafeMode(newSafeMode);
|
||||
}
|
||||
|
||||
if (isAgent) {
|
||||
@@ -101,37 +117,62 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) {
|
||||
queryClient.invalidateQueries({ queryKey: ["v2", "executions"] });
|
||||
|
||||
toast({
|
||||
title: `Safe mode ${newSafeMode ? "enabled" : "disabled"}`,
|
||||
title: `HITL safe mode ${newSafeMode ? "enabled" : "disabled"}`,
|
||||
description: newSafeMode
|
||||
? "Human-in-the-loop blocks will require manual review"
|
||||
: "Human-in-the-loop blocks will proceed automatically",
|
||||
duration: 2000,
|
||||
});
|
||||
} catch (error) {
|
||||
const isNotFoundError =
|
||||
error instanceof Error &&
|
||||
(error.message.includes("404") || error.message.includes("not found"));
|
||||
|
||||
if (!isAgent && isNotFoundError) {
|
||||
toast({
|
||||
title: "Safe mode not available",
|
||||
description:
|
||||
"To configure safe mode, please save this graph to your library first.",
|
||||
variant: "destructive",
|
||||
});
|
||||
} else {
|
||||
toast({
|
||||
title: "Failed to update safe mode",
|
||||
description:
|
||||
error instanceof Error
|
||||
? error.message
|
||||
: "An unexpected error occurred.",
|
||||
variant: "destructive",
|
||||
});
|
||||
}
|
||||
handleToggleError(error, isAgent, toast);
|
||||
}
|
||||
}, [
|
||||
currentSafeMode,
|
||||
currentHITLSafeMode,
|
||||
graphId,
|
||||
isAgent,
|
||||
graph.id,
|
||||
updateGraphSettings,
|
||||
queryClient,
|
||||
toast,
|
||||
]);
|
||||
|
||||
const handleSensitiveActionToggle = useCallback(async () => {
|
||||
const newSafeMode = !currentSensitiveActionSafeMode;
|
||||
|
||||
try {
|
||||
await updateGraphSettings({
|
||||
graphId,
|
||||
data: { sensitive_action_safe_mode: newSafeMode },
|
||||
});
|
||||
|
||||
if (!isAgent) {
|
||||
setLocalSensitiveActionSafeMode(newSafeMode);
|
||||
}
|
||||
|
||||
if (isAgent) {
|
||||
queryClient.invalidateQueries({
|
||||
queryKey: getGetV2GetLibraryAgentQueryOptions(graph.id.toString())
|
||||
.queryKey,
|
||||
});
|
||||
}
|
||||
|
||||
queryClient.invalidateQueries({
|
||||
queryKey: ["v1", "graphs", graphId, "executions"],
|
||||
});
|
||||
queryClient.invalidateQueries({ queryKey: ["v2", "executions"] });
|
||||
|
||||
toast({
|
||||
title: `Sensitive action safe mode ${newSafeMode ? "enabled" : "disabled"}`,
|
||||
description: newSafeMode
|
||||
? "Sensitive action blocks will require manual review"
|
||||
: "Sensitive action blocks will proceed automatically",
|
||||
duration: 2000,
|
||||
});
|
||||
} catch (error) {
|
||||
handleToggleError(error, isAgent, toast);
|
||||
}
|
||||
}, [
|
||||
currentSensitiveActionSafeMode,
|
||||
graphId,
|
||||
isAgent,
|
||||
graph.id,
|
||||
@@ -141,11 +182,53 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) {
|
||||
]);
|
||||
|
||||
return {
|
||||
currentSafeMode,
|
||||
// HITL safe mode
|
||||
currentHITLSafeMode,
|
||||
showHITLToggle,
|
||||
isHITLStateUndetermined,
|
||||
handleHITLToggle,
|
||||
|
||||
// Sensitive action safe mode
|
||||
currentSensitiveActionSafeMode,
|
||||
showSensitiveActionToggle,
|
||||
handleSensitiveActionToggle,
|
||||
|
||||
// General
|
||||
isPending,
|
||||
shouldShowToggle,
|
||||
isStateUndetermined,
|
||||
handleToggle,
|
||||
hasHITLBlocks: shouldShowToggle,
|
||||
|
||||
// Backwards compatibility
|
||||
currentSafeMode: currentHITLSafeMode,
|
||||
isStateUndetermined: isHITLStateUndetermined,
|
||||
handleToggle: handleHITLToggle,
|
||||
hasHITLBlocks: showHITLToggle,
|
||||
};
|
||||
}
|
||||
|
||||
function handleToggleError(
|
||||
error: unknown,
|
||||
isAgent: boolean,
|
||||
toast: ReturnType<typeof useToast>["toast"],
|
||||
) {
|
||||
const isNotFoundError =
|
||||
error instanceof Error &&
|
||||
(error.message.includes("404") || error.message.includes("not found"));
|
||||
|
||||
if (!isAgent && isNotFoundError) {
|
||||
toast({
|
||||
title: "Safe mode not available",
|
||||
description:
|
||||
"To configure safe mode, please save this graph to your library first.",
|
||||
variant: "destructive",
|
||||
});
|
||||
} else {
|
||||
toast({
|
||||
title: "Failed to update safe mode",
|
||||
description:
|
||||
error instanceof Error
|
||||
? error.message
|
||||
: "An unexpected error occurred.",
|
||||
variant: "destructive",
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -327,8 +327,8 @@ export type GraphExecutionMeta = {
|
||||
| "FAILED"
|
||||
| "INCOMPLETE"
|
||||
| "REVIEW";
|
||||
started_at: Date | null;
|
||||
ended_at: Date | null;
|
||||
started_at: Date;
|
||||
ended_at: Date;
|
||||
stats: {
|
||||
error: string | null;
|
||||
cost: number;
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
"""Video editing blocks for AutoGPT Platform.
|
||||
|
||||
This module provides blocks for:
|
||||
- Downloading videos from URLs (YouTube, Vimeo, news sites, direct links)
|
||||
- Clipping/trimming video segments
|
||||
- Concatenating multiple videos
|
||||
- Adding text overlays
|
||||
- Adding AI-generated narration
|
||||
|
||||
Dependencies:
|
||||
- yt-dlp: For video downloading
|
||||
- moviepy: For video editing operations
|
||||
- requests: For API calls (narration block)
|
||||
"""
|
||||
|
||||
from .download import VideoDownloadBlock
|
||||
from .clip import VideoClipBlock
|
||||
from .concat import VideoConcatBlock
|
||||
from .text_overlay import VideoTextOverlayBlock
|
||||
from .narration import VideoNarrationBlock
|
||||
|
||||
__all__ = [
|
||||
"VideoClipBlock",
|
||||
"VideoConcatBlock",
|
||||
"VideoDownloadBlock",
|
||||
"VideoNarrationBlock",
|
||||
"VideoTextOverlayBlock",
|
||||
]
|
||||
@@ -1,93 +0,0 @@
|
||||
"""
|
||||
VideoClipBlock - Extract a segment from a video file
|
||||
"""
|
||||
import uuid
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput
|
||||
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
|
||||
from backend.data.model import SchemaField
|
||||
from backend.util.exceptions import BlockExecutionError
|
||||
|
||||
|
||||
class VideoClipBlock(Block):
    """Extract a time segment from a video."""

    class Input(BlockSchemaInput):
        # Source video; accepts a URL, data URI, or local file path.
        video_in: str = SchemaField(
            description="Input video (URL, data URI, or file path)",
            json_schema_extra={"format": "file"}
        )
        start_time: float = SchemaField(
            description="Start time in seconds",
            ge=0.0
        )
        end_time: float = SchemaField(
            description="End time in seconds",
            ge=0.0
        )
        output_format: str = SchemaField(
            description="Output format",
            default="mp4",
            advanced=True
        )

    class Output(BlockSchemaOutput):
        video_out: str = SchemaField(
            description="Clipped video file",
            json_schema_extra={"format": "file"}
        )
        duration: float = SchemaField(description="Clip duration in seconds")

    def __init__(self):
        super().__init__(
            id="b2c3d4e5-f6a7-8901-bcde-f23456789012",
            description="Extract a time segment from a video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={"video_in": "/tmp/test.mp4", "start_time": 0.0, "end_time": 10.0},
            test_output=[("video_out", str), ("duration", float)],
            test_mock={"_clip_video": lambda *args: ("/tmp/clip.mp4", 10.0)}
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        """Clip the input video to [start_time, end_time] and write it to /tmp.

        Yields:
            video_out: Path of the clipped video file.
            duration: Duration of the written clip in seconds.

        Raises:
            BlockExecutionError: If the time range is invalid, moviepy is not
                installed, or the clipping operation fails.
        """
        # Fail fast on an empty/inverted range before touching the video.
        if input_data.end_time <= input_data.start_time:
            raise BlockExecutionError(
                message=f"end_time ({input_data.end_time}) must be greater than start_time ({input_data.start_time})",
                block_name=self.name,
                block_id=str(self.id)
            )

        try:
            from moviepy.video.io.VideoFileClip import VideoFileClip
        except ImportError as e:
            raise BlockExecutionError(
                message="moviepy is not installed. Please install it with: pip install moviepy",
                block_name=self.name,
                block_id=str(self.id)
            ) from e

        clip = None
        subclip = None
        try:
            clip = VideoFileClip(input_data.video_in)

            # Fix: validate the requested range against the real video length
            # so the user gets a clear error instead of an opaque moviepy
            # failure wrapped as "Failed to clip video".
            if input_data.start_time >= clip.duration:
                raise BlockExecutionError(
                    message=f"start_time ({input_data.start_time}) is beyond the video duration ({clip.duration})",
                    block_name=self.name,
                    block_id=str(self.id)
                )
            # Clamp end_time to the video length rather than erroring out.
            end_time = min(input_data.end_time, clip.duration)

            subclip = clip.subclip(input_data.start_time, end_time)

            output_path = f"/tmp/clip_{uuid.uuid4()}.{input_data.output_format}"
            subclip.write_videofile(output_path, logger=None)

            yield "video_out", output_path
            yield "duration", subclip.duration

        except BlockExecutionError:
            # Already a well-formed block error — don't re-wrap it below.
            raise
        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to clip video: {e}",
                block_name=self.name,
                block_id=str(self.id)
            ) from e
        finally:
            # Close the derived clip first, then the source reader.
            if subclip:
                subclip.close()
            if clip:
                clip.close()
|
||||
@@ -1,123 +0,0 @@
|
||||
"""
|
||||
VideoConcatBlock - Concatenate multiple video clips into one
|
||||
"""
|
||||
import uuid
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput
|
||||
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
|
||||
from backend.data.model import SchemaField
|
||||
from backend.util.exceptions import BlockExecutionError
|
||||
|
||||
|
||||
class VideoConcatBlock(Block):
    """Merge multiple video clips into one continuous video."""

    class Input(BlockSchemaInput):
        videos: list[str] = SchemaField(
            description="List of video files to concatenate (in order)"
        )
        transition: str = SchemaField(
            description="Transition between clips",
            default="none",
            enum=["none", "crossfade", "fade_black"]
        )
        transition_duration: float = SchemaField(
            description="Transition duration in seconds",
            default=0.5,
            advanced=True
        )
        output_format: str = SchemaField(
            description="Output format",
            default="mp4",
            advanced=True
        )

    class Output(BlockSchemaOutput):
        video_out: str = SchemaField(
            description="Concatenated video file",
            json_schema_extra={"format": "file"}
        )
        total_duration: float = SchemaField(description="Total duration in seconds")

    def __init__(self):
        super().__init__(
            id="c3d4e5f6-a7b8-9012-cdef-345678901234",
            description="Merge multiple video clips into one continuous video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={"videos": ["/tmp/a.mp4", "/tmp/b.mp4"]},
            test_output=[("video_out", str), ("total_duration", float)],
            test_mock={"_concat_videos": lambda *args: ("/tmp/concat.mp4", 20.0)}
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        """Concatenate the input videos (optionally with a transition) into one file.

        Yields:
            video_out: Path of the concatenated video file.
            total_duration: Duration of the result in seconds.

        Raises:
            BlockExecutionError: If fewer than 2 videos are given, the
                transition parameters are invalid, moviepy is missing, or
                the concatenation fails.
        """
        try:
            from moviepy.editor import VideoFileClip, concatenate_videoclips
        except ImportError as e:
            raise BlockExecutionError(
                message="moviepy is not installed. Please install it with: pip install moviepy",
                block_name=self.name,
                block_id=str(self.id)
            ) from e

        # Validate minimum clips
        if len(input_data.videos) < 2:
            raise BlockExecutionError(
                message="At least 2 videos are required for concatenation",
                block_name=self.name,
                block_id=str(self.id)
            )

        # Fix: a zero/negative transition duration would silently produce a
        # broken crossfade (the negative padding below becomes >= 0) —
        # reject it up front with a clear message.
        if input_data.transition != "none" and input_data.transition_duration <= 0:
            raise BlockExecutionError(
                message=f"transition_duration ({input_data.transition_duration}) must be positive when a transition is selected",
                block_name=self.name,
                block_id=str(self.id)
            )

        clips = []
        faded_clips = []
        final = None
        try:
            # Load clips one by one to handle partial failures
            for v in input_data.videos:
                clips.append(VideoFileClip(v))

            if input_data.transition == "crossfade":
                # Apply crossfade between clips using crossfadein/crossfadeout
                transition_dur = input_data.transition_duration
                # Fix: a crossfade longer than the shortest clip would make
                # clips overlap past their bounds; fail with a clear message.
                shortest = min(c.duration for c in clips)
                if transition_dur >= shortest:
                    raise BlockExecutionError(
                        message=f"transition_duration ({transition_dur}) must be shorter than the shortest clip ({shortest}s)",
                        block_name=self.name,
                        block_id=str(self.id)
                    )
                for i, clip in enumerate(clips):
                    if i > 0:
                        clip = clip.crossfadein(transition_dur)
                    if i < len(clips) - 1:
                        clip = clip.crossfadeout(transition_dur)
                    faded_clips.append(clip)
                # Negative padding overlaps consecutive clips by the
                # transition duration so the fades blend together.
                final = concatenate_videoclips(
                    faded_clips,
                    method="compose",
                    padding=-transition_dur
                )
            elif input_data.transition == "fade_black":
                # Fade to black between clips
                for clip in clips:
                    faded = clip.fadein(input_data.transition_duration).fadeout(
                        input_data.transition_duration
                    )
                    faded_clips.append(faded)
                final = concatenate_videoclips(faded_clips)
            else:
                final = concatenate_videoclips(clips)

            output_path = f"/tmp/concat_{uuid.uuid4()}.{input_data.output_format}"
            final.write_videofile(output_path, logger=None)

            yield "video_out", output_path
            yield "total_duration", final.duration

        except BlockExecutionError:
            # Already a well-formed block error — don't re-wrap it below.
            raise
        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to concatenate videos: {e}",
                block_name=self.name,
                block_id=str(self.id)
            ) from e
        finally:
            # Close the composite first, then derived clips, then sources.
            if final:
                final.close()
            for clip in faded_clips:
                clip.close()
            for clip in clips:
                clip.close()
|
||||
@@ -1,102 +0,0 @@
|
||||
"""
|
||||
VideoDownloadBlock - Download video from URL (YouTube, Vimeo, news sites, direct links)
|
||||
"""
|
||||
import uuid
|
||||
from typing import Literal
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput
|
||||
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
|
||||
from backend.data.model import SchemaField
|
||||
from backend.util.exceptions import BlockExecutionError
|
||||
|
||||
|
||||
class VideoDownloadBlock(Block):
    """Download video from URL using yt-dlp."""

    class Input(BlockSchemaInput):
        url: str = SchemaField(
            description="URL of the video to download (YouTube, Vimeo, direct link, etc.)",
            placeholder="https://www.youtube.com/watch?v=..."
        )
        quality: Literal["best", "1080p", "720p", "480p", "audio_only"] = SchemaField(
            description="Video quality preference",
            default="720p"
        )
        output_format: Literal["mp4", "webm", "mkv"] = SchemaField(
            description="Output video format",
            default="mp4",
            advanced=True
        )

    class Output(BlockSchemaOutput):
        video_file: str = SchemaField(
            description="Path or data URI of downloaded video",
            json_schema_extra={"format": "file"}
        )
        duration: float = SchemaField(description="Video duration in seconds")
        title: str = SchemaField(description="Video title from source")
        source_url: str = SchemaField(description="Original source URL")

    def __init__(self):
        super().__init__(
            id="a1b2c3d4-e5f6-7890-abcd-ef1234567890",
            description="Download video from URL (YouTube, Vimeo, news sites, direct links)",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={"url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ", "quality": "480p"},
            test_output=[("video_file", str), ("duration", float), ("title", str), ("source_url", str)],
            test_mock={"_download_video": lambda *args: ("/tmp/video.mp4", 212.0, "Test Video")}
        )

    def _get_format_string(self, quality: str) -> str:
        """Map a quality preset to a yt-dlp format selector (falls back to 720p)."""
        formats = {
            "best": "bestvideo+bestaudio/best",
            "1080p": "bestvideo[height<=1080]+bestaudio/best[height<=1080]",
            "720p": "bestvideo[height<=720]+bestaudio/best[height<=720]",
            "480p": "bestvideo[height<=480]+bestaudio/best[height<=480]",
            "audio_only": "bestaudio/best"
        }
        return formats.get(quality, formats["720p"])

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        """Download the video at ``input_data.url`` to /tmp via yt-dlp.

        Yields:
            video_file: Path of the downloaded file.
            duration: Video duration in seconds (0.0 if unknown).
            title: Video title from the source ("Unknown" if missing).
            source_url: The original input URL.

        Raises:
            BlockExecutionError: If yt-dlp is missing or the download fails.
        """
        try:
            import yt_dlp
        except ImportError as e:
            raise BlockExecutionError(
                message="yt-dlp is not installed. Please install it with: pip install yt-dlp",
                block_name=self.name,
                block_id=str(self.id)
            ) from e

        video_id = str(uuid.uuid4())[:8]
        output_template = f"/tmp/{video_id}.%(ext)s"

        ydl_opts = {
            "format": self._get_format_string(input_data.quality),
            "outtmpl": output_template,
            "merge_output_format": input_data.output_format,
            # Fix: a URL carrying a playlist parameter would otherwise pull
            # the entire playlist; this block downloads a single video.
            "noplaylist": True,
            "quiet": True,
            "no_warnings": True,
        }

        try:
            with yt_dlp.YoutubeDL(ydl_opts) as ydl:
                info = ydl.extract_info(input_data.url, download=True)
                video_path = ydl.prepare_filename(info)

                # Handle format conversion in filename: after merging,
                # the final file carries the requested container extension.
                if not video_path.endswith(f".{input_data.output_format}"):
                    video_path = video_path.rsplit(".", 1)[0] + f".{input_data.output_format}"

                yield "video_file", video_path
                # Fix: yt-dlp reports integer seconds; coerce to float so the
                # value matches the declared `duration: float` output.
                yield "duration", float(info.get("duration") or 0.0)
                yield "title", info.get("title") or "Unknown"
                yield "source_url", input_data.url

        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to download video: {e}",
                block_name=self.name,
                block_id=str(self.id)
            ) from e
|
||||
@@ -1,167 +0,0 @@
|
||||
"""
|
||||
VideoNarrationBlock - Generate AI voice narration and add to video
|
||||
"""
|
||||
import uuid
|
||||
from typing import Literal
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput
|
||||
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
|
||||
from backend.data.model import SchemaField, CredentialsMetaInput, APIKeyCredentials
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.util.exceptions import BlockExecutionError
|
||||
|
||||
|
||||
class VideoNarrationBlock(Block):
    """Generate AI narration and add to video.

    Synthesizes speech from a text script via the ElevenLabs text-to-speech
    API, then combines the generated audio with the input video's audio
    track according to the selected mix mode.
    """

    class Input(BlockSchemaInput):
        # API-key credentials for the ElevenLabs provider.
        credentials: CredentialsMetaInput[
            Literal[ProviderName.ELEVENLABS], Literal["api_key"]
        ] = SchemaField(
            description="ElevenLabs API key for voice synthesis"
        )
        # Video file to narrate.
        video_in: str = SchemaField(
            description="Input video file",
            json_schema_extra={"format": "file"}
        )
        # Text that will be converted to speech.
        script: str = SchemaField(
            description="Narration script text"
        )
        voice_id: str = SchemaField(
            description="ElevenLabs voice ID",
            default="21m00Tcm4TlvDq8ikWAM"  # Rachel
        )
        # replace: narration only; mix: overlay at original_volume;
        # ducking: overlay with extra attenuation of the original audio.
        mix_mode: Literal["replace", "mix", "ducking"] = SchemaField(
            description="How to combine with original audio",
            default="ducking"
        )
        narration_volume: float = SchemaField(
            description="Narration volume (0.0 to 2.0)",
            default=1.0,
            ge=0.0,
            le=2.0,
            advanced=True
        )
        original_volume: float = SchemaField(
            description="Original audio volume when mixing (0.0 to 1.0)",
            default=0.3,
            ge=0.0,
            le=1.0,
            advanced=True
        )

    class Output(BlockSchemaOutput):
        video_out: str = SchemaField(
            description="Video with narration",
            json_schema_extra={"format": "file"}
        )
        audio_file: str = SchemaField(
            description="Generated audio file",
            json_schema_extra={"format": "file"}
        )

    def __init__(self):
        super().__init__(
            id="e5f6a7b8-c9d0-1234-ef56-789012345678",
            description="Generate AI narration and add to video",
            categories={BlockCategory.MULTIMEDIA, BlockCategory.AI},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={
                "video_in": "/tmp/test.mp4",
                "script": "Hello world",
                "credentials": {"provider": "elevenlabs", "id": "test", "type": "api_key"}
            },
            test_output=[("video_out", str), ("audio_file", str)],
            test_mock={"_generate_narration": lambda *args: ("/tmp/narrated.mp4", "/tmp/audio.mp3")}
        )

    async def run(
        self,
        input_data: Input,
        *,
        credentials: APIKeyCredentials,
        **kwargs
    ) -> BlockOutput:
        """Synthesize narration via ElevenLabs and mux it into the video.

        Yields:
            video_out: Path of the narrated video file.
            audio_file: Path of the generated narration MP3.

        Raises:
            BlockExecutionError: On missing dependencies, ElevenLabs API
                failures, or video-processing errors.

        NOTE(review): `requests.post` and `write_videofile` are blocking
        calls inside an async method — confirm the executor runs blocks in
        a worker where this is acceptable.
        """
        try:
            import requests
            from moviepy.editor import VideoFileClip, AudioFileClip, CompositeAudioClip
        except ImportError as e:
            raise BlockExecutionError(
                message=f"Missing dependency: {e}. Install moviepy and requests.",
                block_name=self.name,
                block_id=str(self.id)
            ) from e

        # Track resources so `finally` can close them in reverse order of
        # creation (derived composite first, source reader last).
        video = None
        final = None
        narration = None
        try:
            # Generate narration via ElevenLabs
            response = requests.post(
                f"https://api.elevenlabs.io/v1/text-to-speech/{input_data.voice_id}",
                headers={
                    "xi-api-key": credentials.api_key.get_secret_value(),
                    "Content-Type": "application/json"
                },
                json={
                    "text": input_data.script,
                    "model_id": "eleven_monolingual_v1"
                },
                timeout=120
            )
            response.raise_for_status()

            # Persist the synthesized audio; this path is also yielded so
            # downstream blocks can reuse the raw narration.
            audio_path = f"/tmp/narration_{uuid.uuid4()}.mp3"
            with open(audio_path, "wb") as f:
                f.write(response.content)

            # Combine with video
            video = VideoFileClip(input_data.video_in)
            narration = AudioFileClip(audio_path)
            narration = narration.volumex(input_data.narration_volume)

            if input_data.mix_mode == "replace":
                final_audio = narration
            elif input_data.mix_mode == "mix":
                # Mix narration over the original track, if one exists.
                if video.audio:
                    original = video.audio.volumex(input_data.original_volume)
                    final_audio = CompositeAudioClip([original, narration])
                else:
                    final_audio = narration
            else:  # ducking - lower original volume more when narration plays
                # NOTE(review): this applies a constant attenuation for the
                # whole video, not time-varying ducking keyed to narration
                # segments — confirm that matches the intended UX.
                if video.audio:
                    # Apply stronger attenuation for ducking effect
                    ducking_volume = input_data.original_volume * 0.3
                    original = video.audio.volumex(ducking_volume)
                    final_audio = CompositeAudioClip([original, narration])
                else:
                    final_audio = narration

            final = video.set_audio(final_audio)

            output_path = f"/tmp/narrated_{uuid.uuid4()}.mp4"
            final.write_videofile(output_path, logger=None)

            yield "video_out", output_path
            yield "audio_file", audio_path

        except requests.exceptions.RequestException as e:
            # Network/HTTP failures from the ElevenLabs call.
            raise BlockExecutionError(
                message=f"ElevenLabs API error: {e}",
                block_name=self.name,
                block_id=str(self.id)
            ) from e
        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to add narration: {e}",
                block_name=self.name,
                block_id=str(self.id)
            ) from e
        finally:
            if narration:
                narration.close()
            if final:
                final.close()
            if video:
                video.close()
|
||||
@@ -1,149 +0,0 @@
|
||||
"""
|
||||
VideoTextOverlayBlock - Add text overlay to video
|
||||
"""
|
||||
import uuid
|
||||
from typing import Literal
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput
|
||||
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
|
||||
from backend.data.model import SchemaField
|
||||
from backend.util.exceptions import BlockExecutionError
|
||||
|
||||
|
||||
class VideoTextOverlayBlock(Block):
    """Add text overlay/caption to video."""

    class Input(BlockSchemaInput):
        video_in: str = SchemaField(
            description="Input video file",
            json_schema_extra={"format": "file"}
        )
        text: str = SchemaField(
            description="Text to overlay on video"
        )
        position: Literal[
            "top", "center", "bottom",
            "top-left", "top-right",
            "bottom-left", "bottom-right"
        ] = SchemaField(
            description="Position of text on screen",
            default="bottom"
        )
        start_time: float | None = SchemaField(
            description="When to show text (seconds). None = entire video",
            default=None,
            advanced=True
        )
        end_time: float | None = SchemaField(
            description="When to hide text (seconds). None = until end",
            default=None,
            advanced=True
        )
        font_size: int = SchemaField(
            description="Font size",
            default=48,
            ge=12,
            le=200,
            advanced=True
        )
        font_color: str = SchemaField(
            description="Font color (hex or name)",
            default="white",
            advanced=True
        )
        bg_color: str | None = SchemaField(
            description="Background color behind text (None for transparent)",
            default=None,
            advanced=True
        )

    class Output(BlockSchemaOutput):
        video_out: str = SchemaField(
            description="Video with text overlay",
            json_schema_extra={"format": "file"}
        )

    def __init__(self):
        super().__init__(
            id="d4e5f6a7-b8c9-0123-def4-567890123456",
            description="Add text overlay/caption to video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={"video_in": "/tmp/test.mp4", "text": "Hello World"},
            test_output=[("video_out", str)],
            test_mock={"_add_text": lambda *args: "/tmp/overlay.mp4"}
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        """Render a text caption over the video during [start_time, end_time].

        Yields:
            video_out: Path of the video with the overlay burned in.

        Raises:
            BlockExecutionError: If the time range is invalid, moviepy is
                missing, or the compositing fails.
        """
        try:
            from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip
        except ImportError as e:
            raise BlockExecutionError(
                message="moviepy is not installed. Please install it with: pip install moviepy",
                block_name=self.name,
                block_id=str(self.id)
            ) from e

        # Validate time range if both are provided
        if (input_data.start_time is not None and
            input_data.end_time is not None and
            input_data.end_time <= input_data.start_time):
            raise BlockExecutionError(
                message=f"end_time ({input_data.end_time}) must be greater than start_time ({input_data.start_time})",
                block_name=self.name,
                block_id=str(self.id)
            )

        video = None
        final = None
        txt_clip = None
        try:
            video = VideoFileClip(input_data.video_in)

            txt_clip = TextClip(
                input_data.text,
                fontsize=input_data.font_size,
                color=input_data.font_color,
                bg_color=input_data.bg_color,
            )

            # Position mapping
            pos_map = {
                "top": ("center", "top"),
                "center": ("center", "center"),
                "bottom": ("center", "bottom"),
                "top-left": ("left", "top"),
                "top-right": ("right", "top"),
                "bottom-left": ("left", "bottom"),
                "bottom-right": ("right", "bottom"),
            }

            txt_clip = txt_clip.set_position(pos_map[input_data.position])

            # Set timing.
            # Fix: the previous `start_time or 0` / `end_time or video.duration`
            # coerced an explicit 0.0 to the fallback value (falsy), so
            # end_time=0.0 silently meant "until end of video". Use explicit
            # None checks instead.
            start = input_data.start_time if input_data.start_time is not None else 0.0
            end = input_data.end_time if input_data.end_time is not None else video.duration
            # Clamp to the video length so an oversized end_time doesn't
            # extend the overlay past the last frame.
            end = min(end, video.duration)
            duration = max(0, end - start)
            txt_clip = txt_clip.set_start(start).set_end(end).set_duration(duration)

            final = CompositeVideoClip([video, txt_clip])

            output_path = f"/tmp/overlay_{uuid.uuid4()}.mp4"
            final.write_videofile(output_path, logger=None)

            yield "video_out", output_path

        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to add text overlay: {e}",
                block_name=self.name,
                block_id=str(self.id)
            ) from e
        finally:
            # Close derived clips before the source reader.
            if txt_clip:
                txt_clip.close()
            if final:
                final.close()
            if video:
                video.close()
|
||||
Reference in New Issue
Block a user