Compare commits


2 Commits

Author SHA1 Message Date
claude[bot]
2a3e428d9e fix(backend): sort imports alphabetically in run_agent.py
Co-authored-by: Nicholas Tindle <ntindle@users.noreply.github.com>
2026-01-19 07:18:56 +00:00
Nicholas Tindle
7d80f4f0e0 fix(platform): make chat credentials type selection deterministic
Previously, when a block/agent supported multiple credential types (e.g., both
api_key and oauth2), the backend would pick one type using:

    cred_type = next(iter(field_info.supported_types), "api_key")

Since `supported_types` is a frozenset, iteration order is non-deterministic
due to Python's hash randomization. This caused the UI to randomly show either
"Add API key" or "Connect account (OAuth)" between requests or server restarts.

This fix:
- Adds utility functions that serialize ALL supported credential types using
  sorted() for deterministic ordering
- Returns both `type` (first sorted type, for backwards compat) and `types`
  (full array) from the backend
- Updates frontend to read the `types` array and pass all supported types to
  the CredentialsInput component

Now useCredentials.ts correctly sets both supportsApiKey=true AND
supportsOAuth2=true when both are supported, ensuring saved credentials of
all supported types are shown in the selection list.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-18 15:31:46 -06:00
33 changed files with 208 additions and 968 deletions
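
A minimal, self-contained Python sketch of the ordering problem and the sorted() fix described in the commit message above (names are simplified; the real logic operates on CredentialsFieldInfo.supported_types in backend.data.model):

    # Sketch only: stand-in for the frozenset carried by CredentialsFieldInfo.
    supported_types = frozenset({"api_key", "oauth2"})

    # Before: frozenset iteration order depends on the per-process hash seed,
    # so this could yield either "api_key" or "oauth2" after a restart.
    cred_type = next(iter(supported_types), "api_key")

    # After: sort for a stable order and expose every supported type.
    sorted_types = sorted(supported_types)  # always ["api_key", "oauth2"]
    serialized = {
        "type": sorted_types[0] if sorted_types else "api_key",  # backwards compat
        "types": sorted_types,  # full list so the UI can offer all options
    }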

View File

@@ -28,7 +28,6 @@ from backend.executor.manager import get_db_async_client
from backend.util.settings import Settings
logger = logging.getLogger(__name__)
settings = Settings()
class ExecutionAnalyticsRequest(BaseModel):
@@ -64,8 +63,6 @@ class ExecutionAnalyticsResult(BaseModel):
score: Optional[float]
status: str # "success", "failed", "skipped"
error_message: Optional[str] = None
started_at: Optional[datetime] = None
ended_at: Optional[datetime] = None
class ExecutionAnalyticsResponse(BaseModel):
@@ -227,6 +224,11 @@ async def generate_execution_analytics(
)
try:
# Validate model configuration
settings = Settings()
if not settings.secrets.openai_internal_api_key:
raise HTTPException(status_code=500, detail="OpenAI API key not configured")
# Get database client
db_client = get_db_async_client()
@@ -318,8 +320,6 @@ async def generate_execution_analytics(
),
status="skipped",
error_message=None, # Not an error - just already processed
started_at=execution.started_at,
ended_at=execution.ended_at,
)
)
@@ -349,9 +349,6 @@ async def _process_batch(
) -> list[ExecutionAnalyticsResult]:
"""Process a batch of executions concurrently."""
if not settings.secrets.openai_internal_api_key:
raise HTTPException(status_code=500, detail="OpenAI API key not configured")
async def process_single_execution(execution) -> ExecutionAnalyticsResult:
try:
# Generate activity status and score using the specified model
@@ -390,8 +387,6 @@ async def _process_batch(
score=None,
status="skipped",
error_message="Activity generation returned None",
started_at=execution.started_at,
ended_at=execution.ended_at,
)
# Update the execution stats
@@ -421,8 +416,6 @@ async def _process_batch(
summary_text=activity_response["activity_status"],
score=activity_response["correctness_score"],
status="success",
started_at=execution.started_at,
ended_at=execution.ended_at,
)
except Exception as e:
@@ -436,8 +429,6 @@ async def _process_batch(
score=None,
status="failed",
error_message=str(e),
started_at=execution.started_at,
ended_at=execution.ended_at,
)
# Process all executions in the batch concurrently

View File

@@ -32,7 +32,7 @@ from .models import (
UserReadiness,
)
from .utils import (
check_user_has_required_credentials,
build_missing_credentials_from_graph,
extract_credentials_from_schema,
fetch_graph_from_store_slug,
get_or_create_library_agent,
@@ -235,15 +235,13 @@ class RunAgentTool(BaseTool):
# Return credentials needed response with input data info
# The UI handles credential setup automatically, so the message
# focuses on asking about input data
credentials = extract_credentials_from_schema(
graph.credentials_input_schema
requirements_creds_dict = build_missing_credentials_from_graph(
graph, None
)
missing_creds_check = await check_user_has_required_credentials(
user_id, credentials
missing_credentials_dict = build_missing_credentials_from_graph(
graph, graph_credentials
)
missing_credentials_dict = {
c.id: c.model_dump() for c in missing_creds_check
}
requirements_creds_list = list(requirements_creds_dict.values())
return SetupRequirementsResponse(
message=self._build_inputs_message(graph, MSG_WHAT_VALUES_TO_USE),
@@ -257,7 +255,7 @@ class RunAgentTool(BaseTool):
ready_to_run=False,
),
requirements={
"credentials": [c.model_dump() for c in credentials],
"credentials": requirements_creds_list,
"inputs": self._get_inputs_list(graph.input_schema),
"execution_modes": self._get_execution_modes(graph),
},

View File

@@ -20,6 +20,7 @@ from .models import (
ToolResponseBase,
UserReadiness,
)
from .utils import build_missing_credentials_from_field_info
logger = logging.getLogger(__name__)
@@ -186,7 +187,11 @@ class RunBlockTool(BaseTool):
if missing_credentials:
# Return setup requirements response with missing credentials
missing_creds_dict = {c.id: c.model_dump() for c in missing_credentials}
credentials_fields_info = block.input_schema.get_credentials_fields_info()
missing_creds_dict = build_missing_credentials_from_field_info(
credentials_fields_info, set(matched_credentials.keys())
)
missing_creds_list = list(missing_creds_dict.values())
return SetupRequirementsResponse(
message=(
@@ -203,7 +208,7 @@ class RunBlockTool(BaseTool):
ready_to_run=False,
),
requirements={
"credentials": [c.model_dump() for c in missing_credentials],
"credentials": missing_creds_list,
"inputs": self._get_inputs_list(block),
"execution_modes": ["immediate"],
},

View File

@@ -8,7 +8,7 @@ from backend.api.features.library import model as library_model
from backend.api.features.store import db as store_db
from backend.data import graph as graph_db
from backend.data.graph import GraphModel
from backend.data.model import CredentialsMetaInput
from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.util.exceptions import NotFoundError
@@ -89,6 +89,59 @@ def extract_credentials_from_schema(
return credentials
def _serialize_missing_credential(
field_key: str, field_info: CredentialsFieldInfo
) -> dict[str, Any]:
"""
Convert credential field info into a serializable dict that preserves all supported
credential types (e.g., api_key + oauth2) so the UI can offer multiple options.
"""
supported_types = sorted(field_info.supported_types)
provider = next(iter(field_info.provider), "unknown")
scopes = sorted(field_info.required_scopes or [])
return {
"id": field_key,
"title": field_key.replace("_", " ").title(),
"provider": provider,
"provider_name": provider.replace("_", " ").title(),
"type": supported_types[0] if supported_types else "api_key",
"types": supported_types,
"scopes": scopes,
}
def build_missing_credentials_from_graph(
graph: GraphModel, matched_credentials: dict[str, CredentialsMetaInput] | None
) -> dict[str, Any]:
"""
Build a missing_credentials mapping from a graph's aggregated credentials inputs,
preserving all supported credential types for each field.
"""
matched_keys = set(matched_credentials.keys()) if matched_credentials else set()
aggregated_fields = graph.aggregate_credentials_inputs()
return {
field_key: _serialize_missing_credential(field_key, field_info)
for field_key, (field_info, _node_fields) in aggregated_fields.items()
if field_key not in matched_keys
}
def build_missing_credentials_from_field_info(
credential_fields: dict[str, CredentialsFieldInfo],
matched_keys: set[str],
) -> dict[str, Any]:
"""
Build missing_credentials mapping from a simple credentials field info dictionary.
"""
return {
field_key: _serialize_missing_credential(field_key, field_info)
for field_key, field_info in credential_fields.items()
if field_key not in matched_keys
}
def extract_credentials_as_dict(
credentials_input_schema: dict[str, Any] | None,
) -> dict[str, CredentialsMetaInput]:
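
To make the serialized payload shape concrete, here is a self-contained sketch that mirrors _serialize_missing_credential above with a stubbed field-info object; the real CredentialsFieldInfo model lives in backend.data.model, and the github_credentials example field and its types are illustrative, not taken from a real graph:

    from typing import Any, NamedTuple, Optional

    class FieldInfoStub(NamedTuple):
        # Stand-in for CredentialsFieldInfo, just enough for this sketch.
        provider: frozenset
        supported_types: frozenset
        required_scopes: Optional[frozenset]

    def serialize_missing_credential(field_key: str, info: FieldInfoStub) -> dict[str, Any]:
        supported_types = sorted(info.supported_types)
        provider = next(iter(info.provider), "unknown")
        return {
            "id": field_key,
            "title": field_key.replace("_", " ").title(),
            "provider": provider,
            "provider_name": provider.replace("_", " ").title(),
            "type": supported_types[0] if supported_types else "api_key",
            "types": supported_types,
            "scopes": sorted(info.required_scopes or []),
        }

    print(serialize_missing_credential(
        "github_credentials",
        FieldInfoStub(frozenset({"github"}), frozenset({"api_key", "oauth2"}), None),
    ))
    # {'id': 'github_credentials', 'title': 'Github Credentials', 'provider': 'github',
    #  'provider_name': 'Github', 'type': 'api_key', 'types': ['api_key', 'oauth2'], 'scopes': []}

The requirements payload returned to the chat UI is then just list(missing.values()), which is why the frontend can read the full "types" array per credential field.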

View File

@@ -104,7 +104,7 @@ async def get_accuracy_trends_and_alerts(
AND e."executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED')
{user_filter}
GROUP BY DATE(e."createdAt")
HAVING COUNT(*) >= 1 -- Include all days with at least 1 execution
HAVING COUNT(*) >= 3 -- Need at least 3 executions per day
),
trends AS (
SELECT

View File

@@ -153,14 +153,8 @@ class GraphExecutionMeta(BaseDbModel):
nodes_input_masks: Optional[dict[str, BlockInput]]
preset_id: Optional[str]
status: ExecutionStatus
started_at: Optional[datetime] = Field(
None,
description="When execution started running. Null if not yet started (QUEUED).",
)
ended_at: Optional[datetime] = Field(
None,
description="When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW).",
)
started_at: datetime
ended_at: datetime
is_shared: bool = False
share_token: Optional[str] = None
@@ -235,8 +229,10 @@ class GraphExecutionMeta(BaseDbModel):
@staticmethod
def from_db(_graph_exec: AgentGraphExecution):
start_time = _graph_exec.startedAt
end_time = _graph_exec.endedAt
now = datetime.now(timezone.utc)
# TODO: make started_at and ended_at optional
start_time = _graph_exec.startedAt or _graph_exec.createdAt
end_time = _graph_exec.updatedAt or now
try:
stats = GraphExecutionStats.model_validate(_graph_exec.stats)
@@ -906,14 +902,6 @@ async def update_graph_execution_stats(
if status:
update_data["executionStatus"] = status
# Set endedAt when execution reaches a terminal status
terminal_statuses = [
ExecutionStatus.COMPLETED,
ExecutionStatus.FAILED,
ExecutionStatus.TERMINATED,
]
if status in terminal_statuses:
update_data["endedAt"] = datetime.now(tz=timezone.utc)
where_clause: AgentGraphExecutionWhereInput = {"id": graph_exec_id}

View File

@@ -96,9 +96,9 @@ jina_credentials = APIKeyCredentials(
)
unreal_credentials = APIKeyCredentials(
id="66f20754-1b81-48e4-91d0-f4f0dd82145f",
provider="unreal_speech",
provider="unreal",
api_key=SecretStr(settings.secrets.unreal_speech_api_key),
title="Use Credits for Unreal Speech",
title="Use Credits for Unreal",
expires_at=None,
)
open_router_credentials = APIKeyCredentials(
@@ -216,14 +216,6 @@ webshare_proxy_credentials = UserPasswordCredentials(
title="Use Credits for Webshare Proxy",
)
openweathermap_credentials = APIKeyCredentials(
id="8b3d4e5f-6a7b-8c9d-0e1f-2a3b4c5d6e7f",
provider="openweathermap",
api_key=SecretStr(settings.secrets.openweathermap_api_key),
title="Use Credits for OpenWeatherMap",
expires_at=None,
)
DEFAULT_CREDENTIALS = [
ollama_credentials,
revid_credentials,
@@ -251,7 +243,6 @@ DEFAULT_CREDENTIALS = [
llama_api_credentials,
v0_credentials,
webshare_proxy_credentials,
openweathermap_credentials,
]
SYSTEM_CREDENTIAL_IDS = {cred.id for cred in DEFAULT_CREDENTIALS}
@@ -355,17 +346,11 @@ class IntegrationCredentialsStore:
all_credentials.append(zerobounce_credentials)
if settings.secrets.google_maps_api_key:
all_credentials.append(google_maps_credentials)
if settings.secrets.llama_api_key:
all_credentials.append(llama_api_credentials)
if settings.secrets.v0_api_key:
all_credentials.append(v0_credentials)
if (
settings.secrets.webshare_proxy_username
and settings.secrets.webshare_proxy_password
):
all_credentials.append(webshare_proxy_credentials)
if settings.secrets.openweathermap_api_key:
all_credentials.append(openweathermap_credentials)
return all_credentials
async def get_creds_by_id(

View File

@@ -60,10 +60,8 @@ class LateExecutionMonitor:
if not all_late_executions:
return "No late executions detected."
# Sort by started time (oldest first), with None values (unstarted) first
all_late_executions.sort(
key=lambda x: x.started_at or datetime.min.replace(tzinfo=timezone.utc)
)
# Sort by created time (oldest first)
all_late_executions.sort(key=lambda x: x.started_at)
num_total_late = len(all_late_executions)
num_queued = len(queued_late_executions)
@@ -76,7 +74,7 @@ class LateExecutionMonitor:
was_truncated = num_total_late > tuncate_size
late_execution_details = [
f"* `Execution ID: {exec.id}, Graph ID: {exec.graph_id}v{exec.graph_version}, User ID: {exec.user_id}, Status: {exec.status}, Started At: {exec.started_at.isoformat() if exec.started_at else 'Not started'}`"
f"* `Execution ID: {exec.id}, Graph ID: {exec.graph_id}v{exec.graph_version}, User ID: {exec.user_id}, Status: {exec.status}, Created At: {exec.started_at.isoformat()}`"
for exec in truncated_executions
]

View File

@@ -1,8 +0,0 @@
-- AlterTable
ALTER TABLE "AgentGraphExecution" ADD COLUMN "endedAt" TIMESTAMP(3);
-- Set endedAt to updatedAt for existing records with terminal status only
UPDATE "AgentGraphExecution"
SET "endedAt" = "updatedAt"
WHERE "endedAt" IS NULL
AND "executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED');

View File

@@ -450,7 +450,6 @@ model AgentGraphExecution {
createdAt DateTime @default(now())
updatedAt DateTime? @updatedAt
startedAt DateTime?
endedAt DateTime?
isDeleted Boolean @default(false)

View File

@@ -51,8 +51,6 @@ export function AnalyticsResultsTable({ results }: Props) {
"Execution ID",
"Status",
"Score",
"Started At",
"Ended At",
"Summary Text",
"Error Message",
];
@@ -64,8 +62,6 @@ export function AnalyticsResultsTable({ results }: Props) {
result.exec_id,
result.status,
result.score?.toString() || "",
result.started_at ? new Date(result.started_at).toLocaleString() : "",
result.ended_at ? new Date(result.ended_at).toLocaleString() : "",
`"${(result.summary_text || "").replace(/"/g, '""')}"`, // Escape quotes in summary
`"${(result.error_message || "").replace(/"/g, '""')}"`, // Escape quotes in error
]);
@@ -252,13 +248,15 @@ export function AnalyticsResultsTable({ results }: Props) {
)}
</td>
<td className="px-4 py-3">
<Button
variant="ghost"
size="small"
onClick={() => toggleRowExpansion(result.exec_id)}
>
<EyeIcon size={16} />
</Button>
{(result.summary_text || result.error_message) && (
<Button
variant="ghost"
size="small"
onClick={() => toggleRowExpansion(result.exec_id)}
>
<EyeIcon size={16} />
</Button>
)}
</td>
</tr>
@@ -266,44 +264,6 @@ export function AnalyticsResultsTable({ results }: Props) {
<tr>
<td colSpan={7} className="bg-gray-50 px-4 py-3">
<div className="space-y-3">
{/* Timestamps section */}
<div className="grid grid-cols-2 gap-4 border-b border-gray-200 pb-3">
<div>
<Text
variant="body"
className="text-xs font-medium text-gray-600"
>
Started At:
</Text>
<Text
variant="body"
className="text-sm text-gray-700"
>
{result.started_at
? new Date(
result.started_at,
).toLocaleString()
: "—"}
</Text>
</div>
<div>
<Text
variant="body"
className="text-xs font-medium text-gray-600"
>
Ended At:
</Text>
<Text
variant="body"
className="text-sm text-gray-700"
>
{result.ended_at
? new Date(result.ended_at).toLocaleString()
: "—"}
</Text>
</div>
</div>
{result.summary_text && (
<div>
<Text

View File

@@ -541,19 +541,7 @@ export function ExecutionAnalyticsForm() {
{/* Accuracy Trends Display */}
{trendsData && (
<div className="space-y-4">
<div className="flex items-start justify-between">
<h3 className="text-lg font-semibold">Execution Accuracy Trends</h3>
<div className="rounded-md bg-blue-50 px-3 py-2 text-xs text-blue-700">
<p className="font-medium">
Chart Filters (matches monitoring system):
</p>
<ul className="mt-1 list-inside list-disc space-y-1">
<li>Only days with 1 execution with correctness score</li>
<li>Last 30 days</li>
<li>Averages calculated from scored executions only</li>
</ul>
</div>
</div>
<h3 className="text-lg font-semibold">Execution Accuracy Trends</h3>
{/* Alert Section */}
{trendsData.alert && (

View File

@@ -267,23 +267,34 @@ export function extractCredentialsNeeded(
| undefined;
if (missingCreds && Object.keys(missingCreds).length > 0) {
const agentName = (setupInfo?.agent_name as string) || "this block";
const credentials = Object.values(missingCreds).map((credInfo) => ({
provider: (credInfo.provider as string) || "unknown",
providerName:
(credInfo.provider_name as string) ||
(credInfo.provider as string) ||
"Unknown Provider",
credentialType:
const credentials = Object.values(missingCreds).map((credInfo) => {
// Normalize to array at boundary - prefer 'types' array, fall back to single 'type'
const typesArray = credInfo.types as
| Array<"api_key" | "oauth2" | "user_password" | "host_scoped">
| undefined;
const singleType =
(credInfo.type as
| "api_key"
| "oauth2"
| "user_password"
| "host_scoped") || "api_key",
title:
(credInfo.title as string) ||
`${(credInfo.provider_name as string) || (credInfo.provider as string)} credentials`,
scopes: credInfo.scopes as string[] | undefined,
}));
| "host_scoped"
| undefined) || "api_key";
const credentialTypes =
typesArray && typesArray.length > 0 ? typesArray : [singleType];
return {
provider: (credInfo.provider as string) || "unknown",
providerName:
(credInfo.provider_name as string) ||
(credInfo.provider as string) ||
"Unknown Provider",
credentialTypes,
title:
(credInfo.title as string) ||
`${(credInfo.provider_name as string) || (credInfo.provider as string)} credentials`,
scopes: credInfo.scopes as string[] | undefined,
};
});
return {
type: "credentials_needed",
toolName,
@@ -358,11 +369,14 @@ export function extractInputsNeeded(
credentials.forEach((cred) => {
const id = cred.id as string;
if (id) {
const credentialTypes = Array.isArray(cred.types)
? cred.types
: [(cred.type as string) || "api_key"];
credentialsSchema[id] = {
type: "object",
properties: {},
credentials_provider: [cred.provider as string],
credentials_types: [(cred.type as string) || "api_key"],
credentials_types: credentialTypes,
credentials_scopes: cred.scopes as string[] | undefined,
};
}

View File

@@ -9,7 +9,9 @@ import { useChatCredentialsSetup } from "./useChatCredentialsSetup";
export interface CredentialInfo {
provider: string;
providerName: string;
credentialType: "api_key" | "oauth2" | "user_password" | "host_scoped";
credentialTypes: Array<
"api_key" | "oauth2" | "user_password" | "host_scoped"
>;
title: string;
scopes?: string[];
}
@@ -30,7 +32,7 @@ function createSchemaFromCredentialInfo(
type: "object",
properties: {},
credentials_provider: [credential.provider],
credentials_types: [credential.credentialType],
credentials_types: credential.credentialTypes,
credentials_scopes: credential.scopes,
discriminator: undefined,
discriminator_mapping: undefined,

View File

@@ -41,7 +41,9 @@ export type ChatMessageData =
credentials: Array<{
provider: string;
providerName: string;
credentialType: "api_key" | "oauth2" | "user_password" | "host_scoped";
credentialTypes: Array<
"api_key" | "oauth2" | "user_password" | "host_scoped"
>;
title: string;
scopes?: string[];
}>;

View File

@@ -173,9 +173,8 @@ export function OldAgentLibraryView() {
if (agentRuns.length > 0) {
// select latest run
const latestRun = agentRuns.reduce((latest, current) => {
if (!latest.started_at && !current.started_at) return latest;
if (!latest.started_at) return current;
if (!current.started_at) return latest;
if (latest.started_at && !current.started_at) return current;
else if (!latest.started_at) return latest;
return latest.started_at > current.started_at ? latest : current;
}, agentRuns[0]);
selectRun(latestRun.id as GraphExecutionID);

View File

@@ -184,11 +184,9 @@ export function AgentRunsSelectorList({
))}
{agentPresets.length > 0 && <Separator className="my-1" />}
{agentRuns
.toSorted((a, b) => {
const aTime = a.started_at?.getTime() ?? 0;
const bTime = b.started_at?.getTime() ?? 0;
return bTime - aTime;
})
.toSorted(
(a, b) => b.started_at.getTime() - a.started_at.getTime(),
)
.map((run) => (
<AgentRunSummaryCard
className={listItemClasses}
@@ -201,7 +199,7 @@ export function AgentRunsSelectorList({
?.name
: null) ?? agent.name
}
timestamp={run.started_at ?? undefined}
timestamp={run.started_at}
selected={selectedView.id === run.id}
onClick={() => onSelectRun(run.id)}
onDelete={() => doDeleteRun(run as GraphExecutionMeta)}

View File

@@ -120,11 +120,9 @@ export const AgentFlowList = ({
lastRun =
runCount == 0
? null
: _flowRuns.reduce((a, c) => {
const aTime = a.started_at?.getTime() ?? 0;
const cTime = c.started_at?.getTime() ?? 0;
return aTime > cTime ? a : c;
});
: _flowRuns.reduce((a, c) =>
a.started_at > c.started_at ? a : c,
);
}
return { flow, runCount, lastRun };
})
@@ -132,9 +130,10 @@ export const AgentFlowList = ({
if (!a.lastRun && !b.lastRun) return 0;
if (!a.lastRun) return 1;
if (!b.lastRun) return -1;
const bTime = b.lastRun.started_at?.getTime() ?? 0;
const aTime = a.lastRun.started_at?.getTime() ?? 0;
return bTime - aTime;
return (
b.lastRun.started_at.getTime() -
a.lastRun.started_at.getTime()
);
})
.map(({ flow, runCount, lastRun }) => (
<TableRow

View File

@@ -29,10 +29,7 @@ export const FlowRunsStatus: React.FC<{
: statsSince;
const filteredFlowRuns =
statsSinceTimestamp != null
? executions.filter(
(fr) =>
fr.started_at && fr.started_at.getTime() > statsSinceTimestamp,
)
? executions.filter((fr) => fr.started_at.getTime() > statsSinceTimestamp)
: executions;
return (

View File

@@ -98,43 +98,40 @@ export const FlowRunsTimeline = ({
<Scatter
key={flow.id}
data={executions
.filter((e) => e.graph_id == flow.graph_id && e.started_at)
.filter((e) => e.graph_id == flow.graph_id)
.map((e) => ({
...e,
time:
(e.started_at?.getTime() ?? 0) +
(e.stats?.node_exec_time ?? 0) * 1000,
e.started_at.getTime() + (e.stats?.node_exec_time ?? 0) * 1000,
_duration: e.stats?.node_exec_time ?? 0,
}))}
name={flow.name}
fill={`hsl(${(hashString(flow.id) * 137.5) % 360}, 70%, 50%)`}
/>
))}
{executions
.filter((e) => e.started_at && e.ended_at)
.map((execution) => (
<Line
key={execution.id}
type="linear"
dataKey="_duration"
data={[
{
...execution,
time: execution.started_at!.getTime(),
_duration: 0,
},
{
...execution,
time: execution.ended_at!.getTime(),
_duration: execution.stats?.node_exec_time ?? 0,
},
]}
stroke={`hsl(${(hashString(execution.graph_id) * 137.5) % 360}, 70%, 50%)`}
strokeWidth={2}
dot={false}
legendType="none"
/>
))}
{executions.map((execution) => (
<Line
key={execution.id}
type="linear"
dataKey="_duration"
data={[
{
...execution,
time: execution.started_at.getTime(),
_duration: 0,
},
{
...execution,
time: execution.ended_at.getTime(),
_duration: execution.stats?.node_exec_time ?? 0,
},
]}
stroke={`hsl(${(hashString(execution.graph_id) * 137.5) % 360}, 70%, 50%)`}
strokeWidth={2}
dot={false}
legendType="none"
/>
))}
<Legend
content={<ScrollableLegend />}
wrapperStyle={{

View File

@@ -98,11 +98,7 @@ const Monitor = () => {
...(selectedFlow
? executions.filter((v) => v.graph_id == selectedFlow.graph_id)
: executions),
].sort((a, b) => {
const aTime = a.started_at?.getTime() ?? 0;
const bTime = b.started_at?.getTime() ?? 0;
return bTime - aTime;
})}
].sort((a, b) => b.started_at.getTime() - a.started_at.getTime())}
selectedRun={selectedRun}
onSelectRun={(r) => setSelectedRun(r.id == selectedRun?.id ? null : r)}
/>

View File

@@ -116,9 +116,6 @@ export default function UserIntegrationsPage() {
"63a6e279-2dc2-448e-bf57-85776f7176dc", // ZeroBounce
"9aa1bde0-4947-4a70-a20c-84daa3850d52", // Google Maps
"d44045af-1c33-4833-9e19-752313214de2", // Llama API
"c4e6d1a0-3b5f-4789-a8e2-9b123456789f", // V0 by Vercel
"a5b3c7d9-2e4f-4a6b-8c1d-9e0f1a2b3c4d", // Webshare Proxy
"8b3d4e5f-6a7b-8c9d-0e1f-2a3b4c5d6e7f", // OpenWeatherMap
],
[],
);

View File

@@ -7148,20 +7148,6 @@
"error_message": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Error Message"
},
"started_at": {
"anyOf": [
{ "type": "string", "format": "date-time" },
{ "type": "null" }
],
"title": "Started At"
},
"ended_at": {
"anyOf": [
{ "type": "string", "format": "date-time" },
{ "type": "null" }
],
"title": "Ended At"
}
},
"type": "object",
@@ -7268,20 +7254,14 @@
},
"status": { "$ref": "#/components/schemas/AgentExecutionStatus" },
"started_at": {
"anyOf": [
{ "type": "string", "format": "date-time" },
{ "type": "null" }
],
"title": "Started At",
"description": "When execution started running. Null if not yet started (QUEUED)."
"type": "string",
"format": "date-time",
"title": "Started At"
},
"ended_at": {
"anyOf": [
{ "type": "string", "format": "date-time" },
{ "type": "null" }
],
"title": "Ended At",
"description": "When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW)."
"type": "string",
"format": "date-time",
"title": "Ended At"
},
"is_shared": {
"type": "boolean",
@@ -7315,6 +7295,8 @@
"nodes_input_masks",
"preset_id",
"status",
"started_at",
"ended_at",
"stats",
"outputs"
],
@@ -7413,20 +7395,14 @@
},
"status": { "$ref": "#/components/schemas/AgentExecutionStatus" },
"started_at": {
"anyOf": [
{ "type": "string", "format": "date-time" },
{ "type": "null" }
],
"title": "Started At",
"description": "When execution started running. Null if not yet started (QUEUED)."
"type": "string",
"format": "date-time",
"title": "Started At"
},
"ended_at": {
"anyOf": [
{ "type": "string", "format": "date-time" },
{ "type": "null" }
],
"title": "Ended At",
"description": "When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW)."
"type": "string",
"format": "date-time",
"title": "Ended At"
},
"is_shared": {
"type": "boolean",
@@ -7455,6 +7431,8 @@
"nodes_input_masks",
"preset_id",
"status",
"started_at",
"ended_at",
"stats"
],
"title": "GraphExecutionMeta"
@@ -7501,20 +7479,14 @@
},
"status": { "$ref": "#/components/schemas/AgentExecutionStatus" },
"started_at": {
"anyOf": [
{ "type": "string", "format": "date-time" },
{ "type": "null" }
],
"title": "Started At",
"description": "When execution started running. Null if not yet started (QUEUED)."
"type": "string",
"format": "date-time",
"title": "Started At"
},
"ended_at": {
"anyOf": [
{ "type": "string", "format": "date-time" },
{ "type": "null" }
],
"title": "Ended At",
"description": "When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW)."
"type": "string",
"format": "date-time",
"title": "Ended At"
},
"is_shared": {
"type": "boolean",
@@ -7553,6 +7525,8 @@
"nodes_input_masks",
"preset_id",
"status",
"started_at",
"ended_at",
"stats",
"outputs",
"node_executions"

View File

@@ -50,9 +50,7 @@ export function ActivityItem({ execution }: Props) {
execution.status === AgentExecutionStatus.QUEUED;
if (isActiveStatus) {
const timeAgo = execution.started_at
? formatTimeAgo(execution.started_at.toString())
: "recently";
const timeAgo = formatTimeAgo(execution.started_at.toString());
const statusText =
execution.status === AgentExecutionStatus.QUEUED ? "queued" : "running";
return [
@@ -63,9 +61,7 @@ export function ActivityItem({ execution }: Props) {
// Handle all other statuses with time display
const timeAgo = execution.ended_at
? formatTimeAgo(execution.ended_at.toString())
: execution.started_at
? formatTimeAgo(execution.started_at.toString())
: "recently";
: formatTimeAgo(execution.started_at.toString());
let statusText = "ended";
switch (execution.status) {

View File

@@ -327,8 +327,8 @@ export type GraphExecutionMeta = {
| "FAILED"
| "INCOMPLETE"
| "REVIEW";
started_at: Date | null;
ended_at: Date | null;
started_at: Date;
ended_at: Date;
stats: {
error: string | null;
cost: number;

View File

@@ -39,9 +39,10 @@ test.beforeEach(async ({ page }) => {
await page.waitForTimeout(1000);
await page.goto("/library");
// Navigate to the specific agent we just created, not just the first one
await LibraryPage.navigateToAgentByName(page, "Test Agent");
await LibraryPage.clickFirstAgent(page);
await LibraryPage.waitForAgentPageLoad(page);
const { getRole } = getSelectors(page);
await isVisible(getRole("heading", "Test Agent"), 8000);
});
test("shows badge with count when agent is running", async ({ page }) => {

View File

@@ -450,72 +450,45 @@ export async function navigateToAgentByName(
agentName: string,
): Promise<void> {
const agentCard = getAgentCards(page).filter({ hasText: agentName }).first();
// Wait for the agent card to be visible before clicking
// This handles async loading of agents after page navigation
await agentCard.waitFor({ state: "visible", timeout: 15000 });
await agentCard.click();
}
export async function clickRunButton(page: Page): Promise<void> {
const { getId } = getSelectors(page);
// Wait for page to stabilize and buttons to render
// The NewAgentLibraryView shows either "Setup your task" (empty state)
// or "New task" (with items) button
const setupTaskButton = page.getByRole("button", {
name: /Setup your task/i,
});
const newTaskButton = page.getByRole("button", { name: /New task/i });
const runButton = getId("agent-run-button");
const runAgainButton = getId("run-again-button");
// Use Promise.race with waitFor to wait for any of the buttons to appear
// This handles the async rendering in CI environments
try {
await Promise.race([
setupTaskButton.waitFor({ state: "visible", timeout: 15000 }),
newTaskButton.waitFor({ state: "visible", timeout: 15000 }),
runButton.waitFor({ state: "visible", timeout: 15000 }),
runAgainButton.waitFor({ state: "visible", timeout: 15000 }),
]);
} catch {
throw new Error(
"Could not find run/start task button - none of the expected buttons appeared",
);
}
// Now check which button is visible and click it
if (await setupTaskButton.isVisible()) {
await setupTaskButton.click();
const startTaskButton = page
.getByRole("button", { name: /Start Task/i })
.first();
await startTaskButton.waitFor({ state: "visible", timeout: 10000 });
await startTaskButton.click();
return;
}
const newTaskButton = page.getByRole("button", { name: /New task/i });
if (await newTaskButton.isVisible()) {
await newTaskButton.click();
const startTaskButton = page
.getByRole("button", { name: /Start Task/i })
.first();
await startTaskButton.waitFor({ state: "visible", timeout: 10000 });
await startTaskButton.click();
return;
}
const runButton = getId("agent-run-button");
const runAgainButton = getId("run-again-button");
if (await runButton.isVisible()) {
await runButton.click();
return;
}
if (await runAgainButton.isVisible()) {
} else if (await runAgainButton.isVisible()) {
await runAgainButton.click();
return;
} else {
throw new Error("Could not find run/start task button");
}
throw new Error("Could not find run/start task button");
}
export async function clickNewRunButton(page: Page): Promise<void> {

View File

@@ -1,28 +0,0 @@
"""Video editing blocks for AutoGPT Platform.
This module provides blocks for:
- Downloading videos from URLs (YouTube, Vimeo, news sites, direct links)
- Clipping/trimming video segments
- Concatenating multiple videos
- Adding text overlays
- Adding AI-generated narration
Dependencies:
- yt-dlp: For video downloading
- moviepy: For video editing operations
- requests: For API calls (narration block)
"""
from .download import VideoDownloadBlock
from .clip import VideoClipBlock
from .concat import VideoConcatBlock
from .text_overlay import VideoTextOverlayBlock
from .narration import VideoNarrationBlock
__all__ = [
"VideoClipBlock",
"VideoConcatBlock",
"VideoDownloadBlock",
"VideoNarrationBlock",
"VideoTextOverlayBlock",
]

View File

@@ -1,93 +0,0 @@
"""
VideoClipBlock - Extract a segment from a video file
"""
import uuid
from backend.data.block import Block, BlockCategory, BlockOutput
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError
class VideoClipBlock(Block):
"""Extract a time segment from a video."""
class Input(BlockSchemaInput):
video_in: str = SchemaField(
description="Input video (URL, data URI, or file path)",
json_schema_extra={"format": "file"}
)
start_time: float = SchemaField(
description="Start time in seconds",
ge=0.0
)
end_time: float = SchemaField(
description="End time in seconds",
ge=0.0
)
output_format: str = SchemaField(
description="Output format",
default="mp4",
advanced=True
)
class Output(BlockSchemaOutput):
video_out: str = SchemaField(
description="Clipped video file",
json_schema_extra={"format": "file"}
)
duration: float = SchemaField(description="Clip duration in seconds")
def __init__(self):
super().__init__(
id="b2c3d4e5-f6a7-8901-bcde-f23456789012",
description="Extract a time segment from a video",
categories={BlockCategory.MULTIMEDIA},
input_schema=self.Input,
output_schema=self.Output,
test_input={"video_in": "/tmp/test.mp4", "start_time": 0.0, "end_time": 10.0},
test_output=[("video_out", str), ("duration", float)],
test_mock={"_clip_video": lambda *args: ("/tmp/clip.mp4", 10.0)}
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
# Validate time range
if input_data.end_time <= input_data.start_time:
raise BlockExecutionError(
message=f"end_time ({input_data.end_time}) must be greater than start_time ({input_data.start_time})",
block_name=self.name,
block_id=str(self.id)
)
try:
from moviepy.video.io.VideoFileClip import VideoFileClip
except ImportError as e:
raise BlockExecutionError(
message="moviepy is not installed. Please install it with: pip install moviepy",
block_name=self.name,
block_id=str(self.id)
) from e
clip = None
subclip = None
try:
clip = VideoFileClip(input_data.video_in)
subclip = clip.subclip(input_data.start_time, input_data.end_time)
output_path = f"/tmp/clip_{uuid.uuid4()}.{input_data.output_format}"
subclip.write_videofile(output_path, logger=None)
yield "video_out", output_path
yield "duration", subclip.duration
except Exception as e:
raise BlockExecutionError(
message=f"Failed to clip video: {e}",
block_name=self.name,
block_id=str(self.id)
) from e
finally:
if subclip:
subclip.close()
if clip:
clip.close()

View File

@@ -1,123 +0,0 @@
"""
VideoConcatBlock - Concatenate multiple video clips into one
"""
import uuid
from backend.data.block import Block, BlockCategory, BlockOutput
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError
class VideoConcatBlock(Block):
"""Merge multiple video clips into one continuous video."""
class Input(BlockSchemaInput):
videos: list[str] = SchemaField(
description="List of video files to concatenate (in order)"
)
transition: str = SchemaField(
description="Transition between clips",
default="none",
enum=["none", "crossfade", "fade_black"]
)
transition_duration: float = SchemaField(
description="Transition duration in seconds",
default=0.5,
advanced=True
)
output_format: str = SchemaField(
description="Output format",
default="mp4",
advanced=True
)
class Output(BlockSchemaOutput):
video_out: str = SchemaField(
description="Concatenated video file",
json_schema_extra={"format": "file"}
)
total_duration: float = SchemaField(description="Total duration in seconds")
def __init__(self):
super().__init__(
id="c3d4e5f6-a7b8-9012-cdef-345678901234",
description="Merge multiple video clips into one continuous video",
categories={BlockCategory.MULTIMEDIA},
input_schema=self.Input,
output_schema=self.Output,
test_input={"videos": ["/tmp/a.mp4", "/tmp/b.mp4"]},
test_output=[("video_out", str), ("total_duration", float)],
test_mock={"_concat_videos": lambda *args: ("/tmp/concat.mp4", 20.0)}
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
from moviepy.editor import VideoFileClip, concatenate_videoclips
except ImportError as e:
raise BlockExecutionError(
message="moviepy is not installed. Please install it with: pip install moviepy",
block_name=self.name,
block_id=str(self.id)
) from e
# Validate minimum clips
if len(input_data.videos) < 2:
raise BlockExecutionError(
message="At least 2 videos are required for concatenation",
block_name=self.name,
block_id=str(self.id)
)
clips = []
faded_clips = []
final = None
try:
# Load clips one by one to handle partial failures
for v in input_data.videos:
clips.append(VideoFileClip(v))
if input_data.transition == "crossfade":
# Apply crossfade between clips using crossfadein/crossfadeout
transition_dur = input_data.transition_duration
for i, clip in enumerate(clips):
if i > 0:
clip = clip.crossfadein(transition_dur)
if i < len(clips) - 1:
clip = clip.crossfadeout(transition_dur)
faded_clips.append(clip)
final = concatenate_videoclips(
faded_clips,
method="compose",
padding=-transition_dur
)
elif input_data.transition == "fade_black":
# Fade to black between clips
for clip in clips:
faded = clip.fadein(input_data.transition_duration).fadeout(
input_data.transition_duration
)
faded_clips.append(faded)
final = concatenate_videoclips(faded_clips)
else:
final = concatenate_videoclips(clips)
output_path = f"/tmp/concat_{uuid.uuid4()}.{input_data.output_format}"
final.write_videofile(output_path, logger=None)
yield "video_out", output_path
yield "total_duration", final.duration
except Exception as e:
raise BlockExecutionError(
message=f"Failed to concatenate videos: {e}",
block_name=self.name,
block_id=str(self.id)
) from e
finally:
if final:
final.close()
for clip in faded_clips:
clip.close()
for clip in clips:
clip.close()

View File

@@ -1,102 +0,0 @@
"""
VideoDownloadBlock - Download video from URL (YouTube, Vimeo, news sites, direct links)
"""
import uuid
from typing import Literal
from backend.data.block import Block, BlockCategory, BlockOutput
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError
class VideoDownloadBlock(Block):
"""Download video from URL using yt-dlp."""
class Input(BlockSchemaInput):
url: str = SchemaField(
description="URL of the video to download (YouTube, Vimeo, direct link, etc.)",
placeholder="https://www.youtube.com/watch?v=..."
)
quality: Literal["best", "1080p", "720p", "480p", "audio_only"] = SchemaField(
description="Video quality preference",
default="720p"
)
output_format: Literal["mp4", "webm", "mkv"] = SchemaField(
description="Output video format",
default="mp4",
advanced=True
)
class Output(BlockSchemaOutput):
video_file: str = SchemaField(
description="Path or data URI of downloaded video",
json_schema_extra={"format": "file"}
)
duration: float = SchemaField(description="Video duration in seconds")
title: str = SchemaField(description="Video title from source")
source_url: str = SchemaField(description="Original source URL")
def __init__(self):
super().__init__(
id="a1b2c3d4-e5f6-7890-abcd-ef1234567890",
description="Download video from URL (YouTube, Vimeo, news sites, direct links)",
categories={BlockCategory.MULTIMEDIA},
input_schema=self.Input,
output_schema=self.Output,
test_input={"url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ", "quality": "480p"},
test_output=[("video_file", str), ("duration", float), ("title", str), ("source_url", str)],
test_mock={"_download_video": lambda *args: ("/tmp/video.mp4", 212.0, "Test Video")}
)
def _get_format_string(self, quality: str) -> str:
formats = {
"best": "bestvideo+bestaudio/best",
"1080p": "bestvideo[height<=1080]+bestaudio/best[height<=1080]",
"720p": "bestvideo[height<=720]+bestaudio/best[height<=720]",
"480p": "bestvideo[height<=480]+bestaudio/best[height<=480]",
"audio_only": "bestaudio/best"
}
return formats.get(quality, formats["720p"])
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
import yt_dlp
except ImportError as e:
raise BlockExecutionError(
message="yt-dlp is not installed. Please install it with: pip install yt-dlp",
block_name=self.name,
block_id=str(self.id)
) from e
video_id = str(uuid.uuid4())[:8]
output_template = f"/tmp/{video_id}.%(ext)s"
ydl_opts = {
"format": self._get_format_string(input_data.quality),
"outtmpl": output_template,
"merge_output_format": input_data.output_format,
"quiet": True,
"no_warnings": True,
}
try:
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
info = ydl.extract_info(input_data.url, download=True)
video_path = ydl.prepare_filename(info)
# Handle format conversion in filename
if not video_path.endswith(f".{input_data.output_format}"):
video_path = video_path.rsplit(".", 1)[0] + f".{input_data.output_format}"
yield "video_file", video_path
yield "duration", info.get("duration") or 0.0
yield "title", info.get("title") or "Unknown"
yield "source_url", input_data.url
except Exception as e:
raise BlockExecutionError(
message=f"Failed to download video: {e}",
block_name=self.name,
block_id=str(self.id)
) from e

View File

@@ -1,167 +0,0 @@
"""
VideoNarrationBlock - Generate AI voice narration and add to video
"""
import uuid
from typing import Literal
from backend.data.block import Block, BlockCategory, BlockOutput
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
from backend.data.model import SchemaField, CredentialsMetaInput, APIKeyCredentials
from backend.integrations.providers import ProviderName
from backend.util.exceptions import BlockExecutionError
class VideoNarrationBlock(Block):
"""Generate AI narration and add to video."""
class Input(BlockSchemaInput):
credentials: CredentialsMetaInput[
Literal[ProviderName.ELEVENLABS], Literal["api_key"]
] = SchemaField(
description="ElevenLabs API key for voice synthesis"
)
video_in: str = SchemaField(
description="Input video file",
json_schema_extra={"format": "file"}
)
script: str = SchemaField(
description="Narration script text"
)
voice_id: str = SchemaField(
description="ElevenLabs voice ID",
default="21m00Tcm4TlvDq8ikWAM" # Rachel
)
mix_mode: Literal["replace", "mix", "ducking"] = SchemaField(
description="How to combine with original audio",
default="ducking"
)
narration_volume: float = SchemaField(
description="Narration volume (0.0 to 2.0)",
default=1.0,
ge=0.0,
le=2.0,
advanced=True
)
original_volume: float = SchemaField(
description="Original audio volume when mixing (0.0 to 1.0)",
default=0.3,
ge=0.0,
le=1.0,
advanced=True
)
class Output(BlockSchemaOutput):
video_out: str = SchemaField(
description="Video with narration",
json_schema_extra={"format": "file"}
)
audio_file: str = SchemaField(
description="Generated audio file",
json_schema_extra={"format": "file"}
)
def __init__(self):
super().__init__(
id="e5f6a7b8-c9d0-1234-ef56-789012345678",
description="Generate AI narration and add to video",
categories={BlockCategory.MULTIMEDIA, BlockCategory.AI},
input_schema=self.Input,
output_schema=self.Output,
test_input={
"video_in": "/tmp/test.mp4",
"script": "Hello world",
"credentials": {"provider": "elevenlabs", "id": "test", "type": "api_key"}
},
test_output=[("video_out", str), ("audio_file", str)],
test_mock={"_generate_narration": lambda *args: ("/tmp/narrated.mp4", "/tmp/audio.mp3")}
)
async def run(
self,
input_data: Input,
*,
credentials: APIKeyCredentials,
**kwargs
) -> BlockOutput:
try:
import requests
from moviepy.editor import VideoFileClip, AudioFileClip, CompositeAudioClip
except ImportError as e:
raise BlockExecutionError(
message=f"Missing dependency: {e}. Install moviepy and requests.",
block_name=self.name,
block_id=str(self.id)
) from e
video = None
final = None
narration = None
try:
# Generate narration via ElevenLabs
response = requests.post(
f"https://api.elevenlabs.io/v1/text-to-speech/{input_data.voice_id}",
headers={
"xi-api-key": credentials.api_key.get_secret_value(),
"Content-Type": "application/json"
},
json={
"text": input_data.script,
"model_id": "eleven_monolingual_v1"
},
timeout=120
)
response.raise_for_status()
audio_path = f"/tmp/narration_{uuid.uuid4()}.mp3"
with open(audio_path, "wb") as f:
f.write(response.content)
# Combine with video
video = VideoFileClip(input_data.video_in)
narration = AudioFileClip(audio_path)
narration = narration.volumex(input_data.narration_volume)
if input_data.mix_mode == "replace":
final_audio = narration
elif input_data.mix_mode == "mix":
if video.audio:
original = video.audio.volumex(input_data.original_volume)
final_audio = CompositeAudioClip([original, narration])
else:
final_audio = narration
else: # ducking - lower original volume more when narration plays
if video.audio:
# Apply stronger attenuation for ducking effect
ducking_volume = input_data.original_volume * 0.3
original = video.audio.volumex(ducking_volume)
final_audio = CompositeAudioClip([original, narration])
else:
final_audio = narration
final = video.set_audio(final_audio)
output_path = f"/tmp/narrated_{uuid.uuid4()}.mp4"
final.write_videofile(output_path, logger=None)
yield "video_out", output_path
yield "audio_file", audio_path
except requests.exceptions.RequestException as e:
raise BlockExecutionError(
message=f"ElevenLabs API error: {e}",
block_name=self.name,
block_id=str(self.id)
) from e
except Exception as e:
raise BlockExecutionError(
message=f"Failed to add narration: {e}",
block_name=self.name,
block_id=str(self.id)
) from e
finally:
if narration:
narration.close()
if final:
final.close()
if video:
video.close()

View File

@@ -1,149 +0,0 @@
"""
VideoTextOverlayBlock - Add text overlay to video
"""
import uuid
from typing import Literal
from backend.data.block import Block, BlockCategory, BlockOutput
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError
class VideoTextOverlayBlock(Block):
"""Add text overlay/caption to video."""
class Input(BlockSchemaInput):
video_in: str = SchemaField(
description="Input video file",
json_schema_extra={"format": "file"}
)
text: str = SchemaField(
description="Text to overlay on video"
)
position: Literal[
"top", "center", "bottom",
"top-left", "top-right",
"bottom-left", "bottom-right"
] = SchemaField(
description="Position of text on screen",
default="bottom"
)
start_time: float | None = SchemaField(
description="When to show text (seconds). None = entire video",
default=None,
advanced=True
)
end_time: float | None = SchemaField(
description="When to hide text (seconds). None = until end",
default=None,
advanced=True
)
font_size: int = SchemaField(
description="Font size",
default=48,
ge=12,
le=200,
advanced=True
)
font_color: str = SchemaField(
description="Font color (hex or name)",
default="white",
advanced=True
)
bg_color: str | None = SchemaField(
description="Background color behind text (None for transparent)",
default=None,
advanced=True
)
class Output(BlockSchemaOutput):
video_out: str = SchemaField(
description="Video with text overlay",
json_schema_extra={"format": "file"}
)
def __init__(self):
super().__init__(
id="d4e5f6a7-b8c9-0123-def4-567890123456",
description="Add text overlay/caption to video",
categories={BlockCategory.MULTIMEDIA},
input_schema=self.Input,
output_schema=self.Output,
test_input={"video_in": "/tmp/test.mp4", "text": "Hello World"},
test_output=[("video_out", str)],
test_mock={"_add_text": lambda *args: "/tmp/overlay.mp4"}
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip
except ImportError as e:
raise BlockExecutionError(
message="moviepy is not installed. Please install it with: pip install moviepy",
block_name=self.name,
block_id=str(self.id)
) from e
# Validate time range if both are provided
if (input_data.start_time is not None and
input_data.end_time is not None and
input_data.end_time <= input_data.start_time):
raise BlockExecutionError(
message=f"end_time ({input_data.end_time}) must be greater than start_time ({input_data.start_time})",
block_name=self.name,
block_id=str(self.id)
)
video = None
final = None
txt_clip = None
try:
video = VideoFileClip(input_data.video_in)
txt_clip = TextClip(
input_data.text,
fontsize=input_data.font_size,
color=input_data.font_color,
bg_color=input_data.bg_color,
)
# Position mapping
pos_map = {
"top": ("center", "top"),
"center": ("center", "center"),
"bottom": ("center", "bottom"),
"top-left": ("left", "top"),
"top-right": ("right", "top"),
"bottom-left": ("left", "bottom"),
"bottom-right": ("right", "bottom"),
}
txt_clip = txt_clip.set_position(pos_map[input_data.position])
# Set timing
start = input_data.start_time or 0
end = input_data.end_time or video.duration
duration = max(0, end - start)
txt_clip = txt_clip.set_start(start).set_end(end).set_duration(duration)
final = CompositeVideoClip([video, txt_clip])
output_path = f"/tmp/overlay_{uuid.uuid4()}.mp4"
final.write_videofile(output_path, logger=None)
yield "video_out", output_path
except Exception as e:
raise BlockExecutionError(
message=f"Failed to add text overlay: {e}",
block_name=self.name,
block_id=str(self.id)
) from e
finally:
if txt_clip:
txt_clip.close()
if final:
final.close()
if video:
video.close()