Mirror of https://github.com/Significant-Gravitas/AutoGPT.git
Synced 2026-01-19 12:08:46 -05:00

Compare commits (11 commits): fix/chat-c… → feature/vi…
| Author | SHA1 | Date |
|---|---|---|
|  | f8d3893c16 |  |
|  | 1cfbc0dd08 |  |
|  | ff84643b48 |  |
|  | c19c3c834a |  |
|  | d0f7ba8cfd |  |
|  | 2a855f4bd0 |  |
|  | b93bb3b9f8 |  |
|  | 1b56ff13d9 |  |
|  | f31c160043 |  |
|  | 06550a87eb |  |
|  | 088b9998dc |  |
@@ -28,6 +28,7 @@ from backend.executor.manager import get_db_async_client
 from backend.util.settings import Settings

 logger = logging.getLogger(__name__)
+settings = Settings()


 class ExecutionAnalyticsRequest(BaseModel):

@@ -63,6 +64,8 @@ class ExecutionAnalyticsResult(BaseModel):
     score: Optional[float]
     status: str  # "success", "failed", "skipped"
     error_message: Optional[str] = None
+    started_at: Optional[datetime] = None
+    ended_at: Optional[datetime] = None


 class ExecutionAnalyticsResponse(BaseModel):

@@ -224,11 +227,6 @@ async def generate_execution_analytics(
     )

     try:
-        # Validate model configuration
-        settings = Settings()
-        if not settings.secrets.openai_internal_api_key:
-            raise HTTPException(status_code=500, detail="OpenAI API key not configured")
-
         # Get database client
         db_client = get_db_async_client()

@@ -320,6 +318,8 @@ async def generate_execution_analytics(
                     ),
                     status="skipped",
                     error_message=None,  # Not an error - just already processed
+                    started_at=execution.started_at,
+                    ended_at=execution.ended_at,
                 )
             )

@@ -349,6 +349,9 @@ async def _process_batch(
 ) -> list[ExecutionAnalyticsResult]:
     """Process a batch of executions concurrently."""

+    if not settings.secrets.openai_internal_api_key:
+        raise HTTPException(status_code=500, detail="OpenAI API key not configured")
+
     async def process_single_execution(execution) -> ExecutionAnalyticsResult:
         try:
             # Generate activity status and score using the specified model

@@ -387,6 +390,8 @@ async def _process_batch(
                 score=None,
                 status="skipped",
                 error_message="Activity generation returned None",
+                started_at=execution.started_at,
+                ended_at=execution.ended_at,
             )

         # Update the execution stats

@@ -416,6 +421,8 @@ async def _process_batch(
                 summary_text=activity_response["activity_status"],
                 score=activity_response["correctness_score"],
                 status="success",
+                started_at=execution.started_at,
+                ended_at=execution.ended_at,
             )

         except Exception as e:

@@ -429,6 +436,8 @@ async def _process_batch(
                 score=None,
                 status="failed",
                 error_message=str(e),
+                started_at=execution.started_at,
+                ended_at=execution.ended_at,
             )

     # Process all executions in the batch concurrently
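A minimal sketch of the result shape these hunks extend, assuming Pydantic v2; the field names come from the diff, the values are illustrative:

from datetime import datetime, timezone
from typing import Optional

from pydantic import BaseModel


class ExecutionAnalyticsResult(BaseModel):  # mirrors the model in the diff
    exec_id: str
    score: Optional[float] = None
    status: str  # "success", "failed", "skipped"
    error_message: Optional[str] = None
    started_at: Optional[datetime] = None  # new in this change set
    ended_at: Optional[datetime] = None    # new in this change set


# A "skipped" result can now carry its real timestamps instead of inventing values:
r = ExecutionAnalyticsResult(
    exec_id="exec-1",
    status="skipped",
    started_at=datetime(2026, 1, 1, tzinfo=timezone.utc),
)
print(r.model_dump_json())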
@@ -104,7 +104,7 @@ async def get_accuracy_trends_and_alerts(
     AND e."executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED')
     {user_filter}
     GROUP BY DATE(e."createdAt")
-    HAVING COUNT(*) >= 3 -- Need at least 3 executions per day
+    HAVING COUNT(*) >= 1 -- Include all days with at least 1 execution
 ),
 trends AS (
     SELECT
@@ -153,8 +153,14 @@ class GraphExecutionMeta(BaseDbModel):
     nodes_input_masks: Optional[dict[str, BlockInput]]
     preset_id: Optional[str]
     status: ExecutionStatus
-    started_at: datetime
-    ended_at: datetime
+    started_at: Optional[datetime] = Field(
+        None,
+        description="When execution started running. Null if not yet started (QUEUED).",
+    )
+    ended_at: Optional[datetime] = Field(
+        None,
+        description="When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW).",
+    )
     is_shared: bool = False
     share_token: Optional[str] = None

@@ -229,10 +235,8 @@ class GraphExecutionMeta(BaseDbModel):

     @staticmethod
     def from_db(_graph_exec: AgentGraphExecution):
-        now = datetime.now(timezone.utc)
-        # TODO: make started_at and ended_at optional
-        start_time = _graph_exec.startedAt or _graph_exec.createdAt
-        end_time = _graph_exec.updatedAt or now
+        start_time = _graph_exec.startedAt
+        end_time = _graph_exec.endedAt

         try:
             stats = GraphExecutionStats.model_validate(_graph_exec.stats)
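With both timestamps now optional, downstream duration math needs a None guard. A minimal sketch (the helper name is illustrative, not from the diff):

from datetime import datetime, timedelta, timezone
from typing import Optional


def execution_duration(
    started_at: Optional[datetime], ended_at: Optional[datetime]
) -> Optional[timedelta]:
    # None while the run is still QUEUED (no start) or not yet finished (no end).
    if started_at is None or ended_at is None:
        return None
    return ended_at - started_at


print(execution_duration(None, None))  # None: queued, never started
print(
    execution_duration(
        datetime(2026, 1, 1, 12, 0, tzinfo=timezone.utc),
        datetime(2026, 1, 1, 12, 5, tzinfo=timezone.utc),
    )
)  # 0:05:00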
@@ -902,6 +906,14 @@ async def update_graph_execution_stats(

     if status:
         update_data["executionStatus"] = status
+        # Set endedAt when execution reaches a terminal status
+        terminal_statuses = [
+            ExecutionStatus.COMPLETED,
+            ExecutionStatus.FAILED,
+            ExecutionStatus.TERMINATED,
+        ]
+        if status in terminal_statuses:
+            update_data["endedAt"] = datetime.now(tz=timezone.utc)

     where_clause: AgentGraphExecutionWhereInput = {"id": graph_exec_id}
@@ -96,9 +96,9 @@ jina_credentials = APIKeyCredentials(
 )
 unreal_credentials = APIKeyCredentials(
     id="66f20754-1b81-48e4-91d0-f4f0dd82145f",
-    provider="unreal",
+    provider="unreal_speech",
     api_key=SecretStr(settings.secrets.unreal_speech_api_key),
-    title="Use Credits for Unreal",
+    title="Use Credits for Unreal Speech",
     expires_at=None,
 )
 open_router_credentials = APIKeyCredentials(

@@ -216,6 +216,14 @@ webshare_proxy_credentials = UserPasswordCredentials(
     title="Use Credits for Webshare Proxy",
 )

+openweathermap_credentials = APIKeyCredentials(
+    id="8b3d4e5f-6a7b-8c9d-0e1f-2a3b4c5d6e7f",
+    provider="openweathermap",
+    api_key=SecretStr(settings.secrets.openweathermap_api_key),
+    title="Use Credits for OpenWeatherMap",
+    expires_at=None,
+)
+
 DEFAULT_CREDENTIALS = [
     ollama_credentials,
     revid_credentials,

@@ -243,6 +251,7 @@ DEFAULT_CREDENTIALS = [
     llama_api_credentials,
     v0_credentials,
     webshare_proxy_credentials,
+    openweathermap_credentials,
 ]

 SYSTEM_CREDENTIAL_IDS = {cred.id for cred in DEFAULT_CREDENTIALS}

@@ -346,11 +355,17 @@ class IntegrationCredentialsStore:
             all_credentials.append(zerobounce_credentials)
         if settings.secrets.google_maps_api_key:
             all_credentials.append(google_maps_credentials)
         if settings.secrets.llama_api_key:
             all_credentials.append(llama_api_credentials)
         if settings.secrets.v0_api_key:
             all_credentials.append(v0_credentials)
         if (
             settings.secrets.webshare_proxy_username
             and settings.secrets.webshare_proxy_password
         ):
             all_credentials.append(webshare_proxy_credentials)
+        if settings.secrets.openweathermap_api_key:
+            all_credentials.append(openweathermap_credentials)
         return all_credentials

     async def get_creds_by_id(
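A self-contained sketch of the gating pattern the last hunk extends: a default credential is only surfaced when its secret is actually configured. The Secrets stand-in below is illustrative, not the platform's settings class:

from dataclasses import dataclass


@dataclass
class Secrets:  # stand-in for settings.secrets
    openweathermap_api_key: str = ""


@dataclass
class Credential:
    id: str
    provider: str


OPENWEATHERMAP = Credential("8b3d4e5f-6a7b-8c9d-0e1f-2a3b4c5d6e7f", "openweathermap")


def available_credentials(secrets: Secrets) -> list[Credential]:
    creds: list[Credential] = []
    # An empty string is falsy, so unconfigured keys are simply skipped.
    if secrets.openweathermap_api_key:
        creds.append(OPENWEATHERMAP)
    return creds


print(available_credentials(Secrets()))               # []
print(available_credentials(Secrets("owm-key-123")))  # [Credential(...)]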
@@ -60,8 +60,10 @@ class LateExecutionMonitor:
         if not all_late_executions:
             return "No late executions detected."

-        # Sort by created time (oldest first)
-        all_late_executions.sort(key=lambda x: x.started_at)
+        # Sort by started time (oldest first), with None values (unstarted) first
+        all_late_executions.sort(
+            key=lambda x: x.started_at or datetime.min.replace(tzinfo=timezone.utc)
+        )

         num_total_late = len(all_late_executions)
         num_queued = len(queued_late_executions)

@@ -74,7 +76,7 @@ class LateExecutionMonitor:
         was_truncated = num_total_late > tuncate_size

         late_execution_details = [
-            f"* `Execution ID: {exec.id}, Graph ID: {exec.graph_id}v{exec.graph_version}, User ID: {exec.user_id}, Status: {exec.status}, Created At: {exec.started_at.isoformat()}`"
+            f"* `Execution ID: {exec.id}, Graph ID: {exec.graph_id}v{exec.graph_version}, User ID: {exec.user_id}, Status: {exec.status}, Started At: {exec.started_at.isoformat() if exec.started_at else 'Not started'}`"
             for exec in truncated_executions
         ]
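The sentinel trick above generalizes: mapping None to a timezone-aware datetime.min sorts unstarted runs first without a custom comparator. A runnable sketch with an illustrative record type:

from dataclasses import dataclass
from datetime import datetime, timezone
from typing import Optional


@dataclass
class Run:  # illustrative, not the platform's model
    id: str
    started_at: Optional[datetime]


runs = [
    Run("b", datetime(2026, 1, 2, tzinfo=timezone.utc)),
    Run("a", None),  # queued, never started
    Run("c", datetime(2026, 1, 1, tzinfo=timezone.utc)),
]
# None -> datetime.min (made aware), so unstarted runs land at the front.
runs.sort(key=lambda r: r.started_at or datetime.min.replace(tzinfo=timezone.utc))
print([r.id for r in runs])  # ['a', 'c', 'b']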
@@ -0,0 +1,8 @@
-- AlterTable
ALTER TABLE "AgentGraphExecution" ADD COLUMN "endedAt" TIMESTAMP(3);

-- Set endedAt to updatedAt for existing records with terminal status only
UPDATE "AgentGraphExecution"
SET "endedAt" = "updatedAt"
WHERE "endedAt" IS NULL
  AND "executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED');
@@ -450,6 +450,7 @@ model AgentGraphExecution {
   createdAt DateTime  @default(now())
   updatedAt DateTime? @updatedAt
   startedAt DateTime?
+  endedAt   DateTime?

   isDeleted Boolean @default(false)
@@ -51,6 +51,8 @@ export function AnalyticsResultsTable({ results }: Props) {
     "Execution ID",
     "Status",
     "Score",
+    "Started At",
+    "Ended At",
     "Summary Text",
     "Error Message",
   ];

@@ -62,6 +64,8 @@ export function AnalyticsResultsTable({ results }: Props) {
     result.exec_id,
     result.status,
     result.score?.toString() || "",
+    result.started_at ? new Date(result.started_at).toLocaleString() : "",
+    result.ended_at ? new Date(result.ended_at).toLocaleString() : "",
     `"${(result.summary_text || "").replace(/"/g, '""')}"`, // Escape quotes in summary
     `"${(result.error_message || "").replace(/"/g, '""')}"`, // Escape quotes in error
   ]);

@@ -248,15 +252,13 @@ export function AnalyticsResultsTable({ results }: Props) {
         )}
       </td>
       <td className="px-4 py-3">
-        {(result.summary_text || result.error_message) && (
-          <Button
-            variant="ghost"
-            size="small"
-            onClick={() => toggleRowExpansion(result.exec_id)}
-          >
-            <EyeIcon size={16} />
-          </Button>
-        )}
+        <Button
+          variant="ghost"
+          size="small"
+          onClick={() => toggleRowExpansion(result.exec_id)}
+        >
+          <EyeIcon size={16} />
+        </Button>
       </td>
     </tr>

@@ -264,6 +266,44 @@ export function AnalyticsResultsTable({ results }: Props) {
       <tr>
         <td colSpan={7} className="bg-gray-50 px-4 py-3">
           <div className="space-y-3">
+            {/* Timestamps section */}
+            <div className="grid grid-cols-2 gap-4 border-b border-gray-200 pb-3">
+              <div>
+                <Text
+                  variant="body"
+                  className="text-xs font-medium text-gray-600"
+                >
+                  Started At:
+                </Text>
+                <Text
+                  variant="body"
+                  className="text-sm text-gray-700"
+                >
+                  {result.started_at
+                    ? new Date(
+                        result.started_at,
+                      ).toLocaleString()
+                    : "—"}
+                </Text>
+              </div>
+              <div>
+                <Text
+                  variant="body"
+                  className="text-xs font-medium text-gray-600"
+                >
+                  Ended At:
+                </Text>
+                <Text
+                  variant="body"
+                  className="text-sm text-gray-700"
+                >
+                  {result.ended_at
+                    ? new Date(result.ended_at).toLocaleString()
+                    : "—"}
+                </Text>
+              </div>
+            </div>

             {result.summary_text && (
               <div>
                 <Text
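The CSV export above doubles embedded quotes and wraps the field, which is the standard RFC 4180 rule; Python's csv module applies the same escaping automatically. A sketch for comparison, not part of the commit:

import csv
import io

buf = io.StringIO()
csv.writer(buf).writerow(["exec-1", "success", 'He said "done"'])
# Quotes are doubled and the field is wrapped, matching the manual TS escaping:
print(buf.getvalue().strip())  # exec-1,success,"He said ""done"""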
@@ -541,7 +541,19 @@ export function ExecutionAnalyticsForm() {
       {/* Accuracy Trends Display */}
       {trendsData && (
         <div className="space-y-4">
-          <h3 className="text-lg font-semibold">Execution Accuracy Trends</h3>
+          <div className="flex items-start justify-between">
+            <h3 className="text-lg font-semibold">Execution Accuracy Trends</h3>
+            <div className="rounded-md bg-blue-50 px-3 py-2 text-xs text-blue-700">
+              <p className="font-medium">
+                Chart Filters (matches monitoring system):
+              </p>
+              <ul className="mt-1 list-inside list-disc space-y-1">
+                <li>Only days with ≥1 execution with correctness score</li>
+                <li>Last 30 days</li>
+                <li>Averages calculated from scored executions only</li>
+              </ul>
+            </div>
+          </div>

           {/* Alert Section */}
           {trendsData.alert && (
@@ -173,8 +173,9 @@ export function OldAgentLibraryView() {
     if (agentRuns.length > 0) {
       // select latest run
       const latestRun = agentRuns.reduce((latest, current) => {
-        if (latest.started_at && !current.started_at) return current;
-        else if (!latest.started_at) return latest;
+        if (!latest.started_at && !current.started_at) return latest;
+        if (!latest.started_at) return current;
+        if (!current.started_at) return latest;
         return latest.started_at > current.started_at ? latest : current;
       }, agentRuns[0]);
       selectRun(latestRun.id as GraphExecutionID);

@@ -184,9 +184,11 @@ export function AgentRunsSelectorList({
         ))}
         {agentPresets.length > 0 && <Separator className="my-1" />}
         {agentRuns
-          .toSorted(
-            (a, b) => b.started_at.getTime() - a.started_at.getTime(),
-          )
+          .toSorted((a, b) => {
+            const aTime = a.started_at?.getTime() ?? 0;
+            const bTime = b.started_at?.getTime() ?? 0;
+            return bTime - aTime;
+          })
           .map((run) => (
             <AgentRunSummaryCard
               className={listItemClasses}

@@ -199,7 +201,7 @@ export function AgentRunsSelectorList({
                   ?.name
                 : null) ?? agent.name
               }
-              timestamp={run.started_at}
+              timestamp={run.started_at ?? undefined}
               selected={selectedView.id === run.id}
               onClick={() => onSelectRun(run.id)}
               onDelete={() => doDeleteRun(run as GraphExecutionMeta)}

@@ -120,9 +120,11 @@ export const AgentFlowList = ({
           lastRun =
             runCount == 0
               ? null
-              : _flowRuns.reduce((a, c) =>
-                  a.started_at > c.started_at ? a : c,
-                );
+              : _flowRuns.reduce((a, c) => {
+                  const aTime = a.started_at?.getTime() ?? 0;
+                  const cTime = c.started_at?.getTime() ?? 0;
+                  return aTime > cTime ? a : c;
+                });
         }
         return { flow, runCount, lastRun };
       })

@@ -130,10 +132,9 @@ export const AgentFlowList = ({
         if (!a.lastRun && !b.lastRun) return 0;
         if (!a.lastRun) return 1;
         if (!b.lastRun) return -1;
-        return (
-          b.lastRun.started_at.getTime() -
-          a.lastRun.started_at.getTime()
-        );
+        const bTime = b.lastRun.started_at?.getTime() ?? 0;
+        const aTime = a.lastRun.started_at?.getTime() ?? 0;
+        return bTime - aTime;
       })
       .map(({ flow, runCount, lastRun }) => (
         <TableRow

@@ -29,7 +29,10 @@ export const FlowRunsStatus: React.FC<{
       : statsSince;
   const filteredFlowRuns =
     statsSinceTimestamp != null
-      ? executions.filter((fr) => fr.started_at.getTime() > statsSinceTimestamp)
+      ? executions.filter(
+          (fr) =>
+            fr.started_at && fr.started_at.getTime() > statsSinceTimestamp,
+        )
       : executions;

   return (

@@ -98,40 +98,43 @@ export const FlowRunsTimeline = ({
         <Scatter
           key={flow.id}
           data={executions
-            .filter((e) => e.graph_id == flow.graph_id)
+            .filter((e) => e.graph_id == flow.graph_id && e.started_at)
             .map((e) => ({
               ...e,
               time:
-                e.started_at.getTime() + (e.stats?.node_exec_time ?? 0) * 1000,
+                (e.started_at?.getTime() ?? 0) +
+                (e.stats?.node_exec_time ?? 0) * 1000,
               _duration: e.stats?.node_exec_time ?? 0,
             }))}
           name={flow.name}
           fill={`hsl(${(hashString(flow.id) * 137.5) % 360}, 70%, 50%)`}
         />
       ))}
-      {executions.map((execution) => (
-        <Line
-          key={execution.id}
-          type="linear"
-          dataKey="_duration"
-          data={[
-            {
-              ...execution,
-              time: execution.started_at.getTime(),
-              _duration: 0,
-            },
-            {
-              ...execution,
-              time: execution.ended_at.getTime(),
-              _duration: execution.stats?.node_exec_time ?? 0,
-            },
-          ]}
-          stroke={`hsl(${(hashString(execution.graph_id) * 137.5) % 360}, 70%, 50%)`}
-          strokeWidth={2}
-          dot={false}
-          legendType="none"
-        />
-      ))}
+      {executions
+        .filter((e) => e.started_at && e.ended_at)
+        .map((execution) => (
+          <Line
+            key={execution.id}
+            type="linear"
+            dataKey="_duration"
+            data={[
+              {
+                ...execution,
+                time: execution.started_at!.getTime(),
+                _duration: 0,
+              },
+              {
+                ...execution,
+                time: execution.ended_at!.getTime(),
+                _duration: execution.stats?.node_exec_time ?? 0,
+              },
+            ]}
+            stroke={`hsl(${(hashString(execution.graph_id) * 137.5) % 360}, 70%, 50%)`}
+            strokeWidth={2}
+            dot={false}
+            legendType="none"
+          />
+        ))}
       <Legend
         content={<ScrollableLegend />}
         wrapperStyle={{

@@ -98,7 +98,11 @@ const Monitor = () => {
           ...(selectedFlow
             ? executions.filter((v) => v.graph_id == selectedFlow.graph_id)
             : executions),
-        ].sort((a, b) => b.started_at.getTime() - a.started_at.getTime())}
+        ].sort((a, b) => {
+          const aTime = a.started_at?.getTime() ?? 0;
+          const bTime = b.started_at?.getTime() ?? 0;
+          return bTime - aTime;
+        })}
         selectedRun={selectedRun}
         onSelectRun={(r) => setSelectedRun(r.id == selectedRun?.id ? null : r)}
       />
@@ -116,6 +116,9 @@ export default function UserIntegrationsPage() {
       "63a6e279-2dc2-448e-bf57-85776f7176dc", // ZeroBounce
       "9aa1bde0-4947-4a70-a20c-84daa3850d52", // Google Maps
       "d44045af-1c33-4833-9e19-752313214de2", // Llama API
+      "c4e6d1a0-3b5f-4789-a8e2-9b123456789f", // V0 by Vercel
+      "a5b3c7d9-2e4f-4a6b-8c1d-9e0f1a2b3c4d", // Webshare Proxy
+      "8b3d4e5f-6a7b-8c9d-0e1f-2a3b4c5d6e7f", // OpenWeatherMap
     ],
     [],
   );
@@ -7148,6 +7148,20 @@
         "error_message": {
           "anyOf": [{ "type": "string" }, { "type": "null" }],
           "title": "Error Message"
         },
+        "started_at": {
+          "anyOf": [
+            { "type": "string", "format": "date-time" },
+            { "type": "null" }
+          ],
+          "title": "Started At"
+        },
+        "ended_at": {
+          "anyOf": [
+            { "type": "string", "format": "date-time" },
+            { "type": "null" }
+          ],
+          "title": "Ended At"
+        }
       },
       "type": "object",

@@ -7254,14 +7268,20 @@
       },
       "status": { "$ref": "#/components/schemas/AgentExecutionStatus" },
       "started_at": {
-        "type": "string",
-        "format": "date-time",
-        "title": "Started At"
+        "anyOf": [
+          { "type": "string", "format": "date-time" },
+          { "type": "null" }
+        ],
+        "title": "Started At",
+        "description": "When execution started running. Null if not yet started (QUEUED)."
       },
       "ended_at": {
-        "type": "string",
-        "format": "date-time",
-        "title": "Ended At"
+        "anyOf": [
+          { "type": "string", "format": "date-time" },
+          { "type": "null" }
+        ],
+        "title": "Ended At",
+        "description": "When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW)."
       },
       "is_shared": {
         "type": "boolean",

@@ -7295,8 +7315,6 @@
       "nodes_input_masks",
       "preset_id",
       "status",
-      "started_at",
-      "ended_at",
       "stats",
       "outputs"
     ],

@@ -7395,14 +7413,20 @@
       },
       "status": { "$ref": "#/components/schemas/AgentExecutionStatus" },
       "started_at": {
-        "type": "string",
-        "format": "date-time",
-        "title": "Started At"
+        "anyOf": [
+          { "type": "string", "format": "date-time" },
+          { "type": "null" }
+        ],
+        "title": "Started At",
+        "description": "When execution started running. Null if not yet started (QUEUED)."
      },
       "ended_at": {
-        "type": "string",
-        "format": "date-time",
-        "title": "Ended At"
+        "anyOf": [
+          { "type": "string", "format": "date-time" },
+          { "type": "null" }
+        ],
+        "title": "Ended At",
+        "description": "When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW)."
       },
       "is_shared": {
         "type": "boolean",

@@ -7431,8 +7455,6 @@
       "nodes_input_masks",
       "preset_id",
       "status",
-      "started_at",
-      "ended_at",
       "stats"
     ],
     "title": "GraphExecutionMeta"

@@ -7479,14 +7501,20 @@
       },
       "status": { "$ref": "#/components/schemas/AgentExecutionStatus" },
       "started_at": {
-        "type": "string",
-        "format": "date-time",
-        "title": "Started At"
+        "anyOf": [
+          { "type": "string", "format": "date-time" },
+          { "type": "null" }
+        ],
+        "title": "Started At",
+        "description": "When execution started running. Null if not yet started (QUEUED)."
       },
       "ended_at": {
-        "type": "string",
-        "format": "date-time",
-        "title": "Ended At"
+        "anyOf": [
+          { "type": "string", "format": "date-time" },
+          { "type": "null" }
+        ],
+        "title": "Ended At",
+        "description": "When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW)."
       },
       "is_shared": {
         "type": "boolean",

@@ -7525,8 +7553,6 @@
       "nodes_input_masks",
       "preset_id",
       "status",
-      "started_at",
-      "ended_at",
       "stats",
       "outputs",
       "node_executions"
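The anyOf [date-time, null] shapes above are essentially what Pydantic v2 emits for the Optional fields added to GraphExecutionMeta. A minimal sketch reproducing that output (the class name is illustrative):

import json
from datetime import datetime
from typing import Optional

from pydantic import BaseModel, Field


class GraphExecutionMetaSketch(BaseModel):  # illustrative name, not the real model
    started_at: Optional[datetime] = Field(
        None,
        description="When execution started running. Null if not yet started (QUEUED).",
    )
    ended_at: Optional[datetime] = Field(
        None,
        description="When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW).",
    )


# Each field renders as anyOf[{string/date-time}, {null}], and neither lands in
# the schema's "required" list, matching the OpenAPI diff above.
print(json.dumps(GraphExecutionMetaSketch.model_json_schema(), indent=2))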
@@ -50,7 +50,9 @@ export function ActivityItem({ execution }: Props) {
     execution.status === AgentExecutionStatus.QUEUED;

   if (isActiveStatus) {
-    const timeAgo = formatTimeAgo(execution.started_at.toString());
+    const timeAgo = execution.started_at
+      ? formatTimeAgo(execution.started_at.toString())
+      : "recently";
     const statusText =
       execution.status === AgentExecutionStatus.QUEUED ? "queued" : "running";
     return [

@@ -61,7 +63,9 @@ export function ActivityItem({ execution }: Props) {
   // Handle all other statuses with time display
   const timeAgo = execution.ended_at
     ? formatTimeAgo(execution.ended_at.toString())
-    : formatTimeAgo(execution.started_at.toString());
+    : execution.started_at
+      ? formatTimeAgo(execution.started_at.toString())
+      : "recently";

   let statusText = "ended";
   switch (execution.status) {

@@ -327,8 +327,8 @@ export type GraphExecutionMeta = {
     | "FAILED"
     | "INCOMPLETE"
     | "REVIEW";
-  started_at: Date;
-  ended_at: Date;
+  started_at: Date | null;
+  ended_at: Date | null;
   stats: {
     error: string | null;
     cost: number;
@@ -39,10 +39,9 @@ test.beforeEach(async ({ page }) => {
   await page.waitForTimeout(1000);

   await page.goto("/library");
-  await LibraryPage.clickFirstAgent(page);
+  // Navigate to the specific agent we just created, not just the first one
+  await LibraryPage.navigateToAgentByName(page, "Test Agent");
   await LibraryPage.waitForAgentPageLoad(page);
   const { getRole } = getSelectors(page);
   await isVisible(getRole("heading", "Test Agent"), 8000);
 });

 test("shows badge with count when agent is running", async ({ page }) => {

@@ -450,45 +450,72 @@ export async function navigateToAgentByName(
   agentName: string,
 ): Promise<void> {
   const agentCard = getAgentCards(page).filter({ hasText: agentName }).first();
+  // Wait for the agent card to be visible before clicking
+  // This handles async loading of agents after page navigation
+  await agentCard.waitFor({ state: "visible", timeout: 15000 });
   await agentCard.click();
 }

 export async function clickRunButton(page: Page): Promise<void> {
   const { getId } = getSelectors(page);

   // Wait for page to stabilize and buttons to render
   // The NewAgentLibraryView shows either "Setup your task" (empty state)
   // or "New task" (with items) button
   const setupTaskButton = page.getByRole("button", {
     name: /Setup your task/i,
   });
+  const newTaskButton = page.getByRole("button", { name: /New task/i });
+  const runButton = getId("agent-run-button");
+  const runAgainButton = getId("run-again-button");
+
+  // Use Promise.race with waitFor to wait for any of the buttons to appear
+  // This handles the async rendering in CI environments
+  try {
+    await Promise.race([
+      setupTaskButton.waitFor({ state: "visible", timeout: 15000 }),
+      newTaskButton.waitFor({ state: "visible", timeout: 15000 }),
+      runButton.waitFor({ state: "visible", timeout: 15000 }),
+      runAgainButton.waitFor({ state: "visible", timeout: 15000 }),
+    ]);
+  } catch {
+    throw new Error(
+      "Could not find run/start task button - none of the expected buttons appeared",
+    );
+  }
+
+  // Now check which button is visible and click it
   if (await setupTaskButton.isVisible()) {
     await setupTaskButton.click();
     const startTaskButton = page
       .getByRole("button", { name: /Start Task/i })
       .first();
     await startTaskButton.waitFor({ state: "visible", timeout: 10000 });
     await startTaskButton.click();
     return;
   }

-  const newTaskButton = page.getByRole("button", { name: /New task/i });
   if (await newTaskButton.isVisible()) {
     await newTaskButton.click();
     const startTaskButton = page
       .getByRole("button", { name: /Start Task/i })
       .first();
     await startTaskButton.waitFor({ state: "visible", timeout: 10000 });
     await startTaskButton.click();
     return;
   }

-  const runButton = getId("agent-run-button");
-  const runAgainButton = getId("run-again-button");
-
   if (await runButton.isVisible()) {
     await runButton.click();
-  } else if (await runAgainButton.isVisible()) {
-    await runAgainButton.click();
-  } else {
-    throw new Error("Could not find run/start task button");
+    return;
   }
+
+  if (await runAgainButton.isVisible()) {
+    await runAgainButton.click();
+    return;
+  }
+
+  throw new Error("Could not find run/start task button");
 }

 export async function clickNewRunButton(page: Page): Promise<void> {
backend/blocks/video/__init__.py (new file, 28 lines)
@@ -0,0 +1,28 @@
"""Video editing blocks for AutoGPT Platform.

This module provides blocks for:
- Downloading videos from URLs (YouTube, Vimeo, news sites, direct links)
- Clipping/trimming video segments
- Concatenating multiple videos
- Adding text overlays
- Adding AI-generated narration

Dependencies:
- yt-dlp: For video downloading
- moviepy: For video editing operations
- requests: For API calls (narration block)
"""

from .download import VideoDownloadBlock
from .clip import VideoClipBlock
from .concat import VideoConcatBlock
from .text_overlay import VideoTextOverlayBlock
from .narration import VideoNarrationBlock

__all__ = [
    "VideoClipBlock",
    "VideoConcatBlock",
    "VideoDownloadBlock",
    "VideoNarrationBlock",
    "VideoTextOverlayBlock",
]
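A quick import smoke test for the new package. This is a sketch that assumes the AutoGPT backend is importable and that the blocks can be constructed outside the platform runtime; the loop body is illustrative:

from backend.blocks.video import (
    VideoClipBlock,
    VideoConcatBlock,
    VideoDownloadBlock,
    VideoNarrationBlock,
    VideoTextOverlayBlock,
)

# Construct each block and print its UUID; accidental id collisions between
# blocks become easy to spot this way.
for cls in (VideoDownloadBlock, VideoClipBlock, VideoConcatBlock,
            VideoTextOverlayBlock, VideoNarrationBlock):
    block = cls()
    print(cls.__name__, block.id)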
backend/blocks/video/clip.py (new file, 93 lines)
@@ -0,0 +1,93 @@
"""
VideoClipBlock - Extract a segment from a video file
"""
import uuid

from backend.data.block import Block, BlockCategory, BlockOutput
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError


class VideoClipBlock(Block):
    """Extract a time segment from a video."""

    class Input(BlockSchemaInput):
        video_in: str = SchemaField(
            description="Input video (URL, data URI, or file path)",
            json_schema_extra={"format": "file"},
        )
        start_time: float = SchemaField(
            description="Start time in seconds",
            ge=0.0,
        )
        end_time: float = SchemaField(
            description="End time in seconds",
            ge=0.0,
        )
        output_format: str = SchemaField(
            description="Output format",
            default="mp4",
            advanced=True,
        )

    class Output(BlockSchemaOutput):
        video_out: str = SchemaField(
            description="Clipped video file",
            json_schema_extra={"format": "file"},
        )
        duration: float = SchemaField(description="Clip duration in seconds")

    def __init__(self):
        super().__init__(
            id="b2c3d4e5-f6a7-8901-bcde-f23456789012",
            description="Extract a time segment from a video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={"video_in": "/tmp/test.mp4", "start_time": 0.0, "end_time": 10.0},
            test_output=[("video_out", str), ("duration", float)],
            test_mock={"_clip_video": lambda *args: ("/tmp/clip.mp4", 10.0)},
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        # Validate time range
        if input_data.end_time <= input_data.start_time:
            raise BlockExecutionError(
                message=f"end_time ({input_data.end_time}) must be greater than start_time ({input_data.start_time})",
                block_name=self.name,
                block_id=str(self.id),
            )

        try:
            from moviepy.video.io.VideoFileClip import VideoFileClip
        except ImportError as e:
            raise BlockExecutionError(
                message="moviepy is not installed. Please install it with: pip install moviepy",
                block_name=self.name,
                block_id=str(self.id),
            ) from e

        clip = None
        subclip = None
        try:
            clip = VideoFileClip(input_data.video_in)
            subclip = clip.subclip(input_data.start_time, input_data.end_time)

            output_path = f"/tmp/clip_{uuid.uuid4()}.{input_data.output_format}"
            subclip.write_videofile(output_path, logger=None)

            yield "video_out", output_path
            yield "duration", subclip.duration

        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to clip video: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
        finally:
            if subclip:
                subclip.close()
            if clip:
                clip.close()
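A hedged driver for the clip block. It calls run() directly rather than going through the executor, and assumes moviepy 1.x is installed and /tmp/test.mp4 exists:

import asyncio

from backend.blocks.video.clip import VideoClipBlock


async def main() -> None:
    block = VideoClipBlock()
    inputs = VideoClipBlock.Input(
        video_in="/tmp/test.mp4", start_time=0.0, end_time=5.0
    )
    # run() is an async generator yielding (output_name, value) pairs.
    async for name, value in block.run(inputs):
        print(name, value)


asyncio.run(main())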
backend/blocks/video/concat.py (new file, 123 lines)
@@ -0,0 +1,123 @@
"""
VideoConcatBlock - Concatenate multiple video clips into one
"""
import uuid

from backend.data.block import Block, BlockCategory, BlockOutput
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError


class VideoConcatBlock(Block):
    """Merge multiple video clips into one continuous video."""

    class Input(BlockSchemaInput):
        videos: list[str] = SchemaField(
            description="List of video files to concatenate (in order)"
        )
        transition: str = SchemaField(
            description="Transition between clips",
            default="none",
            enum=["none", "crossfade", "fade_black"],
        )
        transition_duration: float = SchemaField(
            description="Transition duration in seconds",
            default=0.5,
            advanced=True,
        )
        output_format: str = SchemaField(
            description="Output format",
            default="mp4",
            advanced=True,
        )

    class Output(BlockSchemaOutput):
        video_out: str = SchemaField(
            description="Concatenated video file",
            json_schema_extra={"format": "file"},
        )
        total_duration: float = SchemaField(description="Total duration in seconds")

    def __init__(self):
        super().__init__(
            id="c3d4e5f6-a7b8-9012-cdef-345678901234",
            description="Merge multiple video clips into one continuous video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={"videos": ["/tmp/a.mp4", "/tmp/b.mp4"]},
            test_output=[("video_out", str), ("total_duration", float)],
            test_mock={"_concat_videos": lambda *args: ("/tmp/concat.mp4", 20.0)},
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        try:
            from moviepy.editor import VideoFileClip, concatenate_videoclips
        except ImportError as e:
            raise BlockExecutionError(
                message="moviepy is not installed. Please install it with: pip install moviepy",
                block_name=self.name,
                block_id=str(self.id),
            ) from e

        # Validate minimum clips
        if len(input_data.videos) < 2:
            raise BlockExecutionError(
                message="At least 2 videos are required for concatenation",
                block_name=self.name,
                block_id=str(self.id),
            )

        clips = []
        faded_clips = []
        final = None
        try:
            # Load clips one by one to handle partial failures
            for v in input_data.videos:
                clips.append(VideoFileClip(v))

            if input_data.transition == "crossfade":
                # Apply crossfade between clips using crossfadein/crossfadeout
                transition_dur = input_data.transition_duration
                for i, clip in enumerate(clips):
                    if i > 0:
                        clip = clip.crossfadein(transition_dur)
                    if i < len(clips) - 1:
                        clip = clip.crossfadeout(transition_dur)
                    faded_clips.append(clip)
                final = concatenate_videoclips(
                    faded_clips,
                    method="compose",
                    padding=-transition_dur,
                )
            elif input_data.transition == "fade_black":
                # Fade to black between clips
                for clip in clips:
                    faded = clip.fadein(input_data.transition_duration).fadeout(
                        input_data.transition_duration
                    )
                    faded_clips.append(faded)
                final = concatenate_videoclips(faded_clips)
            else:
                final = concatenate_videoclips(clips)

            output_path = f"/tmp/concat_{uuid.uuid4()}.{input_data.output_format}"
            final.write_videofile(output_path, logger=None)

            yield "video_out", output_path
            yield "total_duration", final.duration

        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to concatenate videos: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
        finally:
            if final:
                final.close()
            for clip in faded_clips:
                clip.close()
            for clip in clips:
                clip.close()
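With the crossfade path above, padding=-transition_duration overlaps adjacent clips, so the output is shorter than the sum of the inputs. A one-line worked check (values illustrative):

# total = sum(durations) - (n - 1) * transition_duration
durations = [10.0, 8.0, 12.0]  # three input clips, seconds
transition = 0.5               # crossfade length, seconds
total = sum(durations) - (len(durations) - 1) * transition
print(total)  # 29.0 -- two overlaps of 0.5s each shave a second off 30.0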
backend/blocks/video/download.py (new file, 102 lines)
@@ -0,0 +1,102 @@
"""
VideoDownloadBlock - Download video from URL (YouTube, Vimeo, news sites, direct links)
"""
import uuid
from typing import Literal

from backend.data.block import Block, BlockCategory, BlockOutput
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError


class VideoDownloadBlock(Block):
    """Download video from URL using yt-dlp."""

    class Input(BlockSchemaInput):
        url: str = SchemaField(
            description="URL of the video to download (YouTube, Vimeo, direct link, etc.)",
            placeholder="https://www.youtube.com/watch?v=...",
        )
        quality: Literal["best", "1080p", "720p", "480p", "audio_only"] = SchemaField(
            description="Video quality preference",
            default="720p",
        )
        output_format: Literal["mp4", "webm", "mkv"] = SchemaField(
            description="Output video format",
            default="mp4",
            advanced=True,
        )

    class Output(BlockSchemaOutput):
        video_file: str = SchemaField(
            description="Path or data URI of downloaded video",
            json_schema_extra={"format": "file"},
        )
        duration: float = SchemaField(description="Video duration in seconds")
        title: str = SchemaField(description="Video title from source")
        source_url: str = SchemaField(description="Original source URL")

    def __init__(self):
        super().__init__(
            id="a1b2c3d4-e5f6-7890-abcd-ef1234567890",
            description="Download video from URL (YouTube, Vimeo, news sites, direct links)",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={"url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ", "quality": "480p"},
            test_output=[("video_file", str), ("duration", float), ("title", str), ("source_url", str)],
            test_mock={"_download_video": lambda *args: ("/tmp/video.mp4", 212.0, "Test Video")},
        )

    def _get_format_string(self, quality: str) -> str:
        formats = {
            "best": "bestvideo+bestaudio/best",
            "1080p": "bestvideo[height<=1080]+bestaudio/best[height<=1080]",
            "720p": "bestvideo[height<=720]+bestaudio/best[height<=720]",
            "480p": "bestvideo[height<=480]+bestaudio/best[height<=480]",
            "audio_only": "bestaudio/best",
        }
        return formats.get(quality, formats["720p"])

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        try:
            import yt_dlp
        except ImportError as e:
            raise BlockExecutionError(
                message="yt-dlp is not installed. Please install it with: pip install yt-dlp",
                block_name=self.name,
                block_id=str(self.id),
            ) from e

        video_id = str(uuid.uuid4())[:8]
        output_template = f"/tmp/{video_id}.%(ext)s"

        ydl_opts = {
            "format": self._get_format_string(input_data.quality),
            "outtmpl": output_template,
            "merge_output_format": input_data.output_format,
            "quiet": True,
            "no_warnings": True,
        }

        try:
            with yt_dlp.YoutubeDL(ydl_opts) as ydl:
                info = ydl.extract_info(input_data.url, download=True)
                video_path = ydl.prepare_filename(info)

                # Handle format conversion in filename
                if not video_path.endswith(f".{input_data.output_format}"):
                    video_path = video_path.rsplit(".", 1)[0] + f".{input_data.output_format}"

            yield "video_file", video_path
            yield "duration", info.get("duration") or 0.0
            yield "title", info.get("title") or "Unknown"
            yield "source_url", input_data.url

        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to download video: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
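The same yt-dlp options can be exercised standalone; with download=False, extract_info returns only metadata, which is a cheap way to sanity-check the format string. A sketch that assumes yt-dlp is installed and reuses the block's test URL:

import yt_dlp

ydl_opts = {
    "format": "bestvideo[height<=720]+bestaudio/best[height<=720]",  # the block's "720p"
    "outtmpl": "/tmp/demo.%(ext)s",
    "merge_output_format": "mp4",
    "quiet": True,
    "no_warnings": True,
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    # Metadata only; nothing is downloaded or merged.
    info = ydl.extract_info("https://www.youtube.com/watch?v=dQw4w9WgXcQ", download=False)
    print(info.get("title"), info.get("duration"))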
backend/blocks/video/narration.py (new file, 167 lines)
@@ -0,0 +1,167 @@
"""
VideoNarrationBlock - Generate AI voice narration and add to video
"""
import uuid
from typing import Literal

from backend.data.block import Block, BlockCategory, BlockOutput
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
from backend.data.model import SchemaField, CredentialsMetaInput, APIKeyCredentials
from backend.integrations.providers import ProviderName
from backend.util.exceptions import BlockExecutionError


class VideoNarrationBlock(Block):
    """Generate AI narration and add to video."""

    class Input(BlockSchemaInput):
        credentials: CredentialsMetaInput[
            Literal[ProviderName.ELEVENLABS], Literal["api_key"]
        ] = SchemaField(
            description="ElevenLabs API key for voice synthesis"
        )
        video_in: str = SchemaField(
            description="Input video file",
            json_schema_extra={"format": "file"},
        )
        script: str = SchemaField(description="Narration script text")
        voice_id: str = SchemaField(
            description="ElevenLabs voice ID",
            default="21m00Tcm4TlvDq8ikWAM",  # Rachel
        )
        mix_mode: Literal["replace", "mix", "ducking"] = SchemaField(
            description="How to combine with original audio",
            default="ducking",
        )
        narration_volume: float = SchemaField(
            description="Narration volume (0.0 to 2.0)",
            default=1.0,
            ge=0.0,
            le=2.0,
            advanced=True,
        )
        original_volume: float = SchemaField(
            description="Original audio volume when mixing (0.0 to 1.0)",
            default=0.3,
            ge=0.0,
            le=1.0,
            advanced=True,
        )

    class Output(BlockSchemaOutput):
        video_out: str = SchemaField(
            description="Video with narration",
            json_schema_extra={"format": "file"},
        )
        audio_file: str = SchemaField(
            description="Generated audio file",
            json_schema_extra={"format": "file"},
        )

    def __init__(self):
        super().__init__(
            id="e5f6a7b8-c9d0-1234-ef56-789012345678",
            description="Generate AI narration and add to video",
            categories={BlockCategory.MULTIMEDIA, BlockCategory.AI},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={
                "video_in": "/tmp/test.mp4",
                "script": "Hello world",
                "credentials": {"provider": "elevenlabs", "id": "test", "type": "api_key"},
            },
            test_output=[("video_out", str), ("audio_file", str)],
            test_mock={"_generate_narration": lambda *args: ("/tmp/narrated.mp4", "/tmp/audio.mp3")},
        )

    async def run(
        self,
        input_data: Input,
        *,
        credentials: APIKeyCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            import requests
            from moviepy.editor import VideoFileClip, AudioFileClip, CompositeAudioClip
        except ImportError as e:
            raise BlockExecutionError(
                message=f"Missing dependency: {e}. Install moviepy and requests.",
                block_name=self.name,
                block_id=str(self.id),
            ) from e

        video = None
        final = None
        narration = None
        try:
            # Generate narration via ElevenLabs
            response = requests.post(
                f"https://api.elevenlabs.io/v1/text-to-speech/{input_data.voice_id}",
                headers={
                    "xi-api-key": credentials.api_key.get_secret_value(),
                    "Content-Type": "application/json",
                },
                json={
                    "text": input_data.script,
                    "model_id": "eleven_monolingual_v1",
                },
                timeout=120,
            )
            response.raise_for_status()

            audio_path = f"/tmp/narration_{uuid.uuid4()}.mp3"
            with open(audio_path, "wb") as f:
                f.write(response.content)

            # Combine with video
            video = VideoFileClip(input_data.video_in)
            narration = AudioFileClip(audio_path)
            narration = narration.volumex(input_data.narration_volume)

            if input_data.mix_mode == "replace":
                final_audio = narration
            elif input_data.mix_mode == "mix":
                if video.audio:
                    original = video.audio.volumex(input_data.original_volume)
                    final_audio = CompositeAudioClip([original, narration])
                else:
                    final_audio = narration
            else:  # ducking - lower original volume more when narration plays
                if video.audio:
                    # Apply stronger attenuation for ducking effect
                    ducking_volume = input_data.original_volume * 0.3
                    original = video.audio.volumex(ducking_volume)
                    final_audio = CompositeAudioClip([original, narration])
                else:
                    final_audio = narration

            final = video.set_audio(final_audio)

            output_path = f"/tmp/narrated_{uuid.uuid4()}.mp4"
            final.write_videofile(output_path, logger=None)

            yield "video_out", output_path
            yield "audio_file", audio_path

        except requests.exceptions.RequestException as e:
            raise BlockExecutionError(
                message=f"ElevenLabs API error: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to add narration: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
        finally:
            if narration:
                narration.close()
            if final:
                final.close()
            if video:
                video.close()
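The mix-mode branching above reduces to a small pure function, shown here for clarity. The helper is illustrative and not in the commit; None means the original track is replaced outright:

from typing import Optional


def effective_original_volume(mix_mode: str, original_volume: float) -> Optional[float]:
    if mix_mode == "replace":
        return None                  # drop the original audio entirely
    if mix_mode == "mix":
        return original_volume       # e.g. 0.3 -> original plays at 30%
    return original_volume * 0.3     # "ducking": extra attenuation, 0.3 -> 0.09


for mode in ("replace", "mix", "ducking"):
    print(mode, effective_original_volume(mode, 0.3))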
backend/blocks/video/text_overlay.py (new file, 149 lines)
@@ -0,0 +1,149 @@
"""
VideoTextOverlayBlock - Add text overlay to video
"""
import uuid
from typing import Literal

from backend.data.block import Block, BlockCategory, BlockOutput
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError


class VideoTextOverlayBlock(Block):
    """Add text overlay/caption to video."""

    class Input(BlockSchemaInput):
        video_in: str = SchemaField(
            description="Input video file",
            json_schema_extra={"format": "file"},
        )
        text: str = SchemaField(description="Text to overlay on video")
        position: Literal[
            "top", "center", "bottom",
            "top-left", "top-right",
            "bottom-left", "bottom-right",
        ] = SchemaField(
            description="Position of text on screen",
            default="bottom",
        )
        start_time: float | None = SchemaField(
            description="When to show text (seconds). None = entire video",
            default=None,
            advanced=True,
        )
        end_time: float | None = SchemaField(
            description="When to hide text (seconds). None = until end",
            default=None,
            advanced=True,
        )
        font_size: int = SchemaField(
            description="Font size",
            default=48,
            ge=12,
            le=200,
            advanced=True,
        )
        font_color: str = SchemaField(
            description="Font color (hex or name)",
            default="white",
            advanced=True,
        )
        bg_color: str | None = SchemaField(
            description="Background color behind text (None for transparent)",
            default=None,
            advanced=True,
        )

    class Output(BlockSchemaOutput):
        video_out: str = SchemaField(
            description="Video with text overlay",
            json_schema_extra={"format": "file"},
        )

    def __init__(self):
        super().__init__(
            id="d4e5f6a7-b8c9-0123-def4-567890123456",
            description="Add text overlay/caption to video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={"video_in": "/tmp/test.mp4", "text": "Hello World"},
            test_output=[("video_out", str)],
            test_mock={"_add_text": lambda *args: "/tmp/overlay.mp4"},
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        try:
            from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip
        except ImportError as e:
            raise BlockExecutionError(
                message="moviepy is not installed. Please install it with: pip install moviepy",
                block_name=self.name,
                block_id=str(self.id),
            ) from e

        # Validate time range if both are provided
        if (input_data.start_time is not None and
                input_data.end_time is not None and
                input_data.end_time <= input_data.start_time):
            raise BlockExecutionError(
                message=f"end_time ({input_data.end_time}) must be greater than start_time ({input_data.start_time})",
                block_name=self.name,
                block_id=str(self.id),
            )

        video = None
        final = None
        txt_clip = None
        try:
            video = VideoFileClip(input_data.video_in)

            txt_clip = TextClip(
                input_data.text,
                fontsize=input_data.font_size,
                color=input_data.font_color,
                bg_color=input_data.bg_color,
            )

            # Position mapping
            pos_map = {
                "top": ("center", "top"),
                "center": ("center", "center"),
                "bottom": ("center", "bottom"),
                "top-left": ("left", "top"),
                "top-right": ("right", "top"),
                "bottom-left": ("left", "bottom"),
                "bottom-right": ("right", "bottom"),
            }

            txt_clip = txt_clip.set_position(pos_map[input_data.position])

            # Set timing
            start = input_data.start_time or 0
            end = input_data.end_time or video.duration
            duration = max(0, end - start)
            txt_clip = txt_clip.set_start(start).set_end(end).set_duration(duration)

            final = CompositeVideoClip([video, txt_clip])

            output_path = f"/tmp/overlay_{uuid.uuid4()}.mp4"
            final.write_videofile(output_path, logger=None)

            yield "video_out", output_path

        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to add text overlay: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
        finally:
            if txt_clip:
                txt_clip.close()
            if final:
                final.close()
            if video:
                video.close()
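The overlay timing defaults collapse to a tiny helper; note that `or` treats an explicit 0.0 the same as None, which is harmless for start_time since both mean "from the beginning". A sketch with illustrative names:

def overlay_window(start_time, end_time, video_duration):
    start = start_time or 0           # None (or 0.0) -> beginning of video
    end = end_time or video_duration  # None -> run until the end
    return start, end, max(0, end - start)


print(overlay_window(None, None, 30.0))  # (0, 30.0, 30.0): full-length overlay
print(overlay_window(2.0, 8.0, 30.0))    # (2.0, 8.0, 6.0)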