diff --git a/autogpt_platform/backend/backend/api/features/admin/execution_analytics_routes.py b/autogpt_platform/backend/backend/api/features/admin/execution_analytics_routes.py
index 00f0bda884..45912dae67 100644
--- a/autogpt_platform/backend/backend/api/features/admin/execution_analytics_routes.py
+++ b/autogpt_platform/backend/backend/api/features/admin/execution_analytics_routes.py
@@ -28,6 +28,7 @@ from backend.executor.manager import get_db_async_client
from backend.util.settings import Settings
logger = logging.getLogger(__name__)
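+# Module-level Settings instance, shared with the API-key check in _process_batch below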
+settings = Settings()
class ExecutionAnalyticsRequest(BaseModel):
@@ -63,6 +64,8 @@ class ExecutionAnalyticsResult(BaseModel):
score: Optional[float]
status: str # "success", "failed", "skipped"
error_message: Optional[str] = None
+ started_at: Optional[datetime] = None
+ ended_at: Optional[datetime] = None
class ExecutionAnalyticsResponse(BaseModel):
@@ -224,11 +227,6 @@ async def generate_execution_analytics(
)
try:
- # Validate model configuration
- settings = Settings()
- if not settings.secrets.openai_internal_api_key:
- raise HTTPException(status_code=500, detail="OpenAI API key not configured")
-
# Get database client
db_client = get_db_async_client()
@@ -320,6 +318,8 @@ async def generate_execution_analytics(
),
status="skipped",
error_message=None, # Not an error - just already processed
+ started_at=execution.started_at,
+ ended_at=execution.ended_at,
)
)
@@ -349,6 +349,9 @@ async def _process_batch(
) -> list[ExecutionAnalyticsResult]:
"""Process a batch of executions concurrently."""
+ if not settings.secrets.openai_internal_api_key:
+ raise HTTPException(status_code=500, detail="OpenAI API key not configured")
+
async def process_single_execution(execution) -> ExecutionAnalyticsResult:
try:
# Generate activity status and score using the specified model
@@ -387,6 +390,8 @@ async def _process_batch(
score=None,
status="skipped",
error_message="Activity generation returned None",
+ started_at=execution.started_at,
+ ended_at=execution.ended_at,
)
# Update the execution stats
@@ -416,6 +421,8 @@ async def _process_batch(
summary_text=activity_response["activity_status"],
score=activity_response["correctness_score"],
status="success",
+ started_at=execution.started_at,
+ ended_at=execution.ended_at,
)
except Exception as e:
@@ -429,6 +436,8 @@ async def _process_batch(
score=None,
status="failed",
error_message=str(e),
+ started_at=execution.started_at,
+ ended_at=execution.ended_at,
)
# Process all executions in the batch concurrently
diff --git a/autogpt_platform/backend/backend/data/analytics.py b/autogpt_platform/backend/backend/data/analytics.py
index 7419539026..2e148cd10f 100644
--- a/autogpt_platform/backend/backend/data/analytics.py
+++ b/autogpt_platform/backend/backend/data/analytics.py
@@ -104,7 +104,7 @@ async def get_accuracy_trends_and_alerts(
AND e."executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED')
{user_filter}
GROUP BY DATE(e."createdAt")
- HAVING COUNT(*) >= 3 -- Need at least 3 executions per day
+ HAVING COUNT(*) >= 1 -- Include all days with at least 1 execution
),
trends AS (
SELECT
diff --git a/autogpt_platform/backend/backend/data/execution.py b/autogpt_platform/backend/backend/data/execution.py
index 2759dfe179..a6797032fd 100644
--- a/autogpt_platform/backend/backend/data/execution.py
+++ b/autogpt_platform/backend/backend/data/execution.py
@@ -153,8 +153,14 @@ class GraphExecutionMeta(BaseDbModel):
nodes_input_masks: Optional[dict[str, BlockInput]]
preset_id: Optional[str]
status: ExecutionStatus
- started_at: datetime
- ended_at: datetime
+ started_at: Optional[datetime] = Field(
+ None,
+ description="When execution started running. Null if not yet started (QUEUED).",
+ )
+ ended_at: Optional[datetime] = Field(
+ None,
+ description="When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW).",
+ )
is_shared: bool = False
share_token: Optional[str] = None
@@ -229,10 +235,8 @@ class GraphExecutionMeta(BaseDbModel):
@staticmethod
def from_db(_graph_exec: AgentGraphExecution):
- now = datetime.now(timezone.utc)
- # TODO: make started_at and ended_at optional
- start_time = _graph_exec.startedAt or _graph_exec.createdAt
- end_time = _graph_exec.updatedAt or now
+ start_time = _graph_exec.startedAt
+ end_time = _graph_exec.endedAt
try:
stats = GraphExecutionStats.model_validate(_graph_exec.stats)
@@ -902,6 +906,14 @@ async def update_graph_execution_stats(
if status:
update_data["executionStatus"] = status
+ # Set endedAt when execution reaches a terminal status
+ terminal_statuses = [
+ ExecutionStatus.COMPLETED,
+ ExecutionStatus.FAILED,
+ ExecutionStatus.TERMINATED,
+ ]
+ if status in terminal_statuses:
+ update_data["endedAt"] = datetime.now(tz=timezone.utc)
where_clause: AgentGraphExecutionWhereInput = {"id": graph_exec_id}
diff --git a/autogpt_platform/backend/backend/monitoring/late_execution_monitor.py b/autogpt_platform/backend/backend/monitoring/late_execution_monitor.py
index 1e0c99cac3..fd3d31cb47 100644
--- a/autogpt_platform/backend/backend/monitoring/late_execution_monitor.py
+++ b/autogpt_platform/backend/backend/monitoring/late_execution_monitor.py
@@ -60,8 +60,10 @@ class LateExecutionMonitor:
if not all_late_executions:
return "No late executions detected."
- # Sort by created time (oldest first)
- all_late_executions.sort(key=lambda x: x.started_at)
+ # Sort by started time (oldest first), with None values (unstarted) first
+ all_late_executions.sort(
+ key=lambda x: x.started_at or datetime.min.replace(tzinfo=timezone.utc)
+ )
num_total_late = len(all_late_executions)
num_queued = len(queued_late_executions)
@@ -74,7 +76,7 @@ class LateExecutionMonitor:
was_truncated = num_total_late > tuncate_size
late_execution_details = [
- f"* `Execution ID: {exec.id}, Graph ID: {exec.graph_id}v{exec.graph_version}, User ID: {exec.user_id}, Status: {exec.status}, Created At: {exec.started_at.isoformat()}`"
+ f"* `Execution ID: {exec.id}, Graph ID: {exec.graph_id}v{exec.graph_version}, User ID: {exec.user_id}, Status: {exec.status}, Started At: {exec.started_at.isoformat() if exec.started_at else 'Not started'}`"
for exec in truncated_executions
]
diff --git a/autogpt_platform/backend/migrations/20260113170839_add_ended_at_to_agent_graph_execution/migration.sql b/autogpt_platform/backend/migrations/20260113170839_add_ended_at_to_agent_graph_execution/migration.sql
new file mode 100644
index 0000000000..95fa3a573a
--- /dev/null
+++ b/autogpt_platform/backend/migrations/20260113170839_add_ended_at_to_agent_graph_execution/migration.sql
@@ -0,0 +1,8 @@
+-- AlterTable
+ALTER TABLE "AgentGraphExecution" ADD COLUMN "endedAt" TIMESTAMP(3);
+
+-- Backfill endedAt from updatedAt for existing records already in a terminal status
+UPDATE "AgentGraphExecution"
+SET "endedAt" = "updatedAt"
+WHERE "endedAt" IS NULL
+ AND "executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED');
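+
+-- Non-terminal executions keep endedAt NULL; the backend sets it when they reach a terminal status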
diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma
index b7dc98524a..4a2a7b583a 100644
--- a/autogpt_platform/backend/schema.prisma
+++ b/autogpt_platform/backend/schema.prisma
@@ -450,6 +450,7 @@ model AgentGraphExecution {
createdAt DateTime @default(now())
updatedAt DateTime? @updatedAt
startedAt DateTime?
+ endedAt DateTime?
isDeleted Boolean @default(false)
diff --git a/autogpt_platform/frontend/src/app/(platform)/admin/execution-analytics/components/AnalyticsResultsTable.tsx b/autogpt_platform/frontend/src/app/(platform)/admin/execution-analytics/components/AnalyticsResultsTable.tsx
index 56c52e2ceb..7ab251efb1 100644
--- a/autogpt_platform/frontend/src/app/(platform)/admin/execution-analytics/components/AnalyticsResultsTable.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/admin/execution-analytics/components/AnalyticsResultsTable.tsx
@@ -51,6 +51,8 @@ export function AnalyticsResultsTable({ results }: Props) {
"Execution ID",
"Status",
"Score",
+ "Started At",
+ "Ended At",
"Summary Text",
"Error Message",
];
@@ -62,6 +64,8 @@ export function AnalyticsResultsTable({ results }: Props) {
result.exec_id,
result.status,
result.score?.toString() || "",
+      `"${result.started_at ? new Date(result.started_at).toLocaleString() : ""}"`, // Quote timestamps: locale strings can contain commas
+      `"${result.ended_at ? new Date(result.ended_at).toLocaleString() : ""}"`,
`"${(result.summary_text || "").replace(/"/g, '""')}"`, // Escape quotes in summary
`"${(result.error_message || "").replace(/"/g, '""')}"`, // Escape quotes in error
]);
@@ -248,15 +252,13 @@ export function AnalyticsResultsTable({ results }: Props) {
)}
- {(result.summary_text || result.error_message) && (
-
- )}
+
@@ -264,6 +266,44 @@ export function AnalyticsResultsTable({ results }: Props) {
+ {/* Timestamps section */}
+
+
+
+ Started At:
+
+
+ {result.started_at
+ ? new Date(
+ result.started_at,
+ ).toLocaleString()
+ : "—"}
+
+
+
+
+ Ended At:
+
+
+ {result.ended_at
+ ? new Date(result.ended_at).toLocaleString()
+ : "—"}
+
+
+
+
{result.summary_text && (
- Execution Accuracy Trends
+
+ Execution Accuracy Trends
+
+
+ Chart Filters (matches monitoring system):
+
+
+ - Only days with ≥1 execution that has a correctness score
+ - Last 30 days
+ - Averages calculated from scored executions only
+
+
+
{/* Alert Section */}
{trendsData.alert && (
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView.tsx
index dff1b5a7bb..54cc07878d 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView.tsx
@@ -173,8 +173,9 @@ export function OldAgentLibraryView() {
if (agentRuns.length > 0) {
// select latest run
const latestRun = agentRuns.reduce((latest, current) => {
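+          // Runs without started_at (not yet started) never win over started runs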
- if (latest.started_at && !current.started_at) return current;
- else if (!latest.started_at) return latest;
+ if (!latest.started_at && !current.started_at) return latest;
+ if (!latest.started_at) return current;
+ if (!current.started_at) return latest;
return latest.started_at > current.started_at ? latest : current;
}, agentRuns[0]);
selectRun(latestRun.id as GraphExecutionID);
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-runs-selector-list.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-runs-selector-list.tsx
index 5931404846..49d93b4319 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-runs-selector-list.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-runs-selector-list.tsx
@@ -184,9 +184,11 @@ export function AgentRunsSelectorList({
))}
{agentPresets.length > 0 && }
{agentRuns
- .toSorted(
- (a, b) => b.started_at.getTime() - a.started_at.getTime(),
- )
+ .toSorted((a, b) => {
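+              // null started_at falls back to epoch 0, pushing unstarted runs to the end of the newest-first list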
+ const aTime = a.started_at?.getTime() ?? 0;
+ const bTime = b.started_at?.getTime() ?? 0;
+ return bTime - aTime;
+ })
.map((run) => (
onSelectRun(run.id)}
onDelete={() => doDeleteRun(run as GraphExecutionMeta)}
diff --git a/autogpt_platform/frontend/src/app/(platform)/monitoring/components/AgentFlowList.tsx b/autogpt_platform/frontend/src/app/(platform)/monitoring/components/AgentFlowList.tsx
index d4cb6fc649..badba61bf1 100644
--- a/autogpt_platform/frontend/src/app/(platform)/monitoring/components/AgentFlowList.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/monitoring/components/AgentFlowList.tsx
@@ -120,9 +120,11 @@ export const AgentFlowList = ({
lastRun =
runCount == 0
? null
- : _flowRuns.reduce((a, c) =>
- a.started_at > c.started_at ? a : c,
- );
+ : _flowRuns.reduce((a, c) => {
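+              // Treat null started_at as epoch 0 so an unstarted run is never picked as "latest"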
+ const aTime = a.started_at?.getTime() ?? 0;
+ const cTime = c.started_at?.getTime() ?? 0;
+ return aTime > cTime ? a : c;
+ });
}
return { flow, runCount, lastRun };
})
@@ -130,10 +132,9 @@ export const AgentFlowList = ({
if (!a.lastRun && !b.lastRun) return 0;
if (!a.lastRun) return 1;
if (!b.lastRun) return -1;
- return (
- b.lastRun.started_at.getTime() -
- a.lastRun.started_at.getTime()
- );
+ const bTime = b.lastRun.started_at?.getTime() ?? 0;
+ const aTime = a.lastRun.started_at?.getTime() ?? 0;
+ return bTime - aTime;
})
.map(({ flow, runCount, lastRun }) => (
-      ? executions.filter((fr) => fr.started_at.getTime() > statsSinceTimestamp)
+ ? executions.filter(
+ (fr) =>
+ fr.started_at && fr.started_at.getTime() > statsSinceTimestamp,
+ )
: executions;
return (
diff --git a/autogpt_platform/frontend/src/app/(platform)/monitoring/components/FlowRunsTimeline.tsx b/autogpt_platform/frontend/src/app/(platform)/monitoring/components/FlowRunsTimeline.tsx
index 02050627f5..ee01d67234 100644
--- a/autogpt_platform/frontend/src/app/(platform)/monitoring/components/FlowRunsTimeline.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/monitoring/components/FlowRunsTimeline.tsx
@@ -98,40 +98,43 @@ export const FlowRunsTimeline = ({
-              .filter((e) => e.graph_id == flow.graph_id)
+ .filter((e) => e.graph_id == flow.graph_id && e.started_at)
.map((e) => ({
...e,
time:
- e.started_at.getTime() + (e.stats?.node_exec_time ?? 0) * 1000,
+ (e.started_at?.getTime() ?? 0) +
+ (e.stats?.node_exec_time ?? 0) * 1000,
_duration: e.stats?.node_exec_time ?? 0,
}))}
name={flow.name}
fill={`hsl(${(hashString(flow.id) * 137.5) % 360}, 70%, 50%)`}
/>
))}
- {executions.map((execution) => (
-
- ))}
+ {executions
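+              // Only finished runs carry both timestamps needed to plot them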
+ .filter((e) => e.started_at && e.ended_at)
+ .map((execution) => (
+
+ ))}
}
wrapperStyle={{
diff --git a/autogpt_platform/frontend/src/app/(platform)/monitoring/page.tsx b/autogpt_platform/frontend/src/app/(platform)/monitoring/page.tsx
index 3b5aa46839..4c356dabef 100644
--- a/autogpt_platform/frontend/src/app/(platform)/monitoring/page.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/monitoring/page.tsx
@@ -98,7 +98,11 @@ const Monitor = () => {
...(selectedFlow
? executions.filter((v) => v.graph_id == selectedFlow.graph_id)
: executions),
- ].sort((a, b) => b.started_at.getTime() - a.started_at.getTime())}
+ ].sort((a, b) => {
+ const aTime = a.started_at?.getTime() ?? 0;
+ const bTime = b.started_at?.getTime() ?? 0;
+ return bTime - aTime;
+ })}
selectedRun={selectedRun}
onSelectRun={(r) => setSelectedRun(r.id == selectedRun?.id ? null : r)}
/>
diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json
index 776ba2321a..fc4e737651 100644
--- a/autogpt_platform/frontend/src/app/api/openapi.json
+++ b/autogpt_platform/frontend/src/app/api/openapi.json
@@ -7148,6 +7148,20 @@
"error_message": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Error Message"
+ },
+ "started_at": {
+ "anyOf": [
+ { "type": "string", "format": "date-time" },
+ { "type": "null" }
+ ],
+ "title": "Started At"
+ },
+ "ended_at": {
+ "anyOf": [
+ { "type": "string", "format": "date-time" },
+ { "type": "null" }
+ ],
+ "title": "Ended At"
}
},
"type": "object",
@@ -7254,14 +7268,20 @@
},
"status": { "$ref": "#/components/schemas/AgentExecutionStatus" },
"started_at": {
- "type": "string",
- "format": "date-time",
- "title": "Started At"
+ "anyOf": [
+ { "type": "string", "format": "date-time" },
+ { "type": "null" }
+ ],
+ "title": "Started At",
+ "description": "When execution started running. Null if not yet started (QUEUED)."
},
"ended_at": {
- "type": "string",
- "format": "date-time",
- "title": "Ended At"
+ "anyOf": [
+ { "type": "string", "format": "date-time" },
+ { "type": "null" }
+ ],
+ "title": "Ended At",
+ "description": "When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW)."
},
"is_shared": {
"type": "boolean",
@@ -7295,8 +7315,6 @@
"nodes_input_masks",
"preset_id",
"status",
- "started_at",
- "ended_at",
"stats",
"outputs"
],
@@ -7395,14 +7413,20 @@
},
"status": { "$ref": "#/components/schemas/AgentExecutionStatus" },
"started_at": {
- "type": "string",
- "format": "date-time",
- "title": "Started At"
+ "anyOf": [
+ { "type": "string", "format": "date-time" },
+ { "type": "null" }
+ ],
+ "title": "Started At",
+ "description": "When execution started running. Null if not yet started (QUEUED)."
},
"ended_at": {
- "type": "string",
- "format": "date-time",
- "title": "Ended At"
+ "anyOf": [
+ { "type": "string", "format": "date-time" },
+ { "type": "null" }
+ ],
+ "title": "Ended At",
+ "description": "When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW)."
},
"is_shared": {
"type": "boolean",
@@ -7431,8 +7455,6 @@
"nodes_input_masks",
"preset_id",
"status",
- "started_at",
- "ended_at",
"stats"
],
"title": "GraphExecutionMeta"
@@ -7479,14 +7501,20 @@
},
"status": { "$ref": "#/components/schemas/AgentExecutionStatus" },
"started_at": {
- "type": "string",
- "format": "date-time",
- "title": "Started At"
+ "anyOf": [
+ { "type": "string", "format": "date-time" },
+ { "type": "null" }
+ ],
+ "title": "Started At",
+ "description": "When execution started running. Null if not yet started (QUEUED)."
},
"ended_at": {
- "type": "string",
- "format": "date-time",
- "title": "Ended At"
+ "anyOf": [
+ { "type": "string", "format": "date-time" },
+ { "type": "null" }
+ ],
+ "title": "Ended At",
+ "description": "When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW)."
},
"is_shared": {
"type": "boolean",
@@ -7525,8 +7553,6 @@
"nodes_input_masks",
"preset_id",
"status",
- "started_at",
- "ended_at",
"stats",
"outputs",
"node_executions"
diff --git a/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/components/ActivityItem.tsx b/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/components/ActivityItem.tsx
index 1ebca96b05..17576e1665 100644
--- a/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/components/ActivityItem.tsx
+++ b/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/components/ActivityItem.tsx
@@ -50,7 +50,9 @@ export function ActivityItem({ execution }: Props) {
execution.status === AgentExecutionStatus.QUEUED;
if (isActiveStatus) {
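+    // QUEUED executions may not have a started_at yet; fall back to a generic label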
- const timeAgo = formatTimeAgo(execution.started_at.toString());
+ const timeAgo = execution.started_at
+ ? formatTimeAgo(execution.started_at.toString())
+ : "recently";
const statusText =
execution.status === AgentExecutionStatus.QUEUED ? "queued" : "running";
return [
@@ -61,7 +63,9 @@ export function ActivityItem({ execution }: Props) {
// Handle all other statuses with time display
const timeAgo = execution.ended_at
? formatTimeAgo(execution.ended_at.toString())
- : formatTimeAgo(execution.started_at.toString());
+ : execution.started_at
+ ? formatTimeAgo(execution.started_at.toString())
+ : "recently";
let statusText = "ended";
switch (execution.status) {
diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
index 8bc39a390a..82c03bc9f1 100644
--- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
+++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
@@ -327,8 +327,8 @@ export type GraphExecutionMeta = {
| "FAILED"
| "INCOMPLETE"
| "REVIEW";
- started_at: Date;
- ended_at: Date;
+ started_at: Date | null;
+ ended_at: Date | null;
stats: {
error: string | null;
cost: number;