From f31c16004374151db4795ffa2ed58d5615fb0824 Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Fri, 16 Jan 2026 15:44:24 -0600 Subject: [PATCH] feat(platform): add endedAt field and fix execution analytics timestamps (#11759) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary This PR adds proper execution end time tracking and fixes timestamp handling throughout the execution analytics system. ### Key Changes 1. **Added `endedAt` field to database schema** - Executions now have a dedicated field for tracking when they finish 2. **Fixed timestamp nullable handling** - `started_at` and `ended_at` are now properly nullable in types 3. **Fixed chart aggregation** - Reduced threshold from ≥3 to ≥1 executions per day 4. **Improved timestamp display** - Moved timestamps to expandable details section in analytics table 5. **Fixed nullable timestamp bugs** - Updated all frontend code to handle null timestamps correctly ## Problem Statement ### Issue 1: Missing Execution End Times Previously, executions used `updatedAt` (last DB update) as a proxy for "end time". This broke when adding correctness scores retroactively - the end time would change to whenever the score was added, not when the execution actually finished. ### Issue 2: Chart Shows Only One Data Point The accuracy trends chart showed only one data point despite having executions across multiple days. Root cause: aggregation required ≥3 executions per day. ### Issue 3: Incorrect Type Definitions Manually maintained types defined `started_at` and `ended_at` as non-nullable `Date`, contradicting reality where QUEUED executions haven't started yet. ## Solution ### Database Schema (`schema.prisma`) ```prisma model AgentGraphExecution { // ... startedAt DateTime? endedAt DateTime? // NEW FIELD // ... 
} ``` ### Execution Lifecycle - **QUEUED**: `startedAt = null`, `endedAt = null` (not started) - **RUNNING**: `startedAt = set`, `endedAt = null` (in progress) - **COMPLETED/FAILED/TERMINATED**: `startedAt = set`, `endedAt = set` (finished) ### Migration Strategy ```sql -- Add endedAt column ALTER TABLE "AgentGraphExecution" ADD COLUMN "endedAt" TIMESTAMP(3); -- Backfill ONLY terminal executions (prevents marking RUNNING executions as ended) UPDATE "AgentGraphExecution" SET "endedAt" = "updatedAt" WHERE "endedAt" IS NULL AND "executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED'); ``` ## Changes by Component ### Backend **`schema.prisma`** - Added `endedAt` field to `AgentGraphExecution` **`execution.py`** - Made `started_at` and `ended_at` optional with Field descriptions - Updated `from_db()` to use `endedAt` instead of `updatedAt` - `update_graph_execution_stats()` sets `endedAt` when status becomes terminal **`execution_analytics_routes.py`** - Removed `created_at`/`updated_at` from `ExecutionAnalyticsResult` (DB metadata, not execution data) - Kept only `started_at`/`ended_at` (actual execution runtime) - Made settings global (avoid recreation) - Moved OpenAI key validation to `_process_batch` (only check when LLM actually runs) **`analytics.py`** - Fixed aggregation: `COUNT(*) >= 1` (was 3) - include all days with ≥1 execution - Uses `createdAt` for chart grouping (when execution was queued) **`late_execution_monitor.py`** - Handle optional `started_at` with fallback to `datetime.min` for sorting - Display "Not started" when `started_at` is null ### Frontend **Type Definitions** - Fixed manually maintained `types.ts`: `started_at: Date | null` (was non-nullable) - Generated types were already correct **Analytics Components** - `AnalyticsResultsTable.tsx`: Show only `started_at`/`ended_at` in 2-column expandable grid - `ExecutionAnalyticsForm.tsx`: Added filter explanation UI **Monitoring Components** - Fixed null handling bugs: - `OldAgentLibraryView.tsx`: 
Handle null in reduce function - `agent-runs-selector-list.tsx`: Safe sorting with `?.getTime() ?? 0` - `AgentFlowList.tsx`: Filter/sort with null checks - `FlowRunsStatus.tsx`: Filter null timestamps - `FlowRunsTimeline.tsx`: Filter executions with null timestamps before rendering - `monitoring/page.tsx`: Safe sorting - `ActivityItem.tsx`: Fallback to "recently" for null timestamps ## Benefits ✅ **Accurate End Times**: `endedAt` is frozen when execution finishes, not updated later ✅ **Type Safety**: Nullable types match reality, exposing real bugs ✅ **Better UX**: Chart shows all days with data (not just days with ≥3 executions) ✅ **Bug Fixes**: 7+ frontend components now handle null timestamps correctly ✅ **Documentation**: Field descriptions explain when timestamps are null ## Testing ### Backend ```bash cd autogpt_platform/backend poetry run format # ✅ All checks passed poetry run lint # ✅ All checks passed ``` ### Frontend ```bash cd autogpt_platform/frontend pnpm format # ✅ All checks passed pnpm lint # ✅ All checks passed pnpm types # ✅ All type errors fixed ``` ### Test Data Generation Created script to generate 35 test executions across 7 days with correctness scores: ```bash poetry run python scripts/generate_test_analytics_data.py ``` ## Migration Notes ⚠️ **Important**: The migration only backfills `endedAt` for executions with terminal status (COMPLETED, FAILED, TERMINATED). Active executions (QUEUED, RUNNING) correctly keep `endedAt = null`. ## Breaking Changes None - this is backward compatible: - `endedAt` is nullable, existing code that doesn't use it is unaffected - Frontend already used generated types which were correct - Migration safely backfills historical data --- > [!NOTE] > Introduces explicit execution end-time tracking and normalizes timestamp handling across backend and frontend. 
> > - Adds `endedAt` to `AgentGraphExecution` (schema + migration); backfills terminal executions; sets `endedAt` on terminal status updates > - Makes `GraphExecutionMeta.started_at/ended_at` optional; updates `from_db()` to use DB `endedAt`; exposes timestamps in `ExecutionAnalyticsResult` > - Moves OpenAI key validation into batch processing; instantiates `Settings` once > - Accuracy trends: reduce daily aggregation threshold to `>= 1`; optional historical series > - Monitoring/analytics UI: results table shows/exports `started_at`/`ended_at`; adds chart filter explainer > - Frontend null-safety: update types (`Date | null`) and fix sorting/filtering/rendering for nullable timestamps across monitoring and library views > - Late execution monitor: safe sorting/display when `started_at` is null > - OpenAPI specs updated for new/nullable fields > > Written by [Cursor Bugbot](https://cursor.com/dashboard?tab=bugbot) for commit 1d987ca6e59c96520ceff4b93846295ad6d42624. This will update automatically on new commits. Configure [here](https://cursor.com/dashboard?tab=bugbot). 
--------- Co-authored-by: Nicholas Tindle --- .../admin/execution_analytics_routes.py | 19 +++-- .../backend/backend/data/analytics.py | 2 +- .../backend/backend/data/execution.py | 24 ++++-- .../monitoring/late_execution_monitor.py | 8 +- .../migration.sql | 8 ++ autogpt_platform/backend/schema.prisma | 1 + .../components/AnalyticsResultsTable.tsx | 58 ++++++++++++--- .../components/ExecutionAnalyticsForm.tsx | 14 +++- .../OldAgentLibraryView.tsx | 5 +- .../components/agent-runs-selector-list.tsx | 10 ++- .../monitoring/components/AgentFlowList.tsx | 15 ++-- .../monitoring/components/FlowRunsStatus.tsx | 5 +- .../components/FlowRunsTimeline.tsx | 53 ++++++------- .../src/app/(platform)/monitoring/page.tsx | 6 +- .../frontend/src/app/api/openapi.json | 74 +++++++++++++------ .../components/ActivityItem.tsx | 8 +- .../src/lib/autogpt-server-api/types.ts | 4 +- 17 files changed, 221 insertions(+), 93 deletions(-) create mode 100644 autogpt_platform/backend/migrations/20260113170839_add_ended_at_to_agent_graph_execution/migration.sql diff --git a/autogpt_platform/backend/backend/api/features/admin/execution_analytics_routes.py b/autogpt_platform/backend/backend/api/features/admin/execution_analytics_routes.py index 00f0bda884..45912dae67 100644 --- a/autogpt_platform/backend/backend/api/features/admin/execution_analytics_routes.py +++ b/autogpt_platform/backend/backend/api/features/admin/execution_analytics_routes.py @@ -28,6 +28,7 @@ from backend.executor.manager import get_db_async_client from backend.util.settings import Settings logger = logging.getLogger(__name__) +settings = Settings() class ExecutionAnalyticsRequest(BaseModel): @@ -63,6 +64,8 @@ class ExecutionAnalyticsResult(BaseModel): score: Optional[float] status: str # "success", "failed", "skipped" error_message: Optional[str] = None + started_at: Optional[datetime] = None + ended_at: Optional[datetime] = None class ExecutionAnalyticsResponse(BaseModel): @@ -224,11 +227,6 @@ async def 
generate_execution_analytics( ) try: - # Validate model configuration - settings = Settings() - if not settings.secrets.openai_internal_api_key: - raise HTTPException(status_code=500, detail="OpenAI API key not configured") - # Get database client db_client = get_db_async_client() @@ -320,6 +318,8 @@ async def generate_execution_analytics( ), status="skipped", error_message=None, # Not an error - just already processed + started_at=execution.started_at, + ended_at=execution.ended_at, ) ) @@ -349,6 +349,9 @@ async def _process_batch( ) -> list[ExecutionAnalyticsResult]: """Process a batch of executions concurrently.""" + if not settings.secrets.openai_internal_api_key: + raise HTTPException(status_code=500, detail="OpenAI API key not configured") + async def process_single_execution(execution) -> ExecutionAnalyticsResult: try: # Generate activity status and score using the specified model @@ -387,6 +390,8 @@ async def _process_batch( score=None, status="skipped", error_message="Activity generation returned None", + started_at=execution.started_at, + ended_at=execution.ended_at, ) # Update the execution stats @@ -416,6 +421,8 @@ async def _process_batch( summary_text=activity_response["activity_status"], score=activity_response["correctness_score"], status="success", + started_at=execution.started_at, + ended_at=execution.ended_at, ) except Exception as e: @@ -429,6 +436,8 @@ async def _process_batch( score=None, status="failed", error_message=str(e), + started_at=execution.started_at, + ended_at=execution.ended_at, ) # Process all executions in the batch concurrently diff --git a/autogpt_platform/backend/backend/data/analytics.py b/autogpt_platform/backend/backend/data/analytics.py index 7419539026..2e148cd10f 100644 --- a/autogpt_platform/backend/backend/data/analytics.py +++ b/autogpt_platform/backend/backend/data/analytics.py @@ -104,7 +104,7 @@ async def get_accuracy_trends_and_alerts( AND e."executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED') 
{user_filter} GROUP BY DATE(e."createdAt") - HAVING COUNT(*) >= 3 -- Need at least 3 executions per day + HAVING COUNT(*) >= 1 -- Include all days with at least 1 execution ), trends AS ( SELECT diff --git a/autogpt_platform/backend/backend/data/execution.py b/autogpt_platform/backend/backend/data/execution.py index 2759dfe179..a6797032fd 100644 --- a/autogpt_platform/backend/backend/data/execution.py +++ b/autogpt_platform/backend/backend/data/execution.py @@ -153,8 +153,14 @@ class GraphExecutionMeta(BaseDbModel): nodes_input_masks: Optional[dict[str, BlockInput]] preset_id: Optional[str] status: ExecutionStatus - started_at: datetime - ended_at: datetime + started_at: Optional[datetime] = Field( + None, + description="When execution started running. Null if not yet started (QUEUED).", + ) + ended_at: Optional[datetime] = Field( + None, + description="When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW).", + ) is_shared: bool = False share_token: Optional[str] = None @@ -229,10 +235,8 @@ class GraphExecutionMeta(BaseDbModel): @staticmethod def from_db(_graph_exec: AgentGraphExecution): - now = datetime.now(timezone.utc) - # TODO: make started_at and ended_at optional - start_time = _graph_exec.startedAt or _graph_exec.createdAt - end_time = _graph_exec.updatedAt or now + start_time = _graph_exec.startedAt + end_time = _graph_exec.endedAt try: stats = GraphExecutionStats.model_validate(_graph_exec.stats) @@ -902,6 +906,14 @@ async def update_graph_execution_stats( if status: update_data["executionStatus"] = status + # Set endedAt when execution reaches a terminal status + terminal_statuses = [ + ExecutionStatus.COMPLETED, + ExecutionStatus.FAILED, + ExecutionStatus.TERMINATED, + ] + if status in terminal_statuses: + update_data["endedAt"] = datetime.now(tz=timezone.utc) where_clause: AgentGraphExecutionWhereInput = {"id": graph_exec_id} diff --git a/autogpt_platform/backend/backend/monitoring/late_execution_monitor.py 
b/autogpt_platform/backend/backend/monitoring/late_execution_monitor.py index 1e0c99cac3..fd3d31cb47 100644 --- a/autogpt_platform/backend/backend/monitoring/late_execution_monitor.py +++ b/autogpt_platform/backend/backend/monitoring/late_execution_monitor.py @@ -60,8 +60,10 @@ class LateExecutionMonitor: if not all_late_executions: return "No late executions detected." - # Sort by created time (oldest first) - all_late_executions.sort(key=lambda x: x.started_at) + # Sort by started time (oldest first), with None values (unstarted) first + all_late_executions.sort( + key=lambda x: x.started_at or datetime.min.replace(tzinfo=timezone.utc) + ) num_total_late = len(all_late_executions) num_queued = len(queued_late_executions) @@ -74,7 +76,7 @@ class LateExecutionMonitor: was_truncated = num_total_late > tuncate_size late_execution_details = [ - f"* `Execution ID: {exec.id}, Graph ID: {exec.graph_id}v{exec.graph_version}, User ID: {exec.user_id}, Status: {exec.status}, Created At: {exec.started_at.isoformat()}`" + f"* `Execution ID: {exec.id}, Graph ID: {exec.graph_id}v{exec.graph_version}, User ID: {exec.user_id}, Status: {exec.status}, Started At: {exec.started_at.isoformat() if exec.started_at else 'Not started'}`" for exec in truncated_executions ] diff --git a/autogpt_platform/backend/migrations/20260113170839_add_ended_at_to_agent_graph_execution/migration.sql b/autogpt_platform/backend/migrations/20260113170839_add_ended_at_to_agent_graph_execution/migration.sql new file mode 100644 index 0000000000..95fa3a573a --- /dev/null +++ b/autogpt_platform/backend/migrations/20260113170839_add_ended_at_to_agent_graph_execution/migration.sql @@ -0,0 +1,8 @@ +-- AlterTable +ALTER TABLE "AgentGraphExecution" ADD COLUMN "endedAt" TIMESTAMP(3); + +-- Set endedAt to updatedAt for existing records with terminal status only +UPDATE "AgentGraphExecution" +SET "endedAt" = "updatedAt" +WHERE "endedAt" IS NULL + AND "executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED'); diff 
--git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma index b7dc98524a..4a2a7b583a 100644 --- a/autogpt_platform/backend/schema.prisma +++ b/autogpt_platform/backend/schema.prisma @@ -450,6 +450,7 @@ model AgentGraphExecution { createdAt DateTime @default(now()) updatedAt DateTime? @updatedAt startedAt DateTime? + endedAt DateTime? isDeleted Boolean @default(false) diff --git a/autogpt_platform/frontend/src/app/(platform)/admin/execution-analytics/components/AnalyticsResultsTable.tsx b/autogpt_platform/frontend/src/app/(platform)/admin/execution-analytics/components/AnalyticsResultsTable.tsx index 56c52e2ceb..7ab251efb1 100644 --- a/autogpt_platform/frontend/src/app/(platform)/admin/execution-analytics/components/AnalyticsResultsTable.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/admin/execution-analytics/components/AnalyticsResultsTable.tsx @@ -51,6 +51,8 @@ export function AnalyticsResultsTable({ results }: Props) { "Execution ID", "Status", "Score", + "Started At", + "Ended At", "Summary Text", "Error Message", ]; @@ -62,6 +64,8 @@ export function AnalyticsResultsTable({ results }: Props) { result.exec_id, result.status, result.score?.toString() || "", + result.started_at ? new Date(result.started_at).toLocaleString() : "", + result.ended_at ? new Date(result.ended_at).toLocaleString() : "", `"${(result.summary_text || "").replace(/"/g, '""')}"`, // Escape quotes in summary `"${(result.error_message || "").replace(/"/g, '""')}"`, // Escape quotes in error ]); @@ -248,15 +252,13 @@ export function AnalyticsResultsTable({ results }: Props) { )} - {(result.summary_text || result.error_message) && ( - - )} + @@ -264,6 +266,44 @@ export function AnalyticsResultsTable({ results }: Props) {
+ {/* Timestamps section */} +
+
+ + Started At: + + + {result.started_at + ? new Date( + result.started_at, + ).toLocaleString() + : "—"} + +
+
+ + Ended At: + + + {result.ended_at + ? new Date(result.ended_at).toLocaleString() + : "—"} + +
+
+ {result.summary_text && (
-

Execution Accuracy Trends

+
+

Execution Accuracy Trends

+
+

+ Chart Filters (matches monitoring system): +

+
    +
  • Only days with ≥1 execution with correctness score
  • +
  • Last 30 days
  • +
  • Averages calculated from scored executions only
  • +
+
+
{/* Alert Section */} {trendsData.alert && ( diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView.tsx index dff1b5a7bb..54cc07878d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView.tsx @@ -173,8 +173,9 @@ export function OldAgentLibraryView() { if (agentRuns.length > 0) { // select latest run const latestRun = agentRuns.reduce((latest, current) => { - if (latest.started_at && !current.started_at) return current; - else if (!latest.started_at) return latest; + if (!latest.started_at && !current.started_at) return latest; + if (!latest.started_at) return current; + if (!current.started_at) return latest; return latest.started_at > current.started_at ? latest : current; }, agentRuns[0]); selectRun(latestRun.id as GraphExecutionID); diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-runs-selector-list.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-runs-selector-list.tsx index 5931404846..49d93b4319 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-runs-selector-list.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-runs-selector-list.tsx @@ -184,9 +184,11 @@ export function AgentRunsSelectorList({ ))} {agentPresets.length > 0 && } {agentRuns - .toSorted( - (a, b) => b.started_at.getTime() - a.started_at.getTime(), - ) + .toSorted((a, b) => { + const aTime = a.started_at?.getTime() ?? 
0; + const bTime = b.started_at?.getTime() ?? 0; + return bTime - aTime; + }) .map((run) => ( onSelectRun(run.id)} onDelete={() => doDeleteRun(run as GraphExecutionMeta)} diff --git a/autogpt_platform/frontend/src/app/(platform)/monitoring/components/AgentFlowList.tsx b/autogpt_platform/frontend/src/app/(platform)/monitoring/components/AgentFlowList.tsx index d4cb6fc649..badba61bf1 100644 --- a/autogpt_platform/frontend/src/app/(platform)/monitoring/components/AgentFlowList.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/monitoring/components/AgentFlowList.tsx @@ -120,9 +120,11 @@ export const AgentFlowList = ({ lastRun = runCount == 0 ? null - : _flowRuns.reduce((a, c) => - a.started_at > c.started_at ? a : c, - ); + : _flowRuns.reduce((a, c) => { + const aTime = a.started_at?.getTime() ?? 0; + const cTime = c.started_at?.getTime() ?? 0; + return aTime > cTime ? a : c; + }); } return { flow, runCount, lastRun }; }) @@ -130,10 +132,9 @@ export const AgentFlowList = ({ if (!a.lastRun && !b.lastRun) return 0; if (!a.lastRun) return 1; if (!b.lastRun) return -1; - return ( - b.lastRun.started_at.getTime() - - a.lastRun.started_at.getTime() - ); + const bTime = b.lastRun.started_at?.getTime() ?? 0; + const aTime = a.lastRun.started_at?.getTime() ?? 0; + return bTime - aTime; }) .map(({ flow, runCount, lastRun }) => ( fr.started_at.getTime() > statsSinceTimestamp) + ? 
executions.filter( + (fr) => + fr.started_at && fr.started_at.getTime() > statsSinceTimestamp, + ) : executions; return ( diff --git a/autogpt_platform/frontend/src/app/(platform)/monitoring/components/FlowRunsTimeline.tsx b/autogpt_platform/frontend/src/app/(platform)/monitoring/components/FlowRunsTimeline.tsx index 02050627f5..ee01d67234 100644 --- a/autogpt_platform/frontend/src/app/(platform)/monitoring/components/FlowRunsTimeline.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/monitoring/components/FlowRunsTimeline.tsx @@ -98,40 +98,43 @@ export const FlowRunsTimeline = ({ e.graph_id == flow.graph_id) + .filter((e) => e.graph_id == flow.graph_id && e.started_at) .map((e) => ({ ...e, time: - e.started_at.getTime() + (e.stats?.node_exec_time ?? 0) * 1000, + (e.started_at?.getTime() ?? 0) + + (e.stats?.node_exec_time ?? 0) * 1000, _duration: e.stats?.node_exec_time ?? 0, }))} name={flow.name} fill={`hsl(${(hashString(flow.id) * 137.5) % 360}, 70%, 50%)`} /> ))} - {executions.map((execution) => ( - - ))} + {executions + .filter((e) => e.started_at && e.ended_at) + .map((execution) => ( + + ))} } wrapperStyle={{ diff --git a/autogpt_platform/frontend/src/app/(platform)/monitoring/page.tsx b/autogpt_platform/frontend/src/app/(platform)/monitoring/page.tsx index 3b5aa46839..4c356dabef 100644 --- a/autogpt_platform/frontend/src/app/(platform)/monitoring/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/monitoring/page.tsx @@ -98,7 +98,11 @@ const Monitor = () => { ...(selectedFlow ? executions.filter((v) => v.graph_id == selectedFlow.graph_id) : executions), - ].sort((a, b) => b.started_at.getTime() - a.started_at.getTime())} + ].sort((a, b) => { + const aTime = a.started_at?.getTime() ?? 0; + const bTime = b.started_at?.getTime() ?? 0; + return bTime - aTime; + })} selectedRun={selectedRun} onSelectRun={(r) => setSelectedRun(r.id == selectedRun?.id ? 
null : r)} /> diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index 776ba2321a..fc4e737651 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -7148,6 +7148,20 @@ "error_message": { "anyOf": [{ "type": "string" }, { "type": "null" }], "title": "Error Message" + }, + "started_at": { + "anyOf": [ + { "type": "string", "format": "date-time" }, + { "type": "null" } + ], + "title": "Started At" + }, + "ended_at": { + "anyOf": [ + { "type": "string", "format": "date-time" }, + { "type": "null" } + ], + "title": "Ended At" } }, "type": "object", @@ -7254,14 +7268,20 @@ }, "status": { "$ref": "#/components/schemas/AgentExecutionStatus" }, "started_at": { - "type": "string", - "format": "date-time", - "title": "Started At" + "anyOf": [ + { "type": "string", "format": "date-time" }, + { "type": "null" } + ], + "title": "Started At", + "description": "When execution started running. Null if not yet started (QUEUED)." }, "ended_at": { - "type": "string", - "format": "date-time", - "title": "Ended At" + "anyOf": [ + { "type": "string", "format": "date-time" }, + { "type": "null" } + ], + "title": "Ended At", + "description": "When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW)." }, "is_shared": { "type": "boolean", @@ -7295,8 +7315,6 @@ "nodes_input_masks", "preset_id", "status", - "started_at", - "ended_at", "stats", "outputs" ], @@ -7395,14 +7413,20 @@ }, "status": { "$ref": "#/components/schemas/AgentExecutionStatus" }, "started_at": { - "type": "string", - "format": "date-time", - "title": "Started At" + "anyOf": [ + { "type": "string", "format": "date-time" }, + { "type": "null" } + ], + "title": "Started At", + "description": "When execution started running. Null if not yet started (QUEUED)." 
}, "ended_at": { - "type": "string", - "format": "date-time", - "title": "Ended At" + "anyOf": [ + { "type": "string", "format": "date-time" }, + { "type": "null" } + ], + "title": "Ended At", + "description": "When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW)." }, "is_shared": { "type": "boolean", @@ -7431,8 +7455,6 @@ "nodes_input_masks", "preset_id", "status", - "started_at", - "ended_at", "stats" ], "title": "GraphExecutionMeta" @@ -7479,14 +7501,20 @@ }, "status": { "$ref": "#/components/schemas/AgentExecutionStatus" }, "started_at": { - "type": "string", - "format": "date-time", - "title": "Started At" + "anyOf": [ + { "type": "string", "format": "date-time" }, + { "type": "null" } + ], + "title": "Started At", + "description": "When execution started running. Null if not yet started (QUEUED)." }, "ended_at": { - "type": "string", - "format": "date-time", - "title": "Ended At" + "anyOf": [ + { "type": "string", "format": "date-time" }, + { "type": "null" } + ], + "title": "Ended At", + "description": "When execution finished. Null if not yet completed (QUEUED, RUNNING, INCOMPLETE, REVIEW)." 
}, "is_shared": { "type": "boolean", @@ -7525,8 +7553,6 @@ "nodes_input_masks", "preset_id", "status", - "started_at", - "ended_at", "stats", "outputs", "node_executions" diff --git a/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/components/ActivityItem.tsx b/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/components/ActivityItem.tsx index 1ebca96b05..17576e1665 100644 --- a/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/components/ActivityItem.tsx +++ b/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/components/ActivityItem.tsx @@ -50,7 +50,9 @@ export function ActivityItem({ execution }: Props) { execution.status === AgentExecutionStatus.QUEUED; if (isActiveStatus) { - const timeAgo = formatTimeAgo(execution.started_at.toString()); + const timeAgo = execution.started_at + ? formatTimeAgo(execution.started_at.toString()) + : "recently"; const statusText = execution.status === AgentExecutionStatus.QUEUED ? "queued" : "running"; return [ @@ -61,7 +63,9 @@ export function ActivityItem({ execution }: Props) { // Handle all other statuses with time display const timeAgo = execution.ended_at ? formatTimeAgo(execution.ended_at.toString()) - : formatTimeAgo(execution.started_at.toString()); + : execution.started_at + ? 
formatTimeAgo(execution.started_at.toString()) + : "recently"; let statusText = "ended"; switch (execution.status) { diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts index 8bc39a390a..82c03bc9f1 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts @@ -327,8 +327,8 @@ export type GraphExecutionMeta = { | "FAILED" | "INCOMPLETE" | "REVIEW"; - started_at: Date; - ended_at: Date; + started_at: Date | null; + ended_at: Date | null; stats: { error: string | null; cost: number;