feat(platform): Add execution analytics admin endpoint with feature flag bypass (#11327)
This PR adds a comprehensive execution analytics admin endpoint that generates AI-powered activity summaries and correctness scores for graph executions, with a proper feature flag bypass for admin use.

### Changes 🏗️

**Backend Changes:**
- Added admin endpoint: `/api/executions/admin/execution_analytics`
- Implemented feature flag bypass with `skip_feature_flag=True` parameter for admin operations
- Fixed async database client usage (`get_db_async_client`) to resolve async/await errors
- Added batch processing with configurable size limits to handle large datasets
- Added comprehensive error handling and logging for troubleshooting
- Renamed the entire feature from "Activity Backfill" to "Execution Analytics" for clarity

**Frontend Changes:**
- Created a clean admin UI for execution analytics generation at `/admin/execution-analytics`
- Built a form with graph ID input, model selection dropdown, and optional filters
- Implemented a results table with status badges and detailed execution information
- Added CSV export functionality for analytics results
- Integrated with the generated TypeScript API client for proper authentication
- Added proper error handling with toast notifications and loading states

**Database & API:**
- Fixed a critical async/await issue by switching from the sync to the async database client
- Updated router configuration and endpoint naming for consistency
- Generated proper TypeScript types and API client integration
- Applied feature flag filtering at the API level while bypassing it for admin operations

### Checklist 📋

#### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:

**Test Plan:**
- [x] Admin can access the execution analytics page at `/admin/execution-analytics`
- [x] Form validation works correctly (requires graph ID, validates inputs)
- [x] API endpoint `/api/executions/admin/execution_analytics` responds correctly
- [x] Authentication works properly through the generated API client
- [x] Analytics generation works with different LLM models (gpt-4o-mini, gpt-4o, etc.)
- [x] Results display correctly with appropriate status badges (success/failed/skipped)
- [x] CSV export downloads the correct data
- [x] Error handling displays appropriate toast messages
- [x] Feature flag bypass works for admin users (generates analytics regardless of user flags)
- [x] Batch processing handles multiple executions correctly
- [x] Loading states show proper feedback during processing

#### For configuration changes:
- [x] `.env.default` is updated or already compatible with my changes
- [x] `docker-compose.yml` is updated or already compatible with my changes
- [x] No configuration changes required for this feature

**Related to:** PR #11325 (base correctness score functionality)

🤖 Generated with [Claude Code](https://claude.ai/code)

---------

Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: claude[bot] <41898282+claude[bot]@users.noreply.github.com>
Co-authored-by: Zamil Majdy <majdyz@users.noreply.github.com>
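For reference, a minimal sketch of how an admin client might exercise the new endpoint. The host, port, and token handling below are assumptions about the deployment; only the path and the request fields come from this PR.

```python
# Hedged sketch: calling the new admin endpoint with httpx.
# Adjust base URL and auth to your deployment; the token below is hypothetical.
import httpx

ADMIN_JWT = "..."  # hypothetical admin bearer token

payload = {
    "graph_id": "your-graph-id",   # required
    "model_name": "gpt-4o-mini",   # optional, defaults to gpt-4o-mini
    "batch_size": 10,              # 1..25 per the request schema
}

resp = httpx.post(
    "http://localhost:8006/api/executions/admin/execution_analytics",
    json=payload,
    headers={"Authorization": f"Bearer {ADMIN_JWT}"},
    timeout=300,  # generation can take a while for large graphs
)
resp.raise_for_status()
report = resp.json()
print(report["successful_analytics"], report["failed_analytics"], report["skipped_executions"])
```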
@@ -194,6 +194,12 @@ class GraphExecutionMeta(BaseDbModel):
                correctness_score=self.correctness_score,
            )

        def without_activity_features(self) -> "GraphExecutionMeta.Stats":
            """Return a copy of stats with activity features (activity_status, correctness_score) set to None."""
            return self.model_copy(
                update={"activity_status": None, "correctness_score": None}
            )

    stats: Stats | None

    @staticmethod
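The new `without_activity_features` helper relies on Pydantic's `model_copy(update=...)`, which returns a modified copy and leaves the original instance untouched. A self-contained sketch with a stand-in `Stats` model (not the real nested class):

```python
from pydantic import BaseModel


class Stats(BaseModel):
    cost: int = 0
    activity_status: str | None = None
    correctness_score: float | None = None


original = Stats(cost=42, activity_status="Summarized the run", correctness_score=0.9)

# model_copy(update=...) overrides only the given fields on a new instance
redacted = original.model_copy(update={"activity_status": None, "correctness_score": None})

assert original.activity_status == "Summarized the run"   # original untouched
assert redacted.activity_status is None and redacted.correctness_score is None
assert redacted.cost == 42                                 # other fields carry over
```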
@@ -91,6 +91,8 @@ async def generate_activity_status_for_execution(
    db_client: "DatabaseManagerAsyncClient",
    user_id: str,
    execution_status: ExecutionStatus | None = None,
+   model_name: str = "gpt-4o-mini",
+   skip_feature_flag: bool = False,
) -> ActivityStatusResponse | None:
    """
    Generate an AI-based activity status summary and correctness assessment for a graph execution.
@@ -112,7 +114,9 @@ async def generate_activity_status_for_execution(
        or None if feature is disabled
    """
    # Check LaunchDarkly feature flag for AI activity status generation with full context support
-   if not await is_feature_enabled(Flag.AI_ACTIVITY_STATUS, user_id):
+   if not skip_feature_flag and not await is_feature_enabled(
+       Flag.AI_ACTIVITY_STATUS, user_id
+   ):
        logger.debug("AI activity status generation is disabled via LaunchDarkly")
        return None
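Because `and` short-circuits, passing `skip_feature_flag=True` means the LaunchDarkly lookup is never awaited at all. A minimal sketch with a stub flag checker standing in for `is_feature_enabled`:

```python
# Stand-in flag checker, not the real LaunchDarkly client.
import asyncio


async def is_feature_enabled_stub(flag: str, user_id: str) -> bool:
    print("flag service consulted")
    return False


async def should_generate(user_id: str, skip_feature_flag: bool = False) -> bool:
    if not skip_feature_flag and not await is_feature_enabled_stub("ai-activity-status", user_id):
        return False  # feature disabled for this user
    return True


async def main() -> None:
    print(await should_generate("user-1"))                          # consults the flag service -> False
    print(await should_generate("user-1", skip_feature_flag=True))  # admin path: flag service never called -> True


asyncio.run(main())
```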
@@ -273,7 +277,7 @@ async def generate_activity_status_for_execution(
        prompt=prompt[1]["content"],  # User prompt content
        sys_prompt=prompt[0]["content"],  # System prompt content
        expected_format=expected_format,
-       model=LlmModel.GPT4O_MINI,
+       model=LlmModel(model_name),
        credentials=credentials_input,  # type: ignore
        max_tokens=150,
        retry=3,
@@ -306,7 +310,7 @@ async def generate_activity_status_for_execution(
        return activity_response

    except Exception as e:
-       logger.error(
+       logger.exception(
            f"Failed to generate activity status for execution {graph_exec_id}: {str(e)}"
        )
        return None
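Switching from `logger.error` to `logger.exception` inside the `except` block attaches the active traceback to the log record automatically. A quick comparison:

```python
# logger.exception logs at ERROR level *and* appends the current traceback,
# which logger.error does not do unless exc_info=True is passed explicitly.
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("activity_status")

try:
    1 / 0
except Exception as e:
    logger.error(f"without traceback: {e}")      # message only
    logger.exception(f"with traceback: {e}")     # message + stack trace
```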
@@ -24,6 +24,7 @@ import backend.integrations.webhooks.utils
import backend.server.routers.postmark.postmark
import backend.server.routers.v1
import backend.server.v2.admin.credit_admin_routes
+import backend.server.v2.admin.execution_analytics_routes
import backend.server.v2.admin.store_admin_routes
import backend.server.v2.builder
import backend.server.v2.builder.routes
@@ -269,6 +270,11 @@ app.include_router(
    tags=["v2", "admin"],
    prefix="/api/credits",
)
+app.include_router(
+    backend.server.v2.admin.execution_analytics_routes.router,
+    tags=["v2", "admin"],
+    prefix="/api/executions",
+)
app.include_router(
    backend.server.v2.library.routes.router, tags=["v2"], prefix="/api/library"
)
@@ -89,6 +89,7 @@ from backend.util.cache import cached
from backend.util.clients import get_scheduler_client
from backend.util.cloud_storage import get_cloud_storage_handler
from backend.util.exceptions import GraphValidationError, NotFoundError
+from backend.util.feature_flag import Flag, is_feature_enabled
from backend.util.json import dumps
from backend.util.settings import Settings
from backend.util.timezone_utils import (
@@ -109,6 +110,39 @@ def _create_file_size_error(size_bytes: int, max_size_mb: int) -> HTTPException:
settings = Settings()
logger = logging.getLogger(__name__)


async def hide_activity_summaries_if_disabled(
    executions: list[execution_db.GraphExecutionMeta], user_id: str
) -> list[execution_db.GraphExecutionMeta]:
    """Hide activity summaries and scores if AI_ACTIVITY_STATUS feature is disabled."""
    if await is_feature_enabled(Flag.AI_ACTIVITY_STATUS, user_id):
        return executions  # Return as-is if feature is enabled

    # Filter out activity features if disabled
    filtered_executions = []
    for execution in executions:
        if execution.stats:
            filtered_stats = execution.stats.without_activity_features()
            execution = execution.model_copy(update={"stats": filtered_stats})
        filtered_executions.append(execution)
    return filtered_executions


async def hide_activity_summary_if_disabled(
    execution: execution_db.GraphExecution | execution_db.GraphExecutionWithNodes,
    user_id: str,
) -> execution_db.GraphExecution | execution_db.GraphExecutionWithNodes:
    """Hide activity summary and score for a single execution if AI_ACTIVITY_STATUS feature is disabled."""
    if await is_feature_enabled(Flag.AI_ACTIVITY_STATUS, user_id):
        return execution  # Return as-is if feature is enabled

    # Filter out activity features if disabled
    if execution.stats:
        filtered_stats = execution.stats.without_activity_features()
        return execution.model_copy(update={"stats": filtered_stats})
    return execution


# Define the API routes
v1_router = APIRouter()
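A self-contained sketch of how the list-level filter behaves, using stand-in models instead of `execution_db.GraphExecutionMeta` and a plain boolean instead of the feature-flag client:

```python
from pydantic import BaseModel


class Stats(BaseModel):
    activity_status: str | None = None
    correctness_score: float | None = None

    def without_activity_features(self) -> "Stats":
        return self.model_copy(update={"activity_status": None, "correctness_score": None})


class ExecutionMeta(BaseModel):
    id: str
    stats: Stats | None = None


def hide_activity_summaries(executions: list[ExecutionMeta], feature_enabled: bool) -> list[ExecutionMeta]:
    if feature_enabled:
        return executions
    out = []
    for ex in executions:
        if ex.stats:
            ex = ex.model_copy(update={"stats": ex.stats.without_activity_features()})
        out.append(ex)  # executions without stats pass through unchanged
    return out


runs = [
    ExecutionMeta(id="run-1", stats=Stats(activity_status="Did a thing", correctness_score=0.8)),
    ExecutionMeta(id="run-2", stats=None),
]
hidden = hide_activity_summaries(runs, feature_enabled=False)
assert hidden[0].stats is not None and hidden[0].stats.activity_status is None
assert hidden[1].stats is None
```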
@@ -986,7 +1020,12 @@ async def list_graphs_executions(
        page=1,
        page_size=250,
    )
-   return paginated_result.executions
+
+   # Apply feature flags to filter out disabled features
+   filtered_executions = await hide_activity_summaries_if_disabled(
+       paginated_result.executions, user_id
+   )
+   return filtered_executions


@v1_router.get(
@@ -1003,13 +1042,21 @@ async def list_graph_executions(
        25, ge=1, le=100, description="Number of executions per page"
    ),
) -> execution_db.GraphExecutionsPaginated:
-   return await execution_db.get_graph_executions_paginated(
+   paginated_result = await execution_db.get_graph_executions_paginated(
        graph_id=graph_id,
        user_id=user_id,
        page=page,
        page_size=page_size,
    )

+   # Apply feature flags to filter out disabled features
+   filtered_executions = await hide_activity_summaries_if_disabled(
+       paginated_result.executions, user_id
+   )
+   return execution_db.GraphExecutionsPaginated(
+       executions=filtered_executions, pagination=paginated_result.pagination
+   )


@v1_router.get(
    path="/graphs/{graph_id}/executions/{graph_exec_id}",
@@ -1038,6 +1085,9 @@ async def get_graph_execution(
            status_code=404, detail=f"Graph execution #{graph_exec_id} not found."
        )

+   # Apply feature flags to filter out disabled features
+   result = await hide_activity_summary_if_disabled(result, user_id)
+
    return result
@@ -0,0 +1,301 @@
import asyncio
import logging
from datetime import datetime
from typing import Optional

from autogpt_libs.auth import get_user_id, requires_admin_user
from fastapi import APIRouter, HTTPException, Security
from pydantic import BaseModel, Field

from backend.data.execution import (
    ExecutionStatus,
    GraphExecutionMeta,
    get_graph_executions,
    update_graph_execution_stats,
)
from backend.data.model import GraphExecutionStats
from backend.executor.activity_status_generator import (
    generate_activity_status_for_execution,
)
from backend.executor.manager import get_db_async_client
from backend.util.settings import Settings

logger = logging.getLogger(__name__)


class ExecutionAnalyticsRequest(BaseModel):
    graph_id: str = Field(..., description="Graph ID to analyze")
    graph_version: Optional[int] = Field(None, description="Optional graph version")
    user_id: Optional[str] = Field(None, description="Optional user ID filter")
    created_after: Optional[datetime] = Field(
        None, description="Optional created date lower bound"
    )
    model_name: Optional[str] = Field(
        "gpt-4o-mini", description="Model to use for generation"
    )
    batch_size: int = Field(
        10, description="Batch size for concurrent processing", le=25, ge=1
    )


class ExecutionAnalyticsResult(BaseModel):
    agent_id: str
    version_id: int
    user_id: str
    exec_id: str
    summary_text: Optional[str]
    score: Optional[float]
    status: str  # "success", "failed", "skipped"
    error_message: Optional[str] = None


class ExecutionAnalyticsResponse(BaseModel):
    total_executions: int
    processed_executions: int
    successful_analytics: int
    failed_analytics: int
    skipped_executions: int
    results: list[ExecutionAnalyticsResult]


router = APIRouter(
    prefix="/admin",
    tags=["admin", "execution_analytics"],
    dependencies=[Security(requires_admin_user)],
)


@router.post(
    "/execution_analytics",
    response_model=ExecutionAnalyticsResponse,
    summary="Generate Execution Analytics",
)
async def generate_execution_analytics(
    request: ExecutionAnalyticsRequest,
    admin_user_id: str = Security(get_user_id),
):
    """
    Generate activity summaries and correctness scores for graph executions.

    This endpoint:
    1. Fetches all completed executions matching the criteria
    2. Identifies executions missing activity_status or correctness_score
    3. Generates missing data using AI in batches
    4. Updates the database with new stats
    5. Returns a detailed report of the analytics operation
    """
    logger.info(
        f"Admin user {admin_user_id} starting execution analytics generation for graph {request.graph_id}"
    )

    try:
        # Validate model configuration
        settings = Settings()
        if not settings.secrets.openai_internal_api_key:
            raise HTTPException(status_code=500, detail="OpenAI API key not configured")

        # Get database client
        db_client = get_db_async_client()

        # Fetch executions to process
        executions = await get_graph_executions(
            graph_id=request.graph_id,
            user_id=request.user_id,
            created_time_gte=request.created_after,
            statuses=[
                ExecutionStatus.COMPLETED,
                ExecutionStatus.FAILED,
                ExecutionStatus.TERMINATED,
            ],  # Only process finished executions
        )

        logger.info(
            f"Found {len(executions)} total executions for graph {request.graph_id}"
        )

        # Filter executions that need analytics generation (missing activity_status or correctness_score)
        executions_to_process = []
        for execution in executions:
            if (
                not execution.stats
                or not execution.stats.activity_status
                or execution.stats.correctness_score is None
            ):
                # If version is specified, filter by it
                if (
                    request.graph_version is None
                    or execution.graph_version == request.graph_version
                ):
                    executions_to_process.append(execution)

        logger.info(
            f"Found {len(executions_to_process)} executions needing analytics generation"
        )

        # Create results for ALL executions - processed and skipped
        results = []
        successful_count = 0
        failed_count = 0

        # Process executions that need analytics generation
        if executions_to_process:
            total_batches = len(
                range(0, len(executions_to_process), request.batch_size)
            )

            for batch_idx, i in enumerate(
                range(0, len(executions_to_process), request.batch_size)
            ):
                batch = executions_to_process[i : i + request.batch_size]
                logger.info(
                    f"Processing batch {batch_idx + 1}/{total_batches} with {len(batch)} executions"
                )

                batch_results = await _process_batch(
                    batch, request.model_name or "gpt-4o-mini", db_client
                )

                for result in batch_results:
                    results.append(result)
                    if result.status == "success":
                        successful_count += 1
                    elif result.status == "failed":
                        failed_count += 1

                # Small delay between batches to avoid overwhelming the LLM API
                if batch_idx < total_batches - 1:  # Don't delay after the last batch
                    await asyncio.sleep(2)

        # Add ALL executions to results (both processed and skipped)
        for execution in executions:
            # Skip if already processed (added to results above)
            if execution in executions_to_process:
                continue

            results.append(
                ExecutionAnalyticsResult(
                    agent_id=execution.graph_id,
                    version_id=execution.graph_version,
                    user_id=execution.user_id,
                    exec_id=execution.id,
                    summary_text=(
                        execution.stats.activity_status if execution.stats else None
                    ),
                    score=(
                        execution.stats.correctness_score if execution.stats else None
                    ),
                    status="skipped",
                    error_message=None,  # Not an error - just already processed
                )
            )

        response = ExecutionAnalyticsResponse(
            total_executions=len(executions),
            processed_executions=len(executions_to_process),
            successful_analytics=successful_count,
            failed_analytics=failed_count,
            skipped_executions=len(executions) - len(executions_to_process),
            results=results,
        )

        logger.info(
            f"Analytics generation completed: {successful_count} successful, {failed_count} failed, "
            f"{response.skipped_executions} skipped"
        )

        return response

    except Exception as e:
        logger.exception(f"Error during execution analytics generation: {e}")
        raise HTTPException(status_code=500, detail=str(e))


async def _process_batch(
    executions, model_name: str, db_client
) -> list[ExecutionAnalyticsResult]:
    """Process a batch of executions concurrently."""

    async def process_single_execution(execution) -> ExecutionAnalyticsResult:
        try:
            # Generate activity status and score using the specified model
            # Convert stats to GraphExecutionStats if needed
            if execution.stats:
                if isinstance(execution.stats, GraphExecutionMeta.Stats):
                    stats_for_generation = execution.stats.to_db()
                else:
                    # Already GraphExecutionStats
                    stats_for_generation = execution.stats
            else:
                stats_for_generation = GraphExecutionStats()

            activity_response = await generate_activity_status_for_execution(
                graph_exec_id=execution.id,
                graph_id=execution.graph_id,
                graph_version=execution.graph_version,
                execution_stats=stats_for_generation,
                db_client=db_client,
                user_id=execution.user_id,
                execution_status=execution.status,
                model_name=model_name,  # Pass model name parameter
                skip_feature_flag=True,  # Admin endpoint bypasses feature flags
            )

            if not activity_response:
                return ExecutionAnalyticsResult(
                    agent_id=execution.graph_id,
                    version_id=execution.graph_version,
                    user_id=execution.user_id,
                    exec_id=execution.id,
                    summary_text=None,
                    score=None,
                    status="skipped",
                    error_message="Activity generation returned None",
                )

            # Update the execution stats
            # Convert GraphExecutionMeta.Stats to GraphExecutionStats for DB compatibility
            if execution.stats:
                if isinstance(execution.stats, GraphExecutionMeta.Stats):
                    updated_stats = execution.stats.to_db()
                else:
                    # Already GraphExecutionStats
                    updated_stats = execution.stats
            else:
                updated_stats = GraphExecutionStats()

            updated_stats.activity_status = activity_response["activity_status"]
            updated_stats.correctness_score = activity_response["correctness_score"]

            # Save to database with correct stats type
            await update_graph_execution_stats(
                graph_exec_id=execution.id, stats=updated_stats
            )

            return ExecutionAnalyticsResult(
                agent_id=execution.graph_id,
                version_id=execution.graph_version,
                user_id=execution.user_id,
                exec_id=execution.id,
                summary_text=activity_response["activity_status"],
                score=activity_response["correctness_score"],
                status="success",
            )

        except Exception as e:
            logger.exception(f"Error processing execution {execution.id}: {e}")
            return ExecutionAnalyticsResult(
                agent_id=execution.graph_id,
                version_id=execution.graph_version,
                user_id=execution.user_id,
                exec_id=execution.id,
                summary_text=None,
                score=None,
                status="failed",
                error_message=str(e),
            )

    # Process all executions in the batch concurrently
    return await asyncio.gather(
        *[process_single_execution(execution) for execution in executions]
    )
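The batching in the endpoint above derives the batch count from `len(range(0, n, batch_size))`, which equals `ceil(n / batch_size)`; the final slice simply holds the remainder. A small check:

```python
import math

executions = list(range(23))   # pretend 23 executions need analytics
batch_size = 10

total_batches = len(range(0, len(executions), batch_size))
assert total_batches == math.ceil(len(executions) / batch_size) == 3

batches = [executions[i : i + batch_size] for i in range(0, len(executions), batch_size)]
assert [len(b) for b in batches] == [10, 10, 3]   # last batch holds the remainder
```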
@@ -0,0 +1,319 @@
"use client";

import React, { useState } from "react";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { Badge } from "@/components/atoms/Badge/Badge";
import { DownloadIcon, EyeIcon, CopyIcon } from "@phosphor-icons/react";
import { useToast } from "@/components/molecules/Toast/use-toast";
import type { ExecutionAnalyticsResponse } from "@/app/api/__generated__/models/executionAnalyticsResponse";

interface Props {
  results: ExecutionAnalyticsResponse;
}

export function AnalyticsResultsTable({ results }: Props) {
  const [expandedRows, setExpandedRows] = useState<Set<string>>(new Set());
  const { toast } = useToast();

  const createCopyableId = (value: string, label: string) => (
    <div
      className="group flex cursor-pointer items-center gap-1 font-mono text-xs text-gray-500 hover:text-gray-700"
      onClick={() => {
        navigator.clipboard.writeText(value);
        toast({
          title: "Copied",
          description: `${label} copied to clipboard`,
        });
      }}
      title={`Click to copy ${label.toLowerCase()}`}
    >
      {value.substring(0, 8)}...
      <CopyIcon className="h-3 w-3 opacity-0 transition-opacity group-hover:opacity-100" />
    </div>
  );

  const toggleRowExpansion = (execId: string) => {
    const newExpanded = new Set(expandedRows);
    if (newExpanded.has(execId)) {
      newExpanded.delete(execId);
    } else {
      newExpanded.add(execId);
    }
    setExpandedRows(newExpanded);
  };

  const exportToCSV = () => {
    const headers = [
      "Agent ID",
      "Version",
      "User ID",
      "Execution ID",
      "Status",
      "Score",
      "Summary Text",
      "Error Message",
    ];

    const csvData = results.results.map((result) => [
      result.agent_id,
      result.version_id.toString(),
      result.user_id,
      result.exec_id,
      result.status,
      result.score?.toString() || "",
      `"${(result.summary_text || "").replace(/"/g, '""')}"`, // Escape quotes in summary
      `"${(result.error_message || "").replace(/"/g, '""')}"`, // Escape quotes in error
    ]);

    const csvContent = [
      headers.join(","),
      ...csvData.map((row) => row.join(",")),
    ].join("\n");

    const blob = new Blob([csvContent], { type: "text/csv;charset=utf-8;" });
    const link = document.createElement("a");
    const url = URL.createObjectURL(blob);

    link.setAttribute("href", url);
    link.setAttribute(
      "download",
      `execution-analytics-results-${new Date().toISOString().split("T")[0]}.csv`,
    );
    link.style.visibility = "hidden";

    document.body.appendChild(link);
    link.click();
    document.body.removeChild(link);
  };

  const getStatusBadge = (status: string) => {
    switch (status) {
      case "success":
        return <Badge variant="success">Success</Badge>;
      case "failed":
        return <Badge variant="error">Failed</Badge>;
      case "skipped":
        return <Badge variant="info">Skipped</Badge>;
      default:
        return <Badge variant="info">{status}</Badge>;
    }
  };

  const getScoreDisplay = (score?: number) => {
    if (score === undefined || score === null) return "—";

    const percentage = Math.round(score * 100);
    let colorClass = "";

    if (score >= 0.8) colorClass = "text-green-600";
    else if (score >= 0.6) colorClass = "text-yellow-600";
    else if (score >= 0.4) colorClass = "text-orange-600";
    else colorClass = "text-red-600";

    return <span className={colorClass}>{percentage}%</span>;
  };

  return (
    <div className="space-y-4">
      {/* Summary Stats */}
      <div className="rounded-lg bg-gray-50 p-4">
        <Text variant="h3" className="mb-3">
          Analytics Summary
        </Text>
        <div className="grid grid-cols-2 gap-4 text-sm md:grid-cols-5">
          <div>
            <Text variant="body" className="text-gray-600">
              Total Executions:
            </Text>
            <Text variant="h4" className="font-semibold">
              {results.total_executions}
            </Text>
          </div>
          <div>
            <Text variant="body" className="text-gray-600">
              Processed:
            </Text>
            <Text variant="h4" className="font-semibold">
              {results.processed_executions}
            </Text>
          </div>
          <div>
            <Text variant="body" className="text-gray-600">
              Successful:
            </Text>
            <Text variant="h4" className="font-semibold text-green-600">
              {results.successful_analytics}
            </Text>
          </div>
          <div>
            <Text variant="body" className="text-gray-600">
              Failed:
            </Text>
            <Text variant="h4" className="font-semibold text-red-600">
              {results.failed_analytics}
            </Text>
          </div>
          <div>
            <Text variant="body" className="text-gray-600">
              Skipped:
            </Text>
            <Text variant="h4" className="font-semibold text-gray-600">
              {results.skipped_executions}
            </Text>
          </div>
        </div>
      </div>

      {/* Export Button */}
      <div className="flex justify-end">
        <Button
          variant="outline"
          onClick={exportToCSV}
          disabled={results.results.length === 0}
        >
          <DownloadIcon size={16} className="mr-2" />
          Export CSV
        </Button>
      </div>

      {/* Results Table */}
      {results.results.length > 0 ? (
        <div className="overflow-hidden rounded-lg border">
          <div className="overflow-x-auto">
            <table className="w-full">
              <thead className="bg-gray-50">
                <tr>
                  <th className="px-4 py-3 text-left">
                    <Text variant="body" className="font-medium text-gray-600">
                      Agent ID
                    </Text>
                  </th>
                  <th className="px-4 py-3 text-left">
                    <Text variant="body" className="font-medium text-gray-600">
                      Version
                    </Text>
                  </th>
                  <th className="px-4 py-3 text-left">
                    <Text variant="body" className="font-medium text-gray-600">
                      User ID
                    </Text>
                  </th>
                  <th className="px-4 py-3 text-left">
                    <Text variant="body" className="font-medium text-gray-600">
                      Execution ID
                    </Text>
                  </th>
                  <th className="px-4 py-3 text-left">
                    <Text variant="body" className="font-medium text-gray-600">
                      Status
                    </Text>
                  </th>
                  <th className="px-4 py-3 text-left">
                    <Text variant="body" className="font-medium text-gray-600">
                      Score
                    </Text>
                  </th>
                  <th className="px-4 py-3 text-left">
                    <Text variant="body" className="font-medium text-gray-600">
                      Actions
                    </Text>
                  </th>
                </tr>
              </thead>
              <tbody className="divide-y divide-gray-200">
                {results.results.map((result) => (
                  <React.Fragment key={result.exec_id}>
                    <tr className="hover:bg-gray-50">
                      <td className="px-4 py-3">
                        {createCopyableId(result.agent_id, "Agent ID")}
                      </td>
                      <td className="px-4 py-3">
                        <Text variant="body">{result.version_id}</Text>
                      </td>
                      <td className="px-4 py-3">
                        {createCopyableId(result.user_id, "User ID")}
                      </td>
                      <td className="px-4 py-3">
                        {createCopyableId(result.exec_id, "Execution ID")}
                      </td>
                      <td className="px-4 py-3">
                        {getStatusBadge(result.status)}
                      </td>
                      <td className="px-4 py-3">
                        {getScoreDisplay(
                          typeof result.score === "number"
                            ? result.score
                            : undefined,
                        )}
                      </td>
                      <td className="px-4 py-3">
                        {(result.summary_text || result.error_message) && (
                          <Button
                            variant="ghost"
                            size="small"
                            onClick={() => toggleRowExpansion(result.exec_id)}
                          >
                            <EyeIcon size={16} />
                          </Button>
                        )}
                      </td>
                    </tr>

                    {expandedRows.has(result.exec_id) && (
                      <tr>
                        <td colSpan={7} className="bg-gray-50 px-4 py-3">
                          <div className="space-y-3">
                            {result.summary_text && (
                              <div>
                                <Text
                                  variant="body"
                                  className="mb-1 font-medium text-gray-700"
                                >
                                  Summary:
                                </Text>
                                <Text
                                  variant="body"
                                  className="leading-relaxed text-gray-600"
                                >
                                  {result.summary_text}
                                </Text>
                              </div>
                            )}

                            {result.error_message && (
                              <div>
                                <Text
                                  variant="body"
                                  className="mb-1 font-medium text-red-700"
                                >
                                  Error:
                                </Text>
                                <Text
                                  variant="body"
                                  className="leading-relaxed text-red-600"
                                >
                                  {result.error_message}
                                </Text>
                              </div>
                            )}
                          </div>
                        </td>
                      </tr>
                    )}
                  </React.Fragment>
                ))}
              </tbody>
            </table>
          </div>
        </div>
      ) : (
        <div className="py-8 text-center">
          <Text variant="body" className="text-gray-500">
            No executions were processed.
          </Text>
        </div>
      )}
    </div>
  );
}
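The CSV export above applies the standard quoting rule to free-text fields: double any embedded quote and wrap the field in quotes. The same rule shown in Python (via the `csv` module) for comparison:

```python
import csv
import io

summary = 'Agent said "done" and stopped'

# Manual escaping, mirroring the replace(/"/g, '""') in the component
manual = '"' + summary.replace('"', '""') + '"'

# csv module equivalent
buf = io.StringIO()
csv.writer(buf, quoting=csv.QUOTE_ALL).writerow([summary])
assert buf.getvalue().strip() == manual
```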
@@ -0,0 +1,212 @@
"use client";

import { useState } from "react";
import { Button } from "@/components/atoms/Button/Button";
import { Input } from "@/components/__legacy__/ui/input";
import { Label } from "@/components/__legacy__/ui/label";
import {
  Select,
  SelectContent,
  SelectItem,
  SelectTrigger,
  SelectValue,
} from "@/components/__legacy__/ui/select";
import { useToast } from "@/components/molecules/Toast/use-toast";
import { usePostV2GenerateExecutionAnalytics } from "@/app/api/__generated__/endpoints/admin/admin";
import type { ExecutionAnalyticsRequest } from "@/app/api/__generated__/models/executionAnalyticsRequest";
import type { ExecutionAnalyticsResponse } from "@/app/api/__generated__/models/executionAnalyticsResponse";

// Local interface for form state to simplify handling
interface FormData {
  graph_id: string;
  graph_version?: number;
  user_id?: string;
  created_after?: string;
  model_name: string;
  batch_size: number;
}
import { AnalyticsResultsTable } from "./AnalyticsResultsTable";

const MODEL_OPTIONS = [
  { value: "gpt-4o-mini", label: "GPT-4o Mini (Recommended)" },
  { value: "gpt-4o", label: "GPT-4o" },
  { value: "gpt-4-turbo", label: "GPT-4 Turbo" },
  { value: "gpt-4.1", label: "GPT-4.1" },
  { value: "gpt-4.1-mini", label: "GPT-4.1 Mini" },
];

export function ExecutionAnalyticsForm() {
  const [results, setResults] = useState<ExecutionAnalyticsResponse | null>(
    null,
  );
  const { toast } = useToast();

  const generateAnalytics = usePostV2GenerateExecutionAnalytics({
    mutation: {
      onSuccess: (res) => {
        if (res.status !== 200) {
          throw new Error("Something went wrong!");
        }
        const result = res.data;
        setResults(result);
        toast({
          title: "Analytics Generated",
          description: `Processed ${result.processed_executions} executions. ${result.successful_analytics} successful, ${result.failed_analytics} failed, ${result.skipped_executions} skipped.`,
          variant: "default",
        });
      },
      onError: (error: any) => {
        console.error("Analytics generation error:", error);
        toast({
          title: "Analytics Generation Failed",
          description:
            error?.message || error?.detail || "An unexpected error occurred",
          variant: "destructive",
        });
      },
    },
  });

  const [formData, setFormData] = useState<FormData>({
    graph_id: "",
    model_name: "gpt-4o-mini",
    batch_size: 10, // Fixed internal value
  });

  const handleSubmit = async (e: React.FormEvent) => {
    e.preventDefault();

    if (!formData.graph_id.trim()) {
      toast({
        title: "Validation Error",
        description: "Graph ID is required",
        variant: "destructive",
      });
      return;
    }

    setResults(null);

    // Prepare the request payload
    const payload: ExecutionAnalyticsRequest = {
      graph_id: formData.graph_id.trim(),
      model_name: formData.model_name,
      batch_size: formData.batch_size,
    };

    if (formData.graph_version) {
      payload.graph_version = formData.graph_version;
    }

    if (formData.user_id?.trim()) {
      payload.user_id = formData.user_id.trim();
    }

    if (
      formData.created_after &&
      typeof formData.created_after === "string" &&
      formData.created_after.trim()
    ) {
      payload.created_after = new Date(formData.created_after.trim());
    }

    generateAnalytics.mutate({ data: payload });
  };

  const handleInputChange = (field: keyof FormData, value: any) => {
    setFormData((prev: FormData) => ({ ...prev, [field]: value }));
  };

  return (
    <div className="space-y-6">
      <form onSubmit={handleSubmit} className="space-y-4">
        <div className="grid grid-cols-1 gap-4 md:grid-cols-2">
          <div className="space-y-2">
            <Label htmlFor="graph_id">
              Graph ID <span className="text-red-500">*</span>
            </Label>
            <Input
              id="graph_id"
              value={formData.graph_id}
              onChange={(e) => handleInputChange("graph_id", e.target.value)}
              placeholder="Enter graph/agent ID"
              required
            />
          </div>

          <div className="space-y-2">
            <Label htmlFor="graph_version">Graph Version</Label>
            <Input
              id="graph_version"
              type="number"
              value={formData.graph_version || ""}
              onChange={(e) =>
                handleInputChange(
                  "graph_version",
                  e.target.value ? parseInt(e.target.value) : undefined,
                )
              }
              placeholder="Optional - leave empty for all versions"
            />
          </div>

          <div className="space-y-2">
            <Label htmlFor="user_id">User ID</Label>
            <Input
              id="user_id"
              value={formData.user_id || ""}
              onChange={(e) => handleInputChange("user_id", e.target.value)}
              placeholder="Optional - leave empty for all users"
            />
          </div>

          <div className="space-y-2">
            <Label htmlFor="created_after">Created After</Label>
            <Input
              id="created_after"
              type="datetime-local"
              value={formData.created_after || ""}
              onChange={(e) =>
                handleInputChange("created_after", e.target.value)
              }
            />
          </div>

          <div className="space-y-2">
            <Label htmlFor="model_name">AI Model</Label>
            <Select
              value={formData.model_name}
              onValueChange={(value) => handleInputChange("model_name", value)}
            >
              <SelectTrigger>
                <SelectValue placeholder="Select AI model" />
              </SelectTrigger>
              <SelectContent>
                {MODEL_OPTIONS.map((option) => (
                  <SelectItem key={option.value} value={option.value}>
                    {option.label}
                  </SelectItem>
                ))}
              </SelectContent>
            </Select>
          </div>
        </div>

        <div className="flex justify-end">
          <Button
            variant="primary"
            size="large"
            type="submit"
            disabled={generateAnalytics.isPending}
          >
            {generateAnalytics.isPending
              ? "Processing..."
              : "Generate Analytics"}
          </Button>
        </div>
      </form>

      {results && <AnalyticsResultsTable results={results} />}
    </div>
  );
}
@@ -0,0 +1,46 @@
import { withRoleAccess } from "@/lib/withRoleAccess";
import { Suspense } from "react";
import { ExecutionAnalyticsForm } from "./components/ExecutionAnalyticsForm";

function ExecutionAnalyticsDashboard() {
  return (
    <div className="mx-auto p-6">
      <div className="flex flex-col gap-6">
        <div className="flex items-center justify-between">
          <div>
            <h1 className="text-3xl font-bold">Execution Analytics</h1>
            <p className="text-gray-500">
              Generate missing activity summaries and success scores for agent
              executions
            </p>
          </div>
        </div>

        <div className="rounded-lg border bg-white p-6 shadow-sm">
          <h2 className="mb-4 text-xl font-semibold">Analytics Generation</h2>
          <p className="mb-6 text-gray-600">
            This tool will identify completed executions missing activity
            summaries or success scores and generate them using AI. Only
            executions that meet the criteria and are missing these fields will
            be processed.
          </p>

          <Suspense
            fallback={<div className="py-10 text-center">Loading...</div>}
          >
            <ExecutionAnalyticsForm />
          </Suspense>
        </div>
      </div>
    </div>
  );
}

export default async function ExecutionAnalyticsPage() {
  "use server";
  const withAdminAccess = await withRoleAccess(["admin"]);
  const ProtectedExecutionAnalyticsDashboard = await withAdminAccess(
    ExecutionAnalyticsDashboard,
  );
  return <ProtectedExecutionAnalyticsDashboard />;
}
@@ -1,5 +1,5 @@
import { Sidebar } from "@/components/__legacy__/Sidebar";
-import { Users, DollarSign, UserSearch } from "lucide-react";
+import { Users, DollarSign, UserSearch, FileText } from "lucide-react";

import { IconSliders } from "@/components/__legacy__/ui/icons";
@@ -21,6 +21,11 @@ const sidebarLinkGroups = [
        href: "/admin/impersonation",
        icon: <UserSearch className="h-6 w-6" />,
      },
+     {
+       text: "Execution Analytics",
+       href: "/admin/execution-analytics",
+       icon: <FileText className="h-6 w-6" />,
+     },
      {
        text: "Admin User Management",
        href: "/admin/settings",
@@ -305,10 +305,62 @@ export function AgentRunDetailsView({
              </TooltipProvider>
            </CardTitle>
          </CardHeader>
-         <CardContent>
+         <CardContent className="space-y-4">
            <p className="text-sm leading-relaxed text-neutral-700">
              {run.stats.activity_status}
            </p>

            {/* Correctness Score */}
            {typeof run.stats.correctness_score === "number" && (
              <div className="flex items-center gap-3 rounded-lg bg-neutral-50 p-3">
                <div className="flex items-center gap-2">
                  <span className="text-sm font-medium text-neutral-600">
                    Success Estimate:
                  </span>
                  <div className="flex items-center gap-2">
                    <div className="relative h-2 w-16 overflow-hidden rounded-full bg-neutral-200">
                      <div
                        className={`h-full transition-all ${
                          run.stats.correctness_score >= 0.8
                            ? "bg-green-500"
                            : run.stats.correctness_score >= 0.6
                              ? "bg-yellow-500"
                              : run.stats.correctness_score >= 0.4
                                ? "bg-orange-500"
                                : "bg-red-500"
                        }`}
                        style={{
                          width: `${Math.round(run.stats.correctness_score * 100)}%`,
                        }}
                      />
                    </div>
                    <span className="text-sm font-medium">
                      {Math.round(run.stats.correctness_score * 100)}%
                    </span>
                  </div>
                </div>
                <TooltipProvider>
                  <Tooltip>
                    <TooltipTrigger asChild>
                      <IconCircleAlert className="size-4 cursor-help text-neutral-400 hover:text-neutral-600" />
                    </TooltipTrigger>
                    <TooltipContent>
                      <p className="max-w-xs">
                        AI-generated estimate of how well this execution
                        achieved its intended purpose. This score indicates
                        {run.stats.correctness_score >= 0.8
                          ? " the agent was highly successful."
                          : run.stats.correctness_score >= 0.6
                            ? " the agent was mostly successful with minor issues."
                            : run.stats.correctness_score >= 0.4
                              ? " the agent was partially successful with some gaps."
                              : " the agent had limited success with significant issues."}
                      </p>
                    </TooltipContent>
                  </Tooltip>
                </TooltipProvider>
              </div>
            )}
          </CardContent>
        </Card>
      )}
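Both the admin results table and this run-details view bucket the correctness score at the same 0.8 / 0.6 / 0.4 cut-offs. A sketch of that mapping (the helper name and labels are illustrative only, not part of the codebase):

```python
def score_band(score: float) -> str:
    if score >= 0.8:
        return "highly successful"      # rendered green
    if score >= 0.6:
        return "mostly successful"      # yellow
    if score >= 0.4:
        return "partially successful"   # orange
    return "limited success"            # red


assert score_band(0.93) == "highly successful"
assert score_band(0.45) == "partially successful"
print(f"{round(0.62 * 100)}%", score_band(0.62))   # "62% mostly successful"
```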
@@ -3886,6 +3886,48 @@
        }
      }
    },
    "/api/executions/admin/execution_analytics": {
      "post": {
        "tags": ["v2", "admin", "admin", "execution_analytics"],
        "summary": "Generate Execution Analytics",
        "description": "Generate activity summaries and correctness scores for graph executions.\n\nThis endpoint:\n1. Fetches all completed executions matching the criteria\n2. Identifies executions missing activity_status or correctness_score\n3. Generates missing data using AI in batches\n4. Updates the database with new stats\n5. Returns a detailed report of the analytics operation",
        "operationId": "postV2Generate execution analytics",
        "requestBody": {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/ExecutionAnalyticsRequest"
              }
            }
          },
          "required": true
        },
        "responses": {
          "200": {
            "description": "Successful Response",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ExecutionAnalyticsResponse"
                }
              }
            }
          },
          "422": {
            "description": "Validation Error",
            "content": {
              "application/json": {
                "schema": { "$ref": "#/components/schemas/HTTPValidationError" }
              }
            }
          },
          "401": {
            "$ref": "#/components/responses/HTTP401NotAuthenticatedError"
          }
        },
        "security": [{ "HTTPBearerJWT": [] }]
      }
    },
    "/api/library/presets": {
      "get": {
        "tags": ["v2", "presets"],
@@ -5755,6 +5797,123 @@
        "required": ["url", "relevance_score"],
        "title": "Document"
      },
      "ExecutionAnalyticsRequest": {
        "properties": {
          "graph_id": {
            "type": "string",
            "title": "Graph Id",
            "description": "Graph ID to analyze"
          },
          "graph_version": {
            "anyOf": [{ "type": "integer" }, { "type": "null" }],
            "title": "Graph Version",
            "description": "Optional graph version"
          },
          "user_id": {
            "anyOf": [{ "type": "string" }, { "type": "null" }],
            "title": "User Id",
            "description": "Optional user ID filter"
          },
          "created_after": {
            "anyOf": [
              { "type": "string", "format": "date-time" },
              { "type": "null" }
            ],
            "title": "Created After",
            "description": "Optional created date lower bound"
          },
          "model_name": {
            "anyOf": [{ "type": "string" }, { "type": "null" }],
            "title": "Model Name",
            "description": "Model to use for generation",
            "default": "gpt-4o-mini"
          },
          "batch_size": {
            "type": "integer",
            "maximum": 25.0,
            "minimum": 1.0,
            "title": "Batch Size",
            "description": "Batch size for concurrent processing",
            "default": 10
          }
        },
        "type": "object",
        "required": ["graph_id"],
        "title": "ExecutionAnalyticsRequest"
      },
      "ExecutionAnalyticsResponse": {
        "properties": {
          "total_executions": {
            "type": "integer",
            "title": "Total Executions"
          },
          "processed_executions": {
            "type": "integer",
            "title": "Processed Executions"
          },
          "successful_analytics": {
            "type": "integer",
            "title": "Successful Analytics"
          },
          "failed_analytics": {
            "type": "integer",
            "title": "Failed Analytics"
          },
          "skipped_executions": {
            "type": "integer",
            "title": "Skipped Executions"
          },
          "results": {
            "items": {
              "$ref": "#/components/schemas/ExecutionAnalyticsResult"
            },
            "type": "array",
            "title": "Results"
          }
        },
        "type": "object",
        "required": [
          "total_executions",
          "processed_executions",
          "successful_analytics",
          "failed_analytics",
          "skipped_executions",
          "results"
        ],
        "title": "ExecutionAnalyticsResponse"
      },
      "ExecutionAnalyticsResult": {
        "properties": {
          "agent_id": { "type": "string", "title": "Agent Id" },
          "version_id": { "type": "integer", "title": "Version Id" },
          "user_id": { "type": "string", "title": "User Id" },
          "exec_id": { "type": "string", "title": "Exec Id" },
          "summary_text": {
            "anyOf": [{ "type": "string" }, { "type": "null" }],
            "title": "Summary Text"
          },
          "score": {
            "anyOf": [{ "type": "number" }, { "type": "null" }],
            "title": "Score"
          },
          "status": { "type": "string", "title": "Status" },
          "error_message": {
            "anyOf": [{ "type": "string" }, { "type": "null" }],
            "title": "Error Message"
          }
        },
        "type": "object",
        "required": [
          "agent_id",
          "version_id",
          "user_id",
          "exec_id",
          "summary_text",
          "score",
          "status"
        ],
        "title": "ExecutionAnalyticsResult"
      },
      "Graph": {
        "properties": {
          "id": { "type": "string", "title": "Id" },