diff --git a/apps/sim/app/api/knowledge/[id]/documents/route.ts b/apps/sim/app/api/knowledge/[id]/documents/route.ts index ee0712aed..84c330d74 100644 --- a/apps/sim/app/api/knowledge/[id]/documents/route.ts +++ b/apps/sim/app/api/knowledge/[id]/documents/route.ts @@ -186,6 +186,20 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id: `[${requestId}] Starting controlled async processing of ${createdDocuments.length} documents` ) + // Track bulk document upload + try { + const { trackPlatformEvent } = await import('@/lib/telemetry/tracer') + trackPlatformEvent('platform.knowledge_base.documents_uploaded', { + 'knowledge_base.id': knowledgeBaseId, + 'documents.count': createdDocuments.length, + 'documents.upload_type': 'bulk', + 'processing.chunk_size': validatedData.processingOptions.chunkSize, + 'processing.recipe': validatedData.processingOptions.recipe, + }) + } catch (_e) { + // Silently fail + } + processDocumentsWithQueue( createdDocuments, knowledgeBaseId, @@ -231,6 +245,20 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id: const newDocument = await createSingleDocument(validatedData, knowledgeBaseId, requestId) + // Track single document upload + try { + const { trackPlatformEvent } = await import('@/lib/telemetry/tracer') + trackPlatformEvent('platform.knowledge_base.documents_uploaded', { + 'knowledge_base.id': knowledgeBaseId, + 'documents.count': 1, + 'documents.upload_type': 'single', + 'document.mime_type': validatedData.mimeType, + 'document.file_size': validatedData.fileSize, + }) + } catch (_e) { + // Silently fail + } + return NextResponse.json({ success: true, data: newDocument, diff --git a/apps/sim/app/api/knowledge/search/route.test.ts b/apps/sim/app/api/knowledge/search/route.test.ts index 68e5b97ba..50c3c3a7f 100644 --- a/apps/sim/app/api/knowledge/search/route.test.ts +++ b/apps/sim/app/api/knowledge/search/route.test.ts @@ -214,10 +214,6 @@ describe('Knowledge Search API Route', () => { const response = await POST(req) const data = await response.json() - if (response.status !== 200) { - console.log('Test failed with response:', data) - } - expect(response.status).toBe(200) expect(data.success).toBe(true) expect(data.data.results).toHaveLength(2) @@ -723,10 +719,6 @@ describe('Knowledge Search API Route', () => { const response = await POST(req) const data = await response.json() - if (response.status !== 200) { - console.log('Tag-only search test error:', data) - } - expect(response.status).toBe(200) expect(data.success).toBe(true) expect(data.data.results).toHaveLength(2) @@ -783,10 +775,6 @@ describe('Knowledge Search API Route', () => { const response = await POST(req) const data = await response.json() - if (response.status !== 200) { - console.log('Query+tag combination test error:', data) - } - expect(response.status).toBe(200) expect(data.success).toBe(true) expect(data.data.results).toHaveLength(2) diff --git a/apps/sim/app/api/mcp/servers/route.ts b/apps/sim/app/api/mcp/servers/route.ts index 6c82eb275..9394e5bf8 100644 --- a/apps/sim/app/api/mcp/servers/route.ts +++ b/apps/sim/app/api/mcp/servers/route.ts @@ -106,6 +106,20 @@ export const POST = withMcpAuth('write')( mcpService.clearCache(workspaceId) logger.info(`[${requestId}] Successfully registered MCP server: ${body.name}`) + + // Track MCP server registration + try { + const { trackPlatformEvent } = await import('@/lib/telemetry/tracer') + trackPlatformEvent('platform.mcp.server_added', { + 'mcp.server_id': serverId, + 
'mcp.server_name': body.name, + 'mcp.transport': body.transport, + 'workspace.id': workspaceId, + }) + } catch (_e) { + // Silently fail + } + return createMcpSuccessResponse({ serverId }, 201) } catch (error) { logger.error(`[${requestId}] Error registering MCP server:`, error) diff --git a/apps/sim/app/api/mcp/tools/execute/route.ts b/apps/sim/app/api/mcp/tools/execute/route.ts index d4951fe9a..cecac072c 100644 --- a/apps/sim/app/api/mcp/tools/execute/route.ts +++ b/apps/sim/app/api/mcp/tools/execute/route.ts @@ -174,6 +174,20 @@ export const POST = withMcpAuth('read')( ) } logger.info(`[${requestId}] Successfully executed tool ${toolName} on server ${serverId}`) + + // Track MCP tool execution + try { + const { trackPlatformEvent } = await import('@/lib/telemetry/tracer') + trackPlatformEvent('platform.mcp.tool_executed', { + 'mcp.server_id': serverId, + 'mcp.tool_name': toolName, + 'mcp.execution_status': 'success', + 'workspace.id': workspaceId, + }) + } catch (_e) { + // Silently fail + } + return createMcpSuccessResponse(transformedResult) } catch (error) { logger.error(`[${requestId}] Error executing MCP tool:`, error) diff --git a/apps/sim/app/api/schedules/route.ts b/apps/sim/app/api/schedules/route.ts index a47acec32..ecbd7756d 100644 --- a/apps/sim/app/api/schedules/route.ts +++ b/apps/sim/app/api/schedules/route.ts @@ -366,6 +366,19 @@ export async function POST(req: NextRequest) { cronExpression, }) + // Track schedule creation/update + try { + const { trackPlatformEvent } = await import('@/lib/telemetry/tracer') + trackPlatformEvent('platform.schedule.created', { + 'workflow.id': workflowId, + 'schedule.type': scheduleType || 'daily', + 'schedule.timezone': timezone, + 'schedule.is_custom': scheduleType === 'custom', + }) + } catch (_e) { + // Silently fail + } + return NextResponse.json({ message: 'Schedule updated', nextRunAt, diff --git a/apps/sim/app/api/telemetry/route.ts b/apps/sim/app/api/telemetry/route.ts index 5b7b44664..d2fc0c43c 100644 --- a/apps/sim/app/api/telemetry/route.ts +++ b/apps/sim/app/api/telemetry/route.ts @@ -12,6 +12,7 @@ const ALLOWED_CATEGORIES = [ 'error', 'workflow', 'consent', + 'batch', ] const DEFAULT_TIMEOUT = 5000 // 5 seconds timeout @@ -132,7 +133,6 @@ async function forwardToCollector(data: any): Promise { ], } - // Create explicit AbortController for timeout const controller = new AbortController() const timeoutId = setTimeout(() => controller.abort(), timeout) diff --git a/apps/sim/app/api/templates/[id]/use/route.ts b/apps/sim/app/api/templates/[id]/use/route.ts index f228ec666..88165d8a3 100644 --- a/apps/sim/app/api/templates/[id]/use/route.ts +++ b/apps/sim/app/api/templates/[id]/use/route.ts @@ -174,6 +174,23 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{ `[${requestId}] Successfully used template: ${id}, created workflow: ${newWorkflowId}, database returned: ${result.id}` ) + // Track template usage + try { + const { trackPlatformEvent } = await import('@/lib/telemetry/tracer') + const templateState = templateData.state as any + trackPlatformEvent('platform.template.used', { + 'template.id': id, + 'template.name': templateData.name, + 'workflow.created_id': newWorkflowId, + 'workflow.blocks_count': templateState?.blocks + ? 
Object.keys(templateState.blocks).length + : 0, + 'workspace.id': workspaceId, + }) + } catch (_e) { + // Silently fail + } + // Verify the workflow was actually created const verifyWorkflow = await db .select({ id: workflow.id }) diff --git a/apps/sim/app/api/workflows/[id]/deploy/route.ts b/apps/sim/app/api/workflows/[id]/deploy/route.ts index b153aa4fc..7a3aad016 100644 --- a/apps/sim/app/api/workflows/[id]/deploy/route.ts +++ b/apps/sim/app/api/workflows/[id]/deploy/route.ts @@ -353,6 +353,31 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{ logger.info(`[${requestId}] Workflow deployed successfully: ${id}`) + // Track workflow deployment + try { + const { trackPlatformEvent } = await import('@/lib/telemetry/tracer') + + // Aggregate block types to understand which blocks are being used + const blockTypeCounts: Record = {} + for (const block of Object.values(currentState.blocks)) { + const blockType = (block as any).type || 'unknown' + blockTypeCounts[blockType] = (blockTypeCounts[blockType] || 0) + 1 + } + + trackPlatformEvent('platform.workflow.deployed', { + 'workflow.id': id, + 'workflow.name': workflowData!.name, + 'workflow.blocks_count': Object.keys(currentState.blocks).length, + 'workflow.edges_count': currentState.edges.length, + 'workflow.has_loops': Object.keys(currentState.loops).length > 0, + 'workflow.has_parallels': Object.keys(currentState.parallels).length > 0, + 'workflow.api_key_type': keyInfo?.type || 'default', + 'workflow.block_types': JSON.stringify(blockTypeCounts), + }) + } catch (_e) { + // Silently fail + } + const responseApiKeyInfo = keyInfo ? `${keyInfo.name} (${keyInfo.type})` : 'Default key' return createSuccessResponse({ @@ -400,6 +425,17 @@ export async function DELETE( }) logger.info(`[${requestId}] Workflow undeployed successfully: ${id}`) + + // Track workflow undeployment + try { + const { trackPlatformEvent } = await import('@/lib/telemetry/tracer') + trackPlatformEvent('platform.workflow.undeployed', { + 'workflow.id': id, + }) + } catch (_e) { + // Silently fail + } + return createSuccessResponse({ isDeployed: false, deployedAt: null, diff --git a/apps/sim/app/api/workflows/[id]/log/route.ts b/apps/sim/app/api/workflows/[id]/log/route.ts index 75bec17ce..c862cef76 100644 --- a/apps/sim/app/api/workflows/[id]/log/route.ts +++ b/apps/sim/app/api/workflows/[id]/log/route.ts @@ -44,20 +44,20 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{ variables: {}, }) - const { traceSpans } = buildTraceSpans(result) + const { traceSpans, totalDuration } = buildTraceSpans(result) if (result.success === false) { const message = result.error || 'Workflow execution failed' await loggingSession.safeCompleteWithError({ endedAt: new Date().toISOString(), - totalDurationMs: result.metadata?.duration || 0, + totalDurationMs: totalDuration || result.metadata?.duration || 0, error: { message }, traceSpans, }) } else { await loggingSession.safeComplete({ endedAt: new Date().toISOString(), - totalDurationMs: result.metadata?.duration || 0, + totalDurationMs: totalDuration || result.metadata?.duration || 0, finalOutput: result.output || {}, traceSpans, }) diff --git a/apps/sim/app/api/workflows/route.ts b/apps/sim/app/api/workflows/route.ts index 62045859a..a90f11723 100644 --- a/apps/sim/app/api/workflows/route.ts +++ b/apps/sim/app/api/workflows/route.ts @@ -99,6 +99,19 @@ export async function POST(req: NextRequest) { logger.info(`[${requestId}] Creating workflow ${workflowId} for user 
${session.user.id}`) + // Track workflow creation + try { + const { trackPlatformEvent } = await import('@/lib/telemetry/tracer') + trackPlatformEvent('platform.workflow.created', { + 'workflow.id': workflowId, + 'workflow.name': name, + 'workflow.has_workspace': !!workspaceId, + 'workflow.has_folder': !!folderId, + }) + } catch (_e) { + // Silently fail + } + await db.insert(workflow).values({ id: workflowId, userId: session.user.id, diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/sidebar/sidebar.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/sidebar/sidebar.tsx index 214772865..b39853f40 100644 --- a/apps/sim/app/workspace/[workspaceId]/logs/components/sidebar/sidebar.tsx +++ b/apps/sim/app/workspace/[workspaceId]/logs/components/sidebar/sidebar.tsx @@ -12,7 +12,7 @@ import { FrozenCanvasModal } from '@/app/workspace/[workspaceId]/logs/components import { FileDownload } from '@/app/workspace/[workspaceId]/logs/components/sidebar/components/file-download' import LogMarkdownRenderer from '@/app/workspace/[workspaceId]/logs/components/sidebar/components/markdown-renderer' import { ToolCallsDisplay } from '@/app/workspace/[workspaceId]/logs/components/tool-calls/tool-calls-display' -import { TraceSpansDisplay } from '@/app/workspace/[workspaceId]/logs/components/trace-spans/trace-spans-display' +import { TraceSpans } from '@/app/workspace/[workspaceId]/logs/components/trace-spans/trace-spans' import { formatDate } from '@/app/workspace/[workspaceId]/logs/utils/format-date' import { formatCost } from '@/providers/utils' import type { WorkflowLog } from '@/stores/logs/filters/types' @@ -32,7 +32,6 @@ interface LogSidebarProps { */ const tryPrettifyJson = (content: string): { isJson: boolean; formatted: string } => { try { - // First check if the content looks like JSON (starts with { or [) const trimmed = content.trim() if ( !(trimmed.startsWith('{') || trimmed.startsWith('[')) || @@ -41,12 +40,10 @@ const tryPrettifyJson = (content: string): { isJson: boolean; formatted: string return { isJson: false, formatted: content } } - // Try to parse the JSON const parsed = JSON.parse(trimmed) const prettified = JSON.stringify(parsed, null, 2) return { isJson: true, formatted: prettified } } catch (_e) { - // If parsing fails, it's not valid JSON return { isJson: false, formatted: content } } } @@ -55,7 +52,6 @@ const tryPrettifyJson = (content: string): { isJson: boolean; formatted: string * Formats JSON content for display, handling multiple JSON objects separated by '--' */ const formatJsonContent = (content: string, blockInput?: Record): React.ReactNode => { - // Look for a pattern like "Block Agent 1 (agent):" to separate system comment from content const blockPattern = /^(Block .+?\(.+?\):)\s*/ const match = content.match(blockPattern) @@ -74,7 +70,6 @@ const formatJsonContent = (content: string, blockInput?: Record): R ) } - // If no system comment pattern found, show the whole content const { isJson, formatted } = tryPrettifyJson(content) return ( @@ -241,7 +236,6 @@ export function Sidebar({ } }, [log?.id]) - // Determine if this is a workflow execution log const isWorkflowExecutionLog = useMemo(() => { if (!log) return false return ( @@ -250,8 +244,6 @@ export function Sidebar({ ) }, [log]) - // Helper to determine if we have cost information to display - // All workflow executions now have cost info (base charge + any model costs) const hasCostInfo = useMemo(() => { return isWorkflowExecutionLog && log?.cost }, [log, isWorkflowExecutionLog]) @@ -260,18 +252,14 
@@ export function Sidebar({ return isWorkflowExecutionLog && hasCostInfo }, [isWorkflowExecutionLog, hasCostInfo]) - // Handle trace span expansion state const handleTraceSpanToggle = (expanded: boolean) => { setIsTraceExpanded(expanded) - // If a trace span is expanded, increase the sidebar width only if it's currently below the expanded width if (expanded) { - // Only expand if current width is less than expanded width if (width < EXPANDED_WIDTH) { setWidth(EXPANDED_WIDTH) } } else { - // If all trace spans are collapsed, revert to default width only if we're at expanded width if (width === EXPANDED_WIDTH) { setWidth(DEFAULT_WIDTH) } @@ -288,7 +276,6 @@ export function Sidebar({ const handleMouseMove = (e: MouseEvent) => { if (isDragging) { const newWidth = window.innerWidth - e.clientX - // Maintain minimum width and respect expansion state const minWidthToUse = isTraceExpanded ? Math.max(MIN_WIDTH, EXPANDED_WIDTH) : MIN_WIDTH setWidth(Math.max(minWidthToUse, Math.min(newWidth, window.innerWidth * 0.8))) } @@ -309,22 +296,18 @@ export function Sidebar({ } }, [isDragging, isTraceExpanded, MIN_WIDTH, EXPANDED_WIDTH, width]) - // Handle escape key to close the sidebar useEffect(() => { const handleKeyDown = (e: KeyboardEvent) => { if (e.key === 'Escape' && isOpen) { onClose() } - // Add keyboard shortcuts for navigation if (isOpen) { - // Up arrow key for previous log if (e.key === 'ArrowUp' && hasPrev && onNavigatePrev) { e.preventDefault() handleNavigate(onNavigatePrev) } - // Down arrow key for next log if (e.key === 'ArrowDown' && hasNext && onNavigateNext) { e.preventDefault() handleNavigate(onNavigateNext) @@ -336,7 +319,6 @@ export function Sidebar({ return () => window.removeEventListener('keydown', handleKeyDown) }, [isOpen, onClose, hasPrev, hasNext, onNavigatePrev, onNavigateNext]) - // Handle navigation const handleNavigate = (navigateFunction: () => void) => { navigateFunction() } @@ -530,10 +512,10 @@ export function Sidebar({ Workflow State + {inputExpanded && ( +
+ )} + + )} + + {span.output && ( +
+ + {outputExpanded && ( +
+ )} +
+ )} + + ) +} diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/components/trace-span-item.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/components/trace-span-item.tsx new file mode 100644 index 000000000..10a8c9eb7 --- /dev/null +++ b/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/components/trace-span-item.tsx @@ -0,0 +1,725 @@ +import type React from 'react' +import { ChevronDown, ChevronRight, Code, Cpu, ExternalLink } from 'lucide-react' +import { + AgentIcon, + ApiIcon, + ChartBarIcon, + CodeIcon, + ConditionalIcon, + ConnectIcon, +} from '@/components/icons' +import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from '@/components/ui/tooltip' +import { cn } from '@/lib/utils' +import { + CollapsibleInputOutput, + normalizeChildWorkflowSpan, +} from '@/app/workspace/[workspaceId]/logs/components/trace-spans' +import { getBlock } from '@/blocks/registry' +import { getProviderIcon } from '@/providers/utils' +import type { TraceSpan } from '@/stores/logs/filters/types' +import { getTool } from '@/tools/utils' + +interface TraceSpanItemProps { + span: TraceSpan + depth: number + totalDuration: number + isLast: boolean + parentStartTime: number + workflowStartTime: number + onToggle: (spanId: string, expanded: boolean, hasSubItems: boolean) => void + expandedSpans: Set + hasSubItems?: boolean + hoveredPercent?: number | null + hoveredWorkflowMs?: number | null + forwardHover: (clientX: number, clientY: number) => void + gapBeforeMs?: number + gapBeforePercent?: number + showRelativeChip?: boolean + chipVisibility?: { + model: boolean + toolProvider: boolean + tokens: boolean + cost: boolean + relative: boolean + } +} + +export function TraceSpanItem({ + span, + depth, + totalDuration, + parentStartTime, + workflowStartTime, + onToggle, + expandedSpans, + forwardHover, + gapBeforeMs = 0, + gapBeforePercent = 0, + showRelativeChip = true, + chipVisibility = { model: true, toolProvider: true, tokens: true, cost: true, relative: true }, +}: TraceSpanItemProps): React.ReactNode { + const spanId = span.id || `span-${span.name}-${span.startTime}` + const expanded = expandedSpans.has(spanId) + const hasChildren = span.children && span.children.length > 0 + const hasToolCalls = span.toolCalls && span.toolCalls.length > 0 + const hasInputOutput = Boolean(span.input || span.output) + const hasNestedItems = hasChildren || hasToolCalls || hasInputOutput + + const spanStartTime = new Date(span.startTime).getTime() + const spanEndTime = new Date(span.endTime).getTime() + const duration = span.duration || spanEndTime - spanStartTime + const startOffset = spanStartTime - parentStartTime + + const relativeStartPercent = + totalDuration > 0 ? ((spanStartTime - workflowStartTime) / totalDuration) * 100 : 0 + + const actualDurationPercent = totalDuration > 0 ? (duration / totalDuration) * 100 : 0 + + const safeStartPercent = Math.min(100, Math.max(0, relativeStartPercent)) + const safeWidthPercent = Math.max(2, Math.min(100 - safeStartPercent, actualDurationPercent)) + + const handleSpanClick = () => { + if (hasNestedItems) { + onToggle(spanId, !expanded, hasNestedItems) + } + } + + const getSpanIcon = () => { + const type = span.type.toLowerCase() + if (hasNestedItems) { + return expanded ? 
: + } + if (type === 'agent') + return + if (type === 'evaluator') return + if (type === 'condition') return + if (type === 'router') return + if (type === 'model') return + if (type === 'function') return + if (type === 'tool') { + const toolId = String(span.name || '') + const parts = toolId.split('_') + for (let i = parts.length; i > 0; i--) { + const candidate = parts.slice(0, i).join('_') + const block = getBlock(candidate) + if (block?.icon) { + const Icon = block.icon as React.ComponentType<{ + className?: string + style?: React.CSSProperties + }> + const color = (block as { bgColor?: string }).bgColor || '#f97316' + return + } + } + return + } + if (type === 'api') return + return + } + + const formatRelativeTime = (ms: number) => { + if (ms === 0) return 'start' + return `+${ms}ms` + } + + const getSpanColor = (type: string) => { + switch (type.toLowerCase()) { + case 'agent': + return 'var(--brand-primary-hover-hex)' + case 'provider': + return '#818cf8' + case 'model': + return '#10a37f' + case 'function': + return '#FF402F' + case 'tool': + return '#f97316' + case 'router': + return '#2FA1FF' + case 'condition': + return '#FF972F' + case 'evaluator': + return '#2FA1FF' + case 'api': + return '#2F55FF' + default: + return '#6b7280' + } + } + + // Prefer registry-provided block color; fallback to legacy per-type colors + const getBlockColor = (type: string) => { + try { + const block = getBlock(type) + const color = (block as { bgColor?: string } | null)?.bgColor + if (color) return color as string + } catch {} + return getSpanColor(type) + } + const spanColor = getBlockColor(span.type) + + const formatDuration = (ms: number) => { + if (ms < 1000) return `${ms}ms` + return `${(ms / 1000).toFixed(2)}s` + } + + const extractModelName = (spanName: string) => { + const modelMatch = spanName.match(/\(([\w.-]+)\)/i) + return modelMatch ? 
modelMatch[1] : '' + } + + const formatSpanName = (span: TraceSpan) => { + if (span.type === 'tool') { + const raw = String(span.name || '') + const tool = getTool(raw) + const displayName = (() => { + if (tool?.name) return tool.name + const parts = raw.split('_') + const label = parts.slice(1).join(' ') + if (label) { + return label.replace(/\b\w/g, (c) => c.toUpperCase()) + } + return raw.replace(/_/g, ' ').replace(/\b\w/g, (c) => c.toUpperCase()) + })() + return displayName + } + if (span.type === 'model') { + const modelName = extractModelName(span.name) + if (span.name.includes('Initial response')) { + return ( + <> + Initial response{' '} + {modelName && ({modelName})} + + ) + } + if (span.name.includes('(iteration')) { + return ( + <> + Model response {modelName && ({modelName})} + + ) + } + if (span.name.includes('Model Generation')) { + return ( + <> + Model Generation{' '} + {modelName && ({modelName})} + + ) + } + } + return span.name + } + + // Utilities: soften block colors so they are less harsh in light mode and visible in dark mode + const hexToRgb = (hex: string) => { + const m = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex) + if (!m) return null + return { + r: Number.parseInt(m[1], 16), + g: Number.parseInt(m[2], 16), + b: Number.parseInt(m[3], 16), + } + } + + const rgbToHex = (r: number, g: number, b: number) => + `#${[r, g, b] + .map((v) => + Math.max(0, Math.min(255, Math.round(v))) + .toString(16) + .padStart(2, '0') + ) + .join('')}` + + const softenColor = (hex: string, isDark: boolean, factor = 0.22) => { + const rgb = hexToRgb(hex) + if (!rgb) return hex + // Blend toward white a bit to reduce harshness and increase visibility in dark mode + const t = isDark ? factor : factor + 0.08 + const r = rgb.r + (255 - rgb.r) * t + const g = rgb.g + (255 - rgb.g) * t + const b = rgb.b + (255 - rgb.b) * t + return rgbToHex(r, g, b) + } + + return ( +
+ {depth > 0 && ( +
+ )} +
+ {getSpanIcon()} +
+ + {formatSpanName(span)} + + {chipVisibility.model && span.model && ( + + + + + {(() => { + const model = String(span.model) || '' + const IconComp = getProviderIcon(model) as React.ComponentType<{ + className?: string + }> | null + return IconComp ? : null + })()} + {String(span.model)} + + + Model + + + )} + {chipVisibility.toolProvider && + span.type === 'tool' && + (() => { + const raw = String(span.name || '') + const parts = raw.split('_') + let block: ReturnType | null = null + for (let i = parts.length; i > 0; i--) { + const candidate = parts.slice(0, i).join('_') + const b = getBlock(candidate) + if (b) { + block = b + break + } + } + if (!block?.icon) return null + const Icon = block.icon as React.ComponentType<{ className?: string }> + return ( + + + + ) + })()} + {chipVisibility.tokens && span.tokens && ( + + + + + {(() => { + const t = span.tokens + const total = + typeof t === 'number' + ? t + : (t.total ?? (t.input || 0) + (t.output || 0)) + return `T:${total}` + })()} + + + + {(() => { + const t = span.tokens + if (typeof t === 'number') return {t} tokens + const hasIn = typeof t.input === 'number' + const hasOut = typeof t.output === 'number' + const input = hasIn ? t.input : undefined + const output = hasOut ? t.output : undefined + const total = + t.total ?? + (hasIn && hasOut ? (t.input || 0) + (t.output || 0) : undefined) + + if (hasIn || hasOut) { + return ( + + {`${hasIn ? input : '—'} in / ${hasOut ? output : '—'} out`} + {typeof total === 'number' ? ` (total ${total})` : ''} + + ) + } + if (typeof total === 'number') + return Total {total} tokens + return Tokens unavailable + })()} + + + + )} + {chipVisibility.cost && span.cost?.total !== undefined && ( + + + + + {(() => { + try { + const { formatCost } = require('@/providers/utils') + return formatCost(Number(span.cost.total) || 0) + } catch { + return `$${Number.parseFloat(String(span.cost.total)).toFixed(4)}` + } + })()} + + + + {(() => { + const c = span.cost || {} + const input = typeof c.input === 'number' ? c.input : undefined + const output = typeof c.output === 'number' ? c.output : undefined + const total = + typeof c.total === 'number' + ? c.total + : typeof input === 'number' && typeof output === 'number' + ? input + output + : undefined + let formatCostFn: (v: number) => string = (v: number) => + `$${Number(v).toFixed(4)}` + try { + formatCostFn = require('@/providers/utils').formatCost as ( + v: number + ) => string + } catch {} + return ( +
+ {typeof input === 'number' && ( +
Input: {formatCostFn(input)}
+ )} + {typeof output === 'number' && ( +
Output: {formatCostFn(output)}
+ )} + {typeof total === 'number' && ( +
+ Total: {formatCostFn(total)} +
+ )} +
+ ) + })()} +
+ )} + {showRelativeChip && depth > 0 && ( + + {span.relativeStartMs !== undefined + ? `+${span.relativeStartMs}ms` + : formatRelativeTime(startOffset)} + + )} +
+ {formatDuration(duration)} +
forwardHover(e.clientX, e.clientY)} + > + {gapBeforeMs > 5 && ( +
+ )} + + {(() => { + const providerTiming = span.providerTiming + const hasSegs = + Array.isArray(providerTiming?.segments) && providerTiming.segments.length > 0 + const type = String(span.type || '').toLowerCase() + const isDark = + typeof document !== 'undefined' && + document.documentElement.classList.contains('dark') + // Base rail: keep workflow neutral so overlays stand out; otherwise use block color + const neutralRail = isDark + ? 'rgba(148, 163, 184, 0.28)' + : 'rgba(148, 163, 184, 0.32)' + const baseColor = type === 'workflow' ? neutralRail : softenColor(spanColor, isDark) + const isFlatBase = type !== 'workflow' + return ( +
+ ) + })()} + + {/* Workflow-level overlay of child spans (no duplication of agent's model/streaming) */} + {(() => { + if (String(span.type || '').toLowerCase() !== 'workflow') return null + const children = (span.children || []) as TraceSpan[] + if (!children.length) return null + // Build overlay segments (exclude agent-internal pieces like model/streaming) + const overlay = children + .filter( + (c) => c.type !== 'model' && c.name?.toLowerCase() !== 'streaming response' + ) + .map((c) => ({ + startMs: new Date(c.startTime).getTime(), + endMs: new Date(c.endTime).getTime(), + type: String(c.type || ''), + name: c.name || '', + })) + .sort((a, b) => a.startMs - b.startMs) + + if (!overlay.length) return null + + const render: React.ReactNode[] = [] + const isDark = document?.documentElement?.classList?.contains('dark') ?? false + const msToPercent = (ms: number) => + totalDuration > 0 ? (ms / totalDuration) * 100 : 0 + + for (let i = 0; i < overlay.length; i++) { + const seg = overlay[i] + const prevEnd = i > 0 ? overlay[i - 1].endMs : undefined + // Render gap between previous and current overlay segment (like in row-level spans) + if (prevEnd && seg.startMs - prevEnd > 5) { + const gapStartPercent = msToPercent(prevEnd - workflowStartTime) + const gapWidthPercent = msToPercent(seg.startMs - prevEnd) + render.push( +
+ ) + } + + const segStartPercent = msToPercent(seg.startMs - workflowStartTime) + const segWidthPercent = msToPercent(seg.endMs - seg.startMs) + const childColor = softenColor(getBlockColor(seg.type), isDark, 0.18) + render.push( +
+ ) + } + + return render + })()} + + {(() => { + const providerTiming = span.providerTiming + const segments: Array<{ + type: string + startTime: string | number + endTime: string | number + name?: string + }> = [] + + const isWorkflow = String(span.type || '').toLowerCase() === 'workflow' + + // For workflow rows, avoid duplicating model/streaming info on the base rail – + // those are already represented inside Agent. Only show provider timing if present. + if ( + !hasChildren && + providerTiming?.segments && + Array.isArray(providerTiming.segments) + ) { + providerTiming.segments.forEach((seg) => + segments.push({ + type: seg.type || 'segment', + startTime: seg.startTime, + endTime: seg.endTime, + name: seg.name, + }) + ) + } + if (!segments.length || safeWidthPercent <= 0) return null + + return segments + .sort((a, b) => new Date(a.startTime).getTime() - new Date(b.startTime).getTime()) + .map((seg, index) => { + const startMs = new Date(seg.startTime).getTime() + const endMs = new Date(seg.endTime).getTime() + const segDuration = endMs - startMs + + // Calculate position on the GLOBAL workflow timeline + // This ensures overlay segments align with their corresponding child rows + const segmentStartPercent = + totalDuration > 0 ? ((startMs - workflowStartTime) / totalDuration) * 100 : 0 + const segmentWidthPercent = + totalDuration > 0 ? (segDuration / totalDuration) * 100 : 0 + + const color = seg.type === 'tool' ? getSpanColor('tool') : getSpanColor('model') + + return ( +
+ ) + }) + })()} +
+ + + {`${duration}ms`} + +
+ + {expanded && ( +
+ {(span.input || span.output) && ( + + )} + + {hasChildren && ( +
+ {span.children?.map((childSpan, index) => { + const enrichedChildSpan = normalizeChildWorkflowSpan(childSpan) + + const childHasSubItems = Boolean( + (enrichedChildSpan.children && enrichedChildSpan.children.length > 0) || + (enrichedChildSpan.toolCalls && enrichedChildSpan.toolCalls.length > 0) || + enrichedChildSpan.input || + enrichedChildSpan.output + ) + + let childGapMs = 0 + let childGapPercent = 0 + if (index > 0 && span.children) { + const prevChild = span.children[index - 1] + const prevEndTime = new Date(prevChild.endTime).getTime() + const currentStartTime = new Date(enrichedChildSpan.startTime).getTime() + childGapMs = currentStartTime - prevEndTime + if (childGapMs > 0 && totalDuration > 0) { + childGapPercent = (childGapMs / totalDuration) * 100 + } + } + + return ( + + ) + })} +
+ )} + + {hasToolCalls && ( +
+ {span.toolCalls?.map((toolCall, index) => { + const toolStartTime = toolCall.startTime + ? new Date(toolCall.startTime).getTime() + : spanStartTime + const toolEndTime = toolCall.endTime + ? new Date(toolCall.endTime).getTime() + : toolStartTime + (toolCall.duration || 0) + + const toolSpan: TraceSpan = { + id: `${spanId}-tool-${index}`, + name: toolCall.name, + type: 'tool', + duration: toolCall.duration || toolEndTime - toolStartTime, + startTime: new Date(toolStartTime).toISOString(), + endTime: new Date(toolEndTime).toISOString(), + status: toolCall.error ? 'error' : 'success', + input: toolCall.input, + output: toolCall.error + ? { error: toolCall.error, ...(toolCall.output || {}) } + : toolCall.output, + } + + const hasToolCallData = Boolean(toolCall.input || toolCall.output || toolCall.error) + + return ( + + ) + })} +
+ )} +
+ )} +
+ ) +} diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/index.ts b/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/index.ts new file mode 100644 index 000000000..863aef2b3 --- /dev/null +++ b/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/index.ts @@ -0,0 +1,4 @@ +export { BlockDataDisplay } from './components/block-data-display' +export { CollapsibleInputOutput } from './components/collapsible-input-output' +export { TraceSpanItem } from './components/trace-span-item' +export * from './utils' diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/trace-spans-display.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/trace-spans-display.tsx deleted file mode 100644 index 7a3657f5d..000000000 --- a/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/trace-spans-display.tsx +++ /dev/null @@ -1,755 +0,0 @@ -'use client' - -import { useState } from 'react' -import { ChevronDown, ChevronRight, Code, Cpu, ExternalLink } from 'lucide-react' -import { - AgentIcon, - ApiIcon, - ChartBarIcon, - CodeIcon, - ConditionalIcon, - ConnectIcon, -} from '@/components/icons' -import { cn, redactApiKeys } from '@/lib/utils' -import type { TraceSpan } from '@/stores/logs/filters/types' - -function getSpanKey(span: TraceSpan): string { - if (span.id) { - return span.id - } - - const name = span.name || 'span' - const start = span.startTime || 'unknown-start' - const end = span.endTime || 'unknown-end' - - return `${name}|${start}|${end}` -} - -function mergeTraceSpanChildren(...groups: TraceSpan[][]): TraceSpan[] { - const merged: TraceSpan[] = [] - const seen = new Set() - - groups.forEach((group) => { - group.forEach((child) => { - const key = getSpanKey(child) - if (seen.has(key)) { - return - } - seen.add(key) - merged.push(child) - }) - }) - - return merged -} - -function normalizeChildWorkflowSpan(span: TraceSpan): TraceSpan { - const enrichedSpan: TraceSpan = { ...span } - - if (enrichedSpan.output && typeof enrichedSpan.output === 'object') { - enrichedSpan.output = { ...enrichedSpan.output } - } - - const normalizedChildren = Array.isArray(span.children) - ? span.children.map((childSpan) => normalizeChildWorkflowSpan(childSpan)) - : [] - - const outputChildSpans = Array.isArray(span.output?.childTraceSpans) - ? (span.output!.childTraceSpans as TraceSpan[]).map((childSpan) => - normalizeChildWorkflowSpan(childSpan) - ) - : [] - - const mergedChildren = mergeTraceSpanChildren(normalizedChildren, outputChildSpans) - - if (enrichedSpan.output && 'childTraceSpans' in enrichedSpan.output) { - const { childTraceSpans, ...cleanOutput } = enrichedSpan.output as { - childTraceSpans?: TraceSpan[] - } & Record - enrichedSpan.output = cleanOutput - } - - enrichedSpan.children = mergedChildren.length > 0 ? 
mergedChildren : undefined - - return enrichedSpan -} - -interface TraceSpansDisplayProps { - traceSpans?: TraceSpan[] - totalDuration?: number - onExpansionChange?: (expanded: boolean) => void -} - -// Transform raw block data into clean, user-friendly format -function transformBlockData(data: any, blockType: string, isInput: boolean) { - if (!data) return null - - // For input data, filter out sensitive information - if (isInput) { - const cleanInput = redactApiKeys(data) - - // Remove null/undefined values for cleaner display - Object.keys(cleanInput).forEach((key) => { - if (cleanInput[key] === null || cleanInput[key] === undefined) { - delete cleanInput[key] - } - }) - - return cleanInput - } - - // For output data, extract meaningful information based on block type - if (data.response) { - const response = data.response - - switch (blockType) { - case 'agent': - return { - content: response.content, - model: data.model, - tokens: data.tokens, - toolCalls: response.toolCalls, - ...(data.cost && { cost: data.cost }), - } - - case 'function': - return { - result: response.result, - stdout: response.stdout, - ...(response.executionTime && { executionTime: `${response.executionTime}ms` }), - } - - case 'api': - return { - data: response.data, - status: response.status, - headers: response.headers, - } - - case 'tool': - // For tool calls, show the result data directly - return response - - default: - // For other block types, show the response content - return response - } - } - - return data -} - -// Collapsible Input/Output component -interface CollapsibleInputOutputProps { - span: TraceSpan - spanId: string - depth: number -} - -function CollapsibleInputOutput({ span, spanId, depth }: CollapsibleInputOutputProps) { - const [inputExpanded, setInputExpanded] = useState(false) - const [outputExpanded, setOutputExpanded] = useState(false) - - // Calculate the left margin based on depth to match the parent span's indentation - const leftMargin = depth * 16 + 8 + 24 // Base depth indentation + icon width + extra padding - - return ( -
- {/* Input Data - Collapsible */} - {span.input && ( -
- - {inputExpanded && ( -
- )} -
- )} - - {/* Output Data - Collapsible */} - {span.output && ( -
- - {outputExpanded && ( -
- )} -
- )} -
- ) -} - -// Component to display block input/output data in a clean, readable format -function BlockDataDisplay({ - data, - blockType, - isInput = false, - isError = false, -}: { - data: any - blockType?: string - isInput?: boolean - isError?: boolean -}) { - if (!data) return null - - // Handle different data types - const renderValue = (value: any, key?: string): React.ReactNode => { - if (value === null) return null - if (value === undefined) return undefined - - if (typeof value === 'string') { - return "{value}" - } - - if (typeof value === 'number') { - return {value} - } - - if (typeof value === 'boolean') { - return ( - {value.toString()} - ) - } - - if (Array.isArray(value)) { - if (value.length === 0) return [] - return ( -
- [ -
- {value.map((item, index) => ( -
- - {index}: - -
{renderValue(item)}
- ))} -
- ] -
- ) - } - - if (typeof value === 'object') { - const entries = Object.entries(value) - if (entries.length === 0) return {'{}'} - - return ( -
- {entries.map(([objKey, objValue]) => ( -
- - {objKey}: - -
{renderValue(objValue, objKey)}
- ))} -
- ) - } - - return {String(value)} - } - - // Transform the data for better display - const transformedData = transformBlockData(data, blockType || 'unknown', isInput) - - // Special handling for error output - if (isError && data.error) { - return ( -
Error
{data.error}
- {/* Show other output data if available */} - {transformedData && - Object.keys(transformedData).filter((key) => key !== 'error' && key !== 'success') - .length > 0 && ( -
- {Object.entries(transformedData) - .filter(([key]) => key !== 'error' && key !== 'success') - .map(([key, value]) => ( -
- {key}: - {renderValue(value, key)} -
- ))} -
- )} -
- ) - } - - return ( -
{renderValue(transformedData || data)}
- ) -} - -export function TraceSpansDisplay({ - traceSpans, - totalDuration = 0, - onExpansionChange, -}: TraceSpansDisplayProps) { - // Keep track of expanded spans - const [expandedSpans, setExpandedSpans] = useState>(new Set()) - - // Early return after all hooks - if (!traceSpans || traceSpans.length === 0) { - return
No trace data available
- } - - // Find the earliest start time among all spans to be the workflow start time - const workflowStartTime = traceSpans.reduce((earliest, span) => { - const startTime = new Date(span.startTime).getTime() - return startTime < earliest ? startTime : earliest - }, Number.POSITIVE_INFINITY) - - // Find the latest end time among all spans - const workflowEndTime = traceSpans.reduce((latest, span) => { - const endTime = span.endTime ? new Date(span.endTime).getTime() : 0 - return endTime > latest ? endTime : latest - }, 0) - - // Calculate the actual total workflow duration from start to end - // This ensures parallel spans are represented correctly in the timeline - const actualTotalDuration = workflowEndTime - workflowStartTime - - // Handle span toggling - const handleSpanToggle = (spanId: string, expanded: boolean, hasSubItems: boolean) => { - const newExpandedSpans = new Set(expandedSpans) - if (expanded) { - newExpandedSpans.add(spanId) - } else { - newExpandedSpans.delete(spanId) - } - setExpandedSpans(newExpandedSpans) - - // Only notify parent component if this span has children or tool calls - if (onExpansionChange && hasSubItems) { - onExpansionChange(newExpandedSpans.size > 0) - } - } - - return ( -
Workflow Execution
- {traceSpans.map((span, index) => { - const normalizedSpan = normalizeChildWorkflowSpan(span) - const hasSubItems = Boolean( - (normalizedSpan.children && normalizedSpan.children.length > 0) || - (normalizedSpan.toolCalls && normalizedSpan.toolCalls.length > 0) || - normalizedSpan.input || - normalizedSpan.output - ) - return ( - - ) - })} -
- ) -} - -interface TraceSpanItemProps { - span: TraceSpan - depth: number - totalDuration: number - isLast: boolean - parentStartTime: number // Start time of the parent span for offset calculation - workflowStartTime: number // Start time of the entire workflow - onToggle: (spanId: string, expanded: boolean, hasSubItems: boolean) => void - expandedSpans: Set - hasSubItems?: boolean -} - -function TraceSpanItem({ - span, - depth, - totalDuration, - isLast, - parentStartTime, - workflowStartTime, - onToggle, - expandedSpans, - hasSubItems = false, -}: TraceSpanItemProps): React.ReactNode { - const spanId = span.id || `span-${span.name}-${span.startTime}` - const expanded = expandedSpans.has(spanId) - const hasChildren = span.children && span.children.length > 0 - const hasToolCalls = span.toolCalls && span.toolCalls.length > 0 - const hasInputOutput = Boolean(span.input || span.output) - const hasNestedItems = hasChildren || hasToolCalls || hasInputOutput - - // Calculate timing information - const spanStartTime = new Date(span.startTime).getTime() - const spanEndTime = new Date(span.endTime).getTime() - const duration = span.duration || spanEndTime - spanStartTime - const startOffset = spanStartTime - parentStartTime // Time from parent start to this span's start - - // Calculate the position relative to the workflow start time for accurate timeline visualization - // For parallel execution, this ensures spans align correctly based on their actual start time - const relativeStartPercent = - totalDuration > 0 ? ((spanStartTime - workflowStartTime) / totalDuration) * 100 : 0 - - // Calculate width based on the span's actual duration relative to total workflow duration - const actualDurationPercent = totalDuration > 0 ? (duration / totalDuration) * 100 : 0 - - // Ensure values are within valid range - const safeStartPercent = Math.min(100, Math.max(0, relativeStartPercent)) - const safeWidthPercent = Math.max(2, Math.min(100 - safeStartPercent, actualDurationPercent)) - - // Handle click to expand/collapse this span - const handleSpanClick = () => { - if (hasNestedItems) { - onToggle(spanId, !expanded, hasNestedItems) - } - } - - // Get appropriate icon based on span type - const getSpanIcon = () => { - const type = span.type.toLowerCase() - - // Expand/collapse for spans with children - if (hasNestedItems) { - return expanded ? 
: - } - - // Block type specific icons - if (type === 'agent') { - return - } - - if (type === 'evaluator') { - return - } - - if (type === 'condition') { - return - } - - if (type === 'router') { - return - } - - if (type === 'model') { - return - } - - if (type === 'function') { - return - } - - if (type === 'tool') { - return - } - - if (type === 'api') { - return - } - - return - } - - // Format milliseconds as +XXms for relative timing - const formatRelativeTime = (ms: number) => { - if (ms === 0) return 'start' - return `+${ms}ms` - } - - // Get color based on span type - const getSpanColor = (type: string) => { - switch (type.toLowerCase()) { - case 'agent': - return 'var(--brand-primary-hover-hex)' // Purple from AgentBlock - case 'provider': - return '#818cf8' // Indigo for provider - case 'model': - return '#10a37f' // Green from OpenAIBlock - case 'function': - return '#FF402F' // Orange-red from FunctionBlock - case 'tool': - return '#f97316' // Orange for tools - case 'router': - return '#2FA1FF' // Blue from RouterBlock - case 'condition': - return '#FF972F' // Orange from ConditionBlock - case 'evaluator': - return '#2FA1FF' // Blue from EvaluatorBlock - case 'api': - return '#2F55FF' // Blue from ApiBlock - default: - return '#6b7280' // Gray for others - } - } - - const spanColor = getSpanColor(span.type) - - // Format duration to be more readable - const formatDuration = (ms: number) => { - if (ms < 1000) return `${ms}ms` - return `${(ms / 1000).toFixed(2)}s` - } - - // Extract model name from span name using a more general pattern - const extractModelName = (spanName: string) => { - // Try to match model name in parentheses - const modelMatch = spanName.match(/\(([\w.-]+)\)/i) - return modelMatch ? modelMatch[1] : '' - } - - // Format span name for display - const formatSpanName = (span: TraceSpan) => { - if (span.type === 'model') { - const modelName = extractModelName(span.name) - - if (span.name.includes('Initial response')) { - return ( - <> - Initial response{' '} - {modelName && ({modelName})} - - ) - } - if (span.name.includes('(iteration')) { - const iterMatch = span.name.match(/\(iteration (\d+)\)/) - const iterNum = iterMatch ? iterMatch[1] : '' - return ( - <> - Model response{' '} - {iterNum && (iteration {iterNum})}{' '} - {modelName && ({modelName})} - - ) - } - if (span.name.includes('Model Generation')) { - return ( - <> - Model Generation{' '} - {modelName && ({modelName})} - - ) - } - } - - return span.name - } - - return ( -
- {/* Span header */} -
- {getSpanIcon()} -
- - {formatSpanName(span)} - - {depth > 0 && ( - - {span.relativeStartMs !== undefined - ? `+${span.relativeStartMs}ms` - : formatRelativeTime(startOffset)} - - )} - {depth === 0 && ( - - {new Date(span.startTime).toLocaleTimeString([], { - hour: '2-digit', - minute: '2-digit', - second: '2-digit', - hour12: false, - })} - - )} -
- {formatDuration(duration)} -
- -
- {/* Timeline visualization - responsive width based on container size */} -
- - {/* Duration text - always show in ms */} - - {`${duration}ms`} - -
- - {/* Expanded content */} - {expanded && ( -
- {/* Block Input/Output Data - Collapsible */} - {(span.input || span.output) && ( - - )} - - {/* Children and tool calls */} - {/* Render child spans */} - {hasChildren && ( -
- {span.children?.map((childSpan, index) => { - const enrichedChildSpan = normalizeChildWorkflowSpan(childSpan) - - const childHasSubItems = Boolean( - (enrichedChildSpan.children && enrichedChildSpan.children.length > 0) || - (enrichedChildSpan.toolCalls && enrichedChildSpan.toolCalls.length > 0) || - enrichedChildSpan.input || - enrichedChildSpan.output - ) - - return ( - - ) - })} -
- )} - - {/* Render tool calls as spans */} - {hasToolCalls && ( -
- {span.toolCalls?.map((toolCall, index) => { - // Create a pseudo-span for each tool call - const toolStartTime = toolCall.startTime - ? new Date(toolCall.startTime).getTime() - : spanStartTime - const toolEndTime = toolCall.endTime - ? new Date(toolCall.endTime).getTime() - : toolStartTime + (toolCall.duration || 0) - - const toolSpan: TraceSpan = { - id: `${spanId}-tool-${index}`, - name: toolCall.name, - type: 'tool', - duration: toolCall.duration || toolEndTime - toolStartTime, - startTime: new Date(toolStartTime).toISOString(), - endTime: new Date(toolEndTime).toISOString(), - status: toolCall.error ? 'error' : 'success', - // Include tool call arguments as input and result as output - input: toolCall.input, - output: toolCall.error - ? { error: toolCall.error, ...(toolCall.output || {}) } - : toolCall.output, - } - - // Tool calls now have input/output data to display - const hasToolCallData = Boolean(toolCall.input || toolCall.output || toolCall.error) - - return ( - - ) - })} -
- )} -
- )} -
- ) -} diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/trace-spans.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/trace-spans.tsx new file mode 100644 index 000000000..ac266b8fa --- /dev/null +++ b/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/trace-spans.tsx @@ -0,0 +1,308 @@ +'use client' + +import { useCallback, useEffect, useMemo, useRef, useState } from 'react' +import { Maximize2, Minimize2 } from 'lucide-react' +import { + formatDurationDisplay, + normalizeChildWorkflowSpan, + TraceSpanItem, +} from '@/app/workspace/[workspaceId]/logs/components/trace-spans' +import type { TraceSpan } from '@/stores/logs/filters/types' + +interface TraceSpansProps { + traceSpans?: TraceSpan[] + totalDuration?: number + onExpansionChange?: (expanded: boolean) => void +} + +export function TraceSpans({ traceSpans, totalDuration = 0, onExpansionChange }: TraceSpansProps) { + const [expandedSpans, setExpandedSpans] = useState>(new Set()) + const [typeFilters, setTypeFilters] = useState>({}) + const containerRef = useRef(null) + const timelineHitboxRef = useRef(null) + const [hoveredPercent, setHoveredPercent] = useState(null) + const [hoveredWorkflowMs, setHoveredWorkflowMs] = useState(null) + const [hoveredX, setHoveredX] = useState(null) + const [containerWidth, setContainerWidth] = useState(0) + + type ChipVisibility = { + model: boolean + toolProvider: boolean + tokens: boolean + cost: boolean + relative: boolean + } + + const chipVisibility: ChipVisibility = useMemo(() => { + const leftBudget = containerWidth * 0.55 + return { + model: leftBudget >= 300, // first to reveal + toolProvider: leftBudget >= 300, // alongside model + tokens: leftBudget >= 380, // then tokens + cost: leftBudget >= 460, // then cost + relative: leftBudget >= 540, // finally relative timing + } + }, [containerWidth]) + + if (!traceSpans || traceSpans.length === 0) { + return
No trace data available
+ } + + const workflowStartTime = traceSpans.reduce((earliest, span) => { + const startTime = new Date(span.startTime).getTime() + return startTime < earliest ? startTime : earliest + }, Number.POSITIVE_INFINITY) + + const workflowEndTime = traceSpans.reduce((latest, span) => { + const endTime = span.endTime ? new Date(span.endTime).getTime() : 0 + return endTime > latest ? endTime : latest + }, 0) + + const actualTotalDuration = workflowEndTime - workflowStartTime + + const handleSpanToggle = (spanId: string, expanded: boolean, hasSubItems: boolean) => { + const newExpandedSpans = new Set(expandedSpans) + if (expanded) { + newExpandedSpans.add(spanId) + } else { + newExpandedSpans.delete(spanId) + } + setExpandedSpans(newExpandedSpans) + + if (onExpansionChange && hasSubItems) { + onExpansionChange(newExpandedSpans.size > 0) + } + } + + const availableTypes = useMemo(() => { + const set = new Set() + const visit = (spans?: TraceSpan[]) => { + if (!spans) return + for (const s of spans) { + if (s?.type) { + const tl = s.type.toLowerCase() + if (tl !== 'workflow') set.add(tl) // Never expose 'workflow' as a filter + } + if (s?.children?.length) visit(s.children) + if (s?.toolCalls?.length) set.add('tool') + } + } + visit(traceSpans) + return Array.from(set).sort() + }, [traceSpans]) + + const effectiveTypeFilters = useMemo(() => { + if (!availableTypes.length) return {} + if (Object.keys(typeFilters).length === 0) { + const all: Record = {} + availableTypes.forEach((t) => (all[t] = true)) + return all + } + const merged = { ...typeFilters } + availableTypes.forEach((t) => { + if (merged[t] === undefined) merged[t] = true + }) + return merged + }, [availableTypes, typeFilters]) + + const toggleAll = (expand: boolean) => { + if (!traceSpans) return + const next = new Set() + if (expand) { + const collect = (spans: TraceSpan[]) => { + for (const s of spans) { + const id = s.id || `span-${s.name}-${s.startTime}` + next.add(id) + if (s.children?.length) collect(s.children) + if (s?.toolCalls?.length) next.add(`${id}-tools`) + } + } + collect(traceSpans) + } + setExpandedSpans(next) + onExpansionChange?.(expand) + } + + const filtered = useMemo(() => { + const allowed = new Set( + Object.entries(effectiveTypeFilters) + .filter(([, v]) => v) + .map(([k]) => k) + ) + const filterTree = (spans: TraceSpan[]): TraceSpan[] => + spans + .map((s) => ({ ...s })) + .filter((s) => { + const tl = s.type?.toLowerCase?.() || '' + if (tl === 'workflow') return true + return allowed.has(tl) + }) + .map((s) => ({ + ...s, + children: s.children ? filterTree(s.children) : undefined, + })) + return traceSpans ? 
filterTree(traceSpans) : [] + }, [traceSpans, effectiveTypeFilters]) + + const forwardHover = useCallback( + (clientX: number, clientY: number) => { + if (!timelineHitboxRef.current || !containerRef.current) return + + const railRect = timelineHitboxRef.current.getBoundingClientRect() + const containerRect = containerRef.current.getBoundingClientRect() + + const withinX = clientX >= railRect.left && clientX <= railRect.right + const withinY = clientY >= railRect.top && clientY <= railRect.bottom + + if (!withinX || !withinY) { + setHoveredPercent(null) + setHoveredWorkflowMs(null) + setHoveredX(null) + return + } + + const clamped = Math.max(0, Math.min(1, (clientX - railRect.left) / railRect.width)) + setHoveredPercent(clamped * 100) + setHoveredWorkflowMs(workflowStartTime + clamped * actualTotalDuration) + setHoveredX(railRect.left + clamped * railRect.width - containerRect.left) + }, + [actualTotalDuration, workflowStartTime] + ) + + useEffect(() => { + const handleMove = (event: MouseEvent) => { + forwardHover(event.clientX, event.clientY) + } + + window.addEventListener('pointermove', handleMove) + return () => window.removeEventListener('pointermove', handleMove) + }, [forwardHover]) + + useEffect(() => { + if (!containerRef.current) return + const el = containerRef.current + const ro = new ResizeObserver((entries: ResizeObserverEntry[]) => { + const width = entries?.[0]?.contentRect?.width || el.clientWidth + setContainerWidth(width) + }) + ro.observe(el) + setContainerWidth(el.clientWidth) + return () => ro.disconnect() + }, []) + + return ( +
Workflow Execution
+ {(() => { + const anyExpanded = expandedSpans.size > 0 + return ( + + ) + })()} +
{ + setHoveredPercent(null) + setHoveredWorkflowMs(null) + setHoveredX(null) + }} + > + {filtered.map((span, index) => { + const normalizedSpan = normalizeChildWorkflowSpan(span) + const hasSubItems = Boolean( + (normalizedSpan.children && normalizedSpan.children.length > 0) || + (normalizedSpan.toolCalls && normalizedSpan.toolCalls.length > 0) || + normalizedSpan.input || + normalizedSpan.output + ) + + // Calculate gap from previous span (for sequential execution visualization) + let gapMs = 0 + let gapPercent = 0 + if (index > 0) { + const prevSpan = filtered[index - 1] + const prevEndTime = new Date(prevSpan.endTime).getTime() + const currentStartTime = new Date(normalizedSpan.startTime).getTime() + gapMs = currentStartTime - prevEndTime + if (gapMs > 0 && actualTotalDuration > 0) { + gapPercent = (gapMs / actualTotalDuration) * 100 + } + } + + return ( + + ) + })} + + {/* Global crosshair spanning all rows with visible time label */} + {hoveredPercent !== null && hoveredX !== null && ( + <> +
+ {formatDurationDisplay(Math.max(0, (hoveredWorkflowMs || 0) - workflowStartTime))} +
+ + )} + + {/* Hover capture area - aligned to timeline bars, not extending to edge */} +
onPointerMove={(e) => forwardHover(e.clientX, e.clientY)} + onPointerLeave={() => { + setHoveredPercent(null) + setHoveredWorkflowMs(null) + setHoveredX(null) + }} + />
+ ) +} diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/utils.ts b/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/utils.ts new file mode 100644 index 000000000..bc49b75c8 --- /dev/null +++ b/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/utils.ts @@ -0,0 +1,128 @@ +import { redactApiKeys } from '@/lib/utils' +import type { TraceSpan } from '@/stores/logs/filters/types' + +export function getSpanKey(span: TraceSpan): string { + if (span.id) { + return span.id + } + + const name = span.name || 'span' + const start = span.startTime || 'unknown-start' + const end = span.endTime || 'unknown-end' + + return `${name}|${start}|${end}` +} + +export function mergeTraceSpanChildren(...groups: TraceSpan[][]): TraceSpan[] { + const merged: TraceSpan[] = [] + const seen = new Set() + + groups.forEach((group) => { + group.forEach((child) => { + const key = getSpanKey(child) + if (seen.has(key)) { + return + } + seen.add(key) + merged.push(child) + }) + }) + + return merged +} + +export function normalizeChildWorkflowSpan(span: TraceSpan): TraceSpan { + const enrichedSpan: TraceSpan = { ...span } + + if (enrichedSpan.output && typeof enrichedSpan.output === 'object') { + enrichedSpan.output = { ...enrichedSpan.output } + } + + const normalizedChildren = Array.isArray(span.children) + ? span.children.map((childSpan) => normalizeChildWorkflowSpan(childSpan)) + : [] + + const outputChildSpans = Array.isArray(span.output?.childTraceSpans) + ? (span.output!.childTraceSpans as TraceSpan[]).map((childSpan) => + normalizeChildWorkflowSpan(childSpan) + ) + : [] + + const mergedChildren = mergeTraceSpanChildren(normalizedChildren, outputChildSpans) + + if (enrichedSpan.output && 'childTraceSpans' in enrichedSpan.output) { + const { childTraceSpans, ...cleanOutput } = enrichedSpan.output as { + childTraceSpans?: TraceSpan[] + } & Record + enrichedSpan.output = cleanOutput + } + + enrichedSpan.children = mergedChildren.length > 0 ? mergedChildren : undefined + + return enrichedSpan +} + +export function transformBlockData(data: unknown, blockType: string, isInput: boolean) { + if (!data) return null + + if (isInput) { + const cleanInput = redactApiKeys(data) + + Object.keys(cleanInput).forEach((key) => { + if (cleanInput[key] === null || cleanInput[key] === undefined) { + delete cleanInput[key] + } + }) + + return cleanInput + } + + if (typeof data === 'object' && data !== null && 'response' in data) { + const dataWithResponse = data as Record + const response = dataWithResponse.response as Record + + switch (blockType) { + case 'agent': + return { + content: response.content, + model: 'model' in dataWithResponse ? dataWithResponse.model : undefined, + tokens: 'tokens' in dataWithResponse ? dataWithResponse.tokens : undefined, + toolCalls: response.toolCalls, + ...('cost' in dataWithResponse && dataWithResponse.cost + ? { cost: dataWithResponse.cost } + : {}), + } + + case 'function': + return { + result: response.result, + stdout: response.stdout, + ...('executionTime' in response && response.executionTime + ? 
{ executionTime: `${response.executionTime}ms` } + : {}), + } + + case 'api': + return { + data: response.data, + status: response.status, + headers: response.headers, + } + + case 'tool': + return response + + default: + return response + } + } + + return data +} + +export function formatDurationDisplay(ms: number): string { + if (ms < 1000) { + return `${ms.toFixed(0)}ms` + } + return `${(ms / 1000).toFixed(2)}s` +} diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/sub-block.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/sub-block.tsx index 354f3849f..6f625c31b 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/sub-block.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/sub-block.tsx @@ -68,8 +68,6 @@ export const SubBlock = memo( }: SubBlockProps) { const [isValidJson, setIsValidJson] = useState(true) - // Removed debug logging for performance - const handleMouseDown = (e: React.MouseEvent) => { e.stopPropagation() } diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-edge/workflow-edge.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-edge/workflow-edge.tsx index 789864645..4020f07e3 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-edge/workflow-edge.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-edge/workflow-edge.tsx @@ -1,9 +1,7 @@ -import { useEffect } from 'react' import { X } from 'lucide-react' import { BaseEdge, EdgeLabelRenderer, type EdgeProps, getSmoothStepPath } from 'reactflow' import type { EdgeDiffStatus } from '@/lib/workflows/diff/types' import { useWorkflowDiffStore } from '@/stores/workflow-diff' -import { useCurrentWorkflow } from '../../hooks' interface WorkflowEdgeProps extends EdgeProps { sourceHandle?: string | null @@ -38,110 +36,45 @@ export const WorkflowEdge = ({ offset: isHorizontal ? 30 : 20, }) - // Use the directly provided isSelected flag instead of computing it const isSelected = data?.isSelected ?? false const isInsideLoop = data?.isInsideLoop ?? 
false const parentLoopId = data?.parentLoopId - // Get edge diff status const diffAnalysis = useWorkflowDiffStore((state) => state.diffAnalysis) const isShowingDiff = useWorkflowDiffStore((state) => state.isShowingDiff) const isDiffReady = useWorkflowDiffStore((state) => state.isDiffReady) - const currentWorkflow = useCurrentWorkflow() - // Generate edge identifier using block IDs to match diff analysis from sim agent - // This must exactly match the logic used by the sim agent diff analysis const generateEdgeIdentity = ( sourceId: string, targetId: string, sourceHandle?: string | null, targetHandle?: string | null ): string => { - // The diff analysis generates edge identifiers in the format: sourceId-sourceHandle-targetId-targetHandle - // Use actual handle names, defaulting to 'source' and 'target' if not provided const actualSourceHandle = sourceHandle || 'source' const actualTargetHandle = targetHandle || 'target' return `${sourceId}-${actualSourceHandle}-${targetId}-${actualTargetHandle}` } - // Generate edge identifier using the exact same logic as the diff engine const edgeIdentifier = generateEdgeIdentity(source, target, sourceHandle, targetHandle) - // Debug logging to understand what's happening - useEffect(() => { - if (edgeIdentifier && diffAnalysis?.edge_diff) { - console.log(`[Edge Debug] Edge ${id}:`, { - edgeIdentifier, - sourceHandle, - targetHandle, - sourceBlockId: source, - targetBlockId: target, - isShowingDiff, - isDiffMode: currentWorkflow.isDiffMode, - edgeDiffAnalysis: diffAnalysis.edge_diff, - // Show actual array contents to see why matching fails - newEdgesArray: diffAnalysis.edge_diff.new_edges, - deletedEdgesArray: diffAnalysis.edge_diff.deleted_edges, - unchangedEdgesArray: diffAnalysis.edge_diff.unchanged_edges, - // Check if this edge matches any in the diff analysis - matchesNew: diffAnalysis.edge_diff.new_edges.includes(edgeIdentifier), - matchesDeleted: diffAnalysis.edge_diff.deleted_edges.includes(edgeIdentifier), - matchesUnchanged: diffAnalysis.edge_diff.unchanged_edges.includes(edgeIdentifier), - }) - } - }, [ - edgeIdentifier, - diffAnalysis, - isShowingDiff, - id, - sourceHandle, - targetHandle, - source, - target, - currentWorkflow.isDiffMode, - ]) - - // One-time debug log of full diff analysis - useEffect(() => { - if (diffAnalysis && id === Object.keys(currentWorkflow.blocks)[0]) { - // Only log once per diff - console.log('[Full Diff Analysis]:', { - edge_diff: diffAnalysis.edge_diff, - new_blocks: diffAnalysis.new_blocks, - edited_blocks: diffAnalysis.edited_blocks, - deleted_blocks: diffAnalysis.deleted_blocks, - isShowingDiff, - currentWorkflowEdgeCount: currentWorkflow.edges.length, - currentWorkflowBlockCount: Object.keys(currentWorkflow.blocks).length, - }) - } - }, [diffAnalysis, id, currentWorkflow.blocks, currentWorkflow.edges, isShowingDiff]) - - // Determine edge diff status let edgeDiffStatus: EdgeDiffStatus = null - // Check if edge is directly marked as deleted (for reconstructed edges) if (data?.isDeleted) { edgeDiffStatus = 'deleted' - } - // Only attempt to determine diff status if all required data is available - else if (diffAnalysis?.edge_diff && edgeIdentifier && isDiffReady) { + } else if (diffAnalysis?.edge_diff && edgeIdentifier && isDiffReady) { if (isShowingDiff) { - // In diff view, show new edges if (diffAnalysis.edge_diff.new_edges.includes(edgeIdentifier)) { edgeDiffStatus = 'new' } else if (diffAnalysis.edge_diff.unchanged_edges.includes(edgeIdentifier)) { edgeDiffStatus = 'unchanged' } } else { - // In original 
workflow, show deleted edges if (diffAnalysis.edge_diff.deleted_edges.includes(edgeIdentifier)) { edgeDiffStatus = 'deleted' } } } - // Merge any style props passed from parent with diff highlighting const getEdgeColor = () => { if (edgeDiffStatus === 'new') return '#22c55e' // Green for new edges if (edgeDiffStatus === 'deleted') return '#ef4444' // Red for deleted edges diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts index 4203c0218..db436758a 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts @@ -406,11 +406,14 @@ export function useWorkflowExecution() { } } + const streamCompletionTimes = new Map() + const onStream = async (streamingExecution: StreamingExecution) => { const promise = (async () => { if (!streamingExecution.stream) return const reader = streamingExecution.stream.getReader() const blockId = (streamingExecution.execution as any)?.blockId + const streamStartTime = Date.now() let isFirstChunk = true if (blockId) { @@ -420,6 +423,10 @@ export function useWorkflowExecution() { while (true) { const { done, value } = await reader.read() if (done) { + // Record when this stream completed + if (blockId) { + streamCompletionTimes.set(blockId, Date.now()) + } break } const chunk = new TextDecoder().decode(value) @@ -517,6 +524,25 @@ export function useWorkflowExecution() { result.metadata = { duration: 0, startTime: new Date().toISOString() } } ;(result.metadata as any).source = 'chat' + + // Update block logs with actual stream completion times + if (result.logs && streamCompletionTimes.size > 0) { + const streamCompletionEndTime = new Date( + Math.max(...Array.from(streamCompletionTimes.values())) + ).toISOString() + + result.logs.forEach((log: BlockLog) => { + if (streamCompletionTimes.has(log.blockId)) { + const completionTime = streamCompletionTimes.get(log.blockId)! 
+ const startTime = new Date(log.startedAt).getTime() + + // Update the log with actual stream completion time + log.endedAt = new Date(completionTime).toISOString() + log.durationMs = completionTime - startTime + } + }) + } + // Update streamed content and apply tokenization if (result.logs) { result.logs.forEach((log: BlockLog) => { diff --git a/apps/sim/executor/index.ts b/apps/sim/executor/index.ts index b1601dc44..de14bf953 100644 --- a/apps/sim/executor/index.ts +++ b/apps/sim/executor/index.ts @@ -242,6 +242,7 @@ export class Executor { ): Promise { const { setIsExecuting, setIsDebugging, setPendingBlocks, reset } = useExecutionStore.getState() const startTime = new Date() + const executorStartMs = startTime.getTime() let finalOutput: NormalizedBlockOutput = {} // Track workflow execution start @@ -252,7 +253,9 @@ export class Executor { startTime: startTime.toISOString(), }) + const beforeValidation = Date.now() this.validateWorkflow(startBlockId) + const validationTime = Date.now() - beforeValidation const context = this.createExecutionContext(workflowId, startTime, startBlockId) @@ -268,10 +271,13 @@ export class Executor { let hasMoreLayers = true let iteration = 0 + const firstBlockExecutionTime: number | null = null const maxIterations = 500 // Safety limit for infinite loops while (hasMoreLayers && iteration < maxIterations && !this.isCancelled) { + const iterationStart = Date.now() const nextLayer = this.getNextExecutionLayer(context) + const getNextLayerTime = Date.now() - iterationStart if (this.isDebugging) { // In debug mode, update the pending blocks and wait for user interaction @@ -405,6 +411,7 @@ export class Executor { if (normalizedOutputs.length > 0) { finalOutput = normalizedOutputs[normalizedOutputs.length - 1] } + // Process loop iterations - this will activate external paths when loops complete await this.loopManager.processLoopIterations(context) diff --git a/apps/sim/instrumentation-client.ts b/apps/sim/instrumentation-client.ts index 584abb63f..4f5852ae4 100644 --- a/apps/sim/instrumentation-client.ts +++ b/apps/sim/instrumentation-client.ts @@ -1,15 +1,16 @@ /** * Sim Telemetry - Client-side Instrumentation - * - * This file initializes client-side telemetry when the app loads in the browser. - * It respects the user's telemetry preferences stored in localStorage. 
- * */ + import { env } from './lib/env' if (typeof window !== 'undefined') { const TELEMETRY_STATUS_KEY = 'simstudio-telemetry-status' + const BATCH_INTERVAL_MS = 10000 // Send batches every 10 seconds + const MAX_BATCH_SIZE = 50 // Max events per batch let telemetryEnabled = true + const eventBatch: any[] = [] + let batchTimer: NodeJS.Timeout | null = null try { if (env.NEXT_TELEMETRY_DISABLED === '1') { @@ -26,216 +27,221 @@ if (typeof window !== 'undefined') { } /** - * Safe serialize function to ensure we don't include circular references or invalid data + * Add event to batch and schedule flush */ - function safeSerialize(obj: any): any { - if (obj === null || obj === undefined) return null - if (typeof obj !== 'object') return obj + function addToBatch(event: any): void { + if (!telemetryEnabled) return - if (Array.isArray(obj)) { - return obj.map((item) => safeSerialize(item)) + eventBatch.push(event) + + if (eventBatch.length >= MAX_BATCH_SIZE) { + flushBatch() + } else if (!batchTimer) { + batchTimer = setTimeout(flushBatch, BATCH_INTERVAL_MS) } - - const result: Record = {} - - for (const key in obj) { - if (key in obj) { - const value = obj[key] - if ( - value === undefined || - value === null || - typeof value === 'function' || - typeof value === 'symbol' - ) { - continue - } - - try { - result[key] = safeSerialize(value) - } catch (_e) { - try { - result[key] = String(value) - } catch (_e2) {} - } - } - } - - return result } + + /** + * Sanitize event data to remove sensitive information + */ + function sanitizeEvent(event: any): any { + const patterns = ['password', 'token', 'secret', 'key', 'auth', 'credential', 'private'] + const sensitiveRe = new RegExp(patterns.join('|'), 'i') + + const scrubString = (s: string) => (s && sensitiveRe.test(s) ? 
'[redacted]' : s) + + if (event == null) return event + if (typeof event === 'string') return scrubString(event) + if (typeof event !== 'object') return event + + if (Array.isArray(event)) { + return event.map((item) => sanitizeEvent(item)) + } + + const sanitized: Record = {} + for (const [key, value] of Object.entries(event)) { + const lowerKey = key.toLowerCase() + if (patterns.some((p) => lowerKey.includes(p))) continue + + if (typeof value === 'string') sanitized[key] = scrubString(value) + else if (Array.isArray(value)) sanitized[key] = value.map((v) => sanitizeEvent(v)) + else if (value && typeof value === 'object') sanitized[key] = sanitizeEvent(value) + else sanitized[key] = value + } + + return sanitized + } + + /** + * Flush batch of events to server + */ + function flushBatch(): void { + if (eventBatch.length === 0) return + + const batch = eventBatch.splice(0, eventBatch.length) + if (batchTimer) { + clearTimeout(batchTimer) + batchTimer = null + } + + const sanitizedBatch = batch.map(sanitizeEvent) + + const payload = JSON.stringify({ + category: 'batch', + action: 'client_events', + events: sanitizedBatch, + timestamp: Date.now(), + }) + + const payloadSize = new Blob([payload]).size + const MAX_BEACON_SIZE = 64 * 1024 // 64KB + + if (navigator.sendBeacon && payloadSize < MAX_BEACON_SIZE) { + const sent = navigator.sendBeacon('/api/telemetry', payload) + + if (!sent) { + fetch('/api/telemetry', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: payload, + keepalive: true, + }).catch(() => { + // Silently fail + }) + } + } else { + fetch('/api/telemetry', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: payload, + keepalive: true, + }).catch(() => { + // Silently fail + }) + } + } + + window.addEventListener('beforeunload', flushBatch) + window.addEventListener('visibilitychange', () => { + if (document.visibilityState === 'hidden') { + flushBatch() + } + }) + + /** + * Global event tracking function + */ + ;(window as any).__SIM_TELEMETRY_ENABLED = telemetryEnabled ;(window as any).__SIM_TRACK_EVENT = (eventName: string, properties?: any) => { if (!telemetryEnabled) return - const safeProps = properties || {} - - const payload = { + addToBatch({ category: 'feature_usage', - action: eventName || 'unknown_event', + action: eventName, timestamp: Date.now(), - ...safeSerialize(safeProps), - } - - fetch('/api/telemetry', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify(payload), - }).catch(() => { - // Silently fail if sending metrics fails + ...(properties || {}), }) } if (telemetryEnabled) { - performance.mark('sim-studio-init') + const shouldTrackVitals = Math.random() < 0.1 - let telemetryConfig - try { - telemetryConfig = (window as any).__SIM_STUDIO_TELEMETRY_CONFIG || { - clientSide: { enabled: true }, - } - } catch (_e) { - telemetryConfig = { clientSide: { enabled: true } } + if (shouldTrackVitals) { + window.addEventListener( + 'load', + () => { + if (typeof PerformanceObserver !== 'undefined') { + const lcpObserver = new PerformanceObserver((list) => { + const entries = list.getEntries() + const lastEntry = entries[entries.length - 1] + + if (lastEntry) { + addToBatch({ + category: 'performance', + action: 'web_vital', + label: 'LCP', + value: (lastEntry as any).startTime || 0, + entryType: 'largest-contentful-paint', + timestamp: Date.now(), + }) + } + + lcpObserver.disconnect() + }) + + let clsValue = 0 + const clsObserver = new PerformanceObserver((list) => { + for 
(const entry of list.getEntries()) { + if (!(entry as any).hadRecentInput) { + clsValue += (entry as any).value || 0 + } + } + }) + + const fidObserver = new PerformanceObserver((list) => { + const entries = list.getEntries() + + for (const entry of entries) { + const fidValue = + ((entry as any).processingStart || 0) - ((entry as any).startTime || 0) + + addToBatch({ + category: 'performance', + action: 'web_vital', + label: 'FID', + value: fidValue, + entryType: 'first-input', + timestamp: Date.now(), + }) + } + + fidObserver.disconnect() + }) + + window.addEventListener('beforeunload', () => { + if (clsValue > 0) { + addToBatch({ + category: 'performance', + action: 'web_vital', + label: 'CLS', + value: clsValue, + entryType: 'layout-shift', + timestamp: Date.now(), + }) + } + clsObserver.disconnect() + }) + + lcpObserver.observe({ type: 'largest-contentful-paint', buffered: true }) + clsObserver.observe({ type: 'layout-shift', buffered: true }) + fidObserver.observe({ type: 'first-input', buffered: true }) + } + }, + { once: true } + ) } - window.addEventListener('load', () => { - performance.mark('sim-studio-loaded') - performance.measure('page-load', 'sim-studio-init', 'sim-studio-loaded') - - if (typeof PerformanceObserver !== 'undefined') { - const lcpObserver = new PerformanceObserver((list) => { - const entries = list.getEntries() - - entries.forEach((entry) => { - const value = - entry.entryType === 'largest-contentful-paint' - ? (entry as any).startTime - : (entry as any).value || 0 - - // Ensure we have non-null values for all fields - const metric = { - name: entry.name || 'unknown', - value: value || 0, - entryType: entry.entryType || 'unknown', - } - - if (telemetryEnabled && telemetryConfig?.clientSide?.enabled) { - const safePayload = { - category: 'performance', - action: 'web_vital', - label: metric.name, - value: metric.value, - entryType: metric.entryType, - timestamp: Date.now(), - } - - fetch('/api/telemetry', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify(safePayload), - }).catch(() => { - // Silently fail if sending metrics fails - }) - } - }) - - lcpObserver.disconnect() + window.addEventListener('error', (event) => { + if (telemetryEnabled && !event.defaultPrevented) { + addToBatch({ + category: 'error', + action: 'unhandled_error', + message: event.error?.message || event.message || 'Unknown error', + url: window.location.pathname, + timestamp: Date.now(), }) - - const clsObserver = new PerformanceObserver((list) => { - const entries = list.getEntries() - let clsValue = 0 - - entries.forEach((entry) => { - clsValue += (entry as any).value || 0 - }) - - if (telemetryEnabled && telemetryConfig?.clientSide?.enabled) { - const safePayload = { - category: 'performance', - action: 'web_vital', - label: 'CLS', - value: clsValue || 0, - entryType: 'layout-shift', - timestamp: Date.now(), - } - - fetch('/api/telemetry', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify(safePayload), - }).catch(() => { - // Silently fail if sending metrics fails - }) - } - - clsObserver.disconnect() - }) - - const fidObserver = new PerformanceObserver((list) => { - const entries = list.getEntries() - - entries.forEach((entry) => { - const processingStart = (entry as any).processingStart || 0 - const startTime = (entry as any).startTime || 0 - - const metric = { - name: entry.name || 'unknown', - value: processingStart - startTime, - entryType: entry.entryType || 'unknown', - } - - if 
(telemetryEnabled && telemetryConfig?.clientSide?.enabled) { - const safePayload = { - category: 'performance', - action: 'web_vital', - label: 'FID', - value: metric.value, - entryType: metric.entryType, - timestamp: Date.now(), - } - - fetch('/api/telemetry', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify(safePayload), - }).catch(() => { - // Silently fail if sending metrics fails - }) - } - }) - - fidObserver.disconnect() - }) - - lcpObserver.observe({ type: 'largest-contentful-paint', buffered: true }) - clsObserver.observe({ type: 'layout-shift', buffered: true }) - fidObserver.observe({ type: 'first-input', buffered: true }) } }) - window.addEventListener('error', (event) => { - if (telemetryEnabled && telemetryConfig?.clientSide?.enabled) { - const errorDetails = { - message: event.error?.message || 'Unknown error', - stack: event.error?.stack?.split('\n')[0] || '', + window.addEventListener('unhandledrejection', (event) => { + if (telemetryEnabled) { + addToBatch({ + category: 'error', + action: 'unhandled_rejection', + message: event.reason?.message || String(event.reason) || 'Unhandled promise rejection', url: window.location.pathname, timestamp: Date.now(), - } - - const safePayload = { - category: 'error', - action: 'client_error', - label: errorDetails.message, - stack: errorDetails.stack, - url: errorDetails.url, - timestamp: errorDetails.timestamp, - } - - fetch('/api/telemetry', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify(safePayload), - }).catch(() => { - // Silently fail if sending error fails }) } }) diff --git a/apps/sim/instrumentation-edge.ts b/apps/sim/instrumentation-edge.ts index fe839d738..3bd411316 100644 --- a/apps/sim/instrumentation-edge.ts +++ b/apps/sim/instrumentation-edge.ts @@ -11,10 +11,6 @@ const logger = createLogger('EdgeInstrumentation') export async function register() { try { - // Only Web API compatible code here - // No Node.js APIs like process.on, crypto, fs, etc. - - // Future: Add Edge Runtime compatible telemetry here logger.info('Edge Runtime instrumentation initialized') } catch (error) { logger.error('Failed to initialize Edge Runtime instrumentation', error) diff --git a/apps/sim/instrumentation-node.ts b/apps/sim/instrumentation-node.ts index 8194a5040..258d2c354 100644 --- a/apps/sim/instrumentation-node.ts +++ b/apps/sim/instrumentation-node.ts @@ -1,13 +1,14 @@ /** - * Sim Telemetry - Server-side Instrumentation - * - * This file contains all server-side instrumentation logic. 
+ * Sim OpenTelemetry - Server-side Instrumentation */ +import { DiagConsoleLogger, DiagLogLevel, diag } from '@opentelemetry/api' import { env } from './lib/env' -import { createLogger } from './lib/logs/console/logger.ts' +import { createLogger } from './lib/logs/console/logger' -const logger = createLogger('OtelInstrumentation') +diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.ERROR) + +const logger = createLogger('OTelInstrumentation') const DEFAULT_TELEMETRY_CONFIG = { endpoint: env.TELEMETRY_ENDPOINT || 'https://telemetry.simstudio.ai/v1/traces', @@ -15,68 +16,97 @@ const DEFAULT_TELEMETRY_CONFIG = { serviceVersion: '0.1.0', serverSide: { enabled: true }, batchSettings: { - maxQueueSize: 100, - maxExportBatchSize: 10, + maxQueueSize: 2048, + maxExportBatchSize: 512, scheduledDelayMillis: 5000, exportTimeoutMillis: 30000, }, } -// Initialize OpenTelemetry +/** + * Initialize OpenTelemetry SDK with proper configuration + */ async function initializeOpenTelemetry() { try { if (env.NEXT_TELEMETRY_DISABLED === '1') { - logger.info('OpenTelemetry telemetry disabled via environment variable') + logger.info('OpenTelemetry disabled via NEXT_TELEMETRY_DISABLED=1') return } let telemetryConfig try { - // Use dynamic import for ES modules - telemetryConfig = (await import('./telemetry.config.ts')).default - } catch (_e) { + telemetryConfig = (await import('./telemetry.config')).default + } catch { telemetryConfig = DEFAULT_TELEMETRY_CONFIG } if (telemetryConfig.serverSide?.enabled === false) { - logger.info('Server-side OpenTelemetry instrumentation is disabled in config') + logger.info('Server-side OpenTelemetry disabled in config') return } - // Dynamic imports for server-side libraries const { NodeSDK } = await import('@opentelemetry/sdk-node') - const { resourceFromAttributes } = await import('@opentelemetry/resources') - const { SemanticResourceAttributes } = await import('@opentelemetry/semantic-conventions') + const { defaultResource, resourceFromAttributes } = await import('@opentelemetry/resources') + const { ATTR_SERVICE_NAME, ATTR_SERVICE_VERSION, ATTR_DEPLOYMENT_ENVIRONMENT } = await import( + '@opentelemetry/semantic-conventions/incubating' + ) const { OTLPTraceExporter } = await import('@opentelemetry/exporter-trace-otlp-http') + const { BatchSpanProcessor } = await import('@opentelemetry/sdk-trace-node') + const { ParentBasedSampler, TraceIdRatioBasedSampler } = await import( + '@opentelemetry/sdk-trace-base' + ) const exporter = new OTLPTraceExporter({ url: telemetryConfig.endpoint, + headers: {}, + timeoutMillis: telemetryConfig.batchSettings.exportTimeoutMillis, }) - const configResource = resourceFromAttributes({ - [SemanticResourceAttributes.SERVICE_NAME]: telemetryConfig.serviceName, - [SemanticResourceAttributes.SERVICE_VERSION]: telemetryConfig.serviceVersion, - [SemanticResourceAttributes.DEPLOYMENT_ENVIRONMENT]: env.NODE_ENV, + const spanProcessor = new BatchSpanProcessor(exporter, { + maxQueueSize: telemetryConfig.batchSettings.maxQueueSize, + maxExportBatchSize: telemetryConfig.batchSettings.maxExportBatchSize, + scheduledDelayMillis: telemetryConfig.batchSettings.scheduledDelayMillis, + exportTimeoutMillis: telemetryConfig.batchSettings.exportTimeoutMillis, + }) + + const resource = defaultResource().merge( + resourceFromAttributes({ + [ATTR_SERVICE_NAME]: telemetryConfig.serviceName, + [ATTR_SERVICE_VERSION]: telemetryConfig.serviceVersion, + [ATTR_DEPLOYMENT_ENVIRONMENT]: env.NODE_ENV || 'development', + 'service.namespace': 'sim-ai-platform', + 
'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.language': 'nodejs', + 'telemetry.sdk.version': '1.0.0', + }) + ) + + const sampler = new ParentBasedSampler({ + root: new TraceIdRatioBasedSampler(0.1), // 10% sampling for root spans }) const sdk = new NodeSDK({ - resource: configResource, + resource, + spanProcessor, + sampler, traceExporter: exporter, }) sdk.start() const shutdownHandler = async () => { - await sdk - .shutdown() - .then(() => logger.info('OpenTelemetry SDK shut down successfully')) - .catch((err) => logger.error('Error shutting down OpenTelemetry SDK', err)) + try { + await sdk.shutdown() + logger.info('OpenTelemetry SDK shut down successfully') + } catch (err) { + logger.error('Error shutting down OpenTelemetry SDK', err) + } } process.on('SIGTERM', shutdownHandler) process.on('SIGINT', shutdownHandler) - logger.info('OpenTelemetry instrumentation initialized for server-side telemetry') + logger.info('OpenTelemetry instrumentation initialized') } catch (error) { logger.error('Failed to initialize OpenTelemetry instrumentation', error) } diff --git a/apps/sim/instrumentation.ts b/apps/sim/instrumentation.ts index 8687311f0..a561b258f 100644 --- a/apps/sim/instrumentation.ts +++ b/apps/sim/instrumentation.ts @@ -1,32 +1,28 @@ +/** + * OpenTelemetry Instrumentation Entry Point + * + * This is the main entry point for OpenTelemetry instrumentation. + * It delegates to runtime-specific instrumentation modules. + */ export async function register() { - console.log('[Main Instrumentation] register() called, environment:', { - NEXT_RUNTIME: process.env.NEXT_RUNTIME, - NODE_ENV: process.env.NODE_ENV, - }) - // Load Node.js-specific instrumentation if (process.env.NEXT_RUNTIME === 'nodejs') { - console.log('[Main Instrumentation] Loading Node.js instrumentation...') const nodeInstrumentation = await import('./instrumentation-node') if (nodeInstrumentation.register) { - console.log('[Main Instrumentation] Calling Node.js register()...') await nodeInstrumentation.register() } } // Load Edge Runtime-specific instrumentation if (process.env.NEXT_RUNTIME === 'edge') { - console.log('[Main Instrumentation] Loading Edge Runtime instrumentation...') const edgeInstrumentation = await import('./instrumentation-edge') if (edgeInstrumentation.register) { - console.log('[Main Instrumentation] Calling Edge Runtime register()...') await edgeInstrumentation.register() } } // Load client instrumentation if we're on the client if (typeof window !== 'undefined') { - console.log('[Main Instrumentation] Loading client instrumentation...') await import('./instrumentation-client') } } diff --git a/apps/sim/lib/logs/execution/logging-session.ts b/apps/sim/lib/logs/execution/logging-session.ts index 7fa58171a..25698241f 100644 --- a/apps/sim/lib/logs/execution/logging-session.ts +++ b/apps/sim/lib/logs/execution/logging-session.ts @@ -111,17 +111,50 @@ export class LoggingSession { try { const costSummary = calculateCostSummary(traceSpans || []) + const endTime = endedAt || new Date().toISOString() + const duration = totalDurationMs || 0 await executionLogger.completeWorkflowExecution({ executionId: this.executionId, - endedAt: endedAt || new Date().toISOString(), - totalDurationMs: totalDurationMs || 0, + endedAt: endTime, + totalDurationMs: duration, costSummary, finalOutput: finalOutput || {}, traceSpans: traceSpans || [], workflowInput, }) + // Track workflow execution outcome + if (traceSpans && traceSpans.length > 0) { + try { + const { trackPlatformEvent } = await 
import('@/lib/telemetry/tracer') + + // Determine status from trace spans + const hasErrors = traceSpans.some((span: any) => { + const checkForErrors = (s: any): boolean => { + if (s.status === 'error') return true + if (s.children && Array.isArray(s.children)) { + return s.children.some(checkForErrors) + } + return false + } + return checkForErrors(span) + }) + + trackPlatformEvent('platform.workflow.executed', { + 'workflow.id': this.workflowId, + 'execution.duration_ms': duration, + 'execution.status': hasErrors ? 'error' : 'success', + 'execution.trigger': this.triggerType, + 'execution.blocks_executed': traceSpans.length, + 'execution.has_errors': hasErrors, + 'execution.total_cost': costSummary.totalCost || 0, + }) + } catch (_e) { + // Silently fail + } + } + if (this.requestId) { logger.debug(`[${this.requestId}] Completed logging for execution ${this.executionId}`) } @@ -168,15 +201,33 @@ export class LoggingSession { output: { error: message }, } + const spans = hasProvidedSpans ? traceSpans : [errorSpan] + await executionLogger.completeWorkflowExecution({ executionId: this.executionId, endedAt: endTime.toISOString(), totalDurationMs: Math.max(1, durationMs), costSummary, finalOutput: { error: message }, - traceSpans: hasProvidedSpans ? traceSpans : [errorSpan], + traceSpans: spans, }) + // Track workflow execution error outcome + try { + const { trackPlatformEvent } = await import('@/lib/telemetry/tracer') + trackPlatformEvent('platform.workflow.executed', { + 'workflow.id': this.workflowId, + 'execution.duration_ms': Math.max(1, durationMs), + 'execution.status': 'error', + 'execution.trigger': this.triggerType, + 'execution.blocks_executed': spans.length, + 'execution.has_errors': true, + 'execution.error_message': message, + }) + } catch (_e) { + // Silently fail + } + if (this.requestId) { logger.debug(`[${this.requestId}] Completed logging for execution ${this.executionId}`) } diff --git a/apps/sim/lib/logs/execution/trace-spans/trace-spans.ts b/apps/sim/lib/logs/execution/trace-spans/trace-spans.ts index 31020d316..891bcdc2e 100644 --- a/apps/sim/lib/logs/execution/trace-spans/trace-spans.ts +++ b/apps/sim/lib/logs/execution/trace-spans/trace-spans.ts @@ -1,5 +1,5 @@ import { createLogger } from '@/lib/logs/console/logger' -import type { TraceSpan } from '@/lib/logs/types' +import type { ToolCall, TraceSpan } from '@/lib/logs/types' import { isWorkflowBlockType } from '@/executor/consts' import type { ExecutionResult } from '@/executor/types' @@ -58,29 +58,21 @@ function mergeTraceSpanChildren(...childGroups: TraceSpan[][]): TraceSpan[] { return merged } -// Helper function to build a tree of trace spans from execution logs export function buildTraceSpans(result: ExecutionResult): { traceSpans: TraceSpan[] totalDuration: number } { - // If no logs, return empty spans if (!result.logs || result.logs.length === 0) { return { traceSpans: [], totalDuration: 0 } } - // Store all spans as a map for faster lookup const spanMap = new Map() - // Create a map to track parent-child relationships from workflow structure - // This helps distinguish between actual parent-child relationships vs parallel execution const parentChildMap = new Map() - // If we have workflow information in the logs, extract parent-child relationships - // Define connection type inline for now type Connection = { source: string; target: string } const workflowConnections: Connection[] = result.metadata?.workflowConnections || [] if (workflowConnections.length > 0) { - // Build the connection map from workflow 
connections workflowConnections.forEach((conn: Connection) => { if (conn.source && conn.target) { parentChildMap.set(conn.target, conn.source) @@ -88,21 +80,15 @@ export function buildTraceSpans(result: ExecutionResult): { }) } - // First pass: Create spans for each block result.logs.forEach((log) => { - // Skip logs that don't have block execution information if (!log.blockId || !log.blockType) return - // Create a unique ID for this span using blockId and timestamp const spanId = `${log.blockId}-${new Date(log.startedAt).getTime()}` - // Extract duration if available const duration = log.durationMs || 0 - // Create the span let output = log.output || {} - // If there's an error, include it in the output if (log.error) { output = { ...output, @@ -110,7 +96,6 @@ export function buildTraceSpans(result: ExecutionResult): { } } - // Use block name consistently for all block types const displayName = log.blockName || log.blockId const span: TraceSpan = { @@ -122,21 +107,26 @@ export function buildTraceSpans(result: ExecutionResult): { endTime: log.endedAt, status: log.error ? 'error' : 'success', children: [], - // Store the block ID for later use in identifying direct parent-child relationships blockId: log.blockId, - // Include block input/output data input: log.input || {}, output: output, } - // Add provider timing data if it exists if (log.output?.providerTiming) { - const providerTiming = log.output.providerTiming + const providerTiming = log.output.providerTiming as { + duration: number + startTime: string + endTime: string + timeSegments?: Array<{ + type: string + name?: string + startTime: string | number + endTime: string | number + duration: number + }> + } - // Store provider timing as metadata instead of creating child spans - // This keeps the UI cleaner while preserving timing information - - ;(span as any).providerTiming = { + span.providerTiming = { duration: providerTiming.duration, startTime: providerTiming.startTime, endTime: providerTiming.endTime, @@ -144,22 +134,48 @@ export function buildTraceSpans(result: ExecutionResult): { } } - // Always add cost, token, and model information if available (regardless of provider timing) if (log.output?.cost) { - ;(span as any).cost = log.output.cost + span.cost = log.output.cost as { + input?: number + output?: number + total?: number + } } if (log.output?.tokens) { - ;(span as any).tokens = log.output.tokens + const t = log.output.tokens as + | number + | { + input?: number + output?: number + total?: number + prompt?: number + completion?: number + } + if (typeof t === 'number') { + span.tokens = t + } else if (typeof t === 'object') { + const input = t.input ?? t.prompt + const output = t.output ?? t.completion + const total = + t.total ?? + (typeof input === 'number' || typeof output === 'number' + ? (input || 0) + (output || 0) + : undefined) + span.tokens = { + ...(typeof input === 'number' ? { input } : {}), + ...(typeof output === 'number' ? { output } : {}), + ...(typeof total === 'number' ? 
{ total } : {}), + } + } else { + span.tokens = t + } } if (log.output?.model) { - ;(span as any).model = log.output.model + span.model = log.output.model as string } - // Enhanced approach: Use timeSegments for sequential flow if available - // This provides the actual model→tool→model execution sequence - // Skip for workflow blocks since they will be processed via output.childTraceSpans at the end if ( !isWorkflowBlockType(log.blockType) && log.output?.providerTiming?.timeSegments && @@ -168,70 +184,78 @@ export function buildTraceSpans(result: ExecutionResult): { const timeSegments = log.output.providerTiming.timeSegments const toolCallsData = log.output?.toolCalls?.list || log.output?.toolCalls || [] - // Create child spans for each time segment - span.children = timeSegments.map((segment: any, index: number) => { - const segmentStartTime = new Date(segment.startTime).toISOString() - const segmentEndTime = new Date(segment.endTime).toISOString() + span.children = timeSegments.map( + ( + segment: { + type: string + name?: string + startTime: string | number + endTime: string | number + duration: number + }, + index: number + ) => { + const segmentStartTime = new Date(segment.startTime).toISOString() + let segmentEndTime = new Date(segment.endTime).toISOString() + let segmentDuration = segment.duration - if (segment.type === 'tool') { - // Find matching tool call data for this segment - const matchingToolCall = toolCallsData.find( - (tc: any) => tc.name === segment.name || stripCustomToolPrefix(tc.name) === segment.name - ) + if (segment.name?.toLowerCase().includes('streaming') && log.endedAt) { + const blockEndTime = new Date(log.endedAt).getTime() + const segmentEndTimeMs = new Date(segment.endTime).getTime() + if (blockEndTime > segmentEndTimeMs) { + segmentEndTime = log.endedAt + segmentDuration = blockEndTime - new Date(segment.startTime).getTime() + } + } + + if (segment.type === 'tool') { + const matchingToolCall = toolCallsData.find( + (tc: { name?: string; [key: string]: unknown }) => + tc.name === segment.name || stripCustomToolPrefix(tc.name || '') === segment.name + ) + + return { + id: `${span.id}-segment-${index}`, + name: stripCustomToolPrefix(segment.name || ''), + type: 'tool', + duration: segment.duration, + startTime: segmentStartTime, + endTime: segmentEndTime, + status: matchingToolCall?.error ? 'error' : 'success', + input: matchingToolCall?.arguments || matchingToolCall?.input, + output: matchingToolCall?.error + ? { + error: matchingToolCall.error, + ...(matchingToolCall.result || matchingToolCall.output || {}), + } + : matchingToolCall?.result || matchingToolCall?.output, + } + } return { id: `${span.id}-segment-${index}`, - name: stripCustomToolPrefix(segment.name), - type: 'tool', - duration: segment.duration, + name: segment.name, + type: 'model', + duration: segmentDuration, startTime: segmentStartTime, endTime: segmentEndTime, - status: matchingToolCall?.error ? 'error' : 'success', - input: matchingToolCall?.arguments || matchingToolCall?.input, - output: matchingToolCall?.error - ? 
{ - error: matchingToolCall.error, - ...(matchingToolCall.result || matchingToolCall.output || {}), - } - : matchingToolCall?.result || matchingToolCall?.output, + status: 'success', } } - // Model segment - return { - id: `${span.id}-segment-${index}`, - name: segment.name, - type: 'model', - duration: segment.duration, - startTime: segmentStartTime, - endTime: segmentEndTime, - status: 'success', - } - }) + ) } else { - // Fallback: Extract tool calls using the original approach for backwards compatibility - // Tool calls handling for different formats: - // 1. Standard format in response.toolCalls.list - // 2. Direct toolCalls array in response - // 3. Streaming response formats with executionData - - // Check all possible paths for toolCalls let toolCallsList = null - // Wrap extraction in try-catch to handle unexpected toolCalls formats try { if (log.output?.toolCalls?.list) { - // Standard format with list property toolCallsList = log.output.toolCalls.list } else if (Array.isArray(log.output?.toolCalls)) { - // Direct array format toolCallsList = log.output.toolCalls } else if (log.output?.executionData?.output?.toolCalls) { - // Streaming format with executionData const tcObj = log.output.executionData.output.toolCalls toolCallsList = Array.isArray(tcObj) ? tcObj : tcObj.list || [] } - // Validate that toolCallsList is actually an array before processing if (toolCallsList && !Array.isArray(toolCallsList)) { logger.warn(`toolCallsList is not an array: ${typeof toolCallsList}`, { blockId: log.blockId, @@ -241,36 +265,56 @@ export function buildTraceSpans(result: ExecutionResult): { } } catch (error) { logger.error(`Error extracting toolCalls from block ${log.blockId}:`, error) - toolCallsList = [] // Set to empty array as fallback + toolCallsList = [] } if (toolCallsList && toolCallsList.length > 0) { - span.toolCalls = toolCallsList - .map((tc: any) => { - // Add null check for each tool call - if (!tc) return null + const processedToolCalls: ToolCall[] = [] - try { - return { - name: stripCustomToolPrefix(tc.name || 'unnamed-tool'), - duration: tc.duration || 0, - startTime: tc.startTime || log.startedAt, - endTime: tc.endTime || log.endedAt, - status: tc.error ? 'error' : 'success', - input: tc.arguments || tc.input, - output: tc.result || tc.output, - error: tc.error, - } - } catch (tcError) { - logger.error(`Error processing tool call in block ${log.blockId}:`, tcError) - return null + for (const tc of toolCallsList as Array<{ + name?: string + duration?: number + startTime?: string + endTime?: string + error?: string + arguments?: Record + input?: Record + result?: Record + output?: Record + }>) { + if (!tc) continue + + try { + const toolCall: ToolCall = { + name: stripCustomToolPrefix(tc.name || 'unnamed-tool'), + duration: tc.duration || 0, + startTime: tc.startTime || log.startedAt, + endTime: tc.endTime || log.endedAt, + status: tc.error ? 
'error' : 'success', } - }) - .filter(Boolean) // Remove any null entries from failed processing + + if (tc.arguments || tc.input) { + toolCall.input = tc.arguments || tc.input + } + + if (tc.result || tc.output) { + toolCall.output = tc.result || tc.output + } + + if (tc.error) { + toolCall.error = tc.error + } + + processedToolCalls.push(toolCall) + } catch (tcError) { + logger.error(`Error processing tool call in block ${log.blockId}:`, tcError) + } + } + + span.toolCalls = processedToolCalls } } - // Handle child workflow spans for workflow blocks - process at the end to avoid being overwritten if ( isWorkflowBlockType(log.blockType) && log.output?.childTraceSpans && @@ -281,14 +325,9 @@ export function buildTraceSpans(result: ExecutionResult): { span.children = mergeTraceSpanChildren(span.children || [], flattenedChildren) } - // Store in map spanMap.set(spanId, span) }) - // Second pass: Build a flat hierarchy for sequential workflow execution - // For most workflows, blocks execute sequentially and should be shown at the same level - // Only nest blocks that are truly hierarchical (like subflows, loops, etc.) - const sortedLogs = [...result.logs].sort((a, b) => { const aTime = new Date(a.startedAt).getTime() const bTime = new Date(b.startedAt).getTime() @@ -297,8 +336,6 @@ export function buildTraceSpans(result: ExecutionResult): { const rootSpans: TraceSpan[] = [] - // For now, treat all blocks as top-level spans in execution order - // This gives a cleaner, more intuitive view of workflow execution sortedLogs.forEach((log) => { if (!log.blockId) return @@ -310,10 +347,8 @@ export function buildTraceSpans(result: ExecutionResult): { }) if (rootSpans.length === 0 && workflowConnections.length === 0) { - // Track parent spans using a stack const spanStack: TraceSpan[] = [] - // Process logs to build time-based hierarchy (original approach) sortedLogs.forEach((log) => { if (!log.blockId || !log.blockType) return @@ -321,20 +356,16 @@ export function buildTraceSpans(result: ExecutionResult): { const span = spanMap.get(spanId) if (!span) return - // If we have a non-empty stack, check if this span should be a child if (spanStack.length > 0) { const potentialParent = spanStack[spanStack.length - 1] const parentStartTime = new Date(potentialParent.startTime).getTime() const parentEndTime = new Date(potentialParent.endTime).getTime() const spanStartTime = new Date(span.startTime).getTime() - // If this span starts after the parent starts and the parent is still on the stack, - // we'll assume it's a child span if (spanStartTime >= parentStartTime && spanStartTime <= parentEndTime) { if (!potentialParent.children) potentialParent.children = [] potentialParent.children.push(span) } else { - // This span doesn't belong to the current parent, pop from stack while ( spanStack.length > 0 && new Date(spanStack[spanStack.length - 1].endTime).getTime() < spanStartTime @@ -342,22 +373,18 @@ export function buildTraceSpans(result: ExecutionResult): { spanStack.pop() } - // Check if we still have a parent if (spanStack.length > 0) { const newParent = spanStack[spanStack.length - 1] if (!newParent.children) newParent.children = [] newParent.children.push(span) } else { - // No parent, this is a root span rootSpans.push(span) } } } else { - // Empty stack, this is a root span rootSpans.push(span) } - // Check if this span could be a parent to future spans if (log.blockType === 'agent' || isWorkflowBlockType(log.blockType)) { spanStack.push(span) } @@ -383,6 +410,16 @@ export function 
buildTraceSpans(result: ExecutionResult): { const actualWorkflowDuration = latestEnd - earliestStart + const addRelativeTimestamps = (spans: TraceSpan[], workflowStartMs: number) => { + spans.forEach((span) => { + span.relativeStartMs = new Date(span.startTime).getTime() - workflowStartMs + if (span.children && span.children.length > 0) { + addRelativeTimestamps(span.children, workflowStartMs) + } + }) + } + addRelativeTimestamps(groupedRootSpans, earliestStart) + const hasErrors = groupedRootSpans.some((span) => { if (span.status === 'error') return true const checkChildren = (children: TraceSpan[] = []): boolean => { diff --git a/apps/sim/lib/logs/types.ts b/apps/sim/lib/logs/types.ts index 37a300d2c..fddf97576 100644 --- a/apps/sim/lib/logs/types.ts +++ b/apps/sim/lib/logs/types.ts @@ -35,8 +35,8 @@ export interface ToolCall { startTime: string endTime: string status: 'success' | 'error' - input: Record - output: Record + input?: Record + output?: Record error?: string } @@ -133,6 +133,27 @@ export interface WorkflowExecutionLog { export type WorkflowExecutionLogInsert = Omit export type WorkflowExecutionLogSelect = WorkflowExecutionLog +export interface TokenInfo { + input?: number + output?: number + total?: number + prompt?: number + completion?: number +} + +export interface ProviderTiming { + duration: number + startTime: string + endTime: string + segments: Array<{ + type: string + name?: string + startTime: string | number + endTime: string | number + duration: number + }> +} + export interface TraceSpan { id: string name: string @@ -143,11 +164,18 @@ export interface TraceSpan { children?: TraceSpan[] toolCalls?: ToolCall[] status?: 'success' | 'error' - tokens?: number + tokens?: number | TokenInfo relativeStartMs?: number blockId?: string input?: Record output?: Record + model?: string + cost?: { + input?: number + output?: number + total?: number + } + providerTiming?: ProviderTiming } export interface WorkflowExecutionSummary { diff --git a/apps/sim/lib/telemetry.ts b/apps/sim/lib/telemetry.ts deleted file mode 100644 index 8121140e0..000000000 --- a/apps/sim/lib/telemetry.ts +++ /dev/null @@ -1,284 +0,0 @@ -/** - * Sim Telemetry - * - * This file can be customized in forked repositories: - * - Set TELEMETRY_ENDPOINT in telemetry.config.ts to your collector - * - Modify allowed event categories as needed - * - Edit disclosure text to match your privacy policy - * - * Please maintain ethical telemetry practices if modified. 
- */ -import { DiagConsoleLogger, DiagLogLevel, diag } from '@opentelemetry/api' -import { env } from '@/lib/env' -import { isProd } from '@/lib/environment' -import { createLogger } from '@/lib/logs/console/logger' - -diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.ERROR) - -const logger = createLogger('Telemetry') - -export type TelemetryEvent = { - name: string - properties?: Record -} - -export type TelemetryStatus = { - enabled: boolean -} - -const TELEMETRY_STATUS_KEY = 'simstudio-telemetry-status' - -let telemetryConfig = { - endpoint: env.TELEMETRY_ENDPOINT || 'https://telemetry.simstudio.ai/v1/traces', - serviceName: 'sim-studio', - serviceVersion: '0.1.0', -} - -if (typeof window !== 'undefined' && (window as any).__SIM_STUDIO_TELEMETRY_CONFIG) { - telemetryConfig = { ...telemetryConfig, ...(window as any).__SIM_STUDIO_TELEMETRY_CONFIG } -} - -let telemetryInitialized = false - -/** - * Gets the current telemetry status from localStorage - */ -export function getTelemetryStatus(): TelemetryStatus { - if (typeof window === 'undefined') { - return { enabled: true } - } - - try { - if (env.NEXT_TELEMETRY_DISABLED === '1') { - return { enabled: false } - } - - const stored = localStorage.getItem(TELEMETRY_STATUS_KEY) - return stored ? JSON.parse(stored) : { enabled: true } - } catch (error) { - logger.error('Failed to get telemetry status from localStorage', error) - return { enabled: true } - } -} - -/** - * Sets the telemetry status in localStorage - */ -export function setTelemetryStatus(status: TelemetryStatus): void { - if (typeof window === 'undefined') { - return - } - - try { - localStorage.setItem(TELEMETRY_STATUS_KEY, JSON.stringify(status)) - - if (status.enabled && !telemetryInitialized) { - initializeClientTelemetry() - } - } catch (error) { - logger.error('Failed to set telemetry status in localStorage', error) - } -} - -/** - * Disables telemetry - */ -export function disableTelemetry(): void { - const currentStatus = getTelemetryStatus() - if (currentStatus.enabled) { - trackEvent('consent', 'opt_out') - } - - setTelemetryStatus({ enabled: false }) - logger.info('Telemetry disabled') -} - -/** - * Enables telemetry - */ -export function enableTelemetry(): void { - if (env.NEXT_TELEMETRY_DISABLED === '1') { - logger.info('Telemetry disabled by environment variable, cannot enable') - return - } - - const currentStatus = getTelemetryStatus() - if (!currentStatus.enabled) { - trackEvent('consent', 'opt_in') - } - - setTelemetryStatus({ enabled: true }) - logger.info('Telemetry enabled') - - if (!telemetryInitialized) { - initializeClientTelemetry() - } -} - -/** - * Initialize client-side telemetry without OpenTelemetry SDK - * This approach uses direct event tracking instead of the OTel SDK - * to avoid TypeScript compatibility issues while still collecting useful data - */ -function initializeClientTelemetry(): void { - if (typeof window === 'undefined' || telemetryInitialized) { - return - } - - try { - const clientSideEnabled = - (window as any).__SIM_STUDIO_TELEMETRY_CONFIG?.clientSide?.enabled !== false - - if (!clientSideEnabled) { - logger.info('Client-side telemetry disabled in configuration') - return - } - - if (isProd) { - trackEvent('page_view', window.location.pathname) - - if (typeof window.history !== 'undefined') { - const originalPushState = window.history.pushState - window.history.pushState = function (...args) { - const result = originalPushState.apply(this, args) - trackEvent('page_view', window.location.pathname) - return result - } - } - - if 
(typeof window.performance !== 'undefined') { - window.addEventListener('load', () => { - setTimeout(() => { - if (performance.getEntriesByType) { - const navigationTiming = performance.getEntriesByType( - 'navigation' - )[0] as PerformanceNavigationTiming - if (navigationTiming) { - trackEvent( - 'performance', - 'page_load', - window.location.pathname, - navigationTiming.loadEventEnd - navigationTiming.startTime - ) - } - - const lcpEntries = performance - .getEntriesByType('paint') - .filter((entry) => entry.name === 'largest-contentful-paint') - if (lcpEntries.length > 0) { - trackEvent( - 'performance', - 'largest_contentful_paint', - window.location.pathname, - lcpEntries[0].startTime - ) - } - } - }, 0) - }) - } - - document.addEventListener( - 'click', - (e) => { - let target = e.target as HTMLElement | null - let telemetryAction = null - - while (target && !telemetryAction) { - telemetryAction = target.getAttribute('data-telemetry') - if (!telemetryAction) { - target = target.parentElement - } - } - - if (telemetryAction) { - trackEvent('feature_usage', telemetryAction) - } - }, - { passive: true } - ) - - document.addEventListener( - 'submit', - (e) => { - const form = e.target as HTMLFormElement - const telemetryAction = form.getAttribute('data-telemetry') - if (telemetryAction) { - trackEvent('feature_usage', telemetryAction) - } - }, - { passive: true } - ) - - window.addEventListener( - 'error', - (event) => { - const errorDetails = { - message: event.error?.message || 'Unknown error', - stack: event.error?.stack?.split('\n')[0] || '', - url: window.location.pathname, - } - trackEvent('error', 'client_error', errorDetails.message) - }, - { passive: true } - ) - - window.addEventListener( - 'unhandledrejection', - (event) => { - const errorDetails = { - message: event.reason?.message || String(event.reason) || 'Unhandled promise rejection', - url: window.location.pathname, - } - trackEvent('error', 'unhandled_rejection', errorDetails.message) - }, - { passive: true } - ) - - logger.info('Enhanced client-side telemetry initialized') - telemetryInitialized = true - } - } catch (error) { - logger.error('Failed to initialize client-side telemetry', error) - } -} - -/** - * Track a telemetry event - */ -export async function trackEvent( - category: string, - action: string, - label?: string, - value?: number -): Promise { - const status = getTelemetryStatus() - - if (!status.enabled) return - - try { - if (isProd) { - await fetch('/api/telemetry', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - category, - action, - label, - value, - timestamp: new Date().toISOString(), - service: telemetryConfig.serviceName, - version: telemetryConfig.serviceVersion, - userAgent: typeof navigator !== 'undefined' ? navigator.userAgent : undefined, - url: typeof window !== 'undefined' ? window.location.pathname : undefined, - }), - }) - } else { - if (category === 'consent') { - logger.debug('Telemetry consent change', { action }) - } - } - } catch (error) { - logger.error('Failed to track telemetry event', error) - } -} diff --git a/apps/sim/lib/telemetry/tracer.ts b/apps/sim/lib/telemetry/tracer.ts new file mode 100644 index 000000000..cf4356075 --- /dev/null +++ b/apps/sim/lib/telemetry/tracer.ts @@ -0,0 +1,451 @@ +/** + * OpenTelemetry Integration for Sim Execution Pipeline + * + * This module integrates OpenTelemetry tracing with the existing execution logging system. 
+ * It converts TraceSpans and BlockLogs into proper OpenTelemetry spans with semantic conventions. + * + * Architecture: + * - LoggingSession tracks workflow execution start/complete + * - Executor generates BlockLogs for each block execution + * - TraceSpans are built from BlockLogs + * - This module converts TraceSpans -> OpenTelemetry Spans + * + * Integration Points: + * 1. LoggingSession.start() -> Create root workflow span + * 2. LoggingSession.complete() -> End workflow span with all block spans as children + * 3. Block execution -> Create span for each block type with proper attributes + */ + +import { context, type Span, SpanStatusCode, trace } from '@opentelemetry/api' +import { createLogger } from '@/lib/logs/console/logger' +import type { TraceSpan } from '@/lib/logs/types' + +/** + * GenAI Semantic Convention Attributes + */ +const GenAIAttributes = { + // System attributes + SYSTEM: 'gen_ai.system', + REQUEST_MODEL: 'gen_ai.request.model', + RESPONSE_MODEL: 'gen_ai.response.model', + + // Token usage + USAGE_INPUT_TOKENS: 'gen_ai.usage.input_tokens', + USAGE_OUTPUT_TOKENS: 'gen_ai.usage.output_tokens', + USAGE_TOTAL_TOKENS: 'gen_ai.usage.total_tokens', + + // Request/Response + REQUEST_TEMPERATURE: 'gen_ai.request.temperature', + REQUEST_TOP_P: 'gen_ai.request.top_p', + REQUEST_MAX_TOKENS: 'gen_ai.request.max_tokens', + RESPONSE_FINISH_REASON: 'gen_ai.response.finish_reason', + + // Agent-specific + AGENT_ID: 'gen_ai.agent.id', + AGENT_NAME: 'gen_ai.agent.name', + AGENT_TASK: 'gen_ai.agent.task', + + // Workflow-specific + WORKFLOW_ID: 'gen_ai.workflow.id', + WORKFLOW_NAME: 'gen_ai.workflow.name', + WORKFLOW_VERSION: 'gen_ai.workflow.version', + WORKFLOW_EXECUTION_ID: 'gen_ai.workflow.execution_id', + + // Tool-specific + TOOL_NAME: 'gen_ai.tool.name', + TOOL_DESCRIPTION: 'gen_ai.tool.description', + + // Cost tracking + COST_TOTAL: 'gen_ai.cost.total', + COST_INPUT: 'gen_ai.cost.input', + COST_OUTPUT: 'gen_ai.cost.output', +} + +const logger = createLogger('OTelIntegration') + +// Lazy-load tracer +let _tracer: ReturnType | null = null + +function getTracer() { + if (!_tracer) { + _tracer = trace.getTracer('sim-ai-platform', '1.0.0') + } + return _tracer +} + +/** + * Map block types to OpenTelemetry span kinds and semantic conventions + */ +const BLOCK_TYPE_MAPPING: Record< + string, + { + spanName: string + spanKind: string + getAttributes: (traceSpan: TraceSpan) => Record + } +> = { + agent: { + spanName: 'gen_ai.agent.execute', + spanKind: 'gen_ai.agent', + getAttributes: (span) => { + const attrs: Record = { + [GenAIAttributes.AGENT_ID]: span.blockId || span.id, + [GenAIAttributes.AGENT_NAME]: span.name, + } + + if (span.model) { + attrs[GenAIAttributes.REQUEST_MODEL] = span.model + } + + if (span.tokens) { + if (typeof span.tokens === 'number') { + attrs[GenAIAttributes.USAGE_TOTAL_TOKENS] = span.tokens + } else { + attrs[GenAIAttributes.USAGE_INPUT_TOKENS] = span.tokens.input || span.tokens.prompt || 0 + attrs[GenAIAttributes.USAGE_OUTPUT_TOKENS] = + span.tokens.output || span.tokens.completion || 0 + attrs[GenAIAttributes.USAGE_TOTAL_TOKENS] = span.tokens.total || 0 + } + } + + if (span.cost) { + attrs[GenAIAttributes.COST_INPUT] = span.cost.input || 0 + attrs[GenAIAttributes.COST_OUTPUT] = span.cost.output || 0 + attrs[GenAIAttributes.COST_TOTAL] = span.cost.total || 0 + } + + return attrs + }, + }, + workflow: { + spanName: 'gen_ai.workflow.execute', + spanKind: 'gen_ai.workflow', + getAttributes: (span) => ({ + [GenAIAttributes.WORKFLOW_ID]: span.blockId || 
+      [GenAIAttributes.WORKFLOW_NAME]: span.name,
+    }),
+  },
+  tool: {
+    spanName: 'gen_ai.tool.call',
+    spanKind: 'gen_ai.tool',
+    getAttributes: (span) => ({
+      [GenAIAttributes.TOOL_NAME]: span.name,
+      'tool.id': span.id,
+      'tool.duration_ms': span.duration,
+    }),
+  },
+  model: {
+    spanName: 'gen_ai.model.request',
+    spanKind: 'gen_ai.model',
+    getAttributes: (span) => ({
+      'model.name': span.name,
+      'model.id': span.id,
+      'model.duration_ms': span.duration,
+    }),
+  },
+  api: {
+    spanName: 'http.client.request',
+    spanKind: 'http.client',
+    getAttributes: (span) => {
+      const input = span.input as { method?: string; url?: string } | undefined
+      const output = span.output as { status?: number } | undefined
+      return {
+        'http.request.method': input?.method || 'GET',
+        'http.request.url': input?.url || '',
+        'http.response.status_code': output?.status || 0,
+        'block.id': span.blockId,
+        'block.name': span.name,
+      }
+    },
+  },
+  function: {
+    spanName: 'function.execute',
+    spanKind: 'function',
+    getAttributes: (span) => ({
+      'function.name': span.name,
+      'function.id': span.blockId,
+      'function.execution_time_ms': span.duration,
+    }),
+  },
+  router: {
+    spanName: 'router.evaluate',
+    spanKind: 'router',
+    getAttributes: (span) => {
+      const output = span.output as { selectedPath?: { blockId?: string } } | undefined
+      return {
+        'router.name': span.name,
+        'router.id': span.blockId,
+        'router.selected_path': output?.selectedPath?.blockId,
+      }
+    },
+  },
+  condition: {
+    spanName: 'condition.evaluate',
+    spanKind: 'condition',
+    getAttributes: (span) => {
+      const output = span.output as { conditionResult?: boolean | string } | undefined
+      return {
+        'condition.name': span.name,
+        'condition.id': span.blockId,
+        'condition.result': output?.conditionResult,
+      }
+    },
+  },
+  loop: {
+    spanName: 'loop.execute',
+    spanKind: 'loop',
+    getAttributes: (span) => ({
+      'loop.name': span.name,
+      'loop.id': span.blockId,
+      'loop.iterations': span.children?.length || 0,
+    }),
+  },
+  parallel: {
+    spanName: 'parallel.execute',
+    spanKind: 'parallel',
+    getAttributes: (span) => ({
+      'parallel.name': span.name,
+      'parallel.id': span.blockId,
+      'parallel.branches': span.children?.length || 0,
+    }),
+  },
+}
+
+/**
+ * Convert a TraceSpan to an OpenTelemetry span
+ * Creates a proper OTel span with all the metadata from the trace span
+ */
+export function createOTelSpanFromTraceSpan(traceSpan: TraceSpan, parentSpan?: Span): Span | null {
+  try {
+    const tracer = getTracer()
+
+    const blockMapping = BLOCK_TYPE_MAPPING[traceSpan.type] || {
+      spanName: `block.${traceSpan.type}`,
+      spanKind: 'internal',
+      getAttributes: (span: TraceSpan) => ({
+        'block.type': span.type,
+        'block.id': span.blockId,
+        'block.name': span.name,
+      }),
+    }
+
+    const attributes = {
+      ...blockMapping.getAttributes(traceSpan),
+      'span.type': traceSpan.type,
+      'span.duration_ms': traceSpan.duration,
+      'span.status': traceSpan.status,
+    }
+
+    const ctx = parentSpan ? trace.setSpan(context.active(), parentSpan) : context.active()
+
+    const span = tracer.startSpan(
+      blockMapping.spanName,
+      {
+        attributes,
+        startTime: new Date(traceSpan.startTime),
+      },
+      ctx
+    )
+
+    if (traceSpan.status === 'error') {
+      const errorMessage =
+        typeof traceSpan.output?.error === 'string'
+          ? traceSpan.output.error
+          : 'Block execution failed'
+
+      span.setStatus({
+        code: SpanStatusCode.ERROR,
+        message: errorMessage,
+      })
+
+      if (errorMessage && errorMessage !== 'Block execution failed') {
+        span.recordException(new Error(errorMessage))
+      }
+    } else {
+      span.setStatus({ code: SpanStatusCode.OK })
+    }
+
+    if (traceSpan.children && traceSpan.children.length > 0) {
+      for (const childTraceSpan of traceSpan.children) {
+        createOTelSpanFromTraceSpan(childTraceSpan, span)
+      }
+    }
+
+    if (traceSpan.toolCalls && traceSpan.toolCalls.length > 0) {
+      for (const toolCall of traceSpan.toolCalls) {
+        const toolSpan = tracer.startSpan(
+          'gen_ai.tool.call',
+          {
+            attributes: {
+              [GenAIAttributes.TOOL_NAME]: toolCall.name,
+              'tool.status': toolCall.status,
+              'tool.duration_ms': toolCall.duration || 0,
+            },
+            startTime: new Date(toolCall.startTime),
+          },
+          trace.setSpan(context.active(), span)
+        )
+
+        if (toolCall.status === 'error') {
+          toolSpan.setStatus({
+            code: SpanStatusCode.ERROR,
+            message: toolCall.error || 'Tool call failed',
+          })
+          if (toolCall.error) {
+            toolSpan.recordException(new Error(toolCall.error))
+          }
+        } else {
+          toolSpan.setStatus({ code: SpanStatusCode.OK })
+        }
+
+        toolSpan.end(new Date(toolCall.endTime))
+      }
+    }
+
+    span.end(new Date(traceSpan.endTime))
+
+    return span
+  } catch (error) {
+    logger.error('Failed to create OTel span from trace span', {
+      error,
+      traceSpanId: traceSpan.id,
+      traceSpanType: traceSpan.type,
+    })
+    return null
+  }
+}
+
+/**
+ * Create OpenTelemetry spans for an entire workflow execution
+ * This is called from LoggingSession.complete() with the final trace spans
+ */
+export function createOTelSpansForWorkflowExecution(params: {
+  workflowId: string
+  workflowName?: string
+  executionId: string
+  traceSpans: TraceSpan[]
+  trigger: string
+  startTime: string
+  endTime: string
+  totalDurationMs: number
+  status: 'success' | 'error'
+  error?: string
+}): void {
+  try {
+    const tracer = getTracer()
+
+    const rootSpan = tracer.startSpan(
+      'gen_ai.workflow.execute',
+      {
+        attributes: {
+          [GenAIAttributes.WORKFLOW_ID]: params.workflowId,
+          [GenAIAttributes.WORKFLOW_NAME]: params.workflowName || params.workflowId,
+          [GenAIAttributes.WORKFLOW_EXECUTION_ID]: params.executionId,
+          'workflow.trigger': params.trigger,
+          'workflow.duration_ms': params.totalDurationMs,
+        },
+        startTime: new Date(params.startTime),
+      },
+      context.active()
+    )
+
+    if (params.status === 'error') {
+      rootSpan.setStatus({
+        code: SpanStatusCode.ERROR,
+        message: params.error || 'Workflow execution failed',
+      })
+      if (params.error) {
+        rootSpan.recordException(new Error(params.error))
+      }
+    } else {
+      rootSpan.setStatus({ code: SpanStatusCode.OK })
+    }
+
+    for (const traceSpan of params.traceSpans) {
+      createOTelSpanFromTraceSpan(traceSpan, rootSpan)
+    }
+
+    rootSpan.end(new Date(params.endTime))
+
+    logger.debug('Created OTel spans for workflow execution', {
+      workflowId: params.workflowId,
+      executionId: params.executionId,
+      spanCount: params.traceSpans.length,
+    })
+  } catch (error) {
+    logger.error('Failed to create OTel spans for workflow execution', {
+      error,
+      workflowId: params.workflowId,
+      executionId: params.executionId,
+    })
+  }
+}
+
+/**
+ * Create a real-time OpenTelemetry span for a block execution
+ * Can be called from block handlers during execution for real-time tracing
+ */
+export async function traceBlockExecution<T>(
+  blockType: string,
+  blockId: string,
+  blockName: string,
+  fn: (span: Span) => Promise<T>
+): Promise<T> {
+  const tracer = getTracer()
+
+  const blockMapping = BLOCK_TYPE_MAPPING[blockType] || {
+    spanName: `block.${blockType}`,
+    spanKind: 'internal',
+    getAttributes: () => ({}),
+  }
+
+  return tracer.startActiveSpan(
+    blockMapping.spanName,
+    {
+      attributes: {
+        'block.type': blockType,
+        'block.id': blockId,
+        'block.name': blockName,
+      },
+    },
+    async (span) => {
+      try {
+        const result = await fn(span)
+        span.setStatus({ code: SpanStatusCode.OK })
+        return result
+      } catch (error) {
+        span.setStatus({
+          code: SpanStatusCode.ERROR,
+          message: error instanceof Error ? error.message : 'Block execution failed',
+        })
+        span.recordException(error instanceof Error ? error : new Error(String(error)))
+        throw error
+      } finally {
+        span.end()
+      }
+    }
+  )
+}
+
+/**
+ * Track platform events (workflow creation, knowledge base operations, etc.)
+ */
+export function trackPlatformEvent(
+  eventName: string,
+  attributes: Record<string, string | number | boolean>
+): void {
+  try {
+    const tracer = getTracer()
+    const span = tracer.startSpan(eventName, {
+      attributes: {
+        ...attributes,
+        'event.name': eventName,
+        'event.timestamp': Date.now(),
+      },
+    })
+    span.setStatus({ code: SpanStatusCode.OK })
+    span.end()
+  } catch (error) {
+    // Silently fail
+  }
+}
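A sketch of how a block handler might opt into real-time tracing with traceBlockExecution; the handler, block id, and URL below are placeholders, and only the exported function and its signature come from the new module:

import { traceBlockExecution } from '@/lib/telemetry/tracer'

// Hypothetical API block handler: the wrapper records duration, sets OK/ERROR
// status from the outcome, and rethrows any failure unchanged.
async function runApiBlock(blockId: string, url: string): Promise<unknown> {
  return traceBlockExecution('api', blockId, 'Fetch CRM contacts', async (span) => {
    span.setAttribute('http.request.url', url) // extra attributes are optional
    const res = await fetch(url)
    return res.json()
  })
}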
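And the after-the-fact path the module header describes: once execution logging has produced the final trace spans, the logging session can emit the whole tree in one call. The surrounding hook is a placeholder; the parameter object matches createOTelSpansForWorkflowExecution as defined above:

import { createOTelSpansForWorkflowExecution } from '@/lib/telemetry/tracer'
import type { TraceSpan } from '@/lib/logs/types'

// Hypothetical completion hook, e.g. called from LoggingSession.complete().
function emitWorkflowSpans(
  workflowId: string,
  executionId: string,
  traceSpans: TraceSpan[],
  startedAt: Date,
  endedAt: Date,
  error?: string
) {
  createOTelSpansForWorkflowExecution({
    workflowId,
    executionId,
    traceSpans,
    trigger: 'api',
    startTime: startedAt.toISOString(),
    endTime: endedAt.toISOString(),
    totalDurationMs: endedAt.getTime() - startedAt.getTime(),
    status: error ? 'error' : 'success',
    error,
  })
}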
diff --git a/apps/sim/lib/workflows/streaming.ts b/apps/sim/lib/workflows/streaming.ts
index 4498d84be..7af1cb856 100644
--- a/apps/sim/lib/workflows/streaming.ts
+++ b/apps/sim/lib/workflows/streaming.ts
@@ -39,6 +39,7 @@ export async function createStreamingResponse(
   try {
     const streamedContent = new Map()
     const processedOutputs = new Set()
+    const streamCompletionTimes = new Map()
 
     const sendChunk = (blockId: string, content: string) => {
       const separator = processedOutputs.size > 0 ? '\n\n' : ''
@@ -58,7 +59,11 @@ export async function createStreamingResponse(
       try {
         while (true) {
           const { done, value } = await reader.read()
-          if (done) break
+          if (done) {
+            // Record when this stream completed
+            streamCompletionTimes.set(blockId, Date.now())
+            break
+          }
 
           const textChunk = decoder.decode(value, { stream: true })
           streamedContent.set(blockId, (streamedContent.get(blockId) || '') + textChunk)
@@ -124,6 +129,15 @@ export async function createStreamingResponse(
     result.logs = result.logs.map((log: any) => {
       if (streamedContent.has(log.blockId)) {
         const content = streamedContent.get(log.blockId)
+
+        // Update timing to reflect actual stream completion
+        if (streamCompletionTimes.has(log.blockId)) {
+          const completionTime = streamCompletionTimes.get(log.blockId)!
+          const startTime = new Date(log.startedAt).getTime()
+          log.endedAt = new Date(completionTime).toISOString()
+          log.durationMs = completionTime - startTime
+        }
+
         if (log.output && content) {
           return { ...log, output: { ...log.output, content } }
         }
diff --git a/apps/sim/providers/models.ts b/apps/sim/providers/models.ts
index b25457f19..89c7f976f 100644
--- a/apps/sim/providers/models.ts
+++ b/apps/sim/providers/models.ts
@@ -83,7 +83,7 @@ export const PROVIDER_DEFINITIONS: Record = {
     name: 'OpenAI',
     description: "OpenAI's models",
     defaultModel: 'gpt-4o',
-    modelPatterns: [/^gpt/, /^o1/],
+    modelPatterns: [/^gpt/, /^o1/, /^text-embedding/],
     icon: OpenAIIcon,
     capabilities: {
       toolUsageControl: true,
diff --git a/apps/sim/stores/logs/filters/types.ts b/apps/sim/stores/logs/filters/types.ts
index e1bfb33f6..2b7a73183 100644
--- a/apps/sim/stores/logs/filters/types.ts
+++ b/apps/sim/stores/logs/filters/types.ts
@@ -12,8 +12,8 @@ export interface ToolCall {
   startTime: string // ISO timestamp
   endTime: string // ISO timestamp
   status: 'success' | 'error' // Status of the tool call
-  input?: Record<string, any> // Input parameters (optional)
-  output?: Record<string, any> // Output data (optional)
+  input?: Record<string, unknown> // Input parameters (optional)
+  output?: Record<string, unknown> // Output data (optional)
   error?: string // Error message if status is 'error'
 }
 
@@ -51,6 +51,27 @@ export interface CostMetadata {
   }
 }
 
+export interface TokenInfo {
+  input?: number
+  output?: number
+  total?: number
+  prompt?: number
+  completion?: number
+}
+
+export interface ProviderTiming {
+  duration: number
+  startTime: string
+  endTime: string
+  segments: Array<{
+    type: string
+    name?: string
+    startTime: string | number
+    endTime: string | number
+    duration: number
+  }>
+}
+
 export interface TraceSpan {
   id: string
   name: string
@@ -61,11 +82,18 @@
   children?: TraceSpan[]
   toolCalls?: ToolCall[]
   status?: 'success' | 'error'
-  tokens?: number
+  tokens?: number | TokenInfo
   relativeStartMs?: number // Time in ms from the start of the parent span
   blockId?: string // Added to track the original block ID for relationship mapping
-  input?: Record<string, any> // Added to store input data for this span
-  output?: Record<string, any> // Added to store output data for this span
+  input?: Record<string, unknown> // Added to store input data for this span
+  output?: Record<string, unknown> // Added to store output data for this span
+  model?: string
+  cost?: {
+    input?: number
+    output?: number
+    total?: number
+  }
+  providerTiming?: ProviderTiming
 }
 
 export interface WorkflowLog {
@@ -93,7 +121,7 @@
   executionData?: ToolCallMetadata & {
     traceSpans?: TraceSpan[]
     totalDuration?: number
-    blockInput?: Record<string, any>
+    blockInput?: Record<string, unknown>
     enhanced?: boolean
 
     blockExecutions?: Array<{
@@ -107,10 +135,10 @@
       status: 'success' | 'error' | 'skipped'
       errorMessage?: string
       errorStackTrace?: string
-      inputData: any
-      outputData: any
+      inputData: unknown
+      outputData: unknown
       cost?: CostMetadata
-      metadata: any
+      metadata: Record<string, unknown>
     }>
   }
 }
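The widened tokens field (number | TokenInfo) is what the agent mapping in tracer.ts consumes when it emits gen_ai.usage.* attributes. A small restatement of that normalization, written out as a standalone helper for clarity; the helper itself is not part of the change:

import type { TokenInfo } from '@/stores/logs/filters/types'

// Mirrors the agent mapping: a bare number is treated as a total, while a
// TokenInfo breakdown falls back from input/output to prompt/completion.
function toUsageAttributes(tokens: number | TokenInfo): Record<string, number> {
  if (typeof tokens === 'number') {
    return { 'gen_ai.usage.total_tokens': tokens }
  }
  return {
    'gen_ai.usage.input_tokens': tokens.input || tokens.prompt || 0,
    'gen_ai.usage.output_tokens': tokens.output || tokens.completion || 0,
    'gen_ai.usage.total_tokens': tokens.total || 0,
  }
}

// Legacy numeric totals and the new structured values produce the same shape.
toUsageAttributes(976)
toUsageAttributes({ prompt: 812, completion: 164, total: 976 })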
diff --git a/apps/sim/telemetry.config.ts b/apps/sim/telemetry.config.ts
index 31c597860..621560b29 100644
--- a/apps/sim/telemetry.config.ts
+++ b/apps/sim/telemetry.config.ts
@@ -1,5 +1,5 @@
 /**
- * Sim Telemetry Configuration
+ * Sim OpenTelemetry Configuration
  *
  * PRIVACY NOTICE:
  * - Telemetry is enabled by default to help us improve the product
  * - You can disable it by:
  *   1. Settings UI > Privacy tab > Toggle off "Allow anonymous telemetry"
  *   2. Setting NEXT_TELEMETRY_DISABLED=1 environment variable
  *
- * This file allows you to configure telemetry collection for your
+ * This file allows you to configure OpenTelemetry collection for your
  * Sim instance. If you've forked the repository, you can modify
  * this file to send telemetry to your own collector.
  *
  * We only collect anonymous usage data to improve the product:
  * - Feature usage statistics
- * - Error rates
- * - Performance metrics
+ * - Error rates (always captured)
+ * - Performance metrics (sampled at 10%)
+ * - AI/LLM operation traces (always captured for workflows)
  *
  * We NEVER collect:
  * - Personal information
@@ -26,14 +27,15 @@ import { env } from './lib/env'
 
 const config = {
   /**
-   * Endpoint URL where telemetry data is sent
+   * OTLP Endpoint URL where telemetry data is sent
    * Change this if you want to send telemetry to your own collector
+   * Supports any OTLP-compatible backend (Jaeger, Grafana Tempo, etc.)
    */
   endpoint: env.TELEMETRY_ENDPOINT || 'https://telemetry.simstudio.ai/v1/traces',
 
   /**
    * Service name used to identify this instance
-   * You can change this
+   * You can change this for your fork
    */
   serviceName: 'sim-studio',
 
@@ -43,36 +45,70 @@ const config = {
   serviceVersion: '0.1.0',
 
   /**
-   * Batch settings for sending telemetry
-   * - maxQueueSize: Max number of spans to buffer
-   * - maxExportBatchSize: Max number of spans to send in a single batch
-   * - scheduledDelayMillis: Delay between batches (ms)
-   * - exportTimeoutMillis: Timeout for exporting data (ms)
+   * Batch settings for OpenTelemetry BatchSpanProcessor
+   * Optimized for production use with minimal overhead
+   *
+   * - maxQueueSize: Max number of spans to buffer (increased from 100 to 2048)
+   * - maxExportBatchSize: Max number of spans per batch (increased from 10 to 512)
+   * - scheduledDelayMillis: Delay between batches (5 seconds)
+   * - exportTimeoutMillis: Timeout for exporting data (30 seconds)
    */
   batchSettings: {
-    maxQueueSize: 100,
-    maxExportBatchSize: 10,
+    maxQueueSize: 2048,
+    maxExportBatchSize: 512,
     scheduledDelayMillis: 5000,
     exportTimeoutMillis: 30000,
   },
 
+  /**
+   * Sampling configuration
+   * - Errors: Always sampled (100%)
+   * - AI/LLM operations: Always sampled (100%)
+   * - Other operations: Sampled at 10%
+   */
+  sampling: {
+    defaultRate: 0.1, // 10% sampling for regular operations
+    alwaysSampleErrors: true,
+    alwaysSampleAI: true,
+  },
+
   /**
    * Categories of events that can be collected
    * This is used for validation when events are sent
    */
-  allowedCategories: ['page_view', 'feature_usage', 'performance', 'error', 'workflow', 'consent'],
+  allowedCategories: [
+    'page_view',
+    'feature_usage',
+    'performance',
+    'error',
+    'workflow',
+    'consent',
+    'batch', // Added for batched events
+  ],
 
   /**
    * Client-side instrumentation settings
    * Set enabled: false to disable client-side telemetry entirely
+   *
+   * Client-side telemetry now uses:
+   * - Event batching (send every 10s or 50 events)
+   * - Only critical Web Vitals (LCP, FID, CLS)
+   * - Unhandled errors only
    */
   clientSide: {
     enabled: true,
+    batchIntervalMs: 10000, // 10 seconds
+    maxBatchSize: 50,
   },
 
   /**
   * Server-side instrumentation settings
    * Set enabled: false to disable server-side telemetry entirely
+   *
+   * Server-side telemetry uses:
+   * - OpenTelemetry SDK with BatchSpanProcessor
+   * - Intelligent sampling (errors and AI ops always captured)
+   * - Semantic conventions for AI/LLM operations
    */
   serverSide: {
     enabled: true,
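The clientSide settings above only describe the batching policy (flush every 10 s or 50 events, sent under the new 'batch' category); the queue itself is not shown in this diff. A minimal sketch of what such a batcher could look like, assuming the existing /api/telemetry endpoint and an events array in the body (the payload shape is an assumption, not the route's documented contract):

type QueuedEvent = { category: string; action: string; label?: string; value?: number }

const BATCH_INTERVAL_MS = 10_000 // clientSide.batchIntervalMs
const MAX_BATCH_SIZE = 50 // clientSide.maxBatchSize
const queue: QueuedEvent[] = []

async function flush(): Promise<void> {
  if (queue.length === 0) return
  const events = queue.splice(0, queue.length)
  try {
    // One request per batch instead of one per interaction; 'batch' is now an allowed category.
    await fetch('/api/telemetry', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ category: 'batch', action: 'client_events', events }),
    })
  } catch {
    // Silently fail, matching the rest of the telemetry code
  }
}

export function enqueueEvent(event: QueuedEvent): void {
  queue.push(event)
  if (queue.length >= MAX_BATCH_SIZE) void flush()
}

setInterval(() => void flush(), BATCH_INTERVAL_MS)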
diff --git a/apps/sim/tools/knowledge/search.ts b/apps/sim/tools/knowledge/search.ts
index 7f146a8a1..8008dfcfe 100644
--- a/apps/sim/tools/knowledge/search.ts
+++ b/apps/sim/tools/knowledge/search.ts
@@ -24,7 +24,7 @@ export const knowledgeSearchTool: ToolConfig = {
       description: 'Number of most similar results to return (1-100)',
     },
     tagFilters: {
-      type: 'any',
+      type: 'array',
       required: false,
       description: 'Array of tag filters with tagName and tagValue properties',
     },
diff --git a/apps/sim/tools/params.ts b/apps/sim/tools/params.ts
index dd3ebcdb0..da1d4b617 100644
--- a/apps/sim/tools/params.ts
+++ b/apps/sim/tools/params.ts
@@ -1,6 +1,9 @@
+import { createLogger } from '@/lib/logs/console/logger'
 import type { ParameterVisibility, ToolConfig } from '@/tools/types'
 import { getTool } from '@/tools/utils'
 
+const logger = createLogger('ToolsParams')
+
 export interface Option {
   label: string
   value: string
@@ -133,13 +136,13 @@ export function getToolParametersConfig(
   try {
     const toolConfig = getTool(toolId)
     if (!toolConfig) {
-      console.warn(`Tool not found: ${toolId}`)
+      logger.warn(`Tool not found: ${toolId}`)
       return null
     }
 
     // Validate that toolConfig has required properties
     if (!toolConfig.params || typeof toolConfig.params !== 'object') {
-      console.warn(`Tool ${toolId} has invalid params configuration`)
+      logger.warn(`Tool ${toolId} has invalid params configuration`)
       return null
     }
 
@@ -265,7 +268,7 @@ export function getToolParametersConfig(
       optionalParameters,
     }
   } catch (error) {
-    console.error('Error getting tool parameters config:', error)
+    logger.error('Error getting tool parameters config:', error)
    return null
   }
 }
 
@@ -306,8 +309,13 @@ export function createLLMToolSchema(
     }
 
     // Add parameter to LLM schema
+    let schemaType = param.type
+    if (param.type === 'json' || param.type === 'any') {
+      schemaType = 'object'
+    }
+
     schema.properties[paramId] = {
-      type: param.type === 'json' ? 'object' : param.type,
+      type: schemaType,
       description: param.description || '',
     }
diff --git a/apps/sim/tools/supabase/insert.ts b/apps/sim/tools/supabase/insert.ts
index 978f6e0cb..b13719fb8 100644
--- a/apps/sim/tools/supabase/insert.ts
+++ b/apps/sim/tools/supabase/insert.ts
@@ -21,10 +21,10 @@ export const insertTool: ToolConfig