Mirror of https://github.com/simstudioai/sim.git (synced 2026-01-11 07:58:06 -05:00)

Compare commits (1 commit): f3bc1fc250
@@ -3,8 +3,9 @@ import { type NextRequest, NextResponse } from 'next/server'
 import { v4 as uuidv4 } from 'uuid'
 import { env } from '@/lib/env'
 import { createLogger } from '@/lib/logs/console-logger'
-import { persistExecutionLogs } from '@/lib/logs/execution-logger'
+import { EnhancedLoggingSession } from '@/lib/logs/enhanced-logging-session'
 import { buildTraceSpans } from '@/lib/logs/trace-spans'
+import { processStreamingBlockLogs } from '@/lib/tokenization'
 import { decryptSecret } from '@/lib/utils'
 import { db } from '@/db'
 import { chat, environment as envTable, userStats, workflow } from '@/db/schema'
@@ -252,11 +253,14 @@ export async function executeWorkflowForChat(

   const deployment = deploymentResult[0]
   const workflowId = deployment.workflowId
+  const executionId = uuidv4()
+
+  // Set up enhanced logging for chat execution
+  const loggingSession = new EnhancedLoggingSession(workflowId, executionId, 'chat', requestId)

   // Check for multi-output configuration in customizations
   const customizations = (deployment.customizations || {}) as Record<string, any>
   let outputBlockIds: string[] = []
   let outputPaths: string[] = []

   // Extract output configs from the new schema format
   if (deployment.outputConfigs && Array.isArray(deployment.outputConfigs)) {
@@ -271,13 +275,11 @@ export async function executeWorkflowForChat(
     })

     outputBlockIds = deployment.outputConfigs.map((config) => config.blockId)
     outputPaths = deployment.outputConfigs.map((config) => config.path || '')
   } else {
     // Use customizations as fallback
     outputBlockIds = Array.isArray(customizations.outputBlockIds)
       ? customizations.outputBlockIds
       : []
     outputPaths = Array.isArray(customizations.outputPaths) ? customizations.outputPaths : []
   }

   // Fall back to customizations if we still have no outputs
@@ -287,7 +289,6 @@ export async function executeWorkflowForChat(
     customizations.outputBlockIds.length > 0
   ) {
     outputBlockIds = customizations.outputBlockIds
    outputPaths = customizations.outputPaths || new Array(outputBlockIds.length).fill('')
   }

   logger.debug(`[${requestId}] Using ${outputBlockIds.length} output blocks for extraction`)
@@ -407,6 +408,13 @@ export async function executeWorkflowForChat(
     {} as Record<string, Record<string, any>>
   )

+  // Start enhanced logging session
+  await loggingSession.safeStart({
+    userId: deployment.userId,
+    workspaceId: '', // TODO: Get from workflow
+    variables: workflowVariables,
+  })
+
   const stream = new ReadableStream({
     async start(controller) {
       const encoder = new TextEncoder()
@@ -458,16 +466,41 @@ export async function executeWorkflowForChat(
         },
       })

-      const result = await executor.execute(workflowId)
+      // Set up enhanced logging on the executor
+      loggingSession.setupExecutor(executor)
+
+      let result
+      try {
+        result = await executor.execute(workflowId)
+      } catch (error: any) {
+        logger.error(`[${requestId}] Chat workflow execution failed:`, error)
+        await loggingSession.safeCompleteWithError({
+          endedAt: new Date().toISOString(),
+          totalDurationMs: 0,
+          error: {
+            message: error.message || 'Chat workflow execution failed',
+            stackTrace: error.stack,
+          },
+        })
+        throw error
+      }

       if (result && 'success' in result) {
-        result.logs?.forEach((log: BlockLog) => {
-          if (streamedContent.has(log.blockId)) {
-            if (log.output) {
-              log.output.content = streamedContent.get(log.blockId)
+        // Update streamed content and apply tokenization
+        if (result.logs) {
+          result.logs.forEach((log: BlockLog) => {
+            if (streamedContent.has(log.blockId)) {
+              const content = streamedContent.get(log.blockId)
+              if (log.output) {
+                log.output.content = content
+              }
             }
-          }
-        })
+          })
+
+          // Process all logs for streaming tokenization
+          const processedCount = processStreamingBlockLogs(result.logs, streamedContent)
+          logger.info(`[CHAT-API] Processed ${processedCount} blocks for streaming tokenization`)
+        }

         const { traceSpans, totalDuration } = buildTraceSpans(result)
         const enrichedResult = { ...result, traceSpans, totalDuration }
@@ -481,8 +514,7 @@ export async function executeWorkflowForChat(
          ;(enrichedResult.metadata as any).conversationId = conversationId
         }
-        const executionId = uuidv4()
-        await persistExecutionLogs(workflowId, executionId, enrichedResult, 'chat')
-        logger.debug(`Persisted logs for deployed chat: ${executionId}`)
+        logger.debug(`Generated execution ID for deployed chat: ${executionId}`)

         if (result.success) {
           try {
@@ -506,6 +538,17 @@ export async function executeWorkflowForChat(
         )
       }

+      // Complete enhanced logging session (for both success and failure)
+      if (result && 'success' in result) {
+        const { traceSpans } = buildTraceSpans(result)
+        await loggingSession.safeComplete({
+          endedAt: new Date().toISOString(),
+          totalDurationMs: result.metadata?.duration || 0,
+          finalOutput: result.output,
+          traceSpans,
+        })
+      }
+
       controller.close()
     },
   })
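Taken together, the chat-route hunks thread one EnhancedLoggingSession through the whole execution: safeStart before the stream begins, setupExecutor once the executor exists, then exactly one of safeComplete or safeCompleteWithError, with errors rethrown so callers still see the failure. A minimal sketch of that lifecycle, with hypothetical stand-in types (the real session and executor live in @/lib/logs and @/executor; only the call order comes from the diff):

```ts
// Hypothetical stand-ins for the real types; only the call ordering is sourced.
interface LoggingSession {
  safeStart(opts: { userId: string; workspaceId: string; variables: Record<string, unknown> }): Promise<void>
  setupExecutor(executor: unknown): void
  safeComplete(opts: { endedAt: string; totalDurationMs: number; finalOutput: unknown; traceSpans: unknown[] }): Promise<void>
  safeCompleteWithError(opts: { endedAt: string; totalDurationMs: number; error: { message: string; stackTrace?: string } }): Promise<void>
}

async function runWithLogging(
  session: LoggingSession,
  executor: { execute(id: string): Promise<{ output: unknown; metadata?: { duration?: number } }> },
  workflowId: string,
  userId: string
) {
  await session.safeStart({ userId, workspaceId: '', variables: {} })
  session.setupExecutor(executor)
  try {
    const result = await executor.execute(workflowId)
    await session.safeComplete({
      endedAt: new Date().toISOString(),
      totalDurationMs: result.metadata?.duration || 0,
      finalOutput: result.output,
      traceSpans: [],
    })
    return result
  } catch (error: any) {
    await session.safeCompleteWithError({
      endedAt: new Date().toISOString(),
      totalDurationMs: 0,
      error: { message: error.message || 'execution failed', stackTrace: error.stack },
    })
    throw error // rethrow so the caller still observes the failure
  }
}
```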
@@ -40,6 +40,7 @@ describe('Individual Folder API Route', () => {
 }

 const { mockAuthenticatedUser, mockUnauthenticated } = mockAuth(TEST_USER)
+const mockGetUserEntityPermissions = vi.fn()

 function createFolderDbMock(options: FolderDbMockOptions = {}) {
   const {
@@ -109,6 +110,12 @@ describe('Individual Folder API Route', () => {
     vi.resetModules()
     vi.clearAllMocks()
     setupCommonApiMocks()
+
+    mockGetUserEntityPermissions.mockResolvedValue('admin')
+
+    vi.doMock('@/lib/permissions/utils', () => ({
+      getUserEntityPermissions: mockGetUserEntityPermissions,
+    }))
   })

   afterEach(() => {
@@ -181,6 +188,72 @@ describe('Individual Folder API Route', () => {
     expect(data).toHaveProperty('error', 'Unauthorized')
   })

+  it('should return 403 when user has only read permissions', async () => {
+    mockAuthenticatedUser()
+    mockGetUserEntityPermissions.mockResolvedValue('read') // Read-only permissions
+
+    const dbMock = createFolderDbMock()
+    vi.doMock('@/db', () => dbMock)
+
+    const req = createMockRequest('PUT', {
+      name: 'Updated Folder',
+    })
+    const params = Promise.resolve({ id: 'folder-1' })
+
+    const { PUT } = await import('./route')
+
+    const response = await PUT(req, { params })
+
+    expect(response.status).toBe(403)
+
+    const data = await response.json()
+    expect(data).toHaveProperty('error', 'Write access required to update folders')
+  })
+
+  it('should allow folder update for write permissions', async () => {
+    mockAuthenticatedUser()
+    mockGetUserEntityPermissions.mockResolvedValue('write') // Write permissions
+
+    const dbMock = createFolderDbMock()
+    vi.doMock('@/db', () => dbMock)
+
+    const req = createMockRequest('PUT', {
+      name: 'Updated Folder',
+    })
+    const params = Promise.resolve({ id: 'folder-1' })
+
+    const { PUT } = await import('./route')
+
+    const response = await PUT(req, { params })
+
+    expect(response.status).toBe(200)
+
+    const data = await response.json()
+    expect(data).toHaveProperty('folder')
+  })
+
+  it('should allow folder update for admin permissions', async () => {
+    mockAuthenticatedUser()
+    mockGetUserEntityPermissions.mockResolvedValue('admin') // Admin permissions
+
+    const dbMock = createFolderDbMock()
+    vi.doMock('@/db', () => dbMock)
+
+    const req = createMockRequest('PUT', {
+      name: 'Updated Folder',
+    })
+    const params = Promise.resolve({ id: 'folder-1' })
+
+    const { PUT } = await import('./route')
+
+    const response = await PUT(req, { params })
+
+    expect(response.status).toBe(200)
+
+    const data = await response.json()
+    expect(data).toHaveProperty('folder')
+  })
+
   it('should return 400 when trying to set folder as its own parent', async () => {
     mockAuthenticatedUser()
@@ -387,6 +460,68 @@ describe('Individual Folder API Route', () => {
     expect(data).toHaveProperty('error', 'Unauthorized')
   })

+  it('should return 403 when user has only read permissions for delete', async () => {
+    mockAuthenticatedUser()
+    mockGetUserEntityPermissions.mockResolvedValue('read') // Read-only permissions
+
+    const dbMock = createFolderDbMock()
+    vi.doMock('@/db', () => dbMock)
+
+    const req = createMockRequest('DELETE')
+    const params = Promise.resolve({ id: 'folder-1' })
+
+    const { DELETE } = await import('./route')
+
+    const response = await DELETE(req, { params })
+
+    expect(response.status).toBe(403)
+
+    const data = await response.json()
+    expect(data).toHaveProperty('error', 'Admin access required to delete folders')
+  })
+
+  it('should return 403 when user has only write permissions for delete', async () => {
+    mockAuthenticatedUser()
+    mockGetUserEntityPermissions.mockResolvedValue('write') // Write permissions (not enough for delete)
+
+    const dbMock = createFolderDbMock()
+    vi.doMock('@/db', () => dbMock)
+
+    const req = createMockRequest('DELETE')
+    const params = Promise.resolve({ id: 'folder-1' })
+
+    const { DELETE } = await import('./route')
+
+    const response = await DELETE(req, { params })
+
+    expect(response.status).toBe(403)
+
+    const data = await response.json()
+    expect(data).toHaveProperty('error', 'Admin access required to delete folders')
+  })
+
+  it('should allow folder deletion for admin permissions', async () => {
+    mockAuthenticatedUser()
+    mockGetUserEntityPermissions.mockResolvedValue('admin') // Admin permissions
+
+    const dbMock = createFolderDbMock({
+      folderLookupResult: mockFolder,
+    })
+    vi.doMock('@/db', () => dbMock)
+
+    const req = createMockRequest('DELETE')
+    const params = Promise.resolve({ id: 'folder-1' })
+
+    const { DELETE } = await import('./route')
+
+    const response = await DELETE(req, { params })
+
+    expect(response.status).toBe(200)
+
+    const data = await response.json()
+    expect(data).toHaveProperty('success', true)
+  })
+
   it('should handle database errors during deletion', async () => {
     mockAuthenticatedUser()
@@ -2,6 +2,7 @@ import { and, eq } from 'drizzle-orm'
 import { type NextRequest, NextResponse } from 'next/server'
 import { getSession } from '@/lib/auth'
 import { createLogger } from '@/lib/logs/console-logger'
+import { getUserEntityPermissions } from '@/lib/permissions/utils'
 import { db } from '@/db'
 import { workflow, workflowFolder } from '@/db/schema'
@@ -19,17 +20,31 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
     const body = await request.json()
     const { name, color, isExpanded, parentId } = body

-    // Verify the folder exists and belongs to the user
+    // Verify the folder exists
     const existingFolder = await db
       .select()
       .from(workflowFolder)
-      .where(and(eq(workflowFolder.id, id), eq(workflowFolder.userId, session.user.id)))
+      .where(eq(workflowFolder.id, id))
       .then((rows) => rows[0])

     if (!existingFolder) {
       return NextResponse.json({ error: 'Folder not found' }, { status: 404 })
     }

+    // Check if user has write permissions for the workspace
+    const workspacePermission = await getUserEntityPermissions(
+      session.user.id,
+      'workspace',
+      existingFolder.workspaceId
+    )
+
+    if (!workspacePermission || workspacePermission === 'read') {
+      return NextResponse.json(
+        { error: 'Write access required to update folders' },
+        { status: 403 }
+      )
+    }
+
     // Prevent setting a folder as its own parent or creating circular references
     if (parentId && parentId === id) {
       return NextResponse.json({ error: 'Folder cannot be its own parent' }, { status: 400 })
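The PUT and DELETE handlers repeat the same fetch-then-gate shape: load the folder by id alone, 404 if missing, then compare the caller's workspace permission against a required level. A hedged sketch of that gate as a standalone helper; the numeric ranking is an assumption, since the diff only distinguishes "read or none", "write or better", and "admin":

```ts
type Permission = 'read' | 'write' | 'admin' | null

// Hypothetical ranks; the routes themselves only test the boundaries below.
const rank: Record<Exclude<Permission, null>, number> = { read: 1, write: 2, admin: 3 }

function hasAtLeast(actual: Permission, required: 'write' | 'admin'): boolean {
  return actual !== null && rank[actual] >= rank[required]
}

// Mirrors the route checks:
// hasAtLeast('read', 'write')  -> false (403 on update)
// hasAtLeast('write', 'admin') -> false (403 on delete)
// hasAtLeast('admin', 'admin') -> true
```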
@@ -81,19 +96,33 @@ export async function DELETE(

     const { id } = await params

-    // Verify the folder exists and belongs to the user
+    // Verify the folder exists
     const existingFolder = await db
       .select()
       .from(workflowFolder)
-      .where(and(eq(workflowFolder.id, id), eq(workflowFolder.userId, session.user.id)))
+      .where(eq(workflowFolder.id, id))
       .then((rows) => rows[0])

     if (!existingFolder) {
       return NextResponse.json({ error: 'Folder not found' }, { status: 404 })
     }

+    // Check if user has admin permissions for the workspace (admin-only for deletions)
+    const workspacePermission = await getUserEntityPermissions(
+      session.user.id,
+      'workspace',
+      existingFolder.workspaceId
+    )
+
+    if (workspacePermission !== 'admin') {
+      return NextResponse.json(
+        { error: 'Admin access required to delete folders' },
+        { status: 403 }
+      )
+    }
+
     // Recursively delete folder and all its contents
-    const deletionStats = await deleteFolderRecursively(id, session.user.id)
+    const deletionStats = await deleteFolderRecursively(id, existingFolder.workspaceId)

     logger.info('Deleted folder and all contents:', {
       id,
@@ -113,41 +142,40 @@ export async function DELETE(
 // Helper function to recursively delete a folder and all its contents
 async function deleteFolderRecursively(
   folderId: string,
-  userId: string
+  workspaceId: string
 ): Promise<{ folders: number; workflows: number }> {
   const stats = { folders: 0, workflows: 0 }

-  // Get all child folders first
+  // Get all child folders first (workspace-scoped, not user-scoped)
   const childFolders = await db
     .select({ id: workflowFolder.id })
     .from(workflowFolder)
-    .where(and(eq(workflowFolder.parentId, folderId), eq(workflowFolder.userId, userId)))
+    .where(and(eq(workflowFolder.parentId, folderId), eq(workflowFolder.workspaceId, workspaceId)))

   // Recursively delete child folders
   for (const childFolder of childFolders) {
-    const childStats = await deleteFolderRecursively(childFolder.id, userId)
+    const childStats = await deleteFolderRecursively(childFolder.id, workspaceId)
     stats.folders += childStats.folders
     stats.workflows += childStats.workflows
   }

-  // Delete all workflows in this folder
+  // Delete all workflows in this folder (workspace-scoped, not user-scoped)
+  // The database cascade will handle deleting related workflow_blocks, workflow_edges, workflow_subflows
   const workflowsInFolder = await db
     .select({ id: workflow.id })
     .from(workflow)
-    .where(and(eq(workflow.folderId, folderId), eq(workflow.userId, userId)))
+    .where(and(eq(workflow.folderId, folderId), eq(workflow.workspaceId, workspaceId)))

   if (workflowsInFolder.length > 0) {
     await db
       .delete(workflow)
-      .where(and(eq(workflow.folderId, folderId), eq(workflow.userId, userId)))
+      .where(and(eq(workflow.folderId, folderId), eq(workflow.workspaceId, workspaceId)))

     stats.workflows += workflowsInFolder.length
   }

   // Delete this folder
-  await db
-    .delete(workflowFolder)
-    .where(and(eq(workflowFolder.id, folderId), eq(workflowFolder.userId, userId)))
+  await db.delete(workflowFolder).where(eq(workflowFolder.id, folderId))

   stats.folders += 1
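Because deleteFolderRecursively now keys every query on workspaceId, a folder tree that mixes creators is removed in one pass: children first, then the folder's workflows, then the folder itself. A small in-memory model of the same traversal, useful for sanity-checking the returned stats; the data shape here is invented for illustration:

```ts
interface FolderNode { id: string; parentId: string | null; workflowIds: string[] }

// Depth-first, children before parent, exactly like the route's recursion.
function deleteTree(folders: FolderNode[], rootId: string): { folders: number; workflows: number } {
  const stats = { folders: 0, workflows: 0 }
  for (const child of folders.filter((f) => f.parentId === rootId)) {
    const childStats = deleteTree(folders, child.id)
    stats.folders += childStats.folders
    stats.workflows += childStats.workflows
  }
  const root = folders.find((f) => f.id === rootId)
  if (root) {
    stats.workflows += root.workflowIds.length
    stats.folders += 1
  }
  return stats
}

// deleteTree([{ id: 'a', parentId: null, workflowIds: ['w1'] },
//             { id: 'b', parentId: 'a', workflowIds: ['w2', 'w3'] }], 'a')
// -> { folders: 2, workflows: 3 }
```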
@@ -52,6 +52,7 @@ describe('Folders API Route', () => {
   const mockValues = vi.fn()
   const mockReturning = vi.fn()
   const mockTransaction = vi.fn()
+  const mockGetUserEntityPermissions = vi.fn()

   beforeEach(() => {
     vi.resetModules()
@@ -72,6 +73,8 @@ describe('Folders API Route', () => {
     mockValues.mockReturnValue({ returning: mockReturning })
     mockReturning.mockReturnValue([mockFolders[0]])

+    mockGetUserEntityPermissions.mockResolvedValue('admin')
+
     vi.doMock('@/db', () => ({
       db: {
         select: mockSelect,
@@ -79,6 +82,10 @@ describe('Folders API Route', () => {
         transaction: mockTransaction,
       },
     }))
+
+    vi.doMock('@/lib/permissions/utils', () => ({
+      getUserEntityPermissions: mockGetUserEntityPermissions,
+    }))
   })

   afterEach(() => {
@@ -143,6 +150,42 @@ describe('Folders API Route', () => {
     expect(data).toHaveProperty('error', 'Workspace ID is required')
   })

+  it('should return 403 when user has no workspace permissions', async () => {
+    mockAuthenticatedUser()
+    mockGetUserEntityPermissions.mockResolvedValue(null) // No permissions
+
+    const mockRequest = createMockRequest('GET')
+    Object.defineProperty(mockRequest, 'url', {
+      value: 'http://localhost:3000/api/folders?workspaceId=workspace-123',
+    })
+
+    const { GET } = await import('./route')
+    const response = await GET(mockRequest)
+
+    expect(response.status).toBe(403)
+
+    const data = await response.json()
+    expect(data).toHaveProperty('error', 'Access denied to this workspace')
+  })
+
+  it('should return 403 when user has only read permissions', async () => {
+    mockAuthenticatedUser()
+    mockGetUserEntityPermissions.mockResolvedValue('read') // Read-only permissions
+
+    const mockRequest = createMockRequest('GET')
+    Object.defineProperty(mockRequest, 'url', {
+      value: 'http://localhost:3000/api/folders?workspaceId=workspace-123',
+    })
+
+    const { GET } = await import('./route')
+    const response = await GET(mockRequest)
+
+    expect(response.status).toBe(200) // Should work for read permissions
+
+    const data = await response.json()
+    expect(data).toHaveProperty('folders')
+  })
+
   it('should handle database errors gracefully', async () => {
     mockAuthenticatedUser()
@@ -295,6 +338,100 @@ describe('Folders API Route', () => {
     expect(data).toHaveProperty('error', 'Unauthorized')
   })

+  it('should return 403 when user has only read permissions', async () => {
+    mockAuthenticatedUser()
+    mockGetUserEntityPermissions.mockResolvedValue('read') // Read-only permissions
+
+    const req = createMockRequest('POST', {
+      name: 'Test Folder',
+      workspaceId: 'workspace-123',
+    })
+
+    const { POST } = await import('./route')
+    const response = await POST(req)
+
+    expect(response.status).toBe(403)
+
+    const data = await response.json()
+    expect(data).toHaveProperty('error', 'Write or Admin access required to create folders')
+  })
+
+  it('should allow folder creation for write permissions', async () => {
+    mockAuthenticatedUser()
+    mockGetUserEntityPermissions.mockResolvedValue('write') // Write permissions
+
+    mockTransaction.mockImplementationOnce(async (callback: any) => {
+      const tx = {
+        select: vi.fn().mockReturnValue({
+          from: vi.fn().mockReturnValue({
+            where: vi.fn().mockReturnValue({
+              orderBy: vi.fn().mockReturnValue({
+                limit: vi.fn().mockReturnValue([]), // No existing folders
+              }),
+            }),
+          }),
+        }),
+        insert: vi.fn().mockReturnValue({
+          values: vi.fn().mockReturnValue({
+            returning: vi.fn().mockReturnValue([mockFolders[0]]),
+          }),
+        }),
+      }
+      return await callback(tx)
+    })
+
+    const req = createMockRequest('POST', {
+      name: 'Test Folder',
+      workspaceId: 'workspace-123',
+    })
+
+    const { POST } = await import('./route')
+    const response = await POST(req)
+
+    expect(response.status).toBe(200)
+
+    const data = await response.json()
+    expect(data).toHaveProperty('folder')
+  })
+
+  it('should allow folder creation for admin permissions', async () => {
+    mockAuthenticatedUser()
+    mockGetUserEntityPermissions.mockResolvedValue('admin') // Admin permissions
+
+    mockTransaction.mockImplementationOnce(async (callback: any) => {
+      const tx = {
+        select: vi.fn().mockReturnValue({
+          from: vi.fn().mockReturnValue({
+            where: vi.fn().mockReturnValue({
+              orderBy: vi.fn().mockReturnValue({
+                limit: vi.fn().mockReturnValue([]), // No existing folders
+              }),
+            }),
+          }),
+        }),
+        insert: vi.fn().mockReturnValue({
+          values: vi.fn().mockReturnValue({
+            returning: vi.fn().mockReturnValue([mockFolders[0]]),
+          }),
+        }),
+      }
+      return await callback(tx)
+    })
+
+    const req = createMockRequest('POST', {
+      name: 'Test Folder',
+      workspaceId: 'workspace-123',
+    })
+
+    const { POST } = await import('./route')
+    const response = await POST(req)
+
+    expect(response.status).toBe(200)
+
+    const data = await response.json()
+    expect(data).toHaveProperty('folder')
+  })
+
   it('should return 400 when required fields are missing', async () => {
     const testCases = [
       { name: '', workspaceId: 'workspace-123' }, // Missing name
@@ -2,6 +2,7 @@ import { and, asc, desc, eq, isNull } from 'drizzle-orm'
 import { type NextRequest, NextResponse } from 'next/server'
 import { getSession } from '@/lib/auth'
 import { createLogger } from '@/lib/logs/console-logger'
+import { getUserEntityPermissions } from '@/lib/permissions/utils'
 import { db } from '@/db'
 import { workflowFolder } from '@/db/schema'
@@ -22,13 +23,23 @@ export async function GET(request: NextRequest) {
       return NextResponse.json({ error: 'Workspace ID is required' }, { status: 400 })
     }

-    // Fetch all folders for the workspace, ordered by sortOrder and createdAt
+    // Check if user has workspace permissions
+    const workspacePermission = await getUserEntityPermissions(
+      session.user.id,
+      'workspace',
+      workspaceId
+    )
+
+    if (!workspacePermission) {
+      return NextResponse.json({ error: 'Access denied to this workspace' }, { status: 403 })
+    }
+
+    // If user has workspace permissions, fetch ALL folders in the workspace
+    // This allows shared workspace members to see folders created by other users
     const folders = await db
       .select()
       .from(workflowFolder)
-      .where(
-        and(eq(workflowFolder.workspaceId, workspaceId), eq(workflowFolder.userId, session.user.id))
-      )
+      .where(eq(workflowFolder.workspaceId, workspaceId))
       .orderBy(asc(workflowFolder.sortOrder), asc(workflowFolder.createdAt))

     return NextResponse.json({ folders })
@@ -53,19 +64,33 @@ export async function POST(request: NextRequest) {
       return NextResponse.json({ error: 'Name and workspace ID are required' }, { status: 400 })
     }

+    // Check if user has workspace permissions (at least 'write' access to create folders)
+    const workspacePermission = await getUserEntityPermissions(
+      session.user.id,
+      'workspace',
+      workspaceId
+    )
+
+    if (!workspacePermission || workspacePermission === 'read') {
+      return NextResponse.json(
+        { error: 'Write or Admin access required to create folders' },
+        { status: 403 }
+      )
+    }
+
     // Generate a new ID
     const id = crypto.randomUUID()

     // Use transaction to ensure sortOrder consistency
     const newFolder = await db.transaction(async (tx) => {
       // Get the next sort order for the parent (or root level)
+      // Consider all folders in the workspace, not just those created by current user
       const existingFolders = await tx
         .select({ sortOrder: workflowFolder.sortOrder })
         .from(workflowFolder)
         .where(
           and(
             eq(workflowFolder.workspaceId, workspaceId),
-            eq(workflowFolder.userId, session.user.id),
             parentId ? eq(workflowFolder.parentId, parentId) : isNull(workflowFolder.parentId)
           )
         )
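Assuming these handlers are mounted at /api/folders (the path the tests above use; a placeholder outside of them), a client round-trip looks like the sketch below. The 403 body matches the string the tests assert on:

```ts
// Hypothetical client helper; URL and error string come from the tests above.
async function createFolder(name: string, workspaceId: string) {
  const res = await fetch('http://localhost:3000/api/folders', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name, workspaceId }),
  })
  if (res.status === 403) {
    // e.g. 'Write or Admin access required to create folders'
    throw new Error((await res.json()).error)
  }
  return (await res.json()).folder
}
```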
apps/sim/app/api/logs/[executionId]/frozen-canvas/route.ts (new file, 76 lines)
@@ -0,0 +1,76 @@
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { createLogger } from '@/lib/logs/console-logger'
import { db } from '@/db'
import { workflowExecutionLogs, workflowExecutionSnapshots } from '@/db/schema'

const logger = createLogger('FrozenCanvasAPI')

export async function GET(
  _request: NextRequest,
  { params }: { params: Promise<{ executionId: string }> }
) {
  try {
    const { executionId } = await params

    logger.debug(`Fetching frozen canvas data for execution: ${executionId}`)

    // Get the workflow execution log to find the snapshot
    const [workflowLog] = await db
      .select()
      .from(workflowExecutionLogs)
      .where(eq(workflowExecutionLogs.executionId, executionId))
      .limit(1)

    if (!workflowLog) {
      return NextResponse.json({ error: 'Workflow execution not found' }, { status: 404 })
    }

    // Get the workflow state snapshot
    const [snapshot] = await db
      .select()
      .from(workflowExecutionSnapshots)
      .where(eq(workflowExecutionSnapshots.id, workflowLog.stateSnapshotId))
      .limit(1)

    if (!snapshot) {
      return NextResponse.json({ error: 'Workflow state snapshot not found' }, { status: 404 })
    }

    const response = {
      executionId,
      workflowId: workflowLog.workflowId,
      workflowState: snapshot.stateData,
      executionMetadata: {
        trigger: workflowLog.trigger,
        startedAt: workflowLog.startedAt.toISOString(),
        endedAt: workflowLog.endedAt?.toISOString(),
        totalDurationMs: workflowLog.totalDurationMs,
        blockStats: {
          total: workflowLog.blockCount,
          success: workflowLog.successCount,
          error: workflowLog.errorCount,
          skipped: workflowLog.skippedCount,
        },
        cost: {
          total: workflowLog.totalCost ? Number.parseFloat(workflowLog.totalCost) : null,
          input: workflowLog.totalInputCost ? Number.parseFloat(workflowLog.totalInputCost) : null,
          output: workflowLog.totalOutputCost
            ? Number.parseFloat(workflowLog.totalOutputCost)
            : null,
        },
        totalTokens: workflowLog.totalTokens,
      },
    }

    logger.debug(`Successfully fetched frozen canvas data for execution: ${executionId}`)
    logger.debug(
      `Workflow state contains ${Object.keys((snapshot.stateData as any)?.blocks || {}).length} blocks`
    )

    return NextResponse.json(response)
  } catch (error) {
    logger.error('Error fetching frozen canvas data:', error)
    return NextResponse.json({ error: 'Failed to fetch frozen canvas data' }, { status: 500 })
  }
}
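A sketch of fetching this new endpoint from a client; only the URL shape and the response fields come from the handler above, the helper itself is hypothetical:

```ts
async function fetchFrozenCanvas(executionId: string) {
  const res = await fetch(`/api/logs/${executionId}/frozen-canvas`)
  if (!res.ok) throw new Error(`frozen canvas fetch failed: ${res.status}`)
  // workflowState holds the snapshot to render; executionMetadata carries
  // trigger, timing, block stats, cost, and token totals.
  return (await res.json()) as {
    executionId: string
    workflowId: string
    workflowState: unknown
    executionMetadata: { trigger: string; startedAt: string; totalDurationMs: number | null }
  }
}
```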
@@ -3,9 +3,10 @@ import { and, eq, inArray, lt, sql } from 'drizzle-orm'
 import { NextResponse } from 'next/server'
 import { env } from '@/lib/env'
 import { createLogger } from '@/lib/logs/console-logger'
+import { snapshotService } from '@/lib/logs/snapshot-service'
 import { getS3Client } from '@/lib/uploads/s3/s3-client'
 import { db } from '@/db'
-import { subscription, user, workflow, workflowLogs } from '@/db/schema'
+import { subscription, user, workflow, workflowExecutionLogs } from '@/db/schema'

 export const dynamic = 'force-dynamic'
@@ -66,99 +67,143 @@ export async function GET(request: Request) {
   const workflowIds = workflowsQuery.map((w) => w.id)

   const results = {
     total: 0,
     archived: 0,
     archiveFailed: 0,
     deleted: 0,
     deleteFailed: 0,
+    enhancedLogs: {
+      total: 0,
+      archived: 0,
+      archiveFailed: 0,
+      deleted: 0,
+      deleteFailed: 0,
+    },
+    snapshots: {
+      cleaned: 0,
+      cleanupFailed: 0,
+    },
   }

   const startTime = Date.now()
   const MAX_BATCHES = 10

+  // Process enhanced logging cleanup
   let batchesProcessed = 0
   let hasMoreLogs = true

+  logger.info(`Starting enhanced logs cleanup for ${workflowIds.length} workflows`)
+
   while (hasMoreLogs && batchesProcessed < MAX_BATCHES) {
-    const oldLogs = await db
+    // Query enhanced execution logs that need cleanup
+    const oldEnhancedLogs = await db
       .select({
-        id: workflowLogs.id,
-        workflowId: workflowLogs.workflowId,
-        executionId: workflowLogs.executionId,
-        level: workflowLogs.level,
-        message: workflowLogs.message,
-        duration: workflowLogs.duration,
-        trigger: workflowLogs.trigger,
-        createdAt: workflowLogs.createdAt,
-        metadata: workflowLogs.metadata,
+        id: workflowExecutionLogs.id,
+        workflowId: workflowExecutionLogs.workflowId,
+        executionId: workflowExecutionLogs.executionId,
+        stateSnapshotId: workflowExecutionLogs.stateSnapshotId,
+        level: workflowExecutionLogs.level,
+        message: workflowExecutionLogs.message,
+        trigger: workflowExecutionLogs.trigger,
+        startedAt: workflowExecutionLogs.startedAt,
+        endedAt: workflowExecutionLogs.endedAt,
+        totalDurationMs: workflowExecutionLogs.totalDurationMs,
+        blockCount: workflowExecutionLogs.blockCount,
+        successCount: workflowExecutionLogs.successCount,
+        errorCount: workflowExecutionLogs.errorCount,
+        skippedCount: workflowExecutionLogs.skippedCount,
+        totalCost: workflowExecutionLogs.totalCost,
+        totalInputCost: workflowExecutionLogs.totalInputCost,
+        totalOutputCost: workflowExecutionLogs.totalOutputCost,
+        totalTokens: workflowExecutionLogs.totalTokens,
+        metadata: workflowExecutionLogs.metadata,
+        createdAt: workflowExecutionLogs.createdAt,
       })
-      .from(workflowLogs)
+      .from(workflowExecutionLogs)
       .where(
         and(
-          inArray(workflowLogs.workflowId, workflowIds),
-          lt(workflowLogs.createdAt, retentionDate)
+          inArray(workflowExecutionLogs.workflowId, workflowIds),
+          lt(workflowExecutionLogs.createdAt, retentionDate)
         )
       )
      .limit(BATCH_SIZE)

-    results.total += oldLogs.length
+    results.enhancedLogs.total += oldEnhancedLogs.length

-    for (const log of oldLogs) {
+    for (const log of oldEnhancedLogs) {
       const today = new Date().toISOString().split('T')[0]

-      const logKey = `archived-logs/${today}/${log.id}.json`
-      const logData = JSON.stringify(log)
+      // Archive enhanced log with more detailed structure
+      const enhancedLogKey = `archived-enhanced-logs/${today}/${log.id}.json`
+      const enhancedLogData = JSON.stringify({
+        ...log,
+        archivedAt: new Date().toISOString(),
+        logType: 'enhanced',
+      })

       try {
         await getS3Client().send(
           new PutObjectCommand({
             Bucket: S3_CONFIG.bucket,
-            Key: logKey,
-            Body: logData,
+            Key: enhancedLogKey,
+            Body: enhancedLogData,
             ContentType: 'application/json',
             Metadata: {
               logId: String(log.id),
               workflowId: String(log.workflowId),
               executionId: String(log.executionId),
               logType: 'enhanced',
               archivedAt: new Date().toISOString(),
             },
           })
         )

-        results.archived++
+        results.enhancedLogs.archived++

         try {
+          // Delete enhanced log (will cascade to workflowExecutionBlocks due to foreign key)
           const deleteResult = await db
-            .delete(workflowLogs)
-            .where(eq(workflowLogs.id, log.id))
-            .returning({ id: workflowLogs.id })
+            .delete(workflowExecutionLogs)
+            .where(eq(workflowExecutionLogs.id, log.id))
+            .returning({ id: workflowExecutionLogs.id })

           if (deleteResult.length > 0) {
-            results.deleted++
+            results.enhancedLogs.deleted++
           } else {
-            results.deleteFailed++
-            logger.warn(`Failed to delete log ${log.id} after archiving: No rows deleted`)
+            results.enhancedLogs.deleteFailed++
+            logger.warn(
+              `Failed to delete enhanced log ${log.id} after archiving: No rows deleted`
+            )
           }
         } catch (deleteError) {
-          results.deleteFailed++
-          logger.error(`Error deleting log ${log.id} after archiving:`, { deleteError })
+          results.enhancedLogs.deleteFailed++
+          logger.error(`Error deleting enhanced log ${log.id} after archiving:`, { deleteError })
         }
       } catch (archiveError) {
-        results.archiveFailed++
-        logger.error(`Failed to archive log ${log.id}:`, { archiveError })
+        results.enhancedLogs.archiveFailed++
+        logger.error(`Failed to archive enhanced log ${log.id}:`, { archiveError })
       }
     }

     batchesProcessed++
-    hasMoreLogs = oldLogs.length === BATCH_SIZE
+    hasMoreLogs = oldEnhancedLogs.length === BATCH_SIZE

-    logger.info(`Processed batch ${batchesProcessed}: ${oldLogs.length} logs`)
+    logger.info(
+      `Processed enhanced logs batch ${batchesProcessed}: ${oldEnhancedLogs.length} logs`
+    )
   }

+  // Cleanup orphaned snapshots
+  try {
+    const snapshotRetentionDays = Number(env.FREE_PLAN_LOG_RETENTION_DAYS || '7') + 1 // Keep snapshots 1 day longer
+    const cleanedSnapshots = await snapshotService.cleanupOrphanedSnapshots(snapshotRetentionDays)
+    results.snapshots.cleaned = cleanedSnapshots
+    logger.info(`Cleaned up ${cleanedSnapshots} orphaned snapshots`)
+  } catch (snapshotError) {
+    results.snapshots.cleanupFailed = 1
+    logger.error('Error cleaning up orphaned snapshots:', { snapshotError })
+  }
+
   const timeElapsed = (Date.now() - startTime) / 1000
   const reachedLimit = batchesProcessed >= MAX_BATCHES && hasMoreLogs

   return NextResponse.json({
-    message: `Processed ${batchesProcessed} batches (${results.total} logs) in ${timeElapsed.toFixed(2)}s${reachedLimit ? ' (batch limit reached)' : ''}`,
+    message: `Processed ${batchesProcessed} enhanced log batches (${results.enhancedLogs.total} logs) in ${timeElapsed.toFixed(2)}s${reachedLimit ? ' (batch limit reached)' : ''}`,
     results,
     complete: !hasMoreLogs,
     batchLimitReached: reachedLimit,
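The cleanup loop's invariant is worth calling out: a log row is deleted only after its S3 archive succeeds, and hasMoreLogs is derived from whether a full batch came back, so the cron drains safely across runs. A minimal sketch of that archive-then-delete pattern with pluggable steps; the function shapes are assumptions, only the control flow mirrors the route:

```ts
// Generic archive-then-delete batching: delete only after a successful
// archive; stop after maxBatches or when a short batch signals an empty backlog.
async function drainOldLogs<T extends { id: string }>(
  fetchBatch: (size: number) => Promise<T[]>,
  archive: (log: T) => Promise<void>,
  remove: (id: string) => Promise<boolean>,
  batchSize = 100,
  maxBatches = 10
) {
  const results = { total: 0, archived: 0, archiveFailed: 0, deleted: 0, deleteFailed: 0 }
  let batches = 0
  let hasMore = true
  while (hasMore && batches < maxBatches) {
    const batch = await fetchBatch(batchSize)
    results.total += batch.length
    for (const log of batch) {
      try {
        await archive(log)
        results.archived++
        try {
          if (await remove(log.id)) results.deleted++
          else results.deleteFailed++
        } catch {
          results.deleteFailed++
        }
      } catch {
        results.archiveFailed++ // archive failed: keep the row for a later run
      }
    }
    batches++
    hasMore = batch.length === batchSize // a short batch means the backlog is drained
  }
  return results
}
```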
apps/sim/app/api/logs/enhanced/route.ts (new file, 499 lines)
@@ -0,0 +1,499 @@
import { and, desc, eq, gte, inArray, lte, or, type SQL, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console-logger'
import { db } from '@/db'
import { workflow, workflowExecutionBlocks, workflowExecutionLogs } from '@/db/schema'

const logger = createLogger('EnhancedLogsAPI')

// Helper function to extract block executions from trace spans
function extractBlockExecutionsFromTraceSpans(traceSpans: any[]): any[] {
  const blockExecutions: any[] = []

  function processSpan(span: any) {
    if (span.blockId) {
      blockExecutions.push({
        id: span.id,
        blockId: span.blockId,
        blockName: span.name || '',
        blockType: span.type,
        startedAt: span.startTime,
        endedAt: span.endTime,
        durationMs: span.duration || 0,
        status: span.status || 'success',
        errorMessage: span.output?.error || undefined,
        inputData: span.input || {},
        outputData: span.output || {},
        cost: span.cost || undefined,
        metadata: {},
      })
    }

    // Process children recursively
    if (span.children && Array.isArray(span.children)) {
      span.children.forEach(processSpan)
    }
  }

  traceSpans.forEach(processSpan)
  return blockExecutions
}

export const dynamic = 'force-dynamic'
export const revalidate = 0

const QueryParamsSchema = z.object({
  includeWorkflow: z.coerce.boolean().optional().default(false),
  includeBlocks: z.coerce.boolean().optional().default(false),
  limit: z.coerce.number().optional().default(100),
  offset: z.coerce.number().optional().default(0),
  level: z.string().optional(),
  workflowIds: z.string().optional(), // Comma-separated list of workflow IDs
  folderIds: z.string().optional(), // Comma-separated list of folder IDs
  triggers: z.string().optional(), // Comma-separated list of trigger types
  startDate: z.string().optional(),
  endDate: z.string().optional(),
  search: z.string().optional(),
})

export async function GET(request: NextRequest) {
  const requestId = crypto.randomUUID().slice(0, 8)

  try {
    const session = await getSession()
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthorized enhanced logs access attempt`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    const userId = session.user.id

    try {
      const { searchParams } = new URL(request.url)
      const params = QueryParamsSchema.parse(Object.fromEntries(searchParams.entries()))

      // Get user's workflows
      const userWorkflows = await db
        .select({ id: workflow.id, folderId: workflow.folderId })
        .from(workflow)
        .where(eq(workflow.userId, userId))

      const userWorkflowIds = userWorkflows.map((w) => w.id)

      if (userWorkflowIds.length === 0) {
        return NextResponse.json({ data: [], total: 0 }, { status: 200 })
      }

      // Build conditions for enhanced logs
      let conditions: SQL | undefined = inArray(workflowExecutionLogs.workflowId, userWorkflowIds)

      // Filter by level
      if (params.level && params.level !== 'all') {
        conditions = and(conditions, eq(workflowExecutionLogs.level, params.level))
      }

      // Filter by specific workflow IDs
      if (params.workflowIds) {
        const workflowIds = params.workflowIds.split(',').filter(Boolean)
        const filteredWorkflowIds = workflowIds.filter((id) => userWorkflowIds.includes(id))
        if (filteredWorkflowIds.length > 0) {
          conditions = and(
            conditions,
            inArray(workflowExecutionLogs.workflowId, filteredWorkflowIds)
          )
        }
      }

      // Filter by folder IDs
      if (params.folderIds) {
        const folderIds = params.folderIds.split(',').filter(Boolean)
        const workflowsInFolders = userWorkflows
          .filter((w) => w.folderId && folderIds.includes(w.folderId))
          .map((w) => w.id)

        if (workflowsInFolders.length > 0) {
          conditions = and(
            conditions,
            inArray(workflowExecutionLogs.workflowId, workflowsInFolders)
          )
        }
      }

      // Filter by triggers
      if (params.triggers) {
        const triggers = params.triggers.split(',').filter(Boolean)
        if (triggers.length > 0 && !triggers.includes('all')) {
          conditions = and(conditions, inArray(workflowExecutionLogs.trigger, triggers))
        }
      }

      // Filter by date range
      if (params.startDate) {
        conditions = and(
          conditions,
          gte(workflowExecutionLogs.startedAt, new Date(params.startDate))
        )
      }
      if (params.endDate) {
        conditions = and(conditions, lte(workflowExecutionLogs.startedAt, new Date(params.endDate)))
      }

      // Filter by search query
      if (params.search) {
        const searchTerm = `%${params.search}%`
        conditions = and(
          conditions,
          or(
            sql`${workflowExecutionLogs.message} ILIKE ${searchTerm}`,
            sql`${workflowExecutionLogs.executionId} ILIKE ${searchTerm}`
          )
        )
      }

      // Execute the query
      const logs = await db
        .select()
        .from(workflowExecutionLogs)
        .where(conditions)
        .orderBy(desc(workflowExecutionLogs.startedAt))
        .limit(params.limit)
        .offset(params.offset)

      // Get total count for pagination
      const countResult = await db
        .select({ count: sql<number>`count(*)` })
        .from(workflowExecutionLogs)
        .where(conditions)

      const count = countResult[0]?.count || 0

      // Get block executions for all workflow executions
      const executionIds = logs.map((log) => log.executionId)
      let blockExecutionsByExecution: Record<string, any[]> = {}

      if (executionIds.length > 0) {
        const blockLogs = await db
          .select()
          .from(workflowExecutionBlocks)
          .where(inArray(workflowExecutionBlocks.executionId, executionIds))
          .orderBy(workflowExecutionBlocks.startedAt)

        // Group block logs by execution ID
        blockExecutionsByExecution = blockLogs.reduce(
          (acc, blockLog) => {
            if (!acc[blockLog.executionId]) {
              acc[blockLog.executionId] = []
            }
            acc[blockLog.executionId].push({
              id: blockLog.id,
              blockId: blockLog.blockId,
              blockName: blockLog.blockName || '',
              blockType: blockLog.blockType,
              startedAt: blockLog.startedAt.toISOString(),
              endedAt: blockLog.endedAt?.toISOString() || blockLog.startedAt.toISOString(),
              durationMs: blockLog.durationMs || 0,
              status: blockLog.status,
              errorMessage: blockLog.errorMessage || undefined,
              errorStackTrace: blockLog.errorStackTrace || undefined,
              inputData: blockLog.inputData,
              outputData: blockLog.outputData,
              cost: blockLog.costTotal
                ? {
                    input: Number(blockLog.costInput) || 0,
                    output: Number(blockLog.costOutput) || 0,
                    total: Number(blockLog.costTotal) || 0,
                    tokens: {
                      prompt: blockLog.tokensPrompt || 0,
                      completion: blockLog.tokensCompletion || 0,
                      total: blockLog.tokensTotal || 0,
                    },
                    model: blockLog.modelUsed || '',
                  }
                : undefined,
              metadata: blockLog.metadata || {},
            })
            return acc
          },
          {} as Record<string, any[]>
        )
      }

      // Create clean trace spans from block executions
      const createTraceSpans = (blockExecutions: any[]) => {
        return blockExecutions.map((block, index) => {
          // For error blocks, include error information in the output
          let output = block.outputData
          if (block.status === 'error' && block.errorMessage) {
            output = {
              ...output,
              error: block.errorMessage,
              stackTrace: block.errorStackTrace,
            }
          }

          return {
            id: block.id,
            name: `Block ${block.blockName || block.blockType} (${block.blockType})`,
            type: block.blockType,
            duration: block.durationMs,
            startTime: block.startedAt,
            endTime: block.endedAt,
            status: block.status === 'success' ? 'success' : 'error',
            blockId: block.blockId,
            input: block.inputData,
            output,
            tokens: block.cost?.tokens?.total || 0,
            relativeStartMs: index * 100,
            children: [],
            toolCalls: [],
          }
        })
      }

      // Extract cost information from block executions
      const extractCostSummary = (blockExecutions: any[]) => {
        let totalCost = 0
        let totalInputCost = 0
        let totalOutputCost = 0
        let totalTokens = 0
        let totalPromptTokens = 0
        let totalCompletionTokens = 0
        const models = new Map()

        blockExecutions.forEach((block) => {
          if (block.cost) {
            totalCost += Number(block.cost.total) || 0
            totalInputCost += Number(block.cost.input) || 0
            totalOutputCost += Number(block.cost.output) || 0
            totalTokens += block.cost.tokens?.total || 0
            totalPromptTokens += block.cost.tokens?.prompt || 0
            totalCompletionTokens += block.cost.tokens?.completion || 0

            // Track per-model costs
            if (block.cost.model) {
              if (!models.has(block.cost.model)) {
                models.set(block.cost.model, {
                  input: 0,
                  output: 0,
                  total: 0,
                  tokens: { prompt: 0, completion: 0, total: 0 },
                })
              }
              const modelCost = models.get(block.cost.model)
              modelCost.input += Number(block.cost.input) || 0
              modelCost.output += Number(block.cost.output) || 0
              modelCost.total += Number(block.cost.total) || 0
              modelCost.tokens.prompt += block.cost.tokens?.prompt || 0
              modelCost.tokens.completion += block.cost.tokens?.completion || 0
              modelCost.tokens.total += block.cost.tokens?.total || 0
            }
          }
        })

        return {
          total: totalCost,
          input: totalInputCost,
          output: totalOutputCost,
          tokens: {
            total: totalTokens,
            prompt: totalPromptTokens,
            completion: totalCompletionTokens,
          },
          models: Object.fromEntries(models), // Convert Map to object for JSON serialization
        }
      }

      // Transform to clean enhanced log format
      const enhancedLogs = logs.map((log) => {
        const blockExecutions = blockExecutionsByExecution[log.executionId] || []

        // Use stored trace spans from metadata if available, otherwise create from block executions
        const storedTraceSpans = (log.metadata as any)?.traceSpans
        const traceSpans =
          storedTraceSpans && Array.isArray(storedTraceSpans) && storedTraceSpans.length > 0
            ? storedTraceSpans
            : createTraceSpans(blockExecutions)

        // Use extracted cost summary if available, otherwise use stored values
        const costSummary =
          blockExecutions.length > 0
            ? extractCostSummary(blockExecutions)
            : {
                input: Number(log.totalInputCost) || 0,
                output: Number(log.totalOutputCost) || 0,
                total: Number(log.totalCost) || 0,
                tokens: {
                  total: log.totalTokens || 0,
                  prompt: (log.metadata as any)?.tokenBreakdown?.prompt || 0,
                  completion: (log.metadata as any)?.tokenBreakdown?.completion || 0,
                },
                models: (log.metadata as any)?.models || {},
              }

        return {
          id: log.id,
          workflowId: log.workflowId,
          executionId: log.executionId,
          level: log.level,
          message: log.message,
          duration: log.totalDurationMs ? `${log.totalDurationMs}ms` : null,
          trigger: log.trigger,
          createdAt: log.startedAt.toISOString(),
          metadata: {
            totalDuration: log.totalDurationMs,
            cost: costSummary,
            blockStats: {
              total: log.blockCount,
              success: log.successCount,
              error: log.errorCount,
              skipped: log.skippedCount,
            },
            traceSpans,
            blockExecutions,
            enhanced: true,
          },
        }
      })

      if (params.includeWorkflow) {
        const workflowIds = [...new Set(logs.map((log) => log.workflowId))]
        const workflowConditions = inArray(workflow.id, workflowIds)

        const workflowData = await db.select().from(workflow).where(workflowConditions)
        const workflowMap = new Map(workflowData.map((w) => [w.id, w]))

        const logsWithWorkflow = enhancedLogs.map((log) => ({
          ...log,
          workflow: workflowMap.get(log.workflowId) || null,
        }))

        return NextResponse.json(
          {
            data: logsWithWorkflow,
            total: Number(count),
            page: Math.floor(params.offset / params.limit) + 1,
            pageSize: params.limit,
            totalPages: Math.ceil(Number(count) / params.limit),
          },
          { status: 200 }
        )
      }

      // Include block execution data if requested
      if (params.includeBlocks) {
        const executionIds = logs.map((log) => log.executionId)

        if (executionIds.length > 0) {
          const blockLogs = await db
            .select()
            .from(workflowExecutionBlocks)
            .where(inArray(workflowExecutionBlocks.executionId, executionIds))
            .orderBy(workflowExecutionBlocks.startedAt)

          // Group block logs by execution ID
          const blockLogsByExecution = blockLogs.reduce(
            (acc, blockLog) => {
              if (!acc[blockLog.executionId]) {
                acc[blockLog.executionId] = []
              }
              acc[blockLog.executionId].push({
                id: blockLog.id,
                blockId: blockLog.blockId,
                blockName: blockLog.blockName || '',
                blockType: blockLog.blockType,
                startedAt: blockLog.startedAt.toISOString(),
                endedAt: blockLog.endedAt?.toISOString() || blockLog.startedAt.toISOString(),
                durationMs: blockLog.durationMs || 0,
                status: blockLog.status,
                errorMessage: blockLog.errorMessage || undefined,
                inputData: blockLog.inputData,
                outputData: blockLog.outputData,
                cost: blockLog.costTotal
                  ? {
                      input: Number(blockLog.costInput) || 0,
                      output: Number(blockLog.costOutput) || 0,
                      total: Number(blockLog.costTotal) || 0,
                      tokens: {
                        prompt: blockLog.tokensPrompt || 0,
                        completion: blockLog.tokensCompletion || 0,
                        total: blockLog.tokensTotal || 0,
                      },
                      model: blockLog.modelUsed || '',
                    }
                  : undefined,
              })
              return acc
            },
            {} as Record<string, any[]>
          )

          // For executions with no block logs in the database,
          // extract block executions from stored trace spans in metadata
          logs.forEach((log) => {
            if (
              !blockLogsByExecution[log.executionId] ||
              blockLogsByExecution[log.executionId].length === 0
            ) {
              const storedTraceSpans = (log.metadata as any)?.traceSpans
              if (storedTraceSpans && Array.isArray(storedTraceSpans)) {
                blockLogsByExecution[log.executionId] =
                  extractBlockExecutionsFromTraceSpans(storedTraceSpans)
              }
            }
          })

          // Add block logs to metadata
          const logsWithBlocks = enhancedLogs.map((log) => ({
            ...log,
            metadata: {
              ...log.metadata,
              blockExecutions: blockLogsByExecution[log.executionId] || [],
            },
          }))

          return NextResponse.json(
            {
              data: logsWithBlocks,
              total: Number(count),
              page: Math.floor(params.offset / params.limit) + 1,
              pageSize: params.limit,
              totalPages: Math.ceil(Number(count) / params.limit),
            },
            { status: 200 }
          )
        }
      }

      // Return basic logs
      return NextResponse.json(
        {
          data: enhancedLogs,
          total: Number(count),
          page: Math.floor(params.offset / params.limit) + 1,
          pageSize: params.limit,
          totalPages: Math.ceil(Number(count) / params.limit),
        },
        { status: 200 }
      )
    } catch (validationError) {
      if (validationError instanceof z.ZodError) {
        logger.warn(`[${requestId}] Invalid enhanced logs request parameters`, {
          errors: validationError.errors,
        })
        return NextResponse.json(
          {
            error: 'Invalid request parameters',
            details: validationError.errors,
          },
          { status: 400 }
        )
      }
      throw validationError
    }
  } catch (error: any) {
    logger.error(`[${requestId}] Enhanced logs fetch error`, error)
    return NextResponse.json({ error: error.message }, { status: 500 })
  }
}
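Given QueryParamsSchema at the top of the file, a typical request for the newest failed runs of two workflows, with per-block detail, would look like the sketch below. The path matches the file's location under app/api; the IDs are placeholders:

```ts
// Parameter names come from QueryParamsSchema; values are placeholders.
async function fetchErrorLogs() {
  const params = new URLSearchParams({
    level: 'error',
    workflowIds: 'wf-1,wf-2',
    includeBlocks: 'true',
    limit: '20',
    offset: '0',
  })
  const res = await fetch(`/api/logs/enhanced?${params}`)
  const { data, total, page, totalPages } = await res.json()
  console.log(`page ${page}/${totalPages}: ${data.length} of ${total} logs`)
  return data
}
```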
@@ -5,7 +5,6 @@
  */
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
 import {
-  createMockRequest,
   mockExecutionDependencies,
   mockScheduleExecuteDb,
   sampleWorkflowState,
@@ -23,7 +22,7 @@ describe('Scheduled Workflow Execution API Route', () => {
     blocks: sampleWorkflowState.blocks,
     edges: sampleWorkflowState.edges || [],
     loops: sampleWorkflowState.loops || {},
-    parallels: sampleWorkflowState.parallels || {},
+    parallels: {},
     isFromNormalizedTables: true,
   }),
 }))
@@ -122,9 +121,8 @@ describe('Scheduled Workflow Execution API Route', () => {
       })),
     }))

-    const req = createMockRequest('GET')
     const { GET } = await import('./route')
-    const response = await GET(req)
+    const response = await GET()
     expect(response).toBeDefined()

     const data = await response.json()
@@ -136,7 +134,6 @@ describe('Scheduled Workflow Execution API Route', () => {
     const persistExecutionErrorMock = vi.fn().mockResolvedValue(undefined)

     vi.doMock('@/lib/logs/execution-logger', () => ({
-      persistExecutionLogs: vi.fn().mockResolvedValue(undefined),
       persistExecutionError: persistExecutionErrorMock,
     }))
@@ -146,9 +143,8 @@ describe('Scheduled Workflow Execution API Route', () => {
       })),
     }))

-    const req = createMockRequest('GET')
     const { GET } = await import('./route')
-    const response = await GET(req)
+    const response = await GET()

     expect(response).toBeDefined()
@@ -176,9 +172,8 @@ describe('Scheduled Workflow Execution API Route', () => {
       return { db: mockDb }
     })

-    const req = createMockRequest('GET')
     const { GET } = await import('./route')
-    const response = await GET(req)
+    const response = await GET()
     expect(response.status).toBe(200)
     const data = await response.json()
     expect(data).toHaveProperty('executedCount', 0)
@@ -205,9 +200,8 @@ describe('Scheduled Workflow Execution API Route', () => {
       return { db: mockDb }
     })

-    const req = createMockRequest('GET')
     const { GET } = await import('./route')
-    const response = await GET(req)
+    const response = await GET()
     expect(response.status).toBe(500)
     const data = await response.json()
@@ -238,9 +232,8 @@ describe('Scheduled Workflow Execution API Route', () => {
       ],
     })

-    const req = createMockRequest('GET')
     const { GET } = await import('./route')
-    const response = await GET(req)
+    const response = await GET()

     expect(response.status).toBe(200)
   })
@@ -269,9 +262,8 @@ describe('Scheduled Workflow Execution API Route', () => {
       ],
     })

-    const req = createMockRequest('GET')
     const { GET } = await import('./route')
-    const response = await GET(req)
+    const response = await GET()

     expect(response.status).toBe(200)
     const data = await response.json()
@@ -1,10 +1,10 @@
 import { Cron } from 'croner'
 import { and, eq, lte, not, sql } from 'drizzle-orm'
-import { type NextRequest, NextResponse } from 'next/server'
+import { NextResponse } from 'next/server'
 import { v4 as uuidv4 } from 'uuid'
 import { z } from 'zod'
 import { createLogger } from '@/lib/logs/console-logger'
-import { persistExecutionError, persistExecutionLogs } from '@/lib/logs/execution-logger'
+import { EnhancedLoggingSession } from '@/lib/logs/enhanced-logging-session'
 import { buildTraceSpans } from '@/lib/logs/trace-spans'
 import {
   type BlockState,
@@ -17,7 +17,7 @@ import { decryptSecret } from '@/lib/utils'
|
||||
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/db-helpers'
|
||||
import { updateWorkflowRunCounts } from '@/lib/workflows/utils'
|
||||
import { db } from '@/db'
|
||||
import { environment, userStats, workflow, workflowSchedule } from '@/db/schema'
|
||||
import { environment as environmentTable, userStats, workflow, workflowSchedule } from '@/db/schema'
|
||||
import { Executor } from '@/executor'
|
||||
import { Serializer } from '@/serializer'
|
||||
import { mergeSubblockState } from '@/stores/workflows/server-utils'
|
||||
@@ -58,7 +58,7 @@ const EnvVarsSchema = z.record(z.string())
|
||||
|
||||
const runningExecutions = new Set<string>()
|
||||
|
||||
export async function GET(req: NextRequest) {
|
||||
export async function GET() {
|
||||
logger.info(`Scheduled execution triggered at ${new Date().toISOString()}`)
|
||||
const requestId = crypto.randomUUID().slice(0, 8)
|
||||
const now = new Date()
|
||||
@@ -85,6 +85,7 @@ export async function GET(req: NextRequest) {

for (const schedule of dueSchedules) {
const executionId = uuidv4()
let loggingSession: EnhancedLoggingSession | null = null

try {
if (runningExecutions.has(schedule.workflowId)) {
@@ -118,15 +119,7 @@ export async function GET(req: NextRequest) {
}
)

await persistExecutionError(
schedule.workflowId,
executionId,
new Error(
usageCheck.message ||
'Usage limit exceeded. Please upgrade your plan to continue running scheduled workflows.'
),
'schedule'
)
// Error logging handled by enhanced logging session

const retryDelay = 24 * 60 * 60 * 1000 // 24 hour delay for exceeded limits
const nextRetryAt = new Date(now.getTime() + retryDelay)
@@ -176,8 +169,8 @@ export async function GET(req: NextRequest) {
// Retrieve environment variables for this user (if any).
const [userEnv] = await db
.select()
.from(environment)
.where(eq(environment.userId, workflowRecord.userId))
.from(environmentTable)
.where(eq(environmentTable.userId, workflowRecord.userId))
.limit(1)

if (!userEnv) {
@@ -306,6 +299,30 @@ export async function GET(req: NextRequest) {
logger.debug(`[${requestId}] No workflow variables found for: ${schedule.workflowId}`)
}

// Start enhanced logging
loggingSession = new EnhancedLoggingSession(
schedule.workflowId,
executionId,
'schedule',
requestId
)

// Load the actual workflow state from normalized tables
const enhancedNormalizedData = await loadWorkflowFromNormalizedTables(schedule.workflowId)

if (!enhancedNormalizedData) {
throw new Error(
`Workflow ${schedule.workflowId} has no normalized data available. Ensure the workflow is properly saved to normalized tables.`
)
}

// Start enhanced logging with environment variables
await loggingSession.safeStart({
userId: workflowRecord.userId,
workspaceId: workflowRecord.workspaceId || '',
variables: variables || {},
})

const executor = new Executor(
serializedWorkflow,
processedBlockStates,
@@ -313,6 +330,10 @@ export async function GET(req: NextRequest) {
input,
workflowVariables
)

// Set up enhanced logging on the executor
loggingSession.setupExecutor(executor)

const result = await executor.execute(schedule.workflowId)

const executionResult =
@@ -343,13 +364,16 @@ export async function GET(req: NextRequest) {

const { traceSpans, totalDuration } = buildTraceSpans(executionResult)

const enrichedResult = {
...executionResult,
traceSpans,
totalDuration,
}
// Individual block executions are logged to the enhanced system
// automatically by the logging session

await persistExecutionLogs(schedule.workflowId, executionId, enrichedResult, 'schedule')
// Complete enhanced logging
await loggingSession.safeComplete({
endedAt: new Date().toISOString(),
totalDurationMs: totalDuration || 0,
finalOutput: executionResult.output || {},
traceSpans: (traceSpans || []) as any,
})

if (executionResult.success) {
logger.info(`[${requestId}] Workflow ${schedule.workflowId} executed successfully`)
@@ -413,7 +437,18 @@ export async function GET(req: NextRequest) {
error
)

await persistExecutionError(schedule.workflowId, executionId, error, 'schedule')
// Error logging handled by enhanced logging session

if (loggingSession) {
await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: 0,
error: {
message: error.message || 'Scheduled workflow execution failed',
stackTrace: error.stack,
},
})
}

let nextRunAt: Date
try {
@@ -32,7 +32,6 @@ const executeMock = vi.fn().mockResolvedValue({
endTime: new Date().toISOString(),
},
})
const persistExecutionLogsMock = vi.fn().mockResolvedValue(undefined)
const persistExecutionErrorMock = vi.fn().mockResolvedValue(undefined)

// Mock the DB schema objects
@@ -80,7 +79,6 @@ vi.mock('@/executor', () => ({
}))

vi.mock('@/lib/logs/execution-logger', () => ({
persistExecutionLogs: persistExecutionLogsMock,
persistExecutionError: persistExecutionErrorMock,
}))

@@ -139,7 +139,7 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
return createErrorResponse(validation.error.message, validation.error.status)
}

// Get the workflow to find the user
// Get the workflow to find the user (removed deprecated state column)
const workflowData = await db
.select({
userId: workflow.userId,
@@ -88,6 +88,7 @@ describe('Workflow Execution API Route', () => {
vi.doMock('@/executor', () => ({
Executor: vi.fn().mockImplementation(() => ({
execute: executeMock,
setEnhancedLogger: vi.fn(),
})),
}))

@@ -104,6 +105,14 @@ describe('Workflow Execution API Route', () => {
persistExecutionError: vi.fn().mockResolvedValue(undefined),
}))

vi.doMock('@/lib/logs/enhanced-execution-logger', () => ({
enhancedExecutionLogger: {
startWorkflowExecution: vi.fn().mockResolvedValue(undefined),
logBlockExecution: vi.fn().mockResolvedValue(undefined),
completeWorkflowExecution: vi.fn().mockResolvedValue(undefined),
},
}))

vi.doMock('@/lib/logs/trace-spans', () => ({
buildTraceSpans: vi.fn().mockReturnValue({
traceSpans: [],
@@ -395,6 +404,7 @@ describe('Workflow Execution API Route', () => {
vi.doMock('@/executor', () => ({
Executor: vi.fn().mockImplementation(() => ({
execute: vi.fn().mockRejectedValue(new Error('Execution failed')),
setEnhancedLogger: vi.fn(),
})),
}))

@@ -418,10 +428,10 @@ describe('Workflow Execution API Route', () => {
expect(data).toHaveProperty('error')
expect(data.error).toContain('Execution failed')

// Verify error logger was called
const persistExecutionError = (await import('@/lib/logs/execution-logger'))
.persistExecutionError
expect(persistExecutionError).toHaveBeenCalled()
// Verify enhanced logger was called for error completion
const enhancedExecutionLogger = (await import('@/lib/logs/enhanced-execution-logger'))
.enhancedExecutionLogger
expect(enhancedExecutionLogger.completeWorkflowExecution).toHaveBeenCalled()
})

/**
@@ -3,7 +3,7 @@ import { type NextRequest, NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { z } from 'zod'
import { createLogger } from '@/lib/logs/console-logger'
import { persistExecutionError, persistExecutionLogs } from '@/lib/logs/execution-logger'
import { EnhancedLoggingSession } from '@/lib/logs/enhanced-logging-session'
import { buildTraceSpans } from '@/lib/logs/trace-spans'
import { checkServerSideUsageLimits } from '@/lib/usage-monitor'
import { decryptSecret } from '@/lib/utils'
@@ -14,11 +14,10 @@ import {
workflowHasResponseBlock,
} from '@/lib/workflows/utils'
import { db } from '@/db'
import { environment, userStats } from '@/db/schema'
import { environment as environmentTable, userStats } from '@/db/schema'
import { Executor } from '@/executor'
import { Serializer } from '@/serializer'
import { mergeSubblockState } from '@/stores/workflows/server-utils'
import type { WorkflowState } from '@/stores/workflows/workflow/types'
import { validateWorkflowAccess } from '../../middleware'
import { createErrorResponse, createSuccessResponse } from '../../utils'

@@ -59,6 +58,8 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) {
throw new Error('Execution is already running')
}

const loggingSession = new EnhancedLoggingSession(workflowId, executionId, 'api', requestId)

// Check if the user has exceeded their usage limits
const usageCheck = await checkServerSideUsageLimits(workflow.userId)
if (usageCheck.isExceeded) {
@@ -92,39 +93,30 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) {
logger.debug(`[${requestId}] Loading workflow ${workflowId} from normalized tables`)
const normalizedData = await loadWorkflowFromNormalizedTables(workflowId)

let blocks: Record<string, any>
let edges: any[]
let loops: Record<string, any>
let parallels: Record<string, any>

if (normalizedData) {
// Use normalized data as primary source
;({ blocks, edges, loops, parallels } = normalizedData)
logger.info(`[${requestId}] Using normalized tables for workflow execution: ${workflowId}`)
} else {
// Fallback to deployed state if available (for legacy workflows)
logger.warn(
`[${requestId}] No normalized data found, falling back to deployed state for workflow: ${workflowId}`
if (!normalizedData) {
throw new Error(
`Workflow ${workflowId} has no normalized data available. Ensure the workflow is properly saved to normalized tables.`
)

if (!workflow.deployedState) {
throw new Error(
`Workflow ${workflowId} has no deployed state and no normalized data available`
)
}

const deployedState = workflow.deployedState as WorkflowState
;({ blocks, edges, loops, parallels } = deployedState)
}

// Use normalized data as primary source
const { blocks, edges, loops, parallels } = normalizedData
logger.info(`[${requestId}] Using normalized tables for workflow execution: ${workflowId}`)
logger.debug(`[${requestId}] Normalized data loaded:`, {
blocksCount: Object.keys(blocks || {}).length,
edgesCount: (edges || []).length,
loopsCount: Object.keys(loops || {}).length,
parallelsCount: Object.keys(parallels || {}).length,
})

// Use the same execution flow as in scheduled executions
const mergedStates = mergeSubblockState(blocks)

// Fetch the user's environment variables (if any)
const [userEnv] = await db
.select()
.from(environment)
.where(eq(environment.userId, workflow.userId))
.from(environmentTable)
.where(eq(environmentTable.userId, workflow.userId))
.limit(1)

if (!userEnv) {
@@ -133,9 +125,14 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) {
)
}

// Parse and validate environment variables.
const variables = EnvVarsSchema.parse(userEnv?.variables ?? {})

await loggingSession.safeStart({
userId: workflow.userId,
workspaceId: workflow.workspaceId,
variables,
})

// Replace environment variables in the block states
const currentBlockStates = await Object.entries(mergedStates).reduce(
async (accPromise, [id, block]) => {
@@ -260,6 +257,9 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) {
workflowVariables
)

// Set up enhanced logging on the executor
loggingSession.setupExecutor(executor)

const result = await executor.execute(workflowId)

// Check if we got a StreamingExecution result (with stream + execution properties)
@@ -271,6 +271,9 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) {
executionTime: executionResult.metadata?.duration,
})

// Build trace spans from execution result (works for both success and failure)
const { traceSpans, totalDuration } = buildTraceSpans(executionResult)

// Update workflow run counts if execution was successful
if (executionResult.success) {
await updateWorkflowRunCounts(workflowId)
@@ -285,24 +288,26 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) {
.where(eq(userStats.userId, workflow.userId))
}

// Build trace spans from execution logs
const { traceSpans, totalDuration } = buildTraceSpans(executionResult)

// Add trace spans to the execution result
const enrichedResult = {
...executionResult,
traceSpans,
totalDuration,
}

// Log each execution step and the final result
await persistExecutionLogs(workflowId, executionId, enrichedResult, 'api')
await loggingSession.safeComplete({
endedAt: new Date().toISOString(),
totalDurationMs: totalDuration || 0,
finalOutput: executionResult.output || {},
traceSpans: (traceSpans || []) as any,
})

return executionResult
} catch (error: any) {
logger.error(`[${requestId}] Workflow execution failed: ${workflowId}`, error)
// Log the error
await persistExecutionError(workflowId, executionId, error, 'api')

await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: 0,
error: {
message: error.message || 'Workflow execution failed',
stackTrace: error.stack,
},
})

throw error
} finally {
runningExecutions.delete(executionKey)
@@ -1,7 +1,7 @@
import type { NextRequest } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { createLogger } from '@/lib/logs/console-logger'
import { persistExecutionLogs, persistLog } from '@/lib/logs/execution-logger'
import { EnhancedLoggingSession } from '@/lib/logs/enhanced-logging-session'
import { buildTraceSpans } from '@/lib/logs/trace-spans'
import { validateWorkflowAccess } from '../../middleware'
import { createErrorResponse, createSuccessResponse } from '../../utils'

@@ -33,9 +33,25 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
// Check if this execution is from chat using only the explicit source flag
const isChatExecution = result.metadata?.source === 'chat'

// Use persistExecutionLogs which handles tool call extraction
// Use 'chat' trigger type for chat executions, otherwise 'manual'
await persistExecutionLogs(id, executionId, result, isChatExecution ? 'chat' : 'manual')
// Also log to enhanced system
const triggerType = isChatExecution ? 'chat' : 'manual'
const loggingSession = new EnhancedLoggingSession(id, executionId, triggerType, requestId)

await loggingSession.safeStart({
userId: '', // TODO: Get from session
workspaceId: '', // TODO: Get from workflow
variables: {},
})

// Build trace spans from execution logs
const { traceSpans } = buildTraceSpans(result)

await loggingSession.safeComplete({
endedAt: new Date().toISOString(),
totalDurationMs: result.metadata?.duration || 0,
finalOutput: result.output || {},
traceSpans,
})

return createSuccessResponse({
message: 'Execution logs persisted successfully',
@@ -52,21 +68,6 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
executionId,
})

// Persist each log using the original method
for (const log of logs) {
await persistLog({
id: uuidv4(),
workflowId: id,
executionId,
level: log.level,
message: log.message,
duration: log.duration,
trigger: log.trigger || 'manual',
createdAt: new Date(log.createdAt || new Date()),
metadata: log.metadata,
})
}

return createSuccessResponse({ message: 'Logs persisted successfully' })
} catch (error: any) {
logger.error(`[${requestId}] Error persisting logs for workflow: ${id}`, error)
@@ -274,14 +274,6 @@ describe('Workflow By ID API Route', () => {
}),
}))

const mockTransaction = vi.fn().mockImplementation(async (callback) => {
await callback({
delete: vi.fn().mockReturnValue({
where: vi.fn().mockResolvedValue(undefined),
}),
})
})

vi.doMock('@/db', () => ({
db: {
select: vi.fn().mockReturnValue({
@@ -291,7 +283,9 @@ describe('Workflow By ID API Route', () => {
}),
}),
}),
transaction: mockTransaction,
delete: vi.fn().mockReturnValue({
where: vi.fn().mockResolvedValue(undefined),
}),
},
}))

@@ -326,14 +320,6 @@ describe('Workflow By ID API Route', () => {
}),
}))

const mockTransaction = vi.fn().mockImplementation(async (callback) => {
await callback({
delete: vi.fn().mockReturnValue({
where: vi.fn().mockResolvedValue(undefined),
}),
})
})

vi.doMock('@/db', () => ({
db: {
select: vi.fn().mockReturnValue({
@@ -343,7 +329,9 @@ describe('Workflow By ID API Route', () => {
}),
}),
}),
transaction: mockTransaction,
delete: vi.fn().mockReturnValue({
where: vi.fn().mockResolvedValue(undefined),
}),
},
}))
@@ -3,11 +3,12 @@ import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { verifyInternalToken } from '@/lib/auth/internal'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console-logger'
import { getUserEntityPermissions, hasAdminPermission } from '@/lib/permissions/utils'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/db-helpers'
import { db } from '@/db'
import { workflow, workflowBlocks, workflowEdges, workflowSubflows } from '@/db/schema'
import { workflow } from '@/db/schema'

const logger = createLogger('WorkflowByIdAPI')

@@ -206,16 +207,7 @@ export async function DELETE(
return NextResponse.json({ error: 'Access denied' }, { status: 403 })
}

// Delete workflow and all related data in a transaction
await db.transaction(async (tx) => {
// Delete from normalized tables first (foreign key constraints)
await tx.delete(workflowSubflows).where(eq(workflowSubflows.workflowId, workflowId))
await tx.delete(workflowEdges).where(eq(workflowEdges.workflowId, workflowId))
await tx.delete(workflowBlocks).where(eq(workflowBlocks.workflowId, workflowId))

// Delete the main workflow record
await tx.delete(workflow).where(eq(workflow.id, workflowId))
})
await db.delete(workflow).where(eq(workflow.id, workflowId))

const elapsed = Date.now() - startTime
logger.info(`[${requestId}] Successfully deleted workflow ${workflowId} in ${elapsed}ms`)
@@ -224,7 +216,7 @@ export async function DELETE(
// This prevents "Block not found" errors when collaborative updates try to process
// after the workflow has been deleted
try {
const socketUrl = process.env.SOCKET_SERVER_URL || 'http://localhost:3002'
const socketUrl = env.SOCKET_SERVER_URL || 'http://localhost:3002'
const socketResponse = await fetch(`${socketUrl}/api/workflow-deleted`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
@@ -2,13 +2,7 @@ import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console-logger'
import {
workflow,
workflowBlocks,
workflowEdges,
workflowSubflows,
workspaceMember,
} from '@/db/schema'
import { workflow, workspaceMember } from '@/db/schema'

const logger = createLogger('WorkspaceByIdAPI')

@@ -26,9 +20,9 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{

const workspaceId = id

// Check if user has read access to this workspace
// Check if user has any access to this workspace
const userPermission = await getUserEntityPermissions(session.user.id, 'workspace', workspaceId)
if (userPermission !== 'read') {
if (!userPermission) {
return NextResponse.json({ error: 'Workspace not found or access denied' }, { status: 404 })
}

@@ -126,20 +120,10 @@ export async function DELETE(

// Delete workspace and all related data in a transaction
await db.transaction(async (tx) => {
// Get all workflows in this workspace
const workspaceWorkflows = await tx
.select({ id: workflow.id })
.from(workflow)
.where(eq(workflow.workspaceId, workspaceId))

// Delete all workflow-related data for each workflow
for (const wf of workspaceWorkflows) {
await tx.delete(workflowSubflows).where(eq(workflowSubflows.workflowId, wf.id))
await tx.delete(workflowEdges).where(eq(workflowEdges.workflowId, wf.id))
await tx.delete(workflowBlocks).where(eq(workflowBlocks.workflowId, wf.id))
}

// Delete all workflows in the workspace
// Delete all workflows in the workspace - database cascade will handle all workflow-related data
// The database cascade will handle deleting related workflow_blocks, workflow_edges, workflow_subflows,
// workflow_logs, workflow_execution_snapshots, workflow_execution_logs, workflow_execution_trace_spans,
// workflow_schedule, webhook, marketplace, chat, and memory records
await tx.delete(workflow).where(eq(workflow.workspaceId, workspaceId))

// Delete workspace members
@@ -60,7 +60,7 @@ export async function GET(req: NextRequest) {

return NextResponse.json({ invitations })
} catch (error) {
console.error('Error fetching workspace invitations:', error)
logger.error('Error fetching workspace invitations:', error)
return NextResponse.json({ error: 'Failed to fetch invitations' }, { status: 500 })
}
}
@@ -204,7 +204,7 @@ export async function POST(req: NextRequest) {

return NextResponse.json({ success: true, invitation: invitationData })
} catch (error) {
console.error('Error creating workspace invitation:', error)
logger.error('Error creating workspace invitation:', error)
return NextResponse.json({ error: 'Failed to create invitation' }, { status: 500 })
}
}
@@ -252,9 +252,9 @@ async function sendInvitationEmail({
html: emailHtml,
})

console.log(`Invitation email sent to ${to}`)
logger.info(`Invitation email sent to ${to}`)
} catch (error) {
console.error('Error sending invitation email:', error)
logger.error('Error sending invitation email:', error)
// Continue even if email fails - the invitation is still created
}
}

@@ -36,7 +36,7 @@ export function ControlBar() {
const fetchLogs = async () => {
try {
const queryParams = buildQueryParams(1, 50) // Get first 50 logs for refresh
const response = await fetch(`/api/logs?${queryParams}`)
const response = await fetch(`/api/logs/enhanced?${queryParams}`)

if (!response.ok) {
throw new Error(`Error fetching logs: ${response.statusText}`)
@@ -0,0 +1,99 @@
'use client'

import { useState } from 'react'
import { Eye, Maximize2, Minimize2, X } from 'lucide-react'
import { Badge } from '@/components/ui/badge'
import { Button } from '@/components/ui/button'
import { Dialog, DialogContent, DialogHeader, DialogTitle } from '@/components/ui/dialog'
import { cn } from '@/lib/utils'
import { FrozenCanvas } from './frozen-canvas'

interface FrozenCanvasModalProps {
executionId: string
workflowName?: string
trigger?: string
traceSpans?: any[] // TraceSpans data from log metadata
isOpen: boolean
onClose: () => void
}

export function FrozenCanvasModal({
executionId,
workflowName,
trigger,
traceSpans,
isOpen,
onClose,
}: FrozenCanvasModalProps) {
const [isFullscreen, setIsFullscreen] = useState(false)

const toggleFullscreen = () => {
setIsFullscreen(!isFullscreen)
}

return (
<Dialog open={isOpen} onOpenChange={onClose}>
<DialogContent
className={cn(
'flex flex-col gap-0 p-0',
isFullscreen
? 'h-[100vh] max-h-[100vh] w-[100vw] max-w-[100vw] rounded-none'
: 'h-[90vh] max-h-[90vh] overflow-hidden sm:max-w-[1100px]'
)}
hideCloseButton={true}
>
{/* Header */}
<DialogHeader className='flex flex-row items-center justify-between border-b bg-background p-4'>
<div className='flex items-center gap-3'>
<Eye className='h-5 w-5 text-blue-500 dark:text-blue-400' />
<div>
<DialogTitle className='font-semibold text-foreground text-lg'>
Logged Workflow State
</DialogTitle>
<div className='mt-1 flex items-center gap-2'>
{workflowName && (
<span className='text-muted-foreground text-sm'>{workflowName}</span>
)}
{trigger && (
<Badge variant='secondary' className='text-xs'>
{trigger}
</Badge>
)}
<span className='font-mono text-muted-foreground text-xs'>
{executionId.slice(0, 8)}...
</span>
</div>
</div>
</div>

<div className='flex items-center gap-2'>
<Button variant='ghost' size='sm' onClick={toggleFullscreen} className='h-8 w-8 p-0'>
{isFullscreen ? <Minimize2 className='h-4 w-4' /> : <Maximize2 className='h-4 w-4' />}
</Button>
<Button variant='ghost' size='sm' onClick={onClose} className='h-8 w-8 p-0'>
<X className='h-4 w-4' />
</Button>
</div>
</DialogHeader>

{/* Canvas Container */}
<div className='min-h-0 flex-1'>
<FrozenCanvas
executionId={executionId}
traceSpans={traceSpans}
height='100%'
width='100%'
/>
</div>

{/* Footer with instructions */}
<div className='border-t bg-background px-6 py-3'>
<div className='text-muted-foreground text-sm'>
💡 Click on blocks to see their input and output data at execution time. This canvas
shows the exact state of the workflow when this execution was captured.
</div>
</div>
</DialogContent>
</Dialog>
)
}
@@ -0,0 +1,467 @@
'use client'

import { useEffect, useState } from 'react'
import {
AlertCircle,
ChevronLeft,
ChevronRight,
Clock,
DollarSign,
Hash,
Loader2,
X,
Zap,
} from 'lucide-react'
import { Badge } from '@/components/ui/badge'
import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card'
import { createLogger } from '@/lib/logs/console-logger'
import { cn, redactApiKeys } from '@/lib/utils'
import { WorkflowPreview } from '@/app/workspace/[workspaceId]/w/components/workflow-preview/workflow-preview'
import type { WorkflowState } from '@/stores/workflows/workflow/types'

const logger = createLogger('FrozenCanvas')

function formatExecutionData(executionData: any) {
const {
inputData,
outputData,
cost,
tokens,
durationMs,
status,
blockName,
blockType,
errorMessage,
errorStackTrace,
} = executionData

return {
blockName: blockName || 'Unknown Block',
blockType: blockType || 'unknown',
status,
duration: durationMs ? `${durationMs}ms` : 'N/A',
input: redactApiKeys(inputData || {}),
output: redactApiKeys(outputData || {}),
errorMessage,
errorStackTrace,
cost: cost
? {
input: cost.input || 0,
output: cost.output || 0,
total: cost.total || 0,
}
: null,
tokens: tokens
? {
prompt: tokens.prompt || 0,
completion: tokens.completion || 0,
total: tokens.total || 0,
}
: null,
}
}

function getCurrentIterationData(blockExecutionData: any) {
if (blockExecutionData.iterations && Array.isArray(blockExecutionData.iterations)) {
const currentIndex = blockExecutionData.currentIteration ?? 0
return {
executionData: blockExecutionData.iterations[currentIndex],
currentIteration: currentIndex,
totalIterations: blockExecutionData.totalIterations ?? blockExecutionData.iterations.length,
hasMultipleIterations: blockExecutionData.iterations.length > 1,
}
}

return {
executionData: blockExecutionData,
currentIteration: 0,
totalIterations: 1,
hasMultipleIterations: false,
}
}
function PinnedLogs({ executionData, onClose }: { executionData: any; onClose: () => void }) {
const [currentIterationIndex, setCurrentIterationIndex] = useState(0)

const iterationInfo = getCurrentIterationData({
...executionData,
currentIteration: currentIterationIndex,
})

const formatted = formatExecutionData(iterationInfo.executionData)

const totalIterations = executionData.iterations?.length || 1

const goToPreviousIteration = () => {
if (currentIterationIndex > 0) {
setCurrentIterationIndex(currentIterationIndex - 1)
}
}

const goToNextIteration = () => {
if (currentIterationIndex < totalIterations - 1) {
setCurrentIterationIndex(currentIterationIndex + 1)
}
}

useEffect(() => {
setCurrentIterationIndex(0)
}, [executionData])

return (
<Card className='fixed top-4 right-4 z-[100] max-h-[calc(100vh-8rem)] w-96 overflow-y-auto border-border bg-background shadow-lg'>
<CardHeader className='pb-3'>
<div className='flex items-center justify-between'>
<CardTitle className='flex items-center gap-2 text-foreground text-lg'>
<Zap className='h-5 w-5' />
{formatted.blockName}
</CardTitle>
<button onClick={onClose} className='rounded-sm p-1 text-foreground hover:bg-muted'>
<X className='h-4 w-4' />
</button>
</div>
<div className='flex items-center justify-between'>
<div className='flex items-center gap-2'>
<Badge variant={formatted.status === 'success' ? 'default' : 'destructive'}>
{formatted.blockType}
</Badge>
<Badge variant='outline'>{formatted.status}</Badge>
</div>

{/* Iteration Navigation */}
{iterationInfo.hasMultipleIterations && (
<div className='flex items-center gap-1'>
<button
onClick={goToPreviousIteration}
disabled={currentIterationIndex === 0}
className='rounded p-1 text-muted-foreground hover:bg-muted hover:text-foreground disabled:cursor-not-allowed disabled:opacity-50'
>
<ChevronLeft className='h-4 w-4' />
</button>
<span className='px-2 text-muted-foreground text-xs'>
{currentIterationIndex + 1} / {iterationInfo.totalIterations}
</span>
<button
onClick={goToNextIteration}
disabled={currentIterationIndex === totalIterations - 1}
className='rounded p-1 text-muted-foreground hover:bg-muted hover:text-foreground disabled:cursor-not-allowed disabled:opacity-50'
>
<ChevronRight className='h-4 w-4' />
</button>
</div>
)}
</div>
</CardHeader>
<CardContent className='space-y-4'>
<div className='grid grid-cols-2 gap-4'>
<div className='flex items-center gap-2'>
<Clock className='h-4 w-4 text-muted-foreground' />
<span className='text-foreground text-sm'>{formatted.duration}</span>
</div>

{formatted.cost && (
<div className='flex items-center gap-2'>
<DollarSign className='h-4 w-4 text-muted-foreground' />
<span className='text-foreground text-sm'>${formatted.cost.total.toFixed(5)}</span>
</div>
)}

{formatted.tokens && (
<div className='flex items-center gap-2'>
<Hash className='h-4 w-4 text-muted-foreground' />
<span className='text-foreground text-sm'>{formatted.tokens.total} tokens</span>
</div>
)}
</div>

<div>
<h4 className='mb-2 font-medium text-foreground text-sm'>Input</h4>
<div className='max-h-32 overflow-y-auto rounded bg-muted p-3 font-mono text-xs'>
<pre className='text-foreground'>{JSON.stringify(formatted.input, null, 2)}</pre>
</div>
</div>

<div>
<h4 className='mb-2 font-medium text-foreground text-sm'>Output</h4>
<div className='max-h-32 overflow-y-auto rounded bg-muted p-3 font-mono text-xs'>
<pre className='text-foreground'>{JSON.stringify(formatted.output, null, 2)}</pre>
</div>
</div>

{formatted.cost && (
<div>
<h4 className='mb-2 font-medium text-foreground text-sm'>Cost Breakdown</h4>
<div className='space-y-1 text-sm'>
<div className='flex justify-between text-foreground'>
<span>Input:</span>
<span>${formatted.cost.input.toFixed(5)}</span>
</div>
<div className='flex justify-between text-foreground'>
<span>Output:</span>
<span>${formatted.cost.output.toFixed(5)}</span>
</div>
<div className='flex justify-between border-border border-t pt-1 font-medium text-foreground'>
<span>Total:</span>
<span>${formatted.cost.total.toFixed(5)}</span>
</div>
</div>
</div>
)}

{formatted.tokens && (
<div>
<h4 className='mb-2 font-medium text-foreground text-sm'>Token Usage</h4>
<div className='space-y-1 text-sm'>
<div className='flex justify-between text-foreground'>
<span>Prompt:</span>
<span>{formatted.tokens.prompt}</span>
</div>
<div className='flex justify-between text-foreground'>
<span>Completion:</span>
<span>{formatted.tokens.completion}</span>
</div>
<div className='flex justify-between border-border border-t pt-1 font-medium text-foreground'>
<span>Total:</span>
<span>{formatted.tokens.total}</span>
</div>
</div>
</div>
)}
</CardContent>
</Card>
)
}
interface FrozenCanvasData {
executionId: string
workflowId: string
workflowState: WorkflowState
executionMetadata: {
trigger: string
startedAt: string
endedAt?: string
totalDurationMs?: number
blockStats: {
total: number
success: number
error: number
skipped: number
}
cost: {
total: number | null
input: number | null
output: number | null
}
totalTokens: number | null
}
}

interface FrozenCanvasProps {
executionId: string
traceSpans?: any[]
className?: string
height?: string | number
width?: string | number
}

export function FrozenCanvas({
executionId,
traceSpans,
className,
height = '100%',
width = '100%',
}: FrozenCanvasProps) {
const [data, setData] = useState<FrozenCanvasData | null>(null)
const [blockExecutions, setBlockExecutions] = useState<Record<string, any>>({})
const [loading, setLoading] = useState(true)
const [error, setError] = useState<string | null>(null)

const [pinnedBlockId, setPinnedBlockId] = useState<string | null>(null)

// Process traceSpans to create blockExecutions map
useEffect(() => {
if (traceSpans && Array.isArray(traceSpans)) {
const blockExecutionMap: Record<string, any> = {}

const workflowSpan = traceSpans[0]
if (workflowSpan?.children && Array.isArray(workflowSpan.children)) {
const traceSpansByBlockId = workflowSpan.children.reduce((acc: any, span: any) => {
if (span.blockId) {
if (!acc[span.blockId]) {
acc[span.blockId] = []
}
acc[span.blockId].push(span)
}
return acc
}, {})

for (const [blockId, spans] of Object.entries(traceSpansByBlockId)) {
const spanArray = spans as any[]

const iterations = spanArray.map((span: any) => {
// Extract error information from span output if status is error
let errorMessage = null
let errorStackTrace = null

if (span.status === 'error' && span.output) {
// Error information can be in different formats in the output
if (typeof span.output === 'string') {
errorMessage = span.output
} else if (span.output.error) {
errorMessage = span.output.error
errorStackTrace = span.output.stackTrace || span.output.stack
} else if (span.output.message) {
errorMessage = span.output.message
errorStackTrace = span.output.stackTrace || span.output.stack
} else {
// Fallback: stringify the entire output for error cases
errorMessage = JSON.stringify(span.output)
}
}

return {
id: span.id,
blockId: span.blockId,
blockName: span.name,
blockType: span.type,
status: span.status,
startedAt: span.startTime,
endedAt: span.endTime,
durationMs: span.duration,
inputData: span.input,
outputData: span.output,
errorMessage,
errorStackTrace,
cost: span.cost || {
input: null,
output: null,
total: null,
},
tokens: span.tokens || {
prompt: null,
completion: null,
total: null,
},
modelUsed: span.model || null,
metadata: {},
}
})

blockExecutionMap[blockId] = {
iterations,
currentIteration: 0,
totalIterations: iterations.length,
}
}
}

setBlockExecutions(blockExecutionMap)
}
}, [traceSpans])
useEffect(() => {
const fetchData = async () => {
try {
setLoading(true)
setError(null)

const response = await fetch(`/api/logs/${executionId}/frozen-canvas`)
if (!response.ok) {
throw new Error(`Failed to fetch frozen canvas data: ${response.statusText}`)
}

const result = await response.json()
setData(result)
logger.debug(`Loaded frozen canvas data for execution: ${executionId}`)
} catch (err) {
const errorMessage = err instanceof Error ? err.message : 'Unknown error'
logger.error('Failed to fetch frozen canvas data:', err)
setError(errorMessage)
} finally {
setLoading(false)
}
}

fetchData()
}, [executionId])

// No need to create a temporary workflow - just use the workflowState directly

if (loading) {
return (
<div className={cn('flex items-center justify-center', className)} style={{ height, width }}>
<div className='flex items-center gap-2 text-muted-foreground'>
<Loader2 className='h-5 w-5 animate-spin' />
<span>Loading frozen canvas...</span>
</div>
</div>
)
}

if (error) {
return (
<div className={cn('flex items-center justify-center', className)} style={{ height, width }}>
<div className='flex items-center gap-2 text-destructive'>
<AlertCircle className='h-5 w-5' />
<span>Failed to load frozen canvas: {error}</span>
</div>
</div>
)
}

if (!data) {
return (
<div className={cn('flex items-center justify-center', className)} style={{ height, width }}>
<div className='text-muted-foreground'>No data available</div>
</div>
)
}

// Check if this is a migrated log without real workflow state
const isMigratedLog = (data.workflowState as any)?._migrated === true
if (isMigratedLog) {
return (
<div
className={cn('flex flex-col items-center justify-center gap-4 p-8', className)}
style={{ height, width }}
>
<div className='flex items-center gap-3 text-amber-600 dark:text-amber-400'>
<AlertCircle className='h-6 w-6' />
<span className='font-medium text-lg'>Logged State Not Found</span>
</div>
<div className='max-w-md text-center text-muted-foreground text-sm'>
This log was migrated from the old logging system. The workflow state at execution time is
not available.
</div>
<div className='text-muted-foreground text-xs'>
Note: {(data.workflowState as any)?._note}
</div>
</div>
)
}

return (
<>
<div style={{ height, width }} className={cn('frozen-canvas-mode h-full w-full', className)}>
<WorkflowPreview
workflowState={data.workflowState}
showSubBlocks={true}
isPannable={true}
onNodeClick={(blockId) => {
if (blockExecutions[blockId]) {
setPinnedBlockId(blockId)
}
}}
/>
</div>

{pinnedBlockId && blockExecutions[pinnedBlockId] && (
<PinnedLogs
executionData={blockExecutions[pinnedBlockId]}
onClose={() => setPinnedBlockId(null)}
/>
)}
</>
)
}
@@ -0,0 +1,2 @@
export { FrozenCanvas } from './frozen-canvas'
export { FrozenCanvasModal } from './frozen-canvas-modal'
@@ -1,7 +1,7 @@
'use client'

import { useEffect, useMemo, useRef, useState } from 'react'
import { ChevronDown, ChevronUp, X } from 'lucide-react'
import { ChevronDown, ChevronUp, Eye, X } from 'lucide-react'
import { Button } from '@/components/ui/button'
import { CopyButton } from '@/components/ui/copy-button'
import { ScrollArea } from '@/components/ui/scroll-area'
@@ -10,6 +10,7 @@ import { redactApiKeys } from '@/lib/utils'
import type { WorkflowLog } from '@/app/workspace/[workspaceId]/logs/stores/types'
import { formatDate } from '@/app/workspace/[workspaceId]/logs/utils/format-date'
import { formatCost } from '@/providers/utils'
import { FrozenCanvasModal } from '../frozen-canvas/frozen-canvas-modal'
import { ToolCallsDisplay } from '../tool-calls/tool-calls-display'
import { TraceSpansDisplay } from '../trace-spans/trace-spans-display'
import LogMarkdownRenderer from './components/markdown-renderer'
@@ -153,7 +154,7 @@ const BlockContentDisplay = ({
<>
<CopyButton text={redactedOutput} className='z-10 h-7 w-7' />
{isJson ? (
<pre className='w-full overflow-visible whitespace-pre-wrap break-all text-sm'>
<pre className='w-full overflow-y-auto overflow-x-hidden whitespace-pre-wrap break-all text-sm'>
{redactedOutput}
</pre>
) : (
@@ -166,7 +167,7 @@ const BlockContentDisplay = ({
text={JSON.stringify(redactedBlockInput, null, 2)}
className='z-10 h-7 w-7'
/>
<pre className='w-full overflow-visible whitespace-pre-wrap break-all text-sm'>
<pre className='w-full overflow-y-auto overflow-x-hidden whitespace-pre-wrap break-all text-sm'>
{JSON.stringify(redactedBlockInput, null, 2)}
</pre>
</>
@@ -193,6 +194,8 @@ export function Sidebar({
const [isDragging, setIsDragging] = useState(false)
const [_currentLogId, setCurrentLogId] = useState<string | null>(null)
const [isTraceExpanded, setIsTraceExpanded] = useState(false)
const [isModelsExpanded, setIsModelsExpanded] = useState(false)
const [isFrozenCanvasOpen, setIsFrozenCanvasOpen] = useState(false)
const scrollAreaRef = useRef<HTMLDivElement>(null)

// Update currentLogId when log changes
@@ -238,22 +241,26 @@ export function Sidebar({
// Determine if this is a workflow execution log
const isWorkflowExecutionLog = useMemo(() => {
if (!log) return false
// Check if message contains "workflow executed" or similar phrases
// Check if message contains workflow execution phrases (success or failure)
return (
log.message.toLowerCase().includes('workflow executed') ||
log.message.toLowerCase().includes('execution completed') ||
(log.trigger === 'manual' && log.duration)
log.message.toLowerCase().includes('workflow execution failed') ||
log.message.toLowerCase().includes('execution failed') ||
(log.trigger === 'manual' && log.duration) ||
// Also check if we have enhanced logging metadata with trace spans
(log.metadata?.enhanced && log.metadata?.traceSpans)
)
}, [log])

// Helper to determine if we have trace spans to display
const _hasTraceSpans = useMemo(() => {
return !!(log?.metadata?.traceSpans && log.metadata.traceSpans.length > 0)
}, [log])

// Helper to determine if we have cost information to display
const hasCostInfo = useMemo(() => {
return !!(log?.metadata?.cost && (log.metadata.cost.input || log.metadata.cost.output))
return !!(
log?.metadata?.cost &&
((log.metadata.cost.input && log.metadata.cost.input > 0) ||
(log.metadata.cost.output && log.metadata.cost.output > 0) ||
(log.metadata.cost.total && log.metadata.cost.total > 0))
)
}, [log])

const isWorkflowWithCost = useMemo(() => {
@@ -487,6 +494,103 @@ export function Sidebar({
</div>
)}

{/* Enhanced Stats - only show for enhanced logs */}
{log.metadata?.enhanced && log.metadata?.blockStats && (
<div>
<h3 className='mb-1 font-medium text-muted-foreground text-xs'>
Block Execution Stats
</h3>
<div className='space-y-1 text-sm'>
<div className='flex justify-between'>
<span>Total Blocks:</span>
<span className='font-medium'>{log.metadata.blockStats.total}</span>
</div>
<div className='flex justify-between'>
<span>Successful:</span>
<span className='font-medium text-green-600'>
{log.metadata.blockStats.success}
</span>
</div>
{log.metadata.blockStats.error > 0 && (
<div className='flex justify-between'>
<span>Failed:</span>
<span className='font-medium text-red-600'>
{log.metadata.blockStats.error}
</span>
</div>
)}
{log.metadata.blockStats.skipped > 0 && (
<div className='flex justify-between'>
<span>Skipped:</span>
<span className='font-medium text-yellow-600'>
{log.metadata.blockStats.skipped}
</span>
</div>
)}
</div>
</div>
)}

{/* Enhanced Cost - only show for enhanced logs with actual cost data */}
{log.metadata?.enhanced && hasCostInfo && (
<div>
<h3 className='mb-1 font-medium text-muted-foreground text-xs'>Cost Breakdown</h3>
<div className='space-y-1 text-sm'>
{(log.metadata?.cost?.total ?? 0) > 0 && (
<div className='flex justify-between'>
<span>Total Cost:</span>
<span className='font-medium'>
${log.metadata?.cost?.total?.toFixed(4)}
</span>
</div>
)}
{(log.metadata?.cost?.input ?? 0) > 0 && (
<div className='flex justify-between'>
<span>Input Cost:</span>
<span className='text-muted-foreground'>
${log.metadata?.cost?.input?.toFixed(4)}
</span>
</div>
)}
{(log.metadata?.cost?.output ?? 0) > 0 && (
<div className='flex justify-between'>
<span>Output Cost:</span>
<span className='text-muted-foreground'>
${log.metadata?.cost?.output?.toFixed(4)}
</span>
</div>
)}
{(log.metadata?.cost?.tokens?.total ?? 0) > 0 && (
<div className='flex justify-between'>
<span>Total Tokens:</span>
<span className='text-muted-foreground'>
{log.metadata?.cost?.tokens?.total?.toLocaleString()}
</span>
</div>
)}
</div>
</div>
)}

{/* Frozen Canvas Button - only show for workflow execution logs with execution ID */}
{isWorkflowExecutionLog && log.executionId && (
<div>
<h3 className='mb-1 font-medium text-muted-foreground text-xs'>Workflow State</h3>
<Button
variant='outline'
size='sm'
onClick={() => setIsFrozenCanvasOpen(true)}
className='w-full justify-start gap-2'
>
<Eye className='h-4 w-4' />
View Frozen Canvas
</Button>
<p className='mt-1 text-muted-foreground text-xs'>
See the exact workflow state and block inputs/outputs at execution time
</p>
</div>
)}

{/* Message Content */}
<div className='w-full pb-2'>
<h3 className='mb-1 font-medium text-muted-foreground text-xs'>Message</h3>
@@ -517,42 +621,94 @@ export function Sidebar({
)}

{/* Cost Information (moved to bottom) */}
{hasCostInfo && log.metadata?.cost && (
{hasCostInfo && (
<div>
<h3 className='mb-1 font-medium text-muted-foreground text-xs'>
{isWorkflowWithCost ? 'Total Model Cost' : 'Model Cost'}
</h3>
<h3 className='mb-1 font-medium text-muted-foreground text-xs'>Models</h3>
<div className='overflow-hidden rounded-md border'>
<div className='space-y-2 p-3'>
{log.metadata.cost.model && (
<div className='flex items-center justify-between'>
<span className='text-muted-foreground text-sm'>Model:</span>
<span className='text-sm'>{log.metadata.cost.model}</span>
</div>
)}
<div className='flex items-center justify-between'>
<span className='text-muted-foreground text-sm'>Input:</span>
<span className='text-sm'>{formatCost(log.metadata.cost.input || 0)}</span>
<span className='text-sm'>
{formatCost(log.metadata?.cost?.input || 0)}
</span>
</div>
<div className='flex items-center justify-between'>
<span className='text-muted-foreground text-sm'>Output:</span>
<span className='text-sm'>{formatCost(log.metadata.cost.output || 0)}</span>
<span className='text-sm'>
{formatCost(log.metadata?.cost?.output || 0)}
</span>
</div>
<div className='mt-1 flex items-center justify-between border-t pt-2'>
<span className='text-muted-foreground text-sm'>Total:</span>
<span className='text-foreground text-sm'>
{formatCost(log.metadata.cost.total || 0)}
{formatCost(log.metadata?.cost?.total || 0)}
</span>
</div>
<div className='flex items-center justify-between'>
<span className='text-muted-foreground text-xs'>Tokens:</span>
<span className='text-muted-foreground text-xs'>
{log.metadata.cost.tokens?.prompt || 0} in /{' '}
{log.metadata.cost.tokens?.completion || 0} out
{log.metadata?.cost?.tokens?.prompt || 0} in /{' '}
{log.metadata?.cost?.tokens?.completion || 0} out
</span>
</div>
</div>

{/* Models Breakdown */}
{log.metadata?.cost?.models &&
Object.keys(log.metadata?.cost?.models).length > 0 && (
<div className='border-t'>
<button
onClick={() => setIsModelsExpanded(!isModelsExpanded)}
className='flex w-full items-center justify-between p-3 text-left transition-colors hover:bg-muted/50'
>
<span className='font-medium text-muted-foreground text-xs'>
Model Breakdown (
{Object.keys(log.metadata?.cost?.models || {}).length})
</span>
{isModelsExpanded ? (
<ChevronUp className='h-3 w-3 text-muted-foreground' />
) : (
<ChevronDown className='h-3 w-3 text-muted-foreground' />
)}
</button>

{isModelsExpanded && (
<div className='space-y-3 border-t bg-muted/30 p-3'>
{Object.entries(log.metadata?.cost?.models || {}).map(
([model, cost]: [string, any]) => (
<div key={model} className='space-y-1'>
<div className='font-medium font-mono text-xs'>{model}</div>
<div className='space-y-1 text-xs'>
<div className='flex justify-between'>
<span className='text-muted-foreground'>Input:</span>
<span>{formatCost(cost.input || 0)}</span>
</div>
<div className='flex justify-between'>
<span className='text-muted-foreground'>Output:</span>
<span>{formatCost(cost.output || 0)}</span>
</div>
<div className='flex justify-between border-t pt-1'>
<span className='text-muted-foreground'>Total:</span>
<span className='font-medium'>
{formatCost(cost.total || 0)}
</span>
</div>
<div className='flex justify-between'>
<span className='text-muted-foreground'>Tokens:</span>
<span>
{cost.tokens?.prompt || 0} in /{' '}
{cost.tokens?.completion || 0} out
</span>
</div>
</div>
</div>
)
)}
</div>
)}
</div>
)}
</div>
)}

{isWorkflowWithCost && (
<div className='border-t bg-muted p-3 text-muted-foreground text-xs'>
<p>
@@ -568,6 +724,18 @@ export function Sidebar({
</ScrollArea>
</>
)}

{/* Frozen Canvas Modal */}
{log?.executionId && (
<FrozenCanvasModal
executionId={log.executionId}
workflowName={log.workflow?.name}
trigger={log.trigger || undefined}
traceSpans={log.metadata?.traceSpans}
isOpen={isFrozenCanvasOpen}
onClose={() => setIsFrozenCanvasOpen(false)}
/>
)}
</div>
)
}
@@ -111,7 +111,7 @@ function ToolCallItem({ toolCall, index }: ToolCallItemProps) {
{toolCall.input && (
<div>
<div className='mb-1 text-muted-foreground'>Input</div>
<pre className='group relative max-h-32 overflow-auto rounded bg-background p-2'>
<pre className='group relative max-h-32 overflow-y-auto overflow-x-hidden whitespace-pre-wrap break-all rounded bg-background p-2'>
<CopyButton text={JSON.stringify(toolCall.input, null, 2)} />
<code>{JSON.stringify(toolCall.input, null, 2)}</code>
</pre>
@@ -122,7 +122,7 @@ function ToolCallItem({ toolCall, index }: ToolCallItemProps) {
{toolCall.status === 'success' && toolCall.output && (
<div>
<div className='mb-1 text-muted-foreground'>Output</div>
<pre className='group relative max-h-32 overflow-auto rounded bg-background p-2'>
<pre className='group relative max-h-32 overflow-y-auto overflow-x-hidden whitespace-pre-wrap break-all rounded bg-background p-2'>
<CopyButton text={JSON.stringify(toolCall.output, null, 2)} />
<code>{JSON.stringify(toolCall.output, null, 2)}</code>
</pre>
@@ -132,7 +132,7 @@ function ToolCallItem({ toolCall, index }: ToolCallItemProps) {
{toolCall.status === 'error' && toolCall.error && (
<div>
<div className='mb-1 text-destructive'>Error</div>
<pre className='group relative max-h-32 overflow-auto rounded bg-destructive/10 p-2 text-destructive'>
<pre className='group relative max-h-32 overflow-y-auto overflow-x-hidden whitespace-pre-wrap break-all rounded bg-destructive/10 p-2 text-destructive'>
<CopyButton text={toolCall.error} />
<code>{toolCall.error}</code>
</pre>

@@ -27,6 +27,174 @@ interface TraceSpansDisplayProps {
onExpansionChange?: (expanded: boolean) => void
}

// Transform raw block data into clean, user-friendly format
function transformBlockData(data: any, blockType: string, isInput: boolean) {
if (!data) return null

// For input data, filter out sensitive information
if (isInput) {
const cleanInput = { ...data }

// Remove sensitive fields
if (cleanInput.apiKey) {
cleanInput.apiKey = '***'
}
if (cleanInput.azureApiKey) {
cleanInput.azureApiKey = '***'
}

// Remove null/undefined values for cleaner display
Object.keys(cleanInput).forEach((key) => {
if (cleanInput[key] === null || cleanInput[key] === undefined) {
delete cleanInput[key]
}
})

return cleanInput
}

// For output data, extract meaningful information based on block type
if (data.response) {
const response = data.response

switch (blockType) {
case 'agent':
return {
content: response.content,
model: data.model,
tokens: data.tokens,
toolCalls: response.toolCalls,
...(data.cost && { cost: data.cost }),
}

case 'function':
return {
result: response.result,
stdout: response.stdout,
...(response.executionTime && { executionTime: `${response.executionTime}ms` }),
}

case 'api':
return {
data: response.data,
status: response.status,
headers: response.headers,
}

default:
// For other block types, show the response content
return response
}
}

return data
}

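For a concrete sense of the transform, here is what it produces for a couple of invented inputs (sample data only, not taken from a real execution):

// Agent output: the `response` wrapper is unwrapped and enriched.
const agentRaw = {
  response: { content: 'Hello!', toolCalls: [] },
  model: 'gpt-4o',
  tokens: { prompt: 12, completion: 3 },
}
transformBlockData(agentRaw, 'agent', false)
// -> { content: 'Hello!', model: 'gpt-4o', tokens: { prompt: 12, completion: 3 }, toolCalls: [] }

// Input data: secrets are masked and empty fields dropped.
transformBlockData({ apiKey: 'sk-secret', prompt: 'hi', temperature: null }, 'agent', true)
// -> { apiKey: '***', prompt: 'hi' }
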
// Component to display block input/output data in a clean, readable format
function BlockDataDisplay({
data,
blockType,
isInput = false,
isError = false,
}: {
data: any
blockType?: string
isInput?: boolean
isError?: boolean
}) {
if (!data) return null

// Handle different data types
const renderValue = (value: any, key?: string): React.ReactNode => {
if (value === null) return <span className='text-muted-foreground italic'>null</span>
if (value === undefined) return <span className='text-muted-foreground italic'>undefined</span>

if (typeof value === 'string') {
return <span className='break-all text-green-700 dark:text-green-400'>"{value}"</span>
}

if (typeof value === 'number') {
return <span className='text-blue-700 dark:text-blue-400'>{value}</span>
}

if (typeof value === 'boolean') {
return <span className='text-purple-700 dark:text-purple-400'>{value.toString()}</span>
}

if (Array.isArray(value)) {
if (value.length === 0) return <span className='text-muted-foreground'>[]</span>
return (
<div className='space-y-1'>
<span className='text-muted-foreground'>[</span>
<div className='ml-4 space-y-1'>
{value.map((item, index) => (
<div key={index} className='flex min-w-0 gap-2'>
<span className='flex-shrink-0 text-muted-foreground text-xs'>{index}:</span>
<div className='min-w-0 flex-1 overflow-hidden'>{renderValue(item)}</div>
</div>
))}
</div>
<span className='text-muted-foreground'>]</span>
</div>
)
}

if (typeof value === 'object') {
const entries = Object.entries(value)
if (entries.length === 0) return <span className='text-muted-foreground'>{'{}'}</span>

return (
<div className='space-y-1'>
{entries.map(([objKey, objValue]) => (
<div key={objKey} className='flex min-w-0 gap-2'>
<span className='flex-shrink-0 font-medium text-orange-700 dark:text-orange-400'>
{objKey}:
</span>
<div className='min-w-0 flex-1 overflow-hidden'>{renderValue(objValue, objKey)}</div>
</div>
))}
</div>
)
}

return <span>{String(value)}</span>
}

// Transform the data for better display
const transformedData = transformBlockData(data, blockType || 'unknown', isInput)

// Special handling for error output
if (isError && data.error) {
return (
<div className='space-y-2 text-xs'>
<div className='rounded border border-red-200 bg-red-50 p-2 dark:border-red-800 dark:bg-red-950/20'>
<div className='mb-1 font-medium text-red-800 dark:text-red-400'>Error</div>
<div className='text-red-700 dark:text-red-300'>{data.error}</div>
</div>
{/* Show other output data if available */}
{transformedData &&
Object.keys(transformedData).filter((key) => key !== 'error' && key !== 'success')
.length > 0 && (
<div className='space-y-1'>
{Object.entries(transformedData)
.filter(([key]) => key !== 'error' && key !== 'success')
.map(([key, value]) => (
<div key={key} className='flex gap-2'>
<span className='font-medium text-orange-700 dark:text-orange-400'>{key}:</span>
{renderValue(value, key)}
</div>
))}
</div>
)}
</div>
)
}

return (
<div className='space-y-1 overflow-hidden text-xs'>{renderValue(transformedData || data)}</div>
)
}

export function TraceSpansDisplay({
traceSpans,
totalDuration = 0,
@@ -35,6 +203,30 @@ export function TraceSpansDisplay({
// Keep track of expanded spans
const [expandedSpans, setExpandedSpans] = useState<Set<string>>(new Set())

// Function to collect all span IDs recursively (for expand all functionality)
const collectAllSpanIds = (spans: TraceSpan[]): string[] => {
const ids: string[] = []

const collectIds = (span: TraceSpan) => {
const spanId = span.id || `span-${span.name}-${span.startTime}`
ids.push(spanId)

// Process children
if (span.children && span.children.length > 0) {
span.children.forEach(collectIds)
}
}

spans.forEach(collectIds)
return ids
}

const allSpanIds = useMemo(() => {
if (!traceSpans || traceSpans.length === 0) return []
return collectAllSpanIds(traceSpans)
}, [traceSpans])

// Early return after all hooks
if (!traceSpans || traceSpans.length === 0) {
return <div className='text-muted-foreground text-sm'>No trace data available</div>
}
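A small worked example of the depth-first walk above (span fields trimmed to what the id logic reads; values invented):

// Minimal local shape for illustration only.
type SpanNode = { id?: string; name: string; startTime: string; children?: SpanNode[] }

const sample: SpanNode[] = [
  {
    id: 'agent-1',
    name: 'Agent',
    startTime: '2024-01-01T00:00:00Z',
    children: [{ name: 'Function', startTime: '2024-01-01T00:00:01Z' }],
  },
]
// Walking `sample` the same way collectAllSpanIds does yields
// ['agent-1', 'span-Function-2024-01-01T00:00:01Z']: explicit ids are kept,
// and missing ids fall back to `span-${name}-${startTime}`.
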
@@ -61,26 +253,6 @@ export function TraceSpansDisplay({
// This ensures parallel spans are represented correctly in the timeline
const actualTotalDuration = workflowEndTime - workflowStartTime

// Function to collect all span IDs recursively (for expand all functionality)
const collectAllSpanIds = (spans: TraceSpan[]): string[] => {
const ids: string[] = []

const collectIds = (span: TraceSpan) => {
const spanId = span.id || `span-${span.name}-${span.startTime}`
ids.push(spanId)

// Process children
if (span.children && span.children.length > 0) {
span.children.forEach(collectIds)
}
}

spans.forEach(collectIds)
return ids
}

const allSpanIds = useMemo(() => collectAllSpanIds(traceSpans), [traceSpans])

// Handle span toggling
const handleSpanToggle = (spanId: string, expanded: boolean, hasSubItems: boolean) => {
const newExpandedSpans = new Set(expandedSpans)
@@ -140,11 +312,14 @@ export function TraceSpansDisplay({
)}
</button>
</div>
<div className='overflow-hidden rounded-md border shadow-sm'>
<div className='w-full overflow-hidden rounded-md border shadow-sm'>
{traceSpans.map((span, index) => {
const hasSubItems =
const hasSubItems = Boolean(
(span.children && span.children.length > 0) ||
(span.toolCalls && span.toolCalls.length > 0)
(span.toolCalls && span.toolCalls.length > 0) ||
span.input ||
span.output
)
return (
<TraceSpanItem
key={index}
@@ -430,6 +605,43 @@ function TraceSpanItem({
</div>
</div>

{/* Children and tool calls */}
{expanded && (
<div>
{/* Block Input/Output Data */}
{(span.input || span.output) && (
<div className='mt-2 ml-8 space-y-3 overflow-hidden'>
{/* Input Data */}
{span.input && (
<div>
<h4 className='mb-2 font-medium text-muted-foreground text-xs'>Input</h4>
<div className='overflow-hidden rounded-md bg-secondary/30 p-3'>
<BlockDataDisplay data={span.input} blockType={span.type} isInput={true} />
</div>
</div>
)}

{/* Output Data */}
{span.output && (
<div>
<h4 className='mb-2 font-medium text-muted-foreground text-xs'>
{span.status === 'error' ? 'Error Details' : 'Output'}
</h4>
<div className='overflow-hidden rounded-md bg-secondary/30 p-3'>
<BlockDataDisplay
data={span.output}
blockType={span.type}
isInput={false}
isError={span.status === 'error'}
/>
</div>
</div>
)}
</div>
)}
</div>
)}

{/* Children and tool calls */}
{expanded && (
<div>
@@ -437,9 +649,12 @@ function TraceSpanItem({
{hasChildren && (
<div>
{span.children?.map((childSpan, index) => {
const childHasSubItems =
const childHasSubItems = Boolean(
(childSpan.children && childSpan.children.length > 0) ||
(childSpan.toolCalls && childSpan.toolCalls.length > 0)
(childSpan.toolCalls && childSpan.toolCalls.length > 0) ||
childSpan.input ||
childSpan.output
)

return (
<TraceSpanItem

@@ -1,6 +1,6 @@
'use client'

import { useCallback, useEffect, useMemo, useRef, useState } from 'react'
import { useCallback, useEffect, useRef, useState } from 'react'
import { AlertCircle, Info, Loader2 } from 'lucide-react'
import { createLogger } from '@/lib/logs/console-logger'
import { useSidebarStore } from '@/stores/sidebar/store'
@@ -14,34 +14,6 @@ import { formatDate } from './utils/format-date'
const logger = createLogger('Logs')
const LOGS_PER_PAGE = 50

const getLevelBadgeStyles = (level: string) => {
switch (level.toLowerCase()) {
case 'error':
return 'bg-destructive/20 text-destructive error-badge'
case 'warn':
return 'bg-warning/20 text-warning'
default:
return 'bg-secondary text-secondary-foreground'
}
}

const getTriggerBadgeStyles = (trigger: string) => {
switch (trigger.toLowerCase()) {
case 'manual':
return 'bg-secondary text-secondary-foreground'
case 'api':
return 'bg-blue-100 dark:bg-blue-950/40 text-blue-700 dark:text-blue-400'
case 'webhook':
return 'bg-orange-100 dark:bg-orange-950/40 text-orange-700 dark:text-orange-400'
case 'schedule':
return 'bg-green-100 dark:bg-green-950/40 text-green-700 dark:text-green-400'
case 'chat':
return 'bg-purple-100 dark:bg-purple-950/40 text-purple-700 dark:text-purple-400'
default:
return 'bg-gray-100 dark:bg-gray-800 text-gray-700 dark:text-gray-400'
}
}

const selectedRowAnimation = `
@keyframes borderPulse {
0% { border-left-color: hsl(var(--primary) / 0.3) }
@@ -87,28 +59,6 @@ export default function Logs() {
const isSidebarCollapsed =
mode === 'expanded' ? !isExpanded : mode === 'collapsed' || mode === 'hover'

const executionGroups = useMemo(() => {
const groups: Record<string, WorkflowLog[]> = {}

// Group logs by executionId
logs.forEach((log) => {
if (log.executionId) {
if (!groups[log.executionId]) {
groups[log.executionId] = []
}
groups[log.executionId].push(log)
}
})

Object.keys(groups).forEach((executionId) => {
groups[executionId].sort(
(a, b) => new Date(a.createdAt).getTime() - new Date(b.createdAt).getTime()
)
})

return groups
}, [logs])

const handleLogClick = (log: WorkflowLog) => {
setSelectedLog(log)
const index = logs.findIndex((l) => l.id === log.id)
@@ -134,6 +84,8 @@ export default function Logs() {

const handleCloseSidebar = () => {
setIsSidebarOpen(false)
setSelectedLog(null)
setSelectedLogIndex(-1)
}

useEffect(() => {
@@ -155,7 +107,7 @@ export default function Logs() {
}

const queryParams = buildQueryParams(pageNum, LOGS_PER_PAGE)
const response = await fetch(`/api/logs?${queryParams}`)
const response = await fetch(`/api/logs/enhanced?${queryParams}`)

if (!response.ok) {
throw new Error(`Error fetching logs: ${response.statusText}`)
@@ -203,7 +155,7 @@ export default function Logs() {
try {
setLoading(true)
const queryParams = buildQueryParams(1, LOGS_PER_PAGE)
const response = await fetch(`/api/logs?${queryParams}`)
const response = await fetch(`/api/logs/enhanced?${queryParams}`)

if (!response.ok) {
throw new Error(`Error fetching logs: ${response.statusText}`)
@@ -353,46 +305,16 @@ export default function Logs() {
<div className='flex flex-1 flex-col overflow-hidden'>
{/* Table container */}
<div className='flex flex-1 flex-col overflow-hidden'>
{/* Table header - fixed */}
<div className='sticky top-0 z-10 border-b bg-background'>
<table className='w-full table-fixed'>
<colgroup>
<col className={`${isSidebarCollapsed ? 'w-[16%]' : 'w-[19%]'}`} />
<col className='w-[8%] md:w-[7%]' />
<col className='w-[12%] md:w-[10%]' />
<col className='hidden w-[8%] lg:table-column' />
<col className='hidden w-[8%] lg:table-column' />
<col
className={`${isSidebarCollapsed ? 'w-auto md:w-[53%] lg:w-auto' : 'w-auto md:w-[50%] lg:w-auto'}`}
/>
<col className='w-[8%] md:w-[10%]' />
</colgroup>
<thead>
<tr>
<th className='px-4 pt-2 pb-3 text-left font-medium'>
<span className='text-muted-foreground text-xs leading-none'>Time</span>
</th>
<th className='px-4 pt-2 pb-3 text-left font-medium'>
<span className='text-muted-foreground text-xs leading-none'>Status</span>
</th>
<th className='px-4 pt-2 pb-3 text-left font-medium'>
<span className='text-muted-foreground text-xs leading-none'>Workflow</span>
</th>
<th className='hidden px-4 pt-2 pb-3 text-left font-medium lg:table-cell'>
<span className='text-muted-foreground text-xs leading-none'>id</span>
</th>
<th className='hidden px-4 pt-2 pb-3 text-left font-medium lg:table-cell'>
<span className='text-muted-foreground text-xs leading-none'>Trigger</span>
</th>
<th className='px-4 pt-2 pb-3 text-left font-medium'>
<span className='text-muted-foreground text-xs leading-none'>Message</span>
</th>
<th className='px-4 pt-2 pb-3 text-left font-medium'>
<span className='text-muted-foreground text-xs leading-none'>Duration</span>
</th>
</tr>
</thead>
</table>
{/* Simple header */}
<div className='border-border/50 border-b px-4 py-3'>
<div className='flex items-center gap-4 font-medium text-muted-foreground text-xs'>
<div className='w-32'>Time</div>
<div className='w-20'>Status</div>
<div className='flex-1'>Workflow</div>
<div className='hidden w-24 lg:block'>Trigger</div>
<div className='hidden w-20 xl:block'>Cost</div>
<div className='w-20'>Duration</div>
</div>
</div>

{/* Table body - scrollable */}
@@ -419,163 +341,106 @@ export default function Logs() {
</div>
</div>
) : (
<table className='w-full table-fixed'>
<colgroup>
<col className={`${isSidebarCollapsed ? 'w-[16%]' : 'w-[19%]'}`} />
<col className='w-[8%] md:w-[7%]' />
<col className='w-[12%] md:w-[10%]' />
<col className='hidden w-[8%] lg:table-column' />
<col className='hidden w-[8%] lg:table-column' />
<col
className={`${isSidebarCollapsed ? 'w-auto md:w-[53%] lg:w-auto' : 'w-auto md:w-[50%] lg:w-auto'}`}
/>
<col className='w-[8%] md:w-[10%]' />
</colgroup>
<tbody>
{logs.map((log) => {
const formattedDate = formatDate(log.createdAt)
const isSelected = selectedLog?.id === log.id
const _isWorkflowExecutionLog =
log.executionId && executionGroups[log.executionId].length === 1
<div className='space-y-1 p-4'>
{logs.map((log) => {
const formattedDate = formatDate(log.createdAt)
const isSelected = selectedLog?.id === log.id

return (
<tr
key={log.id}
ref={isSelected ? selectedRowRef : null}
className={`cursor-pointer border-b transition-colors ${
isSelected
? 'selected-row border-l-2 bg-accent/40 hover:bg-accent/50'
: 'hover:bg-accent/30'
}`}
onClick={() => handleLogClick(log)}
>
{/* Time column */}
<td className='px-4 py-3'>
<div className='flex flex-col justify-center'>
<div className='flex items-center font-medium text-xs'>
<span>{formattedDate.formatted}</span>
<span className='mx-1.5 hidden text-muted-foreground xl:inline'>
•
</span>
<span className='hidden text-muted-foreground xl:inline'>
{new Date(log.createdAt).toLocaleDateString('en-US', {
month: 'short',
day: 'numeric',
year: 'numeric',
})}
</span>
</div>
<div className='mt-0.5 text-muted-foreground text-xs'>
<span>{formattedDate.relative}</span>
</div>
return (
<div
key={log.id}
ref={isSelected ? selectedRowRef : null}
className={`cursor-pointer rounded-lg border transition-all duration-200 ${
isSelected
? 'border-primary bg-accent/40 shadow-sm'
: 'border-border hover:border-border/80 hover:bg-accent/20'
}`}
onClick={() => handleLogClick(log)}
>
<div className='flex items-center gap-4 p-4'>
{/* Time */}
<div className='w-32 flex-shrink-0'>
<div className='font-medium text-sm'>{formattedDate.formatted}</div>
<div className='text-muted-foreground text-xs'>
{formattedDate.relative}
</div>
</td>
</div>

{/* Level column */}
<td className='px-4 py-3'>
{/* Status */}
<div className='w-20 flex-shrink-0'>
<div
className={`inline-flex items-center justify-center rounded-md px-2 py-1 text-xs ${getLevelBadgeStyles(log.level)}`}
className={`inline-flex items-center justify-center rounded-md px-2 py-1 text-xs ${
log.level === 'error'
? 'bg-red-100 text-red-800'
: 'bg-green-100 text-green-800'
}`}
>
<span className='font-medium'>{log.level}</span>
<span className='font-medium'>
{log.level === 'error' ? 'Failed' : 'Success'}
</span>
</div>
</td>
</div>

{/* Workflow column */}
<td className='px-4 py-3'>
{log.workflow && (
<div
className='inline-flex max-w-full items-center truncate rounded-md px-2 py-1 text-xs'
style={{
backgroundColor: `${log.workflow.color}20`,
color: log.workflow.color,
}}
title={log.workflow.name}
>
<span className='truncate font-medium'>{log.workflow.name}</span>
</div>
)}
</td>

{/* ID column - hidden on small screens */}
<td className='hidden px-4 py-3 lg:table-cell'>
<div className='font-mono text-muted-foreground text-xs'>
{log.executionId ? `#${log.executionId.substring(0, 4)}` : '—'}
{/* Workflow */}
<div className='min-w-0 flex-1'>
<div className='truncate font-medium text-sm'>
{log.workflow?.name || 'Unknown Workflow'}
</div>
</td>

{/* Trigger column - hidden on medium screens and below */}
<td className='hidden px-4 py-3 lg:table-cell'>
{log.trigger && (
<div
className={`inline-flex items-center rounded-md px-2 py-1 text-xs ${getTriggerBadgeStyles(log.trigger)}`}
>
<span className='font-medium'>{log.trigger}</span>
</div>
)}
</td>

{/* Message column */}
<td className='px-4 py-3'>
<div className='truncate text-sm' title={log.message}>
<div className='truncate text-muted-foreground text-xs'>
{log.message}
</div>
</td>
</div>

{/* Duration column */}
<td className='px-4 py-3'>
{/* Trigger */}
<div className='hidden w-24 flex-shrink-0 lg:block'>
<div className='text-muted-foreground text-xs'>
{log.trigger || '—'}
</div>
</div>

{/* Cost */}
<div className='hidden w-20 flex-shrink-0 xl:block'>
<div className='text-xs'>
{log.metadata?.enhanced && log.metadata?.cost?.total ? (
<span className='text-muted-foreground'>
${log.metadata.cost.total.toFixed(4)}
</span>
) : (
<span className='text-muted-foreground'>—</span>
)}
</div>
</div>

{/* Duration */}
<div className='w-20 flex-shrink-0'>
<div className='text-muted-foreground text-xs'>
{log.duration || '—'}
</div>
</td>
</tr>
)
})}

{/* Infinite scroll loader */}
{hasMore && (
<tr>
<td colSpan={7}>
<div
ref={loaderRef}
className='flex items-center justify-center py-2'
style={{ height: '50px' }}
>
{isFetchingMore && (
<div className='flex items-center gap-2 text-muted-foreground opacity-70'>
<Loader2 className='h-4 w-4 animate-spin' />
<span className='text-xs'>Loading more logs...</span>
</div>
)}
</div>
</td>
</tr>
)}

{/* Footer status indicator - useful for development */}
<tr className='border-t'>
<td colSpan={7}>
<div className='flex items-center justify-between px-4 py-2 text-muted-foreground text-xs'>
<span>Showing {logs.length} logs</span>
<div className='flex items-center gap-4'>
{isFetchingMore ? (
<div className='flex items-center gap-2' />
) : hasMore ? (
<button
type='button'
onClick={loadMoreLogs}
className='text-primary text-xs hover:underline'
>
Load more logs
</button>
) : (
<span>End of logs</span>
)}
</div>
</div>
</td>
</tr>
</tbody>
</table>
</div>
)
})}

{/* Infinite scroll loader */}
{hasMore && (
<div className='flex items-center justify-center py-4'>
<div
ref={loaderRef}
className='flex items-center gap-2 text-muted-foreground'
>
{isFetchingMore ? (
<>
<Loader2 className='h-4 w-4 animate-spin' />
<span className='text-sm'>Loading more...</span>
</>
) : (
<span className='text-sm'>Scroll to load more</span>
)}
</div>
</div>
)}
</div>
)}
</div>
</div>

@@ -22,7 +22,19 @@ export interface ToolCallMetadata {
}

export interface CostMetadata {
model?: string
models?: Record<
string,
{
input: number
output: number
total: number
tokens?: {
prompt?: number
completion?: number
total?: number
}
}
>
input?: number
output?: number
total?: number
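To make the shape concrete, a populated CostMetadata for a two-model execution might look like this (figures invented; the top-level tokens field is read by the sidebar above and is assumed to sit in the part of the interface this hunk truncates):

const exampleCost: CostMetadata = {
  input: 0.0021,
  output: 0.0009,
  total: 0.003,
  tokens: { prompt: 420, completion: 85, total: 505 },
  models: {
    'gpt-4o': {
      input: 0.0018,
      output: 0.0007,
      total: 0.0025,
      tokens: { prompt: 360, completion: 70, total: 430 },
    },
    'gpt-4o-mini': {
      input: 0.0003,
      output: 0.0002,
      total: 0.0005,
      tokens: { prompt: 60, completion: 15, total: 75 },
    },
  },
}
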
@@ -53,6 +65,7 @@ export interface TraceSpan {
relativeStartMs?: number // Time in ms from the start of the parent span
blockId?: string // Added to track the original block ID for relationship mapping
input?: Record<string, any> // Added to store input data for this span
output?: Record<string, any> // Added to store output data for this span
}

export interface WorkflowLog {
@@ -70,6 +83,29 @@ export interface WorkflowLog {
totalDuration?: number
cost?: CostMetadata
blockInput?: Record<string, any>
enhanced?: boolean
blockStats?: {
total: number
success: number
error: number
skipped: number
}
blockExecutions?: Array<{
id: string
blockId: string
blockName: string
blockType: string
startedAt: string
endedAt: string
durationMs: number
status: 'success' | 'error' | 'skipped'
errorMessage?: string
errorStackTrace?: string
inputData: any
outputData: any
cost?: CostMetadata
metadata: any
}>
}
}


@@ -30,6 +30,7 @@ import { Input } from '@/components/ui/input'
import { Label } from '@/components/ui/label'
import { Skeleton } from '@/components/ui/skeleton'
import { Textarea } from '@/components/ui/textarea'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console-logger'
import { getBaseDomain } from '@/lib/urls/utils'
import { cn } from '@/lib/utils'
@@ -54,7 +55,7 @@ interface ChatDeployProps {
type AuthType = 'public' | 'password' | 'email'

const getDomainSuffix = (() => {
const suffix = process.env.NODE_ENV === 'development' ? `.${getBaseDomain()}` : '.simstudio.ai'
const suffix = env.NODE_ENV === 'development' ? `.${getBaseDomain()}` : '.simstudio.ai'
return () => suffix
})()


@@ -458,7 +458,7 @@ export function ControlBar({ hasValidationErrors = false }: ControlBarProps) {
* Handle deleting the current workflow
*/
const handleDeleteWorkflow = () => {
if (!activeWorkflowId || !userPermissions.canEdit) return
if (!activeWorkflowId || !userPermissions.canAdmin) return

const sidebarWorkflows = getSidebarOrderedWorkflows()
const currentIndex = sidebarWorkflows.findIndex((w) => w.id === activeWorkflowId)
@@ -691,12 +691,12 @@ export function ControlBar({ hasValidationErrors = false }: ControlBarProps) {
* Render delete workflow button with confirmation dialog
*/
const renderDeleteButton = () => {
const canEdit = userPermissions.canEdit
const canAdmin = userPermissions.canAdmin
const hasMultipleWorkflows = Object.keys(workflows).length > 1
const isDisabled = !canEdit || !hasMultipleWorkflows
const isDisabled = !canAdmin || !hasMultipleWorkflows

const getTooltipText = () => {
if (!canEdit) return 'Admin permission required to delete workflows'
if (!canAdmin) return 'Admin permission required to delete workflows'
if (!hasMultipleWorkflows) return 'Cannot delete the last workflow'
return 'Delete Workflow'
}

@@ -2,6 +2,7 @@ import { useCallback, useState } from 'react'
import { v4 as uuidv4 } from 'uuid'
import { createLogger } from '@/lib/logs/console-logger'
import { buildTraceSpans } from '@/lib/logs/trace-spans'
import { processStreamingBlockLogs } from '@/lib/tokenization'
import type { BlockOutput } from '@/blocks/types'
import { Executor } from '@/executor'
import type { BlockLog, ExecutionResult, StreamingExecution } from '@/executor/types'
@@ -211,15 +212,22 @@ export function useWorkflowExecution() {
result.metadata = { duration: 0, startTime: new Date().toISOString() }
}
;(result.metadata as any).source = 'chat'
result.logs?.forEach((log: BlockLog) => {
if (streamedContent.has(log.blockId)) {
const content = streamedContent.get(log.blockId) || ''
if (log.output) {
log.output.content = content
// Update streamed content and apply tokenization
if (result.logs) {
result.logs.forEach((log: BlockLog) => {
if (streamedContent.has(log.blockId)) {
const content = streamedContent.get(log.blockId) || ''
if (log.output) {
log.output.content = content
}
useConsoleStore.getState().updateConsole(log.blockId, content)
}
useConsoleStore.getState().updateConsole(log.blockId, content)
}
})
})

// Process all logs for streaming tokenization
const processedCount = processStreamingBlockLogs(result.logs, streamedContent)
logger.info(`Processed ${processedCount} blocks for streaming tokenization`)
}

controller.enqueue(
encoder.encode(`data: ${JSON.stringify({ event: 'final', data: result })}\n\n`)

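To illustrate the streamed-content pass above with invented data:

// Hypothetical: one agent block streamed its answer during execution.
const streamedContent = new Map<string, string>([
  ['block-agent-1', 'Hello! How can I help?'],
])
// For each BlockLog whose blockId appears in the map, the loop above copies
// the streamed text into log.output.content and mirrors it to the console
// store; processStreamingBlockLogs(result.logs, streamedContent) then
// back-fills token usage for those blocks and returns how many it touched.
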
@@ -14,7 +14,9 @@ import {
} from '@/components/ui/dropdown-menu'
import { Input } from '@/components/ui/input'
import { Label } from '@/components/ui/label'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
import { createLogger } from '@/lib/logs/console-logger'
import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/w/components/providers/workspace-permissions-provider'
import { useFolderStore } from '@/stores/folders/store'

const logger = createLogger('FolderContextMenu')
@@ -43,6 +45,9 @@ export function FolderContextMenu({
const params = useParams()
const workspaceId = params.workspaceId as string

// Get user permissions for the workspace
const userPermissions = useUserPermissionsContext()

const { createFolder, updateFolder, deleteFolder } = useFolderStore()

const handleCreateWorkflow = () => {
@@ -58,12 +63,17 @@ export function FolderContextMenu({
setShowRenameDialog(true)
}

const handleDelete = () => {
const handleDelete = async () => {
if (onDelete) {
onDelete(folderId)
} else {
// Default delete behavior
deleteFolder(folderId, workspaceId)
// Default delete behavior with proper error handling
try {
await deleteFolder(folderId, workspaceId)
logger.info(`Successfully deleted folder from context menu: ${folderName}`)
} catch (error) {
logger.error('Failed to delete folder from context menu:', { error, folderId, folderName })
}
}
}

@@ -129,23 +139,46 @@ export function FolderContextMenu({
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align='end' onClick={(e) => e.stopPropagation()}>
<DropdownMenuItem onClick={handleCreateWorkflow}>
<File className='mr-2 h-4 w-4' />
New Workflow
</DropdownMenuItem>
<DropdownMenuItem onClick={handleCreateSubfolder}>
<Folder className='mr-2 h-4 w-4' />
New Subfolder
</DropdownMenuItem>
<DropdownMenuSeparator />
<DropdownMenuItem onClick={handleRename}>
<Pencil className='mr-2 h-4 w-4' />
Rename
</DropdownMenuItem>
<DropdownMenuItem onClick={handleDelete} className='text-destructive'>
<Trash2 className='mr-2 h-4 w-4' />
Delete
</DropdownMenuItem>
{userPermissions.canEdit && (
<>
<DropdownMenuItem onClick={handleCreateWorkflow}>
<File className='mr-2 h-4 w-4' />
New Workflow
</DropdownMenuItem>
<DropdownMenuItem onClick={handleCreateSubfolder}>
<Folder className='mr-2 h-4 w-4' />
New Subfolder
</DropdownMenuItem>
<DropdownMenuSeparator />
<DropdownMenuItem onClick={handleRename}>
<Pencil className='mr-2 h-4 w-4' />
Rename
</DropdownMenuItem>
</>
)}
{userPermissions.canAdmin ? (
<DropdownMenuItem onClick={handleDelete} className='text-destructive'>
<Trash2 className='mr-2 h-4 w-4' />
Delete
</DropdownMenuItem>
) : (
<Tooltip>
<TooltipTrigger asChild>
<div>
<DropdownMenuItem
className='cursor-not-allowed text-muted-foreground opacity-50'
onClick={(e) => e.preventDefault()}
>
<Trash2 className='mr-2 h-4 w-4' />
Delete
</DropdownMenuItem>
</div>
</TooltipTrigger>
<TooltipContent>
<p>Admin access required to delete folders</p>
</TooltipContent>
</Tooltip>
)}
</DropdownMenuContent>
</DropdownMenu>


@@ -7,6 +7,7 @@ import { useParams, usePathname, useRouter } from 'next/navigation'
import { Skeleton } from '@/components/ui/skeleton'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
import { useSession } from '@/lib/auth-client'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console-logger'
import {
getKeyboardShortcutText,
@@ -27,7 +28,7 @@ import { WorkspaceHeader } from './components/workspace-header/workspace-header'

const logger = createLogger('Sidebar')

const IS_DEV = process.env.NODE_ENV === 'development'
const IS_DEV = env.NODE_ENV === 'development'

export function Sidebar() {
useGlobalShortcuts()

@@ -33,6 +33,7 @@ interface WorkflowPreviewProps {
isPannable?: boolean
defaultPosition?: { x: number; y: number }
defaultZoom?: number
onNodeClick?: (blockId: string, mousePosition: { x: number; y: number }) => void
}

// Define node types - the components now handle preview mode internally
@@ -55,6 +56,7 @@ export function WorkflowPreview({
isPannable = false,
defaultPosition,
defaultZoom,
onNodeClick,
}: WorkflowPreviewProps) {
const blocksStructure = useMemo(
() => ({
@@ -256,6 +258,14 @@ export function WorkflowPreview({
elementsSelectable={false}
nodesDraggable={false}
nodesConnectable={false}
onNodeClick={
onNodeClick
? (event, node) => {
logger.debug('Node clicked:', { nodeId: node.id, event })
onNodeClick(node.id, { x: event.clientX, y: event.clientY })
}
: undefined
}
>
<Background />
</ReactFlow>

@@ -1,11 +1,12 @@
import { DocumentIcon } from '@/components/icons'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console-logger'
import type { FileParserOutput } from '@/tools/file/types'
import type { BlockConfig, SubBlockConfig, SubBlockLayout, SubBlockType } from '../types'

const logger = createLogger('FileBlock')

const shouldEnableURLInput = process.env.NODE_ENV === 'production'
const shouldEnableURLInput = env.NODE_ENV === 'production'

const inputMethodBlock: SubBlockConfig = {
id: 'inputMethod',

@@ -1,8 +1,9 @@
import { MistralIcon } from '@/components/icons'
import { env } from '@/lib/env'
import type { MistralParserOutput } from '@/tools/mistral/types'
import type { BlockConfig, SubBlockConfig, SubBlockLayout, SubBlockType } from '../types'

const shouldEnableFileUpload = process.env.NODE_ENV === 'production'
const shouldEnableFileUpload = env.NODE_ENV === 'production'

const inputMethodBlock: SubBlockConfig = {
id: 'inputMethod',

@@ -11,6 +11,7 @@ import {
Section,
Text,
} from '@react-email/components'
import { env } from '@/lib/env'
import { baseStyles } from './base-styles'
import EmailFooter from './footer'

@@ -20,7 +21,7 @@ interface WorkspaceInvitationEmailProps {
invitationLink?: string
}

const baseUrl = process.env.NEXT_PUBLIC_APP_URL || 'https://simstudio.ai'
const baseUrl = env.NEXT_PUBLIC_APP_URL || 'https://simstudio.ai'

export const WorkspaceInvitationEmail = ({
workspaceName = 'Workspace',

@@ -202,6 +202,16 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
}
}

// Check for invalid blocks before serialization to prevent race conditions
const hasInvalidBlocks = Object.values(blocks).some((block) => !block || !block.type)
if (hasInvalidBlocks) {
return {
tags: [],
variableInfoMap: {},
blockTagGroups: [],
}
}

// Create serialized workflow for BlockPathCalculator
const serializer = new Serializer()
const serializedWorkflow = serializer.serializeWorkflow(blocks, edges, loops, parallels)

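The guard returns empty results whenever any block is missing its type, which the comment above attributes to race conditions; a sketch of the kind of state it defends against (values invented):

// Mid-creation state: the block id is registered but its payload has not landed yet.
const blocks = {
  'block-a': { type: 'agent', name: 'Agent 1' },
  'block-b': undefined, // race: entry created, body still in flight
}
Object.values(blocks).some((block) => !block || !block.type) // -> true
// The dropdown returns empty tags for this render and recomputes once the
// store settles, instead of handing a half-formed block to the Serializer.
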
@@ -21,7 +21,7 @@ const TooltipContent = React.forwardRef<
ref={ref}
sideOffset={sideOffset}
className={cn(
'fade-in-0 zoom-in-95 data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 animate-in overflow-hidden rounded-md bg-black px-3 py-1.5 text-white text-xs shadow-md data-[state=closed]:animate-out dark:bg-white dark:text-black',
'fade-in-0 zoom-in-95 data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-[60] animate-in overflow-hidden rounded-md bg-black px-3 py-1.5 text-white text-xs shadow-md data-[state=closed]:animate-out dark:bg-white dark:text-black',
className
)}
{...props}

@@ -11,6 +11,7 @@ import {
} from 'react'
import { useParams } from 'next/navigation'
import { io, type Socket } from 'socket.io-client'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console-logger'

const logger = createLogger('SocketContext')
@@ -134,7 +135,7 @@ export function SocketProvider({ children, user }: SocketProviderProps) {
// Generate initial token for socket authentication
const token = await generateSocketToken()

const socketUrl = process.env.NEXT_PUBLIC_SOCKET_URL || 'http://localhost:3002'
const socketUrl = env.NEXT_PUBLIC_SOCKET_URL || 'http://localhost:3002'

logger.info('Attempting to connect to Socket.IO server', {
url: socketUrl,

@@ -38,4 +38,4 @@ declare global {
}

export const db = global.database || drizzleClient
if (process.env.NODE_ENV !== 'production') global.database = db
if (env.NODE_ENV !== 'production') global.database = db

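Caching the Drizzle client on the global object outside production is the usual Next.js pattern for surviving hot module reloads: each reload re-evaluates the module, and without the global check every reload would open a fresh connection pool.
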
@@ -159,7 +159,7 @@ export class WorkflowBlockHandler implements BlockHandler {

logger.info(`Loaded child workflow: ${workflowData.name} (${workflowId})`)

// Extract the workflow state
// Extract the workflow state (API returns normalized data in state field)
const workflowState = workflowData.state

if (!workflowState || !workflowState.blocks) {
@@ -167,7 +167,7 @@ export class WorkflowBlockHandler implements BlockHandler {
return null
}

// Use blocks directly since DB format should match UI format
// Use blocks directly since API returns data from normalized tables
const serializedWorkflow = this.serializer.serializeWorkflow(
workflowState.blocks,
workflowState.edges || [],

@@ -1,13 +1,14 @@
import { stripeClient } from '@better-auth/stripe/client'
import { emailOTPClient, genericOAuthClient, organizationClient } from 'better-auth/client/plugins'
import { createAuthClient } from 'better-auth/react'
import { env } from './env'

const clientEnv = {
NEXT_PUBLIC_VERCEL_URL: process.env.NEXT_PUBLIC_VERCEL_URL,
NEXT_PUBLIC_APP_URL: process.env.NEXT_PUBLIC_APP_URL,
NODE_ENV: process.env.NODE_ENV,
VERCEL_ENV: process.env.VERCEL_ENV || '',
BETTER_AUTH_URL: process.env.BETTER_AUTH_URL,
NEXT_PUBLIC_VERCEL_URL: env.NEXT_PUBLIC_VERCEL_URL,
NEXT_PUBLIC_APP_URL: env.NEXT_PUBLIC_APP_URL,
NODE_ENV: env.NODE_ENV,
VERCEL_ENV: env.VERCEL_ENV || '',
BETTER_AUTH_URL: env.BETTER_AUTH_URL,
}

export function getBaseURL() {

@@ -6,7 +6,11 @@ import {
verifyUnsubscribeToken,
} from './unsubscribe'

vi.stubEnv('BETTER_AUTH_SECRET', 'test-secret-key')
vi.mock('../env', () => ({
env: {
BETTER_AUTH_SECRET: 'test-secret-key',
},
}))

describe('unsubscribe utilities', () => {
const testEmail = 'test@example.com'
@@ -75,10 +79,9 @@ describe('unsubscribe utilities', () => {
it.concurrent('should handle legacy tokens (2 parts) and default to marketing', () => {
// Generate a real legacy token using the actual hashing logic to ensure backward compatibility
const salt = 'abc123'
const secret = 'test-secret-key'
const { createHash } = require('crypto')
const hash = createHash('sha256')
.update(`${testEmail}:${salt}:${process.env.BETTER_AUTH_SECRET}`)
.digest('hex')
const hash = createHash('sha256').update(`${testEmail}:${salt}:${secret}`).digest('hex')
const legacyToken = `${salt}:${hash}`

// This should return valid since we're using the actual legacy format properly

@@ -3,6 +3,7 @@ import { eq } from 'drizzle-orm'
import { createLogger } from '@/lib/logs/console-logger'
import { db } from '@/db'
import { settings, user } from '@/db/schema'
import { env } from '../env'
import type { EmailType } from './mailer'

const logger = createLogger('Unsubscribe')
@@ -20,7 +21,7 @@ export interface EmailPreferences {
export function generateUnsubscribeToken(email: string, emailType = 'marketing'): string {
const salt = randomBytes(16).toString('hex')
const hash = createHash('sha256')
.update(`${email}:${salt}:${emailType}:${process.env.BETTER_AUTH_SECRET}`)
.update(`${email}:${salt}:${emailType}:${env.BETTER_AUTH_SECRET}`)
.digest('hex')

return `${salt}:${hash}:${emailType}`
@@ -41,7 +42,7 @@ export function verifyUnsubscribeToken(
if (parts.length === 2) {
const [salt, expectedHash] = parts
const hash = createHash('sha256')
.update(`${email}:${salt}:${process.env.BETTER_AUTH_SECRET}`)
.update(`${email}:${salt}:${env.BETTER_AUTH_SECRET}`)
.digest('hex')

return { valid: hash === expectedHash, emailType: 'marketing' }
@@ -52,7 +53,7 @@ export function verifyUnsubscribeToken(
if (!salt || !expectedHash || !emailType) return { valid: false }

const hash = createHash('sha256')
.update(`${email}:${salt}:${emailType}:${process.env.BETTER_AUTH_SECRET}`)
.update(`${email}:${salt}:${emailType}:${env.BETTER_AUTH_SECRET}`)
.digest('hex')

return { valid: hash === expectedHash, emailType }

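Putting the two functions together, a round trip looks roughly like this (sample email invented, and the (email, token) argument order is assumed from the truncated declaration above):

const token = generateUnsubscribeToken('user@example.com', 'updates')
// token has the 3-part form `${salt}:${sha256(email:salt:emailType:secret)}:updates`

const result = verifyUnsubscribeToken('user@example.com', token)
// -> { valid: true, emailType: 'updates' }; a legacy 2-part token instead
// re-hashes `${email}:${salt}:${secret}` and defaults emailType to 'marketing'.
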
@@ -104,6 +104,8 @@ export const env = createEnv({
SLACK_CLIENT_ID: z.string().optional(),
SLACK_CLIENT_SECRET: z.string().optional(),
SOCKET_SERVER_URL: z.string().url().optional(),
SOCKET_PORT: z.number().optional(),
PORT: z.number().optional(),
},

client: {

@@ -93,10 +93,10 @@ export async function executeCode(
nodeModules: packages,
timeout: null,
// Add environment variables if needed
envVars: Object.entries(process.env).reduce(
envVars: Object.entries(env).reduce(
(acc, [key, value]) => {
if (value !== undefined) {
acc[key] = value
acc[key] = value as string
}
return acc
},

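The reduce above simply narrows the env object to its defined entries; standalone, with invented values and an assumed empty-record initial accumulator (the hunk cuts off before it):

const sampleEnv = { NODE_ENV: 'production', OPTIONAL_KEY: undefined as string | undefined }
const envVars = Object.entries(sampleEnv).reduce(
  (acc, [key, value]) => {
    if (value !== undefined) acc[key] = value as string
    return acc
  },
  {} as Record<string, string>
)
// envVars -> { NODE_ENV: 'production' }
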
@@ -5,6 +5,7 @@
* It is separate from the user-facing logging system in logging.ts.
*/
import chalk from 'chalk'
import { env } from '../env'

/**
* LogLevel enum defines the severity levels for logging
@@ -55,7 +56,7 @@ const LOG_CONFIG = {
}

// Get current environment
const ENV = (process.env.NODE_ENV || 'development') as keyof typeof LOG_CONFIG
const ENV = (env.NODE_ENV || 'development') as keyof typeof LOG_CONFIG
const config = LOG_CONFIG[ENV] || LOG_CONFIG.development

// Format objects for logging

apps/sim/lib/logs/enhanced-execution-logger.test.ts (Normal file, 34 lines)
@@ -0,0 +1,34 @@
import { beforeEach, describe, expect, test } from 'vitest'
import { EnhancedExecutionLogger } from './enhanced-execution-logger'

describe('EnhancedExecutionLogger', () => {
let logger: EnhancedExecutionLogger

beforeEach(() => {
logger = new EnhancedExecutionLogger()
})

describe('class instantiation', () => {
test('should create logger instance', () => {
expect(logger).toBeDefined()
expect(logger).toBeInstanceOf(EnhancedExecutionLogger)
})
})

describe('getTriggerPrefix', () => {
test('should return correct prefixes for trigger types', () => {
// Access the private method for testing
const getTriggerPrefix = (logger as any).getTriggerPrefix.bind(logger)

expect(getTriggerPrefix('api')).toBe('API')
expect(getTriggerPrefix('webhook')).toBe('Webhook')
expect(getTriggerPrefix('schedule')).toBe('Scheduled')
expect(getTriggerPrefix('manual')).toBe('Manual')
expect(getTriggerPrefix('chat')).toBe('Chat')
expect(getTriggerPrefix('unknown' as any)).toBe('Unknown')
})
})

// Note: Database integration tests would require proper mocking setup
// For now, we're testing the basic functionality without database calls
})
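The test file only smoke-tests construction; the intended call sequence, pieced together from the methods defined in the file below, would run roughly like this (ids and stats invented, and the exact ExecutionTrigger/WorkflowState payloads are elided):

const execLogger = new EnhancedExecutionLogger()

// 1. Snapshot the workflow and open the run record.
// await execLogger.startWorkflowExecution({ workflowId, executionId, trigger, environment, workflowState })

// 2. Record each block as it finishes.
// await execLogger.logBlockExecution({ executionId, workflowId, blockId, blockName, blockType, input, output, timing, status: 'success' })

// 3. Close the run with aggregate stats and costs.
// await execLogger.completeWorkflowExecution({ executionId, endedAt, totalDurationMs, blockStats, costSummary, finalOutput })
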
apps/sim/lib/logs/enhanced-execution-logger.ts (Normal file, 396 lines)
@@ -0,0 +1,396 @@
import { eq } from 'drizzle-orm'
import { v4 as uuidv4 } from 'uuid'
import { db } from '@/db'
import { workflowExecutionBlocks, workflowExecutionLogs } from '@/db/schema'
import { createLogger } from './console-logger'
import { snapshotService } from './snapshot-service'
import type {
BlockExecutionLog,
BlockInputData,
BlockOutputData,
CostBreakdown,
ExecutionEnvironment,
ExecutionTrigger,
ExecutionLoggerService as IExecutionLoggerService,
TraceSpan,
WorkflowExecutionLog,
WorkflowExecutionSnapshot,
WorkflowState,
} from './types'

const logger = createLogger('EnhancedExecutionLogger')

export class EnhancedExecutionLogger implements IExecutionLoggerService {
async startWorkflowExecution(params: {
workflowId: string
executionId: string
trigger: ExecutionTrigger
environment: ExecutionEnvironment
workflowState: WorkflowState
}): Promise<{
workflowLog: WorkflowExecutionLog
snapshot: WorkflowExecutionSnapshot
}> {
const { workflowId, executionId, trigger, environment, workflowState } = params

logger.debug(`Starting workflow execution ${executionId} for workflow ${workflowId}`)

const snapshotResult = await snapshotService.createSnapshotWithDeduplication(
workflowId,
workflowState
)

const startTime = new Date()

const [workflowLog] = await db
.insert(workflowExecutionLogs)
.values({
id: uuidv4(),
workflowId,
executionId,
stateSnapshotId: snapshotResult.snapshot.id,
level: 'info',
message: `${this.getTriggerPrefix(trigger.type)} execution started`,
trigger: trigger.type,
startedAt: startTime,
endedAt: null,
totalDurationMs: null,
blockCount: 0,
successCount: 0,
errorCount: 0,
skippedCount: 0,
totalCost: null,
totalInputCost: null,
totalOutputCost: null,
totalTokens: null,
metadata: {
environment,
trigger,
},
})
.returning()

logger.debug(`Created workflow log ${workflowLog.id} for execution ${executionId}`)

return {
workflowLog: {
id: workflowLog.id,
workflowId: workflowLog.workflowId,
executionId: workflowLog.executionId,
stateSnapshotId: workflowLog.stateSnapshotId,
level: workflowLog.level as 'info' | 'error',
message: workflowLog.message,
trigger: workflowLog.trigger as ExecutionTrigger['type'],
startedAt: workflowLog.startedAt.toISOString(),
endedAt: workflowLog.endedAt?.toISOString() || workflowLog.startedAt.toISOString(),
totalDurationMs: workflowLog.totalDurationMs || 0,
blockCount: workflowLog.blockCount,
successCount: workflowLog.successCount,
errorCount: workflowLog.errorCount,
skippedCount: workflowLog.skippedCount,
totalCost: Number(workflowLog.totalCost) || 0,
totalInputCost: Number(workflowLog.totalInputCost) || 0,
totalOutputCost: Number(workflowLog.totalOutputCost) || 0,
totalTokens: workflowLog.totalTokens || 0,
metadata: workflowLog.metadata as WorkflowExecutionLog['metadata'],
createdAt: workflowLog.createdAt.toISOString(),
},
snapshot: snapshotResult.snapshot,
}
}

async logBlockExecution(params: {
executionId: string
workflowId: string
blockId: string
blockName: string
blockType: string
input: BlockInputData
output: BlockOutputData
timing: {
startedAt: string
endedAt: string
durationMs: number
}
status: BlockExecutionLog['status']
error?: {
message: string
stackTrace?: string
}
cost?: CostBreakdown
metadata?: BlockExecutionLog['metadata']
}): Promise<BlockExecutionLog> {
const {
executionId,
workflowId,
blockId,
blockName,
blockType,
input,
output,
timing,
status,
error,
cost,
metadata,
} = params

logger.debug(`Logging block execution ${blockId} for execution ${executionId}`)

const blockLogId = uuidv4()

const [blockLog] = await db
.insert(workflowExecutionBlocks)
.values({
id: blockLogId,
executionId,
workflowId,
blockId,
blockName,
blockType,
startedAt: new Date(timing.startedAt),
endedAt: new Date(timing.endedAt),
durationMs: timing.durationMs,
status,
errorMessage: error?.message || null,
errorStackTrace: error?.stackTrace || null,
inputData: input,
outputData: output,
costInput: cost?.input ? cost.input.toString() : null,
costOutput: cost?.output ? cost.output.toString() : null,
costTotal: cost?.total ? cost.total.toString() : null,
tokensPrompt: cost?.tokens?.prompt || null,
tokensCompletion: cost?.tokens?.completion || null,
tokensTotal: cost?.tokens?.total || null,
modelUsed: cost?.model || null,
metadata: metadata || {},
})
.returning()

logger.debug(`Created block log ${blockLog.id} for block ${blockId}`)

return {
id: blockLog.id,
executionId: blockLog.executionId,
workflowId: blockLog.workflowId,
blockId: blockLog.blockId,
blockName: blockLog.blockName || '',
blockType: blockLog.blockType,
startedAt: blockLog.startedAt.toISOString(),
endedAt: blockLog.endedAt?.toISOString() || timing.endedAt,
durationMs: blockLog.durationMs || timing.durationMs,
status: blockLog.status as BlockExecutionLog['status'],
errorMessage: blockLog.errorMessage || undefined,
errorStackTrace: blockLog.errorStackTrace || undefined,
inputData: input,
outputData: output,
cost: cost || null,
metadata: (blockLog.metadata as BlockExecutionLog['metadata']) || {},
createdAt: blockLog.createdAt.toISOString(),
}
}

async completeWorkflowExecution(params: {
executionId: string
endedAt: string
totalDurationMs: number
blockStats: {
total: number
success: number
error: number
skipped: number
}
costSummary: {
totalCost: number
totalInputCost: number
totalOutputCost: number
totalTokens: number
totalPromptTokens: number
totalCompletionTokens: number
models: Record<
string,
{
input: number
output: number
total: number
tokens: { prompt: number; completion: number; total: number }
}
>
}
finalOutput: BlockOutputData
traceSpans?: TraceSpan[]
}): Promise<WorkflowExecutionLog> {
const {
executionId,
endedAt,
totalDurationMs,
blockStats,
costSummary,
finalOutput,
traceSpans,
} = params

logger.debug(`Completing workflow execution ${executionId}`)

const level = blockStats.error > 0 ? 'error' : 'info'
const message =
blockStats.error > 0
? `Workflow execution failed: ${blockStats.error} error(s), ${blockStats.success} success(es)`
: `Workflow execution completed: ${blockStats.success} block(s) executed successfully`

const [updatedLog] = await db
.update(workflowExecutionLogs)
.set({
level,
message,
endedAt: new Date(endedAt),
totalDurationMs,
blockCount: blockStats.total,
successCount: blockStats.success,
errorCount: blockStats.error,
skippedCount: blockStats.skipped,
totalCost: costSummary.totalCost.toString(),
totalInputCost: costSummary.totalInputCost.toString(),
totalOutputCost: costSummary.totalOutputCost.toString(),
totalTokens: costSummary.totalTokens,
metadata: {
traceSpans,
finalOutput,
tokenBreakdown: {
prompt: costSummary.totalPromptTokens,
completion: costSummary.totalCompletionTokens,
total: costSummary.totalTokens,
},
models: costSummary.models,
},
})
.where(eq(workflowExecutionLogs.executionId, executionId))
.returning()

if (!updatedLog) {
throw new Error(`Workflow log not found for execution ${executionId}`)
}

logger.debug(`Completed workflow execution ${executionId}`)

return {
id: updatedLog.id,
workflowId: updatedLog.workflowId,
executionId: updatedLog.executionId,
stateSnapshotId: updatedLog.stateSnapshotId,
level: updatedLog.level as 'info' | 'error',
message: updatedLog.message,
trigger: updatedLog.trigger as ExecutionTrigger['type'],
startedAt: updatedLog.startedAt.toISOString(),
endedAt: updatedLog.endedAt?.toISOString() || endedAt,
totalDurationMs: updatedLog.totalDurationMs || totalDurationMs,
blockCount: updatedLog.blockCount,
successCount: updatedLog.successCount,
errorCount: updatedLog.errorCount,
skippedCount: updatedLog.skippedCount,
totalCost: Number(updatedLog.totalCost) || 0,
totalInputCost: Number(updatedLog.totalInputCost) || 0,
totalOutputCost: Number(updatedLog.totalOutputCost) || 0,
totalTokens: updatedLog.totalTokens || 0,
metadata: updatedLog.metadata as WorkflowExecutionLog['metadata'],
createdAt: updatedLog.createdAt.toISOString(),
}
}

async getBlockExecutionsForWorkflow(executionId: string): Promise<BlockExecutionLog[]> {
const blockLogs = await db
.select()
.from(workflowExecutionBlocks)
.where(eq(workflowExecutionBlocks.executionId, executionId))
.orderBy(workflowExecutionBlocks.startedAt)

return blockLogs.map((log) => ({
id: log.id,
executionId: log.executionId,
workflowId: log.workflowId,
blockId: log.blockId,
blockName: log.blockName || '',
blockType: log.blockType,
startedAt: log.startedAt.toISOString(),
endedAt: log.endedAt?.toISOString() || log.startedAt.toISOString(),
durationMs: log.durationMs || 0,
status: log.status as BlockExecutionLog['status'],
errorMessage: log.errorMessage || undefined,
|
||||
errorStackTrace: log.errorStackTrace || undefined,
|
||||
inputData: log.inputData as BlockInputData,
|
||||
outputData: log.outputData as BlockOutputData,
|
||||
cost: log.costTotal
|
||||
? {
|
||||
input: Number(log.costInput) || 0,
|
||||
output: Number(log.costOutput) || 0,
|
||||
total: Number(log.costTotal) || 0,
|
||||
tokens: {
|
||||
prompt: log.tokensPrompt || 0,
|
||||
completion: log.tokensCompletion || 0,
|
||||
total: log.tokensTotal || 0,
|
||||
},
|
||||
model: log.modelUsed || '',
|
||||
pricing: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
updatedAt: new Date().toISOString(),
|
||||
},
|
||||
}
|
||||
: null,
|
||||
metadata: (log.metadata as BlockExecutionLog['metadata']) || {},
|
||||
createdAt: log.createdAt.toISOString(),
|
||||
}))
|
||||
}
|
||||
|
||||
async getWorkflowExecution(executionId: string): Promise<WorkflowExecutionLog | null> {
|
||||
const [workflowLog] = await db
|
||||
.select()
|
||||
.from(workflowExecutionLogs)
|
||||
.where(eq(workflowExecutionLogs.executionId, executionId))
|
||||
.limit(1)
|
||||
|
||||
if (!workflowLog) return null
|
||||
|
||||
return {
|
||||
id: workflowLog.id,
|
||||
workflowId: workflowLog.workflowId,
|
||||
executionId: workflowLog.executionId,
|
||||
stateSnapshotId: workflowLog.stateSnapshotId,
|
||||
level: workflowLog.level as 'info' | 'error',
|
||||
message: workflowLog.message,
|
||||
trigger: workflowLog.trigger as ExecutionTrigger['type'],
|
||||
startedAt: workflowLog.startedAt.toISOString(),
|
||||
endedAt: workflowLog.endedAt?.toISOString() || workflowLog.startedAt.toISOString(),
|
||||
totalDurationMs: workflowLog.totalDurationMs || 0,
|
||||
blockCount: workflowLog.blockCount,
|
||||
successCount: workflowLog.successCount,
|
||||
errorCount: workflowLog.errorCount,
|
||||
skippedCount: workflowLog.skippedCount,
|
||||
totalCost: Number(workflowLog.totalCost) || 0,
|
||||
totalInputCost: Number(workflowLog.totalInputCost) || 0,
|
||||
totalOutputCost: Number(workflowLog.totalOutputCost) || 0,
|
||||
totalTokens: workflowLog.totalTokens || 0,
|
||||
metadata: workflowLog.metadata as WorkflowExecutionLog['metadata'],
|
||||
createdAt: workflowLog.createdAt.toISOString(),
|
||||
}
|
||||
}
|
||||
|
||||
private getTriggerPrefix(triggerType: ExecutionTrigger['type']): string {
|
||||
switch (triggerType) {
|
||||
case 'api':
|
||||
return 'API'
|
||||
case 'webhook':
|
||||
return 'Webhook'
|
||||
case 'schedule':
|
||||
return 'Scheduled'
|
||||
case 'manual':
|
||||
return 'Manual'
|
||||
case 'chat':
|
||||
return 'Chat'
|
||||
default:
|
||||
return 'Unknown'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const enhancedExecutionLogger = new EnhancedExecutionLogger()
|
||||
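Usage sketch (illustration only, not part of this commit; sample values are made up): completing a run and reading it back through the logger exported above, using only the parameter shapes visible in the code.

import { enhancedExecutionLogger } from '@/lib/logs/enhanced-execution-logger'

async function finishAndInspect(executionId: string) {
  await enhancedExecutionLogger.completeWorkflowExecution({
    executionId,
    endedAt: new Date().toISOString(),
    totalDurationMs: 1200,
    blockStats: { total: 2, success: 2, error: 0, skipped: 0 },
    costSummary: {
      totalCost: 0.0031,
      totalInputCost: 0.001,
      totalOutputCost: 0.0021,
      totalTokens: 450,
      totalPromptTokens: 300,
      totalCompletionTokens: 150,
      models: {},
    },
    finalOutput: { content: 'done' },
    traceSpans: [],
  })

  const run = await enhancedExecutionLogger.getWorkflowExecution(executionId)
  const blocks = await enhancedExecutionLogger.getBlockExecutionsForWorkflow(executionId)
  console.log(run?.message, blocks.length)
}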
apps/sim/lib/logs/enhanced-logging-factory.ts | 197 (new file)
@@ -0,0 +1,197 @@
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/db-helpers'
import type { ExecutionEnvironment, ExecutionTrigger, WorkflowState } from './types'

export function createTriggerObject(
  type: ExecutionTrigger['type'],
  additionalData?: Record<string, unknown>
): ExecutionTrigger {
  return {
    type,
    source: type,
    timestamp: new Date().toISOString(),
    ...(additionalData && { data: additionalData }),
  }
}

export function createEnvironmentObject(
  workflowId: string,
  executionId: string,
  userId?: string,
  workspaceId?: string,
  variables?: Record<string, string>
): ExecutionEnvironment {
  return {
    variables: variables || {},
    workflowId,
    executionId,
    userId: userId || '',
    workspaceId: workspaceId || '',
  }
}

export async function loadWorkflowStateForExecution(workflowId: string): Promise<WorkflowState> {
  const normalizedData = await loadWorkflowFromNormalizedTables(workflowId)

  if (!normalizedData) {
    throw new Error(
      `Workflow ${workflowId} has no normalized data available. Ensure the workflow is properly saved to normalized tables.`
    )
  }

  return {
    blocks: normalizedData.blocks || {},
    edges: normalizedData.edges || [],
    loops: normalizedData.loops || {},
    parallels: normalizedData.parallels || {},
  }
}

export function calculateBlockStats(traceSpans: any[]): {
  total: number
  success: number
  error: number
  skipped: number
} {
  if (!traceSpans || traceSpans.length === 0) {
    return { total: 0, success: 0, error: 0, skipped: 0 }
  }

  // Recursively collect all block spans from the trace span tree
  const collectBlockSpans = (spans: any[]): any[] => {
    const blocks: any[] = []

    for (const span of spans) {
      // Check if this span is an actual workflow block
      if (
        span.type &&
        span.type !== 'workflow' &&
        span.type !== 'provider' &&
        span.type !== 'model' &&
        span.blockId
      ) {
        blocks.push(span)
      }

      // Recursively check children
      if (span.children && Array.isArray(span.children)) {
        blocks.push(...collectBlockSpans(span.children))
      }
    }

    return blocks
  }

  const blockSpans = collectBlockSpans(traceSpans)

  const total = blockSpans.length
  const success = blockSpans.filter((span) => span.status === 'success').length
  const error = blockSpans.filter((span) => span.status === 'error').length
  const skipped = blockSpans.filter((span) => span.status === 'skipped').length

  return { total, success, error, skipped }
}

export function calculateCostSummary(traceSpans: any[]): {
  totalCost: number
  totalInputCost: number
  totalOutputCost: number
  totalTokens: number
  totalPromptTokens: number
  totalCompletionTokens: number
  models: Record<
    string,
    {
      input: number
      output: number
      total: number
      tokens: { prompt: number; completion: number; total: number }
    }
  >
} {
  if (!traceSpans || traceSpans.length === 0) {
    return {
      totalCost: 0,
      totalInputCost: 0,
      totalOutputCost: 0,
      totalTokens: 0,
      totalPromptTokens: 0,
      totalCompletionTokens: 0,
      models: {},
    }
  }

  // Recursively collect all spans with cost information from the trace span tree
  const collectCostSpans = (spans: any[]): any[] => {
    const costSpans: any[] = []

    for (const span of spans) {
      if (span.cost) {
        costSpans.push(span)
      }

      if (span.children && Array.isArray(span.children)) {
        costSpans.push(...collectCostSpans(span.children))
      }
    }

    return costSpans
  }

  const costSpans = collectCostSpans(traceSpans)

  let totalCost = 0
  let totalInputCost = 0
  let totalOutputCost = 0
  let totalTokens = 0
  let totalPromptTokens = 0
  let totalCompletionTokens = 0
  const models: Record<
    string,
    {
      input: number
      output: number
      total: number
      tokens: { prompt: number; completion: number; total: number }
    }
  > = {}

  for (const span of costSpans) {
    totalCost += span.cost.total || 0
    totalInputCost += span.cost.input || 0
    totalOutputCost += span.cost.output || 0
    // Tokens are at span.tokens, not span.cost.tokens
    totalTokens += span.tokens?.total || 0
    totalPromptTokens += span.tokens?.prompt || 0
    totalCompletionTokens += span.tokens?.completion || 0

    // Aggregate model-specific costs - model is at span.model, not span.cost.model
    if (span.model) {
      const model = span.model
      if (!models[model]) {
        models[model] = {
          input: 0,
          output: 0,
          total: 0,
          tokens: { prompt: 0, completion: 0, total: 0 },
        }
      }
      models[model].input += span.cost.input || 0
      models[model].output += span.cost.output || 0
      models[model].total += span.cost.total || 0
      // Tokens are at span.tokens, not span.cost.tokens
      models[model].tokens.prompt += span.tokens?.prompt || 0
      models[model].tokens.completion += span.tokens?.completion || 0
      models[model].tokens.total += span.tokens?.total || 0
    }
  }

  return {
    totalCost,
    totalInputCost,
    totalOutputCost,
    totalTokens,
    totalPromptTokens,
    totalCompletionTokens,
    models,
  }
}
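Usage sketch (illustration only, not part of this commit; the span objects are made-up inputs matching the shapes the two aggregators read): both functions walk the span tree recursively, counting only spans that carry a blockId, and read token counts from span.tokens rather than span.cost.

import { calculateBlockStats, calculateCostSummary } from '@/lib/logs/enhanced-logging-factory'

const spans = [
  {
    type: 'workflow',
    children: [
      {
        type: 'agent',
        blockId: 'b1',
        status: 'success',
        cost: { input: 0.001, output: 0.002, total: 0.003 },
        tokens: { prompt: 300, completion: 150, total: 450 },
        model: 'gpt-4o',
      },
      { type: 'function', blockId: 'b2', status: 'error' },
    ],
  },
]

console.log(calculateBlockStats(spans))
// -> { total: 2, success: 1, error: 1, skipped: 0 }  (the 'workflow' wrapper is excluded)
console.log(calculateCostSummary(spans).models['gpt-4o'].tokens.total)
// -> 450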
apps/sim/lib/logs/enhanced-logging-session.ts | 199 (new file)
@@ -0,0 +1,199 @@
import { createLogger } from '@/lib/logs/console-logger'
import { enhancedExecutionLogger } from './enhanced-execution-logger'
import {
  calculateBlockStats,
  calculateCostSummary,
  createEnvironmentObject,
  createTriggerObject,
  loadWorkflowStateForExecution,
} from './enhanced-logging-factory'
import type { ExecutionEnvironment, ExecutionTrigger, WorkflowState } from './types'

const logger = createLogger('EnhancedLoggingSession')

export interface SessionStartParams {
  userId?: string
  workspaceId?: string
  variables?: Record<string, string>
  triggerData?: Record<string, unknown>
}

export interface SessionCompleteParams {
  endedAt?: string
  totalDurationMs?: number
  finalOutput?: any
  traceSpans?: any[]
}

export class EnhancedLoggingSession {
  private workflowId: string
  private executionId: string
  private triggerType: ExecutionTrigger['type']
  private requestId?: string
  private trigger?: ExecutionTrigger
  private environment?: ExecutionEnvironment
  private workflowState?: WorkflowState
  private enhancedLogger = enhancedExecutionLogger

  constructor(
    workflowId: string,
    executionId: string,
    triggerType: ExecutionTrigger['type'],
    requestId?: string
  ) {
    this.workflowId = workflowId
    this.executionId = executionId
    this.triggerType = triggerType
    this.requestId = requestId
  }

  async start(params: SessionStartParams = {}): Promise<void> {
    const { userId, workspaceId, variables, triggerData } = params

    try {
      this.trigger = createTriggerObject(this.triggerType, triggerData)
      this.environment = createEnvironmentObject(
        this.workflowId,
        this.executionId,
        userId,
        workspaceId,
        variables
      )
      this.workflowState = await loadWorkflowStateForExecution(this.workflowId)

      await enhancedExecutionLogger.startWorkflowExecution({
        workflowId: this.workflowId,
        executionId: this.executionId,
        trigger: this.trigger,
        environment: this.environment,
        workflowState: this.workflowState,
      })

      if (this.requestId) {
        logger.debug(
          `[${this.requestId}] Started enhanced logging for execution ${this.executionId}`
        )
      }
    } catch (error) {
      if (this.requestId) {
        logger.error(`[${this.requestId}] Failed to start enhanced logging:`, error)
      }
      throw error
    }
  }

  /**
   * Set up enhanced logging on an executor instance
   * Note: Enhanced logging now works through trace spans only, no direct executor integration needed
   */
  setupExecutor(executor: any): void {
    // No longer setting enhanced logger on executor - trace spans handle everything
    if (this.requestId) {
      logger.debug(
        `[${this.requestId}] Enhanced logging session ready for execution ${this.executionId}`
      )
    }
  }

  async complete(params: SessionCompleteParams = {}): Promise<void> {
    const { endedAt, totalDurationMs, finalOutput, traceSpans } = params

    try {
      const blockStats = calculateBlockStats(traceSpans || [])
      const costSummary = calculateCostSummary(traceSpans || [])

      await enhancedExecutionLogger.completeWorkflowExecution({
        executionId: this.executionId,
        endedAt: endedAt || new Date().toISOString(),
        totalDurationMs: totalDurationMs || 0,
        blockStats,
        costSummary,
        finalOutput: finalOutput || {},
        traceSpans: traceSpans || [],
      })

      if (this.requestId) {
        logger.debug(
          `[${this.requestId}] Completed enhanced logging for execution ${this.executionId}`
        )
      }
    } catch (error) {
      if (this.requestId) {
        logger.error(`[${this.requestId}] Failed to complete enhanced logging:`, error)
      }
    }
  }

  async completeWithError(error?: any): Promise<void> {
    try {
      const blockStats = { total: 0, success: 0, error: 1, skipped: 0 }
      const costSummary = {
        totalCost: 0,
        totalInputCost: 0,
        totalOutputCost: 0,
        totalTokens: 0,
        totalPromptTokens: 0,
        totalCompletionTokens: 0,
        models: {},
      }

      await enhancedExecutionLogger.completeWorkflowExecution({
        executionId: this.executionId,
        endedAt: new Date().toISOString(),
        totalDurationMs: 0,
        blockStats,
        costSummary,
        finalOutput: null,
        traceSpans: [],
      })

      if (this.requestId) {
        logger.debug(
          `[${this.requestId}] Completed enhanced logging with error for execution ${this.executionId}`
        )
      }
    } catch (enhancedError) {
      if (this.requestId) {
        logger.error(
          `[${this.requestId}] Failed to complete enhanced logging for error:`,
          enhancedError
        )
      }
    }
  }

  async safeStart(params: SessionStartParams = {}): Promise<boolean> {
    try {
      await this.start(params)
      return true
    } catch (error) {
      if (this.requestId) {
        logger.error(
          `[${this.requestId}] Enhanced logging start failed, continuing execution:`,
          error
        )
      }
      return false
    }
  }

  async safeComplete(params: SessionCompleteParams = {}): Promise<void> {
    try {
      await this.complete(params)
    } catch (error) {
      if (this.requestId) {
        logger.error(`[${this.requestId}] Enhanced logging completion failed:`, error)
      }
    }
  }

  async safeCompleteWithError(error?: any): Promise<void> {
    try {
      await this.completeWithError(error)
    } catch (enhancedError) {
      if (this.requestId) {
        logger.error(`[${this.requestId}] Enhanced logging error completion failed:`, enhancedError)
      }
    }
  }
}
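Usage sketch (illustration only, not part of this commit; the executor callback and the result's output field are assumptions): the safe* wrappers swallow logging failures, so a logging outage never blocks the workflow run itself.

import { EnhancedLoggingSession } from '@/lib/logs/enhanced-logging-session'
import { buildTraceSpans } from '@/lib/logs/trace-spans'

async function runWithLogging(
  workflowId: string,
  executionId: string,
  execute: () => Promise<any>
) {
  const session = new EnhancedLoggingSession(workflowId, executionId, 'api', 'req-123')
  const startedAt = Date.now()

  // safeStart returns false instead of throwing
  await session.safeStart({ userId: 'user-1', variables: {} })

  try {
    const result = await execute()
    const { traceSpans } = buildTraceSpans(result)
    await session.safeComplete({
      endedAt: new Date().toISOString(),
      totalDurationMs: Date.now() - startedAt,
      finalOutput: result.output, // field name assumed
      traceSpans,
    })
    return result
  } catch (error) {
    await session.safeCompleteWithError(error)
    throw error
  }
}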
apps/sim/lib/logs/snapshot-service.test.ts | 219 (new file)
@@ -0,0 +1,219 @@
import { beforeEach, describe, expect, test } from 'vitest'
import { SnapshotService } from './snapshot-service'
import type { WorkflowState } from './types'

describe('SnapshotService', () => {
  let service: SnapshotService

  beforeEach(() => {
    service = new SnapshotService()
  })

  describe('computeStateHash', () => {
    test('should generate consistent hashes for identical states', () => {
      const state: WorkflowState = {
        blocks: {
          block1: {
            id: 'block1',
            name: 'Test Agent',
            type: 'agent',
            position: { x: 100, y: 200 },
            subBlocks: {},
            outputs: {},
            enabled: true,
            horizontalHandles: true,
            isWide: false,
            advancedMode: false,
            height: 0,
          },
        },
        edges: [{ id: 'edge1', source: 'block1', target: 'block2' }],
        loops: {},
        parallels: {},
      }

      const hash1 = service.computeStateHash(state)
      const hash2 = service.computeStateHash(state)

      expect(hash1).toBe(hash2)
      expect(hash1).toHaveLength(64) // SHA-256 hex string
    })

    test('should ignore position changes', () => {
      const baseState: WorkflowState = {
        blocks: {
          block1: {
            id: 'block1',
            name: 'Test Agent',
            type: 'agent',
            position: { x: 100, y: 200 },
            subBlocks: {},
            outputs: {},
            enabled: true,
            horizontalHandles: true,
            isWide: false,
            advancedMode: false,
            height: 0,
          },
        },
        edges: [],
        loops: {},
        parallels: {},
      }

      const stateWithDifferentPosition: WorkflowState = {
        ...baseState,
        blocks: {
          block1: {
            ...baseState.blocks.block1,
            position: { x: 500, y: 600 }, // Different position
          },
        },
      }

      const hash1 = service.computeStateHash(baseState)
      const hash2 = service.computeStateHash(stateWithDifferentPosition)

      expect(hash1).toBe(hash2)
    })

    test('should detect meaningful changes', () => {
      const baseState: WorkflowState = {
        blocks: {
          block1: {
            id: 'block1',
            name: 'Test Agent',
            type: 'agent',
            position: { x: 100, y: 200 },
            subBlocks: {},
            outputs: {},
            enabled: true,
            horizontalHandles: true,
            isWide: false,
            advancedMode: false,
            height: 0,
          },
        },
        edges: [],
        loops: {},
        parallels: {},
      }

      const stateWithDifferentPrompt: WorkflowState = {
        ...baseState,
        blocks: {
          block1: {
            ...baseState.blocks.block1,
            // Different block state - we can change outputs to make it different
            outputs: { response: { content: 'different result' } as Record<string, any> },
          },
        },
      }

      const hash1 = service.computeStateHash(baseState)
      const hash2 = service.computeStateHash(stateWithDifferentPrompt)

      expect(hash1).not.toBe(hash2)
    })

    test('should handle edge order consistently', () => {
      const state1: WorkflowState = {
        blocks: {},
        edges: [
          { id: 'edge1', source: 'a', target: 'b' },
          { id: 'edge2', source: 'b', target: 'c' },
        ],
        loops: {},
        parallels: {},
      }

      const state2: WorkflowState = {
        blocks: {},
        edges: [
          { id: 'edge2', source: 'b', target: 'c' }, // Different order
          { id: 'edge1', source: 'a', target: 'b' },
        ],
        loops: {},
        parallels: {},
      }

      const hash1 = service.computeStateHash(state1)
      const hash2 = service.computeStateHash(state2)

      expect(hash1).toBe(hash2) // Should be same despite different order
    })

    test('should handle empty states', () => {
      const emptyState: WorkflowState = {
        blocks: {},
        edges: [],
        loops: {},
        parallels: {},
      }

      const hash = service.computeStateHash(emptyState)
      expect(hash).toHaveLength(64)
    })

    test('should handle complex nested structures', () => {
      const complexState: WorkflowState = {
        blocks: {
          block1: {
            id: 'block1',
            name: 'Complex Agent',
            type: 'agent',
            position: { x: 100, y: 200 },
            subBlocks: {
              prompt: {
                id: 'prompt',
                type: 'short-input',
                value: 'Test prompt',
              },
              model: {
                id: 'model',
                type: 'short-input',
                value: 'gpt-4',
              },
            },
            outputs: {
              response: { content: 'Agent response' } as Record<string, any>,
            },
            enabled: true,
            horizontalHandles: true,
            isWide: false,
            advancedMode: true,
            height: 200,
          },
        },
        edges: [{ id: 'edge1', source: 'block1', target: 'block2', sourceHandle: 'output' }],
        loops: {
          loop1: {
            id: 'loop1',
            nodes: ['block1'],
            iterations: 10,
            loopType: 'for',
          },
        },
        parallels: {
          parallel1: {
            id: 'parallel1',
            nodes: ['block1'],
            count: 3,
            parallelType: 'count',
          },
        },
      }

      const hash = service.computeStateHash(complexState)
      expect(hash).toHaveLength(64)

      // Should be consistent
      const hash2 = service.computeStateHash(complexState)
      expect(hash).toBe(hash2)
    })
  })
})
apps/sim/lib/logs/snapshot-service.ts | 236 (new file)
@@ -0,0 +1,236 @@
import { createHash } from 'crypto'
import { and, eq, lt } from 'drizzle-orm'
import { v4 as uuidv4 } from 'uuid'
import { db } from '@/db'
import { workflowExecutionSnapshots } from '@/db/schema'
import { createLogger } from './console-logger'
import type {
  SnapshotService as ISnapshotService,
  SnapshotCreationResult,
  WorkflowExecutionSnapshot,
  WorkflowExecutionSnapshotInsert,
  WorkflowState,
} from './types'

const logger = createLogger('SnapshotService')

export class SnapshotService implements ISnapshotService {
  async createSnapshot(
    workflowId: string,
    state: WorkflowState
  ): Promise<WorkflowExecutionSnapshot> {
    const result = await this.createSnapshotWithDeduplication(workflowId, state)
    return result.snapshot
  }

  async createSnapshotWithDeduplication(
    workflowId: string,
    state: WorkflowState
  ): Promise<SnapshotCreationResult> {
    // Hash the position-less state for deduplication (functional equivalence)
    const stateHash = this.computeStateHash(state)

    const existingSnapshot = await this.getSnapshotByHash(workflowId, stateHash)
    if (existingSnapshot) {
      logger.debug(`Reusing existing snapshot for workflow ${workflowId} with hash ${stateHash}`)
      return {
        snapshot: existingSnapshot,
        isNew: false,
      }
    }

    // Store the FULL state (including positions) so we can recreate the exact workflow
    // Even though we hash without positions, we want to preserve the complete state
    const snapshotData: WorkflowExecutionSnapshotInsert = {
      id: uuidv4(),
      workflowId,
      stateHash,
      stateData: state, // Full state with positions, subblock values, etc.
    }

    const [newSnapshot] = await db
      .insert(workflowExecutionSnapshots)
      .values(snapshotData)
      .returning()

    logger.debug(`Created new snapshot for workflow ${workflowId} with hash ${stateHash}`)
    logger.debug(`Stored full state with ${Object.keys(state.blocks || {}).length} blocks`)
    return {
      snapshot: {
        ...newSnapshot,
        stateData: newSnapshot.stateData as WorkflowState,
        createdAt: newSnapshot.createdAt.toISOString(),
      },
      isNew: true,
    }
  }

  async getSnapshot(id: string): Promise<WorkflowExecutionSnapshot | null> {
    const [snapshot] = await db
      .select()
      .from(workflowExecutionSnapshots)
      .where(eq(workflowExecutionSnapshots.id, id))
      .limit(1)

    if (!snapshot) return null

    return {
      ...snapshot,
      stateData: snapshot.stateData as WorkflowState,
      createdAt: snapshot.createdAt.toISOString(),
    }
  }

  async getSnapshotByHash(
    workflowId: string,
    hash: string
  ): Promise<WorkflowExecutionSnapshot | null> {
    const [snapshot] = await db
      .select()
      .from(workflowExecutionSnapshots)
      .where(
        and(
          eq(workflowExecutionSnapshots.workflowId, workflowId),
          eq(workflowExecutionSnapshots.stateHash, hash)
        )
      )
      .limit(1)

    if (!snapshot) return null

    return {
      ...snapshot,
      stateData: snapshot.stateData as WorkflowState,
      createdAt: snapshot.createdAt.toISOString(),
    }
  }

  computeStateHash(state: WorkflowState): string {
    const normalizedState = this.normalizeStateForHashing(state)
    const stateString = this.normalizedStringify(normalizedState)
    return createHash('sha256').update(stateString).digest('hex')
  }

  async cleanupOrphanedSnapshots(olderThanDays: number): Promise<number> {
    const cutoffDate = new Date()
    cutoffDate.setDate(cutoffDate.getDate() - olderThanDays)

    const deletedSnapshots = await db
      .delete(workflowExecutionSnapshots)
      .where(lt(workflowExecutionSnapshots.createdAt, cutoffDate))
      .returning({ id: workflowExecutionSnapshots.id })

    const deletedCount = deletedSnapshots.length
    logger.info(`Cleaned up ${deletedCount} orphaned snapshots older than ${olderThanDays} days`)
    return deletedCount
  }

  private normalizeStateForHashing(state: WorkflowState): any {
    // Use the same normalization logic as hasWorkflowChanged for consistency

    // 1. Normalize edges (same as hasWorkflowChanged)
    const normalizedEdges = (state.edges || [])
      .map((edge) => ({
        source: edge.source,
        sourceHandle: edge.sourceHandle,
        target: edge.target,
        targetHandle: edge.targetHandle,
      }))
      .sort((a, b) =>
        `${a.source}-${a.sourceHandle}-${a.target}-${a.targetHandle}`.localeCompare(
          `${b.source}-${b.sourceHandle}-${b.target}-${b.targetHandle}`
        )
      )

    // 2. Normalize blocks (same as hasWorkflowChanged)
    const normalizedBlocks: Record<string, any> = {}

    for (const [blockId, block] of Object.entries(state.blocks || {})) {
      // Skip position as it doesn't affect functionality
      const { position, ...blockWithoutPosition } = block

      // Handle subBlocks with detailed comparison (same as hasWorkflowChanged)
      const subBlocks = blockWithoutPosition.subBlocks || {}
      const normalizedSubBlocks: Record<string, any> = {}

      for (const [subBlockId, subBlock] of Object.entries(subBlocks)) {
        // Normalize value with special handling for null/undefined
        const value = subBlock.value ?? null

        normalizedSubBlocks[subBlockId] = {
          type: subBlock.type,
          value: this.normalizeValue(value),
          // Include other properties except value
          ...Object.fromEntries(
            Object.entries(subBlock).filter(([key]) => key !== 'value' && key !== 'type')
          ),
        }
      }

      normalizedBlocks[blockId] = {
        ...blockWithoutPosition,
        subBlocks: normalizedSubBlocks,
      }
    }

    // 3. Normalize loops and parallels
    const normalizedLoops: Record<string, any> = {}
    for (const [loopId, loop] of Object.entries(state.loops || {})) {
      normalizedLoops[loopId] = this.normalizeValue(loop)
    }

    const normalizedParallels: Record<string, any> = {}
    for (const [parallelId, parallel] of Object.entries(state.parallels || {})) {
      normalizedParallels[parallelId] = this.normalizeValue(parallel)
    }

    return {
      blocks: normalizedBlocks,
      edges: normalizedEdges,
      loops: normalizedLoops,
      parallels: normalizedParallels,
    }
  }

  private normalizeValue(value: any): any {
    // Handle null/undefined consistently
    if (value === null || value === undefined) return null

    // Handle arrays
    if (Array.isArray(value)) {
      return value.map((item) => this.normalizeValue(item))
    }

    // Handle objects
    if (typeof value === 'object') {
      const normalized: Record<string, any> = {}
      for (const [key, val] of Object.entries(value)) {
        normalized[key] = this.normalizeValue(val)
      }
      return normalized
    }

    // Handle primitives
    return value
  }

  private normalizedStringify(obj: any): string {
    if (obj === null || obj === undefined) return 'null'
    if (typeof obj === 'string') return `"${obj}"`
    if (typeof obj === 'number' || typeof obj === 'boolean') return String(obj)

    if (Array.isArray(obj)) {
      return `[${obj.map((item) => this.normalizedStringify(item)).join(',')}]`
    }

    if (typeof obj === 'object') {
      const keys = Object.keys(obj).sort()
      const pairs = keys.map((key) => `"${key}":${this.normalizedStringify(obj[key])}`)
      return `{${pairs.join(',')}}`
    }

    return String(obj)
  }
}

export const snapshotService = new SnapshotService()
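Usage sketch (illustration only, not part of this commit; the types import path is assumed): because the hash strips block positions, a position-only edit deduplicates to the same stored snapshot.

import { snapshotService } from '@/lib/logs/snapshot-service'
import type { WorkflowState } from '@/lib/logs/types' // path assumed

async function demoDeduplication(workflowId: string, state: WorkflowState) {
  const first = await snapshotService.createSnapshotWithDeduplication(workflowId, state)

  // Move a block; functionally identical, so the hash is unchanged
  const moved: WorkflowState = {
    ...state,
    blocks: {
      ...state.blocks,
      block1: { ...state.blocks.block1, position: { x: 999, y: 999 } },
    },
  }
  const second = await snapshotService.createSnapshotWithDeduplication(workflowId, moved)

  console.log(first.isNew, second.isNew, first.snapshot.id === second.snapshot.id)
  // -> true false true
}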
@@ -1,6 +1,9 @@
import { createLogger } from '@/lib/logs/console-logger'
import type { TraceSpan } from '@/app/workspace/[workspaceId]/logs/stores/types'
import type { ExecutionResult } from '@/executor/types'

const logger = createLogger('TraceSpans')

// Helper function to build a tree of trace spans from execution logs
export function buildTraceSpans(result: ExecutionResult): {
  traceSpans: TraceSpan[]
@@ -43,6 +46,16 @@ export function buildTraceSpans(result: ExecutionResult): {
    const duration = log.durationMs || 0

    // Create the span
    let output = log.output || {}

    // If there's an error, include it in the output
    if (log.error) {
      output = {
        ...output,
        error: log.error,
      }
    }

    const span: TraceSpan = {
      id: spanId,
      name: log.blockName || log.blockId,
@@ -54,146 +67,68 @@ export function buildTraceSpans(result: ExecutionResult): {
      children: [],
      // Store the block ID for later use in identifying direct parent-child relationships
      blockId: log.blockId,
      // Include block input/output data
      input: log.input || {},
      output: output,
    }

    // Add provider timing data if it exists
    if (log.output?.providerTiming) {
      const providerTiming = log.output.providerTiming

      // If we have time segments, use them to create a more detailed timeline
      if (providerTiming.timeSegments && providerTiming.timeSegments.length > 0) {
        const segmentStartTime = new Date(log.startedAt).getTime()
        const children: TraceSpan[] = []
        // Store provider timing as metadata instead of creating child spans
        // This keeps the UI cleaner while preserving timing information

        // Process segments in order
        providerTiming.timeSegments.forEach(
          (
            segment: {
              type: string
              name: string
              startTime: number
              endTime: number
              duration: number
            },
            index: number
          ) => {
            // Ensure we have valid startTime and endTime
            let segmentStart: number
            let segmentEnd: number

            // Handle different time formats - some providers use ISO strings, some use timestamps
            if (typeof segment.startTime === 'string') {
              try {
                segmentStart = new Date(segment.startTime).getTime()
              } catch (_e) {
                segmentStart = segmentStartTime + index * 1000 // Fallback offset
              }
            } else {
              segmentStart = segment.startTime
            }

            if (typeof segment.endTime === 'string') {
              try {
                segmentEnd = new Date(segment.endTime).getTime()
              } catch (_e) {
                segmentEnd = segmentStart + (segment.duration || 1000) // Fallback duration
              }
            } else {
              segmentEnd = segment.endTime
            }

            // For streaming responses, make sure our timing is valid
            if (
              Number.isNaN(segmentStart) ||
              Number.isNaN(segmentEnd) ||
              segmentEnd < segmentStart
            ) {
              // Use fallback values
              segmentStart = segmentStartTime + index * 1000
              segmentEnd = segmentStart + (segment.duration || 1000)
            }

            const childSpan: TraceSpan = {
              id: `${spanId}-segment-${index}`,
              name: segment.name || `${segment.type} operation`,
              startTime: new Date(segmentStart).toISOString(),
              endTime: new Date(segmentEnd).toISOString(),
              duration: segment.duration || segmentEnd - segmentStart,
              type:
                segment.type === 'model'
                  ? 'model'
                  : segment.type === 'tool'
                    ? 'tool'
                    : 'processing',
              status: 'success',
              children: [],
            }

            // Add any additional metadata
            if (segment.type === 'tool' && typeof segment.name === 'string') {
              // Add as a custom attribute using type assertion
              ;(childSpan as any).toolName = segment.name
            }

            children.push(childSpan)
          }
        )

        // Only add children if we have valid spans
        if (children.length > 0) {
          span.children = children
        }
        ;(span as any).providerTiming = {
          duration: providerTiming.duration,
          startTime: providerTiming.startTime,
          endTime: providerTiming.endTime,
          segments: providerTiming.timeSegments || [],
        }
      }
      // If no segments but we have provider timing, create a provider span
      else {
        // Create a child span for the provider execution
        const providerSpan: TraceSpan = {
          id: `${spanId}-provider`,
          name: log.output.model || 'AI Provider',
          type: 'provider',
          duration: providerTiming.duration || 0,
          startTime: providerTiming.startTime || log.startedAt,
          endTime: providerTiming.endTime || log.endedAt,
          status: 'success',
          tokens: log.output.tokens?.total,
        }

        // If we have model time, create a child span for just the model processing
        if (providerTiming.modelTime) {
          const modelName = log.output.model || ''
          const modelSpan: TraceSpan = {
            id: `${spanId}-model`,
            name: `Model Generation${modelName ? ` (${modelName})` : ''}`,
            type: 'model',
            duration: providerTiming.modelTime,
            startTime: providerTiming.startTime, // Approximate
            endTime: providerTiming.endTime, // Approximate
            status: 'success',
            tokens: log.output.tokens?.completion,
          }
          // Add cost information if available
          if (log.output?.cost) {
            ;(span as any).cost = log.output.cost
            logger.debug(`Added cost to span ${span.id}`, {
              blockId: log.blockId,
              blockType: log.blockType,
              cost: log.output.cost,
            })
          }

          if (!providerSpan.children) providerSpan.children = []
          providerSpan.children.push(modelSpan)
        }
        // Add token information if available
        if (log.output?.tokens) {
          ;(span as any).tokens = log.output.tokens
          logger.debug(`Added tokens to span ${span.id}`, {
            blockId: log.blockId,
            blockType: log.blockType,
            tokens: log.output.tokens,
          })
        }

        if (!span.children) span.children = []
        span.children.push(providerSpan)

        // When using provider timing without segments, still add tool calls if they exist
        if (log.output?.toolCalls?.list) {
          span.toolCalls = log.output.toolCalls.list.map((tc: any) => ({
            name: stripCustomToolPrefix(tc.name),
            duration: tc.duration || 0,
            startTime: tc.startTime || log.startedAt,
            endTime: tc.endTime || log.endedAt,
            status: tc.error ? 'error' : 'success',
            input: tc.arguments || tc.input,
            output: tc.result || tc.output,
            error: tc.error,
          }))
        }
        // Add model information
        if (log.output?.model) {
          ;(span as any).model = log.output.model
          logger.debug(`Added model to span ${span.id}`, {
            blockId: log.blockId,
            blockType: log.blockType,
            model: log.output.model,
          })
        }
      } else {
        // When not using provider timing, still add cost and token information
        if (log.output?.cost) {
          ;(span as any).cost = log.output.cost
        }

        if (log.output?.tokens) {
          ;(span as any).tokens = log.output.tokens
        }

        if (log.output?.model) {
          ;(span as any).model = log.output.model
        }

        // When not using provider timing at all, add tool calls if they exist
        // Tool calls handling for different formats:
        // 1. Standard format in response.toolCalls.list
@@ -257,96 +192,30 @@ export function buildTraceSpans(result: ExecutionResult): {
    spanMap.set(spanId, span)
  })

  // Second pass: Build the hierarchy based on direct relationships
  // We'll first need to sort logs chronologically for proper order
  // Second pass: Build a flat hierarchy for sequential workflow execution
  // For most workflows, blocks execute sequentially and should be shown at the same level
  // Only nest blocks that are truly hierarchical (like subflows, loops, etc.)

  const sortedLogs = [...result.logs].sort((a, b) => {
    const aTime = new Date(a.startedAt).getTime()
    const bTime = new Date(b.startedAt).getTime()
    return aTime - bTime
  })

  // Map to track spans by block ID (for parent-child relationship identification)
  const blockToSpanMap = new Map<string, string>()

  // First, map block IDs to their span IDs
  sortedLogs.forEach((log) => {
    if (!log.blockId) return

    const spanId = `${log.blockId}-${new Date(log.startedAt).getTime()}`
    blockToSpanMap.set(log.blockId, spanId)
  })

  // Identify root spans and build relationships
  const rootSpans: TraceSpan[] = []

  // For sequential blocks, we need to determine if they are true parent-child
  // or just execution dependencies. True parent-child should be nested,
  // while sequential execution blocks should be at the same level.

  // Identify blocks at the top level (aka "layer 0")
  const topLevelBlocks = new Set<string>()

  // Create the array of parent values once before the loop
  const parentValues = Array.from(parentChildMap.values())

  workflowConnections.forEach((conn) => {
    // If the source is starter or doesn't exist in our connections as a target, it's top level
    if (conn.source === 'starter' || !parentValues.includes(conn.source)) {
      topLevelBlocks.add(conn.target)
    }
  })

  // For now, treat all blocks as top-level spans in execution order
  // This gives a cleaner, more intuitive view of workflow execution
  sortedLogs.forEach((log) => {
    if (!log.blockId) return

    const spanId = `${log.blockId}-${new Date(log.startedAt).getTime()}`
    const span = spanMap.get(spanId)
    if (!span) return

    // Check if this block has a direct parent in the workflow
    const parentBlockId = parentChildMap.get(log.blockId)

    // Top level blocks are those that:
    // 1. Have no parent (or parent is starter)
    // 2. Are identified as top level in our analysis
    const isTopLevel =
      !parentBlockId || parentBlockId === 'starter' || topLevelBlocks.has(log.blockId)

    if (isTopLevel) {
      // This is a top level span
      if (span) {
        rootSpans.push(span)
      } else {
        // This has a parent
        // Only nest as a child if the parent block is NOT a top-level block
        // This ensures sequential blocks at the same "layer" stay at the same level
        // while true parent-child relationships are preserved
        if (parentBlockId && !topLevelBlocks.has(parentBlockId)) {
          const parentSpanId = blockToSpanMap.get(parentBlockId)

          if (parentSpanId) {
            const parentSpan = spanMap.get(parentSpanId)
            if (parentSpan) {
              // Add as child to direct parent
              if (!parentSpan.children) parentSpan.children = []
              parentSpan.children.push(span)
            } else {
              // Parent span not found, add as root
              rootSpans.push(span)
            }
          } else {
            // Parent block executed but no span, add as root
            rootSpans.push(span)
          }
        } else {
          // Parent is a top level block, so this should also be a top level span
          // This prevents sequential top-level blocks from being nested
          rootSpans.push(span)
        }
      }
    })

  // Fall back to time-based hierarchy only if we couldn't establish relationships
  // This happens when we don't have workflow connection information
  if (rootSpans.length === 0 && workflowConnections.length === 0) {
    // Track parent spans using a stack
    const spanStack: TraceSpan[] = []
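Usage sketch (illustration only, not part of this commit; the result variable and any fields beyond traceSpans are assumptions): with the flat-hierarchy change above, sequential blocks surface as sibling top-level spans rather than a nested chain.

import type { ExecutionResult } from '@/executor/types'
import { buildTraceSpans } from '@/lib/logs/trace-spans'

declare const executionResult: ExecutionResult // produced by executor.execute(...)

const { traceSpans } = buildTraceSpans(executionResult)
// For a linear starter -> agent -> function run, expect two sibling spans here,
// with provider/model timing hanging off each block span instead of extra nesting.
for (const span of traceSpans) {
  console.log(span.name, span.status, span.children?.length ?? 0)
}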
@@ -95,7 +95,6 @@ export interface WorkflowExecutionLog {
  totalInputCost: number
  totalOutputCost: number
  totalTokens: number
  primaryModel: string
  metadata: {
    environment: ExecutionEnvironment
    trigger: ExecutionTrigger
@@ -157,6 +156,7 @@ export interface TraceSpan {
  relativeStartMs?: number
  blockId?: string
  input?: Record<string, unknown>
  output?: Record<string, unknown>
}

export interface WorkflowExecutionSummary {
@@ -180,7 +180,6 @@ export interface WorkflowExecutionSummary {
    inputCost: number
    outputCost: number
    tokens: number
    primaryModel: string
  }
  stateSnapshotId: string
  errorSummary?: {
@@ -372,7 +371,6 @@ export interface ExecutionLoggerService {
    totalInputCost: number
    totalOutputCost: number
    totalTokens: number
    primaryModel: string
  }
  finalOutput: BlockOutputData
  traceSpans?: TraceSpan[]
@@ -4,6 +4,7 @@ import { createLogger } from '@/lib/logs/console-logger'
import { db } from '@/db'
import { member, subscription, userStats } from '@/db/schema'
import { client } from '../auth-client'
import { env } from '../env'
import { calculateUsageLimit, checkEnterprisePlan, checkProPlan, checkTeamPlan } from './utils'

const logger = createLogger('Subscription')
@@ -172,9 +173,7 @@ export async function hasExceededCostLimit(userId: string): Promise<boolean> {
      limit,
    })
  } else {
    limit = process.env.FREE_TIER_COST_LIMIT
      ? Number.parseFloat(process.env.FREE_TIER_COST_LIMIT)
      : 5
    limit = env.FREE_TIER_COST_LIMIT || 5
    logger.info('Using free tier limit', { userId, limit })
  }
@@ -1,18 +1,14 @@
import { afterAll, beforeAll, describe, expect, it } from 'vitest'
import { describe, expect, it, vi } from 'vitest'
import { calculateUsageLimit, checkEnterprisePlan } from './utils'

const ORIGINAL_ENV = { ...process.env }

beforeAll(() => {
  process.env.FREE_TIER_COST_LIMIT = '5'
  process.env.PRO_TIER_COST_LIMIT = '20'
  process.env.TEAM_TIER_COST_LIMIT = '40'
  process.env.ENTERPRISE_TIER_COST_LIMIT = '200'
})

afterAll(() => {
  process.env = ORIGINAL_ENV
})
vi.mock('../env', () => ({
  env: {
    FREE_TIER_COST_LIMIT: 5,
    PRO_TIER_COST_LIMIT: 20,
    TEAM_TIER_COST_LIMIT: 40,
    ENTERPRISE_TIER_COST_LIMIT: 200,
  },
}))

describe('Subscription Utilities', () => {
  describe('checkEnterprisePlan', () => {
@@ -1,3 +1,5 @@
import { env } from '../env'

export function checkEnterprisePlan(subscription: any): boolean {
  return subscription?.plan === 'enterprise' && subscription?.status === 'active'
}
@@ -17,16 +19,16 @@ export function checkTeamPlan(subscription: any): boolean {
 */
export function calculateUsageLimit(subscription: any): number {
  if (!subscription || subscription.status !== 'active') {
    return Number.parseFloat(process.env.FREE_TIER_COST_LIMIT!)
    return env.FREE_TIER_COST_LIMIT || 0
  }

  const seats = subscription.seats || 1

  if (subscription.plan === 'pro') {
    return Number.parseFloat(process.env.PRO_TIER_COST_LIMIT!)
    return env.PRO_TIER_COST_LIMIT || 0
  }
  if (subscription.plan === 'team') {
    return seats * Number.parseFloat(process.env.TEAM_TIER_COST_LIMIT!)
    return seats * (env.TEAM_TIER_COST_LIMIT || 0)
  }
  if (subscription.plan === 'enterprise') {
    const metadata = subscription.metadata || {}
@@ -39,8 +41,8 @@ export function calculateUsageLimit(subscription: any): number {
      return Number.parseFloat(metadata.totalAllowance)
    }

    return seats * Number.parseFloat(process.env.ENTERPRISE_TIER_COST_LIMIT!)
    return seats * (env.ENTERPRISE_TIER_COST_LIMIT || 0)
  }

  return Number.parseFloat(process.env.FREE_TIER_COST_LIMIT!)
  return env.FREE_TIER_COST_LIMIT || 0
}
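Usage sketch (illustration only, not part of this commit; the import path is assumed): per-seat tiers multiply, everything else falls through to the free tier.

import { calculateUsageLimit } from '@/lib/subscription/utils' // path assumed

// With env.TEAM_TIER_COST_LIMIT = 40, a 5-seat team resolves to 5 * 40 = 200
console.log(calculateUsageLimit({ plan: 'team', status: 'active', seats: 5 }))

// Inactive or missing subscriptions resolve to env.FREE_TIER_COST_LIMIT || 0
console.log(calculateUsageLimit(null))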
apps/sim/lib/tokenization/calculators.ts | 145 (new file)
@@ -0,0 +1,145 @@
/**
 * Cost calculation functions for tokenization
 */

import { createLogger } from '@/lib/logs/console-logger'
import { calculateCost } from '@/providers/utils'
import { createTokenizationError } from './errors'
import { estimateInputTokens, estimateOutputTokens, estimateTokenCount } from './estimators'
import type { CostBreakdown, StreamingCostResult, TokenizationInput, TokenUsage } from './types'
import {
  getProviderForTokenization,
  logTokenizationDetails,
  validateTokenizationInput,
} from './utils'

const logger = createLogger('TokenizationCalculators')

/**
 * Calculates cost estimate for streaming execution using token estimation
 */
export function calculateStreamingCost(
  model: string,
  inputText: string,
  outputText: string,
  systemPrompt?: string,
  context?: string,
  messages?: Array<{ role: string; content: string }>
): StreamingCostResult {
  try {
    // Validate inputs
    validateTokenizationInput(model, inputText, outputText)

    const providerId = getProviderForTokenization(model)

    logger.debug('Starting streaming cost calculation', {
      model,
      providerId,
      inputLength: inputText.length,
      outputLength: outputText.length,
      hasSystemPrompt: !!systemPrompt,
      hasContext: !!context,
      hasMessages: !!messages?.length,
    })

    // Estimate input tokens (combine all input sources)
    const inputEstimate = estimateInputTokens(systemPrompt, context, messages, providerId)

    // Add the main input text to the estimation
    const mainInputEstimate = estimateTokenCount(inputText, providerId)
    const totalPromptTokens = inputEstimate.count + mainInputEstimate.count

    // Estimate output tokens
    const outputEstimate = estimateOutputTokens(outputText, providerId)
    const completionTokens = outputEstimate.count

    // Calculate total tokens
    const totalTokens = totalPromptTokens + completionTokens

    // Create token usage object
    const tokens: TokenUsage = {
      prompt: totalPromptTokens,
      completion: completionTokens,
      total: totalTokens,
    }

    // Calculate cost using provider pricing
    const costResult = calculateCost(model, totalPromptTokens, completionTokens, false)

    const cost: CostBreakdown = {
      input: costResult.input,
      output: costResult.output,
      total: costResult.total,
    }

    const result: StreamingCostResult = {
      tokens,
      cost,
      model,
      provider: providerId,
      method: 'tokenization',
    }

    logTokenizationDetails('Streaming cost calculation completed', {
      model,
      provider: providerId,
      inputLength: inputText.length,
      outputLength: outputText.length,
      tokens,
      cost,
      method: 'tokenization',
    })

    return result
  } catch (error) {
    logger.error('Streaming cost calculation failed', {
      model,
      inputLength: inputText?.length || 0,
      outputLength: outputText?.length || 0,
      error: error instanceof Error ? error.message : String(error),
    })

    if (error instanceof Error && error.name === 'TokenizationError') {
      throw error
    }

    throw createTokenizationError(
      'CALCULATION_FAILED',
      `Failed to calculate streaming cost: ${error instanceof Error ? error.message : String(error)}`,
      { model, inputLength: inputText?.length || 0, outputLength: outputText?.length || 0 }
    )
  }
}

/**
 * Calculates cost for tokenization input object
 */
export function calculateTokenizationCost(input: TokenizationInput): StreamingCostResult {
  return calculateStreamingCost(
    input.model,
    input.inputText,
    input.outputText,
    input.systemPrompt,
    input.context,
    input.messages
  )
}

/**
 * Creates a streaming cost result from existing provider response data
 */
export function createCostResultFromProviderData(
  model: string,
  providerTokens: TokenUsage,
  providerCost: CostBreakdown
): StreamingCostResult {
  const providerId = getProviderForTokenization(model)

  return {
    tokens: providerTokens,
    cost: providerCost,
    model,
    provider: providerId,
    method: 'provider_response',
  }
}
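Usage sketch (illustration only, not part of this commit; the direct module path and sample strings are assumptions): estimating cost for a streamed reply where the provider never reported usage.

import { calculateStreamingCost } from '@/lib/tokenization/calculators' // path assumed

const result = calculateStreamingCost(
  'gpt-4o',
  'Summarize the latest deployment logs.', // main input text
  'The deployment completed without errors.', // streamed output text
  'You are a helpful assistant.' // optional system prompt
)
console.log(result.method) // 'tokenization' (vs. 'provider_response' when usage came from the provider)
console.log(result.tokens.total, result.cost.total)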
apps/sim/lib/tokenization/constants.ts | 71 (new file)
@@ -0,0 +1,71 @@
/**
 * Configuration constants for tokenization functionality
 */

import type { ProviderTokenizationConfig } from './types'

export const TOKENIZATION_CONFIG = {
  providers: {
    openai: {
      avgCharsPerToken: 4,
      confidence: 'high',
      supportedMethods: ['heuristic', 'fallback'],
    },
    'azure-openai': {
      avgCharsPerToken: 4,
      confidence: 'high',
      supportedMethods: ['heuristic', 'fallback'],
    },
    anthropic: {
      avgCharsPerToken: 4.5,
      confidence: 'high',
      supportedMethods: ['heuristic', 'fallback'],
    },
    google: {
      avgCharsPerToken: 5,
      confidence: 'medium',
      supportedMethods: ['heuristic', 'fallback'],
    },
    deepseek: {
      avgCharsPerToken: 4,
      confidence: 'medium',
      supportedMethods: ['heuristic', 'fallback'],
    },
    xai: {
      avgCharsPerToken: 4,
      confidence: 'medium',
      supportedMethods: ['heuristic', 'fallback'],
    },
    cerebras: {
      avgCharsPerToken: 4,
      confidence: 'medium',
      supportedMethods: ['heuristic', 'fallback'],
    },
    groq: {
      avgCharsPerToken: 4,
      confidence: 'medium',
      supportedMethods: ['heuristic', 'fallback'],
    },
    ollama: {
      avgCharsPerToken: 4,
      confidence: 'low',
      supportedMethods: ['fallback'],
    },
  } satisfies Record<string, ProviderTokenizationConfig>,

  fallback: {
    avgCharsPerToken: 4,
    confidence: 'low',
    supportedMethods: ['fallback'],
  } satisfies ProviderTokenizationConfig,

  defaults: {
    model: 'gpt-4o',
    provider: 'openai',
  },
} as const

export const LLM_BLOCK_TYPES = ['agent', 'router', 'evaluator'] as const

export const MIN_TEXT_LENGTH_FOR_ESTIMATION = 1
export const MAX_PREVIEW_LENGTH = 100
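Illustration only (not part of this commit): avgCharsPerToken drives the generic fallback estimate, roughly tokens ≈ chars / avgCharsPerToken; the per-provider estimators in estimators.ts layer word- and punctuation-level heuristics on top, so the real count may differ.

import { TOKENIZATION_CONFIG } from '@/lib/tokenization/constants' // path assumed

const text = 'x'.repeat(400)
const { avgCharsPerToken } = TOKENIZATION_CONFIG.providers.anthropic // 4.5
console.log(Math.ceil(text.length / avgCharsPerToken)) // 400 / 4.5 -> ~89 tokens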
23 apps/sim/lib/tokenization/errors.ts Normal file
@@ -0,0 +1,23 @@
/**
 * Custom error classes for tokenization functionality
 */

export class TokenizationError extends Error {
  public readonly code: 'INVALID_PROVIDER' | 'MISSING_TEXT' | 'CALCULATION_FAILED' | 'INVALID_MODEL'
  public readonly details?: Record<string, unknown>

  constructor(message: string, code: TokenizationError['code'], details?: Record<string, unknown>) {
    super(message)
    this.name = 'TokenizationError'
    this.code = code
    this.details = details
  }
}

export function createTokenizationError(
  code: TokenizationError['code'],
  message: string,
  details?: Record<string, unknown>
): TokenizationError {
  return new TokenizationError(message, code, details)
}
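A short sketch of how a caller can branch on the error code (hypothetical call site; validateTokenizationInput is defined in utils.ts below and throws INVALID_MODEL for an empty model):

// Hypothetical call site showing error-code narrowing (not part of the diff).
try {
  validateTokenizationInput('', 'input text', 'output text')
} catch (error) {
  if (error instanceof TokenizationError && error.code === 'INVALID_MODEL') {
    console.warn('Tokenization skipped:', error.message, error.details)
  } else {
    throw error
  }
}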
191 apps/sim/lib/tokenization/estimators.ts Normal file
@@ -0,0 +1,191 @@
/**
 * Token estimation functions for different providers
 */

import { createLogger } from '@/lib/logs/console-logger'
import { MIN_TEXT_LENGTH_FOR_ESTIMATION, TOKENIZATION_CONFIG } from './constants'
import type { TokenEstimate } from './types'
import { createTextPreview, getProviderConfig } from './utils'

const logger = createLogger('TokenizationEstimators')

/**
 * Estimates token count for text using provider-specific heuristics
 */
export function estimateTokenCount(text: string, providerId?: string): TokenEstimate {
  if (!text || text.length < MIN_TEXT_LENGTH_FOR_ESTIMATION) {
    return {
      count: 0,
      confidence: 'high',
      provider: providerId || 'unknown',
      method: 'fallback',
    }
  }

  const effectiveProviderId = providerId || TOKENIZATION_CONFIG.defaults.provider
  const config = getProviderConfig(effectiveProviderId)

  logger.debug('Starting token estimation', {
    provider: effectiveProviderId,
    textLength: text.length,
    preview: createTextPreview(text),
    avgCharsPerToken: config.avgCharsPerToken,
  })

  let estimatedTokens: number

  switch (effectiveProviderId) {
    case 'openai':
    case 'azure-openai':
      estimatedTokens = estimateOpenAITokens(text)
      break
    case 'anthropic':
      estimatedTokens = estimateAnthropicTokens(text)
      break
    case 'google':
      estimatedTokens = estimateGoogleTokens(text)
      break
    default:
      estimatedTokens = estimateGenericTokens(text, config.avgCharsPerToken)
  }

  const result: TokenEstimate = {
    count: Math.max(1, Math.round(estimatedTokens)),
    confidence: config.confidence,
    provider: effectiveProviderId,
    method: 'heuristic',
  }

  logger.debug('Token estimation completed', {
    provider: effectiveProviderId,
    textLength: text.length,
    estimatedTokens: result.count,
    confidence: result.confidence,
  })

  return result
}

/**
 * OpenAI-specific token estimation using BPE characteristics
 */
function estimateOpenAITokens(text: string): number {
  const words = text.trim().split(/\s+/)
  let tokenCount = 0

  for (const word of words) {
    if (word.length === 0) continue

    // GPT tokenizer characteristics based on BPE
    if (word.length <= 4) {
      tokenCount += 1
    } else if (word.length <= 8) {
      tokenCount += Math.ceil(word.length / 4.5)
    } else {
      tokenCount += Math.ceil(word.length / 4)
    }

    // Add extra tokens for punctuation
    const punctuationCount = (word.match(/[.,!?;:"'()[\]{}<>]/g) || []).length
    tokenCount += punctuationCount * 0.5
  }

  // Add tokens for newlines and formatting
  const newlineCount = (text.match(/\n/g) || []).length
  tokenCount += newlineCount * 0.5

  return tokenCount
}

/**
 * Anthropic Claude-specific token estimation
 */
function estimateAnthropicTokens(text: string): number {
  const words = text.trim().split(/\s+/)
  let tokenCount = 0

  for (const word of words) {
    if (word.length === 0) continue

    // Claude tokenizer tends to be slightly more efficient
    if (word.length <= 4) {
      tokenCount += 1
    } else if (word.length <= 8) {
      tokenCount += Math.ceil(word.length / 5)
    } else {
      tokenCount += Math.ceil(word.length / 4.5)
    }
  }

  // Claude handles formatting slightly better
  const newlineCount = (text.match(/\n/g) || []).length
  tokenCount += newlineCount * 0.3

  return tokenCount
}

/**
 * Google Gemini-specific token estimation
 */
function estimateGoogleTokens(text: string): number {
  const words = text.trim().split(/\s+/)
  let tokenCount = 0

  for (const word of words) {
    if (word.length === 0) continue

    // Gemini tokenizer characteristics
    if (word.length <= 5) {
      tokenCount += 1
    } else if (word.length <= 10) {
      tokenCount += Math.ceil(word.length / 6)
    } else {
      tokenCount += Math.ceil(word.length / 5)
    }
  }

  return tokenCount
}

/**
 * Generic token estimation fallback
 */
function estimateGenericTokens(text: string, avgCharsPerToken: number): number {
  const charCount = text.trim().length
  return Math.ceil(charCount / avgCharsPerToken)
}

/**
 * Estimates tokens for input content including context
 */
export function estimateInputTokens(
  systemPrompt?: string,
  context?: string,
  messages?: Array<{ role: string; content: string }>,
  providerId?: string
): TokenEstimate {
  let totalText = ''

  if (systemPrompt) {
    totalText += `${systemPrompt}\n`
  }

  if (context) {
    totalText += `${context}\n`
  }

  if (messages) {
    for (const message of messages) {
      totalText += `${message.role}: ${message.content}\n`
    }
  }

  return estimateTokenCount(totalText, providerId)
}

/**
 * Estimates tokens for output content
 */
export function estimateOutputTokens(content: string, providerId?: string): TokenEstimate {
  return estimateTokenCount(content, providerId)
}
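A minimal usage sketch of the estimators above (hypothetical strings; exact counts depend on the heuristics):

// Hypothetical usage (not part of the diff).
const inputEstimate = estimateInputTokens(
  'You are a helpful assistant.',          // systemPrompt
  undefined,                               // context
  [{ role: 'user', content: 'Hi there' }], // messages
  'anthropic'
)
const outputEstimate = estimateOutputTokens('Hello! How can I help?', 'anthropic')
// Both return TokenEstimate objects, e.g. { count, confidence: 'high', provider: 'anthropic', method: 'heuristic' }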
43 apps/sim/lib/tokenization/index.ts Normal file
@@ -0,0 +1,43 @@
/**
 * Main tokenization module exports
 *
 * This module provides token estimation and cost calculation functionality
 * for streaming LLM executions where actual token counts are not available.
 */

// Core calculation functions
export {
  calculateStreamingCost,
  calculateTokenizationCost,
  createCostResultFromProviderData,
} from './calculators'
// Constants
export { LLM_BLOCK_TYPES, TOKENIZATION_CONFIG } from './constants'
// Error handling
export { createTokenizationError, TokenizationError } from './errors'
// Token estimation functions
export { estimateInputTokens, estimateOutputTokens, estimateTokenCount } from './estimators'
// Streaming-specific helpers
export { processStreamingBlockLog, processStreamingBlockLogs } from './streaming'
// Types
export type {
  CostBreakdown,
  ProviderTokenizationConfig,
  StreamingCostResult,
  TokenEstimate,
  TokenizationInput,
  TokenUsage,
} from './types'
// Utility functions
export {
  createTextPreview,
  extractTextContent,
  formatTokenCount,
  getProviderConfig,
  getProviderForTokenization,
  hasRealCostData,
  hasRealTokenData,
  isTokenizableBlockType,
  logTokenizationDetails,
  validateTokenizationInput,
} from './utils'
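Downstream code can pull everything through this barrel; a sketch (the '@/lib/tokenization' alias is an assumption based on the import style used elsewhere in the repo):

// Hypothetical consumer (not part of the diff).
import { estimateTokenCount, formatTokenCount, TOKENIZATION_CONFIG } from '@/lib/tokenization'

const estimate = estimateTokenCount('Some prompt text', TOKENIZATION_CONFIG.defaults.provider)
console.log(`~${formatTokenCount(estimate.count)} tokens via ${estimate.method}`)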
158 apps/sim/lib/tokenization/streaming.ts Normal file
@@ -0,0 +1,158 @@
/**
 * Streaming-specific tokenization helpers
 */

import { createLogger } from '@/lib/logs/console-logger'
import type { BlockLog } from '@/executor/types'
import { calculateStreamingCost } from './calculators'
import { TOKENIZATION_CONFIG } from './constants'
import {
  extractTextContent,
  hasRealCostData,
  hasRealTokenData,
  isTokenizableBlockType,
  logTokenizationDetails,
} from './utils'

const logger = createLogger('StreamingTokenization')

/**
 * Processes a block log and adds tokenization data if needed
 */
export function processStreamingBlockLog(log: BlockLog, streamedContent: string): boolean {
  // Check if this block should be tokenized
  if (!isTokenizableBlockType(log.blockType)) {
    return false
  }

  // Check if we already have meaningful token/cost data
  if (hasRealTokenData(log.output?.tokens) && hasRealCostData(log.output?.cost)) {
    logger.debug(`Block ${log.blockId} already has real token/cost data`, {
      blockType: log.blockType,
      tokens: log.output?.tokens,
      cost: log.output?.cost,
    })
    return false
  }

  // Check if we have content to tokenize
  if (!streamedContent?.trim()) {
    logger.debug(`Block ${log.blockId} has no content to tokenize`, {
      blockType: log.blockType,
      contentLength: streamedContent?.length || 0,
    })
    return false
  }

  try {
    // Determine model to use
    const model = getModelForBlock(log)

    // Prepare input text from log
    const inputText = extractTextContent(log.input)

    logger.debug(`Starting tokenization for streaming block ${log.blockId}`, {
      blockType: log.blockType,
      model,
      inputLength: inputText.length,
      outputLength: streamedContent.length,
      hasInput: !!log.input,
    })

    // Calculate streaming cost
    const result = calculateStreamingCost(
      model,
      inputText,
      streamedContent,
      log.input?.systemPrompt,
      log.input?.context,
      log.input?.messages
    )

    // Update the log output with tokenization data
    if (!log.output) {
      log.output = {}
    }

    log.output.tokens = result.tokens
    log.output.cost = result.cost
    log.output.model = result.model

    logTokenizationDetails(`✅ Streaming tokenization completed for ${log.blockType}`, {
      blockId: log.blockId,
      blockType: log.blockType,
      model: result.model,
      provider: result.provider,
      inputLength: inputText.length,
      outputLength: streamedContent.length,
      tokens: result.tokens,
      cost: result.cost,
      method: result.method,
    })

    return true
  } catch (error) {
    logger.error(`❌ Streaming tokenization failed for block ${log.blockId}`, {
      blockType: log.blockType,
      error: error instanceof Error ? error.message : String(error),
      contentLength: streamedContent?.length || 0,
    })

    // Don't throw - graceful degradation
    return false
  }
}

/**
 * Determines the appropriate model for a block
 */
function getModelForBlock(log: BlockLog): string {
  // Try to get model from output first
  if (log.output?.model?.trim()) {
    return log.output.model
  }

  // Try to get model from input
  if (log.input?.model?.trim()) {
    return log.input.model
  }

  // Use block type specific defaults
  const blockType = log.blockType
  if (blockType === 'agent' || blockType === 'router' || blockType === 'evaluator') {
    return TOKENIZATION_CONFIG.defaults.model
  }

  // Final fallback
  return TOKENIZATION_CONFIG.defaults.model
}

/**
 * Processes multiple block logs for streaming tokenization
 */
export function processStreamingBlockLogs(
  logs: BlockLog[],
  streamedContentMap: Map<string, string>
): number {
  let processedCount = 0

  logger.debug('Processing streaming block logs for tokenization', {
    totalLogs: logs.length,
    streamedBlocks: streamedContentMap.size,
  })

  for (const log of logs) {
    const content = streamedContentMap.get(log.blockId)
    if (content && processStreamingBlockLog(log, content)) {
      processedCount++
    }
  }

  logger.info(`Streaming tokenization summary`, {
    totalLogs: logs.length,
    processedBlocks: processedCount,
    streamedBlocks: streamedContentMap.size,
  })

  return processedCount
}
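The intended call pattern after a streamed execution, sketched (map keys are block IDs, values are the accumulated streamed text; the identifiers and data below are hypothetical):

// Hypothetical post-execution backfill (not part of the diff).
const streamedContentMap = new Map<string, string>([
  ['block-agent-1', 'Paris is the capital of France.'],
])
const processed = processStreamingBlockLogs(executionResult.logs, streamedContentMap)
logger.info(`Backfilled token/cost data for ${processed} block(s)`)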
69 apps/sim/lib/tokenization/types.ts Normal file
@@ -0,0 +1,69 @@
/**
 * Type definitions for tokenization functionality
 */

export interface TokenEstimate {
  /** Estimated number of tokens */
  count: number
  /** Confidence level of the estimation */
  confidence: 'high' | 'medium' | 'low'
  /** Provider used for estimation */
  provider: string
  /** Method used for estimation */
  method: 'precise' | 'heuristic' | 'fallback'
}

export interface TokenUsage {
  /** Number of prompt/input tokens */
  prompt: number
  /** Number of completion/output tokens */
  completion: number
  /** Total number of tokens */
  total: number
}

export interface CostBreakdown {
  /** Input cost in USD */
  input: number
  /** Output cost in USD */
  output: number
  /** Total cost in USD */
  total: number
}

export interface StreamingCostResult {
  /** Token usage breakdown */
  tokens: TokenUsage
  /** Cost breakdown */
  cost: CostBreakdown
  /** Model used for calculation */
  model: string
  /** Provider ID */
  provider: string
  /** Estimation method used */
  method: 'tokenization' | 'provider_response'
}

export interface TokenizationInput {
  /** Primary input text */
  inputText: string
  /** Generated output text */
  outputText: string
  /** Model identifier */
  model: string
  /** Optional system prompt */
  systemPrompt?: string
  /** Optional context */
  context?: string
  /** Optional message history */
  messages?: Array<{ role: string; content: string }>
}

export interface ProviderTokenizationConfig {
  /** Average characters per token for this provider */
  avgCharsPerToken: number
  /** Confidence level for this provider's estimation */
  confidence: TokenEstimate['confidence']
  /** Supported token estimation methods */
  supportedMethods: TokenEstimate['method'][]
}
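For reference, a literal that satisfies StreamingCostResult (illustrative numbers only):

// Illustrative value (not part of the diff).
const example: StreamingCostResult = {
  tokens: { prompt: 120, completion: 45, total: 165 },
  cost: { input: 0.0006, output: 0.000675, total: 0.001275 },
  model: 'gpt-4o',
  provider: 'openai',
  method: 'tokenization',
}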
163 apps/sim/lib/tokenization/utils.ts Normal file
@@ -0,0 +1,163 @@
/**
 * Utility functions for tokenization
 */

import { createLogger } from '@/lib/logs/console-logger'
import { getProviderFromModel } from '@/providers/utils'
import { LLM_BLOCK_TYPES, MAX_PREVIEW_LENGTH, TOKENIZATION_CONFIG } from './constants'
import { createTokenizationError } from './errors'
import type { ProviderTokenizationConfig, TokenUsage } from './types'

const logger = createLogger('TokenizationUtils')

/**
 * Gets tokenization configuration for a specific provider
 */
export function getProviderConfig(providerId: string): ProviderTokenizationConfig {
  const config =
    TOKENIZATION_CONFIG.providers[providerId as keyof typeof TOKENIZATION_CONFIG.providers]

  if (!config) {
    logger.debug(`No specific config for provider ${providerId}, using fallback`, { providerId })
    return TOKENIZATION_CONFIG.fallback
  }

  return config
}

/**
 * Extracts provider ID from model name
 */
export function getProviderForTokenization(model: string): string {
  try {
    return getProviderFromModel(model)
  } catch (error) {
    logger.warn(`Failed to get provider for model ${model}, using default`, {
      model,
      error: error instanceof Error ? error.message : String(error),
    })
    return TOKENIZATION_CONFIG.defaults.provider
  }
}

/**
 * Checks if a block type should be tokenized
 */
export function isTokenizableBlockType(blockType?: string): boolean {
  if (!blockType) return false
  return LLM_BLOCK_TYPES.includes(blockType as any)
}

/**
 * Checks if tokens/cost data is meaningful (non-zero)
 */
export function hasRealTokenData(tokens?: TokenUsage): boolean {
  if (!tokens) return false
  return tokens.total > 0 || tokens.prompt > 0 || tokens.completion > 0
}

/**
 * Checks if cost data is meaningful (non-zero)
 */
export function hasRealCostData(cost?: {
  total?: number
  input?: number
  output?: number
}): boolean {
  if (!cost) return false
  return (cost.total || 0) > 0 || (cost.input || 0) > 0 || (cost.output || 0) > 0
}

/**
 * Safely extracts text content from various input formats
 */
export function extractTextContent(input: unknown): string {
  if (typeof input === 'string') {
    return input.trim()
  }

  if (input && typeof input === 'object') {
    try {
      return JSON.stringify(input)
    } catch (error) {
      logger.warn('Failed to stringify input object', {
        inputType: typeof input,
        error: error instanceof Error ? error.message : String(error),
      })
      return ''
    }
  }

  return String(input || '')
}

/**
 * Creates a preview of text for logging (truncated)
 */
export function createTextPreview(text: string): string {
  if (text.length <= MAX_PREVIEW_LENGTH) {
    return text
  }
  return `${text.substring(0, MAX_PREVIEW_LENGTH)}...`
}

/**
 * Validates tokenization input
 */
export function validateTokenizationInput(
  model: string,
  inputText: string,
  outputText: string
): void {
  if (!model?.trim()) {
    throw createTokenizationError('INVALID_MODEL', 'Model is required for tokenization', { model })
  }

  if (!inputText?.trim() && !outputText?.trim()) {
    throw createTokenizationError(
      'MISSING_TEXT',
      'Either input text or output text must be provided',
      { inputLength: inputText?.length || 0, outputLength: outputText?.length || 0 }
    )
  }
}

/**
 * Formats token count for display
 */
export function formatTokenCount(count: number): string {
  if (count === 0) return '0'
  if (count < 1000) return count.toString()
  if (count < 1000000) return `${(count / 1000).toFixed(1)}K`
  return `${(count / 1000000).toFixed(1)}M`
}

/**
 * Logs tokenization operation details
 */
export function logTokenizationDetails(
  operation: string,
  details: {
    blockId?: string
    blockType?: string
    model?: string
    provider?: string
    inputLength?: number
    outputLength?: number
    tokens?: TokenUsage
    cost?: { input?: number; output?: number; total?: number }
    method?: string
  }
): void {
  logger.info(`${operation}`, {
    blockId: details.blockId,
    blockType: details.blockType,
    model: details.model,
    provider: details.provider,
    inputLength: details.inputLength,
    outputLength: details.outputLength,
    tokens: details.tokens,
    cost: details.cost,
    method: details.method,
  })
}
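Quick illustration of formatTokenCount's display thresholds:

// formatTokenCount examples (derived from the thresholds above):
formatTokenCount(0)       // '0'
formatTokenCount(950)     // '950'
formatTokenCount(1500)    // '1.5K'
formatTokenCount(2500000) // '2.5M'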
@@ -1,3 +1,5 @@
import { env } from '../env'

/**
 * Returns the base URL of the application, respecting environment variables for deployment environments
 * @returns The base URL string (e.g., 'http://localhost:3000' or 'https://example.com')
@@ -7,13 +9,13 @@ export function getBaseUrl(): string {
    return window.location.origin
  }

  const baseUrl = process.env.NEXT_PUBLIC_APP_URL
  const baseUrl = env.NEXT_PUBLIC_APP_URL
  if (baseUrl) {
    if (baseUrl.startsWith('http://') || baseUrl.startsWith('https://')) {
      return baseUrl
    }

    const isProd = process.env.NODE_ENV === 'production'
    const isProd = env.NODE_ENV === 'production'
    const protocol = isProd ? 'https://' : 'http://'
    return `${protocol}${baseUrl}`
  }
@@ -30,11 +32,11 @@ export function getBaseDomain(): string {
    const url = new URL(getBaseUrl())
    return url.host // host includes port if specified
  } catch (_e) {
    const fallbackUrl = process.env.NEXT_PUBLIC_APP_URL || 'http://localhost:3000'
    const fallbackUrl = env.NEXT_PUBLIC_APP_URL || 'http://localhost:3000'
    try {
      return new URL(fallbackUrl).host
    } catch {
      const isProd = process.env.NODE_ENV === 'production'
      const isProd = env.NODE_ENV === 'production'
      return isProd ? 'simstudio.ai' : 'localhost:3000'
    }
  }
@@ -49,7 +51,7 @@ export function getEmailDomain(): string {
    const baseDomain = getBaseDomain()
    return baseDomain.startsWith('www.') ? baseDomain.substring(4) : baseDomain
  } catch (_e) {
    const isProd = process.env.NODE_ENV === 'production'
    const isProd = env.NODE_ENV === 'production'
    return isProd ? 'simstudio.ai' : 'localhost:3000'
  }
}
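The net effect of switching to the validated env module, sketched for getBaseUrl() (values hypothetical):

// Hypothetical resolution outcomes after this change:
// env.NEXT_PUBLIC_APP_URL = 'https://app.example.com'              -> 'https://app.example.com'
// env.NEXT_PUBLIC_APP_URL = 'app.example.com', NODE_ENV=production -> 'https://app.example.com'
// env.NEXT_PUBLIC_APP_URL unset, running in the browser            -> window.location.origin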
@@ -2,15 +2,14 @@ import { and, eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { createLogger } from '@/lib/logs/console-logger'
import { persistExecutionError, persistExecutionLogs } from '@/lib/logs/execution-logger'
import { buildTraceSpans } from '@/lib/logs/trace-spans'
import { EnhancedLoggingSession } from '@/lib/logs/enhanced-logging-session'
import { hasProcessedMessage, markMessageAsProcessed } from '@/lib/redis'
import { decryptSecret } from '@/lib/utils'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/db-helpers'
import { updateWorkflowRunCounts } from '@/lib/workflows/utils'
import { getOAuthToken } from '@/app/api/auth/oauth/utils'
import { db } from '@/db'
import { environment, userStats, webhook } from '@/db/schema'
import { environment as environmentTable, userStats, webhook } from '@/db/schema'
import { Executor } from '@/executor'
import { Serializer } from '@/serializer'
import { mergeSubblockStateAsync } from '@/stores/workflows/server-utils'
@@ -433,47 +432,13 @@ export async function executeWorkflowFromPayload(
    triggerSource: 'webhook-payload',
  })

  // DEBUG: Log specific payload details
  if (input?.airtableChanges) {
    logger.debug(`[${requestId}] TRACE: Execution received Airtable input`, {
      changeCount: input.airtableChanges.length,
      firstTableId: input.airtableChanges[0]?.tableId,
      timestamp: new Date().toISOString(),
    })
  }
  const loggingSession = new EnhancedLoggingSession(
    foundWorkflow.id,
    executionId,
    'webhook',
    requestId
  )

  // Validate and ensure proper input structure
  if (!input) {
    logger.warn(`[${requestId}] Empty input for workflow execution, creating empty object`)
    input = {}
  }

  // Special handling for Airtable webhook inputs
  if (input.airtableChanges) {
    if (!Array.isArray(input.airtableChanges)) {
      logger.warn(
        `[${requestId}] Invalid airtableChanges input type (${typeof input.airtableChanges}), converting to array`
      )
      // Force to array if somehow not an array
      input.airtableChanges = [input.airtableChanges]
    }

    // Log the structure of the payload for debugging
    logger.info(`[${requestId}] Airtable webhook payload:`, {
      changeCount: input.airtableChanges.length,
      hasAirtableChanges: true,
      sampleTableIds: input.airtableChanges.slice(0, 2).map((c: any) => c.tableId),
    })
  }

  // Log the full input format to help diagnose data issues
  logger.debug(`[${requestId}] Workflow input format:`, {
    inputKeys: Object.keys(input || {}),
    hasAirtableChanges: input?.airtableChanges && Array.isArray(input.airtableChanges),
    airtableChangesCount: input?.airtableChanges?.length || 0,
  })

  // Returns void as errors are handled internally
  try {
    // Load workflow data from normalized tables
    logger.debug(`[${requestId}] Loading workflow ${foundWorkflow.id} from normalized tables`)
@@ -511,19 +476,18 @@ export async function executeWorkflowFromPayload(
    })

    // Retrieve and decrypt environment variables
    const envStartTime = Date.now()
    const [userEnv] = await db
      .select()
      .from(environment)
      .where(eq(environment.userId, foundWorkflow.userId))
      .from(environmentTable)
      .where(eq(environmentTable.userId, foundWorkflow.userId))
      .limit(1)
    let decryptedEnvVars: Record<string, string> = {}
    if (userEnv) {
      // Decryption logic
      const decryptionPromises = Object.entries(userEnv.variables as Record<string, string>).map(
      const decryptionPromises = Object.entries((userEnv.variables as any) || {}).map(
        async ([key, encryptedValue]) => {
          try {
            const { decrypted } = await decryptSecret(encryptedValue)
            const { decrypted } = await decryptSecret(encryptedValue as string)
            return [key, decrypted] as const
          } catch (error: any) {
            logger.error(
@@ -536,18 +500,18 @@ export async function executeWorkflowFromPayload(
      )
      const decryptedEntries = await Promise.all(decryptionPromises)
      decryptedEnvVars = Object.fromEntries(decryptedEntries)

      // DEBUG: Log env vars retrieval
      logger.debug(`[${requestId}] TRACE: Environment variables decrypted`, {
        duration: `${Date.now() - envStartTime}ms`,
        envVarCount: Object.keys(decryptedEnvVars).length,
      })
    } else {
      logger.debug(`[${requestId}] TRACE: No environment variables found for user`, {
        userId: foundWorkflow.userId,
      })
    }

    await loggingSession.safeStart({
      userId: foundWorkflow.userId,
      workspaceId: foundWorkflow.workspaceId,
      variables: decryptedEnvVars,
    })

    // Process block states (extract subBlock values, parse responseFormat)
    const blockStatesStartTime = Date.now()
    const currentBlockStates = Object.entries(mergedStates).reduce(
@@ -683,6 +647,9 @@ export async function executeWorkflowFromPayload(
      workflowVariables
    )

    // Set up enhanced logging on the executor
    loggingSession.setupExecutor(executor)

    // Log workflow execution start time for tracking
    const executionStartTime = Date.now()
    logger.info(`[${requestId}] TRACE: Executor instantiated, starting workflow execution now`, {
@@ -743,20 +710,45 @@ export async function executeWorkflowFromPayload(
          lastActive: new Date(),
        })
        .where(eq(userStats.userId, foundWorkflow.userId))

      // DEBUG: Log stats update
      logger.debug(`[${requestId}] TRACE: Workflow stats updated`, {
        workflowId: foundWorkflow.id,
        userId: foundWorkflow.userId,
      })
    }

    // Build and enrich result with trace spans
    const { traceSpans, totalDuration } = buildTraceSpans(executionResult)
    const enrichedResult = { ...executionResult, traceSpans, totalDuration }
    // Calculate total duration for enhanced logging
    const totalDuration = executionResult.metadata?.duration || 0

    // Persist logs for this execution using the standard 'webhook' trigger type
    await persistExecutionLogs(foundWorkflow.id, executionId, enrichedResult, 'webhook')
    const traceSpans = (executionResult.logs || []).map((blockLog: any, index: number) => {
      let output = blockLog.output
      if (!blockLog.success && blockLog.error) {
        output = {
          error: blockLog.error,
          success: false,
          ...(blockLog.output || {}),
        }
      }

      return {
        id: blockLog.blockId,
        name: `Block ${blockLog.blockName || blockLog.blockType} (${blockLog.blockType || 'unknown'})`,
        type: blockLog.blockType || 'unknown',
        duration: blockLog.durationMs || 0,
        startTime: blockLog.startedAt,
        endTime: blockLog.endedAt || blockLog.startedAt,
        status: blockLog.success ? 'success' : 'error',
        blockId: blockLog.blockId,
        input: blockLog.input,
        output: output,
        tokens: blockLog.output?.tokens?.total || 0,
        relativeStartMs: index * 100,
        children: [],
        toolCalls: (blockLog as any).toolCalls || [],
      }
    })

    await loggingSession.safeComplete({
      endedAt: new Date().toISOString(),
      totalDurationMs: totalDuration || 0,
      finalOutput: executionResult.output || {},
      traceSpans: (traceSpans || []) as any,
    })

    // DEBUG: Final success log
    logger.info(`[${requestId}] TRACE: Execution logs persisted successfully`, {
@@ -781,8 +773,17 @@ export async function executeWorkflowFromPayload(
      error: error.message,
      stack: error.stack,
    })
    // Persist the error for this execution using the standard 'webhook' trigger type
    await persistExecutionError(foundWorkflow.id, executionId, error, 'webhook')
    // Error logging handled by enhanced logging session

    await loggingSession.safeCompleteWithError({
      endedAt: new Date().toISOString(),
      totalDurationMs: 0,
      error: {
        message: error.message || 'Webhook workflow execution failed',
        stackTrace: error.stack,
      },
    })

    // Re-throw the error so the caller knows it failed
    throw error
  }
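The replacement pattern in the hunks above, condensed into one sketch (only calls that appear in this diff; the surrounding identifiers are elided placeholders):

// Condensed sketch of the EnhancedLoggingSession lifecycle (not part of the diff).
const session = new EnhancedLoggingSession(workflowId, executionId, 'webhook', requestId)
await session.safeStart({ userId, workspaceId, variables }) // before execution
session.setupExecutor(executor)                             // attach to the executor
try {
  const result = await executor.execute(workflowId)
  await session.safeComplete({
    endedAt: new Date().toISOString(),
    totalDurationMs,
    finalOutput: result.output || {},
    traceSpans,
  })
} catch (error: any) {
  await session.safeCompleteWithError({
    endedAt: new Date().toISOString(),
    totalDurationMs: 0,
    error: { message: error.message, stackTrace: error.stack },
  })
  throw error
}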
@@ -914,8 +915,7 @@ export async function fetchAndProcessAirtablePayloads(
  workflowData: any,
  requestId: string // Original request ID from the ping, used for the final execution log
) {
  // Use a prefix derived from requestId for *internal* polling logs/errors
  const internalPollIdPrefix = `poll-${requestId}`
  // Enhanced logging handles all error logging
  let currentCursor: number | null = null
  let mightHaveMore = true
  let payloadsFetched = 0 // Track total payloads fetched
@@ -943,12 +943,7 @@ export async function fetchAndProcessAirtablePayloads(
    logger.error(
      `[${requestId}] Missing baseId or externalId in providerConfig for webhook ${webhookData.id}. Cannot fetch payloads.`
    )
    await persistExecutionError(
      workflowData.id,
      `${internalPollIdPrefix}-config-error`,
      new Error('Missing Airtable baseId or externalId in providerConfig'),
      'webhook'
    )
    // Error logging handled by enhanced logging session
    return // Exit early
  }

@@ -984,13 +979,7 @@ export async function fetchAndProcessAirtablePayloads(
        error: initError.message,
        stack: initError.stack,
      })
      // Persist the error specifically for cursor initialization failure
      await persistExecutionError(
        workflowData.id,
        `${internalPollIdPrefix}-cursor-init-error`,
        initError,
        'webhook'
      )
      // Error logging handled by enhanced logging session
    }
  }

@@ -1028,12 +1017,7 @@ export async function fetchAndProcessAirtablePayloads(
        userId: workflowData.userId,
      }
    )
    await persistExecutionError(
      workflowData.id,
      `${internalPollIdPrefix}-token-error`,
      tokenError,
      'webhook'
    )
    // Error logging handled by enhanced logging session
    return // Exit early
  }

@@ -1097,12 +1081,7 @@ export async function fetchAndProcessAirtablePayloads(
          error: errorMessage,
        }
      )
      await persistExecutionError(
        workflowData.id,
        `${internalPollIdPrefix}-api-error-${apiCallCount}`,
        new Error(`Airtable API Error: ${errorMessage}`),
        'webhook'
      )
      // Error logging handled by enhanced logging session
      mightHaveMore = false
      break
    }
@@ -1246,12 +1225,7 @@ export async function fetchAndProcessAirtablePayloads(
        cursor: currentCursor,
        error: dbError.message,
      })
      await persistExecutionError(
        workflowData.id,
        `${internalPollIdPrefix}-cursor-persist-error`,
        dbError,
        'webhook'
      )
      // Error logging handled by enhanced logging session
      mightHaveMore = false
      throw new Error('Failed to save Airtable cursor, stopping processing.') // Re-throw to break loop clearly
    }
@@ -1271,12 +1245,7 @@ export async function fetchAndProcessAirtablePayloads(
      `[${requestId}] Network error calling Airtable GET /payloads (Call ${apiCallCount}) for webhook ${webhookData.id}`,
      fetchError
    )
    await persistExecutionError(
      workflowData.id,
      `${internalPollIdPrefix}-fetch-error-${apiCallCount}`,
      fetchError,
      'webhook'
    )
    // Error logging handled by enhanced logging session
    mightHaveMore = false
    break
  }
@@ -1347,13 +1316,7 @@ export async function fetchAndProcessAirtablePayloads(
      error: (error as Error).message,
    }
  )
  // Persist this higher-level error
  await persistExecutionError(
    workflowData.id,
    `${internalPollIdPrefix}-processing-error`,
    error as Error,
    'webhook'
  )
  // Error logging handled by enhanced logging session
}

// DEBUG: Log function completion
@@ -27,10 +27,10 @@ const nextConfig: NextConfig = {
  },
  ...(env.NODE_ENV === 'development' && {
    allowedDevOrigins: [
      ...(process.env.NEXT_PUBLIC_APP_URL
      ...(env.NEXT_PUBLIC_APP_URL
        ? (() => {
            try {
              return [new URL(process.env.NEXT_PUBLIC_APP_URL).host]
              return [new URL(env.NEXT_PUBLIC_APP_URL).host]
            } catch {
              return []
            }
@@ -81,7 +81,7 @@ const nextConfig: NextConfig = {
        { key: 'Access-Control-Allow-Credentials', value: 'true' },
        {
          key: 'Access-Control-Allow-Origin',
          value: process.env.NEXT_PUBLIC_APP_URL || 'http://localhost:3001',
          value: env.NEXT_PUBLIC_APP_URL || 'http://localhost:3001',
        },
        {
          key: 'Access-Control-Allow-Methods',
@@ -158,7 +158,7 @@ const nextConfig: NextConfig = {
        },
        {
          key: 'Content-Security-Policy',
          value: `default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval' https://*.google.com https://apis.google.com https://*.vercel-scripts.com https://*.vercel-insights.com https://vercel.live https://*.vercel.live https://vercel.com https://*.vercel.app https://vitals.vercel-insights.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com; img-src 'self' data: blob: https://*.googleusercontent.com https://*.google.com https://*.atlassian.com https://cdn.discordapp.com https://*.githubusercontent.com; media-src 'self' blob:; font-src 'self' https://fonts.gstatic.com; connect-src 'self' ${process.env.NEXT_PUBLIC_APP_URL || ''} ${env.OLLAMA_URL || 'http://localhost:11434'} ${process.env.NEXT_PUBLIC_SOCKET_URL || 'http://localhost:3002'} ${process.env.NEXT_PUBLIC_SOCKET_URL?.replace('http://', 'ws://').replace('https://', 'wss://') || 'ws://localhost:3002'} https://*.up.railway.app wss://*.up.railway.app https://api.browser-use.com https://*.googleapis.com https://*.amazonaws.com https://*.s3.amazonaws.com https://*.blob.core.windows.net https://*.vercel-insights.com https://vitals.vercel-insights.com https://*.atlassian.com https://vercel.live https://*.vercel.live https://vercel.com https://*.vercel.app wss://*.vercel.app; frame-src https://drive.google.com https://*.google.com; frame-ancestors 'self'; form-action 'self'; base-uri 'self'; object-src 'none'`,
          value: `default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval' https://*.google.com https://apis.google.com https://*.vercel-scripts.com https://*.vercel-insights.com https://vercel.live https://*.vercel.live https://vercel.com https://*.vercel.app https://vitals.vercel-insights.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com; img-src 'self' data: blob: https://*.googleusercontent.com https://*.google.com https://*.atlassian.com https://cdn.discordapp.com https://*.githubusercontent.com; media-src 'self' blob:; font-src 'self' https://fonts.gstatic.com; connect-src 'self' ${env.NEXT_PUBLIC_APP_URL || ''} ${env.OLLAMA_URL || 'http://localhost:11434'} ${env.NEXT_PUBLIC_SOCKET_URL || 'http://localhost:3002'} ${env.NEXT_PUBLIC_SOCKET_URL?.replace('http://', 'ws://').replace('https://', 'wss://') || 'ws://localhost:3002'} https://*.up.railway.app wss://*.up.railway.app https://api.browser-use.com https://*.googleapis.com https://*.amazonaws.com https://*.s3.amazonaws.com https://*.blob.core.windows.net https://*.vercel-insights.com https://vitals.vercel-insights.com https://*.atlassian.com https://vercel.live https://*.vercel.live https://vercel.com https://*.vercel.app wss://*.vercel.app; frame-src https://drive.google.com https://*.google.com; frame-ancestors 'self'; form-action 'self'; base-uri 'self'; object-src 'none'`,
        },
      ],
    },
@@ -84,20 +84,43 @@ describe('Socket Server Index Integration', () => {
    const httpHandler = createHttpHandler(roomManager, logger)
    httpServer.on('request', httpHandler)

    // Start server
    await new Promise<void>((resolve) => {
    // Start server with timeout handling
    await new Promise<void>((resolve, reject) => {
      const timeout = setTimeout(() => {
        reject(new Error(`Server failed to start on port ${PORT} within 15 seconds`))
      }, 15000)

      httpServer.listen(PORT, '0.0.0.0', () => {
        clearTimeout(timeout)
        resolve()
      })

      httpServer.on('error', (err: any) => {
        clearTimeout(timeout)
        if (err.code === 'EADDRINUSE') {
          // Try a different port
          PORT = 3333 + Math.floor(Math.random() * 1000)
          httpServer.listen(PORT, '0.0.0.0', () => {
            resolve()
          })
        } else {
          reject(err)
        }
      })
    })
  }, 20000)

  afterEach(async () => {
    // Properly close servers and wait for them to fully close
    if (io) {
      io.close()
      await new Promise<void>((resolve) => {
        io.close(() => resolve())
      })
    }
    if (httpServer) {
      httpServer.close()
      await new Promise<void>((resolve) => {
        httpServer.close(() => resolve())
      })
    }
    vi.clearAllMocks()
  })
@@ -322,7 +345,7 @@ describe('Socket Server Index Integration', () => {
    expect(() => WorkflowOperationSchema.parse(validEdgeOperation)).not.toThrow()
  })

  it.concurrent('should validate subflow operations', async () => {
  it('should validate subflow operations', async () => {
    const { WorkflowOperationSchema } = await import('./validation/schemas')

    const validSubflowOperation = {
|
||||
|
||||
const responseData = await response.json()
|
||||
|
||||
// Remove the folder from local state
|
||||
get().removeFolder(id)
|
||||
|
||||
// Remove from expanded state
|
||||
@@ -323,33 +324,19 @@ export const useFolderStore = create<FolderState>()(
|
||||
return { expandedFolders: newExpanded }
|
||||
})
|
||||
|
||||
// Remove subfolders from local state
|
||||
get().removeSubfoldersRecursively(id)
|
||||
|
||||
// The backend has already deleted the workflows, so we just need to refresh
|
||||
// the workflow registry to sync with the server state
|
||||
const workflowRegistry = useWorkflowRegistry.getState()
|
||||
if (responseData.deletedItems) {
|
||||
try {
|
||||
const workflows = Object.values(workflowRegistry.workflows)
|
||||
const workflowsToDelete = workflows.filter(
|
||||
(workflow) =>
|
||||
workflow.folderId === id || get().isWorkflowInDeletedSubfolder(workflow, id)
|
||||
)
|
||||
|
||||
workflowsToDelete.forEach((workflow) => {
|
||||
workflowRegistry.removeWorkflow(workflow.id)
|
||||
})
|
||||
|
||||
get().removeSubfoldersRecursively(id)
|
||||
|
||||
logger.info(
|
||||
`Deleted ${responseData.deletedItems.workflows} workflow(s) and ${responseData.deletedItems.folders} folder(s)`
|
||||
)
|
||||
} catch (error) {
|
||||
logger.error('Error updating local state after folder deletion:', error)
|
||||
}
|
||||
}
|
||||
|
||||
if (workspaceId) {
|
||||
// Trigger workflow refresh through registry store
|
||||
await workflowRegistry.switchToWorkspace(workspaceId)
|
||||
await workflowRegistry.loadWorkflows(workspaceId)
|
||||
}
|
||||
|
||||
logger.info(
|
||||
`Deleted ${responseData.deletedItems.workflows} workflow(s) and ${responseData.deletedItems.folders} folder(s)`
|
||||
)
|
||||
},
|
||||
|
||||
isWorkflowInDeletedSubfolder: (workflow: Workflow, deletedFolderId: string) => {
|
||||
|
||||
@@ -1,3 +1,4 @@
import { env } from '@/lib/env'
import { getNodeEnv } from '@/lib/environment'
import { createLogger } from '@/lib/logs/console-logger'
import { getBaseUrl } from '@/lib/urls/utils'
@@ -14,7 +15,7 @@ const getReferer = (): string => {
  try {
    return getBaseUrl()
  } catch (_error) {
    return process.env.NEXT_PUBLIC_APP_URL || 'http://localhost:3000'
    return env.NEXT_PUBLIC_APP_URL || 'http://localhost:3000'
  }
}