Compare commits

...

2 Commits

Author SHA1 Message Date
Waleed Latif
c2f786e40b v0.2.9: fix + feat (#643)
* fix(sharing): fixed folders not appearing when sharing workflows (#616)

* fix(sharing): fixed folders not appearing when sharing workflows

* cleanup

* fixed error case

* fix(deletions): folder deletions were hanging + use cascade deletions throughout  (#620)

* use cascade deletion

* fix lint

---------

Co-authored-by: Vikhyath Mondreti <vikhyathmondreti@vikhyaths-air.lan>

* fix(envvars): t3-env standardization (#606)

* chore: use t3-env as source of truth

* chore: update mock env for failing tests

* feat(enhanced logs): integration + log visualizer canvas (#618)

* feat(logs): enhanced logging system with cleanup and theme fixes

- Implement enhanced logging cleanup with S3 archival and retention policies
- Fix error propagation in trace spans for manual executions
- Add theme-aware styling for frozen canvas modal
- Integrate enhanced logging system across all execution pathways
- Add comprehensive trace span processing and iteration navigation
- Fix boolean parameter types in enhanced logs API

* add warning for old logs

* fix lint

* added cost for streaming outputs

* fix overflow issue

* fix lint

* fix selection on closing sidebar

* tooltips z index increase

---------

Co-authored-by: Vikhyath Mondreti <vikhyathmondreti@vikhyaths-air.lan>
Co-authored-by: Waleed Latif <walif6@gmail.com>

* fix(frozen canvas): don't error if workflow state not available for migrated logs (#624)

* fix(frozen canvas): don't error if workflow state not available for old logs

* fix lint

---------

Co-authored-by: Vikhyath Mondreti <vikhyathmondreti@vikhyaths-air.lan>

* fix(reddit): update to oauth endpoints  (#627)

* fix(reddit): change tool to use oauth token

* fix lint

* add contact info

* Update apps/sim/tools/reddit/get_comments.ts

Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com>

* Update apps/sim/tools/reddit/hot_posts.ts

Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com>

* Update apps/sim/tools/reddit/get_posts.ts

Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com>

* fix type error

---------

Co-authored-by: Vikhyath Mondreti <vikhyathmondreti@Vikhyaths-MacBook-Air.local>
Co-authored-by: Vikhyath Mondreti <vikhyathmondreti@Vikhyaths-Air.attlocal.net>
Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com>

* feat(tools): added reordering of tool calls in agent tool input  (#629)

* added tool re-ordering in agent block

* styling

* fix(oauth): fix oauth to use correct subblock value setter + remove unused local storage code (#628)

* fix(oauth): fixed oauth state not persisting in credential selector

* remove unused local storage code for oauth

* fix lint

* selector clearance issue fix

* fix typing issue

* fix lint

* remove cred id from logs

* fix lint

* works

---------

Co-authored-by: Vikhyath Mondreti <vikhyathmondreti@Vikhyaths-Air.attlocal.net>

* fix(mem-deletion): hard deletion of memory (#622)

* fix: memory deletion

* fix: bun run lint

---------

Co-authored-by: Adam Gough <adamgough@Adams-MacBook-Pro.local>

* feat(build): added turbopack builds to prod (#630)

* added turbopack to prod builds

* block access to sourcemaps

* revert changes to docs

* fix(docs): fixed broken docs links (#632)

* fix(resp format): non-json input was crashing (#631)

* fix response format non-json input crash bug

* fix lint

---------

Co-authored-by: Vikhyath Mondreti <vikhyathmondreti@Vikhyaths-Air.attlocal.net>

* fix(revert-deployed): correctly revert to deployed state as unit op using separate endpoint (#633)

* fix(revert-deployed): revert deployed functionality with separate endpoint

* fix lint

---------

Co-authored-by: Vikhyath Mondreti <vikhyathmondreti@Vikhyaths-Air.attlocal.net>

* fix(dropdown): simplify & fix tag dropdown for parallel & loop blocks (#634)

* fix(dropdown): simplify & fix tag dropdown for parallel & loop blocks

* fixed build

* fix(response-format): add response format to tag dropdown, chat panel, and chat client (#637)

* add response format structure to tag dropdown

* handle response format outputs for chat client and chat panel, implemented the response format handling for streamed responses

* cleanup

* fix(sockets-server-disconnection): on reconnect force sync store to db  (#638)

* keep warning until refresh

* works

* fix sockets server sync on reconnection

* infinite reconn attempts

* fix lint

---------

Co-authored-by: Vikhyath Mondreti <vikhyathmondreti@Vikhyaths-Air.attlocal.net>

* fix(build): fixed build

* Revert "fix(sockets-server-disconnection): on reconnect force sync store to d…" (#640)

This reverts commit 6dc8b17bed.

* fix(sockets): force user to refresh on disconnect in order to make changes, add read-only offline mode (#641)

* force user to refresh on disconnect in order to make changes, add read-only offline mode

* remove unused hook

* style

* update tooltip msg

* remove unnecessary useMemo around log

* fix(sockets): added debouncing for sub-block values to prevent overloading socket server, fixed persistence issue during streaming back from LLM response format, removed unused events (#642)

* fix(sockets): added debouncing for sub-block values to prevent overloading socket server, fixed persistence issue during streaming back from LLM response format, removed unused events

* reuse existing isStreaming state for code block llm-generated response format

---------

Co-authored-by: Vikhyath Mondreti <vikhyathvikku@gmail.com>
Co-authored-by: Vikhyath Mondreti <vikhyathmondreti@vikhyaths-air.lan>
Co-authored-by: Aditya Tripathi <aditya@climactic.co>
Co-authored-by: Vikhyath Mondreti <vikhyathmondreti@Vikhyaths-MacBook-Air.local>
Co-authored-by: Vikhyath Mondreti <vikhyathmondreti@Vikhyaths-Air.attlocal.net>
Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com>
Co-authored-by: Adam Gough <77861281+aadamgough@users.noreply.github.com>
Co-authored-by: Adam Gough <adamgough@Adams-MacBook-Pro.local>
2025-07-08 21:58:06 -07:00
Vikhyath Mondreti
f3bc1fc250 v0.2.8: fix + feat + improvement (#621)
* fix(sharing): fixed folders not appearing when sharing workflows (#616)

* fix(sharing): fixed folders not appearing when sharing workflows

* cleanup

* fixed error case

* fix(deletions): folder deletions were hanging + use cascade deletions throughout  (#620)

* use cascade deletion

* fix lint

---------

Co-authored-by: Vikhyath Mondreti <vikhyathmondreti@vikhyaths-air.lan>

* fix(envvars): t3-env standardization (#606)

* chore: use t3-env as source of truth

* chore: update mock env for failing tests

* feat(enhanced logs): integration + log visualizer canvas (#618)

* feat(logs): enhanced logging system with cleanup and theme fixes

- Implement enhanced logging cleanup with S3 archival and retention policies
- Fix error propagation in trace spans for manual executions
- Add theme-aware styling for frozen canvas modal
- Integrate enhanced logging system across all execution pathways
- Add comprehensive trace span processing and iteration navigation
- Fix boolean parameter types in enhanced logs API

* add warning for old logs

* fix lint

* added cost for streaming outputs

* fix overflow issue

* fix lint

* fix selection on closing sidebar

* tooltips z index increase

---------

Co-authored-by: Vikhyath Mondreti <vikhyathmondreti@vikhyaths-air.lan>
Co-authored-by: Waleed Latif <walif6@gmail.com>

---------

Co-authored-by: Waleed Latif <walif6@gmail.com>
Co-authored-by: Vikhyath Mondreti <vikhyathmondreti@vikhyaths-air.lan>
Co-authored-by: Aditya Tripathi <aditya@climactic.co>
2025-07-06 20:07:43 -07:00
139 changed files with 7980 additions and 1973 deletions

View File

@@ -81,4 +81,4 @@ Sim Studio provides a wide range of features designed to accelerate your develop
##
Ready to get started? Check out our [Getting Started](/getting-started) guide or explore our [Blocks](/docs/blocks) and [Tools](/docs/tools) in more detail.
Ready to get started? Check out our [Getting Started](/getting-started) guide or explore our [Blocks](/blocks) and [Tools](/tools) in more detail.

View File

@@ -19,7 +19,7 @@
"fumadocs-mdx": "^11.5.6",
"fumadocs-ui": "^15.0.16",
"lucide-react": "^0.511.0",
"next": "^15.2.3",
"next": "^15.3.2",
"next-themes": "^0.4.6",
"react": "19.1.0",
"react-dom": "19.1.0",

View File

@@ -14,6 +14,8 @@ const logger = createLogger('OAuthTokenAPI')
export async function POST(request: NextRequest) {
const requestId = crypto.randomUUID().slice(0, 8)
logger.info(`[${requestId}] OAuth token API POST request received`)
try {
// Parse request body
const body = await request.json()
@@ -38,6 +40,7 @@ export async function POST(request: NextRequest) {
const credential = await getCredential(requestId, credentialId, userId)
if (!credential) {
logger.error(`[${requestId}] Credential not found: ${credentialId}`)
return NextResponse.json({ error: 'Credential not found' }, { status: 404 })
}
@@ -45,7 +48,8 @@ export async function POST(request: NextRequest) {
// Refresh the token if needed
const { accessToken } = await refreshTokenIfNeeded(requestId, credential, credentialId)
return NextResponse.json({ accessToken }, { status: 200 })
} catch (_error) {
} catch (error) {
logger.error(`[${requestId}] Failed to refresh access token:`, error)
return NextResponse.json({ error: 'Failed to refresh access token' }, { status: 401 })
}
} catch (error) {

View File

@@ -89,6 +89,7 @@ export async function getOAuthToken(userId: string, providerId: string): Promise
// Check if the token is expired and needs refreshing
const now = new Date()
const tokenExpiry = credential.accessTokenExpiresAt
// Only refresh if we have an expiration time AND it's expired AND we have a refresh token
const needsRefresh = tokenExpiry && tokenExpiry < now && !!credential.refreshToken
if (needsRefresh) {
@@ -166,7 +167,9 @@ export async function refreshAccessTokenIfNeeded(
// Check if we need to refresh the token
const expiresAt = credential.accessTokenExpiresAt
const now = new Date()
const needsRefresh = !expiresAt || expiresAt <= now
// Only refresh if we have an expiration time AND it's expired
// If no expiration time is set (newly created credentials), assume token is valid
const needsRefresh = expiresAt && expiresAt <= now
const accessToken = credential.accessToken
@@ -233,7 +236,9 @@ export async function refreshTokenIfNeeded(
// Check if we need to refresh the token
const expiresAt = credential.accessTokenExpiresAt
const now = new Date()
const needsRefresh = !expiresAt || expiresAt <= now
// Only refresh if we have an expiration time AND it's expired
// If no expiration time is set (newly created credentials), assume token is valid
const needsRefresh = expiresAt && expiresAt <= now
// If token is still valid, return it directly
if (!needsRefresh || !credential.refreshToken) {

View File

@@ -194,6 +194,7 @@ export async function GET(
description: deployment.description,
customizations: deployment.customizations,
authType: deployment.authType,
outputConfigs: deployment.outputConfigs,
}),
request
)
@@ -219,6 +220,7 @@ export async function GET(
description: deployment.description,
customizations: deployment.customizations,
authType: deployment.authType,
outputConfigs: deployment.outputConfigs,
}),
request
)

View File

@@ -3,8 +3,9 @@ import { type NextRequest, NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console-logger'
import { persistExecutionLogs } from '@/lib/logs/execution-logger'
import { EnhancedLoggingSession } from '@/lib/logs/enhanced-logging-session'
import { buildTraceSpans } from '@/lib/logs/trace-spans'
import { processStreamingBlockLogs } from '@/lib/tokenization'
import { decryptSecret } from '@/lib/utils'
import { db } from '@/db'
import { chat, environment as envTable, userStats, workflow } from '@/db/schema'
@@ -252,32 +253,42 @@ export async function executeWorkflowForChat(
const deployment = deploymentResult[0]
const workflowId = deployment.workflowId
const executionId = uuidv4()
// Set up enhanced logging for chat execution
const loggingSession = new EnhancedLoggingSession(workflowId, executionId, 'chat', requestId)
// Check for multi-output configuration in customizations
const customizations = (deployment.customizations || {}) as Record<string, any>
let outputBlockIds: string[] = []
let outputPaths: string[] = []
// Extract output configs from the new schema format
let selectedOutputIds: string[] = []
if (deployment.outputConfigs && Array.isArray(deployment.outputConfigs)) {
// Extract block IDs and paths from the new outputConfigs array format
// Extract output IDs in the format expected by the streaming processor
logger.debug(
`[${requestId}] Found ${deployment.outputConfigs.length} output configs in deployment`
)
deployment.outputConfigs.forEach((config) => {
selectedOutputIds = deployment.outputConfigs.map((config) => {
const outputId = config.path
? `${config.blockId}_${config.path}`
: `${config.blockId}.content`
logger.debug(
`[${requestId}] Processing output config: blockId=${config.blockId}, path=${config.path || 'none'}`
`[${requestId}] Processing output config: blockId=${config.blockId}, path=${config.path || 'content'} -> outputId=${outputId}`
)
return outputId
})
// Also extract block IDs for legacy compatibility
outputBlockIds = deployment.outputConfigs.map((config) => config.blockId)
outputPaths = deployment.outputConfigs.map((config) => config.path || '')
} else {
// Use customizations as fallback
outputBlockIds = Array.isArray(customizations.outputBlockIds)
? customizations.outputBlockIds
: []
outputPaths = Array.isArray(customizations.outputPaths) ? customizations.outputPaths : []
}
// Fall back to customizations if we still have no outputs
@@ -287,10 +298,11 @@ export async function executeWorkflowForChat(
customizations.outputBlockIds.length > 0
) {
outputBlockIds = customizations.outputBlockIds
outputPaths = customizations.outputPaths || new Array(outputBlockIds.length).fill('')
}
logger.debug(`[${requestId}] Using ${outputBlockIds.length} output blocks for extraction`)
logger.debug(
`[${requestId}] Using ${outputBlockIds.length} output blocks and ${selectedOutputIds.length} selected output IDs for extraction`
)
// Find the workflow (deployedState is NOT deprecated - needed for chat execution)
const workflowResult = await db
@@ -407,6 +419,13 @@ export async function executeWorkflowForChat(
{} as Record<string, Record<string, any>>
)
// Start enhanced logging session
await loggingSession.safeStart({
userId: deployment.userId,
workspaceId: '', // TODO: Get from workflow
variables: workflowVariables,
})
const stream = new ReadableStream({
async start(controller) {
const encoder = new TextEncoder()
@@ -449,7 +468,7 @@ export async function executeWorkflowForChat(
workflowVariables,
contextExtensions: {
stream: true,
selectedOutputIds: outputBlockIds,
selectedOutputIds: selectedOutputIds.length > 0 ? selectedOutputIds : outputBlockIds,
edges: edges.map((e: any) => ({
source: e.source,
target: e.target,
@@ -458,16 +477,41 @@ export async function executeWorkflowForChat(
},
})
const result = await executor.execute(workflowId)
// Set up enhanced logging on the executor
loggingSession.setupExecutor(executor)
let result
try {
result = await executor.execute(workflowId)
} catch (error: any) {
logger.error(`[${requestId}] Chat workflow execution failed:`, error)
await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: 0,
error: {
message: error.message || 'Chat workflow execution failed',
stackTrace: error.stack,
},
})
throw error
}
if (result && 'success' in result) {
result.logs?.forEach((log: BlockLog) => {
if (streamedContent.has(log.blockId)) {
if (log.output) {
log.output.content = streamedContent.get(log.blockId)
// Update streamed content and apply tokenization
if (result.logs) {
result.logs.forEach((log: BlockLog) => {
if (streamedContent.has(log.blockId)) {
const content = streamedContent.get(log.blockId)
if (log.output) {
log.output.content = content
}
}
}
})
})
// Process all logs for streaming tokenization
const processedCount = processStreamingBlockLogs(result.logs, streamedContent)
logger.info(`[CHAT-API] Processed ${processedCount} blocks for streaming tokenization`)
}
const { traceSpans, totalDuration } = buildTraceSpans(result)
const enrichedResult = { ...result, traceSpans, totalDuration }
@@ -481,8 +525,7 @@ export async function executeWorkflowForChat(
;(enrichedResult.metadata as any).conversationId = conversationId
}
const executionId = uuidv4()
await persistExecutionLogs(workflowId, executionId, enrichedResult, 'chat')
logger.debug(`Persisted logs for deployed chat: ${executionId}`)
logger.debug(`Generated execution ID for deployed chat: ${executionId}`)
if (result.success) {
try {
@@ -506,6 +549,17 @@ export async function executeWorkflowForChat(
)
}
// Complete enhanced logging session (for both success and failure)
if (result && 'success' in result) {
const { traceSpans } = buildTraceSpans(result)
await loggingSession.safeComplete({
endedAt: new Date().toISOString(),
totalDurationMs: result.metadata?.duration || 0,
finalOutput: result.output,
traceSpans,
})
}
controller.close()
},
})

View File

@@ -40,6 +40,7 @@ describe('Individual Folder API Route', () => {
}
const { mockAuthenticatedUser, mockUnauthenticated } = mockAuth(TEST_USER)
const mockGetUserEntityPermissions = vi.fn()
function createFolderDbMock(options: FolderDbMockOptions = {}) {
const {
@@ -109,6 +110,12 @@ describe('Individual Folder API Route', () => {
vi.resetModules()
vi.clearAllMocks()
setupCommonApiMocks()
mockGetUserEntityPermissions.mockResolvedValue('admin')
vi.doMock('@/lib/permissions/utils', () => ({
getUserEntityPermissions: mockGetUserEntityPermissions,
}))
})
afterEach(() => {
@@ -181,6 +188,72 @@ describe('Individual Folder API Route', () => {
expect(data).toHaveProperty('error', 'Unauthorized')
})
it('should return 403 when user has only read permissions', async () => {
mockAuthenticatedUser()
mockGetUserEntityPermissions.mockResolvedValue('read') // Read-only permissions
const dbMock = createFolderDbMock()
vi.doMock('@/db', () => dbMock)
const req = createMockRequest('PUT', {
name: 'Updated Folder',
})
const params = Promise.resolve({ id: 'folder-1' })
const { PUT } = await import('./route')
const response = await PUT(req, { params })
expect(response.status).toBe(403)
const data = await response.json()
expect(data).toHaveProperty('error', 'Write access required to update folders')
})
it('should allow folder update for write permissions', async () => {
mockAuthenticatedUser()
mockGetUserEntityPermissions.mockResolvedValue('write') // Write permissions
const dbMock = createFolderDbMock()
vi.doMock('@/db', () => dbMock)
const req = createMockRequest('PUT', {
name: 'Updated Folder',
})
const params = Promise.resolve({ id: 'folder-1' })
const { PUT } = await import('./route')
const response = await PUT(req, { params })
expect(response.status).toBe(200)
const data = await response.json()
expect(data).toHaveProperty('folder')
})
it('should allow folder update for admin permissions', async () => {
mockAuthenticatedUser()
mockGetUserEntityPermissions.mockResolvedValue('admin') // Admin permissions
const dbMock = createFolderDbMock()
vi.doMock('@/db', () => dbMock)
const req = createMockRequest('PUT', {
name: 'Updated Folder',
})
const params = Promise.resolve({ id: 'folder-1' })
const { PUT } = await import('./route')
const response = await PUT(req, { params })
expect(response.status).toBe(200)
const data = await response.json()
expect(data).toHaveProperty('folder')
})
it('should return 400 when trying to set folder as its own parent', async () => {
mockAuthenticatedUser()
@@ -387,6 +460,68 @@ describe('Individual Folder API Route', () => {
expect(data).toHaveProperty('error', 'Unauthorized')
})
it('should return 403 when user has only read permissions for delete', async () => {
mockAuthenticatedUser()
mockGetUserEntityPermissions.mockResolvedValue('read') // Read-only permissions
const dbMock = createFolderDbMock()
vi.doMock('@/db', () => dbMock)
const req = createMockRequest('DELETE')
const params = Promise.resolve({ id: 'folder-1' })
const { DELETE } = await import('./route')
const response = await DELETE(req, { params })
expect(response.status).toBe(403)
const data = await response.json()
expect(data).toHaveProperty('error', 'Admin access required to delete folders')
})
it('should return 403 when user has only write permissions for delete', async () => {
mockAuthenticatedUser()
mockGetUserEntityPermissions.mockResolvedValue('write') // Write permissions (not enough for delete)
const dbMock = createFolderDbMock()
vi.doMock('@/db', () => dbMock)
const req = createMockRequest('DELETE')
const params = Promise.resolve({ id: 'folder-1' })
const { DELETE } = await import('./route')
const response = await DELETE(req, { params })
expect(response.status).toBe(403)
const data = await response.json()
expect(data).toHaveProperty('error', 'Admin access required to delete folders')
})
it('should allow folder deletion for admin permissions', async () => {
mockAuthenticatedUser()
mockGetUserEntityPermissions.mockResolvedValue('admin') // Admin permissions
const dbMock = createFolderDbMock({
folderLookupResult: mockFolder,
})
vi.doMock('@/db', () => dbMock)
const req = createMockRequest('DELETE')
const params = Promise.resolve({ id: 'folder-1' })
const { DELETE } = await import('./route')
const response = await DELETE(req, { params })
expect(response.status).toBe(200)
const data = await response.json()
expect(data).toHaveProperty('success', true)
})
it('should handle database errors during deletion', async () => {
mockAuthenticatedUser()

View File

@@ -2,6 +2,7 @@ import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console-logger'
import { getUserEntityPermissions } from '@/lib/permissions/utils'
import { db } from '@/db'
import { workflow, workflowFolder } from '@/db/schema'
@@ -19,17 +20,31 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
const body = await request.json()
const { name, color, isExpanded, parentId } = body
// Verify the folder exists and belongs to the user
// Verify the folder exists
const existingFolder = await db
.select()
.from(workflowFolder)
.where(and(eq(workflowFolder.id, id), eq(workflowFolder.userId, session.user.id)))
.where(eq(workflowFolder.id, id))
.then((rows) => rows[0])
if (!existingFolder) {
return NextResponse.json({ error: 'Folder not found' }, { status: 404 })
}
// Check if user has write permissions for the workspace
const workspacePermission = await getUserEntityPermissions(
session.user.id,
'workspace',
existingFolder.workspaceId
)
if (!workspacePermission || workspacePermission === 'read') {
return NextResponse.json(
{ error: 'Write access required to update folders' },
{ status: 403 }
)
}
// Prevent setting a folder as its own parent or creating circular references
if (parentId && parentId === id) {
return NextResponse.json({ error: 'Folder cannot be its own parent' }, { status: 400 })
@@ -81,19 +96,33 @@ export async function DELETE(
const { id } = await params
// Verify the folder exists and belongs to the user
// Verify the folder exists
const existingFolder = await db
.select()
.from(workflowFolder)
.where(and(eq(workflowFolder.id, id), eq(workflowFolder.userId, session.user.id)))
.where(eq(workflowFolder.id, id))
.then((rows) => rows[0])
if (!existingFolder) {
return NextResponse.json({ error: 'Folder not found' }, { status: 404 })
}
// Check if user has admin permissions for the workspace (admin-only for deletions)
const workspacePermission = await getUserEntityPermissions(
session.user.id,
'workspace',
existingFolder.workspaceId
)
if (workspacePermission !== 'admin') {
return NextResponse.json(
{ error: 'Admin access required to delete folders' },
{ status: 403 }
)
}
// Recursively delete folder and all its contents
const deletionStats = await deleteFolderRecursively(id, session.user.id)
const deletionStats = await deleteFolderRecursively(id, existingFolder.workspaceId)
logger.info('Deleted folder and all contents:', {
id,
@@ -113,41 +142,40 @@ export async function DELETE(
// Helper function to recursively delete a folder and all its contents
async function deleteFolderRecursively(
folderId: string,
userId: string
workspaceId: string
): Promise<{ folders: number; workflows: number }> {
const stats = { folders: 0, workflows: 0 }
// Get all child folders first
// Get all child folders first (workspace-scoped, not user-scoped)
const childFolders = await db
.select({ id: workflowFolder.id })
.from(workflowFolder)
.where(and(eq(workflowFolder.parentId, folderId), eq(workflowFolder.userId, userId)))
.where(and(eq(workflowFolder.parentId, folderId), eq(workflowFolder.workspaceId, workspaceId)))
// Recursively delete child folders
for (const childFolder of childFolders) {
const childStats = await deleteFolderRecursively(childFolder.id, userId)
const childStats = await deleteFolderRecursively(childFolder.id, workspaceId)
stats.folders += childStats.folders
stats.workflows += childStats.workflows
}
// Delete all workflows in this folder
// Delete all workflows in this folder (workspace-scoped, not user-scoped)
// The database cascade will handle deleting related workflow_blocks, workflow_edges, workflow_subflows
const workflowsInFolder = await db
.select({ id: workflow.id })
.from(workflow)
.where(and(eq(workflow.folderId, folderId), eq(workflow.userId, userId)))
.where(and(eq(workflow.folderId, folderId), eq(workflow.workspaceId, workspaceId)))
if (workflowsInFolder.length > 0) {
await db
.delete(workflow)
.where(and(eq(workflow.folderId, folderId), eq(workflow.userId, userId)))
.where(and(eq(workflow.folderId, folderId), eq(workflow.workspaceId, workspaceId)))
stats.workflows += workflowsInFolder.length
}
// Delete this folder
await db
.delete(workflowFolder)
.where(and(eq(workflowFolder.id, folderId), eq(workflowFolder.userId, userId)))
await db.delete(workflowFolder).where(eq(workflowFolder.id, folderId))
stats.folders += 1

View File

@@ -52,6 +52,7 @@ describe('Folders API Route', () => {
const mockValues = vi.fn()
const mockReturning = vi.fn()
const mockTransaction = vi.fn()
const mockGetUserEntityPermissions = vi.fn()
beforeEach(() => {
vi.resetModules()
@@ -72,6 +73,8 @@ describe('Folders API Route', () => {
mockValues.mockReturnValue({ returning: mockReturning })
mockReturning.mockReturnValue([mockFolders[0]])
mockGetUserEntityPermissions.mockResolvedValue('admin')
vi.doMock('@/db', () => ({
db: {
select: mockSelect,
@@ -79,6 +82,10 @@ describe('Folders API Route', () => {
transaction: mockTransaction,
},
}))
vi.doMock('@/lib/permissions/utils', () => ({
getUserEntityPermissions: mockGetUserEntityPermissions,
}))
})
afterEach(() => {
@@ -143,6 +150,42 @@ describe('Folders API Route', () => {
expect(data).toHaveProperty('error', 'Workspace ID is required')
})
it('should return 403 when user has no workspace permissions', async () => {
mockAuthenticatedUser()
mockGetUserEntityPermissions.mockResolvedValue(null) // No permissions
const mockRequest = createMockRequest('GET')
Object.defineProperty(mockRequest, 'url', {
value: 'http://localhost:3000/api/folders?workspaceId=workspace-123',
})
const { GET } = await import('./route')
const response = await GET(mockRequest)
expect(response.status).toBe(403)
const data = await response.json()
expect(data).toHaveProperty('error', 'Access denied to this workspace')
})
it('should return 403 when user has only read permissions', async () => {
mockAuthenticatedUser()
mockGetUserEntityPermissions.mockResolvedValue('read') // Read-only permissions
const mockRequest = createMockRequest('GET')
Object.defineProperty(mockRequest, 'url', {
value: 'http://localhost:3000/api/folders?workspaceId=workspace-123',
})
const { GET } = await import('./route')
const response = await GET(mockRequest)
expect(response.status).toBe(200) // Should work for read permissions
const data = await response.json()
expect(data).toHaveProperty('folders')
})
it('should handle database errors gracefully', async () => {
mockAuthenticatedUser()
@@ -295,6 +338,100 @@ describe('Folders API Route', () => {
expect(data).toHaveProperty('error', 'Unauthorized')
})
it('should return 403 when user has only read permissions', async () => {
mockAuthenticatedUser()
mockGetUserEntityPermissions.mockResolvedValue('read') // Read-only permissions
const req = createMockRequest('POST', {
name: 'Test Folder',
workspaceId: 'workspace-123',
})
const { POST } = await import('./route')
const response = await POST(req)
expect(response.status).toBe(403)
const data = await response.json()
expect(data).toHaveProperty('error', 'Write or Admin access required to create folders')
})
it('should allow folder creation for write permissions', async () => {
mockAuthenticatedUser()
mockGetUserEntityPermissions.mockResolvedValue('write') // Write permissions
mockTransaction.mockImplementationOnce(async (callback: any) => {
const tx = {
select: vi.fn().mockReturnValue({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
orderBy: vi.fn().mockReturnValue({
limit: vi.fn().mockReturnValue([]), // No existing folders
}),
}),
}),
}),
insert: vi.fn().mockReturnValue({
values: vi.fn().mockReturnValue({
returning: vi.fn().mockReturnValue([mockFolders[0]]),
}),
}),
}
return await callback(tx)
})
const req = createMockRequest('POST', {
name: 'Test Folder',
workspaceId: 'workspace-123',
})
const { POST } = await import('./route')
const response = await POST(req)
expect(response.status).toBe(200)
const data = await response.json()
expect(data).toHaveProperty('folder')
})
it('should allow folder creation for admin permissions', async () => {
mockAuthenticatedUser()
mockGetUserEntityPermissions.mockResolvedValue('admin') // Admin permissions
mockTransaction.mockImplementationOnce(async (callback: any) => {
const tx = {
select: vi.fn().mockReturnValue({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
orderBy: vi.fn().mockReturnValue({
limit: vi.fn().mockReturnValue([]), // No existing folders
}),
}),
}),
}),
insert: vi.fn().mockReturnValue({
values: vi.fn().mockReturnValue({
returning: vi.fn().mockReturnValue([mockFolders[0]]),
}),
}),
}
return await callback(tx)
})
const req = createMockRequest('POST', {
name: 'Test Folder',
workspaceId: 'workspace-123',
})
const { POST } = await import('./route')
const response = await POST(req)
expect(response.status).toBe(200)
const data = await response.json()
expect(data).toHaveProperty('folder')
})
it('should return 400 when required fields are missing', async () => {
const testCases = [
{ name: '', workspaceId: 'workspace-123' }, // Missing name

View File

@@ -2,6 +2,7 @@ import { and, asc, desc, eq, isNull } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console-logger'
import { getUserEntityPermissions } from '@/lib/permissions/utils'
import { db } from '@/db'
import { workflowFolder } from '@/db/schema'
@@ -22,13 +23,23 @@ export async function GET(request: NextRequest) {
return NextResponse.json({ error: 'Workspace ID is required' }, { status: 400 })
}
// Fetch all folders for the workspace, ordered by sortOrder and createdAt
// Check if user has workspace permissions
const workspacePermission = await getUserEntityPermissions(
session.user.id,
'workspace',
workspaceId
)
if (!workspacePermission) {
return NextResponse.json({ error: 'Access denied to this workspace' }, { status: 403 })
}
// If user has workspace permissions, fetch ALL folders in the workspace
// This allows shared workspace members to see folders created by other users
const folders = await db
.select()
.from(workflowFolder)
.where(
and(eq(workflowFolder.workspaceId, workspaceId), eq(workflowFolder.userId, session.user.id))
)
.where(eq(workflowFolder.workspaceId, workspaceId))
.orderBy(asc(workflowFolder.sortOrder), asc(workflowFolder.createdAt))
return NextResponse.json({ folders })
@@ -53,19 +64,33 @@ export async function POST(request: NextRequest) {
return NextResponse.json({ error: 'Name and workspace ID are required' }, { status: 400 })
}
// Check if user has workspace permissions (at least 'write' access to create folders)
const workspacePermission = await getUserEntityPermissions(
session.user.id,
'workspace',
workspaceId
)
if (!workspacePermission || workspacePermission === 'read') {
return NextResponse.json(
{ error: 'Write or Admin access required to create folders' },
{ status: 403 }
)
}
// Generate a new ID
const id = crypto.randomUUID()
// Use transaction to ensure sortOrder consistency
const newFolder = await db.transaction(async (tx) => {
// Get the next sort order for the parent (or root level)
// Consider all folders in the workspace, not just those created by current user
const existingFolders = await tx
.select({ sortOrder: workflowFolder.sortOrder })
.from(workflowFolder)
.where(
and(
eq(workflowFolder.workspaceId, workspaceId),
eq(workflowFolder.userId, session.user.id),
parentId ? eq(workflowFolder.parentId, parentId) : isNull(workflowFolder.parentId)
)
)

View File

@@ -0,0 +1,76 @@
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { createLogger } from '@/lib/logs/console-logger'
import { db } from '@/db'
import { workflowExecutionLogs, workflowExecutionSnapshots } from '@/db/schema'
const logger = createLogger('FrozenCanvasAPI')
/**
 * GET handler: returns the frozen-canvas payload for one execution — the
 * workflow state snapshot captured at run time plus execution metadata
 * (trigger, timing, block stats, cost, token totals).
 *
 * Responds 404 when either the execution log or its snapshot is missing,
 * and 500 on any unexpected failure.
 */
export async function GET(
  _request: NextRequest,
  { params }: { params: Promise<{ executionId: string }> }
) {
  try {
    const { executionId } = await params

    logger.debug(`Fetching frozen canvas data for execution: ${executionId}`)

    // Look up the execution log row; it carries the snapshot reference.
    const logRows = await db
      .select()
      .from(workflowExecutionLogs)
      .where(eq(workflowExecutionLogs.executionId, executionId))
      .limit(1)
    const workflowLog = logRows[0]

    if (!workflowLog) {
      return NextResponse.json({ error: 'Workflow execution not found' }, { status: 404 })
    }

    // Resolve the workflow state snapshot taken for this execution.
    const snapshotRows = await db
      .select()
      .from(workflowExecutionSnapshots)
      .where(eq(workflowExecutionSnapshots.id, workflowLog.stateSnapshotId))
      .limit(1)
    const snapshot = snapshotRows[0]

    if (!snapshot) {
      return NextResponse.json({ error: 'Workflow state snapshot not found' }, { status: 404 })
    }

    // Cost columns are stored as strings; absent/empty values map to null.
    const asCost = (value: any) => (value ? Number.parseFloat(value) : null)

    const response = {
      executionId,
      workflowId: workflowLog.workflowId,
      workflowState: snapshot.stateData,
      executionMetadata: {
        trigger: workflowLog.trigger,
        startedAt: workflowLog.startedAt.toISOString(),
        endedAt: workflowLog.endedAt?.toISOString(),
        totalDurationMs: workflowLog.totalDurationMs,
        blockStats: {
          total: workflowLog.blockCount,
          success: workflowLog.successCount,
          error: workflowLog.errorCount,
          skipped: workflowLog.skippedCount,
        },
        cost: {
          total: asCost(workflowLog.totalCost),
          input: asCost(workflowLog.totalInputCost),
          output: asCost(workflowLog.totalOutputCost),
        },
        totalTokens: workflowLog.totalTokens,
      },
    }

    logger.debug(`Successfully fetched frozen canvas data for execution: ${executionId}`)
    const blockIds = Object.keys((snapshot.stateData as any)?.blocks || {})
    logger.debug(`Workflow state contains ${blockIds.length} blocks`)

    return NextResponse.json(response)
  } catch (error) {
    logger.error('Error fetching frozen canvas data:', error)
    return NextResponse.json({ error: 'Failed to fetch frozen canvas data' }, { status: 500 })
  }
}

View File

@@ -3,9 +3,10 @@ import { and, eq, inArray, lt, sql } from 'drizzle-orm'
import { NextResponse } from 'next/server'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console-logger'
import { snapshotService } from '@/lib/logs/snapshot-service'
import { getS3Client } from '@/lib/uploads/s3/s3-client'
import { db } from '@/db'
import { subscription, user, workflow, workflowLogs } from '@/db/schema'
import { subscription, user, workflow, workflowExecutionLogs } from '@/db/schema'
export const dynamic = 'force-dynamic'
@@ -66,99 +67,143 @@ export async function GET(request: Request) {
const workflowIds = workflowsQuery.map((w) => w.id)
const results = {
total: 0,
archived: 0,
archiveFailed: 0,
deleted: 0,
deleteFailed: 0,
enhancedLogs: {
total: 0,
archived: 0,
archiveFailed: 0,
deleted: 0,
deleteFailed: 0,
},
snapshots: {
cleaned: 0,
cleanupFailed: 0,
},
}
const startTime = Date.now()
const MAX_BATCHES = 10
// Process enhanced logging cleanup
let batchesProcessed = 0
let hasMoreLogs = true
logger.info(`Starting enhanced logs cleanup for ${workflowIds.length} workflows`)
while (hasMoreLogs && batchesProcessed < MAX_BATCHES) {
const oldLogs = await db
// Query enhanced execution logs that need cleanup
const oldEnhancedLogs = await db
.select({
id: workflowLogs.id,
workflowId: workflowLogs.workflowId,
executionId: workflowLogs.executionId,
level: workflowLogs.level,
message: workflowLogs.message,
duration: workflowLogs.duration,
trigger: workflowLogs.trigger,
createdAt: workflowLogs.createdAt,
metadata: workflowLogs.metadata,
id: workflowExecutionLogs.id,
workflowId: workflowExecutionLogs.workflowId,
executionId: workflowExecutionLogs.executionId,
stateSnapshotId: workflowExecutionLogs.stateSnapshotId,
level: workflowExecutionLogs.level,
message: workflowExecutionLogs.message,
trigger: workflowExecutionLogs.trigger,
startedAt: workflowExecutionLogs.startedAt,
endedAt: workflowExecutionLogs.endedAt,
totalDurationMs: workflowExecutionLogs.totalDurationMs,
blockCount: workflowExecutionLogs.blockCount,
successCount: workflowExecutionLogs.successCount,
errorCount: workflowExecutionLogs.errorCount,
skippedCount: workflowExecutionLogs.skippedCount,
totalCost: workflowExecutionLogs.totalCost,
totalInputCost: workflowExecutionLogs.totalInputCost,
totalOutputCost: workflowExecutionLogs.totalOutputCost,
totalTokens: workflowExecutionLogs.totalTokens,
metadata: workflowExecutionLogs.metadata,
createdAt: workflowExecutionLogs.createdAt,
})
.from(workflowLogs)
.from(workflowExecutionLogs)
.where(
and(
inArray(workflowLogs.workflowId, workflowIds),
lt(workflowLogs.createdAt, retentionDate)
inArray(workflowExecutionLogs.workflowId, workflowIds),
lt(workflowExecutionLogs.createdAt, retentionDate)
)
)
.limit(BATCH_SIZE)
results.total += oldLogs.length
results.enhancedLogs.total += oldEnhancedLogs.length
for (const log of oldLogs) {
for (const log of oldEnhancedLogs) {
const today = new Date().toISOString().split('T')[0]
const logKey = `archived-logs/${today}/${log.id}.json`
const logData = JSON.stringify(log)
// Archive enhanced log with more detailed structure
const enhancedLogKey = `archived-enhanced-logs/${today}/${log.id}.json`
const enhancedLogData = JSON.stringify({
...log,
archivedAt: new Date().toISOString(),
logType: 'enhanced',
})
try {
await getS3Client().send(
new PutObjectCommand({
Bucket: S3_CONFIG.bucket,
Key: logKey,
Body: logData,
Key: enhancedLogKey,
Body: enhancedLogData,
ContentType: 'application/json',
Metadata: {
logId: String(log.id),
workflowId: String(log.workflowId),
executionId: String(log.executionId),
logType: 'enhanced',
archivedAt: new Date().toISOString(),
},
})
)
results.archived++
results.enhancedLogs.archived++
try {
// Delete enhanced log (will cascade to workflowExecutionBlocks due to foreign key)
const deleteResult = await db
.delete(workflowLogs)
.where(eq(workflowLogs.id, log.id))
.returning({ id: workflowLogs.id })
.delete(workflowExecutionLogs)
.where(eq(workflowExecutionLogs.id, log.id))
.returning({ id: workflowExecutionLogs.id })
if (deleteResult.length > 0) {
results.deleted++
results.enhancedLogs.deleted++
} else {
results.deleteFailed++
logger.warn(`Failed to delete log ${log.id} after archiving: No rows deleted`)
results.enhancedLogs.deleteFailed++
logger.warn(
`Failed to delete enhanced log ${log.id} after archiving: No rows deleted`
)
}
} catch (deleteError) {
results.deleteFailed++
logger.error(`Error deleting log ${log.id} after archiving:`, { deleteError })
results.enhancedLogs.deleteFailed++
logger.error(`Error deleting enhanced log ${log.id} after archiving:`, { deleteError })
}
} catch (archiveError) {
results.archiveFailed++
logger.error(`Failed to archive log ${log.id}:`, { archiveError })
results.enhancedLogs.archiveFailed++
logger.error(`Failed to archive enhanced log ${log.id}:`, { archiveError })
}
}
batchesProcessed++
hasMoreLogs = oldLogs.length === BATCH_SIZE
hasMoreLogs = oldEnhancedLogs.length === BATCH_SIZE
logger.info(`Processed batch ${batchesProcessed}: ${oldLogs.length} logs`)
logger.info(
`Processed enhanced logs batch ${batchesProcessed}: ${oldEnhancedLogs.length} logs`
)
}
// Cleanup orphaned snapshots
try {
const snapshotRetentionDays = Number(env.FREE_PLAN_LOG_RETENTION_DAYS || '7') + 1 // Keep snapshots 1 day longer
const cleanedSnapshots = await snapshotService.cleanupOrphanedSnapshots(snapshotRetentionDays)
results.snapshots.cleaned = cleanedSnapshots
logger.info(`Cleaned up ${cleanedSnapshots} orphaned snapshots`)
} catch (snapshotError) {
results.snapshots.cleanupFailed = 1
logger.error('Error cleaning up orphaned snapshots:', { snapshotError })
}
const timeElapsed = (Date.now() - startTime) / 1000
const reachedLimit = batchesProcessed >= MAX_BATCHES && hasMoreLogs
return NextResponse.json({
message: `Processed ${batchesProcessed} batches (${results.total} logs) in ${timeElapsed.toFixed(2)}s${reachedLimit ? ' (batch limit reached)' : ''}`,
message: `Processed ${batchesProcessed} enhanced log batches (${results.enhancedLogs.total} logs) in ${timeElapsed.toFixed(2)}s${reachedLimit ? ' (batch limit reached)' : ''}`,
results,
complete: !hasMoreLogs,
batchLimitReached: reachedLimit,

View File

@@ -0,0 +1,499 @@
import { and, desc, eq, gte, inArray, lte, or, type SQL, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console-logger'
import { db } from '@/db'
import { workflow, workflowExecutionBlocks, workflowExecutionLogs } from '@/db/schema'
const logger = createLogger('EnhancedLogsAPI')
// Helper function to extract block executions from trace spans
// Flattens a trace-span tree into block-execution records.
// Only spans carrying a blockId produce a record; traversal is pre-order
// (a parent span's record precedes its children's).
function extractBlockExecutionsFromTraceSpans(traceSpans: any[]): any[] {
  const collected: any[] = []

  const visit = (span: any): void => {
    if (span.blockId) {
      collected.push({
        id: span.id,
        blockId: span.blockId,
        blockName: span.name || '',
        blockType: span.type,
        startedAt: span.startTime,
        endedAt: span.endTime,
        durationMs: span.duration || 0,
        status: span.status || 'success',
        errorMessage: span.output?.error || undefined,
        inputData: span.input || {},
        outputData: span.output || {},
        cost: span.cost || undefined,
        metadata: {},
      })
    }
    // Recurse into child spans, preserving their order
    const children = span.children
    if (Array.isArray(children)) {
      for (const child of children) visit(child)
    }
  }

  for (const span of traceSpans) visit(span)
  return collected
}
export const dynamic = 'force-dynamic'
export const revalidate = 0

/**
 * Boolean query-string parameter parser.
 *
 * `z.coerce.boolean()` runs `Boolean(value)`, so ANY non-empty string —
 * including the literal string 'false' — coerces to `true`. Query params
 * always arrive as strings, so a flag is enabled only by the exact string
 * 'true' (or a genuine boolean `true`); anything else, including absence,
 * yields `false` — matching the previous `.default(false)` behavior.
 */
const booleanQueryParam = z
  .union([z.string(), z.boolean()])
  .optional()
  .transform((value) => value === true || value === 'true')

// Validated shape of the enhanced-logs listing query string.
const QueryParamsSchema = z.object({
  includeWorkflow: booleanQueryParam,
  includeBlocks: booleanQueryParam,
  limit: z.coerce.number().optional().default(100),
  offset: z.coerce.number().optional().default(0),
  level: z.string().optional(),
  workflowIds: z.string().optional(), // Comma-separated list of workflow IDs
  folderIds: z.string().optional(), // Comma-separated list of folder IDs
  triggers: z.string().optional(), // Comma-separated list of trigger types
  startDate: z.string().optional(),
  endDate: z.string().optional(),
  search: z.string().optional(),
})
/**
 * GET handler: paginated, filterable listing of enhanced workflow execution
 * logs for the authenticated user's workflows.
 *
 * Query params (see QueryParamsSchema): level, workflowIds, folderIds,
 * triggers, startDate/endDate, search, limit/offset pagination, plus
 * includeWorkflow / includeBlocks to enrich the response.
 *
 * Responses: 401 unauthenticated, 400 invalid params, 500 unexpected error,
 * otherwise 200 with { data, total, page, pageSize, totalPages }.
 */
export async function GET(request: NextRequest) {
  // Short correlation id included in every log line for this request
  const requestId = crypto.randomUUID().slice(0, 8)

  try {
    const session = await getSession()
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthorized enhanced logs access attempt`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    const userId = session.user.id

    try {
      const { searchParams } = new URL(request.url)
      const params = QueryParamsSchema.parse(Object.fromEntries(searchParams.entries()))

      // Get user's workflows (id + folderId; folderId is used for folder filtering)
      const userWorkflows = await db
        .select({ id: workflow.id, folderId: workflow.folderId })
        .from(workflow)
        .where(eq(workflow.userId, userId))

      const userWorkflowIds = userWorkflows.map((w) => w.id)

      if (userWorkflowIds.length === 0) {
        return NextResponse.json({ data: [], total: 0 }, { status: 200 })
      }

      // Build conditions for enhanced logs; base condition scopes every
      // query below to workflows the user owns
      let conditions: SQL | undefined = inArray(workflowExecutionLogs.workflowId, userWorkflowIds)

      // Filter by level
      if (params.level && params.level !== 'all') {
        conditions = and(conditions, eq(workflowExecutionLogs.level, params.level))
      }

      // Filter by specific workflow IDs (restricted to the user's own workflows)
      if (params.workflowIds) {
        const workflowIds = params.workflowIds.split(',').filter(Boolean)
        const filteredWorkflowIds = workflowIds.filter((id) => userWorkflowIds.includes(id))
        // NOTE(review): if every requested ID is filtered out (none owned by
        // this user) the filter is silently dropped and ALL of the user's
        // logs are returned — confirm this fall-through is intended.
        if (filteredWorkflowIds.length > 0) {
          conditions = and(
            conditions,
            inArray(workflowExecutionLogs.workflowId, filteredWorkflowIds)
          )
        }
      }

      // Filter by folder IDs (resolved to the workflows inside those folders)
      if (params.folderIds) {
        const folderIds = params.folderIds.split(',').filter(Boolean)
        const workflowsInFolders = userWorkflows
          .filter((w) => w.folderId && folderIds.includes(w.folderId))
          .map((w) => w.id)
        // NOTE(review): same silent fall-through as workflowIds when the
        // requested folders contain no workflows — verify intent.
        if (workflowsInFolders.length > 0) {
          conditions = and(
            conditions,
            inArray(workflowExecutionLogs.workflowId, workflowsInFolders)
          )
        }
      }

      // Filter by triggers ('all' disables the filter)
      if (params.triggers) {
        const triggers = params.triggers.split(',').filter(Boolean)
        if (triggers.length > 0 && !triggers.includes('all')) {
          conditions = and(conditions, inArray(workflowExecutionLogs.trigger, triggers))
        }
      }

      // Filter by date range (inclusive bounds on startedAt)
      if (params.startDate) {
        conditions = and(
          conditions,
          gte(workflowExecutionLogs.startedAt, new Date(params.startDate))
        )
      }
      if (params.endDate) {
        conditions = and(conditions, lte(workflowExecutionLogs.startedAt, new Date(params.endDate)))
      }

      // Filter by search query (case-insensitive match on message or execution id)
      if (params.search) {
        const searchTerm = `%${params.search}%`
        conditions = and(
          conditions,
          or(
            sql`${workflowExecutionLogs.message} ILIKE ${searchTerm}`,
            sql`${workflowExecutionLogs.executionId} ILIKE ${searchTerm}`
          )
        )
      }

      // Execute the query, newest executions first
      const logs = await db
        .select()
        .from(workflowExecutionLogs)
        .where(conditions)
        .orderBy(desc(workflowExecutionLogs.startedAt))
        .limit(params.limit)
        .offset(params.offset)

      // Get total count for pagination (same conditions, no limit/offset)
      const countResult = await db
        .select({ count: sql<number>`count(*)` })
        .from(workflowExecutionLogs)
        .where(conditions)
      const count = countResult[0]?.count || 0

      // Get block executions for all workflow executions on this page
      const executionIds = logs.map((log) => log.executionId)
      let blockExecutionsByExecution: Record<string, any[]> = {}

      if (executionIds.length > 0) {
        const blockLogs = await db
          .select()
          .from(workflowExecutionBlocks)
          .where(inArray(workflowExecutionBlocks.executionId, executionIds))
          .orderBy(workflowExecutionBlocks.startedAt)

        // Group block logs by execution ID, normalizing each row into the
        // shape the UI consumes (ISO timestamps, optional cost summary)
        blockExecutionsByExecution = blockLogs.reduce(
          (acc, blockLog) => {
            if (!acc[blockLog.executionId]) {
              acc[blockLog.executionId] = []
            }
            acc[blockLog.executionId].push({
              id: blockLog.id,
              blockId: blockLog.blockId,
              blockName: blockLog.blockName || '',
              blockType: blockLog.blockType,
              startedAt: blockLog.startedAt.toISOString(),
              // Fall back to startedAt for blocks that never recorded an end
              endedAt: blockLog.endedAt?.toISOString() || blockLog.startedAt.toISOString(),
              durationMs: blockLog.durationMs || 0,
              status: blockLog.status,
              errorMessage: blockLog.errorMessage || undefined,
              errorStackTrace: blockLog.errorStackTrace || undefined,
              inputData: blockLog.inputData,
              outputData: blockLog.outputData,
              cost: blockLog.costTotal
                ? {
                    input: Number(blockLog.costInput) || 0,
                    output: Number(blockLog.costOutput) || 0,
                    total: Number(blockLog.costTotal) || 0,
                    tokens: {
                      prompt: blockLog.tokensPrompt || 0,
                      completion: blockLog.tokensCompletion || 0,
                      total: blockLog.tokensTotal || 0,
                    },
                    model: blockLog.modelUsed || '',
                  }
                : undefined,
              metadata: blockLog.metadata || {},
            })
            return acc
          },
          {} as Record<string, any[]>
        )
      }

      // Create clean trace spans from block executions (fallback when no
      // spans were stored in the log's metadata)
      const createTraceSpans = (blockExecutions: any[]) => {
        return blockExecutions.map((block, index) => {
          // For error blocks, include error information in the output
          let output = block.outputData
          if (block.status === 'error' && block.errorMessage) {
            output = {
              ...output,
              error: block.errorMessage,
              stackTrace: block.errorStackTrace,
            }
          }

          return {
            id: block.id,
            name: `Block ${block.blockName || block.blockType} (${block.blockType})`,
            type: block.blockType,
            duration: block.durationMs,
            startTime: block.startedAt,
            endTime: block.endedAt,
            status: block.status === 'success' ? 'success' : 'error',
            blockId: block.blockId,
            input: block.inputData,
            output,
            tokens: block.cost?.tokens?.total || 0,
            // Synthetic stagger so spans render sequentially in the UI
            relativeStartMs: index * 100,
            children: [],
            toolCalls: [],
          }
        })
      }

      // Extract cost information from block executions, aggregated overall
      // and per model
      const extractCostSummary = (blockExecutions: any[]) => {
        let totalCost = 0
        let totalInputCost = 0
        let totalOutputCost = 0
        let totalTokens = 0
        let totalPromptTokens = 0
        let totalCompletionTokens = 0
        const models = new Map()

        blockExecutions.forEach((block) => {
          if (block.cost) {
            totalCost += Number(block.cost.total) || 0
            totalInputCost += Number(block.cost.input) || 0
            totalOutputCost += Number(block.cost.output) || 0
            totalTokens += block.cost.tokens?.total || 0
            totalPromptTokens += block.cost.tokens?.prompt || 0
            totalCompletionTokens += block.cost.tokens?.completion || 0

            // Track per-model costs
            if (block.cost.model) {
              if (!models.has(block.cost.model)) {
                models.set(block.cost.model, {
                  input: 0,
                  output: 0,
                  total: 0,
                  tokens: { prompt: 0, completion: 0, total: 0 },
                })
              }
              const modelCost = models.get(block.cost.model)
              modelCost.input += Number(block.cost.input) || 0
              modelCost.output += Number(block.cost.output) || 0
              modelCost.total += Number(block.cost.total) || 0
              modelCost.tokens.prompt += block.cost.tokens?.prompt || 0
              modelCost.tokens.completion += block.cost.tokens?.completion || 0
              modelCost.tokens.total += block.cost.tokens?.total || 0
            }
          }
        })

        return {
          total: totalCost,
          input: totalInputCost,
          output: totalOutputCost,
          tokens: {
            total: totalTokens,
            prompt: totalPromptTokens,
            completion: totalCompletionTokens,
          },
          models: Object.fromEntries(models), // Convert Map to object for JSON serialization
        }
      }

      // Transform to clean enhanced log format
      const enhancedLogs = logs.map((log) => {
        const blockExecutions = blockExecutionsByExecution[log.executionId] || []

        // Use stored trace spans from metadata if available, otherwise create from block executions
        const storedTraceSpans = (log.metadata as any)?.traceSpans
        const traceSpans =
          storedTraceSpans && Array.isArray(storedTraceSpans) && storedTraceSpans.length > 0
            ? storedTraceSpans
            : createTraceSpans(blockExecutions)

        // Use extracted cost summary if available, otherwise use stored values
        const costSummary =
          blockExecutions.length > 0
            ? extractCostSummary(blockExecutions)
            : {
                input: Number(log.totalInputCost) || 0,
                output: Number(log.totalOutputCost) || 0,
                total: Number(log.totalCost) || 0,
                tokens: {
                  total: log.totalTokens || 0,
                  prompt: (log.metadata as any)?.tokenBreakdown?.prompt || 0,
                  completion: (log.metadata as any)?.tokenBreakdown?.completion || 0,
                },
                models: (log.metadata as any)?.models || {},
              }

        return {
          id: log.id,
          workflowId: log.workflowId,
          executionId: log.executionId,
          level: log.level,
          message: log.message,
          duration: log.totalDurationMs ? `${log.totalDurationMs}ms` : null,
          trigger: log.trigger,
          createdAt: log.startedAt.toISOString(),
          metadata: {
            totalDuration: log.totalDurationMs,
            cost: costSummary,
            blockStats: {
              total: log.blockCount,
              success: log.successCount,
              error: log.errorCount,
              skipped: log.skippedCount,
            },
            traceSpans,
            blockExecutions,
            enhanced: true,
          },
        }
      })

      // Attach the owning workflow record to each log when requested.
      // NOTE(review): takes precedence over includeBlocks (returns early);
      // also, if `logs` is empty, inArray receives an empty id list —
      // confirm the drizzle version in use tolerates that.
      if (params.includeWorkflow) {
        const workflowIds = [...new Set(logs.map((log) => log.workflowId))]
        const workflowConditions = inArray(workflow.id, workflowIds)
        const workflowData = await db.select().from(workflow).where(workflowConditions)
        const workflowMap = new Map(workflowData.map((w) => [w.id, w]))

        const logsWithWorkflow = enhancedLogs.map((log) => ({
          ...log,
          workflow: workflowMap.get(log.workflowId) || null,
        }))

        return NextResponse.json(
          {
            data: logsWithWorkflow,
            total: Number(count),
            // NOTE(review): page math divides by params.limit — a caller
            // passing limit=0 would produce Infinity/NaN here; verify the
            // schema should enforce a positive limit.
            page: Math.floor(params.offset / params.limit) + 1,
            pageSize: params.limit,
            totalPages: Math.ceil(Number(count) / params.limit),
          },
          { status: 200 }
        )
      }

      // Include block execution data if requested
      if (params.includeBlocks) {
        const executionIds = logs.map((log) => log.executionId)

        if (executionIds.length > 0) {
          const blockLogs = await db
            .select()
            .from(workflowExecutionBlocks)
            .where(inArray(workflowExecutionBlocks.executionId, executionIds))
            .orderBy(workflowExecutionBlocks.startedAt)

          // Group block logs by execution ID (same normalization as above,
          // minus errorStackTrace and row metadata)
          const blockLogsByExecution = blockLogs.reduce(
            (acc, blockLog) => {
              if (!acc[blockLog.executionId]) {
                acc[blockLog.executionId] = []
              }
              acc[blockLog.executionId].push({
                id: blockLog.id,
                blockId: blockLog.blockId,
                blockName: blockLog.blockName || '',
                blockType: blockLog.blockType,
                startedAt: blockLog.startedAt.toISOString(),
                endedAt: blockLog.endedAt?.toISOString() || blockLog.startedAt.toISOString(),
                durationMs: blockLog.durationMs || 0,
                status: blockLog.status,
                errorMessage: blockLog.errorMessage || undefined,
                inputData: blockLog.inputData,
                outputData: blockLog.outputData,
                cost: blockLog.costTotal
                  ? {
                      input: Number(blockLog.costInput) || 0,
                      output: Number(blockLog.costOutput) || 0,
                      total: Number(blockLog.costTotal) || 0,
                      tokens: {
                        prompt: blockLog.tokensPrompt || 0,
                        completion: blockLog.tokensCompletion || 0,
                        total: blockLog.tokensTotal || 0,
                      },
                      model: blockLog.modelUsed || '',
                    }
                  : undefined,
              })
              return acc
            },
            {} as Record<string, any[]>
          )

          // For executions with no block logs in the database,
          // extract block executions from stored trace spans in metadata
          logs.forEach((log) => {
            if (
              !blockLogsByExecution[log.executionId] ||
              blockLogsByExecution[log.executionId].length === 0
            ) {
              const storedTraceSpans = (log.metadata as any)?.traceSpans
              if (storedTraceSpans && Array.isArray(storedTraceSpans)) {
                blockLogsByExecution[log.executionId] =
                  extractBlockExecutionsFromTraceSpans(storedTraceSpans)
              }
            }
          })

          // Add block logs to metadata
          const logsWithBlocks = enhancedLogs.map((log) => ({
            ...log,
            metadata: {
              ...log.metadata,
              blockExecutions: blockLogsByExecution[log.executionId] || [],
            },
          }))

          return NextResponse.json(
            {
              data: logsWithBlocks,
              total: Number(count),
              page: Math.floor(params.offset / params.limit) + 1,
              pageSize: params.limit,
              totalPages: Math.ceil(Number(count) / params.limit),
            },
            { status: 200 }
          )
        }
      }

      // Return basic logs
      return NextResponse.json(
        {
          data: enhancedLogs,
          total: Number(count),
          page: Math.floor(params.offset / params.limit) + 1,
          pageSize: params.limit,
          totalPages: Math.ceil(Number(count) / params.limit),
        },
        { status: 200 }
      )
    } catch (validationError) {
      // Zod failures become a 400 with field-level details; anything else
      // bubbles up to the outer handler
      if (validationError instanceof z.ZodError) {
        logger.warn(`[${requestId}] Invalid enhanced logs request parameters`, {
          errors: validationError.errors,
        })
        return NextResponse.json(
          {
            error: 'Invalid request parameters',
            details: validationError.errors,
          },
          { status: 400 }
        )
      }
      throw validationError
    }
  } catch (error: any) {
    logger.error(`[${requestId}] Enhanced logs fetch error`, error)
    return NextResponse.json({ error: error.message }, { status: 500 })
  }
}

View File

@@ -1,4 +1,4 @@
import { and, eq, isNull } from 'drizzle-orm'
import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { createLogger } from '@/lib/logs/console-logger'
import { db } from '@/db'
@@ -40,7 +40,7 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
const memories = await db
.select()
.from(memory)
.where(and(eq(memory.key, id), eq(memory.workflowId, workflowId), isNull(memory.deletedAt)))
.where(and(eq(memory.key, id), eq(memory.workflowId, workflowId)))
.orderBy(memory.createdAt)
.limit(1)
@@ -112,7 +112,7 @@ export async function DELETE(
const existingMemory = await db
.select({ id: memory.id })
.from(memory)
.where(and(eq(memory.key, id), eq(memory.workflowId, workflowId), isNull(memory.deletedAt)))
.where(and(eq(memory.key, id), eq(memory.workflowId, workflowId)))
.limit(1)
if (existingMemory.length === 0) {
@@ -128,14 +128,8 @@ export async function DELETE(
)
}
// Soft delete by setting deletedAt timestamp
await db
.update(memory)
.set({
deletedAt: new Date(),
updatedAt: new Date(),
})
.where(and(eq(memory.key, id), eq(memory.workflowId, workflowId)))
// Hard delete the memory
await db.delete(memory).where(and(eq(memory.key, id), eq(memory.workflowId, workflowId)))
logger.info(`[${requestId}] Memory deleted successfully: ${id} for workflow: ${workflowId}`)
return NextResponse.json(
@@ -202,7 +196,7 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
const existingMemories = await db
.select()
.from(memory)
.where(and(eq(memory.key, id), eq(memory.workflowId, workflowId), isNull(memory.deletedAt)))
.where(and(eq(memory.key, id), eq(memory.workflowId, workflowId)))
.limit(1)
if (existingMemories.length === 0) {
@@ -250,13 +244,7 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
}
// Update the memory with new data
await db
.update(memory)
.set({
data,
updatedAt: new Date(),
})
.where(and(eq(memory.key, id), eq(memory.workflowId, workflowId)))
await db.delete(memory).where(and(eq(memory.key, id), eq(memory.workflowId, workflowId)))
// Fetch the updated memory
const updatedMemories = await db

View File

@@ -5,7 +5,6 @@
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
mockExecutionDependencies,
mockScheduleExecuteDb,
sampleWorkflowState,
@@ -23,7 +22,7 @@ describe('Scheduled Workflow Execution API Route', () => {
blocks: sampleWorkflowState.blocks,
edges: sampleWorkflowState.edges || [],
loops: sampleWorkflowState.loops || {},
parallels: sampleWorkflowState.parallels || {},
parallels: {},
isFromNormalizedTables: true,
}),
}))
@@ -122,9 +121,8 @@ describe('Scheduled Workflow Execution API Route', () => {
})),
}))
const req = createMockRequest('GET')
const { GET } = await import('./route')
const response = await GET(req)
const response = await GET()
expect(response).toBeDefined()
const data = await response.json()
@@ -136,7 +134,6 @@ describe('Scheduled Workflow Execution API Route', () => {
const persistExecutionErrorMock = vi.fn().mockResolvedValue(undefined)
vi.doMock('@/lib/logs/execution-logger', () => ({
persistExecutionLogs: vi.fn().mockResolvedValue(undefined),
persistExecutionError: persistExecutionErrorMock,
}))
@@ -146,9 +143,8 @@ describe('Scheduled Workflow Execution API Route', () => {
})),
}))
const req = createMockRequest('GET')
const { GET } = await import('./route')
const response = await GET(req)
const response = await GET()
expect(response).toBeDefined()
@@ -176,9 +172,8 @@ describe('Scheduled Workflow Execution API Route', () => {
return { db: mockDb }
})
const req = createMockRequest('GET')
const { GET } = await import('./route')
const response = await GET(req)
const response = await GET()
expect(response.status).toBe(200)
const data = await response.json()
expect(data).toHaveProperty('executedCount', 0)
@@ -205,9 +200,8 @@ describe('Scheduled Workflow Execution API Route', () => {
return { db: mockDb }
})
const req = createMockRequest('GET')
const { GET } = await import('./route')
const response = await GET(req)
const response = await GET()
expect(response.status).toBe(500)
const data = await response.json()
@@ -238,9 +232,8 @@ describe('Scheduled Workflow Execution API Route', () => {
],
})
const req = createMockRequest('GET')
const { GET } = await import('./route')
const response = await GET(req)
const response = await GET()
expect(response.status).toBe(200)
})
@@ -269,9 +262,8 @@ describe('Scheduled Workflow Execution API Route', () => {
],
})
const req = createMockRequest('GET')
const { GET } = await import('./route')
const response = await GET(req)
const response = await GET()
expect(response.status).toBe(200)
const data = await response.json()

View File

@@ -1,10 +1,10 @@
import { Cron } from 'croner'
import { and, eq, lte, not, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { z } from 'zod'
import { createLogger } from '@/lib/logs/console-logger'
import { persistExecutionError, persistExecutionLogs } from '@/lib/logs/execution-logger'
import { EnhancedLoggingSession } from '@/lib/logs/enhanced-logging-session'
import { buildTraceSpans } from '@/lib/logs/trace-spans'
import {
type BlockState,
@@ -17,7 +17,7 @@ import { decryptSecret } from '@/lib/utils'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/db-helpers'
import { updateWorkflowRunCounts } from '@/lib/workflows/utils'
import { db } from '@/db'
import { environment, userStats, workflow, workflowSchedule } from '@/db/schema'
import { environment as environmentTable, userStats, workflow, workflowSchedule } from '@/db/schema'
import { Executor } from '@/executor'
import { Serializer } from '@/serializer'
import { mergeSubblockState } from '@/stores/workflows/server-utils'
@@ -58,7 +58,7 @@ const EnvVarsSchema = z.record(z.string())
const runningExecutions = new Set<string>()
export async function GET(req: NextRequest) {
export async function GET() {
logger.info(`Scheduled execution triggered at ${new Date().toISOString()}`)
const requestId = crypto.randomUUID().slice(0, 8)
const now = new Date()
@@ -85,6 +85,7 @@ export async function GET(req: NextRequest) {
for (const schedule of dueSchedules) {
const executionId = uuidv4()
let loggingSession: EnhancedLoggingSession | null = null
try {
if (runningExecutions.has(schedule.workflowId)) {
@@ -118,15 +119,7 @@ export async function GET(req: NextRequest) {
}
)
await persistExecutionError(
schedule.workflowId,
executionId,
new Error(
usageCheck.message ||
'Usage limit exceeded. Please upgrade your plan to continue running scheduled workflows.'
),
'schedule'
)
// Error logging handled by enhanced logging session
const retryDelay = 24 * 60 * 60 * 1000 // 24 hour delay for exceeded limits
const nextRetryAt = new Date(now.getTime() + retryDelay)
@@ -176,8 +169,8 @@ export async function GET(req: NextRequest) {
// Retrieve environment variables for this user (if any).
const [userEnv] = await db
.select()
.from(environment)
.where(eq(environment.userId, workflowRecord.userId))
.from(environmentTable)
.where(eq(environmentTable.userId, workflowRecord.userId))
.limit(1)
if (!userEnv) {
@@ -306,6 +299,30 @@ export async function GET(req: NextRequest) {
logger.debug(`[${requestId}] No workflow variables found for: ${schedule.workflowId}`)
}
// Start enhanced logging
loggingSession = new EnhancedLoggingSession(
schedule.workflowId,
executionId,
'schedule',
requestId
)
// Load the actual workflow state from normalized tables
const enhancedNormalizedData = await loadWorkflowFromNormalizedTables(schedule.workflowId)
if (!enhancedNormalizedData) {
throw new Error(
`Workflow ${schedule.workflowId} has no normalized data available. Ensure the workflow is properly saved to normalized tables.`
)
}
// Start enhanced logging with environment variables
await loggingSession.safeStart({
userId: workflowRecord.userId,
workspaceId: workflowRecord.workspaceId || '',
variables: variables || {},
})
const executor = new Executor(
serializedWorkflow,
processedBlockStates,
@@ -313,6 +330,10 @@ export async function GET(req: NextRequest) {
input,
workflowVariables
)
// Set up enhanced logging on the executor
loggingSession.setupExecutor(executor)
const result = await executor.execute(schedule.workflowId)
const executionResult =
@@ -343,13 +364,16 @@ export async function GET(req: NextRequest) {
const { traceSpans, totalDuration } = buildTraceSpans(executionResult)
const enrichedResult = {
...executionResult,
traceSpans,
totalDuration,
}
// Individual block executions are automatically logged
// by the enhanced logging session
await persistExecutionLogs(schedule.workflowId, executionId, enrichedResult, 'schedule')
// Complete enhanced logging
await loggingSession.safeComplete({
endedAt: new Date().toISOString(),
totalDurationMs: totalDuration || 0,
finalOutput: executionResult.output || {},
traceSpans: (traceSpans || []) as any,
})
if (executionResult.success) {
logger.info(`[${requestId}] Workflow ${schedule.workflowId} executed successfully`)
@@ -413,7 +437,18 @@ export async function GET(req: NextRequest) {
error
)
await persistExecutionError(schedule.workflowId, executionId, error, 'schedule')
// Error logging handled by enhanced logging session
if (loggingSession) {
await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: 0,
error: {
message: error.message || 'Scheduled workflow execution failed',
stackTrace: error.stack,
},
})
}
let nextRunAt: Date
try {

View File

@@ -32,7 +32,6 @@ const executeMock = vi.fn().mockResolvedValue({
endTime: new Date().toISOString(),
},
})
const persistExecutionLogsMock = vi.fn().mockResolvedValue(undefined)
const persistExecutionErrorMock = vi.fn().mockResolvedValue(undefined)
// Mock the DB schema objects
@@ -80,7 +79,6 @@ vi.mock('@/executor', () => ({
}))
vi.mock('@/lib/logs/execution-logger', () => ({
persistExecutionLogs: persistExecutionLogsMock,
persistExecutionError: persistExecutionErrorMock,
}))

View File

@@ -139,7 +139,7 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
return createErrorResponse(validation.error.message, validation.error.status)
}
// Get the workflow to find the user
// Get the workflow to find the user (removed deprecated state column)
const workflowData = await db
.select({
userId: workflow.userId,

View File

@@ -88,6 +88,7 @@ describe('Workflow Execution API Route', () => {
vi.doMock('@/executor', () => ({
Executor: vi.fn().mockImplementation(() => ({
execute: executeMock,
setEnhancedLogger: vi.fn(),
})),
}))
@@ -104,6 +105,14 @@ describe('Workflow Execution API Route', () => {
persistExecutionError: vi.fn().mockResolvedValue(undefined),
}))
vi.doMock('@/lib/logs/enhanced-execution-logger', () => ({
enhancedExecutionLogger: {
startWorkflowExecution: vi.fn().mockResolvedValue(undefined),
logBlockExecution: vi.fn().mockResolvedValue(undefined),
completeWorkflowExecution: vi.fn().mockResolvedValue(undefined),
},
}))
vi.doMock('@/lib/logs/trace-spans', () => ({
buildTraceSpans: vi.fn().mockReturnValue({
traceSpans: [],
@@ -395,6 +404,7 @@ describe('Workflow Execution API Route', () => {
vi.doMock('@/executor', () => ({
Executor: vi.fn().mockImplementation(() => ({
execute: vi.fn().mockRejectedValue(new Error('Execution failed')),
setEnhancedLogger: vi.fn(),
})),
}))
@@ -418,10 +428,10 @@ describe('Workflow Execution API Route', () => {
expect(data).toHaveProperty('error')
expect(data.error).toContain('Execution failed')
// Verify error logger was called
const persistExecutionError = (await import('@/lib/logs/execution-logger'))
.persistExecutionError
expect(persistExecutionError).toHaveBeenCalled()
// Verify enhanced logger was called for error completion
const enhancedExecutionLogger = (await import('@/lib/logs/enhanced-execution-logger'))
.enhancedExecutionLogger
expect(enhancedExecutionLogger.completeWorkflowExecution).toHaveBeenCalled()
})
/**

View File

@@ -3,7 +3,7 @@ import { type NextRequest, NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { z } from 'zod'
import { createLogger } from '@/lib/logs/console-logger'
import { persistExecutionError, persistExecutionLogs } from '@/lib/logs/execution-logger'
import { EnhancedLoggingSession } from '@/lib/logs/enhanced-logging-session'
import { buildTraceSpans } from '@/lib/logs/trace-spans'
import { checkServerSideUsageLimits } from '@/lib/usage-monitor'
import { decryptSecret } from '@/lib/utils'
@@ -14,11 +14,10 @@ import {
workflowHasResponseBlock,
} from '@/lib/workflows/utils'
import { db } from '@/db'
import { environment, userStats } from '@/db/schema'
import { environment as environmentTable, userStats } from '@/db/schema'
import { Executor } from '@/executor'
import { Serializer } from '@/serializer'
import { mergeSubblockState } from '@/stores/workflows/server-utils'
import type { WorkflowState } from '@/stores/workflows/workflow/types'
import { validateWorkflowAccess } from '../../middleware'
import { createErrorResponse, createSuccessResponse } from '../../utils'
@@ -59,6 +58,8 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) {
throw new Error('Execution is already running')
}
const loggingSession = new EnhancedLoggingSession(workflowId, executionId, 'api', requestId)
// Check if the user has exceeded their usage limits
const usageCheck = await checkServerSideUsageLimits(workflow.userId)
if (usageCheck.isExceeded) {
@@ -92,39 +93,30 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) {
logger.debug(`[${requestId}] Loading workflow ${workflowId} from normalized tables`)
const normalizedData = await loadWorkflowFromNormalizedTables(workflowId)
let blocks: Record<string, any>
let edges: any[]
let loops: Record<string, any>
let parallels: Record<string, any>
if (normalizedData) {
// Use normalized data as primary source
;({ blocks, edges, loops, parallels } = normalizedData)
logger.info(`[${requestId}] Using normalized tables for workflow execution: ${workflowId}`)
} else {
// Fallback to deployed state if available (for legacy workflows)
logger.warn(
`[${requestId}] No normalized data found, falling back to deployed state for workflow: ${workflowId}`
if (!normalizedData) {
throw new Error(
`Workflow ${workflowId} has no normalized data available. Ensure the workflow is properly saved to normalized tables.`
)
if (!workflow.deployedState) {
throw new Error(
`Workflow ${workflowId} has no deployed state and no normalized data available`
)
}
const deployedState = workflow.deployedState as WorkflowState
;({ blocks, edges, loops, parallels } = deployedState)
}
// Use normalized data as primary source
const { blocks, edges, loops, parallels } = normalizedData
logger.info(`[${requestId}] Using normalized tables for workflow execution: ${workflowId}`)
logger.debug(`[${requestId}] Normalized data loaded:`, {
blocksCount: Object.keys(blocks || {}).length,
edgesCount: (edges || []).length,
loopsCount: Object.keys(loops || {}).length,
parallelsCount: Object.keys(parallels || {}).length,
})
// Use the same execution flow as in scheduled executions
const mergedStates = mergeSubblockState(blocks)
// Fetch the user's environment variables (if any)
const [userEnv] = await db
.select()
.from(environment)
.where(eq(environment.userId, workflow.userId))
.from(environmentTable)
.where(eq(environmentTable.userId, workflow.userId))
.limit(1)
if (!userEnv) {
@@ -133,9 +125,14 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) {
)
}
// Parse and validate environment variables.
const variables = EnvVarsSchema.parse(userEnv?.variables ?? {})
await loggingSession.safeStart({
userId: workflow.userId,
workspaceId: workflow.workspaceId,
variables,
})
// Replace environment variables in the block states
const currentBlockStates = await Object.entries(mergedStates).reduce(
async (accPromise, [id, block]) => {
@@ -200,18 +197,42 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) {
(acc, [blockId, blockState]) => {
// Check if this block has a responseFormat that needs to be parsed
if (blockState.responseFormat && typeof blockState.responseFormat === 'string') {
try {
logger.debug(`[${requestId}] Parsing responseFormat for block ${blockId}`)
// Attempt to parse the responseFormat if it's a string
const parsedResponseFormat = JSON.parse(blockState.responseFormat)
const responseFormatValue = blockState.responseFormat.trim()
// Check for variable references like <start.input>
if (responseFormatValue.startsWith('<') && responseFormatValue.includes('>')) {
logger.debug(
`[${requestId}] Response format contains variable reference for block ${blockId}`
)
// Keep variable references as-is - they will be resolved during execution
acc[blockId] = blockState
} else if (responseFormatValue === '') {
// Empty string - remove response format
acc[blockId] = {
...blockState,
responseFormat: parsedResponseFormat,
responseFormat: undefined,
}
} else {
try {
logger.debug(`[${requestId}] Parsing responseFormat for block ${blockId}`)
// Attempt to parse the responseFormat if it's a string
const parsedResponseFormat = JSON.parse(responseFormatValue)
acc[blockId] = {
...blockState,
responseFormat: parsedResponseFormat,
}
} catch (error) {
logger.warn(
`[${requestId}] Failed to parse responseFormat for block ${blockId}, using undefined`,
error
)
// Set to undefined instead of keeping malformed JSON - this allows execution to continue
acc[blockId] = {
...blockState,
responseFormat: undefined,
}
}
} catch (error) {
logger.warn(`[${requestId}] Failed to parse responseFormat for block ${blockId}`, error)
acc[blockId] = blockState
}
} else {
acc[blockId] = blockState
@@ -260,6 +281,9 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) {
workflowVariables
)
// Set up enhanced logging on the executor
loggingSession.setupExecutor(executor)
const result = await executor.execute(workflowId)
// Check if we got a StreamingExecution result (with stream + execution properties)
@@ -271,6 +295,9 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) {
executionTime: executionResult.metadata?.duration,
})
// Build trace spans from execution result (works for both success and failure)
const { traceSpans, totalDuration } = buildTraceSpans(executionResult)
// Update workflow run counts if execution was successful
if (executionResult.success) {
await updateWorkflowRunCounts(workflowId)
@@ -285,24 +312,26 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) {
.where(eq(userStats.userId, workflow.userId))
}
// Build trace spans from execution logs
const { traceSpans, totalDuration } = buildTraceSpans(executionResult)
// Add trace spans to the execution result
const enrichedResult = {
...executionResult,
traceSpans,
totalDuration,
}
// Log each execution step and the final result
await persistExecutionLogs(workflowId, executionId, enrichedResult, 'api')
await loggingSession.safeComplete({
endedAt: new Date().toISOString(),
totalDurationMs: totalDuration || 0,
finalOutput: executionResult.output || {},
traceSpans: (traceSpans || []) as any,
})
return executionResult
} catch (error: any) {
logger.error(`[${requestId}] Workflow execution failed: ${workflowId}`, error)
// Log the error
await persistExecutionError(workflowId, executionId, error, 'api')
await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: 0,
error: {
message: error.message || 'Workflow execution failed',
stackTrace: error.stack,
},
})
throw error
} finally {
runningExecutions.delete(executionKey)

View File

@@ -1,7 +1,7 @@
import type { NextRequest } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { createLogger } from '@/lib/logs/console-logger'
import { persistExecutionLogs, persistLog } from '@/lib/logs/execution-logger'
import { EnhancedLoggingSession } from '@/lib/logs/enhanced-logging-session'
import { buildTraceSpans } from '@/lib/logs/trace-spans'
import { validateWorkflowAccess } from '../../middleware'
import { createErrorResponse, createSuccessResponse } from '../../utils'
@@ -33,9 +33,25 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
// Check if this execution is from chat using only the explicit source flag
const isChatExecution = result.metadata?.source === 'chat'
// Use persistExecutionLogs which handles tool call extraction
// Use 'chat' trigger type for chat executions, otherwise 'manual'
await persistExecutionLogs(id, executionId, result, isChatExecution ? 'chat' : 'manual')
// Also log to enhanced system
const triggerType = isChatExecution ? 'chat' : 'manual'
const loggingSession = new EnhancedLoggingSession(id, executionId, triggerType, requestId)
await loggingSession.safeStart({
userId: '', // TODO: Get from session
workspaceId: '', // TODO: Get from workflow
variables: {},
})
// Build trace spans from execution logs
const { traceSpans } = buildTraceSpans(result)
await loggingSession.safeComplete({
endedAt: new Date().toISOString(),
totalDurationMs: result.metadata?.duration || 0,
finalOutput: result.output || {},
traceSpans,
})
return createSuccessResponse({
message: 'Execution logs persisted successfully',
@@ -52,21 +68,6 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
executionId,
})
// Persist each log using the original method
for (const log of logs) {
await persistLog({
id: uuidv4(),
workflowId: id,
executionId,
level: log.level,
message: log.message,
duration: log.duration,
trigger: log.trigger || 'manual',
createdAt: new Date(log.createdAt || new Date()),
metadata: log.metadata,
})
}
return createSuccessResponse({ message: 'Logs persisted successfully' })
} catch (error: any) {
logger.error(`[${requestId}] Error persisting logs for workflow: ${id}`, error)

View File

@@ -0,0 +1,121 @@
import crypto from 'crypto'
import { eq } from 'drizzle-orm'
import type { NextRequest } from 'next/server'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console-logger'
import { saveWorkflowToNormalizedTables } from '@/lib/workflows/db-helpers'
import { db } from '@/db'
import { workflow } from '@/db/schema'
import type { WorkflowState } from '@/stores/workflows/workflow/types'
import { validateWorkflowAccess } from '../../middleware'
import { createErrorResponse, createSuccessResponse } from '../../utils'
const logger = createLogger('RevertToDeployedAPI')
export const dynamic = 'force-dynamic'
export const runtime = 'nodejs'
/**
* POST /api/workflows/[id]/revert-to-deployed
* Revert workflow to its deployed state by saving deployed state to normalized tables
*/
export async function POST(request: NextRequest, { params }: { params: Promise<{ id: string }> }) {
const requestId = crypto.randomUUID().slice(0, 8)
const { id } = await params
try {
logger.debug(`[${requestId}] Reverting workflow to deployed state: ${id}`)
const validation = await validateWorkflowAccess(request, id, false)
if (validation.error) {
logger.warn(`[${requestId}] Workflow revert failed: ${validation.error.message}`)
return createErrorResponse(validation.error.message, validation.error.status)
}
const workflowData = validation.workflow
// Check if workflow is deployed and has deployed state
if (!workflowData.isDeployed || !workflowData.deployedState) {
logger.warn(`[${requestId}] Cannot revert: workflow is not deployed or has no deployed state`)
return createErrorResponse('Workflow is not deployed or has no deployed state', 400)
}
// Validate deployed state structure
const deployedState = workflowData.deployedState as WorkflowState
if (!deployedState.blocks || !deployedState.edges) {
logger.error(`[${requestId}] Invalid deployed state structure`, { deployedState })
return createErrorResponse('Invalid deployed state structure', 500)
}
logger.debug(`[${requestId}] Saving deployed state to normalized tables`, {
blocksCount: Object.keys(deployedState.blocks).length,
edgesCount: deployedState.edges.length,
loopsCount: Object.keys(deployedState.loops || {}).length,
parallelsCount: Object.keys(deployedState.parallels || {}).length,
})
// Save deployed state to normalized tables
const saveResult = await saveWorkflowToNormalizedTables(id, {
blocks: deployedState.blocks,
edges: deployedState.edges,
loops: deployedState.loops || {},
parallels: deployedState.parallels || {},
lastSaved: Date.now(),
isDeployed: workflowData.isDeployed,
deployedAt: workflowData.deployedAt,
deploymentStatuses: deployedState.deploymentStatuses || {},
hasActiveSchedule: deployedState.hasActiveSchedule || false,
hasActiveWebhook: deployedState.hasActiveWebhook || false,
})
if (!saveResult.success) {
logger.error(`[${requestId}] Failed to save deployed state to normalized tables`, {
error: saveResult.error,
})
return createErrorResponse(
saveResult.error || 'Failed to save deployed state to normalized tables',
500
)
}
// Update workflow's last_synced timestamp to indicate changes
await db
.update(workflow)
.set({
lastSynced: new Date(),
updatedAt: new Date(),
})
.where(eq(workflow.id, id))
// Notify socket server about the revert operation for real-time sync
try {
const socketServerUrl = process.env.SOCKET_SERVER_URL || 'http://localhost:3002'
await fetch(`${socketServerUrl}/api/workflow-reverted`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
workflowId: id,
timestamp: Date.now(),
}),
})
logger.debug(`[${requestId}] Notified socket server about workflow revert: ${id}`)
} catch (socketError) {
// Don't fail the request if socket notification fails
logger.warn(`[${requestId}] Failed to notify socket server about revert:`, socketError)
}
logger.info(`[${requestId}] Successfully reverted workflow to deployed state: ${id}`)
return createSuccessResponse({
message: 'Workflow successfully reverted to deployed state',
lastSaved: Date.now(),
})
} catch (error: any) {
logger.error(`[${requestId}] Error reverting workflow to deployed state: ${id}`, {
error: error.message,
stack: error.stack,
})
return createErrorResponse(error.message || 'Failed to revert workflow to deployed state', 500)
}
}

View File

@@ -274,14 +274,6 @@ describe('Workflow By ID API Route', () => {
}),
}))
const mockTransaction = vi.fn().mockImplementation(async (callback) => {
await callback({
delete: vi.fn().mockReturnValue({
where: vi.fn().mockResolvedValue(undefined),
}),
})
})
vi.doMock('@/db', () => ({
db: {
select: vi.fn().mockReturnValue({
@@ -291,7 +283,9 @@ describe('Workflow By ID API Route', () => {
}),
}),
}),
transaction: mockTransaction,
delete: vi.fn().mockReturnValue({
where: vi.fn().mockResolvedValue(undefined),
}),
},
}))
@@ -326,14 +320,6 @@ describe('Workflow By ID API Route', () => {
}),
}))
const mockTransaction = vi.fn().mockImplementation(async (callback) => {
await callback({
delete: vi.fn().mockReturnValue({
where: vi.fn().mockResolvedValue(undefined),
}),
})
})
vi.doMock('@/db', () => ({
db: {
select: vi.fn().mockReturnValue({
@@ -343,7 +329,9 @@ describe('Workflow By ID API Route', () => {
}),
}),
}),
transaction: mockTransaction,
delete: vi.fn().mockReturnValue({
where: vi.fn().mockResolvedValue(undefined),
}),
},
}))

View File

@@ -3,11 +3,12 @@ import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { verifyInternalToken } from '@/lib/auth/internal'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console-logger'
import { getUserEntityPermissions, hasAdminPermission } from '@/lib/permissions/utils'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/db-helpers'
import { db } from '@/db'
import { workflow, workflowBlocks, workflowEdges, workflowSubflows } from '@/db/schema'
import { workflow } from '@/db/schema'
const logger = createLogger('WorkflowByIdAPI')
@@ -206,16 +207,7 @@ export async function DELETE(
return NextResponse.json({ error: 'Access denied' }, { status: 403 })
}
// Delete workflow and all related data in a transaction
await db.transaction(async (tx) => {
// Delete from normalized tables first (foreign key constraints)
await tx.delete(workflowSubflows).where(eq(workflowSubflows.workflowId, workflowId))
await tx.delete(workflowEdges).where(eq(workflowEdges.workflowId, workflowId))
await tx.delete(workflowBlocks).where(eq(workflowBlocks.workflowId, workflowId))
// Delete the main workflow record
await tx.delete(workflow).where(eq(workflow.id, workflowId))
})
await db.delete(workflow).where(eq(workflow.id, workflowId))
const elapsed = Date.now() - startTime
logger.info(`[${requestId}] Successfully deleted workflow ${workflowId} in ${elapsed}ms`)
@@ -224,7 +216,7 @@ export async function DELETE(
// This prevents "Block not found" errors when collaborative updates try to process
// after the workflow has been deleted
try {
const socketUrl = process.env.SOCKET_SERVER_URL || 'http://localhost:3002'
const socketUrl = env.SOCKET_SERVER_URL || 'http://localhost:3002'
const socketResponse = await fetch(`${socketUrl}/api/workflow-deleted`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },

View File

@@ -2,13 +2,7 @@ import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console-logger'
import {
workflow,
workflowBlocks,
workflowEdges,
workflowSubflows,
workspaceMember,
} from '@/db/schema'
import { workflow, workspaceMember } from '@/db/schema'
const logger = createLogger('WorkspaceByIdAPI')
@@ -26,9 +20,9 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
const workspaceId = id
// Check if user has read access to this workspace
// Check if user has any access to this workspace
const userPermission = await getUserEntityPermissions(session.user.id, 'workspace', workspaceId)
if (userPermission !== 'read') {
if (!userPermission) {
return NextResponse.json({ error: 'Workspace not found or access denied' }, { status: 404 })
}
@@ -126,20 +120,10 @@ export async function DELETE(
// Delete workspace and all related data in a transaction
await db.transaction(async (tx) => {
// Get all workflows in this workspace
const workspaceWorkflows = await tx
.select({ id: workflow.id })
.from(workflow)
.where(eq(workflow.workspaceId, workspaceId))
// Delete all workflow-related data for each workflow
for (const wf of workspaceWorkflows) {
await tx.delete(workflowSubflows).where(eq(workflowSubflows.workflowId, wf.id))
await tx.delete(workflowEdges).where(eq(workflowEdges.workflowId, wf.id))
await tx.delete(workflowBlocks).where(eq(workflowBlocks.workflowId, wf.id))
}
// Delete all workflows in the workspace
// Delete all workflows in the workspace - database cascade will handle all workflow-related data
// The database cascade will handle deleting related workflow_blocks, workflow_edges, workflow_subflows,
// workflow_logs, workflow_execution_snapshots, workflow_execution_logs, workflow_execution_trace_spans,
// workflow_schedule, webhook, marketplace, chat, and memory records
await tx.delete(workflow).where(eq(workflow.workspaceId, workspaceId))
// Delete workspace members

View File

@@ -60,7 +60,7 @@ export async function GET(req: NextRequest) {
return NextResponse.json({ invitations })
} catch (error) {
console.error('Error fetching workspace invitations:', error)
logger.error('Error fetching workspace invitations:', error)
return NextResponse.json({ error: 'Failed to fetch invitations' }, { status: 500 })
}
}
@@ -204,7 +204,7 @@ export async function POST(req: NextRequest) {
return NextResponse.json({ success: true, invitation: invitationData })
} catch (error) {
console.error('Error creating workspace invitation:', error)
logger.error('Error creating workspace invitation:', error)
return NextResponse.json({ error: 'Failed to create invitation' }, { status: 500 })
}
}
@@ -252,9 +252,9 @@ async function sendInvitationEmail({
html: emailHtml,
})
console.log(`Invitation email sent to ${to}`)
logger.info(`Invitation email sent to ${to}`)
} catch (error) {
console.error('Error sending invitation email:', error)
logger.error('Error sending invitation email:', error)
// Continue even if email fails - the invitation is still created
}
}

View File

@@ -33,6 +33,7 @@ interface ChatConfig {
headerText?: string
}
authType?: 'public' | 'password' | 'email'
outputConfigs?: Array<{ blockId: string; path?: string }>
}
interface AudioStreamingOptions {
@@ -373,8 +374,16 @@ export default function ChatClient({ subdomain }: { subdomain: string }) {
const json = JSON.parse(line.substring(6))
const { blockId, chunk: contentChunk, event: eventType } = json
if (eventType === 'final') {
if (eventType === 'final' && json.data) {
setIsLoading(false)
// Process final execution result for field extraction
const result = json.data
const nonStreamingLogs =
result.logs?.filter((log: any) => !messageIdMap.has(log.blockId)) || []
// Chat field extraction will be handled by the backend using deployment outputConfigs
return
}

View File

@@ -36,7 +36,7 @@ export function ControlBar() {
const fetchLogs = async () => {
try {
const queryParams = buildQueryParams(1, 50) // Get first 50 logs for refresh
const response = await fetch(`/api/logs?${queryParams}`)
const response = await fetch(`/api/logs/enhanced?${queryParams}`)
if (!response.ok) {
throw new Error(`Error fetching logs: ${response.statusText}`)

View File

@@ -0,0 +1,99 @@
'use client'
import { useState } from 'react'
import { Eye, Maximize2, Minimize2, X } from 'lucide-react'
import { Badge } from '@/components/ui/badge'
import { Button } from '@/components/ui/button'
import { Dialog, DialogContent, DialogHeader, DialogTitle } from '@/components/ui/dialog'
import { cn } from '@/lib/utils'
import { FrozenCanvas } from './frozen-canvas'
interface FrozenCanvasModalProps {
executionId: string
workflowName?: string
trigger?: string
traceSpans?: any[] // TraceSpans data from log metadata
isOpen: boolean
onClose: () => void
}
/**
 * Modal wrapper around FrozenCanvas showing the exact workflow state that was
 * captured for a single execution. Supports toggling between a windowed and a
 * fullscreen layout.
 */
export function FrozenCanvasModal({
  executionId,
  workflowName,
  trigger,
  traceSpans,
  isOpen,
  onClose,
}: FrozenCanvasModalProps) {
  // Whether the dialog stretches to the full viewport
  const [fullscreen, setFullscreen] = useState(false)

  const handleToggleFullscreen = () => setFullscreen((prev) => !prev)

  return (
    <Dialog open={isOpen} onOpenChange={onClose}>
      <DialogContent
        className={cn(
          'flex flex-col gap-0 p-0',
          fullscreen
            ? 'h-[100vh] max-h-[100vh] w-[100vw] max-w-[100vw] rounded-none'
            : 'h-[90vh] max-h-[90vh] overflow-hidden sm:max-w-[1100px]'
        )}
        hideCloseButton={true}
      >
        {/* Header: title, workflow context badges, fullscreen/close controls */}
        <DialogHeader className='flex flex-row items-center justify-between border-b bg-background p-4'>
          <div className='flex items-center gap-3'>
            <Eye className='h-5 w-5 text-blue-500 dark:text-blue-400' />
            <div>
              <DialogTitle className='font-semibold text-foreground text-lg'>
                Logged Workflow State
              </DialogTitle>
              <div className='mt-1 flex items-center gap-2'>
                {workflowName && (
                  <span className='text-muted-foreground text-sm'>{workflowName}</span>
                )}
                {trigger && (
                  <Badge variant='secondary' className='text-xs'>
                    {trigger}
                  </Badge>
                )}
                <span className='font-mono text-muted-foreground text-xs'>
                  {executionId.slice(0, 8)}...
                </span>
              </div>
            </div>
          </div>

          <div className='flex items-center gap-2'>
            <Button
              variant='ghost'
              size='sm'
              onClick={handleToggleFullscreen}
              className='h-8 w-8 p-0'
            >
              {fullscreen ? <Minimize2 className='h-4 w-4' /> : <Maximize2 className='h-4 w-4' />}
            </Button>
            <Button variant='ghost' size='sm' onClick={onClose} className='h-8 w-8 p-0'>
              <X className='h-4 w-4' />
            </Button>
          </div>
        </DialogHeader>

        {/* Canvas Container */}
        <div className='min-h-0 flex-1'>
          <FrozenCanvas
            executionId={executionId}
            traceSpans={traceSpans}
            height='100%'
            width='100%'
          />
        </div>

        {/* Footer with instructions */}
        <div className='border-t bg-background px-6 py-3'>
          <div className='text-muted-foreground text-sm'>
            💡 Click on blocks to see their input and output data at execution time. This canvas
            shows the exact state of the workflow when this execution was captured.
          </div>
        </div>
      </DialogContent>
    </Dialog>
  )
}

View File

@@ -0,0 +1,467 @@
'use client'
import { useEffect, useState } from 'react'
import {
AlertCircle,
ChevronLeft,
ChevronRight,
Clock,
DollarSign,
Hash,
Loader2,
X,
Zap,
} from 'lucide-react'
import { Badge } from '@/components/ui/badge'
import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card'
import { createLogger } from '@/lib/logs/console-logger'
import { cn, redactApiKeys } from '@/lib/utils'
import { WorkflowPreview } from '@/app/workspace/[workspaceId]/w/components/workflow-preview/workflow-preview'
import type { WorkflowState } from '@/stores/workflows/workflow/types'
const logger = createLogger('FrozenCanvas')
/**
 * Normalizes a raw block-execution record into the display shape used by the
 * pinned-log card. API keys in the input/output payloads are redacted before
 * being returned for rendering.
 */
function formatExecutionData(executionData: any) {
  // Shape cost/token summaries, defaulting missing counters to 0;
  // absent objects stay null so the UI can hide the section entirely.
  const toCostSummary = (cost: any) =>
    cost
      ? {
          input: cost.input || 0,
          output: cost.output || 0,
          total: cost.total || 0,
        }
      : null
  const toTokenSummary = (tokens: any) =>
    tokens
      ? {
          prompt: tokens.prompt || 0,
          completion: tokens.completion || 0,
          total: tokens.total || 0,
        }
      : null

  return {
    blockName: executionData.blockName || 'Unknown Block',
    blockType: executionData.blockType || 'unknown',
    status: executionData.status,
    duration: executionData.durationMs ? `${executionData.durationMs}ms` : 'N/A',
    input: redactApiKeys(executionData.inputData || {}),
    output: redactApiKeys(executionData.outputData || {}),
    errorMessage: executionData.errorMessage,
    errorStackTrace: executionData.errorStackTrace,
    cost: toCostSummary(executionData.cost),
    tokens: toTokenSummary(executionData.tokens),
  }
}
/**
 * Resolves which iteration of a block execution should be displayed.
 *
 * For blocks that ran multiple times (loop/parallel iterations stored under
 * `iterations`), picks the entry at `currentIteration` (defaulting to 0).
 * Blocks without an iterations array are treated as a single execution.
 */
function getCurrentIterationData(blockExecutionData: any) {
  const iterations = blockExecutionData.iterations

  if (!Array.isArray(iterations)) {
    // Single-execution block: the record itself is the execution data
    return {
      executionData: blockExecutionData,
      currentIteration: 0,
      totalIterations: 1,
      hasMultipleIterations: false,
    }
  }

  const selectedIndex = blockExecutionData.currentIteration ?? 0
  return {
    executionData: iterations[selectedIndex],
    currentIteration: selectedIndex,
    // Prefer the recorded total (may exceed stored entries); fall back to count
    totalIterations: blockExecutionData.totalIterations ?? iterations.length,
    hasMultipleIterations: iterations.length > 1,
  }
}
/**
 * Floating card pinned to the top-right of the frozen canvas showing the
 * execution details (status, duration, cost, tokens, input/output, cost and
 * token breakdowns) for the currently pinned block, with prev/next navigation
 * when the block executed multiple iterations.
 */
function PinnedLogs({ executionData, onClose }: { executionData: any; onClose: () => void }) {
  // Index of the iteration currently shown; reset whenever a new block is pinned.
  const [currentIterationIndex, setCurrentIterationIndex] = useState(0)
  // Overlay the locally-selected iteration index onto the block's execution data.
  const iterationInfo = getCurrentIterationData({
    ...executionData,
    currentIteration: currentIterationIndex,
  })
  const formatted = formatExecutionData(iterationInfo.executionData)
  // NOTE(review): the nav-button disabled bounds use iterations?.length while
  // the "x / y" label uses iterationInfo.totalIterations (which can come from
  // metadata); these can disagree when totalIterations differs from the array
  // length — confirm intended.
  const totalIterations = executionData.iterations?.length || 1
  const goToPreviousIteration = () => {
    if (currentIterationIndex > 0) {
      setCurrentIterationIndex(currentIterationIndex - 1)
    }
  }
  const goToNextIteration = () => {
    if (currentIterationIndex < totalIterations - 1) {
      setCurrentIterationIndex(currentIterationIndex + 1)
    }
  }
  // Jump back to the first iteration whenever a different block's data is pinned.
  useEffect(() => {
    setCurrentIterationIndex(0)
  }, [executionData])
  return (
    <Card className='fixed top-4 right-4 z-[100] max-h-[calc(100vh-8rem)] w-96 overflow-y-auto border-border bg-background shadow-lg'>
      <CardHeader className='pb-3'>
        <div className='flex items-center justify-between'>
          <CardTitle className='flex items-center gap-2 text-foreground text-lg'>
            <Zap className='h-5 w-5' />
            {formatted.blockName}
          </CardTitle>
          <button onClick={onClose} className='rounded-sm p-1 text-foreground hover:bg-muted'>
            <X className='h-4 w-4' />
          </button>
        </div>
        <div className='flex items-center justify-between'>
          <div className='flex items-center gap-2'>
            <Badge variant={formatted.status === 'success' ? 'default' : 'destructive'}>
              {formatted.blockType}
            </Badge>
            <Badge variant='outline'>{formatted.status}</Badge>
          </div>
          {/* Iteration Navigation */}
          {iterationInfo.hasMultipleIterations && (
            <div className='flex items-center gap-1'>
              <button
                onClick={goToPreviousIteration}
                disabled={currentIterationIndex === 0}
                className='rounded p-1 text-muted-foreground hover:bg-muted hover:text-foreground disabled:cursor-not-allowed disabled:opacity-50'
              >
                <ChevronLeft className='h-4 w-4' />
              </button>
              <span className='px-2 text-muted-foreground text-xs'>
                {currentIterationIndex + 1} / {iterationInfo.totalIterations}
              </span>
              <button
                onClick={goToNextIteration}
                disabled={currentIterationIndex === totalIterations - 1}
                className='rounded p-1 text-muted-foreground hover:bg-muted hover:text-foreground disabled:cursor-not-allowed disabled:opacity-50'
              >
                <ChevronRight className='h-4 w-4' />
              </button>
            </div>
          )}
        </div>
      </CardHeader>
      <CardContent className='space-y-4'>
        {/* Summary row: duration always; cost/tokens only when present */}
        <div className='grid grid-cols-2 gap-4'>
          <div className='flex items-center gap-2'>
            <Clock className='h-4 w-4 text-muted-foreground' />
            <span className='text-foreground text-sm'>{formatted.duration}</span>
          </div>
          {formatted.cost && (
            <div className='flex items-center gap-2'>
              <DollarSign className='h-4 w-4 text-muted-foreground' />
              <span className='text-foreground text-sm'>${formatted.cost.total.toFixed(5)}</span>
            </div>
          )}
          {formatted.tokens && (
            <div className='flex items-center gap-2'>
              <Hash className='h-4 w-4 text-muted-foreground' />
              <span className='text-foreground text-sm'>{formatted.tokens.total} tokens</span>
            </div>
          )}
        </div>
        <div>
          <h4 className='mb-2 font-medium text-foreground text-sm'>Input</h4>
          <div className='max-h-32 overflow-y-auto rounded bg-muted p-3 font-mono text-xs'>
            <pre className='text-foreground'>{JSON.stringify(formatted.input, null, 2)}</pre>
          </div>
        </div>
        <div>
          <h4 className='mb-2 font-medium text-foreground text-sm'>Output</h4>
          <div className='max-h-32 overflow-y-auto rounded bg-muted p-3 font-mono text-xs'>
            <pre className='text-foreground'>{JSON.stringify(formatted.output, null, 2)}</pre>
          </div>
        </div>
        {formatted.cost && (
          <div>
            <h4 className='mb-2 font-medium text-foreground text-sm'>Cost Breakdown</h4>
            <div className='space-y-1 text-sm'>
              <div className='flex justify-between text-foreground'>
                <span>Input:</span>
                <span>${formatted.cost.input.toFixed(5)}</span>
              </div>
              <div className='flex justify-between text-foreground'>
                <span>Output:</span>
                <span>${formatted.cost.output.toFixed(5)}</span>
              </div>
              <div className='flex justify-between border-border border-t pt-1 font-medium text-foreground'>
                <span>Total:</span>
                <span>${formatted.cost.total.toFixed(5)}</span>
              </div>
            </div>
          </div>
        )}
        {formatted.tokens && (
          <div>
            <h4 className='mb-2 font-medium text-foreground text-sm'>Token Usage</h4>
            <div className='space-y-1 text-sm'>
              <div className='flex justify-between text-foreground'>
                <span>Prompt:</span>
                <span>{formatted.tokens.prompt}</span>
              </div>
              <div className='flex justify-between text-foreground'>
                <span>Completion:</span>
                <span>{formatted.tokens.completion}</span>
              </div>
              <div className='flex justify-between border-border border-t pt-1 font-medium text-foreground'>
                <span>Total:</span>
                <span>{formatted.tokens.total}</span>
              </div>
            </div>
          </div>
        )}
      </CardContent>
    </Card>
  )
}
/**
 * Payload returned by the /api/logs/{executionId}/frozen-canvas endpoint:
 * the workflow snapshot captured at execution time plus summary metadata.
 */
interface FrozenCanvasData {
  executionId: string
  workflowId: string
  // Snapshot of the workflow graph as it existed when this execution ran.
  workflowState: WorkflowState
  executionMetadata: {
    trigger: string
    startedAt: string
    endedAt?: string
    totalDurationMs?: number
    // Per-status block counts for the whole execution.
    blockStats: {
      total: number
      success: number
      error: number
      skipped: number
    }
    // Aggregate model cost; fields are null when no billable calls occurred.
    cost: {
      total: number | null
      input: number | null
      output: number | null
    }
    totalTokens: number | null
  }
}
/** Props for the read-only FrozenCanvas execution viewer. */
interface FrozenCanvasProps {
  // Execution whose frozen workflow state should be fetched and rendered.
  executionId: string
  // Optional trace spans used to build the per-block execution map client-side.
  traceSpans?: any[]
  className?: string
  height?: string | number
  width?: string | number
}
/**
 * Read-only "frozen" canvas that renders a workflow exactly as it was at
 * execution time. Fetches the snapshot from the frozen-canvas API, builds a
 * per-block execution map from the supplied trace spans, and shows block
 * details in a PinnedLogs card when a node is clicked.
 */
export function FrozenCanvas({
  executionId,
  traceSpans,
  className,
  height = '100%',
  width = '100%',
}: FrozenCanvasProps) {
  const [data, setData] = useState<FrozenCanvasData | null>(null)
  const [blockExecutions, setBlockExecutions] = useState<Record<string, any>>({})
  const [loading, setLoading] = useState(true)
  const [error, setError] = useState<string | null>(null)
  const [pinnedBlockId, setPinnedBlockId] = useState<string | null>(null)
  // Process traceSpans to create blockExecutions map
  useEffect(() => {
    if (traceSpans && Array.isArray(traceSpans)) {
      const blockExecutionMap: Record<string, any> = {}
      // The first span is the workflow root; block spans are its children.
      const workflowSpan = traceSpans[0]
      if (workflowSpan?.children && Array.isArray(workflowSpan.children)) {
        // Group child spans by blockId; each group becomes that block's iterations.
        const traceSpansByBlockId = workflowSpan.children.reduce((acc: any, span: any) => {
          if (span.blockId) {
            if (!acc[span.blockId]) {
              acc[span.blockId] = []
            }
            acc[span.blockId].push(span)
          }
          return acc
        }, {})
        for (const [blockId, spans] of Object.entries(traceSpansByBlockId)) {
          const spanArray = spans as any[]
          const iterations = spanArray.map((span: any) => {
            // Extract error information from span output if status is error
            let errorMessage = null
            let errorStackTrace = null
            if (span.status === 'error' && span.output) {
              // Error information can be in different formats in the output
              if (typeof span.output === 'string') {
                errorMessage = span.output
              } else if (span.output.error) {
                errorMessage = span.output.error
                errorStackTrace = span.output.stackTrace || span.output.stack
              } else if (span.output.message) {
                errorMessage = span.output.message
                errorStackTrace = span.output.stackTrace || span.output.stack
              } else {
                // Fallback: stringify the entire output for error cases
                errorMessage = JSON.stringify(span.output)
              }
            }
            return {
              id: span.id,
              blockId: span.blockId,
              blockName: span.name,
              blockType: span.type,
              status: span.status,
              startedAt: span.startTime,
              endedAt: span.endTime,
              durationMs: span.duration,
              inputData: span.input,
              outputData: span.output,
              errorMessage,
              errorStackTrace,
              cost: span.cost || {
                input: null,
                output: null,
                total: null,
              },
              tokens: span.tokens || {
                prompt: null,
                completion: null,
                total: null,
              },
              modelUsed: span.model || null,
              metadata: {},
            }
          })
          blockExecutionMap[blockId] = {
            iterations,
            currentIteration: 0,
            totalIterations: iterations.length,
          }
        }
      }
      setBlockExecutions(blockExecutionMap)
    }
  }, [traceSpans])
  useEffect(() => {
    // Guard against out-of-order responses: if executionId changes (or the
    // component unmounts) while a fetch is in flight, the stale response must
    // not overwrite state belonging to the newer execution.
    let cancelled = false
    const fetchData = async () => {
      try {
        setLoading(true)
        setError(null)
        const response = await fetch(`/api/logs/${executionId}/frozen-canvas`)
        if (!response.ok) {
          throw new Error(`Failed to fetch frozen canvas data: ${response.statusText}`)
        }
        const result = await response.json()
        if (cancelled) return
        setData(result)
        logger.debug(`Loaded frozen canvas data for execution: ${executionId}`)
      } catch (err) {
        if (cancelled) return
        const errorMessage = err instanceof Error ? err.message : 'Unknown error'
        logger.error('Failed to fetch frozen canvas data:', err)
        setError(errorMessage)
      } finally {
        if (!cancelled) {
          setLoading(false)
        }
      }
    }
    fetchData()
    return () => {
      cancelled = true
    }
  }, [executionId])
  // No need to create a temporary workflow - just use the workflowState directly
  if (loading) {
    return (
      <div className={cn('flex items-center justify-center', className)} style={{ height, width }}>
        <div className='flex items-center gap-2 text-muted-foreground'>
          <Loader2 className='h-5 w-5 animate-spin' />
          <span>Loading frozen canvas...</span>
        </div>
      </div>
    )
  }
  if (error) {
    return (
      <div className={cn('flex items-center justify-center', className)} style={{ height, width }}>
        <div className='flex items-center gap-2 text-destructive'>
          <AlertCircle className='h-5 w-5' />
          <span>Failed to load frozen canvas: {error}</span>
        </div>
      </div>
    )
  }
  if (!data) {
    return (
      <div className={cn('flex items-center justify-center', className)} style={{ height, width }}>
        <div className='text-muted-foreground'>No data available</div>
      </div>
    )
  }
  // Check if this is a migrated log without real workflow state
  const isMigratedLog = (data.workflowState as any)?._migrated === true
  if (isMigratedLog) {
    return (
      <div
        className={cn('flex flex-col items-center justify-center gap-4 p-8', className)}
        style={{ height, width }}
      >
        <div className='flex items-center gap-3 text-amber-600 dark:text-amber-400'>
          <AlertCircle className='h-6 w-6' />
          <span className='font-medium text-lg'>Logged State Not Found</span>
        </div>
        <div className='max-w-md text-center text-muted-foreground text-sm'>
          This log was migrated from the old logging system. The workflow state at execution time is
          not available.
        </div>
        <div className='text-muted-foreground text-xs'>
          Note: {(data.workflowState as any)?._note}
        </div>
      </div>
    )
  }
  return (
    <>
      <div style={{ height, width }} className={cn('frozen-canvas-mode h-full w-full', className)}>
        <WorkflowPreview
          workflowState={data.workflowState}
          showSubBlocks={true}
          isPannable={true}
          onNodeClick={(blockId) => {
            // Only pin blocks we actually have execution data for.
            if (blockExecutions[blockId]) {
              setPinnedBlockId(blockId)
            }
          }}
        />
      </div>
      {pinnedBlockId && blockExecutions[pinnedBlockId] && (
        <PinnedLogs
          executionData={blockExecutions[pinnedBlockId]}
          onClose={() => setPinnedBlockId(null)}
        />
      )}
    </>
  )
}

View File

@@ -0,0 +1,2 @@
export { FrozenCanvas } from './frozen-canvas'
export { FrozenCanvasModal } from './frozen-canvas-modal'

View File

@@ -1,7 +1,7 @@
'use client'
import { useEffect, useMemo, useRef, useState } from 'react'
import { ChevronDown, ChevronUp, X } from 'lucide-react'
import { ChevronDown, ChevronUp, Eye, X } from 'lucide-react'
import { Button } from '@/components/ui/button'
import { CopyButton } from '@/components/ui/copy-button'
import { ScrollArea } from '@/components/ui/scroll-area'
@@ -10,6 +10,7 @@ import { redactApiKeys } from '@/lib/utils'
import type { WorkflowLog } from '@/app/workspace/[workspaceId]/logs/stores/types'
import { formatDate } from '@/app/workspace/[workspaceId]/logs/utils/format-date'
import { formatCost } from '@/providers/utils'
import { FrozenCanvasModal } from '../frozen-canvas/frozen-canvas-modal'
import { ToolCallsDisplay } from '../tool-calls/tool-calls-display'
import { TraceSpansDisplay } from '../trace-spans/trace-spans-display'
import LogMarkdownRenderer from './components/markdown-renderer'
@@ -153,7 +154,7 @@ const BlockContentDisplay = ({
<>
<CopyButton text={redactedOutput} className='z-10 h-7 w-7' />
{isJson ? (
<pre className='w-full overflow-visible whitespace-pre-wrap break-all text-sm'>
<pre className='w-full overflow-y-auto overflow-x-hidden whitespace-pre-wrap break-all text-sm'>
{redactedOutput}
</pre>
) : (
@@ -166,7 +167,7 @@ const BlockContentDisplay = ({
text={JSON.stringify(redactedBlockInput, null, 2)}
className='z-10 h-7 w-7'
/>
<pre className='w-full overflow-visible whitespace-pre-wrap break-all text-sm'>
<pre className='w-full overflow-y-auto overflow-x-hidden whitespace-pre-wrap break-all text-sm'>
{JSON.stringify(redactedBlockInput, null, 2)}
</pre>
</>
@@ -193,6 +194,8 @@ export function Sidebar({
const [isDragging, setIsDragging] = useState(false)
const [_currentLogId, setCurrentLogId] = useState<string | null>(null)
const [isTraceExpanded, setIsTraceExpanded] = useState(false)
const [isModelsExpanded, setIsModelsExpanded] = useState(false)
const [isFrozenCanvasOpen, setIsFrozenCanvasOpen] = useState(false)
const scrollAreaRef = useRef<HTMLDivElement>(null)
// Update currentLogId when log changes
@@ -238,22 +241,26 @@ export function Sidebar({
// Determine if this is a workflow execution log
const isWorkflowExecutionLog = useMemo(() => {
if (!log) return false
// Check if message contains "workflow executed" or similar phrases
// Check if message contains workflow execution phrases (success or failure)
return (
log.message.toLowerCase().includes('workflow executed') ||
log.message.toLowerCase().includes('execution completed') ||
(log.trigger === 'manual' && log.duration)
log.message.toLowerCase().includes('workflow execution failed') ||
log.message.toLowerCase().includes('execution failed') ||
(log.trigger === 'manual' && log.duration) ||
// Also check if we have enhanced logging metadata with trace spans
(log.metadata?.enhanced && log.metadata?.traceSpans)
)
}, [log])
// Helper to determine if we have trace spans to display
const _hasTraceSpans = useMemo(() => {
return !!(log?.metadata?.traceSpans && log.metadata.traceSpans.length > 0)
}, [log])
// Helper to determine if we have cost information to display
const hasCostInfo = useMemo(() => {
return !!(log?.metadata?.cost && (log.metadata.cost.input || log.metadata.cost.output))
return !!(
log?.metadata?.cost &&
((log.metadata.cost.input && log.metadata.cost.input > 0) ||
(log.metadata.cost.output && log.metadata.cost.output > 0) ||
(log.metadata.cost.total && log.metadata.cost.total > 0))
)
}, [log])
const isWorkflowWithCost = useMemo(() => {
@@ -487,6 +494,103 @@ export function Sidebar({
</div>
)}
{/* Enhanced Stats - only show for enhanced logs */}
{log.metadata?.enhanced && log.metadata?.blockStats && (
<div>
<h3 className='mb-1 font-medium text-muted-foreground text-xs'>
Block Execution Stats
</h3>
<div className='space-y-1 text-sm'>
<div className='flex justify-between'>
<span>Total Blocks:</span>
<span className='font-medium'>{log.metadata.blockStats.total}</span>
</div>
<div className='flex justify-between'>
<span>Successful:</span>
<span className='font-medium text-green-600'>
{log.metadata.blockStats.success}
</span>
</div>
{log.metadata.blockStats.error > 0 && (
<div className='flex justify-between'>
<span>Failed:</span>
<span className='font-medium text-red-600'>
{log.metadata.blockStats.error}
</span>
</div>
)}
{log.metadata.blockStats.skipped > 0 && (
<div className='flex justify-between'>
<span>Skipped:</span>
<span className='font-medium text-yellow-600'>
{log.metadata.blockStats.skipped}
</span>
</div>
)}
</div>
</div>
)}
{/* Enhanced Cost - only show for enhanced logs with actual cost data */}
{log.metadata?.enhanced && hasCostInfo && (
<div>
<h3 className='mb-1 font-medium text-muted-foreground text-xs'>Cost Breakdown</h3>
<div className='space-y-1 text-sm'>
{(log.metadata?.cost?.total ?? 0) > 0 && (
<div className='flex justify-between'>
<span>Total Cost:</span>
<span className='font-medium'>
${log.metadata?.cost?.total?.toFixed(4)}
</span>
</div>
)}
{(log.metadata?.cost?.input ?? 0) > 0 && (
<div className='flex justify-between'>
<span>Input Cost:</span>
<span className='text-muted-foreground'>
${log.metadata?.cost?.input?.toFixed(4)}
</span>
</div>
)}
{(log.metadata?.cost?.output ?? 0) > 0 && (
<div className='flex justify-between'>
<span>Output Cost:</span>
<span className='text-muted-foreground'>
${log.metadata?.cost?.output?.toFixed(4)}
</span>
</div>
)}
{(log.metadata?.cost?.tokens?.total ?? 0) > 0 && (
<div className='flex justify-between'>
<span>Total Tokens:</span>
<span className='text-muted-foreground'>
{log.metadata?.cost?.tokens?.total?.toLocaleString()}
</span>
</div>
)}
</div>
</div>
)}
{/* Frozen Canvas Button - only show for workflow execution logs with execution ID */}
{isWorkflowExecutionLog && log.executionId && (
<div>
<h3 className='mb-1 font-medium text-muted-foreground text-xs'>Workflow State</h3>
<Button
variant='outline'
size='sm'
onClick={() => setIsFrozenCanvasOpen(true)}
className='w-full justify-start gap-2'
>
<Eye className='h-4 w-4' />
View Frozen Canvas
</Button>
<p className='mt-1 text-muted-foreground text-xs'>
See the exact workflow state and block inputs/outputs at execution time
</p>
</div>
)}
{/* Message Content */}
<div className='w-full pb-2'>
<h3 className='mb-1 font-medium text-muted-foreground text-xs'>Message</h3>
@@ -517,42 +621,94 @@ export function Sidebar({
)}
{/* Cost Information (moved to bottom) */}
{hasCostInfo && log.metadata?.cost && (
{hasCostInfo && (
<div>
<h3 className='mb-1 font-medium text-muted-foreground text-xs'>
{isWorkflowWithCost ? 'Total Model Cost' : 'Model Cost'}
</h3>
<h3 className='mb-1 font-medium text-muted-foreground text-xs'>Models</h3>
<div className='overflow-hidden rounded-md border'>
<div className='space-y-2 p-3'>
{log.metadata.cost.model && (
<div className='flex items-center justify-between'>
<span className='text-muted-foreground text-sm'>Model:</span>
<span className='text-sm'>{log.metadata.cost.model}</span>
</div>
)}
<div className='flex items-center justify-between'>
<span className='text-muted-foreground text-sm'>Input:</span>
<span className='text-sm'>{formatCost(log.metadata.cost.input || 0)}</span>
<span className='text-sm'>
{formatCost(log.metadata?.cost?.input || 0)}
</span>
</div>
<div className='flex items-center justify-between'>
<span className='text-muted-foreground text-sm'>Output:</span>
<span className='text-sm'>{formatCost(log.metadata.cost.output || 0)}</span>
<span className='text-sm'>
{formatCost(log.metadata?.cost?.output || 0)}
</span>
</div>
<div className='mt-1 flex items-center justify-between border-t pt-2'>
<span className='text-muted-foreground text-sm'>Total:</span>
<span className='text-foreground text-sm'>
{formatCost(log.metadata.cost.total || 0)}
{formatCost(log.metadata?.cost?.total || 0)}
</span>
</div>
<div className='flex items-center justify-between'>
<span className='text-muted-foreground text-xs'>Tokens:</span>
<span className='text-muted-foreground text-xs'>
{log.metadata.cost.tokens?.prompt || 0} in /{' '}
{log.metadata.cost.tokens?.completion || 0} out
{log.metadata?.cost?.tokens?.prompt || 0} in /{' '}
{log.metadata?.cost?.tokens?.completion || 0} out
</span>
</div>
</div>
{/* Models Breakdown */}
{log.metadata?.cost?.models &&
Object.keys(log.metadata?.cost?.models).length > 0 && (
<div className='border-t'>
<button
onClick={() => setIsModelsExpanded(!isModelsExpanded)}
className='flex w-full items-center justify-between p-3 text-left transition-colors hover:bg-muted/50'
>
<span className='font-medium text-muted-foreground text-xs'>
Model Breakdown (
{Object.keys(log.metadata?.cost?.models || {}).length})
</span>
{isModelsExpanded ? (
<ChevronUp className='h-3 w-3 text-muted-foreground' />
) : (
<ChevronDown className='h-3 w-3 text-muted-foreground' />
)}
</button>
{isModelsExpanded && (
<div className='space-y-3 border-t bg-muted/30 p-3'>
{Object.entries(log.metadata?.cost?.models || {}).map(
([model, cost]: [string, any]) => (
<div key={model} className='space-y-1'>
<div className='font-medium font-mono text-xs'>{model}</div>
<div className='space-y-1 text-xs'>
<div className='flex justify-between'>
<span className='text-muted-foreground'>Input:</span>
<span>{formatCost(cost.input || 0)}</span>
</div>
<div className='flex justify-between'>
<span className='text-muted-foreground'>Output:</span>
<span>{formatCost(cost.output || 0)}</span>
</div>
<div className='flex justify-between border-t pt-1'>
<span className='text-muted-foreground'>Total:</span>
<span className='font-medium'>
{formatCost(cost.total || 0)}
</span>
</div>
<div className='flex justify-between'>
<span className='text-muted-foreground'>Tokens:</span>
<span>
{cost.tokens?.prompt || 0} in /{' '}
{cost.tokens?.completion || 0} out
</span>
</div>
</div>
</div>
)
)}
</div>
)}
</div>
)}
{isWorkflowWithCost && (
<div className='border-t bg-muted p-3 text-muted-foreground text-xs'>
<p>
@@ -568,6 +724,18 @@ export function Sidebar({
</ScrollArea>
</>
)}
{/* Frozen Canvas Modal */}
{log?.executionId && (
<FrozenCanvasModal
executionId={log.executionId}
workflowName={log.workflow?.name}
trigger={log.trigger || undefined}
traceSpans={log.metadata?.traceSpans}
isOpen={isFrozenCanvasOpen}
onClose={() => setIsFrozenCanvasOpen(false)}
/>
)}
</div>
)
}

View File

@@ -111,7 +111,7 @@ function ToolCallItem({ toolCall, index }: ToolCallItemProps) {
{toolCall.input && (
<div>
<div className='mb-1 text-muted-foreground'>Input</div>
<pre className='group relative max-h-32 overflow-auto rounded bg-background p-2'>
<pre className='group relative max-h-32 overflow-y-auto overflow-x-hidden whitespace-pre-wrap break-all rounded bg-background p-2'>
<CopyButton text={JSON.stringify(toolCall.input, null, 2)} />
<code>{JSON.stringify(toolCall.input, null, 2)}</code>
</pre>
@@ -122,7 +122,7 @@ function ToolCallItem({ toolCall, index }: ToolCallItemProps) {
{toolCall.status === 'success' && toolCall.output && (
<div>
<div className='mb-1 text-muted-foreground'>Output</div>
<pre className='group relative max-h-32 overflow-auto rounded bg-background p-2'>
<pre className='group relative max-h-32 overflow-y-auto overflow-x-hidden whitespace-pre-wrap break-all rounded bg-background p-2'>
<CopyButton text={JSON.stringify(toolCall.output, null, 2)} />
<code>{JSON.stringify(toolCall.output, null, 2)}</code>
</pre>
@@ -132,7 +132,7 @@ function ToolCallItem({ toolCall, index }: ToolCallItemProps) {
{toolCall.status === 'error' && toolCall.error && (
<div>
<div className='mb-1 text-destructive'>Error</div>
<pre className='group relative max-h-32 overflow-auto rounded bg-destructive/10 p-2 text-destructive'>
<pre className='group relative max-h-32 overflow-y-auto overflow-x-hidden whitespace-pre-wrap break-all rounded bg-destructive/10 p-2 text-destructive'>
<CopyButton text={toolCall.error} />
<code>{toolCall.error}</code>
</pre>

View File

@@ -27,6 +27,174 @@ interface TraceSpansDisplayProps {
onExpansionChange?: (expanded: boolean) => void
}
// Transform raw block data into clean, user-friendly format
function transformBlockData(data: any, blockType: string, isInput: boolean) {
if (!data) return null
// For input data, filter out sensitive information
if (isInput) {
const cleanInput = { ...data }
// Remove sensitive fields
if (cleanInput.apiKey) {
cleanInput.apiKey = '***'
}
if (cleanInput.azureApiKey) {
cleanInput.azureApiKey = '***'
}
// Remove null/undefined values for cleaner display
Object.keys(cleanInput).forEach((key) => {
if (cleanInput[key] === null || cleanInput[key] === undefined) {
delete cleanInput[key]
}
})
return cleanInput
}
// For output data, extract meaningful information based on block type
if (data.response) {
const response = data.response
switch (blockType) {
case 'agent':
return {
content: response.content,
model: data.model,
tokens: data.tokens,
toolCalls: response.toolCalls,
...(data.cost && { cost: data.cost }),
}
case 'function':
return {
result: response.result,
stdout: response.stdout,
...(response.executionTime && { executionTime: `${response.executionTime}ms` }),
}
case 'api':
return {
data: response.data,
status: response.status,
headers: response.headers,
}
default:
// For other block types, show the response content
return response
}
}
return data
}
// Component to display block input/output data in a clean, readable format
function BlockDataDisplay({
data,
blockType,
isInput = false,
isError = false,
}: {
data: any
blockType?: string
isInput?: boolean
isError?: boolean
}) {
if (!data) return null
// Handle different data types
const renderValue = (value: any, key?: string): React.ReactNode => {
if (value === null) return <span className='text-muted-foreground italic'>null</span>
if (value === undefined) return <span className='text-muted-foreground italic'>undefined</span>
if (typeof value === 'string') {
return <span className='break-all text-green-700 dark:text-green-400'>"{value}"</span>
}
if (typeof value === 'number') {
return <span className='text-blue-700 dark:text-blue-400'>{value}</span>
}
if (typeof value === 'boolean') {
return <span className='text-purple-700 dark:text-purple-400'>{value.toString()}</span>
}
if (Array.isArray(value)) {
if (value.length === 0) return <span className='text-muted-foreground'>[]</span>
return (
<div className='space-y-1'>
<span className='text-muted-foreground'>[</span>
<div className='ml-4 space-y-1'>
{value.map((item, index) => (
<div key={index} className='flex min-w-0 gap-2'>
<span className='flex-shrink-0 text-muted-foreground text-xs'>{index}:</span>
<div className='min-w-0 flex-1 overflow-hidden'>{renderValue(item)}</div>
</div>
))}
</div>
<span className='text-muted-foreground'>]</span>
</div>
)
}
if (typeof value === 'object') {
const entries = Object.entries(value)
if (entries.length === 0) return <span className='text-muted-foreground'>{'{}'}</span>
return (
<div className='space-y-1'>
{entries.map(([objKey, objValue]) => (
<div key={objKey} className='flex min-w-0 gap-2'>
<span className='flex-shrink-0 font-medium text-orange-700 dark:text-orange-400'>
{objKey}:
</span>
<div className='min-w-0 flex-1 overflow-hidden'>{renderValue(objValue, objKey)}</div>
</div>
))}
</div>
)
}
return <span>{String(value)}</span>
}
// Transform the data for better display
const transformedData = transformBlockData(data, blockType || 'unknown', isInput)
// Special handling for error output
if (isError && data.error) {
return (
<div className='space-y-2 text-xs'>
<div className='rounded border border-red-200 bg-red-50 p-2 dark:border-red-800 dark:bg-red-950/20'>
<div className='mb-1 font-medium text-red-800 dark:text-red-400'>Error</div>
<div className='text-red-700 dark:text-red-300'>{data.error}</div>
</div>
{/* Show other output data if available */}
{transformedData &&
Object.keys(transformedData).filter((key) => key !== 'error' && key !== 'success')
.length > 0 && (
<div className='space-y-1'>
{Object.entries(transformedData)
.filter(([key]) => key !== 'error' && key !== 'success')
.map(([key, value]) => (
<div key={key} className='flex gap-2'>
<span className='font-medium text-orange-700 dark:text-orange-400'>{key}:</span>
{renderValue(value, key)}
</div>
))}
</div>
)}
</div>
)
}
return (
<div className='space-y-1 overflow-hidden text-xs'>{renderValue(transformedData || data)}</div>
)
}
export function TraceSpansDisplay({
traceSpans,
totalDuration = 0,
@@ -35,6 +203,30 @@ export function TraceSpansDisplay({
// Keep track of expanded spans
const [expandedSpans, setExpandedSpans] = useState<Set<string>>(new Set())
// Function to collect all span IDs recursively (for expand all functionality)
const collectAllSpanIds = (spans: TraceSpan[]): string[] => {
const ids: string[] = []
const collectIds = (span: TraceSpan) => {
const spanId = span.id || `span-${span.name}-${span.startTime}`
ids.push(spanId)
// Process children
if (span.children && span.children.length > 0) {
span.children.forEach(collectIds)
}
}
spans.forEach(collectIds)
return ids
}
const allSpanIds = useMemo(() => {
if (!traceSpans || traceSpans.length === 0) return []
return collectAllSpanIds(traceSpans)
}, [traceSpans])
// Early return after all hooks
if (!traceSpans || traceSpans.length === 0) {
return <div className='text-muted-foreground text-sm'>No trace data available</div>
}
@@ -61,26 +253,6 @@ export function TraceSpansDisplay({
// This ensures parallel spans are represented correctly in the timeline
const actualTotalDuration = workflowEndTime - workflowStartTime
// Function to collect all span IDs recursively (for expand all functionality)
const collectAllSpanIds = (spans: TraceSpan[]): string[] => {
const ids: string[] = []
const collectIds = (span: TraceSpan) => {
const spanId = span.id || `span-${span.name}-${span.startTime}`
ids.push(spanId)
// Process children
if (span.children && span.children.length > 0) {
span.children.forEach(collectIds)
}
}
spans.forEach(collectIds)
return ids
}
const allSpanIds = useMemo(() => collectAllSpanIds(traceSpans), [traceSpans])
// Handle span toggling
const handleSpanToggle = (spanId: string, expanded: boolean, hasSubItems: boolean) => {
const newExpandedSpans = new Set(expandedSpans)
@@ -140,11 +312,14 @@ export function TraceSpansDisplay({
)}
</button>
</div>
<div className='overflow-hidden rounded-md border shadow-sm'>
<div className='w-full overflow-hidden rounded-md border shadow-sm'>
{traceSpans.map((span, index) => {
const hasSubItems =
const hasSubItems = Boolean(
(span.children && span.children.length > 0) ||
(span.toolCalls && span.toolCalls.length > 0)
(span.toolCalls && span.toolCalls.length > 0) ||
span.input ||
span.output
)
return (
<TraceSpanItem
key={index}
@@ -430,6 +605,43 @@ function TraceSpanItem({
</div>
</div>
{/* Children and tool calls */}
{expanded && (
<div>
{/* Block Input/Output Data */}
{(span.input || span.output) && (
<div className='mt-2 ml-8 space-y-3 overflow-hidden'>
{/* Input Data */}
{span.input && (
<div>
<h4 className='mb-2 font-medium text-muted-foreground text-xs'>Input</h4>
<div className='overflow-hidden rounded-md bg-secondary/30 p-3'>
<BlockDataDisplay data={span.input} blockType={span.type} isInput={true} />
</div>
</div>
)}
{/* Output Data */}
{span.output && (
<div>
<h4 className='mb-2 font-medium text-muted-foreground text-xs'>
{span.status === 'error' ? 'Error Details' : 'Output'}
</h4>
<div className='overflow-hidden rounded-md bg-secondary/30 p-3'>
<BlockDataDisplay
data={span.output}
blockType={span.type}
isInput={false}
isError={span.status === 'error'}
/>
</div>
</div>
)}
</div>
)}
</div>
)}
{/* Children and tool calls */}
{expanded && (
<div>
@@ -437,9 +649,12 @@ function TraceSpanItem({
{hasChildren && (
<div>
{span.children?.map((childSpan, index) => {
const childHasSubItems =
const childHasSubItems = Boolean(
(childSpan.children && childSpan.children.length > 0) ||
(childSpan.toolCalls && childSpan.toolCalls.length > 0)
(childSpan.toolCalls && childSpan.toolCalls.length > 0) ||
childSpan.input ||
childSpan.output
)
return (
<TraceSpanItem

View File

@@ -1,6 +1,6 @@
'use client'
import { useCallback, useEffect, useMemo, useRef, useState } from 'react'
import { useCallback, useEffect, useRef, useState } from 'react'
import { AlertCircle, Info, Loader2 } from 'lucide-react'
import { createLogger } from '@/lib/logs/console-logger'
import { useSidebarStore } from '@/stores/sidebar/store'
@@ -14,34 +14,6 @@ import { formatDate } from './utils/format-date'
const logger = createLogger('Logs')
const LOGS_PER_PAGE = 50
const getLevelBadgeStyles = (level: string) => {
switch (level.toLowerCase()) {
case 'error':
return 'bg-destructive/20 text-destructive error-badge'
case 'warn':
return 'bg-warning/20 text-warning'
default:
return 'bg-secondary text-secondary-foreground'
}
}
const getTriggerBadgeStyles = (trigger: string) => {
switch (trigger.toLowerCase()) {
case 'manual':
return 'bg-secondary text-secondary-foreground'
case 'api':
return 'bg-blue-100 dark:bg-blue-950/40 text-blue-700 dark:text-blue-400'
case 'webhook':
return 'bg-orange-100 dark:bg-orange-950/40 text-orange-700 dark:text-orange-400'
case 'schedule':
return 'bg-green-100 dark:bg-green-950/40 text-green-700 dark:text-green-400'
case 'chat':
return 'bg-purple-100 dark:bg-purple-950/40 text-purple-700 dark:text-purple-400'
default:
return 'bg-gray-100 dark:bg-gray-800 text-gray-700 dark:text-gray-400'
}
}
const selectedRowAnimation = `
@keyframes borderPulse {
0% { border-left-color: hsl(var(--primary) / 0.3) }
@@ -87,28 +59,6 @@ export default function Logs() {
const isSidebarCollapsed =
mode === 'expanded' ? !isExpanded : mode === 'collapsed' || mode === 'hover'
const executionGroups = useMemo(() => {
const groups: Record<string, WorkflowLog[]> = {}
// Group logs by executionId
logs.forEach((log) => {
if (log.executionId) {
if (!groups[log.executionId]) {
groups[log.executionId] = []
}
groups[log.executionId].push(log)
}
})
Object.keys(groups).forEach((executionId) => {
groups[executionId].sort(
(a, b) => new Date(a.createdAt).getTime() - new Date(b.createdAt).getTime()
)
})
return groups
}, [logs])
const handleLogClick = (log: WorkflowLog) => {
setSelectedLog(log)
const index = logs.findIndex((l) => l.id === log.id)
@@ -134,6 +84,8 @@ export default function Logs() {
const handleCloseSidebar = () => {
setIsSidebarOpen(false)
setSelectedLog(null)
setSelectedLogIndex(-1)
}
useEffect(() => {
@@ -155,7 +107,7 @@ export default function Logs() {
}
const queryParams = buildQueryParams(pageNum, LOGS_PER_PAGE)
const response = await fetch(`/api/logs?${queryParams}`)
const response = await fetch(`/api/logs/enhanced?${queryParams}`)
if (!response.ok) {
throw new Error(`Error fetching logs: ${response.statusText}`)
@@ -203,7 +155,7 @@ export default function Logs() {
try {
setLoading(true)
const queryParams = buildQueryParams(1, LOGS_PER_PAGE)
const response = await fetch(`/api/logs?${queryParams}`)
const response = await fetch(`/api/logs/enhanced?${queryParams}`)
if (!response.ok) {
throw new Error(`Error fetching logs: ${response.statusText}`)
@@ -353,46 +305,19 @@ export default function Logs() {
<div className='flex flex-1 flex-col overflow-hidden'>
{/* Table container */}
<div className='flex flex-1 flex-col overflow-hidden'>
{/* Table header - fixed */}
<div className='sticky top-0 z-10 border-b bg-background'>
<table className='w-full table-fixed'>
<colgroup>
<col className={`${isSidebarCollapsed ? 'w-[16%]' : 'w-[19%]'}`} />
<col className='w-[8%] md:w-[7%]' />
<col className='w-[12%] md:w-[10%]' />
<col className='hidden w-[8%] lg:table-column' />
<col className='hidden w-[8%] lg:table-column' />
<col
className={`${isSidebarCollapsed ? 'w-auto md:w-[53%] lg:w-auto' : 'w-auto md:w-[50%] lg:w-auto'}`}
/>
<col className='w-[8%] md:w-[10%]' />
</colgroup>
<thead>
<tr>
<th className='px-4 pt-2 pb-3 text-left font-medium'>
<span className='text-muted-foreground text-xs leading-none'>Time</span>
</th>
<th className='px-4 pt-2 pb-3 text-left font-medium'>
<span className='text-muted-foreground text-xs leading-none'>Status</span>
</th>
<th className='px-4 pt-2 pb-3 text-left font-medium'>
<span className='text-muted-foreground text-xs leading-none'>Workflow</span>
</th>
<th className='hidden px-4 pt-2 pb-3 text-left font-medium lg:table-cell'>
<span className='text-muted-foreground text-xs leading-none'>id</span>
</th>
<th className='hidden px-4 pt-2 pb-3 text-left font-medium lg:table-cell'>
<span className='text-muted-foreground text-xs leading-none'>Trigger</span>
</th>
<th className='px-4 pt-2 pb-3 text-left font-medium'>
<span className='text-muted-foreground text-xs leading-none'>Message</span>
</th>
<th className='px-4 pt-2 pb-3 text-left font-medium'>
<span className='text-muted-foreground text-xs leading-none'>Duration</span>
</th>
</tr>
</thead>
</table>
{/* Table with fixed layout */}
<div className='w-full min-w-[800px]'>
{/* Header */}
<div className='border-border/50 border-b'>
<div className='grid grid-cols-[160px_100px_1fr_120px_100px_100px] gap-4 px-4 py-3 font-medium text-muted-foreground text-xs'>
<div>Time</div>
<div>Status</div>
<div>Workflow</div>
<div className='hidden lg:block'>Trigger</div>
<div className='hidden xl:block'>Cost</div>
<div>Duration</div>
</div>
</div>
</div>
{/* Table body - scrollable */}
@@ -419,163 +344,106 @@ export default function Logs() {
</div>
</div>
) : (
<table className='w-full table-fixed'>
<colgroup>
<col className={`${isSidebarCollapsed ? 'w-[16%]' : 'w-[19%]'}`} />
<col className='w-[8%] md:w-[7%]' />
<col className='w-[12%] md:w-[10%]' />
<col className='hidden w-[8%] lg:table-column' />
<col className='hidden w-[8%] lg:table-column' />
<col
className={`${isSidebarCollapsed ? 'w-auto md:w-[53%] lg:w-auto' : 'w-auto md:w-[50%] lg:w-auto'}`}
/>
<col className='w-[8%] md:w-[10%]' />
</colgroup>
<tbody>
{logs.map((log) => {
const formattedDate = formatDate(log.createdAt)
const isSelected = selectedLog?.id === log.id
const _isWorkflowExecutionLog =
log.executionId && executionGroups[log.executionId].length === 1
<div className='space-y-1 p-4'>
{logs.map((log) => {
const formattedDate = formatDate(log.createdAt)
const isSelected = selectedLog?.id === log.id
return (
<tr
key={log.id}
ref={isSelected ? selectedRowRef : null}
className={`cursor-pointer border-b transition-colors ${
isSelected
? 'selected-row border-l-2 bg-accent/40 hover:bg-accent/50'
: 'hover:bg-accent/30'
}`}
onClick={() => handleLogClick(log)}
>
{/* Time column */}
<td className='px-4 py-3'>
<div className='flex flex-col justify-center'>
<div className='flex items-center font-medium text-xs'>
<span>{formattedDate.formatted}</span>
<span className='mx-1.5 hidden text-muted-foreground xl:inline'>
</span>
<span className='hidden text-muted-foreground xl:inline'>
{new Date(log.createdAt).toLocaleDateString('en-US', {
month: 'short',
day: 'numeric',
year: 'numeric',
})}
</span>
</div>
<div className='mt-0.5 text-muted-foreground text-xs'>
<span>{formattedDate.relative}</span>
</div>
return (
<div
key={log.id}
ref={isSelected ? selectedRowRef : null}
className={`cursor-pointer rounded-lg border transition-all duration-200 ${
isSelected
? 'border-primary bg-accent/40 shadow-sm'
: 'border-border hover:border-border/80 hover:bg-accent/20'
}`}
onClick={() => handleLogClick(log)}
>
<div className='grid grid-cols-[160px_100px_1fr_120px_100px_100px] gap-4 p-4'>
{/* Time */}
<div>
<div className='font-medium text-sm'>{formattedDate.formatted}</div>
<div className='text-muted-foreground text-xs'>
{formattedDate.relative}
</div>
</td>
</div>
{/* Level column */}
<td className='px-4 py-3'>
{/* Status */}
<div>
<div
className={`inline-flex items-center justify-center rounded-md px-2 py-1 text-xs ${getLevelBadgeStyles(log.level)}`}
className={`inline-flex items-center justify-center rounded-md px-2 py-1 text-xs ${
log.level === 'error'
? 'bg-red-100 text-red-800'
: 'bg-green-100 text-green-800'
}`}
>
<span className='font-medium'>{log.level}</span>
<span className='font-medium'>
{log.level === 'error' ? 'Failed' : 'Success'}
</span>
</div>
</td>
</div>
{/* Workflow column */}
<td className='px-4 py-3'>
{log.workflow && (
<div
className='inline-flex max-w-full items-center truncate rounded-md px-2 py-1 text-xs'
style={{
backgroundColor: `${log.workflow.color}20`,
color: log.workflow.color,
}}
title={log.workflow.name}
>
<span className='truncate font-medium'>{log.workflow.name}</span>
</div>
)}
</td>
{/* ID column - hidden on small screens */}
<td className='hidden px-4 py-3 lg:table-cell'>
<div className='font-mono text-muted-foreground text-xs'>
{log.executionId ? `#${log.executionId.substring(0, 4)}` : '—'}
{/* Workflow */}
<div className='min-w-0'>
<div className='truncate font-medium text-sm'>
{log.workflow?.name || 'Unknown Workflow'}
</div>
</td>
{/* Trigger column - hidden on medium screens and below */}
<td className='hidden px-4 py-3 lg:table-cell'>
{log.trigger && (
<div
className={`inline-flex items-center rounded-md px-2 py-1 text-xs ${getTriggerBadgeStyles(log.trigger)}`}
>
<span className='font-medium'>{log.trigger}</span>
</div>
)}
</td>
{/* Message column */}
<td className='px-4 py-3'>
<div className='truncate text-sm' title={log.message}>
<div className='truncate text-muted-foreground text-xs'>
{log.message}
</div>
</td>
</div>
{/* Duration column */}
<td className='px-4 py-3'>
{/* Trigger */}
<div className='hidden lg:block'>
<div className='text-muted-foreground text-xs'>
{log.trigger || '—'}
</div>
</div>
{/* Cost */}
<div className='hidden xl:block'>
<div className='text-xs'>
{log.metadata?.enhanced && log.metadata?.cost?.total ? (
<span className='text-muted-foreground'>
${log.metadata.cost.total.toFixed(4)}
</span>
) : (
<span className='text-muted-foreground'></span>
)}
</div>
</div>
{/* Duration */}
<div>
<div className='text-muted-foreground text-xs'>
{log.duration || '—'}
</div>
</td>
</tr>
)
})}
{/* Infinite scroll loader */}
{hasMore && (
<tr>
<td colSpan={7}>
<div
ref={loaderRef}
className='flex items-center justify-center py-2'
style={{ height: '50px' }}
>
{isFetchingMore && (
<div className='flex items-center gap-2 text-muted-foreground opacity-70'>
<Loader2 className='h-4 w-4 animate-spin' />
<span className='text-xs'>Loading more logs...</span>
</div>
)}
</div>
</td>
</tr>
)}
{/* Footer status indicator - useful for development */}
<tr className='border-t'>
<td colSpan={7}>
<div className='flex items-center justify-between px-4 py-2 text-muted-foreground text-xs'>
<span>Showing {logs.length} logs</span>
<div className='flex items-center gap-4'>
{isFetchingMore ? (
<div className='flex items-center gap-2' />
) : hasMore ? (
<button
type='button'
onClick={loadMoreLogs}
className='text-primary text-xs hover:underline'
>
Load more logs
</button>
) : (
<span>End of logs</span>
)}
</div>
</div>
</td>
</tr>
</tbody>
</table>
</div>
)
})}
{/* Infinite scroll loader */}
{hasMore && (
<div className='flex items-center justify-center py-4'>
<div
ref={loaderRef}
className='flex items-center gap-2 text-muted-foreground'
>
{isFetchingMore ? (
<>
<Loader2 className='h-4 w-4 animate-spin' />
<span className='text-sm'>Loading more...</span>
</>
) : (
<span className='text-sm'>Scroll to load more</span>
)}
</div>
</div>
)}
</div>
)}
</div>
</div>

View File

@@ -22,7 +22,19 @@ export interface ToolCallMetadata {
}
export interface CostMetadata {
model?: string
models?: Record<
string,
{
input: number
output: number
total: number
tokens?: {
prompt?: number
completion?: number
total?: number
}
}
>
input?: number
output?: number
total?: number
@@ -53,6 +65,7 @@ export interface TraceSpan {
relativeStartMs?: number // Time in ms from the start of the parent span
blockId?: string // Added to track the original block ID for relationship mapping
input?: Record<string, any> // Added to store input data for this span
output?: Record<string, any> // Added to store output data for this span
}
export interface WorkflowLog {
@@ -70,6 +83,29 @@ export interface WorkflowLog {
totalDuration?: number
cost?: CostMetadata
blockInput?: Record<string, any>
enhanced?: boolean
blockStats?: {
total: number
success: number
error: number
skipped: number
}
blockExecutions?: Array<{
id: string
blockId: string
blockName: string
blockType: string
startedAt: string
endedAt: string
durationMs: number
status: 'success' | 'error' | 'skipped'
errorMessage?: string
errorStackTrace?: string
inputData: any
outputData: any
cost?: CostMetadata
metadata: any
}>
}
}

View File

@@ -30,6 +30,7 @@ import { Input } from '@/components/ui/input'
import { Label } from '@/components/ui/label'
import { Skeleton } from '@/components/ui/skeleton'
import { Textarea } from '@/components/ui/textarea'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console-logger'
import { getBaseDomain } from '@/lib/urls/utils'
import { cn } from '@/lib/utils'
@@ -54,7 +55,7 @@ interface ChatDeployProps {
type AuthType = 'public' | 'password' | 'email'
const getDomainSuffix = (() => {
const suffix = process.env.NODE_ENV === 'development' ? `.${getBaseDomain()}` : '.simstudio.ai'
const suffix = env.NODE_ENV === 'development' ? `.${getBaseDomain()}` : '.simstudio.ai'
return () => suffix
})()

View File

@@ -1,53 +1,57 @@
'use client'
import { useEffect, useState } from 'react'
import { AlertTriangle, RefreshCw } from 'lucide-react'
import { Button } from '@/components/ui/button'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/w/components/providers/workspace-permissions-provider'
interface ConnectionStatusProps {
isConnected: boolean
}
export function ConnectionStatus({ isConnected }: ConnectionStatusProps) {
const [showOfflineNotice, setShowOfflineNotice] = useState(false)
const userPermissions = useUserPermissionsContext()
useEffect(() => {
let timeoutId: NodeJS.Timeout
const handleRefresh = () => {
window.location.reload()
}
if (!isConnected) {
// Show offline notice after 6 seconds of being disconnected
timeoutId = setTimeout(() => {
setShowOfflineNotice(true)
}, 6000) // 6 seconds
} else {
// Hide notice immediately when reconnected
setShowOfflineNotice(false)
}
return () => {
if (timeoutId) {
clearTimeout(timeoutId)
}
}
}, [isConnected])
// Don't render anything if connected or if we haven't been disconnected long enough
if (!showOfflineNotice) {
// Don't render anything if not in offline mode
if (!userPermissions.isOfflineMode) {
return null
}
return (
<div className='flex items-center gap-1.5'>
<div className='flex items-center gap-1.5 text-red-600'>
<div className='flex items-center gap-2 rounded-md border border-red-200 bg-red-50 px-3 py-2'>
<div className='flex items-center gap-2 text-red-700'>
<div className='relative flex items-center justify-center'>
<div className='absolute h-3 w-3 animate-ping rounded-full bg-red-500/20' />
<div className='relative h-2 w-2 rounded-full bg-red-500' />
{!isConnected && (
<div className='absolute h-4 w-4 animate-ping rounded-full bg-red-500/20' />
)}
<AlertTriangle className='relative h-4 w-4' />
</div>
<div className='flex flex-col'>
<span className='font-medium text-xs leading-tight'>Connection lost</span>
<span className='text-xs leading-tight opacity-90'>
Changes not saved - please refresh
<span className='font-medium text-xs leading-tight'>
{isConnected ? 'Reconnected' : 'Connection lost - please refresh'}
</span>
<span className='text-red-600 text-xs leading-tight'>
{isConnected ? 'Refresh to continue editing' : 'Read-only mode active'}
</span>
</div>
</div>
<Tooltip>
<TooltipTrigger asChild>
<Button
onClick={handleRefresh}
variant='ghost'
size='sm'
className='h-7 w-7 p-0 text-red-700 hover:bg-red-100 hover:text-red-800'
>
<RefreshCw className='h-4 w-4' />
</Button>
</TooltipTrigger>
<TooltipContent className='z-[9999]'>Refresh page to continue editing</TooltipContent>
</Tooltip>
</div>
)
}

View File

@@ -44,16 +44,6 @@ export function UserAvatarStack({
}
}, [users, maxVisible])
// Show connection status component regardless of user count
// This will handle the offline notice when disconnected for 15 seconds
const connectionStatusElement = <ConnectionStatus isConnected={isConnected} />
// Only show presence when there are multiple users (>1)
// But always show connection status
if (users.length <= 1) {
return connectionStatusElement
}
// Determine spacing based on size
const spacingClass = {
sm: '-space-x-1',
@@ -62,46 +52,55 @@ export function UserAvatarStack({
}[size]
return (
<div className={`flex items-center ${spacingClass} ${className}`}>
{/* Connection status - always present */}
{connectionStatusElement}
<div className={`flex items-center gap-3 ${className}`}>
{/* Connection status - always check, shows when offline */}
<ConnectionStatus isConnected={isConnected} />
{/* Render visible user avatars */}
{visibleUsers.map((user, index) => (
<UserAvatar
key={user.connectionId}
connectionId={user.connectionId}
name={user.name}
color={user.color}
size={size}
index={index}
tooltipContent={
user.name ? (
<div className='text-center'>
<div className='font-medium'>{user.name}</div>
{user.info && <div className='mt-1 text-muted-foreground text-xs'>{user.info}</div>}
</div>
) : null
}
/>
))}
{/* Only show avatar stack when there are multiple users (>1) */}
{users.length > 1 && (
<div className={`flex items-center ${spacingClass}`}>
{/* Render visible user avatars */}
{visibleUsers.map((user, index) => (
<UserAvatar
key={user.connectionId}
connectionId={user.connectionId}
name={user.name}
color={user.color}
size={size}
index={index}
tooltipContent={
user.name ? (
<div className='text-center'>
<div className='font-medium'>{user.name}</div>
{user.info && (
<div className='mt-1 text-muted-foreground text-xs'>{user.info}</div>
)}
</div>
) : null
}
/>
))}
{/* Render overflow indicator if there are more users */}
{overflowCount > 0 && (
<UserAvatar
connectionId='overflow-indicator' // Use a unique string identifier
name={`+${overflowCount}`}
size={size}
index={visibleUsers.length}
tooltipContent={
<div className='text-center'>
<div className='font-medium'>
{overflowCount} more user{overflowCount > 1 ? 's' : ''}
</div>
<div className='mt-1 text-muted-foreground text-xs'>{users.length} total online</div>
</div>
}
/>
{/* Render overflow indicator if there are more users */}
{overflowCount > 0 && (
<UserAvatar
connectionId='overflow-indicator' // Use a unique string identifier
name={`+${overflowCount}`}
size={size}
index={visibleUsers.length}
tooltipContent={
<div className='text-center'>
<div className='font-medium'>
{overflowCount} more user{overflowCount > 1 ? 's' : ''}
</div>
<div className='mt-1 text-muted-foreground text-xs'>
{users.length} total online
</div>
</div>
}
/>
)}
</div>
)}
</div>
)

View File

@@ -458,7 +458,7 @@ export function ControlBar({ hasValidationErrors = false }: ControlBarProps) {
* Handle deleting the current workflow
*/
const handleDeleteWorkflow = () => {
if (!activeWorkflowId || !userPermissions.canEdit) return
if (!activeWorkflowId || !userPermissions.canAdmin) return
const sidebarWorkflows = getSidebarOrderedWorkflows()
const currentIndex = sidebarWorkflows.findIndex((w) => w.id === activeWorkflowId)
@@ -670,7 +670,11 @@ export function ControlBar({ hasValidationErrors = false }: ControlBarProps) {
</h2>
</TooltipTrigger>
{!canEdit && (
<TooltipContent>Edit permissions required to rename workflows</TooltipContent>
<TooltipContent>
{userPermissions.isOfflineMode
? 'Connection lost - please refresh'
: 'Edit permissions required to rename workflows'}
</TooltipContent>
)}
</Tooltip>
)}
@@ -691,12 +695,12 @@ export function ControlBar({ hasValidationErrors = false }: ControlBarProps) {
* Render delete workflow button with confirmation dialog
*/
const renderDeleteButton = () => {
const canEdit = userPermissions.canEdit
const canAdmin = userPermissions.canAdmin
const hasMultipleWorkflows = Object.keys(workflows).length > 1
const isDisabled = !canEdit || !hasMultipleWorkflows
const isDisabled = !canAdmin || !hasMultipleWorkflows
const getTooltipText = () => {
if (!canEdit) return 'Admin permission required to delete workflows'
if (!canAdmin) return 'Admin permission required to delete workflows'
if (!hasMultipleWorkflows) return 'Cannot delete the last workflow'
return 'Delete Workflow'
}
@@ -934,7 +938,11 @@ export function ControlBar({ hasValidationErrors = false }: ControlBarProps) {
)}
</TooltipTrigger>
<TooltipContent>
{canEdit ? 'Duplicate Workflow' : 'Admin permission required to duplicate workflows'}
{canEdit
? 'Duplicate Workflow'
: userPermissions.isOfflineMode
? 'Connection lost - please refresh'
: 'Admin permission required to duplicate workflows'}
</TooltipContent>
</Tooltip>
)
@@ -975,7 +983,9 @@ export function ControlBar({ hasValidationErrors = false }: ControlBarProps) {
</TooltipTrigger>
<TooltipContent command='Shift+L'>
{!userPermissions.canEdit
? 'Admin permission required to use auto-layout'
? userPermissions.isOfflineMode
? 'Connection lost - please refresh'
: 'Admin permission required to use auto-layout'
: 'Auto Layout'}
</TooltipContent>
</Tooltip>

View File

@@ -5,6 +5,12 @@ import { ArrowUp } from 'lucide-react'
import { Button } from '@/components/ui/button'
import { Input } from '@/components/ui/input'
import { ScrollArea } from '@/components/ui/scroll-area'
import { createLogger } from '@/lib/logs/console-logger'
import {
extractBlockIdFromOutputId,
extractPathFromOutputId,
parseOutputContentSafely,
} from '@/lib/response-format'
import type { BlockLog, ExecutionResult } from '@/executor/types'
import { useExecutionStore } from '@/stores/execution/store'
import { useChatStore } from '@/stores/panel/chat/store'
@@ -14,6 +20,8 @@ import { useWorkflowExecution } from '../../../../hooks/use-workflow-execution'
import { ChatMessage } from './components/chat-message/chat-message'
import { OutputSelect } from './components/output-select/output-select'
const logger = createLogger('ChatPanel')
interface ChatProps {
panelWidth: number
chatMessage: string
@@ -60,8 +68,8 @@ export function Chat({ panelWidth, chatMessage, setChatMessage }: ChatProps) {
const selected = selectedWorkflowOutputs[activeWorkflowId]
if (!selected || selected.length === 0) {
const defaultSelection = outputEntries.length > 0 ? [outputEntries[0].id] : []
return defaultSelection
// Return empty array when nothing is explicitly selected
return []
}
// Ensure we have no duplicates in the selection
@@ -74,7 +82,7 @@ export function Chat({ panelWidth, chatMessage, setChatMessage }: ChatProps) {
}
return selected
}, [selectedWorkflowOutputs, activeWorkflowId, outputEntries, setSelectedWorkflowOutput])
}, [selectedWorkflowOutputs, activeWorkflowId, setSelectedWorkflowOutput])
// Auto-scroll to bottom when new messages are added
useEffect(() => {
@@ -141,25 +149,22 @@ export function Chat({ panelWidth, chatMessage, setChatMessage }: ChatProps) {
if (nonStreamingLogs.length > 0) {
const outputsToRender = selectedOutputs.filter((outputId) => {
// Extract block ID correctly - handle both formats:
// - "blockId" (direct block ID)
// - "blockId_response.result" (block ID with path)
const blockIdForOutput = outputId.includes('_')
? outputId.split('_')[0]
: outputId.split('.')[0]
const blockIdForOutput = extractBlockIdFromOutputId(outputId)
return nonStreamingLogs.some((log) => log.blockId === blockIdForOutput)
})
for (const outputId of outputsToRender) {
const blockIdForOutput = outputId.includes('_')
? outputId.split('_')[0]
: outputId.split('.')[0]
const path = outputId.substring(blockIdForOutput.length + 1)
const blockIdForOutput = extractBlockIdFromOutputId(outputId)
const path = extractPathFromOutputId(outputId, blockIdForOutput)
const log = nonStreamingLogs.find((l) => l.blockId === blockIdForOutput)
if (log) {
let outputValue: any = log.output
if (path) {
// Parse JSON content safely
outputValue = parseOutputContentSafely(outputValue)
const pathParts = path.split('.')
for (const part of pathParts) {
if (
@@ -211,42 +216,41 @@ export function Chat({ panelWidth, chatMessage, setChatMessage }: ChatProps) {
}
}
} catch (e) {
console.error('Error parsing stream data:', e)
logger.error('Error parsing stream data:', e)
}
}
}
}
}
processStream().catch((e) => console.error('Error processing stream:', e))
processStream().catch((e) => logger.error('Error processing stream:', e))
} else if (result && 'success' in result && result.success && 'logs' in result) {
const finalOutputs: any[] = []
if (selectedOutputs && selectedOutputs.length > 0) {
if (selectedOutputs?.length > 0) {
for (const outputId of selectedOutputs) {
// Find the log that corresponds to the start of the outputId
const log = result.logs?.find(
(l: BlockLog) => l.blockId === outputId || outputId.startsWith(`${l.blockId}_`)
)
const blockIdForOutput = extractBlockIdFromOutputId(outputId)
const path = extractPathFromOutputId(outputId, blockIdForOutput)
const log = result.logs?.find((l: BlockLog) => l.blockId === blockIdForOutput)
if (log) {
let output = log.output
// Check if there is a path to traverse
if (outputId.length > log.blockId.length) {
const path = outputId.substring(log.blockId.length + 1)
if (path) {
const pathParts = path.split('.')
let current = output
for (const part of pathParts) {
if (current && typeof current === 'object' && part in current) {
current = current[part]
} else {
current = undefined
break
}
if (path) {
// Parse JSON content safely
output = parseOutputContentSafely(output)
const pathParts = path.split('.')
let current = output
for (const part of pathParts) {
if (current && typeof current === 'object' && part in current) {
current = current[part]
} else {
current = undefined
break
}
output = current
}
output = current
}
if (output !== undefined) {
finalOutputs.push(output)
@@ -255,10 +259,8 @@ export function Chat({ panelWidth, chatMessage, setChatMessage }: ChatProps) {
}
}
// If no specific outputs could be resolved, fall back to the final workflow output
if (finalOutputs.length === 0 && result.output) {
finalOutputs.push(result.output)
}
// Only show outputs if something was explicitly selected
// If no outputs are selected, don't show anything
// Add a new message for each resolved output
finalOutputs.forEach((output) => {
@@ -266,19 +268,8 @@ export function Chat({ panelWidth, chatMessage, setChatMessage }: ChatProps) {
if (typeof output === 'string') {
content = output
} else if (output && typeof output === 'object') {
// Handle cases where output is { response: ... }
const outputObj = output as Record<string, any>
const response = outputObj.response
if (response) {
if (typeof response.content === 'string') {
content = response.content
} else {
// Pretty print for better readability
content = `\`\`\`json\n${JSON.stringify(response, null, 2)}\n\`\`\``
}
} else {
content = `\`\`\`json\n${JSON.stringify(output, null, 2)}\n\`\`\``
}
// For structured responses, pretty print the JSON
content = `\`\`\`json\n${JSON.stringify(output, null, 2)}\n\`\`\``
}
if (content) {

View File

@@ -1,8 +1,10 @@
import { useEffect, useMemo, useRef, useState } from 'react'
import { Check, ChevronDown } from 'lucide-react'
import { Button } from '@/components/ui/button'
import { extractFieldsFromSchema, parseResponseFormatSafely } from '@/lib/response-format'
import { cn } from '@/lib/utils'
import { getBlock } from '@/blocks'
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
interface OutputSelectProps {
@@ -48,8 +50,31 @@ export function OutputSelect({
? block.name.replace(/\s+/g, '').toLowerCase()
: `block-${block.id}`
// Check for custom response format first
const responseFormatValue = useSubBlockStore.getState().getValue(block.id, 'responseFormat')
const responseFormat = parseResponseFormatSafely(responseFormatValue, block.id)
let outputsToProcess: Record<string, any> = {}
if (responseFormat) {
// Use custom schema properties if response format is specified
const schemaFields = extractFieldsFromSchema(responseFormat)
if (schemaFields.length > 0) {
// Convert schema fields to output structure
schemaFields.forEach((field) => {
outputsToProcess[field.name] = { type: field.type }
})
} else {
// Fallback to default outputs if schema extraction failed
outputsToProcess = block.outputs || {}
}
} else {
// Use default block outputs
outputsToProcess = block.outputs || {}
}
// Add response outputs
if (block.outputs && typeof block.outputs === 'object') {
if (Object.keys(outputsToProcess).length > 0) {
const addOutput = (path: string, outputObj: any, prefix = '') => {
const fullPath = prefix ? `${prefix}.${path}` : path
@@ -100,7 +125,7 @@ export function OutputSelect({
}
// Process all output properties directly (flattened structure)
Object.entries(block.outputs).forEach(([key, value]) => {
Object.entries(outputsToProcess).forEach(([key, value]) => {
addOutput(key, value)
})
}

View File

@@ -125,35 +125,33 @@ export function ConsoleEntry({ entry, consoleWidth }: ConsoleEntryProps) {
<div className='flex items-start gap-2'>
<Terminal className='mt-1 h-4 w-4 text-muted-foreground' />
<div className='overflow-wrap-anywhere relative flex-1 whitespace-normal break-normal font-mono text-sm'>
{typeof entry.output === 'object' &&
entry.output !== null &&
hasNestedStructure(entry.output) && (
<div className='absolute top-0 right-0 z-10'>
<Button
variant='ghost'
size='sm'
className='h-6 px-2 text-muted-foreground hover:text-foreground'
onClick={(e) => {
e.stopPropagation()
setExpandAllJson(!expandAllJson)
}}
>
<span className='flex items-center'>
{expandAllJson ? (
<>
<ChevronUp className='mr-1 h-3 w-3' />
<span className='text-xs'>Collapse</span>
</>
) : (
<>
<ChevronDown className='mr-1 h-3 w-3' />
<span className='text-xs'>Expand</span>
</>
)}
</span>
</Button>
</div>
)}
{entry.output != null && (
<div className='absolute top-0 right-0 z-10'>
<Button
variant='ghost'
size='sm'
className='h-6 px-2 text-muted-foreground hover:text-foreground'
onClick={(e) => {
e.stopPropagation()
setExpandAllJson(!expandAllJson)
}}
>
<span className='flex items-center'>
{expandAllJson ? (
<>
<ChevronUp className='mr-1 h-3 w-3' />
<span className='text-xs'>Collapse</span>
</>
) : (
<>
<ChevronDown className='mr-1 h-3 w-3' />
<span className='text-xs'>Expand</span>
</>
)}
</span>
</Button>
</div>
)}
<JSONView data={entry.output} initiallyExpanded={expandAllJson} />
</div>
</div>

View File

@@ -1,6 +1,7 @@
import { useCallback } from 'react'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
import { cn } from '@/lib/utils'
import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/w/components/providers/workspace-permissions-provider'
import type { BlockConfig } from '@/blocks/types'
export type ToolbarBlockProps = {
@@ -9,6 +10,8 @@ export type ToolbarBlockProps = {
}
export function ToolbarBlock({ config, disabled = false }: ToolbarBlockProps) {
const userPermissions = useUserPermissionsContext()
const handleDragStart = (e: React.DragEvent) => {
if (disabled) {
e.preventDefault()
@@ -66,7 +69,11 @@ export function ToolbarBlock({ config, disabled = false }: ToolbarBlockProps) {
return (
<Tooltip>
<TooltipTrigger asChild>{blockContent}</TooltipTrigger>
<TooltipContent>Edit permissions required to add blocks</TooltipContent>
<TooltipContent>
{userPermissions.isOfflineMode
? 'Connection lost - please refresh'
: 'Edit permissions required to add blocks'}
</TooltipContent>
</Tooltip>
)
}

View File

@@ -1,6 +1,7 @@
import { useCallback } from 'react'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
import { cn } from '@/lib/utils'
import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/w/components/providers/workspace-permissions-provider'
import { LoopTool } from '../../../loop-node/loop-config'
type LoopToolbarItemProps = {
@@ -9,6 +10,8 @@ type LoopToolbarItemProps = {
// Custom component for the Loop Tool
export default function LoopToolbarItem({ disabled = false }: LoopToolbarItemProps) {
const userPermissions = useUserPermissionsContext()
const handleDragStart = (e: React.DragEvent) => {
if (disabled) {
e.preventDefault()
@@ -74,7 +77,11 @@ export default function LoopToolbarItem({ disabled = false }: LoopToolbarItemPro
return (
<Tooltip>
<TooltipTrigger asChild>{blockContent}</TooltipTrigger>
<TooltipContent>Edit permissions required to add blocks</TooltipContent>
<TooltipContent>
{userPermissions.isOfflineMode
? 'Connection lost - please refresh'
: 'Edit permissions required to add blocks'}
</TooltipContent>
</Tooltip>
)
}

View File

@@ -1,6 +1,7 @@
import { useCallback } from 'react'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
import { cn } from '@/lib/utils'
import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/w/components/providers/workspace-permissions-provider'
import { ParallelTool } from '../../../parallel-node/parallel-config'
type ParallelToolbarItemProps = {
@@ -9,6 +10,7 @@ type ParallelToolbarItemProps = {
// Custom component for the Parallel Tool
export default function ParallelToolbarItem({ disabled = false }: ParallelToolbarItemProps) {
const userPermissions = useUserPermissionsContext()
const handleDragStart = (e: React.DragEvent) => {
if (disabled) {
e.preventDefault()
@@ -75,7 +77,11 @@ export default function ParallelToolbarItem({ disabled = false }: ParallelToolba
return (
<Tooltip>
<TooltipTrigger asChild>{blockContent}</TooltipTrigger>
<TooltipContent>Edit permissions required to add blocks</TooltipContent>
<TooltipContent>
{userPermissions.isOfflineMode
? 'Connection lost - please refresh'
: 'Edit permissions required to add blocks'}
</TooltipContent>
</Tooltip>
)
}

View File

@@ -2,6 +2,7 @@ import { ArrowLeftRight, ArrowUpDown, Circle, CircleOff, Copy, Trash2 } from 'lu
import { Button } from '@/components/ui/button'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
import { cn } from '@/lib/utils'
import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/w/components/providers/workspace-permissions-provider'
import { useCollaborativeWorkflow } from '@/hooks/use-collaborative-workflow'
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
@@ -22,9 +23,17 @@ export function ActionBar({ blockId, blockType, disabled = false }: ActionBarPro
const horizontalHandles = useWorkflowStore(
(state) => state.blocks[blockId]?.horizontalHandles ?? false
)
const userPermissions = useUserPermissionsContext()
const isStarterBlock = blockType === 'starter'
const getTooltipMessage = (defaultMessage: string) => {
if (disabled) {
return userPermissions.isOfflineMode ? 'Connection lost - please refresh' : 'Read-only mode'
}
return defaultMessage
}
return (
<div
className={cn(
@@ -68,7 +77,7 @@ export function ActionBar({ blockId, blockType, disabled = false }: ActionBarPro
</Button>
</TooltipTrigger>
<TooltipContent side='right'>
{disabled ? 'Read-only mode' : isEnabled ? 'Disable Block' : 'Enable Block'}
{getTooltipMessage(isEnabled ? 'Disable Block' : 'Enable Block')}
</TooltipContent>
</Tooltip>
@@ -89,9 +98,7 @@ export function ActionBar({ blockId, blockType, disabled = false }: ActionBarPro
<Copy className='h-4 w-4' />
</Button>
</TooltipTrigger>
<TooltipContent side='right'>
{disabled ? 'Read-only mode' : 'Duplicate Block'}
</TooltipContent>
<TooltipContent side='right'>{getTooltipMessage('Duplicate Block')}</TooltipContent>
</Tooltip>
)}
@@ -116,7 +123,7 @@ export function ActionBar({ blockId, blockType, disabled = false }: ActionBarPro
</Button>
</TooltipTrigger>
<TooltipContent side='right'>
{disabled ? 'Read-only mode' : horizontalHandles ? 'Vertical Ports' : 'Horizontal Ports'}
{getTooltipMessage(horizontalHandles ? 'Vertical Ports' : 'Horizontal Ports')}
</TooltipContent>
</Tooltip>
@@ -140,9 +147,7 @@ export function ActionBar({ blockId, blockType, disabled = false }: ActionBarPro
<Trash2 className='h-4 w-4' />
</Button>
</TooltipTrigger>
<TooltipContent side='right'>
{disabled ? 'Read-only mode' : 'Delete Block'}
</TooltipContent>
<TooltipContent side='right'>{getTooltipMessage('Delete Block')}</TooltipContent>
</Tooltip>
)}
</div>

View File

@@ -1,3 +1,4 @@
import { RepeatIcon, SplitIcon } from 'lucide-react'
import { Card } from '@/components/ui/card'
import { cn } from '@/lib/utils'
import {
@@ -77,8 +78,20 @@ export function ConnectionBlocks({
// Get block configuration for icon and color
const blockConfig = getBlock(connection.type)
const displayName = connection.name // Use the actual block name instead of transforming it
const Icon = blockConfig?.icon
const bgColor = blockConfig?.bgColor || '#6B7280' // Fallback to gray
// Handle special blocks that aren't in the registry (loop and parallel)
let Icon = blockConfig?.icon
let bgColor = blockConfig?.bgColor || '#6B7280' // Fallback to gray
if (!blockConfig) {
if (connection.type === 'loop') {
Icon = RepeatIcon as typeof Icon
bgColor = '#2FB3FF' // Blue color for loop blocks
} else if (connection.type === 'parallel') {
Icon = SplitIcon as typeof Icon
bgColor = '#FEE12B' // Yellow color for parallel blocks
}
}
return (
<Card

View File

@@ -73,8 +73,6 @@ export function Code({
}
}, [generationType])
// State management
const [storeValue, setStoreValue] = useSubBlockValue(blockId, subBlockId)
const [code, setCode] = useState<string>('')
const [_lineCount, setLineCount] = useState(1)
const [showTags, setShowTags] = useState(false)
@@ -98,34 +96,13 @@ export function Code({
const toggleCollapsed = () => {
setCollapsedValue(blockId, collapsedStateKey, !isCollapsed)
}
// Use preview value when in preview mode, otherwise use store value or prop value
const value = isPreview ? previewValue : propValue !== undefined ? propValue : storeValue
// Create refs to hold the handlers
const handleStreamStartRef = useRef<() => void>(() => {})
const handleGeneratedContentRef = useRef<(generatedCode: string) => void>(() => {})
const handleStreamChunkRef = useRef<(chunk: string) => void>(() => {})
// AI Code Generation Hook
const handleStreamStart = () => {
setCode('')
// Optionally clear the store value too, though handleStreamChunk will update it
// setStoreValue('')
}
const handleGeneratedContent = (generatedCode: string) => {
setCode(generatedCode)
if (!isPreview && !disabled) {
setStoreValue(generatedCode)
}
}
// Handle streaming chunks directly into the editor
const handleStreamChunk = (chunk: string) => {
setCode((currentCode) => {
const newCode = currentCode + chunk
if (!isPreview && !disabled) {
setStoreValue(newCode)
}
return newCode
})
}
const {
isLoading: isAiLoading,
isStreaming: isAiStreaming,
@@ -140,11 +117,48 @@ export function Code({
} = useCodeGeneration({
generationType: generationType,
initialContext: code,
onGeneratedContent: handleGeneratedContent,
onStreamChunk: handleStreamChunk,
onStreamStart: handleStreamStart,
onGeneratedContent: (content: string) => handleGeneratedContentRef.current?.(content),
onStreamChunk: (chunk: string) => handleStreamChunkRef.current?.(chunk),
onStreamStart: () => handleStreamStartRef.current?.(),
})
// State management - useSubBlockValue with explicit streaming control
const [storeValue, setStoreValue] = useSubBlockValue(blockId, subBlockId, false, {
debounceMs: 150,
isStreaming: isAiStreaming, // Use AI streaming state directly
onStreamingEnd: () => {
logger.debug('AI streaming ended, value persisted', { blockId, subBlockId })
},
})
// Use preview value when in preview mode, otherwise use store value or prop value
const value = isPreview ? previewValue : propValue !== undefined ? propValue : storeValue
// Define the handlers now that we have access to setStoreValue
handleStreamStartRef.current = () => {
setCode('')
// Streaming state is now controlled by isAiStreaming
}
handleGeneratedContentRef.current = (generatedCode: string) => {
setCode(generatedCode)
if (!isPreview && !disabled) {
setStoreValue(generatedCode)
// Final value will be persisted when isAiStreaming becomes false
}
}
handleStreamChunkRef.current = (chunk: string) => {
setCode((currentCode) => {
const newCode = currentCode + chunk
if (!isPreview && !disabled) {
// Update the value - it won't be persisted until streaming ends
setStoreValue(newCode)
}
return newCode
})
}
// Effects
useEffect(() => {
const valueString = value?.toString() ?? ''

View File

@@ -19,7 +19,6 @@ import {
type OAuthProvider,
parseProvider,
} from '@/lib/oauth'
import { saveToStorage } from '@/stores/workflows/persistence'
const logger = createLogger('OAuthRequiredModal')
@@ -157,42 +156,11 @@ export function OAuthRequiredModal({
(scope) => !scope.includes('userinfo.email') && !scope.includes('userinfo.profile')
)
const handleRedirectToSettings = () => {
try {
// Determine the appropriate serviceId and providerId
const providerId = getProviderIdFromServiceId(effectiveServiceId)
// Store information about the required connection
saveToStorage<string>('pending_service_id', effectiveServiceId)
saveToStorage<string[]>('pending_oauth_scopes', requiredScopes)
saveToStorage<string>('pending_oauth_return_url', window.location.href)
saveToStorage<string>('pending_oauth_provider_id', providerId)
saveToStorage<boolean>('from_oauth_modal', true)
// Close the modal
onClose()
// Open the settings modal with the credentials tab
const event = new CustomEvent('open-settings', {
detail: { tab: 'credentials' },
})
window.dispatchEvent(event)
} catch (error) {
logger.error('Error redirecting to settings:', { error })
}
}
const handleConnectDirectly = async () => {
try {
// Determine the appropriate serviceId and providerId
const providerId = getProviderIdFromServiceId(effectiveServiceId)
// Store information about the required connection
saveToStorage<string>('pending_service_id', effectiveServiceId)
saveToStorage<string[]>('pending_oauth_scopes', requiredScopes)
saveToStorage<string>('pending_oauth_return_url', window.location.href)
saveToStorage<string>('pending_oauth_provider_id', providerId)
// Close the modal
onClose()
@@ -258,14 +226,6 @@ export function OAuthRequiredModal({
<Button type='button' onClick={handleConnectDirectly} className='sm:order-3'>
Connect Now
</Button>
<Button
type='button'
variant='secondary'
onClick={handleRedirectToSettings}
className='sm:order-2'
>
Go to Settings
</Button>
</DialogFooter>
</DialogContent>
</Dialog>

View File

@@ -21,31 +21,24 @@ import {
type OAuthProvider,
parseProvider,
} from '@/lib/oauth'
import { saveToStorage } from '@/stores/workflows/persistence'
import type { SubBlockConfig } from '@/blocks/types'
import { useSubBlockValue } from '../../hooks/use-sub-block-value'
import { OAuthRequiredModal } from './components/oauth-required-modal'
const logger = createLogger('CredentialSelector')
interface CredentialSelectorProps {
value: string
onChange: (value: string) => void
provider: OAuthProvider
requiredScopes?: string[]
label?: string
blockId: string
subBlock: SubBlockConfig
disabled?: boolean
serviceId?: string
isPreview?: boolean
previewValue?: any | null
}
export function CredentialSelector({
value,
onChange,
provider,
requiredScopes = [],
label = 'Select credential',
blockId,
subBlock,
disabled = false,
serviceId,
isPreview = false,
previewValue,
}: CredentialSelectorProps) {
@@ -55,14 +48,22 @@ export function CredentialSelector({
const [showOAuthModal, setShowOAuthModal] = useState(false)
const [selectedId, setSelectedId] = useState('')
// Use collaborative state management via useSubBlockValue hook
const [storeValue, setStoreValue] = useSubBlockValue(blockId, subBlock.id)
// Extract values from subBlock config
const provider = subBlock.provider as OAuthProvider
const requiredScopes = subBlock.requiredScopes || []
const label = subBlock.placeholder || 'Select credential'
const serviceId = subBlock.serviceId
// Get the effective value (preview or store value)
const effectiveValue = isPreview && previewValue !== undefined ? previewValue : storeValue
// Initialize selectedId with the effective value
useEffect(() => {
if (isPreview && previewValue !== undefined) {
setSelectedId(previewValue || '')
} else {
setSelectedId(value)
}
}, [value, isPreview, previewValue])
setSelectedId(effectiveValue || '')
}, [effectiveValue])
// Derive service and provider IDs using useMemo
const effectiveServiceId = useMemo(() => {
@@ -85,7 +86,9 @@ export function CredentialSelector({
// If we have a value but it's not in the credentials, reset it
if (selectedId && !data.credentials.some((cred: Credential) => cred.id === selectedId)) {
setSelectedId('')
onChange('')
if (!isPreview) {
setStoreValue('')
}
}
// Auto-select logic:
@@ -99,11 +102,15 @@ export function CredentialSelector({
const defaultCred = data.credentials.find((cred: Credential) => cred.isDefault)
if (defaultCred) {
setSelectedId(defaultCred.id)
onChange(defaultCred.id)
if (!isPreview) {
setStoreValue(defaultCred.id)
}
} else if (data.credentials.length === 1) {
// If only one credential, select it
setSelectedId(data.credentials[0].id)
onChange(data.credentials[0].id)
if (!isPreview) {
setStoreValue(data.credentials[0].id)
}
}
}
}
@@ -112,7 +119,7 @@ export function CredentialSelector({
} finally {
setIsLoading(false)
}
}, [effectiveProviderId, onChange, selectedId])
}, [effectiveProviderId, selectedId, isPreview, setStoreValue])
// Fetch credentials on initial mount
useEffect(() => {
@@ -121,11 +128,7 @@ export function CredentialSelector({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
// Update local state when external value changes
useEffect(() => {
const currentValue = isPreview ? previewValue : value
setSelectedId(currentValue || '')
}, [value, isPreview, previewValue])
// This effect is no longer needed since we're using effectiveValue directly
// Listen for visibility changes to update credentials when user returns from settings
useEffect(() => {
@@ -158,19 +161,13 @@ export function CredentialSelector({
const handleSelect = (credentialId: string) => {
setSelectedId(credentialId)
if (!isPreview) {
onChange(credentialId)
setStoreValue(credentialId)
}
setOpen(false)
}
// Handle adding a new credential
const handleAddCredential = () => {
// Store information about the required connection
saveToStorage<string>('pending_service_id', effectiveServiceId)
saveToStorage<string[]>('pending_oauth_scopes', requiredScopes)
saveToStorage<string>('pending_oauth_return_url', window.location.href)
saveToStorage<string>('pending_oauth_provider_id', effectiveProviderId)
// Show the OAuth modal
setShowOAuthModal(true)
setOpen(false)

View File

@@ -19,7 +19,6 @@ import {
getServiceIdFromScopes,
type OAuthProvider,
} from '@/lib/oauth'
import { saveToStorage } from '@/stores/workflows/persistence'
import { OAuthRequiredModal } from '../../credential-selector/components/oauth-required-modal'
export interface ConfluenceFileInfo {
@@ -355,15 +354,6 @@ export function ConfluenceFileSelector({
// Handle adding a new credential
const handleAddCredential = () => {
const effectiveServiceId = getServiceId()
const providerId = getProviderId()
// Store information about the required connection
saveToStorage<string>('pending_service_id', effectiveServiceId)
saveToStorage<string[]>('pending_oauth_scopes', requiredScopes)
saveToStorage<string>('pending_oauth_return_url', window.location.href)
saveToStorage<string>('pending_oauth_provider_id', providerId)
// Show the OAuth modal
setShowOAuthModal(true)
setOpen(false)

View File

@@ -24,7 +24,6 @@ import {
type OAuthProvider,
parseProvider,
} from '@/lib/oauth'
import { saveToStorage } from '@/stores/workflows/persistence'
import { OAuthRequiredModal } from '../../credential-selector/components/oauth-required-modal'
const logger = createLogger('GoogleDrivePicker')
@@ -79,6 +78,7 @@ export function GoogleDrivePicker({
const [isLoading, setIsLoading] = useState(false)
const [isLoadingSelectedFile, setIsLoadingSelectedFile] = useState(false)
const [showOAuthModal, setShowOAuthModal] = useState(false)
const [credentialsLoaded, setCredentialsLoaded] = useState(false)
const initialFetchRef = useRef(false)
const [openPicker, _authResponse] = useDrivePicker()
@@ -97,6 +97,7 @@ export function GoogleDrivePicker({
// Fetch available credentials for this provider
const fetchCredentials = useCallback(async () => {
setIsLoading(true)
setCredentialsLoaded(false)
try {
const providerId = getProviderId()
const response = await fetch(`/api/auth/oauth/credentials?provider=${providerId}`)
@@ -128,6 +129,7 @@ export function GoogleDrivePicker({
logger.error('Error fetching credentials:', { error })
} finally {
setIsLoading(false)
setCredentialsLoaded(true)
}
}, [provider, getProviderId, selectedCredentialId])
@@ -154,9 +156,16 @@ export function GoogleDrivePicker({
return data.file
}
} else {
logger.error('Error fetching file by ID:', {
error: await response.text(),
})
const errorText = await response.text()
logger.error('Error fetching file by ID:', { error: errorText })
// If file not found or access denied, clear the selection
if (response.status === 404 || response.status === 403) {
logger.info('File not accessible, clearing selection')
setSelectedFileId('')
onChange('')
onFileInfoChange?.(null)
}
}
return null
} catch (error) {
@@ -166,7 +175,7 @@ export function GoogleDrivePicker({
setIsLoadingSelectedFile(false)
}
},
[selectedCredentialId, onFileInfoChange]
[selectedCredentialId, onChange, onFileInfoChange]
)
// Fetch credentials on initial mount
@@ -177,20 +186,61 @@ export function GoogleDrivePicker({
}
}, [fetchCredentials])
// Fetch the selected file metadata once credentials are loaded or changed
useEffect(() => {
// If we have a file ID selected and credentials are ready but we still don't have the file info, fetch it
if (value && selectedCredentialId && !selectedFile) {
fetchFileById(value)
}
}, [value, selectedCredentialId, selectedFile, fetchFileById])
// Keep internal selectedFileId in sync with the value prop
useEffect(() => {
if (value !== selectedFileId) {
const previousFileId = selectedFileId
setSelectedFileId(value)
// Only clear selected file info if we had a different file before (not initial load)
if (previousFileId && previousFileId !== value && selectedFile) {
setSelectedFile(null)
}
}
}, [value])
}, [value, selectedFileId, selectedFile])
// Track previous credential ID to detect changes
const prevCredentialIdRef = useRef<string>('')
// Clear selected file when credentials are removed or changed
useEffect(() => {
const prevCredentialId = prevCredentialIdRef.current
prevCredentialIdRef.current = selectedCredentialId
if (!selectedCredentialId) {
// No credentials - clear everything
if (selectedFile) {
setSelectedFile(null)
setSelectedFileId('')
onChange('')
}
} else if (prevCredentialId && prevCredentialId !== selectedCredentialId) {
// Credentials changed (not initial load) - clear file info to force refetch
if (selectedFile) {
setSelectedFile(null)
}
}
}, [selectedCredentialId, selectedFile, onChange])
// Fetch the selected file metadata once credentials are loaded or changed
useEffect(() => {
// Only fetch if we have both a file ID and credentials, credentials are loaded, but no file info yet
if (
value &&
selectedCredentialId &&
credentialsLoaded &&
!selectedFile &&
!isLoadingSelectedFile
) {
fetchFileById(value)
}
}, [
value,
selectedCredentialId,
credentialsLoaded,
selectedFile,
isLoadingSelectedFile,
fetchFileById,
])
// Fetch the access token for the selected credential
const fetchAccessToken = async (): Promise<string | null> => {
@@ -286,15 +336,6 @@ export function GoogleDrivePicker({
// Handle adding a new credential
const handleAddCredential = () => {
const effectiveServiceId = getServiceId()
const providerId = getProviderId()
// Store information about the required connection
saveToStorage<string>('pending_service_id', effectiveServiceId)
saveToStorage<string[]>('pending_oauth_scopes', requiredScopes)
saveToStorage<string>('pending_oauth_return_url', window.location.href)
saveToStorage<string>('pending_oauth_provider_id', providerId)
// Show the OAuth modal
setShowOAuthModal(true)
setOpen(false)
@@ -399,7 +440,7 @@ export function GoogleDrivePicker({
{getFileIcon(selectedFile, 'sm')}
<span className='truncate font-normal'>{selectedFile.name}</span>
</div>
) : selectedFileId && (isLoadingSelectedFile || !selectedCredentialId) ? (
) : selectedFileId && isLoadingSelectedFile && selectedCredentialId ? (
<div className='flex items-center gap-2'>
<RefreshCw className='h-4 w-4 animate-spin' />
<span className='text-muted-foreground'>Loading document...</span>

View File

@@ -20,7 +20,6 @@ import {
getServiceIdFromScopes,
type OAuthProvider,
} from '@/lib/oauth'
import { saveToStorage } from '@/stores/workflows/persistence'
import { OAuthRequiredModal } from '../../credential-selector/components/oauth-required-modal'
const logger = new Logger('jira_issue_selector')
@@ -420,15 +419,6 @@ export function JiraIssueSelector({
// Handle adding a new credential
const handleAddCredential = () => {
const effectiveServiceId = getServiceId()
const providerId = getProviderId()
// Store information about the required connection
saveToStorage<string>('pending_service_id', effectiveServiceId)
saveToStorage<string[]>('pending_oauth_scopes', requiredScopes)
saveToStorage<string>('pending_oauth_return_url', window.location.href)
saveToStorage<string>('pending_oauth_provider_id', providerId)
// Show the OAuth modal
setShowOAuthModal(true)
setOpen(false)

View File

@@ -23,7 +23,6 @@ import {
type OAuthProvider,
parseProvider,
} from '@/lib/oauth'
import { saveToStorage } from '@/stores/workflows/persistence'
import { OAuthRequiredModal } from '../../credential-selector/components/oauth-required-modal'
const logger = createLogger('MicrosoftFileSelector')
@@ -75,6 +74,7 @@ export function MicrosoftFileSelector({
const [availableFiles, setAvailableFiles] = useState<MicrosoftFileInfo[]>([])
const [searchQuery, setSearchQuery] = useState<string>('')
const [showOAuthModal, setShowOAuthModal] = useState(false)
const [credentialsLoaded, setCredentialsLoaded] = useState(false)
const initialFetchRef = useRef(false)
// Determine the appropriate service ID based on provider and scopes
@@ -92,6 +92,7 @@ export function MicrosoftFileSelector({
// Fetch available credentials for this provider
const fetchCredentials = useCallback(async () => {
setIsLoading(true)
setCredentialsLoaded(false)
try {
const providerId = getProviderId()
const response = await fetch(`/api/auth/oauth/credentials?provider=${providerId}`)
@@ -123,6 +124,7 @@ export function MicrosoftFileSelector({
logger.error('Error fetching credentials:', { error })
} finally {
setIsLoading(false)
setCredentialsLoaded(true)
}
}, [provider, getProviderId, selectedCredentialId])
@@ -183,9 +185,16 @@ export function MicrosoftFileSelector({
return data.file
}
} else {
logger.error('Error fetching file by ID:', {
error: await response.text(),
})
const errorText = await response.text()
logger.error('Error fetching file by ID:', { error: errorText })
// If file not found or access denied, clear the selection
if (response.status === 404 || response.status === 403) {
logger.info('File not accessible, clearing selection')
setSelectedFileId('')
onChange('')
onFileInfoChange?.(null)
}
}
return null
} catch (error) {
@@ -224,20 +233,61 @@ export function MicrosoftFileSelector({
}
}, [searchQuery, selectedCredentialId, fetchAvailableFiles])
// Fetch the selected file metadata once credentials are loaded or changed
useEffect(() => {
// If we have a file ID selected and credentials are ready but we still don't have the file info, fetch it
if (value && selectedCredentialId && !selectedFile) {
fetchFileById(value)
}
}, [value, selectedCredentialId, selectedFile, fetchFileById])
// Keep internal selectedFileId in sync with the value prop
useEffect(() => {
if (value !== selectedFileId) {
const previousFileId = selectedFileId
setSelectedFileId(value)
// Only clear selected file info if we had a different file before (not initial load)
if (previousFileId && previousFileId !== value && selectedFile) {
setSelectedFile(null)
}
}
}, [value])
}, [value, selectedFileId, selectedFile])
// Track previous credential ID to detect changes
const prevCredentialIdRef = useRef<string>('')
// Clear selected file when credentials are removed or changed
useEffect(() => {
const prevCredentialId = prevCredentialIdRef.current
prevCredentialIdRef.current = selectedCredentialId
if (!selectedCredentialId) {
// No credentials - clear everything
if (selectedFile) {
setSelectedFile(null)
setSelectedFileId('')
onChange('')
}
} else if (prevCredentialId && prevCredentialId !== selectedCredentialId) {
// Credentials changed (not initial load) - clear file info to force refetch
if (selectedFile) {
setSelectedFile(null)
}
}
}, [selectedCredentialId, selectedFile, onChange])
// Fetch the selected file metadata once credentials are loaded or changed
useEffect(() => {
// Only fetch if we have both a file ID and credentials, credentials are loaded, but no file info yet
if (
value &&
selectedCredentialId &&
credentialsLoaded &&
!selectedFile &&
!isLoadingSelectedFile
) {
fetchFileById(value)
}
}, [
value,
selectedCredentialId,
credentialsLoaded,
selectedFile,
isLoadingSelectedFile,
fetchFileById,
])
// Handle selecting a file from the available files
const handleFileSelect = (file: MicrosoftFileInfo) => {
@@ -251,15 +301,6 @@ export function MicrosoftFileSelector({
// Handle adding a new credential
const handleAddCredential = () => {
const effectiveServiceId = getServiceId()
const providerId = getProviderId()
// Store information about the required connection
saveToStorage<string>('pending_service_id', effectiveServiceId)
saveToStorage<string[]>('pending_oauth_scopes', requiredScopes)
saveToStorage<string>('pending_oauth_return_url', window.location.href)
saveToStorage<string>('pending_oauth_provider_id', providerId)
// Show the OAuth modal
setShowOAuthModal(true)
setOpen(false)
@@ -381,7 +422,7 @@ export function MicrosoftFileSelector({
{getFileIcon(selectedFile, 'sm')}
<span className='truncate font-normal'>{selectedFile.name}</span>
</div>
) : selectedFileId && (isLoadingSelectedFile || !selectedCredentialId) ? (
) : selectedFileId && isLoadingSelectedFile && selectedCredentialId ? (
<div className='flex items-center gap-2'>
<RefreshCw className='h-4 w-4 animate-spin' />
<span className='text-muted-foreground'>Loading document...</span>

View File

@@ -20,7 +20,6 @@ import {
getServiceIdFromScopes,
type OAuthProvider,
} from '@/lib/oauth'
import { saveToStorage } from '@/stores/workflows/persistence'
import { OAuthRequiredModal } from '../../credential-selector/components/oauth-required-modal'
const logger = new Logger('TeamsMessageSelector')
@@ -399,15 +398,6 @@ export function TeamsMessageSelector({
// Handle adding a new credential
const handleAddCredential = () => {
const effectiveServiceId = getServiceId()
const providerId = getProviderId()
// Store information about the required connection
saveToStorage<string>('pending_service_id', effectiveServiceId)
saveToStorage<string[]>('pending_oauth_scopes', requiredScopes)
saveToStorage<string>('pending_oauth_return_url', window.location.href)
saveToStorage<string>('pending_oauth_provider_id', providerId)
// Show the OAuth modal
setShowOAuthModal(true)
setOpen(false)

View File

@@ -16,7 +16,6 @@ import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover
import { createLogger } from '@/lib/logs/console-logger'
import { type Credential, getProviderIdFromServiceId, getServiceIdFromScopes } from '@/lib/oauth'
import { OAuthRequiredModal } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/components/credential-selector/components/oauth-required-modal'
import { saveToStorage } from '@/stores/workflows/persistence'
const logger = createLogger('FolderSelector')
@@ -274,15 +273,6 @@ export function FolderSelector({
// Handle adding a new credential
const handleAddCredential = () => {
const effectiveServiceId = getServiceId()
const providerId = getProviderId()
// Store information about the required connection
saveToStorage<string>('pending_service_id', effectiveServiceId)
saveToStorage<string[]>('pending_oauth_scopes', requiredScopes)
saveToStorage<string>('pending_oauth_return_url', window.location.href)
saveToStorage<string>('pending_oauth_provider_id', providerId)
// Show the OAuth modal
setShowOAuthModal(true)
setOpen(false)

View File

@@ -20,7 +20,6 @@ import {
getServiceIdFromScopes,
type OAuthProvider,
} from '@/lib/oauth'
import { saveToStorage } from '@/stores/workflows/persistence'
import { OAuthRequiredModal } from '../../credential-selector/components/oauth-required-modal'
const logger = new Logger('jira_project_selector')
@@ -371,15 +370,6 @@ export function JiraProjectSelector({
// Handle adding a new credential
const handleAddCredential = () => {
const effectiveServiceId = getServiceId()
const providerId = getProviderId()
// Store information about the required connection
saveToStorage<string>('pending_service_id', effectiveServiceId)
saveToStorage<string[]>('pending_oauth_scopes', requiredScopes)
saveToStorage<string>('pending_oauth_return_url', window.location.href)
saveToStorage<string>('pending_oauth_provider_id', providerId)
// Show the OAuth modal
setShowOAuthModal(true)
setOpen(false)

View File

@@ -50,7 +50,11 @@ export function ResponseFormat({
isPreview = false,
previewValue,
}: ResponseFormatProps) {
const [storeValue, setStoreValue] = useSubBlockValue<JSONProperty[]>(blockId, subBlockId)
// useSubBlockValue now includes debouncing by default
const [storeValue, setStoreValue] = useSubBlockValue<JSONProperty[]>(blockId, subBlockId, false, {
debounceMs: 200, // Slightly longer debounce for complex structures
})
const [showPreview, setShowPreview] = useState(false)
const value = isPreview ? previewValue : storeValue
@@ -290,7 +294,13 @@ export function ResponseFormat({
{showPreview && (
<div className='rounded border bg-muted/30 p-2'>
<pre className='max-h-32 overflow-auto text-xs'>
{JSON.stringify(generateJSON(properties), null, 2)}
{(() => {
try {
return JSON.stringify(generateJSON(properties), null, 2)
} catch (error) {
return `Error generating preview: ${error instanceof Error ? error.message : 'Unknown error'}`
}
})()}
</pre>
</div>
)}

View File

@@ -0,0 +1,213 @@
import { useCallback, useEffect, useState } from 'react'
import { Check, ChevronDown, ExternalLink, Plus, RefreshCw } from 'lucide-react'
import { Button } from '@/components/ui/button'
import {
Command,
CommandEmpty,
CommandGroup,
CommandItem,
CommandList,
} from '@/components/ui/command'
import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover'
import { createLogger } from '@/lib/logs/console-logger'
import {
type Credential,
OAUTH_PROVIDERS,
type OAuthProvider,
type OAuthService,
parseProvider,
} from '@/lib/oauth'
import { OAuthRequiredModal } from '../../credential-selector/components/oauth-required-modal'
const logger = createLogger('ToolCredentialSelector')
// Helper functions for provider icons and names
// Resolve the display icon for an OAuth provider. Compound providers
// (e.g. "google-drive") are reduced to their base provider via parseProvider
// so the UI shows one consistent icon per vendor.
const getProviderIcon = (providerName: OAuthProvider) => {
  const { baseProvider } = parseProvider(providerName)
  const config = OAUTH_PROVIDERS[baseProvider]
  // Unknown/unregistered providers fall back to a generic external-link glyph
  return config ? config.icon({ className: 'h-4 w-4' }) : <ExternalLink className='h-4 w-4' />
}
// Resolve a human-readable display name for an OAuth provider, preferring the
// registered base-provider name from OAUTH_PROVIDERS.
const getProviderName = (providerName: OAuthProvider) => {
  const { baseProvider } = parseProvider(providerName)
  const config = OAUTH_PROVIDERS[baseProvider]
  if (config) {
    return config.name
  }
  // Fallback: title-case each dash-separated segment of the raw provider id
  // (e.g. "my-provider" -> "My Provider")
  const segments = providerName.split('-')
  const titled = segments.map((segment) => segment.charAt(0).toUpperCase() + segment.slice(1))
  return titled.join(' ')
}
/**
 * Props for the ToolCredentialSelector dropdown.
 * Controlled component: the selected credential id is held by the parent and
 * updated through onChange.
 */
interface ToolCredentialSelectorProps {
// Currently selected credential id ('' when nothing is selected)
value: string
// Called with the newly selected credential id (or '' when the selection is cleared)
onChange: (value: string) => void
// OAuth provider whose credentials should be listed
provider: OAuthProvider
// Scopes required by the tool; passed through to the OAuth connect flow
requiredScopes?: string[]
// Placeholder text shown when no credential is selected (defaults to 'Select account')
label?: string
// Optional service identifier — NOTE(review): not visibly read in this chunk; verify usage downstream
serviceId?: OAuthService
// Disables the trigger button when true
disabled?: boolean
}
/**
 * Combobox for picking an OAuth credential (connected account) for a tool.
 *
 * Fetches the caller's saved credentials for the given provider from
 * `/api/auth/oauth/credentials`, lets the user pick one, and offers a
 * "Connect account" item that opens the OAuth modal. When the modal closes,
 * credentials are refetched so newly connected accounts appear immediately.
 */
export function ToolCredentialSelector({
value,
onChange,
provider,
requiredScopes = [],
label = 'Select account',
serviceId,
disabled = false,
}: ToolCredentialSelectorProps) {
// Popover open state for the combobox.
const [open, setOpen] = useState(false)
// Credentials fetched for this provider.
const [credentials, setCredentials] = useState<Credential[]>([])
// True while the credential fetch is in flight (drives the spinner).
const [isLoading, setIsLoading] = useState(false)
// Controls the OAuthRequiredModal used to connect a new account.
const [showOAuthModal, setShowOAuthModal] = useState(false)
// Local mirror of the selected credential id, kept in sync with `value`.
const [selectedId, setSelectedId] = useState('')
// Update selected ID when value changes
useEffect(() => {
setSelectedId(value)
}, [value])
// NOTE(review): this callback depends on `value` and `onChange`, so the
// fetch effect below re-runs whenever either changes, not only on provider
// changes — presumably intentional to re-validate the selection; confirm.
const fetchCredentials = useCallback(async () => {
setIsLoading(true)
try {
const response = await fetch(`/api/auth/oauth/credentials?provider=${provider}`)
if (response.ok) {
const data = await response.json()
setCredentials(data.credentials || [])
// If we have a selected value but it's not in the credentials list, clear it
if (value && !data.credentials?.some((cred: Credential) => cred.id === value)) {
onChange('')
}
} else {
logger.error('Error fetching credentials:', { error: await response.text() })
setCredentials([])
}
} catch (error) {
logger.error('Error fetching credentials:', { error })
setCredentials([])
} finally {
setIsLoading(false)
}
}, [provider, value, onChange])
// Fetch credentials on mount and when provider changes
useEffect(() => {
fetchCredentials()
}, [fetchCredentials])
// Commit a selection: update local state, notify the parent, close the popover.
const handleSelect = (credentialId: string) => {
setSelectedId(credentialId)
onChange(credentialId)
setOpen(false)
}
const handleOAuthClose = () => {
setShowOAuthModal(false)
// Refetch credentials to include any new ones
fetchCredentials()
}
// Resolve the currently selected credential (undefined when none matches).
const selectedCredential = credentials.find((cred) => cred.id === selectedId)
return (
<>
<Popover open={open} onOpenChange={setOpen}>
<PopoverTrigger asChild>
<Button
variant='outline'
role='combobox'
aria-expanded={open}
className='w-full justify-between'
disabled={disabled}
>
{selectedCredential ? (
<div className='flex items-center gap-2 overflow-hidden'>
{getProviderIcon(provider)}
<span className='truncate font-normal'>{selectedCredential.name}</span>
</div>
) : (
<div className='flex items-center gap-2'>
{getProviderIcon(provider)}
<span className='text-muted-foreground'>{label}</span>
</div>
)}
<ChevronDown className='ml-2 h-4 w-4 shrink-0 opacity-50' />
</Button>
</PopoverTrigger>
<PopoverContent className='w-[300px] p-0' align='start'>
<Command>
<CommandList>
<CommandEmpty>
{isLoading ? (
<div className='flex items-center justify-center p-4'>
<RefreshCw className='h-4 w-4 animate-spin' />
<span className='ml-2'>Loading...</span>
</div>
) : credentials.length === 0 ? (
<div className='p-4 text-center'>
<p className='font-medium text-sm'>No accounts connected.</p>
<p className='text-muted-foreground text-xs'>
Connect a {getProviderName(provider)} account to continue.
</p>
</div>
) : (
<div className='p-4 text-center'>
<p className='font-medium text-sm'>No accounts found.</p>
</div>
)}
</CommandEmpty>
{credentials.length > 0 && (
<CommandGroup>
{credentials.map((credential) => (
<CommandItem
key={credential.id}
value={credential.id}
onSelect={() => handleSelect(credential.id)}
>
<div className='flex items-center gap-2'>
{getProviderIcon(credential.provider)}
<span className='font-normal'>{credential.name}</span>
</div>
{credential.id === selectedId && <Check className='ml-auto h-4 w-4' />}
</CommandItem>
))}
</CommandGroup>
)}
<CommandGroup>
<CommandItem onSelect={() => setShowOAuthModal(true)}>
<div className='flex items-center gap-2'>
<Plus className='h-4 w-4' />
<span className='font-normal'>Connect {getProviderName(provider)} account</span>
</div>
</CommandItem>
</CommandGroup>
</CommandList>
</Command>
</PopoverContent>
</Popover>
<OAuthRequiredModal
isOpen={showOAuthModal}
onClose={handleOAuthClose}
provider={provider}
toolName={label}
requiredScopes={requiredScopes}
serviceId={serviceId}
/>
</>
)
}

View File

@@ -22,10 +22,10 @@ import { useWorkflowStore } from '@/stores/workflows/workflow/store'
import { getTool } from '@/tools/utils'
import { useSubBlockValue } from '../../hooks/use-sub-block-value'
import { ChannelSelectorInput } from '../channel-selector/channel-selector-input'
import { CredentialSelector } from '../credential-selector/credential-selector'
import { ShortInput } from '../short-input'
import { type CustomTool, CustomToolModal } from './components/custom-tool-modal/custom-tool-modal'
import { ToolCommand } from './components/tool-command/tool-command'
import { ToolCredentialSelector } from './components/tool-credential-selector'
interface ToolInputProps {
blockId: string
@@ -347,6 +347,8 @@ export function ToolInput({
const [customToolModalOpen, setCustomToolModalOpen] = useState(false)
const [editingToolIndex, setEditingToolIndex] = useState<number | null>(null)
const [searchQuery, setSearchQuery] = useState('')
const [draggedIndex, setDraggedIndex] = useState<number | null>(null)
const [dragOverIndex, setDragOverIndex] = useState<number | null>(null)
const isWide = useWorkflowStore((state) => state.blocks[blockId]?.isWide)
const customTools = useCustomToolsStore((state) => state.getAllTools())
const subBlockStore = useSubBlockStore()
@@ -668,6 +670,46 @@ export function ToolInput({
)
}
// Begins a drag gesture for the tool card at the given position.
// No-op in preview mode or when the input is disabled.
const handleDragStart = (event: React.DragEvent, sourceIndex: number) => {
  if (isPreview || disabled) return
  setDraggedIndex(sourceIndex)
  event.dataTransfer.effectAllowed = 'move'
  // Some browsers require drag data to be present for the drag to begin.
  event.dataTransfer.setData('text/html', '')
}
// Tracks which card the dragged tool is currently hovering over so the
// drop indicator can be rendered; preventDefault marks this a valid target.
const handleDragOver = (event: React.DragEvent, hoverIndex: number) => {
  const dragInactive = isPreview || disabled || draggedIndex === null
  if (dragInactive) return
  event.preventDefault()
  event.dataTransfer.dropEffect = 'move'
  setDragOverIndex(hoverIndex)
}
// Clears all drag state once the gesture finishes (after a drop or a cancel).
const handleDragEnd = () => {
setDraggedIndex(null)
setDragOverIndex(null)
}
// Finalizes a reorder: removes the dragged tool from its original slot,
// inserts it at the drop position, persists the new order, and resets state.
const handleDrop = (event: React.DragEvent, dropIndex: number) => {
  if (isPreview || disabled || draggedIndex === null || draggedIndex === dropIndex) return
  event.preventDefault()
  const reordered = [...selectedTools]
  const [movedTool] = reordered.splice(draggedIndex, 1)
  if (dropIndex === selectedTools.length) {
    // Dropped on the trailing drop zone — append at the end.
    reordered.push(movedTool)
  } else {
    // Removing the dragged item shifts every later index down by one.
    const insertAt = draggedIndex < dropIndex ? dropIndex - 1 : dropIndex
    reordered.splice(insertAt, 0, movedTool)
  }
  setStoreValue(reordered)
  setDraggedIndex(null)
  setDragOverIndex(null)
}
const IconComponent = ({ icon: Icon, className }: { icon: any; className?: string }) => {
if (!Icon) return null
return <Icon className={className} />
@@ -827,9 +869,34 @@ export function ToolInput({
return (
<div
key={`${tool.type}-${toolIndex}`}
className={cn('group flex flex-col', isWide ? 'w-[calc(50%-0.25rem)]' : 'w-full')}
className={cn(
'group relative flex flex-col transition-all duration-200 ease-in-out',
isWide ? 'w-[calc(50%-0.25rem)]' : 'w-full',
draggedIndex === toolIndex ? 'scale-95 opacity-40' : '',
dragOverIndex === toolIndex && draggedIndex !== toolIndex && draggedIndex !== null
? 'translate-y-1 transform'
: '',
selectedTools.length > 1 && !isPreview && !disabled
? 'cursor-grab active:cursor-grabbing'
: ''
)}
draggable={!isPreview && !disabled}
onDragStart={(e) => handleDragStart(e, toolIndex)}
onDragOver={(e) => handleDragOver(e, toolIndex)}
onDragEnd={handleDragEnd}
onDrop={(e) => handleDrop(e, toolIndex)}
>
<div className='flex flex-col overflow-visible rounded-md border bg-card'>
{/* Subtle drop indicator - use border highlight instead of separate line */}
<div
className={cn(
'flex flex-col overflow-visible rounded-md border bg-card',
dragOverIndex === toolIndex &&
draggedIndex !== toolIndex &&
draggedIndex !== null
? 'border-t-2 border-t-muted-foreground/40'
: ''
)}
>
<div
className={cn(
'flex items-center justify-between bg-accent/50 p-2',
@@ -993,13 +1060,14 @@ export function ToolInput({
<div className='font-medium text-muted-foreground text-xs'>
Account
</div>
<CredentialSelector
<ToolCredentialSelector
value={tool.params.credential || ''}
onChange={(value) => handleCredentialChange(toolIndex, value)}
provider={oauthConfig.provider as OAuthProvider}
requiredScopes={oauthConfig.additionalScopes || []}
label={`Select ${oauthConfig.provider} account`}
serviceId={oauthConfig.provider}
disabled={disabled}
/>
</div>
)
@@ -1091,6 +1159,20 @@ export function ToolInput({
)
})}
{/* Drop zone for the end of the list */}
{selectedTools.length > 0 && draggedIndex !== null && (
<div
className={cn(
'h-2 w-full rounded transition-all duration-200 ease-in-out',
dragOverIndex === selectedTools.length
? 'border-b-2 border-b-muted-foreground/40'
: ''
)}
onDragOver={(e) => handleDragOver(e, selectedTools.length)}
onDrop={(e) => handleDrop(e, selectedTools.length)}
/>
)}
<Popover open={open} onOpenChange={setOpen}>
<PopoverTrigger asChild>
<Button

View File

@@ -16,7 +16,7 @@ import { createLogger } from '@/lib/logs/console-logger'
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
import { useSubBlockValue } from '../../hooks/use-sub-block-value'
import { CredentialSelector } from '../credential-selector/credential-selector'
import { ToolCredentialSelector } from '../tool-input/components/tool-credential-selector'
import { WebhookModal } from './components/webhook-modal'
const logger = createLogger('WebhookConfig')
@@ -564,7 +564,7 @@ export function WebhookConfig({
{error && <div className='mb-2 text-red-500 text-sm dark:text-red-400'>{error}</div>}
<div className='mb-3'>
<CredentialSelector
<ToolCredentialSelector
value={gmailCredentialId}
onChange={handleCredentialChange}
provider='google-email'

View File

@@ -1,11 +1,15 @@
import { useCallback, useEffect, useRef } from 'react'
import { isEqual } from 'lodash'
import { createLogger } from '@/lib/logs/console-logger'
import { useCollaborativeWorkflow } from '@/hooks/use-collaborative-workflow'
import { getProviderFromModel } from '@/providers/utils'
import { useGeneralStore } from '@/stores/settings/general/store'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
const logger = createLogger('SubBlockValue')
// Helper function to dispatch collaborative subblock updates
const dispatchSubblockUpdate = (blockId: string, subBlockId: string, value: any) => {
const event = new CustomEvent('update-subblock-value', {
@@ -154,20 +158,31 @@ function storeApiKeyValue(
}
}
interface UseSubBlockValueOptions {
debounceMs?: number
isStreaming?: boolean // Explicit streaming state
onStreamingEnd?: () => void
}
/**
* Custom hook to get and set values for a sub-block in a workflow.
* Handles complex object values properly by using deep equality comparison.
* Includes automatic debouncing and explicit streaming mode for AI generation.
*
* @param blockId The ID of the block containing the sub-block
* @param subBlockId The ID of the sub-block
* @param triggerWorkflowUpdate Whether to trigger a workflow update when the value changes
* @returns A tuple containing the current value and a setter function
* @param options Configuration for debouncing and streaming behavior
* @returns A tuple containing the current value and setter function
*/
export function useSubBlockValue<T = any>(
blockId: string,
subBlockId: string,
triggerWorkflowUpdate = false
triggerWorkflowUpdate = false,
options?: UseSubBlockValueOptions
): readonly [T | null, (value: T) => void] {
const { debounceMs = 150, isStreaming = false, onStreamingEnd } = options || {}
const { collaborativeSetSubblockValue } = useCollaborativeWorkflow()
const blockType = useWorkflowStore(
@@ -187,6 +202,12 @@ export function useSubBlockValue<T = any>(
// Previous model reference for detecting model changes
const prevModelRef = useRef<string | null>(null)
// Debouncing refs
const debounceTimerRef = useRef<NodeJS.Timeout | null>(null)
const lastEmittedValueRef = useRef<T | null>(null)
const streamingValueRef = useRef<T | null>(null)
const wasStreamingRef = useRef<boolean>(false)
// Get value from subblock store - always call this hook unconditionally
const storeValue = useSubBlockStore(
useCallback((state) => state.getValue(blockId, subBlockId), [blockId, subBlockId])
@@ -211,6 +232,36 @@ export function useSubBlockValue<T = any>(
// Compute the modelValue based on block type
const modelValue = isProviderBasedBlock ? (modelSubBlockValue as string) : null
// Cleanup timer on unmount
useEffect(() => {
return () => {
if (debounceTimerRef.current) {
clearTimeout(debounceTimerRef.current)
}
}
}, [])
// Emit the value to socket/DB
const emitValue = useCallback(
(value: T) => {
collaborativeSetSubblockValue(blockId, subBlockId, value)
lastEmittedValueRef.current = value
},
[blockId, subBlockId, collaborativeSetSubblockValue]
)
// Handle streaming mode changes
useEffect(() => {
// If we just exited streaming mode, emit the final value
if (wasStreamingRef.current && !isStreaming && streamingValueRef.current !== null) {
logger.debug('Streaming ended, persisting final value', { blockId, subBlockId })
emitValue(streamingValueRef.current)
streamingValueRef.current = null
onStreamingEnd?.()
}
wasStreamingRef.current = isStreaming
}, [isStreaming, blockId, subBlockId, emitValue, onStreamingEnd])
// Hook to set a value in the subblock store
const setValue = useCallback(
(newValue: T) => {
@@ -218,6 +269,22 @@ export function useSubBlockValue<T = any>(
if (!isEqual(valueRef.current, newValue)) {
valueRef.current = newValue
// Always update local store immediately for UI responsiveness
useSubBlockStore.setState((state) => ({
workflowValues: {
...state.workflowValues,
[useWorkflowRegistry.getState().activeWorkflowId || '']: {
...state.workflowValues[useWorkflowRegistry.getState().activeWorkflowId || ''],
[blockId]: {
...state.workflowValues[useWorkflowRegistry.getState().activeWorkflowId || '']?.[
blockId
],
[subBlockId]: newValue,
},
},
},
}))
// Ensure we're passing the actual value, not a reference that might change
const valueCopy =
newValue === null
@@ -231,8 +298,27 @@ export function useSubBlockValue<T = any>(
storeApiKeyValue(blockId, blockType, modelValue, newValue, storeValue)
}
// Use collaborative function which handles both local store update and socket emission
collaborativeSetSubblockValue(blockId, subBlockId, valueCopy)
// Clear any existing debounce timer
if (debounceTimerRef.current) {
clearTimeout(debounceTimerRef.current)
debounceTimerRef.current = null
}
// If streaming, just store the value without emitting
if (isStreaming) {
streamingValueRef.current = valueCopy
} else {
// Detect large changes for extended debounce
const isLargeChange = detectLargeChange(lastEmittedValueRef.current, valueCopy)
const effectiveDebounceMs = isLargeChange ? debounceMs * 2 : debounceMs
// Debounce the socket emission
debounceTimerRef.current = setTimeout(() => {
if (valueRef.current !== null && valueRef.current !== lastEmittedValueRef.current) {
emitValue(valueCopy)
}
}, effectiveDebounceMs)
}
if (triggerWorkflowUpdate) {
useWorkflowStore.getState().triggerUpdate()
@@ -247,7 +333,9 @@ export function useSubBlockValue<T = any>(
storeValue,
triggerWorkflowUpdate,
modelValue,
collaborativeSetSubblockValue,
isStreaming,
debounceMs,
emitValue,
]
)
@@ -320,5 +408,29 @@ export function useSubBlockValue<T = any>(
}
}, [storeValue, initialValue])
// Return appropriate tuple based on whether options were provided
return [storeValue !== undefined ? storeValue : initialValue, setValue] as const
}
// Helper function to detect large changes
/**
 * Heuristically classifies the delta between two subblock values as "large".
 *
 * Large changes (bulk pastes/deletions) get an extended debounce window
 * before the value is emitted, coalescing them into fewer updates.
 *
 * @param oldValue Previously emitted value (may be null/undefined).
 * @param newValue Candidate value about to be emitted.
 * @returns true when the change should be treated as large.
 */
function detectLargeChange(oldValue: any, newValue: any): boolean {
  const oldMissing = oldValue == null
  const newMissing = newValue == null
  if (oldMissing || newMissing) {
    // Large only when exactly one side is absent — the value appeared or
    // disappeared wholesale. Both absent means nothing changed.
    return oldMissing !== newMissing
  }
  if (typeof oldValue === 'string' && typeof newValue === 'string') {
    // Strings: more than 50 characters added/removed at once is large.
    return Math.abs(newValue.length - oldValue.length) > 50
  }
  if (Array.isArray(oldValue) && Array.isArray(newValue)) {
    // Arrays: more than 5 elements added/removed at once is large.
    return Math.abs(newValue.length - oldValue.length) > 5
  }
  // Any other type pairing is treated as a small change.
  return false
}

View File

@@ -297,27 +297,11 @@ export function SubBlock({
case 'oauth-input':
return (
<CredentialSelector
value={
isPreview ? previewValue || '' : typeof config.value === 'string' ? config.value : ''
}
onChange={(value) => {
// Only allow changes in non-preview mode and when not disabled
if (!isPreview && !disabled) {
const event = new CustomEvent('update-subblock-value', {
detail: {
blockId,
subBlockId: config.id,
value,
},
})
window.dispatchEvent(event)
}
}}
provider={config.provider as any}
requiredScopes={config.requiredScopes || []}
label={config.placeholder || 'Select a credential'}
serviceId={config.serviceId}
blockId={blockId}
subBlock={config}
disabled={isDisabled}
isPreview={isPreview}
previewValue={previewValue}
/>
)
case 'file-selector':

View File

@@ -654,7 +654,9 @@ export function WorkflowBlock({ id, data }: NodeProps<WorkflowBlockProps>) {
</TooltipTrigger>
<TooltipContent side='top'>
{!userPermissions.canEdit
? 'Read-only mode'
? userPermissions.isOfflineMode
? 'Connection lost - please refresh'
: 'Read-only mode'
: blockAdvancedMode
? 'Switch to Basic Mode'
: 'Switch to Advanced Mode'}
@@ -750,7 +752,9 @@ export function WorkflowBlock({ id, data }: NodeProps<WorkflowBlockProps>) {
</TooltipTrigger>
<TooltipContent side='top'>
{!userPermissions.canEdit
? 'Read-only mode'
? userPermissions.isOfflineMode
? 'Connection lost - please refresh'
: 'Read-only mode'
: isWide
? 'Narrow Block'
: 'Expand Block'}

View File

@@ -29,6 +29,35 @@ export interface ConnectedBlock {
}
}
/**
 * Safely normalizes a stored response-format value.
 *
 * Accepts already-parsed objects as-is, keeps `<variable>` references
 * verbatim (they are resolved later, not JSON), parses JSON strings, and
 * returns undefined for anything empty or unparseable instead of throwing.
 *
 * @param responseFormatValue Raw value from the subblock store.
 * @param blockId Source block id (reserved for diagnostics; currently unused).
 * @returns The normalized format, a verbatim reference string, or undefined.
 */
function parseResponseFormatSafely(responseFormatValue: any, blockId: string): any {
  // Falsy values (null/undefined/'' …) mean no response format is configured.
  if (!responseFormatValue) return undefined

  // Already an object (null was excluded above) — pass through untouched.
  if (typeof responseFormatValue === 'object') return responseFormatValue

  if (typeof responseFormatValue === 'string') {
    const trimmed = responseFormatValue.trim()
    // Values like "<block.output>" are variable references, not JSON.
    if (trimmed.startsWith('<') && trimmed.includes('>')) return trimmed
    if (trimmed === '') return undefined
    try {
      return JSON.parse(trimmed)
    } catch (error) {
      // Malformed JSON — treat as "no format" rather than propagating.
      return undefined
    }
  }

  // Non-object, non-string truthy values (numbers, booleans) are unsupported.
  return undefined
}
// Helper function to extract fields from JSON Schema
function extractFieldsFromSchema(schema: any): Field[] {
if (!schema || typeof schema !== 'object') {
@@ -75,17 +104,8 @@ export function useBlockConnections(blockId: string) {
// Get the response format from the subblock store
const responseFormatValue = useSubBlockStore.getState().getValue(sourceId, 'responseFormat')
let responseFormat
try {
responseFormat =
typeof responseFormatValue === 'string' && responseFormatValue
? JSON.parse(responseFormatValue)
: responseFormatValue // Handle case where it's already an object
} catch (e) {
logger.error('Failed to parse response format:', { e })
responseFormat = undefined
}
// Safely parse response format with proper error handling
const responseFormat = parseResponseFormatSafely(responseFormatValue, sourceId)
// Get the default output type from the block's outputs
const defaultOutputs: Field[] = Object.entries(sourceBlock.outputs || {}).map(([key]) => ({
@@ -118,17 +138,8 @@ export function useBlockConnections(blockId: string) {
.getState()
.getValue(edge.source, 'responseFormat')
let responseFormat
try {
responseFormat =
typeof responseFormatValue === 'string' && responseFormatValue
? JSON.parse(responseFormatValue)
: responseFormatValue // Handle case where it's already an object
} catch (e) {
logger.error('Failed to parse response format:', { e })
responseFormat = undefined
}
// Safely parse response format with proper error handling
const responseFormat = parseResponseFormatSafely(responseFormatValue, edge.source)
// Get the default output type from the block's outputs
const defaultOutputs: Field[] = Object.entries(sourceBlock.outputs || {}).map(([key]) => ({

View File

@@ -2,6 +2,7 @@ import { useCallback, useState } from 'react'
import { v4 as uuidv4 } from 'uuid'
import { createLogger } from '@/lib/logs/console-logger'
import { buildTraceSpans } from '@/lib/logs/trace-spans'
import { processStreamingBlockLogs } from '@/lib/tokenization'
import type { BlockOutput } from '@/blocks/types'
import { Executor } from '@/executor'
import type { BlockLog, ExecutionResult, StreamingExecution } from '@/executor/types'
@@ -211,15 +212,25 @@ export function useWorkflowExecution() {
result.metadata = { duration: 0, startTime: new Date().toISOString() }
}
;(result.metadata as any).source = 'chat'
result.logs?.forEach((log: BlockLog) => {
if (streamedContent.has(log.blockId)) {
const content = streamedContent.get(log.blockId) || ''
if (log.output) {
log.output.content = content
// Update streamed content and apply tokenization
if (result.logs) {
result.logs.forEach((log: BlockLog) => {
if (streamedContent.has(log.blockId)) {
const content = streamedContent.get(log.blockId) || ''
// For console display, show the actual structured block output instead of formatted streaming content
// This ensures console logs match the block state structure
// Use replaceOutput to completely replace the output instead of merging
useConsoleStore.getState().updateConsole(log.blockId, {
replaceOutput: log.output,
success: true,
})
}
useConsoleStore.getState().updateConsole(log.blockId, content)
}
})
})
// Process all logs for streaming tokenization
const processedCount = processStreamingBlockLogs(result.logs, streamedContent)
logger.info(`Processed ${processedCount} blocks for streaming tokenization`)
}
controller.enqueue(
encoder.encode(`data: ${JSON.stringify({ event: 'final', data: result })}\n\n`)

View File

@@ -1,6 +1,7 @@
'use client'
import React, { createContext, useContext, useMemo } from 'react'
import type React from 'react'
import { createContext, useContext, useEffect, useMemo, useState } from 'react'
import { useParams } from 'next/navigation'
import { createLogger } from '@/lib/logs/console-logger'
import { useUserPermissions, type WorkspaceUserPermissions } from '@/hooks/use-user-permissions'
@@ -8,6 +9,7 @@ import {
useWorkspacePermissions,
type WorkspacePermissions,
} from '@/hooks/use-workspace-permissions'
import { usePresence } from '../../[workflowId]/hooks/use-presence'
const logger = createLogger('WorkspacePermissionsProvider')
@@ -18,88 +20,140 @@ interface WorkspacePermissionsContextType {
permissionsError: string | null
updatePermissions: (newPermissions: WorkspacePermissions) => void
// Computed user permissions
userPermissions: WorkspaceUserPermissions
// Computed user permissions (connection-aware)
userPermissions: WorkspaceUserPermissions & { isOfflineMode?: boolean }
// Connection state management
setOfflineMode: (isOffline: boolean) => void
}
const WorkspacePermissionsContext = createContext<WorkspacePermissionsContextType | null>(null)
const WorkspacePermissionsContext = createContext<WorkspacePermissionsContextType>({
workspacePermissions: null,
permissionsLoading: false,
permissionsError: null,
updatePermissions: () => {},
userPermissions: {
canRead: false,
canEdit: false,
canAdmin: false,
userPermissions: 'read',
isLoading: false,
error: null,
},
setOfflineMode: () => {},
})
interface WorkspacePermissionsProviderProps {
children: React.ReactNode
}
const WorkspacePermissionsProvider = React.memo<WorkspacePermissionsProviderProps>(
({ children }) => {
const params = useParams()
const workspaceId = params.workspaceId as string
/**
* Provider that manages workspace permissions and user access
* Also provides connection-aware permissions that enforce read-only mode when offline
*/
export function WorkspacePermissionsProvider({ children }: WorkspacePermissionsProviderProps) {
const params = useParams()
const workspaceId = params?.workspaceId as string
if (!workspaceId) {
logger.warn('Workspace ID is undefined from params:', params)
// Manage offline mode state locally
const [isOfflineMode, setIsOfflineMode] = useState(false)
const [hasBeenConnected, setHasBeenConnected] = useState(false)
// Fetch workspace permissions and loading state
const {
permissions: workspacePermissions,
loading: permissionsLoading,
error: permissionsError,
updatePermissions,
} = useWorkspacePermissions(workspaceId)
// Get base user permissions from workspace permissions
const baseUserPermissions = useUserPermissions(
workspacePermissions,
permissionsLoading,
permissionsError
)
// Get connection status and update offline mode accordingly
const { isConnected } = usePresence()
useEffect(() => {
if (isConnected) {
// Mark that we've been connected at least once
setHasBeenConnected(true)
// On initial connection, allow going online
if (!hasBeenConnected) {
setIsOfflineMode(false)
}
// If we were previously connected and this is a reconnection, stay offline (user must refresh)
} else if (hasBeenConnected) {
// Only enter offline mode if we were previously connected and now disconnected
setIsOfflineMode(true)
}
// If not connected and never been connected, stay in initial state (not offline mode)
}, [isConnected, hasBeenConnected])
// Create connection-aware permissions that override user permissions when offline
const userPermissions = useMemo((): WorkspaceUserPermissions & { isOfflineMode?: boolean } => {
if (isOfflineMode) {
// In offline mode, force read-only permissions regardless of actual user permissions
return {
...baseUserPermissions,
canEdit: false,
canAdmin: false,
// Keep canRead true so users can still view content
canRead: baseUserPermissions.canRead,
isOfflineMode: true,
}
}
const {
permissions: workspacePermissions,
loading: permissionsLoading,
error: permissionsError,
updatePermissions,
} = useWorkspacePermissions(workspaceId)
// When online, use normal permissions
return {
...baseUserPermissions,
isOfflineMode: false,
}
}, [baseUserPermissions, isOfflineMode])
const userPermissions = useUserPermissions(
const contextValue = useMemo(
() => ({
workspacePermissions,
permissionsLoading,
permissionsError
)
permissionsError,
updatePermissions,
userPermissions,
setOfflineMode: setIsOfflineMode,
}),
[workspacePermissions, permissionsLoading, permissionsError, updatePermissions, userPermissions]
)
const contextValue = useMemo(
() => ({
workspacePermissions,
permissionsLoading,
permissionsError,
updatePermissions,
userPermissions,
}),
[
workspacePermissions,
permissionsLoading,
permissionsError,
updatePermissions,
userPermissions,
]
)
return (
<WorkspacePermissionsContext.Provider value={contextValue}>
{children}
</WorkspacePermissionsContext.Provider>
)
}
)
WorkspacePermissionsProvider.displayName = 'WorkspacePermissionsProvider'
export { WorkspacePermissionsProvider }
return (
<WorkspacePermissionsContext.Provider value={contextValue}>
{children}
</WorkspacePermissionsContext.Provider>
)
}
/**
* Hook to access workspace permissions context
* This replaces individual useWorkspacePermissions calls to avoid duplicate API requests
* Hook to access workspace permissions and data from context
* This provides both raw workspace permissions and computed user permissions
*/
export function useWorkspacePermissionsContext(): WorkspacePermissionsContextType {
const context = useContext(WorkspacePermissionsContext)
if (!context) {
throw new Error(
'useWorkspacePermissionsContext must be used within a WorkspacePermissionsProvider'
)
}
return context
}
/**
* Hook to access user permissions from context
* This replaces individual useUserPermissions calls
* This replaces individual useUserPermissions calls and includes connection-aware permissions
*/
export function useUserPermissionsContext(): WorkspaceUserPermissions {
export function useUserPermissionsContext(): WorkspaceUserPermissions & {
isOfflineMode?: boolean
} {
const { userPermissions } = useWorkspacePermissionsContext()
return userPermissions
}

View File

@@ -14,7 +14,9 @@ import {
} from '@/components/ui/dropdown-menu'
import { Input } from '@/components/ui/input'
import { Label } from '@/components/ui/label'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
import { createLogger } from '@/lib/logs/console-logger'
import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/w/components/providers/workspace-permissions-provider'
import { useFolderStore } from '@/stores/folders/store'
const logger = createLogger('FolderContextMenu')
@@ -43,6 +45,9 @@ export function FolderContextMenu({
const params = useParams()
const workspaceId = params.workspaceId as string
// Get user permissions for the workspace
const userPermissions = useUserPermissionsContext()
const { createFolder, updateFolder, deleteFolder } = useFolderStore()
const handleCreateWorkflow = () => {
@@ -58,12 +63,17 @@ export function FolderContextMenu({
setShowRenameDialog(true)
}
const handleDelete = () => {
const handleDelete = async () => {
if (onDelete) {
onDelete(folderId)
} else {
// Default delete behavior
deleteFolder(folderId, workspaceId)
// Default delete behavior with proper error handling
try {
await deleteFolder(folderId, workspaceId)
logger.info(`Successfully deleted folder from context menu: ${folderName}`)
} catch (error) {
logger.error('Failed to delete folder from context menu:', { error, folderId, folderName })
}
}
}
@@ -129,23 +139,46 @@ export function FolderContextMenu({
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align='end' onClick={(e) => e.stopPropagation()}>
<DropdownMenuItem onClick={handleCreateWorkflow}>
<File className='mr-2 h-4 w-4' />
New Workflow
</DropdownMenuItem>
<DropdownMenuItem onClick={handleCreateSubfolder}>
<Folder className='mr-2 h-4 w-4' />
New Subfolder
</DropdownMenuItem>
<DropdownMenuSeparator />
<DropdownMenuItem onClick={handleRename}>
<Pencil className='mr-2 h-4 w-4' />
Rename
</DropdownMenuItem>
<DropdownMenuItem onClick={handleDelete} className='text-destructive'>
<Trash2 className='mr-2 h-4 w-4' />
Delete
</DropdownMenuItem>
{userPermissions.canEdit && (
<>
<DropdownMenuItem onClick={handleCreateWorkflow}>
<File className='mr-2 h-4 w-4' />
New Workflow
</DropdownMenuItem>
<DropdownMenuItem onClick={handleCreateSubfolder}>
<Folder className='mr-2 h-4 w-4' />
New Subfolder
</DropdownMenuItem>
<DropdownMenuSeparator />
<DropdownMenuItem onClick={handleRename}>
<Pencil className='mr-2 h-4 w-4' />
Rename
</DropdownMenuItem>
</>
)}
{userPermissions.canAdmin ? (
<DropdownMenuItem onClick={handleDelete} className='text-destructive'>
<Trash2 className='mr-2 h-4 w-4' />
Delete
</DropdownMenuItem>
) : (
<Tooltip>
<TooltipTrigger asChild>
<div>
<DropdownMenuItem
className='cursor-not-allowed text-muted-foreground opacity-50'
onClick={(e) => e.preventDefault()}
>
<Trash2 className='mr-2 h-4 w-4' />
Delete
</DropdownMenuItem>
</div>
</TooltipTrigger>
<TooltipContent>
<p>Admin access required to delete folders</p>
</TooltipContent>
</Tooltip>
)}
</DropdownMenuContent>
</DropdownMenu>

View File

@@ -7,6 +7,7 @@ import { useParams, usePathname, useRouter } from 'next/navigation'
import { Skeleton } from '@/components/ui/skeleton'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
import { useSession } from '@/lib/auth-client'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console-logger'
import {
getKeyboardShortcutText,
@@ -27,7 +28,7 @@ import { WorkspaceHeader } from './components/workspace-header/workspace-header'
const logger = createLogger('Sidebar')
const IS_DEV = process.env.NODE_ENV === 'development'
const IS_DEV = env.NODE_ENV === 'development'
export function Sidebar() {
useGlobalShortcuts()

View File

@@ -33,6 +33,7 @@ interface WorkflowPreviewProps {
isPannable?: boolean
defaultPosition?: { x: number; y: number }
defaultZoom?: number
onNodeClick?: (blockId: string, mousePosition: { x: number; y: number }) => void
}
// Define node types - the components now handle preview mode internally
@@ -55,7 +56,24 @@ export function WorkflowPreview({
isPannable = false,
defaultPosition,
defaultZoom,
onNodeClick,
}: WorkflowPreviewProps) {
// Handle migrated logs that don't have complete workflow state
if (!workflowState || !workflowState.blocks || !workflowState.edges) {
return (
<div
style={{ height, width }}
className='flex items-center justify-center rounded-lg border border-gray-200 bg-gray-50 dark:border-gray-700 dark:bg-gray-900'
>
<div className='text-center text-gray-500 dark:text-gray-400'>
<div className='mb-2 font-medium text-lg'> Logged State Not Found</div>
<div className='text-sm'>
This log was migrated from the old system and doesn't contain workflow state data.
</div>
</div>
</div>
)
}
const blocksStructure = useMemo(
() => ({
count: Object.keys(workflowState.blocks || {}).length,
@@ -82,8 +100,8 @@ export function WorkflowPreview({
const edgesStructure = useMemo(
() => ({
count: workflowState.edges.length,
ids: workflowState.edges.map((e) => e.id).join(','),
count: workflowState.edges?.length || 0,
ids: workflowState.edges?.map((e) => e.id).join(',') || '',
}),
[workflowState.edges]
)
@@ -113,7 +131,7 @@ export function WorkflowPreview({
const nodes: Node[] = useMemo(() => {
const nodeArray: Node[] = []
Object.entries(workflowState.blocks).forEach(([blockId, block]) => {
Object.entries(workflowState.blocks || {}).forEach(([blockId, block]) => {
if (!block || !block.type) {
logger.warn(`Skipping invalid block: ${blockId}`)
return
@@ -184,7 +202,7 @@ export function WorkflowPreview({
})
if (block.type === 'loop') {
const childBlocks = Object.entries(workflowState.blocks).filter(
const childBlocks = Object.entries(workflowState.blocks || {}).filter(
([_, childBlock]) => childBlock.data?.parentId === blockId
)
@@ -221,7 +239,7 @@ export function WorkflowPreview({
}, [blocksStructure, loopsStructure, parallelsStructure, showSubBlocks, workflowState.blocks])
const edges: Edge[] = useMemo(() => {
return workflowState.edges.map((edge) => ({
return (workflowState.edges || []).map((edge) => ({
id: edge.id,
source: edge.source,
target: edge.target,
@@ -256,6 +274,14 @@ export function WorkflowPreview({
elementsSelectable={false}
nodesDraggable={false}
nodesConnectable={false}
onNodeClick={
onNodeClick
? (event, node) => {
logger.debug('Node clicked:', { nodeId: node.id, event })
onNodeClick(node.id, { x: event.clientX, y: event.clientY })
}
: undefined
}
>
<Background />
</ReactFlow>

View File

@@ -1,11 +1,12 @@
import { DocumentIcon } from '@/components/icons'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console-logger'
import type { FileParserOutput } from '@/tools/file/types'
import type { BlockConfig, SubBlockConfig, SubBlockLayout, SubBlockType } from '../types'
const logger = createLogger('FileBlock')
const shouldEnableURLInput = process.env.NODE_ENV === 'production'
const shouldEnableURLInput = env.NODE_ENV === 'production'
const inputMethodBlock: SubBlockConfig = {
id: 'inputMethod',

View File

@@ -1,8 +1,9 @@
import { MistralIcon } from '@/components/icons'
import { env } from '@/lib/env'
import type { MistralParserOutput } from '@/tools/mistral/types'
import type { BlockConfig, SubBlockConfig, SubBlockLayout, SubBlockType } from '../types'
const shouldEnableFileUpload = process.env.NODE_ENV === 'production'
const shouldEnableFileUpload = env.NODE_ENV === 'production'
const inputMethodBlock: SubBlockConfig = {
id: 'inputMethod',

View File

@@ -31,6 +31,18 @@ export const RedditBlock: BlockConfig<
],
},
// Reddit OAuth Authentication
{
id: 'credential',
title: 'Reddit Account',
type: 'oauth-input',
layout: 'full',
provider: 'reddit',
serviceId: 'reddit',
requiredScopes: ['identity', 'read'],
placeholder: 'Select Reddit account',
},
// Common fields - appear for all actions
{
id: 'subreddit',
@@ -151,27 +163,31 @@ export const RedditBlock: BlockConfig<
},
params: (inputs) => {
const action = inputs.action || 'get_posts'
const { credential, ...rest } = inputs
if (action === 'get_comments') {
return {
postId: inputs.postId,
subreddit: inputs.subreddit,
sort: inputs.commentSort,
limit: inputs.commentLimit ? Number.parseInt(inputs.commentLimit) : undefined,
postId: rest.postId,
subreddit: rest.subreddit,
sort: rest.commentSort,
limit: rest.commentLimit ? Number.parseInt(rest.commentLimit) : undefined,
credential: credential,
}
}
return {
subreddit: inputs.subreddit,
sort: inputs.sort,
limit: inputs.limit ? Number.parseInt(inputs.limit) : undefined,
time: inputs.sort === 'top' ? inputs.time : undefined,
subreddit: rest.subreddit,
sort: rest.sort,
limit: rest.limit ? Number.parseInt(rest.limit) : undefined,
time: rest.sort === 'top' ? rest.time : undefined,
credential: credential,
}
},
},
},
inputs: {
action: { type: 'string', required: true },
credential: { type: 'string', required: true },
subreddit: { type: 'string', required: true },
sort: { type: 'string', required: true },
time: { type: 'string', required: false },

View File

@@ -11,6 +11,7 @@ import {
Section,
Text,
} from '@react-email/components'
import { env } from '@/lib/env'
import { baseStyles } from './base-styles'
import EmailFooter from './footer'
@@ -20,7 +21,7 @@ interface WorkspaceInvitationEmailProps {
invitationLink?: string
}
const baseUrl = process.env.NEXT_PUBLIC_APP_URL || 'https://simstudio.ai'
const baseUrl = env.NEXT_PUBLIC_APP_URL || 'https://simstudio.ai'
export const WorkspaceInvitationEmail = ({
workspaceName = 'Workspace',

View File

@@ -1,7 +1,8 @@
import { describe, expect, test, vi } from 'vitest'
import { extractFieldsFromSchema, parseResponseFormatSafely } from '@/lib/response-format'
import type { BlockState } from '@/stores/workflows/workflow/types'
import { generateLoopBlocks } from '@/stores/workflows/workflow/utils'
import { checkTagTrigger, extractFieldsFromSchema } from './tag-dropdown'
import { checkTagTrigger } from './tag-dropdown'
vi.mock('@/stores/workflows/workflow/store', () => ({
useWorkflowStore: vi.fn(() => ({
@@ -24,6 +25,15 @@ vi.mock('@/stores/panel/variables/store', () => ({
})),
}))
vi.mock('@/stores/workflows/subblock/store', () => ({
useSubBlockStore: vi.fn(() => ({
getValue: vi.fn(() => null),
getState: vi.fn(() => ({
getValue: vi.fn(() => null),
})),
})),
}))
describe('TagDropdown Loop Suggestions', () => {
test('should generate correct loop suggestions for forEach loops', () => {
const blocks: Record<string, BlockState> = {
@@ -603,3 +613,180 @@ describe('TagDropdown Tag Selection Logic', () => {
})
})
})
describe('TagDropdown Response Format Support', () => {
it.concurrent(
'should use custom schema properties when response format is specified',
async () => {
// Mock the subblock store to return a custom response format
const mockGetValue = vi.fn()
const mockUseSubBlockStore = vi.mocked(
await import('@/stores/workflows/subblock/store')
).useSubBlockStore
// Set up the mock to return the example schema from the user
const responseFormatValue = JSON.stringify({
name: 'short_schema',
description: 'A minimal example schema with a single string property.',
strict: true,
schema: {
type: 'object',
properties: {
example_property: {
type: 'string',
description: 'A simple string property.',
},
},
additionalProperties: false,
required: ['example_property'],
},
})
mockGetValue.mockImplementation((blockId: string, subBlockId: string) => {
if (blockId === 'agent1' && subBlockId === 'responseFormat') {
return responseFormatValue
}
return null
})
mockUseSubBlockStore.mockReturnValue({
getValue: mockGetValue,
getState: () => ({
getValue: mockGetValue,
}),
} as any)
// Test the parseResponseFormatSafely function
const parsedFormat = parseResponseFormatSafely(responseFormatValue, 'agent1')
expect(parsedFormat).toEqual({
name: 'short_schema',
description: 'A minimal example schema with a single string property.',
strict: true,
schema: {
type: 'object',
properties: {
example_property: {
type: 'string',
description: 'A simple string property.',
},
},
additionalProperties: false,
required: ['example_property'],
},
})
// Test the extractFieldsFromSchema function with the parsed format
const fields = extractFieldsFromSchema(parsedFormat)
expect(fields).toEqual([
{
name: 'example_property',
type: 'string',
description: 'A simple string property.',
},
])
}
)
it.concurrent(
'should fallback to default outputs when response format parsing fails',
async () => {
// Test with invalid JSON
const invalidFormat = parseResponseFormatSafely('invalid json', 'agent1')
expect(invalidFormat).toBeNull()
// Test with null/undefined values
expect(parseResponseFormatSafely(null, 'agent1')).toBeNull()
expect(parseResponseFormatSafely(undefined, 'agent1')).toBeNull()
expect(parseResponseFormatSafely('', 'agent1')).toBeNull()
}
)
it.concurrent('should handle response format with nested schema correctly', async () => {
const responseFormat = {
schema: {
type: 'object',
properties: {
user: {
type: 'object',
description: 'User information',
properties: {
name: { type: 'string', description: 'User name' },
age: { type: 'number', description: 'User age' },
},
},
status: { type: 'string', description: 'Response status' },
},
},
}
const fields = extractFieldsFromSchema(responseFormat)
expect(fields).toEqual([
{ name: 'user', type: 'object', description: 'User information' },
{ name: 'status', type: 'string', description: 'Response status' },
])
})
it.concurrent('should handle response format without schema wrapper', async () => {
const responseFormat = {
type: 'object',
properties: {
result: { type: 'boolean', description: 'Operation result' },
message: { type: 'string', description: 'Status message' },
},
}
const fields = extractFieldsFromSchema(responseFormat)
expect(fields).toEqual([
{ name: 'result', type: 'boolean', description: 'Operation result' },
{ name: 'message', type: 'string', description: 'Status message' },
])
})
it.concurrent('should return object as-is when it is already parsed', async () => {
const responseFormat = {
name: 'test_schema',
schema: {
properties: {
data: { type: 'string' },
},
},
}
const result = parseResponseFormatSafely(responseFormat, 'agent1')
expect(result).toEqual(responseFormat)
})
it.concurrent('should simulate block tag generation with custom response format', async () => {
// Simulate the tag generation logic that would happen in the component
const blockName = 'Agent 1'
const normalizedBlockName = blockName.replace(/\s+/g, '').toLowerCase() // 'agent1'
// Mock response format
const responseFormat = {
schema: {
properties: {
example_property: { type: 'string', description: 'A simple string property.' },
another_field: { type: 'number', description: 'Another field.' },
},
},
}
const schemaFields = extractFieldsFromSchema(responseFormat)
// Generate block tags as they would be in the component
const blockTags = schemaFields.map((field) => `${normalizedBlockName}.${field.name}`)
expect(blockTags).toEqual(['agent1.example_property', 'agent1.another_field'])
// Verify the fields extracted correctly
expect(schemaFields).toEqual([
{ name: 'example_property', type: 'string', description: 'A simple string property.' },
{ name: 'another_field', type: 'number', description: 'Another field.' },
])
})
})

View File

@@ -1,18 +1,16 @@
import type React from 'react'
import { useCallback, useEffect, useMemo, useState } from 'react'
import { BlockPathCalculator } from '@/lib/block-path-calculator'
import { createLogger } from '@/lib/logs/console-logger'
import { extractFieldsFromSchema, parseResponseFormatSafely } from '@/lib/response-format'
import { cn } from '@/lib/utils'
import { getBlock } from '@/blocks'
import { Serializer } from '@/serializer'
import { useVariablesStore } from '@/stores/panel/variables/store'
import type { Variable } from '@/stores/panel/variables/types'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
const logger = createLogger('TagDropdown')
// Type definitions for component data structures
interface BlockTagGroup {
blockName: string
blockId: string
@@ -21,49 +19,6 @@ interface BlockTagGroup {
distance: number
}
interface Field {
name: string
type: string
description?: string
}
// Helper function to extract fields from JSON Schema
export function extractFieldsFromSchema(schema: any): Field[] {
if (!schema || typeof schema !== 'object') {
return []
}
// Handle legacy format with fields array
if (Array.isArray(schema.fields)) {
return schema.fields
}
// Handle new JSON Schema format
const schemaObj = schema.schema || schema
if (!schemaObj || !schemaObj.properties || typeof schemaObj.properties !== 'object') {
return []
}
// Extract fields from schema properties
return Object.entries(schemaObj.properties).map(([name, prop]: [string, any]) => {
// Handle array format like ['string', 'array']
if (Array.isArray(prop)) {
return {
name,
type: prop.includes('array') ? 'array' : prop[0] || 'string',
description: undefined,
}
}
// Handle object format like { type: 'string', description: '...' }
return {
name,
type: prop.type || 'string',
description: prop.description,
}
})
}
interface TagDropdownProps {
visible: boolean
onSelect: (newValue: string) => void
@@ -169,18 +124,68 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
}
const blockConfig = getBlock(sourceBlock.type)
// Handle special blocks that aren't in the registry (loop and parallel)
if (!blockConfig) {
if (sourceBlock.type === 'loop' || sourceBlock.type === 'parallel') {
// Create a mock config with results output for loop/parallel blocks
const mockConfig = {
outputs: {
results: 'array', // These blocks have a results array output
},
}
const blockName = sourceBlock.name || sourceBlock.type
const normalizedBlockName = blockName.replace(/\s+/g, '').toLowerCase()
// Generate output paths for the mock config
const outputPaths = generateOutputPaths(mockConfig.outputs)
const blockTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
const blockTagGroups: BlockTagGroup[] = [
{
blockName,
blockId: activeSourceBlockId,
blockType: sourceBlock.type,
tags: blockTags,
distance: 0,
},
]
return {
tags: blockTags,
variableInfoMap: {},
blockTagGroups,
}
}
return { tags: [], variableInfoMap: {}, blockTagGroups: [] }
}
const blockName = sourceBlock.name || sourceBlock.type
const normalizedBlockName = blockName.replace(/\s+/g, '').toLowerCase()
// Handle blocks with no outputs (like starter) - show as just <blockname>
// Check for custom response format first
const responseFormatValue = useSubBlockStore
.getState()
.getValue(activeSourceBlockId, 'responseFormat')
const responseFormat = parseResponseFormatSafely(responseFormatValue, activeSourceBlockId)
let blockTags: string[]
if (Object.keys(blockConfig.outputs).length === 0) {
if (responseFormat) {
// Use custom schema properties if response format is specified
const schemaFields = extractFieldsFromSchema(responseFormat)
if (schemaFields.length > 0) {
blockTags = schemaFields.map((field) => `${normalizedBlockName}.${field.name}`)
} else {
// Fallback to default if schema extraction failed
const outputPaths = generateOutputPaths(blockConfig.outputs)
blockTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
}
} else if (Object.keys(blockConfig.outputs).length === 0) {
// Handle blocks with no outputs (like starter) - show as just <blockname>
blockTags = [normalizedBlockName]
} else {
// Use default block outputs
const outputPaths = generateOutputPaths(blockConfig.outputs)
blockTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
}
@@ -202,6 +207,16 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
}
}
// Check for invalid blocks before serialization to prevent race conditions
const hasInvalidBlocks = Object.values(blocks).some((block) => !block || !block.type)
if (hasInvalidBlocks) {
return {
tags: [],
variableInfoMap: {},
blockTagGroups: [],
}
}
// Create serialized workflow for BlockPathCalculator
const serializer = new Serializer()
const serializedWorkflow = serializer.serializeWorkflow(blocks, edges, loops, parallels)
@@ -260,28 +275,65 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
{} as Record<string, { type: string; id: string }>
)
// Generate loop tags if current block is in a loop
const loopTags: string[] = []
// Generate loop contextual block group if current block is in a loop
let loopBlockGroup: BlockTagGroup | null = null
const containingLoop = Object.entries(loops).find(([_, loop]) => loop.nodes.includes(blockId))
let containingLoopBlockId: string | null = null
if (containingLoop) {
const [_loopId, loop] = containingLoop
const [loopId, loop] = containingLoop
containingLoopBlockId = loopId
const loopType = loop.loopType || 'for'
loopTags.push('loop.index')
const contextualTags: string[] = ['index']
if (loopType === 'forEach') {
loopTags.push('loop.currentItem')
loopTags.push('loop.items')
contextualTags.push('currentItem')
contextualTags.push('items')
}
// Add the containing loop block's results to the contextual tags
const containingLoopBlock = blocks[loopId]
if (containingLoopBlock) {
const loopBlockName = containingLoopBlock.name || containingLoopBlock.type
const normalizedLoopBlockName = loopBlockName.replace(/\s+/g, '').toLowerCase()
contextualTags.push(`${normalizedLoopBlockName}.results`)
// Create a block group for the loop contextual tags
loopBlockGroup = {
blockName: loopBlockName,
blockId: loopId,
blockType: 'loop',
tags: contextualTags,
distance: 0, // Contextual tags have highest priority
}
}
}
// Generate parallel tags if current block is in parallel
const parallelTags: string[] = []
// Generate parallel contextual block group if current block is in parallel
let parallelBlockGroup: BlockTagGroup | null = null
const containingParallel = Object.entries(parallels || {}).find(([_, parallel]) =>
parallel.nodes.includes(blockId)
)
let containingParallelBlockId: string | null = null
if (containingParallel) {
parallelTags.push('parallel.index')
parallelTags.push('parallel.currentItem')
parallelTags.push('parallel.items')
const [parallelId] = containingParallel
containingParallelBlockId = parallelId
const contextualTags: string[] = ['index', 'currentItem', 'items']
// Add the containing parallel block's results to the contextual tags
const containingParallelBlock = blocks[parallelId]
if (containingParallelBlock) {
const parallelBlockName = containingParallelBlock.name || containingParallelBlock.type
const normalizedParallelBlockName = parallelBlockName.replace(/\s+/g, '').toLowerCase()
contextualTags.push(`${normalizedParallelBlockName}.results`)
// Create a block group for the parallel contextual tags
parallelBlockGroup = {
blockName: parallelBlockName,
blockId: parallelId,
blockType: 'parallel',
tags: contextualTags,
distance: 0, // Contextual tags have highest priority
}
}
}
// Create block tag groups from accessible blocks
@@ -293,16 +345,70 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
if (!accessibleBlock) continue
const blockConfig = getBlock(accessibleBlock.type)
if (!blockConfig) continue
// Handle special blocks that aren't in the registry (loop and parallel)
if (!blockConfig) {
// For loop and parallel blocks, create a mock config with results output
if (accessibleBlock.type === 'loop' || accessibleBlock.type === 'parallel') {
// Skip this block if it's the containing loop/parallel block - we'll handle it with contextual tags
if (
accessibleBlockId === containingLoopBlockId ||
accessibleBlockId === containingParallelBlockId
) {
continue
}
const mockConfig = {
outputs: {
results: 'array', // These blocks have a results array output
},
}
const blockName = accessibleBlock.name || accessibleBlock.type
const normalizedBlockName = blockName.replace(/\s+/g, '').toLowerCase()
// Generate output paths for the mock config
const outputPaths = generateOutputPaths(mockConfig.outputs)
const blockTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
blockTagGroups.push({
blockName,
blockId: accessibleBlockId,
blockType: accessibleBlock.type,
tags: blockTags,
distance: blockDistances[accessibleBlockId] || 0,
})
allBlockTags.push(...blockTags)
}
continue
}
const blockName = accessibleBlock.name || accessibleBlock.type
const normalizedBlockName = blockName.replace(/\s+/g, '').toLowerCase()
// Handle blocks with no outputs (like starter) - show as just <blockname>
// Check for custom response format first
const responseFormatValue = useSubBlockStore
.getState()
.getValue(accessibleBlockId, 'responseFormat')
const responseFormat = parseResponseFormatSafely(responseFormatValue, accessibleBlockId)
let blockTags: string[]
if (Object.keys(blockConfig.outputs).length === 0) {
if (responseFormat) {
// Use custom schema properties if response format is specified
const schemaFields = extractFieldsFromSchema(responseFormat)
if (schemaFields.length > 0) {
blockTags = schemaFields.map((field) => `${normalizedBlockName}.${field.name}`)
} else {
// Fallback to default if schema extraction failed
const outputPaths = generateOutputPaths(blockConfig.outputs)
blockTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
}
} else if (Object.keys(blockConfig.outputs).length === 0) {
// Handle blocks with no outputs (like starter) - show as just <blockname>
blockTags = [normalizedBlockName]
} else {
// Use default block outputs
const outputPaths = generateOutputPaths(blockConfig.outputs)
blockTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
}
@@ -318,13 +424,32 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
allBlockTags.push(...blockTags)
}
// Sort block groups by distance (closest first)
// Add contextual block groups at the beginning (they have highest priority)
const finalBlockTagGroups: BlockTagGroup[] = []
if (loopBlockGroup) {
finalBlockTagGroups.push(loopBlockGroup)
}
if (parallelBlockGroup) {
finalBlockTagGroups.push(parallelBlockGroup)
}
// Sort regular block groups by distance (closest first) and add them
blockTagGroups.sort((a, b) => a.distance - b.distance)
finalBlockTagGroups.push(...blockTagGroups)
// Collect all tags for the main tags array
const contextualTags: string[] = []
if (loopBlockGroup) {
contextualTags.push(...loopBlockGroup.tags)
}
if (parallelBlockGroup) {
contextualTags.push(...parallelBlockGroup.tags)
}
return {
tags: [...variableTags, ...loopTags, ...parallelTags, ...allBlockTags],
tags: [...variableTags, ...contextualTags, ...allBlockTags],
variableInfoMap,
blockTagGroups,
blockTagGroups: finalBlockTagGroups,
}
}, [blocks, edges, loops, parallels, blockId, activeSourceBlockId, workflowVariables])
@@ -335,18 +460,12 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
}, [tags, searchTerm])
// Group filtered tags by category
const { variableTags, loopTags, parallelTags, filteredBlockTagGroups } = useMemo(() => {
const { variableTags, filteredBlockTagGroups } = useMemo(() => {
const varTags: string[] = []
const loopTags: string[] = []
const parTags: string[] = []
filteredTags.forEach((tag) => {
if (tag.startsWith('variable.')) {
varTags.push(tag)
} else if (tag.startsWith('loop.')) {
loopTags.push(tag)
} else if (tag.startsWith('parallel.')) {
parTags.push(tag)
}
})
@@ -360,8 +479,6 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
return {
variableTags: varTags,
loopTags: loopTags,
parallelTags: parTags,
filteredBlockTagGroups,
}
}, [filteredTags, blockTagGroups, searchTerm])
@@ -369,8 +486,8 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
// Create ordered tags for keyboard navigation
const orderedTags = useMemo(() => {
const allBlockTags = filteredBlockTagGroups.flatMap((group) => group.tags)
return [...variableTags, ...loopTags, ...parallelTags, ...allBlockTags]
}, [variableTags, loopTags, parallelTags, filteredBlockTagGroups])
return [...variableTags, ...allBlockTags]
}, [variableTags, filteredBlockTagGroups])
// Create efficient tag index lookup map
const tagIndexMap = useMemo(() => {
@@ -383,7 +500,7 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
// Handle tag selection and text replacement
const handleTagSelect = useCallback(
(tag: string) => {
(tag: string, blockGroup?: BlockTagGroup) => {
const textBeforeCursor = inputValue.slice(0, cursorPosition)
const textAfterCursor = inputValue.slice(cursorPosition)
@@ -391,8 +508,10 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
const lastOpenBracket = textBeforeCursor.lastIndexOf('<')
if (lastOpenBracket === -1) return
// Process variable tags to maintain compatibility
// Process different types of tags
let processedTag = tag
// Handle variable tags
if (tag.startsWith('variable.')) {
const variableName = tag.substring('variable.'.length)
const variableObj = Object.values(variables).find(
@@ -403,6 +522,19 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
processedTag = tag
}
}
// Handle contextual loop/parallel tags
else if (
blockGroup &&
(blockGroup.blockType === 'loop' || blockGroup.blockType === 'parallel')
) {
// Check if this is a contextual tag (without dots) that needs a prefix
if (!tag.includes('.') && ['index', 'currentItem', 'items'].includes(tag)) {
processedTag = `${blockGroup.blockType}.${tag}`
} else {
// It's already a properly formatted tag (like blockname.results)
processedTag = tag
}
}
// Handle existing closing bracket
const nextCloseBracket = textAfterCursor.indexOf('>')
@@ -455,7 +587,12 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
e.preventDefault()
e.stopPropagation()
if (selectedIndex >= 0 && selectedIndex < orderedTags.length) {
handleTagSelect(orderedTags[selectedIndex])
const selectedTag = orderedTags[selectedIndex]
// Find which block group this tag belongs to
const belongsToGroup = filteredBlockTagGroups.find((group) =>
group.tags.includes(selectedTag)
)
handleTagSelect(selectedTag, belongsToGroup)
}
break
case 'Escape':
@@ -469,7 +606,7 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
window.addEventListener('keydown', handleKeyboardEvent, true)
return () => window.removeEventListener('keydown', handleKeyboardEvent, true)
}
}, [visible, selectedIndex, orderedTags, handleTagSelect, onClose])
}, [visible, selectedIndex, orderedTags, filteredBlockTagGroups, handleTagSelect, onClose])
// Early return if dropdown should not be visible
if (!visible || tags.length === 0 || orderedTags.length === 0) return null
@@ -542,152 +679,21 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
</>
)}
{/* Loop section */}
{loopTags.length > 0 && (
<>
{variableTags.length > 0 && <div className='my-0' />}
<div className='px-2 pt-2.5 pb-0.5 font-medium text-muted-foreground text-xs'>
Loop
</div>
<div className='-mx-1 -px-1'>
{loopTags.map((tag: string) => {
const tagIndex = tagIndexMap.get(tag) ?? -1
const loopProperty = tag.split('.')[1]
// Choose appropriate icon and description based on loop property
let tagIcon = 'L'
let tagDescription = ''
const bgColor = '#8857E6'
if (loopProperty === 'currentItem') {
tagIcon = 'i'
tagDescription = 'Current item'
} else if (loopProperty === 'items') {
tagIcon = 'I'
tagDescription = 'All items'
} else if (loopProperty === 'index') {
tagIcon = '#'
tagDescription = 'Index'
}
return (
<button
key={tag}
className={cn(
'flex w-full items-center gap-2 px-3 py-1.5 text-left text-sm',
'hover:bg-accent hover:text-accent-foreground',
'focus:bg-accent focus:text-accent-foreground focus:outline-none',
tagIndex === selectedIndex &&
tagIndex >= 0 &&
'bg-accent text-accent-foreground'
)}
onMouseEnter={() => setSelectedIndex(tagIndex >= 0 ? tagIndex : 0)}
onMouseDown={(e) => {
e.preventDefault()
e.stopPropagation()
handleTagSelect(tag)
}}
onClick={(e) => {
e.preventDefault()
e.stopPropagation()
handleTagSelect(tag)
}}
>
<div
className='flex h-5 w-5 items-center justify-center rounded'
style={{ backgroundColor: bgColor }}
>
<span className='h-3 w-3 font-bold text-white text-xs'>{tagIcon}</span>
</div>
<span className='flex-1 truncate'>{tag}</span>
<span className='ml-auto text-muted-foreground text-xs'>
{tagDescription}
</span>
</button>
)
})}
</div>
</>
)}
{/* Parallel section */}
{parallelTags.length > 0 && (
<>
{loopTags.length > 0 && <div className='my-0' />}
<div className='px-2 pt-2.5 pb-0.5 font-medium text-muted-foreground text-xs'>
Parallel
</div>
<div className='-mx-1 -px-1'>
{parallelTags.map((tag: string) => {
const tagIndex = tagIndexMap.get(tag) ?? -1
const parallelProperty = tag.split('.')[1]
// Choose appropriate icon and description based on parallel property
let tagIcon = 'P'
let tagDescription = ''
const bgColor = '#FF5757'
if (parallelProperty === 'currentItem') {
tagIcon = 'i'
tagDescription = 'Current item'
} else if (parallelProperty === 'items') {
tagIcon = 'I'
tagDescription = 'All items'
} else if (parallelProperty === 'index') {
tagIcon = '#'
tagDescription = 'Index'
}
return (
<button
key={tag}
className={cn(
'flex w-full items-center gap-2 px-3 py-1.5 text-left text-sm',
'hover:bg-accent hover:text-accent-foreground',
'focus:bg-accent focus:text-accent-foreground focus:outline-none',
tagIndex === selectedIndex &&
tagIndex >= 0 &&
'bg-accent text-accent-foreground'
)}
onMouseEnter={() => setSelectedIndex(tagIndex >= 0 ? tagIndex : 0)}
onMouseDown={(e) => {
e.preventDefault()
e.stopPropagation()
handleTagSelect(tag)
}}
onClick={(e) => {
e.preventDefault()
e.stopPropagation()
handleTagSelect(tag)
}}
>
<div
className='flex h-5 w-5 items-center justify-center rounded'
style={{ backgroundColor: bgColor }}
>
<span className='h-3 w-3 font-bold text-white text-xs'>{tagIcon}</span>
</div>
<span className='flex-1 truncate'>{tag}</span>
<span className='ml-auto text-muted-foreground text-xs'>
{tagDescription}
</span>
</button>
)
})}
</div>
</>
)}
{/* Block sections */}
{filteredBlockTagGroups.length > 0 && (
<>
{(variableTags.length > 0 || loopTags.length > 0 || parallelTags.length > 0) && (
<div className='my-0' />
)}
{variableTags.length > 0 && <div className='my-0' />}
{filteredBlockTagGroups.map((group) => {
// Get block color from configuration
const blockConfig = getBlock(group.blockType)
const blockColor = blockConfig?.bgColor || '#2F55FF'
let blockColor = blockConfig?.bgColor || '#2F55FF'
// Handle special colors for loop and parallel blocks
if (group.blockType === 'loop') {
blockColor = '#8857E6' // Purple color for loop blocks
} else if (group.blockType === 'parallel') {
blockColor = '#FF5757' // Red color for parallel blocks
}
return (
<div key={group.blockId}>
@@ -697,11 +703,37 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
<div>
{group.tags.map((tag: string) => {
const tagIndex = tagIndexMap.get(tag) ?? -1
// Extract path after block name (e.g., "field" from "blockname.field")
// For root reference blocks, show the block name instead of empty path
const tagParts = tag.split('.')
const path = tagParts.slice(1).join('.')
const displayText = path || group.blockName
// Handle display text based on tag type
let displayText: string
let tagDescription = ''
let tagIcon = group.blockName.charAt(0).toUpperCase()
if (
(group.blockType === 'loop' || group.blockType === 'parallel') &&
!tag.includes('.')
) {
// Contextual tags like 'index', 'currentItem', 'items'
displayText = tag
if (tag === 'index') {
tagIcon = '#'
tagDescription = 'Index'
} else if (tag === 'currentItem') {
tagIcon = 'i'
tagDescription = 'Current item'
} else if (tag === 'items') {
tagIcon = 'I'
tagDescription = 'All items'
}
} else {
// Regular block output tags like 'blockname.field' or 'blockname.results'
const tagParts = tag.split('.')
const path = tagParts.slice(1).join('.')
displayText = path || group.blockName
if (path === 'results') {
tagDescription = 'Results array'
}
}
return (
<button
@@ -718,12 +750,12 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
onMouseDown={(e) => {
e.preventDefault()
e.stopPropagation()
handleTagSelect(tag)
handleTagSelect(tag, group)
}}
onClick={(e) => {
e.preventDefault()
e.stopPropagation()
handleTagSelect(tag)
handleTagSelect(tag, group)
}}
>
<div
@@ -731,12 +763,15 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
style={{ backgroundColor: blockColor }}
>
<span className='h-3 w-3 font-bold text-white text-xs'>
{group.blockName.charAt(0).toUpperCase()}
{tagIcon}
</span>
</div>
<span className='max-w-[calc(100%-32px)] truncate'>
{displayText}
</span>
<span className='flex-1 truncate'>{displayText}</span>
{tagDescription && (
<span className='ml-auto text-muted-foreground text-xs'>
{tagDescription}
</span>
)}
</button>
)
})}

View File

@@ -21,7 +21,7 @@ const TooltipContent = React.forwardRef<
ref={ref}
sideOffset={sideOffset}
className={cn(
'fade-in-0 zoom-in-95 data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 animate-in overflow-hidden rounded-md bg-black px-3 py-1.5 text-white text-xs shadow-md data-[state=closed]:animate-out dark:bg-white dark:text-black',
'fade-in-0 zoom-in-95 data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-[60] animate-in overflow-hidden rounded-md bg-black px-3 py-1.5 text-white text-xs shadow-md data-[state=closed]:animate-out dark:bg-white dark:text-black',
className
)}
{...props}

View File

@@ -11,6 +11,7 @@ import {
} from 'react'
import { useParams } from 'next/navigation'
import { io, type Socket } from 'socket.io-client'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console-logger'
const logger = createLogger('SocketContext')
@@ -49,6 +50,7 @@ interface SocketContextType {
onUserJoined: (handler: (data: any) => void) => void
onUserLeft: (handler: (data: any) => void) => void
onWorkflowDeleted: (handler: (data: any) => void) => void
onWorkflowReverted: (handler: (data: any) => void) => void
}
const SocketContext = createContext<SocketContextType>({
@@ -70,6 +72,7 @@ const SocketContext = createContext<SocketContextType>({
onUserJoined: () => {},
onUserLeft: () => {},
onWorkflowDeleted: () => {},
onWorkflowReverted: () => {},
})
export const useSocket = () => useContext(SocketContext)
@@ -99,6 +102,7 @@ export function SocketProvider({ children, user }: SocketProviderProps) {
userJoined?: (data: any) => void
userLeft?: (data: any) => void
workflowDeleted?: (data: any) => void
workflowReverted?: (data: any) => void
}>({})
// Helper function to generate a fresh socket token
@@ -134,7 +138,7 @@ export function SocketProvider({ children, user }: SocketProviderProps) {
// Generate initial token for socket authentication
const token = await generateSocketToken()
const socketUrl = process.env.NEXT_PUBLIC_SOCKET_URL || 'http://localhost:3002'
const socketUrl = env.NEXT_PUBLIC_SOCKET_URL || 'http://localhost:3002'
logger.info('Attempting to connect to Socket.IO server', {
url: socketUrl,
@@ -146,9 +150,9 @@ export function SocketProvider({ children, user }: SocketProviderProps) {
const socketInstance = io(socketUrl, {
transports: ['websocket', 'polling'], // Keep polling fallback for reliability
withCredentials: true,
reconnectionAttempts: 5, // Socket.IO handles base reconnection
reconnectionAttempts: Number.POSITIVE_INFINITY, // Socket.IO handles base reconnection
reconnectionDelay: 1000, // Start with 1 second delay
reconnectionDelayMax: 5000, // Max 5 second delay
reconnectionDelayMax: 30000, // Max 30 second delay
timeout: 10000, // Back to original timeout
auth: (cb) => {
// Generate a fresh token for each connection attempt (including reconnections)
@@ -280,6 +284,12 @@ export function SocketProvider({ children, user }: SocketProviderProps) {
eventHandlers.current.workflowDeleted?.(data)
})
// Workflow revert events
socketInstance.on('workflow-reverted', (data) => {
logger.info(`Workflow ${data.workflowId} has been reverted to deployed state`)
eventHandlers.current.workflowReverted?.(data)
})
// Cursor update events
socketInstance.on('cursor-update', (data) => {
setPresenceUsers((prev) =>
@@ -556,6 +566,10 @@ export function SocketProvider({ children, user }: SocketProviderProps) {
eventHandlers.current.workflowDeleted = handler
}, [])
const onWorkflowReverted = useCallback((handler: (data: any) => void) => {
eventHandlers.current.workflowReverted = handler
}, [])
return (
<SocketContext.Provider
value={{
@@ -577,6 +591,7 @@ export function SocketProvider({ children, user }: SocketProviderProps) {
onUserJoined,
onUserLeft,
onWorkflowDeleted,
onWorkflowReverted,
}}
>
{children}

View File

@@ -38,4 +38,4 @@ declare global {
}
export const db = global.database || drizzleClient
if (process.env.NODE_ENV !== 'production') global.database = db
if (env.NODE_ENV !== 'production') global.database = db

View File

@@ -736,7 +736,29 @@ describe('AgentBlockHandler', () => {
})
})
it('should throw an error for invalid JSON in responseFormat', async () => {
it('should handle invalid JSON in responseFormat gracefully', async () => {
mockFetch.mockImplementationOnce(() => {
return Promise.resolve({
ok: true,
headers: {
get: (name: string) => {
if (name === 'Content-Type') return 'application/json'
if (name === 'X-Execution-Data') return null
return null
},
},
json: () =>
Promise.resolve({
content: 'Regular text response',
model: 'mock-model',
tokens: { prompt: 10, completion: 20, total: 30 },
timing: { total: 100 },
toolCalls: [],
cost: undefined,
}),
})
})
const inputs = {
model: 'gpt-4o',
userPrompt: 'Format this output.',
@@ -744,9 +766,60 @@ describe('AgentBlockHandler', () => {
responseFormat: '{invalid-json',
}
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
'Invalid response'
)
// Should not throw an error, but continue with default behavior
const result = await handler.execute(mockBlock, inputs, mockContext)
expect(result).toEqual({
content: 'Regular text response',
model: 'mock-model',
tokens: { prompt: 10, completion: 20, total: 30 },
toolCalls: { list: [], count: 0 },
providerTiming: { total: 100 },
cost: undefined,
})
})
it('should handle variable references in responseFormat gracefully', async () => {
mockFetch.mockImplementationOnce(() => {
return Promise.resolve({
ok: true,
headers: {
get: (name: string) => {
if (name === 'Content-Type') return 'application/json'
if (name === 'X-Execution-Data') return null
return null
},
},
json: () =>
Promise.resolve({
content: 'Regular text response',
model: 'mock-model',
tokens: { prompt: 10, completion: 20, total: 30 },
timing: { total: 100 },
toolCalls: [],
cost: undefined,
}),
})
})
const inputs = {
model: 'gpt-4o',
userPrompt: 'Format this output.',
apiKey: 'test-api-key',
responseFormat: '<start.input>',
}
// Should not throw an error, but continue with default behavior
const result = await handler.execute(mockBlock, inputs, mockContext)
expect(result).toEqual({
content: 'Regular text response',
model: 'mock-model',
tokens: { prompt: 10, completion: 20, total: 30 },
toolCalls: { list: [], count: 0 },
providerTiming: { total: 100 },
cost: undefined,
})
})
it('should handle errors from the provider request', async () => {

View File

@@ -58,22 +58,63 @@ export class AgentBlockHandler implements BlockHandler {
private parseResponseFormat(responseFormat?: string | object): any {
if (!responseFormat || responseFormat === '') return undefined
try {
const parsed =
typeof responseFormat === 'string' ? JSON.parse(responseFormat) : responseFormat
if (parsed && typeof parsed === 'object' && !parsed.schema && !parsed.name) {
// If already an object, process it directly
if (typeof responseFormat === 'object' && responseFormat !== null) {
const formatObj = responseFormat as any
if (!formatObj.schema && !formatObj.name) {
return {
name: 'response_schema',
schema: parsed,
schema: responseFormat,
strict: true,
}
}
return parsed
} catch (error: any) {
logger.error('Failed to parse response format:', { error })
throw new Error(`Invalid response format: ${error.message}`)
return responseFormat
}
// Handle string values
if (typeof responseFormat === 'string') {
const trimmedValue = responseFormat.trim()
// Check for variable references like <start.input>
if (trimmedValue.startsWith('<') && trimmedValue.includes('>')) {
logger.info('Response format contains variable reference:', {
value: trimmedValue,
})
// Variable references should have been resolved by the resolver before reaching here
// If we still have a variable reference, it means it couldn't be resolved
// Return undefined to use default behavior (no structured response)
return undefined
}
// Try to parse as JSON
try {
const parsed = JSON.parse(trimmedValue)
if (parsed && typeof parsed === 'object' && !parsed.schema && !parsed.name) {
return {
name: 'response_schema',
schema: parsed,
strict: true,
}
}
return parsed
} catch (error: any) {
logger.warn('Failed to parse response format as JSON, using default behavior:', {
error: error.message,
value: trimmedValue,
})
// Return undefined instead of throwing - this allows execution to continue
// without structured response format
return undefined
}
}
// For any other type, return undefined
logger.warn('Unexpected response format type, using default behavior:', {
type: typeof responseFormat,
value: responseFormat,
})
return undefined
}
private async formatTools(inputTools: ToolInput[], context: ExecutionContext): Promise<any[]> {

View File

@@ -159,7 +159,7 @@ export class WorkflowBlockHandler implements BlockHandler {
logger.info(`Loaded child workflow: ${workflowData.name} (${workflowId})`)
// Extract the workflow state
// Extract the workflow state (API returns normalized data in state field)
const workflowState = workflowData.state
if (!workflowState || !workflowState.blocks) {
@@ -167,7 +167,7 @@ export class WorkflowBlockHandler implements BlockHandler {
return null
}
// Use blocks directly since DB format should match UI format
// Use blocks directly since API returns data from normalized tables
const serializedWorkflow = this.serializer.serializeWorkflow(
workflowState.blocks,
workflowState.edges || [],

View File

@@ -30,6 +30,7 @@ import type {
NormalizedBlockOutput,
StreamingExecution,
} from './types'
import { streamingResponseFormatProcessor } from './utils'
const logger = createLogger('Executor')
@@ -242,7 +243,25 @@ export class Executor {
const streamingExec = output as StreamingExecution
const [streamForClient, streamForExecutor] = streamingExec.stream.tee()
const clientStreamingExec = { ...streamingExec, stream: streamForClient }
// Apply response format processing to the client stream if needed
const blockId = (streamingExec.execution as any).blockId
// Get response format from initial block states (passed from useWorkflowExecution)
// The initialBlockStates contain the subblock values including responseFormat
let responseFormat: any
if (this.initialBlockStates?.[blockId]) {
const blockState = this.initialBlockStates[blockId] as any
responseFormat = blockState.responseFormat
}
const processedClientStream = streamingResponseFormatProcessor.processStream(
streamForClient,
blockId,
context.selectedOutputIds || [],
responseFormat
)
const clientStreamingExec = { ...streamingExec, stream: processedClientStream }
try {
// Handle client stream with proper error handling
@@ -267,7 +286,41 @@ export class Executor {
const blockId = (streamingExec.execution as any).blockId
const blockState = context.blockStates.get(blockId)
if (blockState?.output) {
blockState.output.content = fullContent
// Check if we have response format - if so, preserve structured response
let responseFormat: any
if (this.initialBlockStates?.[blockId]) {
const initialBlockState = this.initialBlockStates[blockId] as any
responseFormat = initialBlockState.responseFormat
}
if (responseFormat && fullContent) {
// For structured responses, always try to parse the raw streaming content
// The streamForExecutor contains the raw JSON response, not the processed display text
try {
const parsedContent = JSON.parse(fullContent)
// Preserve metadata but spread parsed fields at root level (same as manual execution)
const structuredOutput = {
...parsedContent,
tokens: blockState.output.tokens,
toolCalls: blockState.output.toolCalls,
providerTiming: blockState.output.providerTiming,
cost: blockState.output.cost,
}
blockState.output = structuredOutput
// Also update the corresponding block log with the structured output
const blockLog = context.blockLogs.find((log) => log.blockId === blockId)
if (blockLog) {
blockLog.output = structuredOutput
}
} catch (parseError) {
// If parsing fails, fall back to setting content
blockState.output.content = fullContent
}
} else {
// No response format, use standard content setting
blockState.output.content = fullContent
}
}
} catch (readerError: any) {
logger.error('Error reading stream for executor:', readerError)
@@ -275,7 +328,40 @@ export class Executor {
const blockId = (streamingExec.execution as any).blockId
const blockState = context.blockStates.get(blockId)
if (blockState?.output && fullContent) {
blockState.output.content = fullContent
// Check if we have response format for error handling too
let responseFormat: any
if (this.initialBlockStates?.[blockId]) {
const initialBlockState = this.initialBlockStates[blockId] as any
responseFormat = initialBlockState.responseFormat
}
if (responseFormat) {
// For structured responses, always try to parse the raw streaming content
// The streamForExecutor contains the raw JSON response, not the processed display text
try {
const parsedContent = JSON.parse(fullContent)
const structuredOutput = {
...parsedContent,
tokens: blockState.output.tokens,
toolCalls: blockState.output.toolCalls,
providerTiming: blockState.output.providerTiming,
cost: blockState.output.cost,
}
blockState.output = structuredOutput
// Also update the corresponding block log with the structured output
const blockLog = context.blockLogs.find((log) => log.blockId === blockId)
if (blockLog) {
blockLog.output = structuredOutput
}
} catch (parseError) {
// If parsing fails, fall back to setting content
blockState.output.content = fullContent
}
} else {
// No response format, use standard content setting
blockState.output.content = fullContent
}
}
} finally {
try {
@@ -1257,6 +1343,7 @@ export class Executor {
context.blockLogs.push(blockLog)
// Skip console logging for infrastructure blocks like loops and parallels
// For streaming blocks, we'll add the console entry after stream processing
if (block.metadata?.id !== 'loop' && block.metadata?.id !== 'parallel') {
addConsole({
output: blockLog.output,

View File

@@ -269,3 +269,15 @@ export interface Tool<P = any, O = Record<string, any>> {
export interface ToolRegistry {
[key: string]: Tool
}
/**
* Interface for a stream processor that can process a stream based on a response format.
*/
export interface ResponseFormatStreamProcessor {
  /**
   * Transforms a streaming response so that only the selected response-format
   * fields are emitted instead of the full JSON wrapper.
   *
   * @param originalStream - Raw byte stream produced by the provider.
   * @param blockId - ID of the block whose output is being streamed.
   * @param selectedOutputIds - Output IDs chosen by the user; IDs of the form
   *   `<blockId>_<field>` select individual response-format fields, while
   *   dotted IDs (e.g. `<blockId>.content`) select regular outputs.
   * @param responseFormat - Optional structured response format for the block;
   *   implementations are expected to pass the stream through unchanged when
   *   it is absent.
   * @returns The processed stream, or the original stream when no processing
   *   applies.
   */
  processStream(
    originalStream: ReadableStream,
    blockId: string,
    selectedOutputIds: string[],
    responseFormat?: any
  ): ReadableStream
}

View File

@@ -0,0 +1,354 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { StreamingResponseFormatProcessor, streamingResponseFormatProcessor } from './utils'
vi.mock('@/lib/logs/console-logger', () => ({
createLogger: vi.fn().mockReturnValue({
debug: vi.fn(),
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
}),
}))
describe('StreamingResponseFormatProcessor', () => {
  let processor: StreamingResponseFormatProcessor

  beforeEach(() => {
    processor = new StreamingResponseFormatProcessor()
  })

  afterEach(() => {
    vi.clearAllMocks()
  })

  /** Builds a ReadableStream that emits the given string chunks, then closes. */
  function streamOf(...chunks: string[]): ReadableStream {
    return new ReadableStream({
      start(controller) {
        const encoder = new TextEncoder()
        for (const chunk of chunks) {
          controller.enqueue(encoder.encode(chunk))
        }
        controller.close()
      },
    })
  }

  /** Drains a ReadableStream to completion and returns its decoded text. */
  async function readAll(stream: ReadableStream): Promise<string> {
    const reader = stream.getReader()
    const decoder = new TextDecoder()
    let text = ''
    while (true) {
      const { done, value } = await reader.read()
      if (done) break
      text += decoder.decode(value)
    }
    return text
  }

  describe('processStream', () => {
    it.concurrent('should return original stream when no response format selection', async () => {
      const mockStream = streamOf('{"content": "test"}')

      const result = processor.processStream(
        mockStream,
        'block-1',
        ['block-1.content'], // No underscore, not a response-format selection
        { schema: { properties: { username: { type: 'string' } } } }
      )

      expect(result).toBe(mockStream)
    })

    it.concurrent('should return original stream when no response format provided', async () => {
      const mockStream = streamOf('{"content": "test"}')

      const result = processor.processStream(
        mockStream,
        'block-1',
        ['block-1_username'], // Has underscore but no response format
        undefined
      )

      expect(result).toBe(mockStream)
    })

    it.concurrent('should process stream and extract single selected field', async () => {
      const processed = processor.processStream(
        streamOf('{"username": "alice", "age": 25}'),
        'block-1',
        ['block-1_username'],
        { schema: { properties: { username: { type: 'string' }, age: { type: 'number' } } } }
      )

      expect(await readAll(processed)).toBe('alice')
    })

    it.concurrent('should process stream and extract multiple selected fields', async () => {
      const processed = processor.processStream(
        streamOf('{"username": "bob", "age": 30, "email": "bob@test.com"}'),
        'block-1',
        ['block-1_username', 'block-1_age'],
        { schema: { properties: { username: { type: 'string' }, age: { type: 'number' } } } }
      )

      // Multiple fields are joined with newlines
      expect(await readAll(processed)).toBe('bob\n30')
    })

    it.concurrent('should handle non-string field values by JSON stringifying them', async () => {
      const processed = processor.processStream(
        streamOf('{"config": {"theme": "dark", "notifications": true}, "count": 42}'),
        'block-1',
        ['block-1_config', 'block-1_count'],
        { schema: { properties: { config: { type: 'object' }, count: { type: 'number' } } } }
      )

      expect(await readAll(processed)).toBe('{"theme":"dark","notifications":true}\n42')
    })

    it.concurrent('should handle streaming JSON that comes in chunks', async () => {
      // Simulate streaming JSON split across two chunks
      const processed = processor.processStream(
        streamOf('{"username": "charlie"', ', "age": 35}'),
        'block-1',
        ['block-1_username'],
        { schema: { properties: { username: { type: 'string' }, age: { type: 'number' } } } }
      )

      expect(await readAll(processed)).toBe('charlie')
    })

    it.concurrent('should handle missing fields gracefully', async () => {
      const processed = processor.processStream(
        streamOf('{"username": "diana"}'),
        'block-1',
        ['block-1_username', 'block-1_missing_field'],
        { schema: { properties: { username: { type: 'string' } } } }
      )

      // Missing fields are silently skipped
      expect(await readAll(processed)).toBe('diana')
    })

    it.concurrent('should handle invalid JSON gracefully', async () => {
      const processed = processor.processStream(
        streamOf('invalid json'),
        'block-1',
        ['block-1_username'],
        { schema: { properties: { username: { type: 'string' } } } }
      )

      // Unparseable input produces no output rather than an error
      expect(await readAll(processed)).toBe('')
    })

    it.concurrent('should filter selected fields for correct block ID', async () => {
      const processed = processor.processStream(
        streamOf('{"username": "eve", "age": 28}'),
        'block-1',
        ['block-1_username', 'block-2_age'], // Different block ID should be filtered out
        { schema: { properties: { username: { type: 'string' }, age: { type: 'number' } } } }
      )

      expect(await readAll(processed)).toBe('eve')
    })

    it.concurrent('should handle empty result when no matching fields', async () => {
      const processed = processor.processStream(
        streamOf('{"other_field": "value"}'),
        'block-1',
        ['block-1_username'],
        { schema: { properties: { username: { type: 'string' } } } }
      )

      expect(await readAll(processed)).toBe('')
    })
  })

  describe('singleton instance', () => {
    it.concurrent('should export a singleton instance', () => {
      expect(streamingResponseFormatProcessor).toBeInstanceOf(StreamingResponseFormatProcessor)
    })

    it.concurrent('should return the same instance on multiple imports', () => {
      const instance1 = streamingResponseFormatProcessor
      const instance2 = streamingResponseFormatProcessor
      expect(instance1).toBe(instance2)
    })
  })

  describe('edge cases', () => {
    it.concurrent('should handle empty stream', async () => {
      const processed = processor.processStream(
        streamOf(),
        'block-1',
        ['block-1_username'],
        { schema: { properties: { username: { type: 'string' } } } }
      )

      expect(await readAll(processed)).toBe('')
    })

    it.concurrent('should handle very large JSON objects', async () => {
      const largeObject = {
        username: 'frank',
        data: 'x'.repeat(10000), // Large string
        nested: {
          deep: {
            value: 'test',
          },
        },
      }

      const processed = processor.processStream(
        streamOf(JSON.stringify(largeObject)),
        'block-1',
        ['block-1_username'],
        { schema: { properties: { username: { type: 'string' } } } }
      )

      expect(await readAll(processed)).toBe('frank')
    })
  })
})

201
apps/sim/executor/utils.ts Normal file
View File

@@ -0,0 +1,201 @@
import { createLogger } from '@/lib/logs/console-logger'
import type { ResponseFormatStreamProcessor } from './types'
const logger = createLogger('ExecutorUtils')
/**
* Processes a streaming response to extract only the selected response format fields
* instead of streaming the full JSON wrapper.
*/
/**
 * Processes a streaming response to extract only the selected response-format
 * fields instead of streaming the full JSON wrapper.
 *
 * The provider stream is buffered until it forms a complete JSON object; the
 * selected fields are then emitted once, joined with newlines (non-string
 * values are JSON-stringified).
 */
export class StreamingResponseFormatProcessor implements ResponseFormatStreamProcessor {
  processStream(
    originalStream: ReadableStream,
    blockId: string,
    selectedOutputIds: string[],
    responseFormat?: any
  ): ReadableStream {
    // Output IDs of the form `<blockId>_<field>` select individual
    // response-format fields of this block; dotted IDs select regular outputs.
    const selectedFields = this.getSelectedFields(blockId, selectedOutputIds)

    // No response-format selection for this block (or no format at all):
    // pass the stream through unchanged.
    if (selectedFields.length === 0 || !responseFormat) {
      return originalStream
    }

    logger.info('Processing streaming response format', {
      blockId,
      selectedFields,
      hasResponseFormat: !!responseFormat,
      selectedFieldsCount: selectedFields.length,
    })

    return this.createProcessedStream(originalStream, selectedFields, blockId)
  }

  /**
   * Returns the response-format field names selected for `blockId`, i.e. the
   * `<field>` part of every selected `<blockId>_<field>` output ID.
   */
  private getSelectedFields(blockId: string, selectedOutputIds: string[]): string[] {
    return selectedOutputIds
      .filter((outputId) => {
        const blockIdForOutput = outputId.includes('_')
          ? outputId.split('_')[0]
          : outputId.split('.')[0]
        return blockIdForOutput === blockId && outputId.includes('_')
      })
      .map((outputId) => outputId.substring(blockId.length + 1))
  }

  /**
   * Wraps `originalStream` in a stream that buffers incoming chunks and, once
   * the buffer parses as a complete JSON object, emits only the selected
   * fields (exactly once).
   */
  private createProcessedStream(
    originalStream: ReadableStream,
    selectedFields: string[],
    blockId: string
  ): ReadableStream {
    let buffer = ''
    let hasProcessedComplete = false // Emit the extracted fields at most once
    const self = this

    return new ReadableStream({
      async start(controller) {
        const reader = originalStream.getReader()
        const decoder = new TextDecoder()

        try {
          while (true) {
            const { done, value } = await reader.read()

            if (done) {
              // Flush any remaining buffer at end of stream, unless the
              // complete JSON was already processed mid-stream.
              if (buffer.trim() && !hasProcessedComplete) {
                // warnOnParseError: a still-unparseable buffer at stream end
                // is a real problem worth logging, unlike mid-stream partials.
                const result = self.extractSelectedFields(buffer, selectedFields, true)
                if (result !== null) {
                  controller.enqueue(new TextEncoder().encode(result))
                }
              }
              controller.close()
              break
            }

            buffer += decoder.decode(value, { stream: true })

            // Try to extract once the buffer forms a complete JSON object.
            if (!hasProcessedComplete) {
              const processedChunk = self.extractSelectedFields(buffer, selectedFields)
              // Truthiness check (not null check) matches the emit semantics:
              // an all-empty-string extraction is neither emitted nor marked
              // as processed, so the end-of-stream flush can still run.
              if (processedChunk) {
                controller.enqueue(new TextEncoder().encode(processedChunk))
                hasProcessedComplete = true // Prevent duplicate processing
              }
            }
          }
        } catch (error) {
          logger.error('Error processing streaming response format:', { error, blockId })
          controller.error(error)
        } finally {
          reader.releaseLock()
        }
      },
    })
  }

  /**
   * Parses `buffer` as JSON and joins the values of `selectedFields` with
   * newlines (non-string values are JSON-stringified).
   *
   * @param warnOnParseError - Log a warning when the buffer is not valid JSON
   *   (used at end of stream; mid-stream partial JSON is expected and silent).
   * @returns The joined field values, or null when the buffer is not (yet) a
   *   complete JSON object or none of the selected fields are present.
   */
  private extractSelectedFields(
    buffer: string,
    selectedFields: string[],
    warnOnParseError = false
  ): string | null {
    let parsed: unknown
    try {
      parsed = JSON.parse(buffer.trim())
    } catch (error) {
      if (warnOnParseError) {
        logger.warn('Failed to parse complete JSON in streaming processor:', { error })
      }
      // Not complete JSON yet (or not JSON at all)
      return null
    }

    if (typeof parsed !== 'object' || parsed === null) {
      return null
    }

    const record = parsed as Record<string, unknown>
    const results: string[] = []
    for (const field of selectedFields) {
      if (field in record) {
        const value = record[field]
        results.push(typeof value === 'string' ? value : JSON.stringify(value))
      }
    }

    // Join multiple fields with newlines for readability
    return results.length > 0 ? results.join('\n') : null
  }
}

// Create singleton instance
export const streamingResponseFormatProcessor = new StreamingResponseFormatProcessor()

View File

@@ -25,6 +25,7 @@ export function useCollaborativeWorkflow() {
onUserJoined,
onUserLeft,
onWorkflowDeleted,
onWorkflowReverted,
} = useSocket()
const { activeWorkflowId } = useWorkflowRegistry()
@@ -262,12 +263,80 @@ export function useCollaborativeWorkflow() {
}
}
const handleWorkflowReverted = async (data: any) => {
const { workflowId } = data
logger.info(`Workflow ${workflowId} has been reverted to deployed state`)
// If the reverted workflow is the currently active one, reload the workflow state
if (activeWorkflowId === workflowId) {
logger.info(`Currently active workflow ${workflowId} was reverted, reloading state`)
try {
// Fetch the updated workflow state from the server (which loads from normalized tables)
const response = await fetch(`/api/workflows/${workflowId}`)
if (response.ok) {
const responseData = await response.json()
const workflowData = responseData.data
if (workflowData?.state) {
// Update the workflow store with the reverted state
isApplyingRemoteChange.current = true
try {
// Update the main workflow state using the API response
useWorkflowStore.setState({
blocks: workflowData.state.blocks || {},
edges: workflowData.state.edges || [],
loops: workflowData.state.loops || {},
parallels: workflowData.state.parallels || {},
isDeployed: workflowData.state.isDeployed || false,
deployedAt: workflowData.state.deployedAt,
lastSaved: workflowData.state.lastSaved || Date.now(),
hasActiveSchedule: workflowData.state.hasActiveSchedule || false,
hasActiveWebhook: workflowData.state.hasActiveWebhook || false,
deploymentStatuses: workflowData.state.deploymentStatuses || {},
})
// Update subblock store with reverted values
const subblockValues: Record<string, Record<string, any>> = {}
Object.entries(workflowData.state.blocks || {}).forEach(([blockId, block]) => {
const blockState = block as any
subblockValues[blockId] = {}
Object.entries(blockState.subBlocks || {}).forEach(([subblockId, subblock]) => {
subblockValues[blockId][subblockId] = (subblock as any).value
})
})
// Update subblock store for this workflow
useSubBlockStore.setState((state: any) => ({
workflowValues: {
...state.workflowValues,
[workflowId]: subblockValues,
},
}))
logger.info(`Successfully loaded reverted workflow state for ${workflowId}`)
} finally {
isApplyingRemoteChange.current = false
}
} else {
logger.error('No state found in workflow data after revert', { workflowData })
}
} else {
logger.error(`Failed to fetch workflow data after revert: ${response.statusText}`)
}
} catch (error) {
logger.error('Error reloading workflow state after revert:', error)
}
}
}
// Register event handlers
onWorkflowOperation(handleWorkflowOperation)
onSubblockUpdate(handleSubblockUpdate)
onUserJoined(handleUserJoined)
onUserLeft(handleUserLeft)
onWorkflowDeleted(handleWorkflowDeleted)
onWorkflowReverted(handleWorkflowReverted)
return () => {
// Cleanup handled by socket context
@@ -278,6 +347,7 @@ export function useCollaborativeWorkflow() {
onUserJoined,
onUserLeft,
onWorkflowDeleted,
onWorkflowReverted,
workflowStore,
subBlockStore,
activeWorkflowId,

View File

@@ -1,13 +1,14 @@
import { stripeClient } from '@better-auth/stripe/client'
import { emailOTPClient, genericOAuthClient, organizationClient } from 'better-auth/client/plugins'
import { createAuthClient } from 'better-auth/react'
import { env } from './env'
const clientEnv = {
NEXT_PUBLIC_VERCEL_URL: process.env.NEXT_PUBLIC_VERCEL_URL,
NEXT_PUBLIC_APP_URL: process.env.NEXT_PUBLIC_APP_URL,
NODE_ENV: process.env.NODE_ENV,
VERCEL_ENV: process.env.VERCEL_ENV || '',
BETTER_AUTH_URL: process.env.BETTER_AUTH_URL,
NEXT_PUBLIC_VERCEL_URL: env.NEXT_PUBLIC_VERCEL_URL,
NEXT_PUBLIC_APP_URL: env.NEXT_PUBLIC_APP_URL,
NODE_ENV: env.NODE_ENV,
VERCEL_ENV: env.VERCEL_ENV || '',
BETTER_AUTH_URL: env.BETTER_AUTH_URL,
}
export function getBaseURL() {

View File

@@ -135,6 +135,7 @@ export const auth = betterAuth({
'notion',
'microsoft',
'slack',
'reddit',
],
},
},
@@ -825,6 +826,57 @@ export const auth = betterAuth({
},
},
// Reddit provider
{
providerId: 'reddit',
clientId: env.REDDIT_CLIENT_ID as string,
clientSecret: env.REDDIT_CLIENT_SECRET as string,
authorizationUrl: 'https://www.reddit.com/api/v1/authorize',
tokenUrl: 'https://www.reddit.com/api/v1/access_token',
userInfoUrl: 'https://oauth.reddit.com/api/v1/me',
scopes: ['identity', 'read'],
responseType: 'code',
pkce: false,
accessType: 'offline',
authentication: 'basic',
prompt: 'consent',
redirectURI: `${env.NEXT_PUBLIC_APP_URL}/api/auth/oauth2/callback/reddit`,
getUserInfo: async (tokens) => {
try {
const response = await fetch('https://oauth.reddit.com/api/v1/me', {
headers: {
Authorization: `Bearer ${tokens.accessToken}`,
'User-Agent': 'sim-studio/1.0',
},
})
if (!response.ok) {
logger.error('Error fetching Reddit user info:', {
status: response.status,
statusText: response.statusText,
})
return null
}
const data = await response.json()
const now = new Date()
return {
id: data.id,
name: data.name || 'Reddit User',
email: `${data.name}@reddit.user`, // Reddit doesn't provide email in identity scope
image: data.icon_img || null,
emailVerified: false,
createdAt: now,
updatedAt: now,
}
} catch (error) {
logger.error('Error in Reddit getUserInfo:', { error })
return null
}
},
},
{
providerId: 'linear',
clientId: env.LINEAR_CLIENT_ID as string,

View File

@@ -6,7 +6,11 @@ import {
verifyUnsubscribeToken,
} from './unsubscribe'
vi.stubEnv('BETTER_AUTH_SECRET', 'test-secret-key')
vi.mock('../env', () => ({
env: {
BETTER_AUTH_SECRET: 'test-secret-key',
},
}))
describe('unsubscribe utilities', () => {
const testEmail = 'test@example.com'
@@ -75,10 +79,9 @@ describe('unsubscribe utilities', () => {
it.concurrent('should handle legacy tokens (2 parts) and default to marketing', () => {
// Generate a real legacy token using the actual hashing logic to ensure backward compatibility
const salt = 'abc123'
const secret = 'test-secret-key'
const { createHash } = require('crypto')
const hash = createHash('sha256')
.update(`${testEmail}:${salt}:${process.env.BETTER_AUTH_SECRET}`)
.digest('hex')
const hash = createHash('sha256').update(`${testEmail}:${salt}:${secret}`).digest('hex')
const legacyToken = `${salt}:${hash}`
// This should return valid since we're using the actual legacy format properly

View File

@@ -3,6 +3,7 @@ import { eq } from 'drizzle-orm'
import { createLogger } from '@/lib/logs/console-logger'
import { db } from '@/db'
import { settings, user } from '@/db/schema'
import { env } from '../env'
import type { EmailType } from './mailer'
const logger = createLogger('Unsubscribe')
@@ -20,7 +21,7 @@ export interface EmailPreferences {
export function generateUnsubscribeToken(email: string, emailType = 'marketing'): string {
const salt = randomBytes(16).toString('hex')
const hash = createHash('sha256')
.update(`${email}:${salt}:${emailType}:${process.env.BETTER_AUTH_SECRET}`)
.update(`${email}:${salt}:${emailType}:${env.BETTER_AUTH_SECRET}`)
.digest('hex')
return `${salt}:${hash}:${emailType}`
@@ -41,7 +42,7 @@ export function verifyUnsubscribeToken(
if (parts.length === 2) {
const [salt, expectedHash] = parts
const hash = createHash('sha256')
.update(`${email}:${salt}:${process.env.BETTER_AUTH_SECRET}`)
.update(`${email}:${salt}:${env.BETTER_AUTH_SECRET}`)
.digest('hex')
return { valid: hash === expectedHash, emailType: 'marketing' }
@@ -52,7 +53,7 @@ export function verifyUnsubscribeToken(
if (!salt || !expectedHash || !emailType) return { valid: false }
const hash = createHash('sha256')
.update(`${email}:${salt}:${emailType}:${process.env.BETTER_AUTH_SECRET}`)
.update(`${email}:${salt}:${emailType}:${env.BETTER_AUTH_SECRET}`)
.digest('hex')
return { valid: hash === expectedHash, emailType }

View File

@@ -103,7 +103,11 @@ export const env = createEnv({
LINEAR_CLIENT_SECRET: z.string().optional(),
SLACK_CLIENT_ID: z.string().optional(),
SLACK_CLIENT_SECRET: z.string().optional(),
REDDIT_CLIENT_ID: z.string().optional(),
REDDIT_CLIENT_SECRET: z.string().optional(),
SOCKET_SERVER_URL: z.string().url().optional(),
SOCKET_PORT: z.number().optional(),
PORT: z.number().optional(),
},
client: {

View File

@@ -93,10 +93,10 @@ export async function executeCode(
nodeModules: packages,
timeout: null,
// Add environment variables if needed
envVars: Object.entries(process.env).reduce(
envVars: Object.entries(env).reduce(
(acc, [key, value]) => {
if (value !== undefined) {
acc[key] = value
acc[key] = value as string
}
return acc
},

View File

@@ -5,6 +5,7 @@
* It is separate from the user-facing logging system in logging.ts.
*/
import chalk from 'chalk'
import { env } from '../env'
/**
* LogLevel enum defines the severity levels for logging
@@ -55,7 +56,7 @@ const LOG_CONFIG = {
}
// Get current environment
const ENV = (process.env.NODE_ENV || 'development') as keyof typeof LOG_CONFIG
const ENV = (env.NODE_ENV || 'development') as keyof typeof LOG_CONFIG
const config = LOG_CONFIG[ENV] || LOG_CONFIG.development
// Format objects for logging

View File

@@ -0,0 +1,34 @@
import { beforeEach, describe, expect, test } from 'vitest'
import { EnhancedExecutionLogger } from './enhanced-execution-logger'
// Unit tests for EnhancedExecutionLogger. Only DB-free behavior is covered
// here; anything that touches drizzle/postgres needs a mocked db (see note at
// the bottom of the suite).
describe('EnhancedExecutionLogger', () => {
  let logger: EnhancedExecutionLogger

  beforeEach(() => {
    logger = new EnhancedExecutionLogger()
  })

  describe('class instantiation', () => {
    test('should create logger instance', () => {
      expect(logger).toBeDefined()
      expect(logger).toBeInstanceOf(EnhancedExecutionLogger)
    })
  })

  describe('getTriggerPrefix', () => {
    test('should return correct prefixes for trigger types', () => {
      // Access the private method for testing. `getTriggerPrefix` is private
      // on the class, so we cast to `any` and bind back to the instance.
      const getTriggerPrefix = (logger as any).getTriggerPrefix.bind(logger)

      expect(getTriggerPrefix('api')).toBe('API')
      expect(getTriggerPrefix('webhook')).toBe('Webhook')
      expect(getTriggerPrefix('schedule')).toBe('Scheduled')
      expect(getTriggerPrefix('manual')).toBe('Manual')
      expect(getTriggerPrefix('chat')).toBe('Chat')
      // Unknown trigger types fall through to the default branch.
      expect(getTriggerPrefix('unknown' as any)).toBe('Unknown')
    })
  })

  // Note: Database integration tests would require proper mocking setup
  // For now, we're testing the basic functionality without database calls
})

View File

@@ -0,0 +1,396 @@
import { eq } from 'drizzle-orm'
import { v4 as uuidv4 } from 'uuid'
import { db } from '@/db'
import { workflowExecutionBlocks, workflowExecutionLogs } from '@/db/schema'
import { createLogger } from './console-logger'
import { snapshotService } from './snapshot-service'
import type {
BlockExecutionLog,
BlockInputData,
BlockOutputData,
CostBreakdown,
ExecutionEnvironment,
ExecutionTrigger,
ExecutionLoggerService as IExecutionLoggerService,
TraceSpan,
WorkflowExecutionLog,
WorkflowExecutionSnapshot,
WorkflowState,
} from './types'
const logger = createLogger('EnhancedExecutionLogger')

/**
 * Persistence layer for the enhanced execution logging system.
 *
 * Writes one `workflowExecutionLogs` row per execution (created when the
 * execution starts, finalized when it completes) plus one
 * `workflowExecutionBlocks` row per executed block, and maps those DB rows
 * back to the typed log interfaces consumed by the rest of the app.
 *
 * Cost columns are DECIMALs persisted as strings (hence the
 * `toString()`/`Number()` round-trips); timestamps are persisted as Dates and
 * surfaced as ISO-8601 strings.
 */
export class EnhancedExecutionLogger implements IExecutionLoggerService {
  /**
   * Create the workflow-level log row for a new execution.
   *
   * Also snapshots the workflow state via `snapshotService` (deduplicated by
   * content hash) so the execution can later be inspected against the exact
   * state it ran on.
   */
  async startWorkflowExecution(params: {
    workflowId: string
    executionId: string
    trigger: ExecutionTrigger
    environment: ExecutionEnvironment
    workflowState: WorkflowState
  }): Promise<{
    workflowLog: WorkflowExecutionLog
    snapshot: WorkflowExecutionSnapshot
  }> {
    const { workflowId, executionId, trigger, environment, workflowState } = params

    logger.debug(`Starting workflow execution ${executionId} for workflow ${workflowId}`)

    const snapshotResult = await snapshotService.createSnapshotWithDeduplication(
      workflowId,
      workflowState
    )

    const startTime = new Date()

    const [workflowLog] = await db
      .insert(workflowExecutionLogs)
      .values({
        id: uuidv4(),
        workflowId,
        executionId,
        stateSnapshotId: snapshotResult.snapshot.id,
        level: 'info',
        message: `${this.getTriggerPrefix(trigger.type)} execution started`,
        trigger: trigger.type,
        startedAt: startTime,
        // Completion fields stay NULL until completeWorkflowExecution() runs.
        endedAt: null,
        totalDurationMs: null,
        blockCount: 0,
        successCount: 0,
        errorCount: 0,
        skippedCount: 0,
        totalCost: null,
        totalInputCost: null,
        totalOutputCost: null,
        totalTokens: null,
        metadata: {
          environment,
          trigger,
        },
      })
      .returning()

    logger.debug(`Created workflow log ${workflowLog.id} for execution ${executionId}`)

    return {
      workflowLog: {
        id: workflowLog.id,
        workflowId: workflowLog.workflowId,
        executionId: workflowLog.executionId,
        stateSnapshotId: workflowLog.stateSnapshotId,
        level: workflowLog.level as 'info' | 'error',
        message: workflowLog.message,
        trigger: workflowLog.trigger as ExecutionTrigger['type'],
        startedAt: workflowLog.startedAt.toISOString(),
        // Not yet ended: fall back to startedAt so the field is always set.
        endedAt: workflowLog.endedAt?.toISOString() || workflowLog.startedAt.toISOString(),
        totalDurationMs: workflowLog.totalDurationMs || 0,
        blockCount: workflowLog.blockCount,
        successCount: workflowLog.successCount,
        errorCount: workflowLog.errorCount,
        skippedCount: workflowLog.skippedCount,
        totalCost: Number(workflowLog.totalCost) || 0,
        totalInputCost: Number(workflowLog.totalInputCost) || 0,
        totalOutputCost: Number(workflowLog.totalOutputCost) || 0,
        totalTokens: workflowLog.totalTokens || 0,
        metadata: workflowLog.metadata as WorkflowExecutionLog['metadata'],
        createdAt: workflowLog.createdAt.toISOString(),
      },
      snapshot: snapshotResult.snapshot,
    }
  }

  /**
   * Persist a single block execution (input, output, timing, status, optional
   * error and cost breakdown) and return the typed block log.
   */
  async logBlockExecution(params: {
    executionId: string
    workflowId: string
    blockId: string
    blockName: string
    blockType: string
    input: BlockInputData
    output: BlockOutputData
    timing: {
      startedAt: string
      endedAt: string
      durationMs: number
    }
    status: BlockExecutionLog['status']
    error?: {
      message: string
      stackTrace?: string
    }
    cost?: CostBreakdown
    metadata?: BlockExecutionLog['metadata']
  }): Promise<BlockExecutionLog> {
    const {
      executionId,
      workflowId,
      blockId,
      blockName,
      blockType,
      input,
      output,
      timing,
      status,
      error,
      cost,
      metadata,
    } = params

    logger.debug(`Logging block execution ${blockId} for execution ${executionId}`)

    const blockLogId = uuidv4()

    const [blockLog] = await db
      .insert(workflowExecutionBlocks)
      .values({
        id: blockLogId,
        executionId,
        workflowId,
        blockId,
        blockName,
        blockType,
        startedAt: new Date(timing.startedAt),
        endedAt: new Date(timing.endedAt),
        durationMs: timing.durationMs,
        status,
        errorMessage: error?.message || null,
        errorStackTrace: error?.stackTrace || null,
        inputData: input,
        outputData: output,
        // Explicit null checks (not truthiness): a legitimate zero cost or
        // zero token count must be persisted as 0, not coerced to NULL.
        costInput: cost?.input != null ? cost.input.toString() : null,
        costOutput: cost?.output != null ? cost.output.toString() : null,
        costTotal: cost?.total != null ? cost.total.toString() : null,
        tokensPrompt: cost?.tokens?.prompt ?? null,
        tokensCompletion: cost?.tokens?.completion ?? null,
        tokensTotal: cost?.tokens?.total ?? null,
        // Empty-string model names are intentionally normalized to NULL.
        modelUsed: cost?.model || null,
        metadata: metadata || {},
      })
      .returning()

    logger.debug(`Created block log ${blockLog.id} for block ${blockId}`)

    return {
      id: blockLog.id,
      executionId: blockLog.executionId,
      workflowId: blockLog.workflowId,
      blockId: blockLog.blockId,
      blockName: blockLog.blockName || '',
      blockType: blockLog.blockType,
      startedAt: blockLog.startedAt.toISOString(),
      endedAt: blockLog.endedAt?.toISOString() || timing.endedAt,
      durationMs: blockLog.durationMs || timing.durationMs,
      status: blockLog.status as BlockExecutionLog['status'],
      errorMessage: blockLog.errorMessage || undefined,
      errorStackTrace: blockLog.errorStackTrace || undefined,
      inputData: input,
      outputData: output,
      cost: cost || null,
      metadata: (blockLog.metadata as BlockExecutionLog['metadata']) || {},
      createdAt: blockLog.createdAt.toISOString(),
    }
  }

  /**
   * Finalize the workflow log row: set level/message from the block stats,
   * record end time, duration, aggregate costs/tokens and stash trace spans
   * plus the final output in metadata.
   *
   * @throws Error when no log row exists for `executionId`.
   */
  async completeWorkflowExecution(params: {
    executionId: string
    endedAt: string
    totalDurationMs: number
    blockStats: {
      total: number
      success: number
      error: number
      skipped: number
    }
    costSummary: {
      totalCost: number
      totalInputCost: number
      totalOutputCost: number
      totalTokens: number
      totalPromptTokens: number
      totalCompletionTokens: number
      models: Record<
        string,
        {
          input: number
          output: number
          total: number
          tokens: { prompt: number; completion: number; total: number }
        }
      >
    }
    finalOutput: BlockOutputData
    traceSpans?: TraceSpan[]
  }): Promise<WorkflowExecutionLog> {
    const {
      executionId,
      endedAt,
      totalDurationMs,
      blockStats,
      costSummary,
      finalOutput,
      traceSpans,
    } = params

    logger.debug(`Completing workflow execution ${executionId}`)

    // Any block error marks the whole execution as failed.
    const level = blockStats.error > 0 ? 'error' : 'info'
    const message =
      blockStats.error > 0
        ? `Workflow execution failed: ${blockStats.error} error(s), ${blockStats.success} success(es)`
        : `Workflow execution completed: ${blockStats.success} block(s) executed successfully`

    const [updatedLog] = await db
      .update(workflowExecutionLogs)
      .set({
        level,
        message,
        endedAt: new Date(endedAt),
        totalDurationMs,
        blockCount: blockStats.total,
        successCount: blockStats.success,
        errorCount: blockStats.error,
        skippedCount: blockStats.skipped,
        totalCost: costSummary.totalCost.toString(),
        totalInputCost: costSummary.totalInputCost.toString(),
        totalOutputCost: costSummary.totalOutputCost.toString(),
        totalTokens: costSummary.totalTokens,
        metadata: {
          traceSpans,
          finalOutput,
          tokenBreakdown: {
            prompt: costSummary.totalPromptTokens,
            completion: costSummary.totalCompletionTokens,
            total: costSummary.totalTokens,
          },
          models: costSummary.models,
        },
      })
      .where(eq(workflowExecutionLogs.executionId, executionId))
      .returning()

    if (!updatedLog) {
      throw new Error(`Workflow log not found for execution ${executionId}`)
    }

    logger.debug(`Completed workflow execution ${executionId}`)

    return {
      id: updatedLog.id,
      workflowId: updatedLog.workflowId,
      executionId: updatedLog.executionId,
      stateSnapshotId: updatedLog.stateSnapshotId,
      level: updatedLog.level as 'info' | 'error',
      message: updatedLog.message,
      trigger: updatedLog.trigger as ExecutionTrigger['type'],
      startedAt: updatedLog.startedAt.toISOString(),
      endedAt: updatedLog.endedAt?.toISOString() || endedAt,
      totalDurationMs: updatedLog.totalDurationMs || totalDurationMs,
      blockCount: updatedLog.blockCount,
      successCount: updatedLog.successCount,
      errorCount: updatedLog.errorCount,
      skippedCount: updatedLog.skippedCount,
      totalCost: Number(updatedLog.totalCost) || 0,
      totalInputCost: Number(updatedLog.totalInputCost) || 0,
      totalOutputCost: Number(updatedLog.totalOutputCost) || 0,
      totalTokens: updatedLog.totalTokens || 0,
      metadata: updatedLog.metadata as WorkflowExecutionLog['metadata'],
      createdAt: updatedLog.createdAt.toISOString(),
    }
  }

  /**
   * Fetch all block logs for an execution, ordered by start time, mapped to
   * the typed interface. The cost breakdown is reconstructed only when a
   * total cost was recorded; pricing is not stored per-row, so it is zeroed.
   */
  async getBlockExecutionsForWorkflow(executionId: string): Promise<BlockExecutionLog[]> {
    const blockLogs = await db
      .select()
      .from(workflowExecutionBlocks)
      .where(eq(workflowExecutionBlocks.executionId, executionId))
      .orderBy(workflowExecutionBlocks.startedAt)

    return blockLogs.map((log) => ({
      id: log.id,
      executionId: log.executionId,
      workflowId: log.workflowId,
      blockId: log.blockId,
      blockName: log.blockName || '',
      blockType: log.blockType,
      startedAt: log.startedAt.toISOString(),
      endedAt: log.endedAt?.toISOString() || log.startedAt.toISOString(),
      durationMs: log.durationMs || 0,
      status: log.status as BlockExecutionLog['status'],
      errorMessage: log.errorMessage || undefined,
      errorStackTrace: log.errorStackTrace || undefined,
      inputData: log.inputData as BlockInputData,
      outputData: log.outputData as BlockOutputData,
      // NULL-check (not truthiness) so a recorded zero-cost row still
      // round-trips as a cost object rather than null.
      cost:
        log.costTotal != null
          ? {
              input: Number(log.costInput) || 0,
              output: Number(log.costOutput) || 0,
              total: Number(log.costTotal) || 0,
              tokens: {
                prompt: log.tokensPrompt || 0,
                completion: log.tokensCompletion || 0,
                total: log.tokensTotal || 0,
              },
              model: log.modelUsed || '',
              pricing: {
                input: 0,
                output: 0,
                updatedAt: new Date().toISOString(),
              },
            }
          : null,
      metadata: (log.metadata as BlockExecutionLog['metadata']) || {},
      createdAt: log.createdAt.toISOString(),
    }))
  }

  /**
   * Fetch the workflow-level log for an execution, or null when none exists.
   */
  async getWorkflowExecution(executionId: string): Promise<WorkflowExecutionLog | null> {
    const [workflowLog] = await db
      .select()
      .from(workflowExecutionLogs)
      .where(eq(workflowExecutionLogs.executionId, executionId))
      .limit(1)

    if (!workflowLog) return null

    return {
      id: workflowLog.id,
      workflowId: workflowLog.workflowId,
      executionId: workflowLog.executionId,
      stateSnapshotId: workflowLog.stateSnapshotId,
      level: workflowLog.level as 'info' | 'error',
      message: workflowLog.message,
      trigger: workflowLog.trigger as ExecutionTrigger['type'],
      startedAt: workflowLog.startedAt.toISOString(),
      endedAt: workflowLog.endedAt?.toISOString() || workflowLog.startedAt.toISOString(),
      totalDurationMs: workflowLog.totalDurationMs || 0,
      blockCount: workflowLog.blockCount,
      successCount: workflowLog.successCount,
      errorCount: workflowLog.errorCount,
      skippedCount: workflowLog.skippedCount,
      totalCost: Number(workflowLog.totalCost) || 0,
      totalInputCost: Number(workflowLog.totalInputCost) || 0,
      totalOutputCost: Number(workflowLog.totalOutputCost) || 0,
      totalTokens: workflowLog.totalTokens || 0,
      metadata: workflowLog.metadata as WorkflowExecutionLog['metadata'],
      createdAt: workflowLog.createdAt.toISOString(),
    }
  }

  /** Human-readable prefix used in the "<prefix> execution started" message. */
  private getTriggerPrefix(triggerType: ExecutionTrigger['type']): string {
    switch (triggerType) {
      case 'api':
        return 'API'
      case 'webhook':
        return 'Webhook'
      case 'schedule':
        return 'Scheduled'
      case 'manual':
        return 'Manual'
      case 'chat':
        return 'Chat'
      default:
        return 'Unknown'
    }
  }
}

export const enhancedExecutionLogger = new EnhancedExecutionLogger()

View File

@@ -0,0 +1,197 @@
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/db-helpers'
import type { ExecutionEnvironment, ExecutionTrigger, WorkflowState } from './types'
/**
 * Build an ExecutionTrigger for the given trigger type.
 *
 * `source` mirrors the type, `timestamp` is the current time (ISO-8601), and
 * `data` is only present when additional trigger data is supplied.
 */
export function createTriggerObject(
  type: ExecutionTrigger['type'],
  additionalData?: Record<string, unknown>
): ExecutionTrigger {
  const trigger: ExecutionTrigger = {
    type,
    source: type,
    timestamp: new Date().toISOString(),
  }
  if (additionalData) {
    return { ...trigger, data: additionalData }
  }
  return trigger
}
/**
 * Build an ExecutionEnvironment for an execution.
 *
 * Optional fields default to an empty map (`variables`) or empty string
 * (`userId`, `workspaceId`) so consumers never see undefined.
 */
export function createEnvironmentObject(
  workflowId: string,
  executionId: string,
  userId?: string,
  workspaceId?: string,
  variables?: Record<string, string>
): ExecutionEnvironment {
  const environment: ExecutionEnvironment = {
    variables: variables ?? {},
    workflowId,
    executionId,
    userId: userId ?? '',
    workspaceId: workspaceId ?? '',
  }
  return environment
}
/**
 * Load the workflow state needed for execution logging from the normalized
 * tables, defaulting any missing section to an empty collection.
 *
 * @throws Error when no normalized data exists for the workflow.
 */
export async function loadWorkflowStateForExecution(workflowId: string): Promise<WorkflowState> {
  const normalized = await loadWorkflowFromNormalizedTables(workflowId)

  if (!normalized) {
    throw new Error(
      `Workflow ${workflowId} has no normalized data available. Ensure the workflow is properly saved to normalized tables.`
    )
  }

  const { blocks, edges, loops, parallels } = normalized
  return {
    blocks: blocks || {},
    edges: edges || [],
    loops: loops || {},
    parallels: parallels || {},
  }
}
/**
 * Count block-level spans in a trace-span tree, broken down by status.
 *
 * A span counts as a block when it has a `blockId` and a `type` that is not
 * one of the structural/container types ('workflow', 'provider', 'model').
 * Children are traversed recursively; traversal order does not affect counts.
 */
export function calculateBlockStats(traceSpans: any[]): {
  total: number
  success: number
  error: number
  skipped: number
} {
  const stats = { total: 0, success: 0, error: 0, skipped: 0 }
  if (!traceSpans || traceSpans.length === 0) {
    return stats
  }

  // Span types that represent structure rather than executed blocks.
  const containerTypes = new Set(['workflow', 'provider', 'model'])

  // Explicit stack instead of recursion; counting is order-independent.
  const stack: any[] = [...traceSpans]
  while (stack.length > 0) {
    const span = stack.pop()

    if (span.type && !containerTypes.has(span.type) && span.blockId) {
      stats.total += 1
      if (span.status === 'success') {
        stats.success += 1
      } else if (span.status === 'error') {
        stats.error += 1
      } else if (span.status === 'skipped') {
        stats.skipped += 1
      }
    }

    if (span.children && Array.isArray(span.children)) {
      stack.push(...span.children)
    }
  }

  return stats
}
/**
 * Aggregate cost and token usage across a trace-span tree.
 *
 * Only spans carrying a `cost` object contribute. Token counts live at
 * `span.tokens` (not `span.cost.tokens`) and the model name at `span.model`;
 * per-model subtotals are keyed by that model name.
 */
export function calculateCostSummary(traceSpans: any[]): {
  totalCost: number
  totalInputCost: number
  totalOutputCost: number
  totalTokens: number
  totalPromptTokens: number
  totalCompletionTokens: number
  models: Record<
    string,
    {
      input: number
      output: number
      total: number
      tokens: { prompt: number; completion: number; total: number }
    }
  >
} {
  const summary = {
    totalCost: 0,
    totalInputCost: 0,
    totalOutputCost: 0,
    totalTokens: 0,
    totalPromptTokens: 0,
    totalCompletionTokens: 0,
    models: {} as Record<
      string,
      {
        input: number
        output: number
        total: number
        tokens: { prompt: number; completion: number; total: number }
      }
    >,
  }

  if (!traceSpans || traceSpans.length === 0) {
    return summary
  }

  // Single-pass depth-first visitor: accumulate directly instead of
  // collecting cost-bearing spans into an intermediate array first.
  const visit = (span: any): void => {
    if (span.cost) {
      summary.totalCost += span.cost.total || 0
      summary.totalInputCost += span.cost.input || 0
      summary.totalOutputCost += span.cost.output || 0

      // Tokens are at span.tokens, not span.cost.tokens
      summary.totalTokens += span.tokens?.total || 0
      summary.totalPromptTokens += span.tokens?.prompt || 0
      summary.totalCompletionTokens += span.tokens?.completion || 0

      // Aggregate model-specific costs - model is at span.model, not span.cost.model
      if (span.model) {
        const entry = (summary.models[span.model] ??= {
          input: 0,
          output: 0,
          total: 0,
          tokens: { prompt: 0, completion: 0, total: 0 },
        })
        entry.input += span.cost.input || 0
        entry.output += span.cost.output || 0
        entry.total += span.cost.total || 0
        entry.tokens.prompt += span.tokens?.prompt || 0
        entry.tokens.completion += span.tokens?.completion || 0
        entry.tokens.total += span.tokens?.total || 0
      }
    }

    if (span.children && Array.isArray(span.children)) {
      for (const child of span.children) {
        visit(child)
      }
    }
  }

  for (const span of traceSpans) {
    visit(span)
  }

  return summary
}

View File

@@ -0,0 +1,199 @@
import { createLogger } from '@/lib/logs/console-logger'
import { enhancedExecutionLogger } from './enhanced-execution-logger'
import {
calculateBlockStats,
calculateCostSummary,
createEnvironmentObject,
createTriggerObject,
loadWorkflowStateForExecution,
} from './enhanced-logging-factory'
import type { ExecutionEnvironment, ExecutionTrigger, WorkflowState } from './types'
const logger = createLogger('EnhancedLoggingSession')

// Optional context captured when a logging session starts.
export interface SessionStartParams {
  userId?: string
  workspaceId?: string
  variables?: Record<string, string>
  triggerData?: Record<string, unknown>
}

// Optional results supplied when a session completes; missing values fall
// back to "now" / 0 / empty in complete().
export interface SessionCompleteParams {
  endedAt?: string
  totalDurationMs?: number
  finalOutput?: any
  traceSpans?: any[]
}

/**
 * Per-execution facade over the enhanced logging pipeline.
 *
 * Wraps trigger/environment construction, workflow-state loading and the
 * start/complete calls on `enhancedExecutionLogger` for a single execution.
 * The `safe*` variants never throw, so logging failures cannot abort the
 * workflow execution they describe.
 */
export class EnhancedLoggingSession {
  private workflowId: string
  private executionId: string
  private triggerType: ExecutionTrigger['type']
  // Optional request correlation id; when absent, debug/error logs are skipped.
  private requestId?: string
  private trigger?: ExecutionTrigger
  private environment?: ExecutionEnvironment
  private workflowState?: WorkflowState
  // NOTE(review): this field is never read below — methods call the
  // module-level `enhancedExecutionLogger` directly. Possibly vestigial.
  private enhancedLogger = enhancedExecutionLogger

  constructor(
    workflowId: string,
    executionId: string,
    triggerType: ExecutionTrigger['type'],
    requestId?: string
  ) {
    this.workflowId = workflowId
    this.executionId = executionId
    this.triggerType = triggerType
    this.requestId = requestId
  }

  /**
   * Start the session: build trigger/environment objects, load the workflow
   * state from normalized tables, and create the workflow log + snapshot.
   * Rethrows on failure (use safeStart() for non-fatal startup).
   */
  async start(params: SessionStartParams = {}): Promise<void> {
    const { userId, workspaceId, variables, triggerData } = params

    try {
      this.trigger = createTriggerObject(this.triggerType, triggerData)
      this.environment = createEnvironmentObject(
        this.workflowId,
        this.executionId,
        userId,
        workspaceId,
        variables
      )
      this.workflowState = await loadWorkflowStateForExecution(this.workflowId)

      await enhancedExecutionLogger.startWorkflowExecution({
        workflowId: this.workflowId,
        executionId: this.executionId,
        trigger: this.trigger,
        environment: this.environment,
        workflowState: this.workflowState,
      })

      if (this.requestId) {
        logger.debug(
          `[${this.requestId}] Started enhanced logging for execution ${this.executionId}`
        )
      }
    } catch (error) {
      if (this.requestId) {
        logger.error(`[${this.requestId}] Failed to start enhanced logging:`, error)
      }
      throw error
    }
  }

  /**
   * Set up enhanced logging on an executor instance
   * Note: Enhanced logging now works through trace spans only, no direct executor integration needed
   */
  setupExecutor(executor: any): void {
    // No longer setting enhanced logger on executor - trace spans handle everything
    // (the `executor` argument is intentionally unused; kept for call-site compatibility)
    if (this.requestId) {
      logger.debug(
        `[${this.requestId}] Enhanced logging session ready for execution ${this.executionId}`
      )
    }
  }

  /**
   * Complete the session: derive block stats and cost summary from the trace
   * spans and finalize the workflow log. Errors are logged but swallowed.
   */
  async complete(params: SessionCompleteParams = {}): Promise<void> {
    const { endedAt, totalDurationMs, finalOutput, traceSpans } = params

    try {
      const blockStats = calculateBlockStats(traceSpans || [])
      const costSummary = calculateCostSummary(traceSpans || [])

      await enhancedExecutionLogger.completeWorkflowExecution({
        executionId: this.executionId,
        endedAt: endedAt || new Date().toISOString(),
        totalDurationMs: totalDurationMs || 0,
        blockStats,
        costSummary,
        finalOutput: finalOutput || {},
        traceSpans: traceSpans || [],
      })

      if (this.requestId) {
        logger.debug(
          `[${this.requestId}] Completed enhanced logging for execution ${this.executionId}`
        )
      }
    } catch (error) {
      if (this.requestId) {
        logger.error(`[${this.requestId}] Failed to complete enhanced logging:`, error)
      }
    }
  }

  /**
   * Finalize the workflow log as failed (one error, zero cost, no spans).
   * The `error` argument is currently unused — the failure detail is not
   * persisted here; callers log it separately.
   */
  async completeWithError(error?: any): Promise<void> {
    try {
      const blockStats = { total: 0, success: 0, error: 1, skipped: 0 }
      const costSummary = {
        totalCost: 0,
        totalInputCost: 0,
        totalOutputCost: 0,
        totalTokens: 0,
        totalPromptTokens: 0,
        totalCompletionTokens: 0,
        models: {},
      }

      await enhancedExecutionLogger.completeWorkflowExecution({
        executionId: this.executionId,
        endedAt: new Date().toISOString(),
        totalDurationMs: 0,
        blockStats,
        costSummary,
        finalOutput: null,
        traceSpans: [],
      })

      if (this.requestId) {
        logger.debug(
          `[${this.requestId}] Completed enhanced logging with error for execution ${this.executionId}`
        )
      }
    } catch (enhancedError) {
      if (this.requestId) {
        logger.error(
          `[${this.requestId}] Failed to complete enhanced logging for error:`,
          enhancedError
        )
      }
    }
  }

  /** Non-throwing start(); returns whether logging was successfully started. */
  async safeStart(params: SessionStartParams = {}): Promise<boolean> {
    try {
      await this.start(params)
      return true
    } catch (error) {
      if (this.requestId) {
        logger.error(
          `[${this.requestId}] Enhanced logging start failed, continuing execution:`,
          error
        )
      }
      return false
    }
  }

  /** Non-throwing complete(). */
  async safeComplete(params: SessionCompleteParams = {}): Promise<void> {
    try {
      await this.complete(params)
    } catch (error) {
      if (this.requestId) {
        logger.error(`[${this.requestId}] Enhanced logging completion failed:`, error)
      }
    }
  }

  /** Non-throwing completeWithError(). */
  async safeCompleteWithError(error?: any): Promise<void> {
    try {
      await this.completeWithError(error)
    } catch (enhancedError) {
      if (this.requestId) {
        logger.error(`[${this.requestId}] Enhanced logging error completion failed:`, enhancedError)
      }
    }
  }
}

View File

@@ -0,0 +1,219 @@
import { beforeEach, describe, expect, test } from 'vitest'
import { SnapshotService } from './snapshot-service'
import type { WorkflowState } from './types'
// Unit tests for SnapshotService.computeStateHash: hashes must be stable,
// insensitive to cosmetic differences (block position, edge order), and
// sensitive to meaningful state changes.
describe('SnapshotService', () => {
  let service: SnapshotService

  beforeEach(() => {
    service = new SnapshotService()
  })

  describe('computeStateHash', () => {
    test('should generate consistent hashes for identical states', () => {
      const state: WorkflowState = {
        blocks: {
          block1: {
            id: 'block1',
            name: 'Test Agent',
            type: 'agent',
            position: { x: 100, y: 200 },
            subBlocks: {},
            outputs: {},
            enabled: true,
            horizontalHandles: true,
            isWide: false,
            advancedMode: false,
            height: 0,
          },
        },
        edges: [{ id: 'edge1', source: 'block1', target: 'block2' }],
        loops: {},
        parallels: {},
      }

      const hash1 = service.computeStateHash(state)
      const hash2 = service.computeStateHash(state)

      expect(hash1).toBe(hash2)
      expect(hash1).toHaveLength(64) // SHA-256 hex string
    })

    test('should ignore position changes', () => {
      const baseState: WorkflowState = {
        blocks: {
          block1: {
            id: 'block1',
            name: 'Test Agent',
            type: 'agent',
            position: { x: 100, y: 200 },
            subBlocks: {},
            outputs: {},
            enabled: true,
            horizontalHandles: true,
            isWide: false,
            advancedMode: false,
            height: 0,
          },
        },
        edges: [],
        loops: {},
        parallels: {},
      }

      const stateWithDifferentPosition: WorkflowState = {
        ...baseState,
        blocks: {
          block1: {
            ...baseState.blocks.block1,
            position: { x: 500, y: 600 }, // Different position
          },
        },
      }

      const hash1 = service.computeStateHash(baseState)
      const hash2 = service.computeStateHash(stateWithDifferentPosition)

      // Position is presentation-only, so it must not affect the hash.
      expect(hash1).toBe(hash2)
    })

    test('should detect meaningful changes', () => {
      const baseState: WorkflowState = {
        blocks: {
          block1: {
            id: 'block1',
            name: 'Test Agent',
            type: 'agent',
            position: { x: 100, y: 200 },
            subBlocks: {},
            outputs: {},
            enabled: true,
            horizontalHandles: true,
            isWide: false,
            advancedMode: false,
            height: 0,
          },
        },
        edges: [],
        loops: {},
        parallels: {},
      }

      const stateWithDifferentPrompt: WorkflowState = {
        ...baseState,
        blocks: {
          block1: {
            ...baseState.blocks.block1,
            // Different block state - we can change outputs to make it different
            outputs: { response: { content: 'different result' } as Record<string, any> },
          },
        },
      }

      const hash1 = service.computeStateHash(baseState)
      const hash2 = service.computeStateHash(stateWithDifferentPrompt)

      expect(hash1).not.toBe(hash2)
    })

    test('should handle edge order consistently', () => {
      const state1: WorkflowState = {
        blocks: {},
        edges: [
          { id: 'edge1', source: 'a', target: 'b' },
          { id: 'edge2', source: 'b', target: 'c' },
        ],
        loops: {},
        parallels: {},
      }

      const state2: WorkflowState = {
        blocks: {},
        edges: [
          { id: 'edge2', source: 'b', target: 'c' }, // Different order
          { id: 'edge1', source: 'a', target: 'b' },
        ],
        loops: {},
        parallels: {},
      }

      const hash1 = service.computeStateHash(state1)
      const hash2 = service.computeStateHash(state2)

      expect(hash1).toBe(hash2) // Should be same despite different order
    })

    test('should handle empty states', () => {
      const emptyState: WorkflowState = {
        blocks: {},
        edges: [],
        loops: {},
        parallels: {},
      }

      const hash = service.computeStateHash(emptyState)
      expect(hash).toHaveLength(64)
    })

    test('should handle complex nested structures', () => {
      const complexState: WorkflowState = {
        blocks: {
          block1: {
            id: 'block1',
            name: 'Complex Agent',
            type: 'agent',
            position: { x: 100, y: 200 },
            subBlocks: {
              prompt: {
                id: 'prompt',
                type: 'short-input',
                value: 'Test prompt',
              },
              model: {
                id: 'model',
                type: 'short-input',
                value: 'gpt-4',
              },
            },
            outputs: {
              response: { content: 'Agent response' } as Record<string, any>,
            },
            enabled: true,
            horizontalHandles: true,
            isWide: false,
            advancedMode: true,
            height: 200,
          },
        },
        edges: [{ id: 'edge1', source: 'block1', target: 'block2', sourceHandle: 'output' }],
        loops: {
          loop1: {
            id: 'loop1',
            nodes: ['block1'],
            iterations: 10,
            loopType: 'for',
          },
        },
        parallels: {
          parallel1: {
            id: 'parallel1',
            nodes: ['block1'],
            count: 3,
            parallelType: 'count',
          },
        },
      }

      const hash = service.computeStateHash(complexState)
      expect(hash).toHaveLength(64)

      // Should be consistent
      const hash2 = service.computeStateHash(complexState)
      expect(hash).toBe(hash2)
    })
  })
})

Some files were not shown because too many files have changed in this diff Show More