Compare commits

...

65 Commits

Author SHA1 Message Date
Siddharth Ganesan
2e4945508c I hate this 2025-08-30 16:36:45 -07:00
Waleed
1feef4ce4b improvement(tools): update mysql to respect ssl pref (#1205) 2025-08-30 16:36:45 -07:00
Waleed
2404f8af14 feat(parsers): added pptx, md, & html parsers (#1202)
* feat(parsers): added pptx, md, & html parsers

* ack PR comments

* file renaming, reorganization
2025-08-30 16:36:45 -07:00
Waleed
ba72e35d43 fix(deps): downgrade nextjs (#1200) 2025-08-30 16:36:45 -07:00
Waleed
ab52458191 improvement(kb): add fallbacks for kb configs (#1199) 2025-08-30 16:36:45 -07:00
Waleed
ef5e2b699c feat(kb): add adjustable concurrency and batching to uploads and embeddings (#1198) 2025-08-30 16:36:45 -07:00
Waleed
ba45404423 improvement(pg): added wand config for writing sql queries for generic db blocks & supabase postgrest syntax (#1197)
* add parallel ai, postgres, mysql, slight modifications to dark mode styling

* bun install frozen lockfile

* new deps

* improve security, add wand to short input and update wand config
2025-08-30 16:36:44 -07:00
Waleed
4ce2fc760a feat(tools): add parallel ai, postgres, mysql, slight modifications to dark mode styling (#1192)
* add parallel ai, postgres, mysql, slight modifications to dark mode styling

* bun install frozen lockfile

* new deps
2025-08-30 16:36:44 -07:00
Waleed
71e06f2b31 fix(billing-ui): open settings when enterprise sub folks press usage indicator (#1194) 2025-08-30 16:36:44 -07:00
Siddharth Ganesan
ce04d56d79 Use direct fetch (#1193) 2025-08-30 16:36:44 -07:00
Siddharth Ganesan
626e9a37da Fix/wand (#1191)
* Switch to node

* Refactor
2025-08-30 16:36:44 -07:00
Siddharth Ganesan
100ae1d23e Switch to node (#1190) 2025-08-30 16:36:44 -07:00
Vikhyath Mondreti
792df1a9f0 run bun install 2025-08-30 16:36:44 -07:00
Vikhyath Mondreti
d313a0f171 Revert "feat(integrations): added parallel AI, mySQL, and postgres block/tools (#1126)"
This reverts commit 766279bb8b.
2025-08-30 16:36:44 -07:00
Vikhyath Mondreti
29270d15ff change bun install to be based on frozen-lockfile flag
2025-08-30 16:36:44 -07:00
Vikhyath Mondreti
879711d786 revert drizzle-orm version 2025-08-30 16:36:44 -07:00
Vikhyath Mondreti
f98138a550 remove bun lock 2025-08-30 16:36:44 -07:00
Vikhyath Mondreti
888609a93c revert package.json 2025-08-30 16:36:44 -07:00
Siddharth Ganesan
a516325733 Stuff 2025-08-30 15:46:20 -07:00
Siddharth Ganesan
396c9db204 Updates 2025-08-30 13:43:10 -07:00
Siddharth Ganesan
a1acbc9616 Load final state 2025-08-30 11:24:40 -07:00
Siddharth Ganesan
5a74ab28e2 Cleaning 2025-08-30 11:14:41 -07:00
Siddharth Ganesan
cf5532c852 Checkpoint 2025-08-29 18:32:06 -07:00
Siddharth Ganesan
3e6d454de3 Fix execution input 2025-08-29 17:34:31 -07:00
Siddharth Ganesan
4c4b3351e6 Checkpoint 2025-08-29 16:53:36 -07:00
Siddharth Ganesan
0c1ee239fe Broken checkpoint 2025-08-29 15:27:15 -07:00
Siddharth Ganesan
9c065a1c2a Execution dropdown 2025-08-29 14:55:55 -07:00
Siddharth Ganesan
dc92a79f33 Debugger 2025-08-29 12:53:03 -07:00
Siddharth Ganesan
efb0d22d3f Debugger fixes 2025-08-29 12:50:18 -07:00
Siddharth Ganesan
9af445fa25 Updates 2025-08-29 12:21:46 -07:00
Siddharth Ganesan
e09088bafc Updates 2025-08-29 12:07:09 -07:00
Siddharth Ganesan
994c81ba3c Functionality 2025-08-29 11:59:42 -07:00
Siddharth Ganesan
4bba1eb8f6 Startpos stuff 2025-08-29 11:54:06 -07:00
Siddharth Ganesan
de06e8c35c Rollback checkpoint 2025-08-29 11:00:33 -07:00
Siddharth Ganesan
61534b05dd Breakpoint color 2025-08-29 10:25:11 -07:00
Siddharth Ganesan
694538e1ee Lint 2025-08-29 10:25:11 -07:00
Siddharth Ganesan
6df565e4c8 Panel toggle 2025-08-29 10:25:11 -07:00
Siddharth Ganesan
422df2be0f Stuff 2025-08-29 10:25:11 -07:00
Siddharth Ganesan
692b385ece Checkpoint 2025-08-29 10:25:11 -07:00
Siddharth Ganesan
728f5812ac Hide env vars 2025-08-29 10:25:11 -07:00
Siddharth Ganesan
1d51706057 Loops and parallels 2025-08-29 10:25:11 -07:00
Siddharth Ganesan
c166c60d9b UI updates 2025-08-29 10:25:11 -07:00
Siddharth Ganesan
120b7ffd5c UI 2025-08-29 10:25:11 -07:00
Siddharth Ganesan
ecc2a55f9e Breakpoint ui 2025-08-29 10:25:11 -07:00
Siddharth Ganesan
f10b7c0493 Stuff 2025-08-29 10:25:11 -07:00
Siddharth Ganesan
5835df3496 Fix resume 2025-08-29 10:25:11 -07:00
Siddharth Ganesan
c4924776b6 Update 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
9444661d98 Updates 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
9137b2eab3 Variable highlighting 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
fa0ef07981 Remove number styling 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
463ba208f4 Fix env vars 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
2cedac5ffb Fix workflow vars 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
9c3f559a91 Fix scoping 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
0d7ab06bd1 Fix 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
7bda4468b8 UI updates 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
bdb9b866ab UI improvements 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
460d515df2 Execution status 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
7b49515798 Fixes 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
049f188d2e Var improvements 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
6c8a7f0594 Checkpoint 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
3727b5d395 Checkpoint 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
75a3f4cce7 Checkpoint 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
63616a1239 Updates 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
7cb6dfc211 Updates 2025-08-29 10:25:10 -07:00
Siddharth Ganesan
b94d942204 Updates 2025-08-29 10:25:09 -07:00
99 changed files with 6048 additions and 1245 deletions

View File

@@ -26,7 +26,7 @@ jobs:
node-version: latest
- name: Install dependencies
run: bun install
run: bun install --frozen-lockfile
- name: Run tests with coverage
env:

View File

@@ -543,6 +543,8 @@ export async function executeWorkflowForChat(
userId: deployment.userId,
workspaceId: '', // TODO: Get from workflow
variables: workflowVariables,
initialInput: { input, conversationId },
executionType: 'chat',
})
const stream = new ReadableStream({
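
Several hunks in this compare pass the same new fields to LoggingSession.safeStart. A hedged sketch of the assumed options shape — the interface name is hypothetical, and only fields visible in these hunks are listed:

// Hypothetical type assembled from the safeStart call sites in this compare.
// Nothing beyond these fields is confirmed by the diff.
interface SafeStartOptions {
  userId: string
  workspaceId: string
  variables: Record<string, unknown>
  initialInput?: unknown // e.g. { input, conversationId } for chat executions
  startBlockId?: string // set by the schedule executor when a block is pinned
  executionType?: 'chat' | 'api' | 'schedule' // the three values seen in this compare
}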

View File

@@ -12,9 +12,9 @@ import {
import { getCopilotModel } from '@/lib/copilot/config'
import type { CopilotProviderConfig } from '@/lib/copilot/types'
import { env } from '@/lib/env'
import { generateChatTitle } from '@/lib/generate-chat-title'
import { createLogger } from '@/lib/logs/console/logger'
import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/sim-agent'
import { generateChatTitle } from '@/lib/sim-agent/utils'
import { createFileContent, isSupportedFileType } from '@/lib/uploads/file-utils'
import { S3_COPILOT_CONFIG } from '@/lib/uploads/setup'
import { downloadFile, getStorageProvider } from '@/lib/uploads/storage-client'

View File

@@ -76,11 +76,9 @@ export async function POST(request: NextRequest) {
logger.info('File parse request received:', { filePath, fileType })
// Handle multiple files
if (Array.isArray(filePath)) {
const results = []
for (const path of filePath) {
// Skip empty or invalid paths
if (!path || (typeof path === 'string' && path.trim() === '')) {
results.push({
success: false,
@@ -91,12 +89,10 @@ export async function POST(request: NextRequest) {
}
const result = await parseFileSingle(path, fileType)
// Add processing time to metadata
if (result.metadata) {
result.metadata.processingTime = Date.now() - startTime
}
// Transform each result to match expected frontend format
if (result.success) {
results.push({
success: true,
@@ -105,7 +101,7 @@ export async function POST(request: NextRequest) {
name: result.filePath.split('/').pop() || 'unknown',
fileType: result.metadata?.fileType || 'application/octet-stream',
size: result.metadata?.size || 0,
binary: false, // We only return text content
binary: false,
},
filePath: result.filePath,
})
@@ -120,15 +116,12 @@ export async function POST(request: NextRequest) {
})
}
// Handle single file
const result = await parseFileSingle(filePath, fileType)
// Add processing time to metadata
if (result.metadata) {
result.metadata.processingTime = Date.now() - startTime
}
// Transform single file result to match expected frontend format
if (result.success) {
return NextResponse.json({
success: true,
@@ -142,8 +135,6 @@ export async function POST(request: NextRequest) {
})
}
// Only return 500 for actual server errors, not file processing failures
// File processing failures (like file not found, parsing errors) should return 200 with success:false
return NextResponse.json(result)
} catch (error) {
logger.error('Error in file parse API:', error)
@@ -164,7 +155,6 @@ export async function POST(request: NextRequest) {
async function parseFileSingle(filePath: string, fileType?: string): Promise<ParseResult> {
logger.info('Parsing file:', filePath)
// Validate that filePath is not empty
if (!filePath || filePath.trim() === '') {
return {
success: false,
@@ -173,7 +163,6 @@ async function parseFileSingle(filePath: string, fileType?: string): Promise<Par
}
}
// Validate path for security before any processing
const pathValidation = validateFilePath(filePath)
if (!pathValidation.isValid) {
return {
@@ -183,49 +172,40 @@ async function parseFileSingle(filePath: string, fileType?: string): Promise<Par
}
}
// Check if this is an external URL
if (filePath.startsWith('http://') || filePath.startsWith('https://')) {
return handleExternalUrl(filePath, fileType)
}
// Check if this is a cloud storage path (S3 or Blob)
const isS3Path = filePath.includes('/api/files/serve/s3/')
const isBlobPath = filePath.includes('/api/files/serve/blob/')
// Use cloud handler if it's a cloud path or we're in cloud mode
if (isS3Path || isBlobPath || isUsingCloudStorage()) {
return handleCloudFile(filePath, fileType)
}
// Use local handler for local files
return handleLocalFile(filePath, fileType)
}
/**
* Validate file path for security
* Validate file path for security - prevents null byte injection and path traversal attacks
*/
function validateFilePath(filePath: string): { isValid: boolean; error?: string } {
// Check for null bytes
if (filePath.includes('\0')) {
return { isValid: false, error: 'Invalid path: null byte detected' }
}
// Check for path traversal attempts
if (filePath.includes('..')) {
return { isValid: false, error: 'Access denied: path traversal detected' }
}
// Check for tilde characters (home directory access)
if (filePath.includes('~')) {
return { isValid: false, error: 'Invalid path: tilde character not allowed' }
}
// Check for absolute paths outside allowed directories
if (filePath.startsWith('/') && !filePath.startsWith('/api/files/serve/')) {
return { isValid: false, error: 'Path outside allowed directory' }
}
// Check for Windows absolute paths
if (/^[A-Za-z]:\\/.test(filePath)) {
return { isValid: false, error: 'Path outside allowed directory' }
}
@@ -260,12 +240,10 @@ async function handleExternalUrl(url: string, fileType?: string): Promise<ParseR
logger.info(`Downloaded file from URL: ${url}, size: ${buffer.length} bytes`)
// Extract filename from URL
const urlPath = new URL(url).pathname
const filename = urlPath.split('/').pop() || 'download'
const extension = path.extname(filename).toLowerCase().substring(1)
// Process the file based on its content type
if (extension === 'pdf') {
return await handlePdfBuffer(buffer, filename, fileType, url)
}
@@ -276,7 +254,6 @@ async function handleExternalUrl(url: string, fileType?: string): Promise<ParseR
return await handleGenericTextBuffer(buffer, filename, extension, fileType, url)
}
// For binary or unknown files
return handleGenericBuffer(buffer, filename, extension, fileType)
} catch (error) {
logger.error(`Error handling external URL ${url}:`, error)
@@ -289,35 +266,29 @@ async function handleExternalUrl(url: string, fileType?: string): Promise<ParseR
}
/**
* Handle file stored in cloud storage (S3 or Azure Blob)
* Handle file stored in cloud storage
*/
async function handleCloudFile(filePath: string, fileType?: string): Promise<ParseResult> {
try {
// Extract the cloud key from the path
let cloudKey: string
if (filePath.includes('/api/files/serve/s3/')) {
cloudKey = decodeURIComponent(filePath.split('/api/files/serve/s3/')[1])
} else if (filePath.includes('/api/files/serve/blob/')) {
cloudKey = decodeURIComponent(filePath.split('/api/files/serve/blob/')[1])
} else if (filePath.startsWith('/api/files/serve/')) {
// Backwards-compatibility: path like "/api/files/serve/<key>"
cloudKey = decodeURIComponent(filePath.substring('/api/files/serve/'.length))
} else {
// Assume raw key provided
cloudKey = filePath
}
logger.info('Extracted cloud key:', cloudKey)
// Download the file from cloud storage - this can throw for access errors
const fileBuffer = await downloadFile(cloudKey)
logger.info(`Downloaded file from cloud storage: ${cloudKey}, size: ${fileBuffer.length} bytes`)
// Extract the filename from the cloud key
const filename = cloudKey.split('/').pop() || cloudKey
const extension = path.extname(filename).toLowerCase().substring(1)
// Process the file based on its content type
if (extension === 'pdf') {
return await handlePdfBuffer(fileBuffer, filename, fileType, filePath)
}
@@ -325,22 +296,19 @@ async function handleCloudFile(filePath: string, fileType?: string): Promise<Par
return await handleCsvBuffer(fileBuffer, filename, fileType, filePath)
}
if (isSupportedFileType(extension)) {
// For other supported types that we have parsers for
return await handleGenericTextBuffer(fileBuffer, filename, extension, fileType, filePath)
}
// For binary or unknown files
return handleGenericBuffer(fileBuffer, filename, extension, fileType)
} catch (error) {
logger.error(`Error handling cloud file ${filePath}:`, error)
// Check if this is a download/access error that should trigger a 500 response
// For download/access errors, throw to trigger 500 response
const errorMessage = (error as Error).message
if (errorMessage.includes('Access denied') || errorMessage.includes('Forbidden')) {
// For access errors, throw to trigger 500 response
throw new Error(`Error accessing file from cloud storage: ${errorMessage}`)
}
// For other errors (parsing, processing), return success:false
// For other errors (parsing, processing), return success:false and an error message
return {
success: false,
error: `Error accessing file from cloud storage: ${errorMessage}`,
@@ -354,28 +322,23 @@ async function handleCloudFile(filePath: string, fileType?: string): Promise<Par
*/
async function handleLocalFile(filePath: string, fileType?: string): Promise<ParseResult> {
try {
// Extract filename from path
const filename = filePath.split('/').pop() || filePath
const fullPath = path.join(UPLOAD_DIR_SERVER, filename)
logger.info('Processing local file:', fullPath)
// Check if file exists
try {
await fsPromises.access(fullPath)
} catch {
throw new Error(`File not found: ${filename}`)
}
// Parse the file directly
const result = await parseFile(fullPath)
// Get file stats for metadata
const stats = await fsPromises.stat(fullPath)
const fileBuffer = await readFile(fullPath)
const hash = createHash('md5').update(fileBuffer).digest('hex')
// Extract file extension for type detection
const extension = path.extname(filename).toLowerCase().substring(1)
return {
@@ -386,7 +349,7 @@ async function handleLocalFile(filePath: string, fileType?: string): Promise<Par
fileType: fileType || getMimeType(extension),
size: stats.size,
hash,
processingTime: 0, // Will be set by caller
processingTime: 0,
},
}
} catch (error) {
@@ -425,15 +388,14 @@ async function handlePdfBuffer(
fileType: fileType || 'application/pdf',
size: fileBuffer.length,
hash: createHash('md5').update(fileBuffer).digest('hex'),
processingTime: 0, // Will be set by caller
processingTime: 0,
},
}
} catch (error) {
logger.error('Failed to parse PDF in memory:', error)
// Create fallback message for PDF parsing failure
const content = createPdfFailureMessage(
0, // We can't determine page count without parsing
0,
fileBuffer.length,
originalPath || filename,
(error as Error).message
@@ -447,7 +409,7 @@ async function handlePdfBuffer(
fileType: fileType || 'application/pdf',
size: fileBuffer.length,
hash: createHash('md5').update(fileBuffer).digest('hex'),
processingTime: 0, // Will be set by caller
processingTime: 0,
},
}
}
@@ -465,7 +427,6 @@ async function handleCsvBuffer(
try {
logger.info(`Parsing CSV in memory: ${filename}`)
// Use the parseBuffer function from our library
const { parseBuffer } = await import('@/lib/file-parsers')
const result = await parseBuffer(fileBuffer, 'csv')
@@ -477,7 +438,7 @@ async function handleCsvBuffer(
fileType: fileType || 'text/csv',
size: fileBuffer.length,
hash: createHash('md5').update(fileBuffer).digest('hex'),
processingTime: 0, // Will be set by caller
processingTime: 0,
},
}
} catch (error) {
@@ -490,7 +451,7 @@ async function handleCsvBuffer(
fileType: 'text/csv',
size: 0,
hash: '',
processingTime: 0, // Will be set by caller
processingTime: 0,
},
}
}
@@ -509,7 +470,6 @@ async function handleGenericTextBuffer(
try {
logger.info(`Parsing text file in memory: ${filename}`)
// Try to use a specialized parser if available
try {
const { parseBuffer, isSupportedFileType } = await import('@/lib/file-parsers')
@@ -524,7 +484,7 @@ async function handleGenericTextBuffer(
fileType: fileType || getMimeType(extension),
size: fileBuffer.length,
hash: createHash('md5').update(fileBuffer).digest('hex'),
processingTime: 0, // Will be set by caller
processingTime: 0,
},
}
}
@@ -532,7 +492,6 @@ async function handleGenericTextBuffer(
logger.warn('Specialized parser failed, falling back to generic parsing:', parserError)
}
// Fallback to generic text parsing
const content = fileBuffer.toString('utf-8')
return {
@@ -543,7 +502,7 @@ async function handleGenericTextBuffer(
fileType: fileType || getMimeType(extension),
size: fileBuffer.length,
hash: createHash('md5').update(fileBuffer).digest('hex'),
processingTime: 0, // Will be set by caller
processingTime: 0,
},
}
} catch (error) {
@@ -556,7 +515,7 @@ async function handleGenericTextBuffer(
fileType: 'text/plain',
size: 0,
hash: '',
processingTime: 0, // Will be set by caller
processingTime: 0,
},
}
}
@@ -584,7 +543,7 @@ function handleGenericBuffer(
fileType: fileType || getMimeType(extension),
size: fileBuffer.length,
hash: createHash('md5').update(fileBuffer).digest('hex'),
processingTime: 0, // Will be set by caller
processingTime: 0,
},
}
}
@@ -594,8 +553,6 @@ function handleGenericBuffer(
*/
async function parseBufferAsPdf(buffer: Buffer) {
try {
// Import parsers dynamically to avoid initialization issues in tests
// First try to use the main PDF parser
try {
const { PdfParser } = await import('@/lib/file-parsers/pdf-parser')
const parser = new PdfParser()
@@ -606,7 +563,6 @@ async function parseBufferAsPdf(buffer: Buffer) {
}
throw new Error('PDF parser does not support buffer parsing')
} catch (error) {
// Fallback to raw PDF parser
logger.warn('Main PDF parser failed, using raw parser for buffer:', error)
const { RawPdfParser } = await import('@/lib/file-parsers/raw-pdf-parser')
const rawParser = new RawPdfParser()
@@ -655,7 +611,7 @@ Please use a PDF viewer for best results.`
}
/**
* Create error message for PDF parsing failure
* Create error message for PDF parsing failure and make it more readable
*/
function createPdfFailureMessage(
pageCount: number,
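
Read together, the security checks above compose into one small validator. A sketch consistent with this hunk; the final success return is an assumption, since the diff elides the end of the function:

function validateFilePath(filePath: string): { isValid: boolean; error?: string } {
  if (filePath.includes('\0')) {
    return { isValid: false, error: 'Invalid path: null byte detected' }
  }
  if (filePath.includes('..')) {
    return { isValid: false, error: 'Access denied: path traversal detected' }
  }
  if (filePath.includes('~')) {
    return { isValid: false, error: 'Invalid path: tilde character not allowed' }
  }
  if (filePath.startsWith('/') && !filePath.startsWith('/api/files/serve/')) {
    return { isValid: false, error: 'Path outside allowed directory' }
  }
  if (/^[A-Za-z]:\\/.test(filePath)) {
    return { isValid: false, error: 'Path outside allowed directory' } // Windows drive paths
  }
  return { isValid: true } // assumed: all checks passed
}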

View File

@@ -2,7 +2,7 @@ import { randomUUID } from 'crypto'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { SUPPORTED_FIELD_TYPES } from '@/lib/constants/knowledge'
import { SUPPORTED_FIELD_TYPES } from '@/lib/knowledge/consts'
import {
cleanupUnusedTagDefinitions,
createOrUpdateTagDefinitionsBulk,

View File

@@ -2,7 +2,7 @@ import { randomUUID } from 'crypto'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { SUPPORTED_FIELD_TYPES } from '@/lib/constants/knowledge'
import { SUPPORTED_FIELD_TYPES } from '@/lib/knowledge/consts'
import { createTagDefinition, getTagDefinitions } from '@/lib/knowledge/tags/service'
import { createLogger } from '@/lib/logs/console/logger'
import { checkKnowledgeBaseAccess } from '@/app/api/knowledge/utils'

View File

@@ -1,6 +1,6 @@
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { TAG_SLOTS } from '@/lib/constants/knowledge'
import { TAG_SLOTS } from '@/lib/knowledge/consts'
import { getDocumentTagDefinitions } from '@/lib/knowledge/tags/service'
import { createLogger } from '@/lib/logs/console/logger'
import { estimateTokenCount } from '@/lib/tokenization/estimators'

View File

@@ -41,6 +41,7 @@ export async function GET(
executionId,
workflowId: workflowLog.workflowId,
workflowState: snapshot.stateData,
executionData: workflowLog.executionData || {},
executionMetadata: {
trigger: workflowLog.trigger,
startedAt: workflowLog.startedAt.toISOString(),

View File

@@ -399,6 +399,9 @@ export async function GET() {
userId: workflowRecord.userId,
workspaceId: workflowRecord.workspaceId || '',
variables: variables || {},
initialInput: input,
startBlockId: schedule.blockId || undefined,
executionType: 'schedule',
})
const executor = new Executor({
@@ -467,10 +470,19 @@ export async function GET() {
// Create a minimal log entry for early failures
try {
const input = {
workflowId: schedule.workflowId,
_context: {
workflowId: schedule.workflowId,
},
}
await loggingSession.safeStart({
userId: workflowRecord.userId,
workspaceId: workflowRecord.workspaceId || '',
variables: {},
initialInput: input,
startBlockId: schedule.blockId || undefined,
executionType: 'schedule',
})
await loggingSession.safeCompleteWithError({
@@ -586,10 +598,17 @@ export async function GET() {
requestId
)
const input = {
workflowId: schedule.workflowId,
_context: {
workflowId: schedule.workflowId,
},
}
await failureLoggingSession.safeStart({
userId: workflowRecord.userId,
workspaceId: workflowRecord.workspaceId || '',
variables: {},
initialInput: input,
})
await failureLoggingSession.safeCompleteWithError({

View File

@@ -1,3 +1,4 @@
import { randomUUID } from 'crypto'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { createLogger } from '@/lib/logs/console/logger'
@@ -11,13 +12,13 @@ const DeleteSchema = z.object({
database: z.string().min(1, 'Database name is required'),
username: z.string().min(1, 'Username is required'),
password: z.string().min(1, 'Password is required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('preferred'),
table: z.string().min(1, 'Table name is required'),
where: z.string().min(1, 'WHERE clause is required'),
})
export async function POST(request: NextRequest) {
const requestId = crypto.randomUUID().slice(0, 8)
const requestId = randomUUID().slice(0, 8)
try {
const body = await request.json()
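
Two changes recur across all of the MySQL and PostgreSQL tool routes below: requestId now comes from node:crypto's randomUUID instead of the global crypto, and the ssl default relaxes from 'required' to 'preferred'. A quick sketch of how the shared pieces behave, using zod as these routes already do:

import { randomUUID } from 'crypto'
import { z } from 'zod'

const Ssl = z.enum(['disabled', 'required', 'preferred']).default('preferred')

Ssl.parse(undefined) // -> 'preferred': the default applies when the field is omitted
Ssl.parse('required') // -> 'required': explicit values pass through unchanged

const requestId = randomUUID().slice(0, 8) // short correlation id, as in these routes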

View File

@@ -1,3 +1,4 @@
import { randomUUID } from 'crypto'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { createLogger } from '@/lib/logs/console/logger'
@@ -11,12 +12,12 @@ const ExecuteSchema = z.object({
database: z.string().min(1, 'Database name is required'),
username: z.string().min(1, 'Username is required'),
password: z.string().min(1, 'Password is required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('preferred'),
query: z.string().min(1, 'Query is required'),
})
export async function POST(request: NextRequest) {
const requestId = crypto.randomUUID().slice(0, 8)
const requestId = randomUUID().slice(0, 8)
try {
const body = await request.json()
@@ -26,7 +27,6 @@ export async function POST(request: NextRequest) {
`[${requestId}] Executing raw SQL on ${params.host}:${params.port}/${params.database}`
)
// Validate query before execution
const validation = validateQuery(params.query)
if (!validation.isValid) {
logger.warn(`[${requestId}] Query validation failed: ${validation.error}`)

View File

@@ -1,3 +1,4 @@
import { randomUUID } from 'crypto'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { createLogger } from '@/lib/logs/console/logger'
@@ -11,7 +12,7 @@ const InsertSchema = z.object({
database: z.string().min(1, 'Database name is required'),
username: z.string().min(1, 'Username is required'),
password: z.string().min(1, 'Password is required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('preferred'),
table: z.string().min(1, 'Table name is required'),
data: z.union([
z
@@ -38,13 +39,10 @@ const InsertSchema = z.object({
})
export async function POST(request: NextRequest) {
const requestId = crypto.randomUUID().slice(0, 8)
const requestId = randomUUID().slice(0, 8)
try {
const body = await request.json()
logger.info(`[${requestId}] Received data field type: ${typeof body.data}, value:`, body.data)
const params = InsertSchema.parse(body)
logger.info(

View File

@@ -1,3 +1,4 @@
import { randomUUID } from 'crypto'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { createLogger } from '@/lib/logs/console/logger'
@@ -11,12 +12,12 @@ const QuerySchema = z.object({
database: z.string().min(1, 'Database name is required'),
username: z.string().min(1, 'Username is required'),
password: z.string().min(1, 'Password is required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('preferred'),
query: z.string().min(1, 'Query is required'),
})
export async function POST(request: NextRequest) {
const requestId = crypto.randomUUID().slice(0, 8)
const requestId = randomUUID().slice(0, 8)
try {
const body = await request.json()
@@ -26,7 +27,6 @@ export async function POST(request: NextRequest) {
`[${requestId}] Executing MySQL query on ${params.host}:${params.port}/${params.database}`
)
// Validate query before execution
const validation = validateQuery(params.query)
if (!validation.isValid) {
logger.warn(`[${requestId}] Query validation failed: ${validation.error}`)

View File

@@ -1,3 +1,4 @@
import { randomUUID } from 'crypto'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { createLogger } from '@/lib/logs/console/logger'
@@ -11,7 +12,7 @@ const UpdateSchema = z.object({
database: z.string().min(1, 'Database name is required'),
username: z.string().min(1, 'Username is required'),
password: z.string().min(1, 'Password is required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('preferred'),
table: z.string().min(1, 'Table name is required'),
data: z.union([
z
@@ -36,7 +37,7 @@ const UpdateSchema = z.object({
})
export async function POST(request: NextRequest) {
const requestId = crypto.randomUUID().slice(0, 8)
const requestId = randomUUID().slice(0, 8)
try {
const body = await request.json()

View File

@@ -6,7 +6,7 @@ export interface MySQLConnectionConfig {
database: string
username: string
password: string
ssl?: string
ssl?: 'disabled' | 'required' | 'preferred'
}
export async function createMySQLConnection(config: MySQLConnectionConfig) {
@@ -18,13 +18,13 @@ export async function createMySQLConnection(config: MySQLConnectionConfig) {
password: config.password,
}
// Handle SSL configuration
if (config.ssl === 'required') {
if (config.ssl === 'disabled') {
// Don't set ssl property at all to disable SSL
} else if (config.ssl === 'required') {
connectionConfig.ssl = { rejectUnauthorized: true }
} else if (config.ssl === 'preferred') {
connectionConfig.ssl = { rejectUnauthorized: false }
}
// For 'disabled', we don't set the ssl property at all
return mysql.createConnection(connectionConfig)
}
@@ -54,7 +54,6 @@ export async function executeQuery(
export function validateQuery(query: string): { isValid: boolean; error?: string } {
const trimmedQuery = query.trim().toLowerCase()
// Block dangerous SQL operations
const dangerousPatterns = [
/drop\s+database/i,
/drop\s+schema/i,
@@ -91,7 +90,6 @@ export function validateQuery(query: string): { isValid: boolean; error?: string
}
}
// Only allow specific statement types for execute endpoint
const allowedStatements = /^(select|insert|update|delete|with|show|describe|explain)\s+/i
if (!allowedStatements.test(trimmedQuery)) {
return {
@@ -116,6 +114,8 @@ export function buildInsertQuery(table: string, data: Record<string, unknown>) {
}
export function buildUpdateQuery(table: string, data: Record<string, unknown>, where: string) {
validateWhereClause(where)
const sanitizedTable = sanitizeIdentifier(table)
const columns = Object.keys(data)
const values = Object.values(data)
@@ -127,14 +127,33 @@ export function buildUpdateQuery(table: string, data: Record<string, unknown>, w
}
export function buildDeleteQuery(table: string, where: string) {
validateWhereClause(where)
const sanitizedTable = sanitizeIdentifier(table)
const query = `DELETE FROM ${sanitizedTable} WHERE ${where}`
return { query, values: [] }
}
function validateWhereClause(where: string): void {
const dangerousPatterns = [
/;\s*(drop|delete|insert|update|create|alter|grant|revoke)/i,
/union\s+select/i,
/into\s+outfile/i,
/load_file/i,
/--/,
/\/\*/,
/\*\//,
]
for (const pattern of dangerousPatterns) {
if (pattern.test(where)) {
throw new Error('WHERE clause contains potentially dangerous operation')
}
}
}
export function sanitizeIdentifier(identifier: string): string {
// Handle schema.table format
if (identifier.includes('.')) {
const parts = identifier.split('.')
return parts.map((part) => sanitizeSingleIdentifier(part)).join('.')
@@ -144,16 +163,13 @@ export function sanitizeIdentifier(identifier: string): string {
}
function sanitizeSingleIdentifier(identifier: string): string {
// Remove any existing backticks to prevent double-escaping
const cleaned = identifier.replace(/`/g, '')
// Validate identifier contains only safe characters
if (!/^[a-zA-Z_][a-zA-Z0-9_]*$/.test(cleaned)) {
throw new Error(
`Invalid identifier: ${identifier}. Identifiers must start with a letter or underscore and contain only letters, numbers, and underscores.`
)
}
// Wrap in backticks for MySQL
return `\`${cleaned}\``
}
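
The connector now maps the three ssl preferences explicitly, and WHERE clauses are screened before interpolation. A compact sketch of both behaviors; the mysql2/promise import is assumed, matching what these utils already use:

import mysql from 'mysql2/promise'

// ssl mapping as in createMySQLConnection above:
// 'disabled'  -> leave the ssl property unset
// 'required'  -> { rejectUnauthorized: true }  (encrypt and verify the server cert)
// 'preferred' -> { rejectUnauthorized: false } (encrypt, skip verification)
function sslOption(ssl?: 'disabled' | 'required' | 'preferred') {
  if (ssl === 'required') return { rejectUnauthorized: true }
  if (ssl === 'preferred') return { rejectUnauthorized: false }
  return undefined
}

async function connect(config: {
  host: string
  port: number
  database: string
  username: string
  password: string
  ssl?: 'disabled' | 'required' | 'preferred'
}) {
  const ssl = sslOption(config.ssl)
  return mysql.createConnection({
    host: config.host,
    port: config.port,
    database: config.database,
    user: config.username,
    password: config.password,
    ...(ssl ? { ssl } : {}), // 'disabled': omit the ssl property entirely
  })
}

// validateWhereClause rejects comment markers and stacked statements:
// validateWhereClause("id = 1; DROP TABLE users")  // throws
// validateWhereClause("id = 1 -- trailing comment") // throws
// validateWhereClause("id = 1 AND active = true")   // passes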

View File

@@ -1,11 +1,8 @@
import { randomUUID } from 'crypto'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { createLogger } from '@/lib/logs/console/logger'
import {
buildDeleteQuery,
createPostgresConnection,
executeQuery,
} from '@/app/api/tools/postgresql/utils'
import { createPostgresConnection, executeDelete } from '@/app/api/tools/postgresql/utils'
const logger = createLogger('PostgreSQLDeleteAPI')
@@ -15,13 +12,13 @@ const DeleteSchema = z.object({
database: z.string().min(1, 'Database name is required'),
username: z.string().min(1, 'Username is required'),
password: z.string().min(1, 'Password is required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('preferred'),
table: z.string().min(1, 'Table name is required'),
where: z.string().min(1, 'WHERE clause is required'),
})
export async function POST(request: NextRequest) {
const requestId = crypto.randomUUID().slice(0, 8)
const requestId = randomUUID().slice(0, 8)
try {
const body = await request.json()
@@ -31,7 +28,7 @@ export async function POST(request: NextRequest) {
`[${requestId}] Deleting data from ${params.table} on ${params.host}:${params.port}/${params.database}`
)
const client = await createPostgresConnection({
const sql = createPostgresConnection({
host: params.host,
port: params.port,
database: params.database,
@@ -41,8 +38,7 @@ export async function POST(request: NextRequest) {
})
try {
const { query, values } = buildDeleteQuery(params.table, params.where)
const result = await executeQuery(client, query, values)
const result = await executeDelete(sql, params.table, params.where)
logger.info(`[${requestId}] Delete executed successfully, ${result.rowCount} row(s) deleted`)
@@ -52,7 +48,7 @@ export async function POST(request: NextRequest) {
rowCount: result.rowCount,
})
} finally {
await client.end()
await sql.end()
}
} catch (error) {
if (error instanceof z.ZodError) {
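
Each PostgreSQL route now follows the same lifecycle: open a postgres.js handle, run the helper, close the handle in finally. A sketch of the pattern, assuming createPostgresConnection returns a postgres.js instance as in the utils diff further down (the params object and response body are abbreviated):

const sql = createPostgresConnection({ host, port, database, username, password, ssl })
try {
  const result = await executeDelete(sql, table, where)
  return NextResponse.json({ rows: result.rows, rowCount: result.rowCount })
} finally {
  await sql.end() // always release the connection, even on error
}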

View File

@@ -1,3 +1,4 @@
import { randomUUID } from 'crypto'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { createLogger } from '@/lib/logs/console/logger'
@@ -15,12 +16,12 @@ const ExecuteSchema = z.object({
database: z.string().min(1, 'Database name is required'),
username: z.string().min(1, 'Username is required'),
password: z.string().min(1, 'Password is required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('preferred'),
query: z.string().min(1, 'Query is required'),
})
export async function POST(request: NextRequest) {
const requestId = crypto.randomUUID().slice(0, 8)
const requestId = randomUUID().slice(0, 8)
try {
const body = await request.json()
@@ -30,7 +31,6 @@ export async function POST(request: NextRequest) {
`[${requestId}] Executing raw SQL on ${params.host}:${params.port}/${params.database}`
)
// Validate query before execution
const validation = validateQuery(params.query)
if (!validation.isValid) {
logger.warn(`[${requestId}] Query validation failed: ${validation.error}`)
@@ -40,7 +40,7 @@ export async function POST(request: NextRequest) {
)
}
const client = await createPostgresConnection({
const sql = createPostgresConnection({
host: params.host,
port: params.port,
database: params.database,
@@ -50,7 +50,7 @@ export async function POST(request: NextRequest) {
})
try {
const result = await executeQuery(client, params.query)
const result = await executeQuery(sql, params.query)
logger.info(`[${requestId}] SQL executed successfully, ${result.rowCount} row(s) affected`)
@@ -60,7 +60,7 @@ export async function POST(request: NextRequest) {
rowCount: result.rowCount,
})
} finally {
await client.end()
await sql.end()
}
} catch (error) {
if (error instanceof z.ZodError) {

View File

@@ -1,11 +1,8 @@
import { randomUUID } from 'crypto'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { createLogger } from '@/lib/logs/console/logger'
import {
buildInsertQuery,
createPostgresConnection,
executeQuery,
} from '@/app/api/tools/postgresql/utils'
import { createPostgresConnection, executeInsert } from '@/app/api/tools/postgresql/utils'
const logger = createLogger('PostgreSQLInsertAPI')
@@ -15,7 +12,7 @@ const InsertSchema = z.object({
database: z.string().min(1, 'Database name is required'),
username: z.string().min(1, 'Username is required'),
password: z.string().min(1, 'Password is required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('preferred'),
table: z.string().min(1, 'Table name is required'),
data: z.union([
z
@@ -42,21 +39,18 @@ const InsertSchema = z.object({
})
export async function POST(request: NextRequest) {
const requestId = crypto.randomUUID().slice(0, 8)
const requestId = randomUUID().slice(0, 8)
try {
const body = await request.json()
// Debug: Log the data field to see what we're getting
logger.info(`[${requestId}] Received data field type: ${typeof body.data}, value:`, body.data)
const params = InsertSchema.parse(body)
logger.info(
`[${requestId}] Inserting data into ${params.table} on ${params.host}:${params.port}/${params.database}`
)
const client = await createPostgresConnection({
const sql = createPostgresConnection({
host: params.host,
port: params.port,
database: params.database,
@@ -66,8 +60,7 @@ export async function POST(request: NextRequest) {
})
try {
const { query, values } = buildInsertQuery(params.table, params.data)
const result = await executeQuery(client, query, values)
const result = await executeInsert(sql, params.table, params.data)
logger.info(`[${requestId}] Insert executed successfully, ${result.rowCount} row(s) inserted`)
@@ -77,7 +70,7 @@ export async function POST(request: NextRequest) {
rowCount: result.rowCount,
})
} finally {
await client.end()
await sql.end()
}
} catch (error) {
if (error instanceof z.ZodError) {

View File

@@ -1,3 +1,4 @@
import { randomUUID } from 'crypto'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { createLogger } from '@/lib/logs/console/logger'
@@ -11,12 +12,12 @@ const QuerySchema = z.object({
database: z.string().min(1, 'Database name is required'),
username: z.string().min(1, 'Username is required'),
password: z.string().min(1, 'Password is required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('preferred'),
query: z.string().min(1, 'Query is required'),
})
export async function POST(request: NextRequest) {
const requestId = crypto.randomUUID().slice(0, 8)
const requestId = randomUUID().slice(0, 8)
try {
const body = await request.json()
@@ -26,7 +27,7 @@ export async function POST(request: NextRequest) {
`[${requestId}] Executing PostgreSQL query on ${params.host}:${params.port}/${params.database}`
)
const client = await createPostgresConnection({
const sql = createPostgresConnection({
host: params.host,
port: params.port,
database: params.database,
@@ -36,7 +37,7 @@ export async function POST(request: NextRequest) {
})
try {
const result = await executeQuery(client, params.query)
const result = await executeQuery(sql, params.query)
logger.info(`[${requestId}] Query executed successfully, returned ${result.rowCount} rows`)
@@ -46,7 +47,7 @@ export async function POST(request: NextRequest) {
rowCount: result.rowCount,
})
} finally {
await client.end()
await sql.end()
}
} catch (error) {
if (error instanceof z.ZodError) {

View File

@@ -1,11 +1,8 @@
import { randomUUID } from 'crypto'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { createLogger } from '@/lib/logs/console/logger'
import {
buildUpdateQuery,
createPostgresConnection,
executeQuery,
} from '@/app/api/tools/postgresql/utils'
import { createPostgresConnection, executeUpdate } from '@/app/api/tools/postgresql/utils'
const logger = createLogger('PostgreSQLUpdateAPI')
@@ -15,7 +12,7 @@ const UpdateSchema = z.object({
database: z.string().min(1, 'Database name is required'),
username: z.string().min(1, 'Username is required'),
password: z.string().min(1, 'Password is required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('required'),
ssl: z.enum(['disabled', 'required', 'preferred']).default('preferred'),
table: z.string().min(1, 'Table name is required'),
data: z.union([
z
@@ -40,7 +37,7 @@ const UpdateSchema = z.object({
})
export async function POST(request: NextRequest) {
const requestId = crypto.randomUUID().slice(0, 8)
const requestId = randomUUID().slice(0, 8)
try {
const body = await request.json()
@@ -50,7 +47,7 @@ export async function POST(request: NextRequest) {
`[${requestId}] Updating data in ${params.table} on ${params.host}:${params.port}/${params.database}`
)
const client = await createPostgresConnection({
const sql = createPostgresConnection({
host: params.host,
port: params.port,
database: params.database,
@@ -60,8 +57,7 @@ export async function POST(request: NextRequest) {
})
try {
const { query, values } = buildUpdateQuery(params.table, params.data, params.where)
const result = await executeQuery(client, query, values)
const result = await executeUpdate(sql, params.table, params.data, params.where)
logger.info(`[${requestId}] Update executed successfully, ${result.rowCount} row(s) updated`)
@@ -71,7 +67,7 @@ export async function POST(request: NextRequest) {
rowCount: result.rowCount,
})
} finally {
await client.end()
await sql.end()
}
} catch (error) {
if (error instanceof z.ZodError) {

View File

@@ -1,43 +1,41 @@
import { Client } from 'pg'
import postgres from 'postgres'
import type { PostgresConnectionConfig } from '@/tools/postgresql/types'
export async function createPostgresConnection(config: PostgresConnectionConfig): Promise<Client> {
const client = new Client({
export function createPostgresConnection(config: PostgresConnectionConfig) {
const sslConfig =
config.ssl === 'disabled'
? false
: config.ssl === 'required'
? 'require'
: config.ssl === 'preferred'
? 'prefer'
: 'require'
const sql = postgres({
host: config.host,
port: config.port,
database: config.database,
user: config.username,
username: config.username,
password: config.password,
ssl:
config.ssl === 'disabled'
? false
: config.ssl === 'required'
? true
: config.ssl === 'preferred'
? { rejectUnauthorized: false }
: false,
connectionTimeoutMillis: 10000, // 10 seconds
query_timeout: 30000, // 30 seconds
ssl: sslConfig,
connect_timeout: 10, // 10 seconds
idle_timeout: 20, // 20 seconds
max_lifetime: 60 * 30, // 30 minutes
max: 1, // Single connection for tool usage
})
try {
await client.connect()
return client
} catch (error) {
await client.end()
throw error
}
return sql
}
export async function executeQuery(
client: Client,
sql: any,
query: string,
params: unknown[] = []
): Promise<{ rows: unknown[]; rowCount: number }> {
const result = await client.query(query, params)
const result = await sql.unsafe(query, params)
return {
rows: result.rows || [],
rowCount: result.rowCount || 0,
rows: Array.isArray(result) ? result : [result],
rowCount: Array.isArray(result) ? result.length : result ? 1 : 0,
}
}
@@ -84,7 +82,6 @@ export function validateQuery(query: string): { isValid: boolean; error?: string
}
}
// Only allow specific statement types for execute endpoint
const allowedStatements = /^(select|insert|update|delete|with|explain|analyze|show)\s+/i
if (!allowedStatements.test(trimmedQuery)) {
return {
@@ -98,7 +95,6 @@ export function validateQuery(query: string): { isValid: boolean; error?: string
}
export function sanitizeIdentifier(identifier: string): string {
// Handle schema.table format
if (identifier.includes('.')) {
const parts = identifier.split('.')
return parts.map((part) => sanitizeSingleIdentifier(part)).join('.')
@@ -107,28 +103,41 @@ export function sanitizeIdentifier(identifier: string): string {
return sanitizeSingleIdentifier(identifier)
}
function validateWhereClause(where: string): void {
const dangerousPatterns = [
/;\s*(drop|delete|insert|update|create|alter|grant|revoke)/i,
/union\s+select/i,
/into\s+outfile/i,
/load_file/i,
/--/,
/\/\*/,
/\*\//,
]
for (const pattern of dangerousPatterns) {
if (pattern.test(where)) {
throw new Error('WHERE clause contains potentially dangerous operation')
}
}
}
function sanitizeSingleIdentifier(identifier: string): string {
// Remove any existing double quotes to prevent double-escaping
const cleaned = identifier.replace(/"/g, '')
// Validate identifier contains only safe characters
if (!/^[a-zA-Z_][a-zA-Z0-9_]*$/.test(cleaned)) {
throw new Error(
`Invalid identifier: ${identifier}. Identifiers must start with a letter or underscore and contain only letters, numbers, and underscores.`
)
}
// Wrap in double quotes for PostgreSQL
return `"${cleaned}"`
}
export function buildInsertQuery(
export async function executeInsert(
sql: any,
table: string,
data: Record<string, unknown>
): {
query: string
values: unknown[]
} {
): Promise<{ rows: unknown[]; rowCount: number }> {
const sanitizedTable = sanitizeIdentifier(table)
const columns = Object.keys(data)
const sanitizedColumns = columns.map((col) => sanitizeIdentifier(col))
@@ -136,18 +145,22 @@ export function buildInsertQuery(
const values = columns.map((col) => data[col])
const query = `INSERT INTO ${sanitizedTable} (${sanitizedColumns.join(', ')}) VALUES (${placeholders.join(', ')}) RETURNING *`
const result = await sql.unsafe(query, values)
return { query, values }
return {
rows: Array.isArray(result) ? result : [result],
rowCount: Array.isArray(result) ? result.length : result ? 1 : 0,
}
}
export function buildUpdateQuery(
export async function executeUpdate(
sql: any,
table: string,
data: Record<string, unknown>,
where: string
): {
query: string
values: unknown[]
} {
): Promise<{ rows: unknown[]; rowCount: number }> {
validateWhereClause(where)
const sanitizedTable = sanitizeIdentifier(table)
const columns = Object.keys(data)
const sanitizedColumns = columns.map((col) => sanitizeIdentifier(col))
@@ -155,19 +168,27 @@ export function buildUpdateQuery(
const values = columns.map((col) => data[col])
const query = `UPDATE ${sanitizedTable} SET ${setClause} WHERE ${where} RETURNING *`
const result = await sql.unsafe(query, values)
return { query, values }
return {
rows: Array.isArray(result) ? result : [result],
rowCount: Array.isArray(result) ? result.length : result ? 1 : 0,
}
}
export function buildDeleteQuery(
export async function executeDelete(
sql: any,
table: string,
where: string
): {
query: string
values: unknown[]
} {
): Promise<{ rows: unknown[]; rowCount: number }> {
validateWhereClause(where)
const sanitizedTable = sanitizeIdentifier(table)
const query = `DELETE FROM ${sanitizedTable} WHERE ${where} RETURNING *`
const result = await sql.unsafe(query, [])
return { query, values: [] }
return {
rows: Array.isArray(result) ? result : [result],
rowCount: Array.isArray(result) ? result.length : result ? 1 : 0,
}
}
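
The rewrite replaces pg's Client with postgres.js (the postgres package). Its client exposes sql.unsafe(query, params) for raw strings with positional $1-style parameters, and results come back as an array-like RowList, which is why rowCount is now derived from length. A minimal standalone sketch under those assumptions, with illustrative connection values:

import postgres from 'postgres'

const sql = postgres({
  host: 'localhost', // illustrative values, not taken from the diff
  port: 5432,
  database: 'app',
  username: 'app',
  password: 'secret',
  ssl: 'prefer', // 'require' | 'prefer' | false, as mapped above
  max: 1, // single connection, matching the tool usage here
})

async function main() {
  // positional placeholders, same style as executeInsert/executeUpdate above
  const rows = await sql.unsafe('SELECT * FROM users WHERE id = $1', [42])
  console.log(rows.length) // RowList is array-like, so length doubles as rowCount
  await sql.end()
}

main().catch(console.error)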

View File

@@ -4,7 +4,7 @@ import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console/logger'
export const dynamic = 'force-dynamic'
export const runtime = 'edge'
export const runtime = 'nodejs'
export const maxDuration = 60
const logger = createLogger('WandGenerateAPI')
@@ -49,6 +49,15 @@ interface RequestBody {
history?: ChatMessage[]
}
// Helper: safe stringify for error payloads that may include circular structures
function safeStringify(value: unknown): string {
try {
return JSON.stringify(value)
} catch {
return '[unserializable]'
}
}
export async function POST(req: NextRequest) {
const requestId = crypto.randomUUID().slice(0, 8)
logger.info(`[${requestId}] Received wand generation request`)
@@ -110,124 +119,172 @@ export async function POST(req: NextRequest) {
`[${requestId}] About to create stream with model: ${useWandAzure ? wandModelName : 'gpt-4o'}`
)
// Add AbortController with timeout
const abortController = new AbortController()
const timeoutId = setTimeout(() => {
abortController.abort('Stream timeout after 30 seconds')
}, 30000)
// Use native fetch for streaming to avoid OpenAI SDK issues with Node.js runtime
const apiUrl = useWandAzure
? `${azureEndpoint}/openai/deployments/${wandModelName}/chat/completions?api-version=${azureApiVersion}`
: 'https://api.openai.com/v1/chat/completions'
// Forward request abort signal if available
req.signal?.addEventListener('abort', () => {
abortController.abort('Request cancelled by client')
})
const headers: Record<string, string> = {
'Content-Type': 'application/json',
}
const streamCompletion = await client.chat.completions.create(
{
if (useWandAzure) {
headers['api-key'] = azureApiKey!
} else {
headers.Authorization = `Bearer ${openaiApiKey}`
}
logger.debug(`[${requestId}] Making streaming request to: ${apiUrl}`)
const response = await fetch(apiUrl, {
method: 'POST',
headers,
body: JSON.stringify({
model: useWandAzure ? wandModelName : 'gpt-4o',
messages: messages,
temperature: 0.3,
max_tokens: 10000,
stream: true,
stream_options: { include_usage: true },
},
{
signal: abortController.signal, // Add AbortSignal
}
)
clearTimeout(timeoutId) // Clear timeout after successful creation
logger.info(`[${requestId}] Stream created successfully, starting reader pattern`)
logger.debug(`[${requestId}] Stream connection established successfully`)
return new Response(
new ReadableStream({
async start(controller) {
const encoder = new TextEncoder()
try {
logger.info(`[${requestId}] Starting streaming with timeout protection`)
let chunkCount = 0
let hasUsageData = false
// Use for await with AbortController timeout protection
for await (const chunk of streamCompletion) {
chunkCount++
if (chunkCount === 1) {
logger.info(`[${requestId}] Received first chunk via for await`)
}
// Process the chunk
const content = chunk.choices?.[0]?.delta?.content || ''
if (content) {
// Use SSE format identical to chat streaming
controller.enqueue(
encoder.encode(`data: ${JSON.stringify({ chunk: content })}\n\n`)
)
}
// Check for usage data
if (chunk.usage) {
hasUsageData = true
logger.info(
`[${requestId}] Received usage data: ${JSON.stringify(chunk.usage)}`
)
}
// Log every 5th chunk to avoid spam
if (chunkCount % 5 === 0) {
logger.debug(`[${requestId}] Processed ${chunkCount} chunks so far`)
}
}
logger.info(
`[${requestId}] Reader pattern completed. Total chunks: ${chunkCount}, Usage data received: ${hasUsageData}`
)
// Send completion signal in SSE format
logger.info(`[${requestId}] Sending completion signal`)
controller.enqueue(encoder.encode(`data: ${JSON.stringify({ done: true })}\n\n`))
logger.info(`[${requestId}] Closing controller`)
controller.close()
logger.info(`[${requestId}] Wand generation streaming completed successfully`)
} catch (streamError: any) {
if (streamError.name === 'AbortError') {
logger.info(
`[${requestId}] Stream was aborted (timeout or cancel): ${streamError.message}`
)
controller.enqueue(
encoder.encode(
`data: ${JSON.stringify({ error: 'Stream cancelled', done: true })}\n\n`
)
)
} else {
logger.error(`[${requestId}] Streaming error`, { error: streamError.message })
controller.enqueue(
encoder.encode(
`data: ${JSON.stringify({ error: 'Streaming failed', done: true })}\n\n`
)
)
}
controller.close()
}
},
}),
{
headers: {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
Connection: 'keep-alive',
'X-Accel-Buffering': 'no',
},
}
)
})
if (!response.ok) {
const errorText = await response.text()
logger.error(`[${requestId}] API request failed`, {
status: response.status,
statusText: response.statusText,
error: errorText,
})
throw new Error(`API request failed: ${response.status} ${response.statusText}`)
}
logger.info(`[${requestId}] Stream response received, starting processing`)
// Create a TransformStream to process the SSE data
const encoder = new TextEncoder()
const decoder = new TextDecoder()
const readable = new ReadableStream({
async start(controller) {
const reader = response.body?.getReader()
if (!reader) {
controller.close()
return
}
try {
let buffer = ''
let chunkCount = 0
while (true) {
const { done, value } = await reader.read()
if (done) {
logger.info(`[${requestId}] Stream completed. Total chunks: ${chunkCount}`)
controller.enqueue(encoder.encode(`data: ${JSON.stringify({ done: true })}\n\n`))
controller.close()
break
}
// Decode the chunk
buffer += decoder.decode(value, { stream: true })
// Process complete SSE messages
const lines = buffer.split('\n')
buffer = lines.pop() || '' // Keep incomplete line in buffer
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = line.slice(6).trim()
if (data === '[DONE]') {
logger.info(`[${requestId}] Received [DONE] signal`)
controller.enqueue(
encoder.encode(`data: ${JSON.stringify({ done: true })}\n\n`)
)
controller.close()
return
}
try {
const parsed = JSON.parse(data)
const content = parsed.choices?.[0]?.delta?.content
if (content) {
chunkCount++
if (chunkCount === 1) {
logger.info(`[${requestId}] Received first content chunk`)
}
// Forward the content
controller.enqueue(
encoder.encode(`data: ${JSON.stringify({ chunk: content })}\n\n`)
)
}
// Log usage if present
if (parsed.usage) {
logger.info(
`[${requestId}] Received usage data: ${JSON.stringify(parsed.usage)}`
)
}
// Log progress periodically
if (chunkCount % 10 === 0) {
logger.debug(`[${requestId}] Processed ${chunkCount} chunks`)
}
} catch (parseError) {
// Skip invalid JSON lines
logger.debug(
`[${requestId}] Skipped non-JSON line: ${data.substring(0, 100)}`
)
}
}
}
}
logger.info(`[${requestId}] Wand generation streaming completed successfully`)
} catch (streamError: any) {
logger.error(`[${requestId}] Streaming error`, {
name: streamError?.name,
message: streamError?.message || 'Unknown error',
stack: streamError?.stack,
})
// Send error to client
const errorData = `data: ${JSON.stringify({ error: 'Streaming failed', done: true })}\n\n`
controller.enqueue(encoder.encode(errorData))
controller.close()
} finally {
reader.releaseLock()
}
},
})
// Return Response with proper headers for Node.js runtime
return new Response(readable, {
headers: {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache, no-transform',
Connection: 'keep-alive',
'X-Accel-Buffering': 'no', // Disable Nginx buffering
'Transfer-Encoding': 'chunked', // Important for Node.js runtime
},
})
} catch (error: any) {
logger.error(`[${requestId}] Streaming error`, {
error: error.message || 'Unknown error',
stack: error.stack,
logger.error(`[${requestId}] Failed to create stream`, {
name: error?.name,
message: error?.message || 'Unknown error',
code: error?.code,
status: error?.status,
responseStatus: error?.response?.status,
responseData: error?.response?.data ? safeStringify(error.response.data) : undefined,
stack: error?.stack,
useWandAzure,
model: useWandAzure ? wandModelName : 'gpt-4o',
endpoint: useWandAzure ? azureEndpoint : 'api.openai.com',
apiVersion: useWandAzure ? azureApiVersion : 'N/A',
})
return NextResponse.json(
@@ -261,8 +318,19 @@ export async function POST(req: NextRequest) {
return NextResponse.json({ success: true, content: generatedContent })
} catch (error: any) {
logger.error(`[${requestId}] Wand generation failed`, {
error: error.message || 'Unknown error',
stack: error.stack,
name: error?.name,
message: error?.message || 'Unknown error',
code: error?.code,
status: error?.status,
responseStatus: error instanceof OpenAI.APIError ? error.status : error?.response?.status,
responseData: (error as any)?.response?.data
? safeStringify((error as any).response.data)
: undefined,
stack: error?.stack,
useWandAzure,
model: useWandAzure ? wandModelName : 'gpt-4o',
endpoint: useWandAzure ? azureEndpoint : 'api.openai.com',
apiVersion: useWandAzure ? azureApiVersion : 'N/A',
})
let clientErrorMessage = 'Wand generation failed. Please try again later.'
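
The streaming half of the route now parses the upstream SSE feed by hand: accumulate decoded bytes, split on newlines, keep the trailing partial line, and treat each 'data: ' line as JSON until '[DONE]'. A condensed sketch of that loop:

const decoder = new TextDecoder()

async function pump(
  reader: ReadableStreamDefaultReader<Uint8Array>,
  onChunk: (text: string) => void
) {
  let buffer = ''
  while (true) {
    const { done, value } = await reader.read()
    if (done) return
    buffer += decoder.decode(value, { stream: true })
    const lines = buffer.split('\n')
    buffer = lines.pop() || '' // keep the incomplete line for the next read
    for (const line of lines) {
      if (!line.startsWith('data: ')) continue
      const data = line.slice(6).trim()
      if (data === '[DONE]') return // upstream end-of-stream marker
      try {
        const parsed = JSON.parse(data)
        const content = parsed.choices?.[0]?.delta?.content
        if (content) onChunk(content)
      } catch {
        // keepalive or partial JSON; skip the line
      }
    }
  }
}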

View File

@@ -146,6 +146,8 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any): P
userId: workflow.userId,
workspaceId: workflow.workspaceId,
variables,
initialInput: processedInput || {},
executionType: 'api',
})
// Replace environment variables in the block states

View File

@@ -4,6 +4,7 @@ import { LoggingSession } from '@/lib/logs/execution/logging-session'
import { buildTraceSpans } from '@/lib/logs/execution/trace-spans/trace-spans'
import { validateWorkflowAccess } from '@/app/api/workflows/middleware'
import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/utils'
import { loadWorkflowStateForExecution } from '@/lib/logs/execution/logging-factory'
const logger = createLogger('WorkflowLogAPI')
@@ -30,6 +31,22 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
success: result.success,
})
// Log current normalized state before starting logging session (what snapshot will save)
try {
const normalizedState = await loadWorkflowStateForExecution(id)
logger.info(`[${requestId}] 🔍 Normalized workflow state at persistence time:`, {
blocks: Object.entries(normalizedState.blocks || {}).map(([bid, b]: [string, any]) => ({
id: bid,
type: (b as any).type,
triggerMode: (b as any).triggerMode,
enabled: (b as any).enabled,
})),
edgesCount: (normalizedState.edges || []).length,
})
} catch (e) {
logger.warn(`[${requestId}] Failed to load normalized state for logging snapshot context`)
}
// Check if this execution is from chat using only the explicit source flag
const isChatExecution = result.metadata?.source === 'chat'

View File

@@ -11,7 +11,7 @@ import { createLogger } from '@/lib/logs/console/logger'
import { getAssetUrl } from '@/lib/utils'
import '@/app/globals.css'
import { SessionProvider } from '@/lib/session-context'
import { SessionProvider } from '@/lib/session/session-context'
import { ThemeProvider } from '@/app/theme-provider'
import { ZoomPrevention } from '@/app/zoom-prevention'

View File

@@ -21,6 +21,10 @@ const ACCEPTED_FILE_TYPES = [
'text/csv',
'application/vnd.ms-excel',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'text/markdown',
'application/vnd.ms-powerpoint',
'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'text/html',
]
interface FileWithPreview extends File {
@@ -74,7 +78,7 @@ export function UploadModal({
return `File "${file.name}" is too large. Maximum size is 100MB.`
}
if (!ACCEPTED_FILE_TYPES.includes(file.type)) {
return `File "${file.name}" has an unsupported format. Please use PDF, DOC, DOCX, TXT, CSV, XLS, or XLSX files.`
return `File "${file.name}" has an unsupported format. Please use PDF, DOC, DOCX, TXT, CSV, XLS, XLSX, MD, PPT, PPTX, or HTML files.`
}
return null
}
@@ -203,7 +207,8 @@ export function UploadModal({
{isDragging ? 'Drop files here!' : 'Drop files here or click to browse'}
</p>
<p className='text-muted-foreground text-xs'>
Supports PDF, DOC, DOCX, TXT, CSV, XLS, XLSX (max 100MB each)
Supports PDF, DOC, DOCX, TXT, CSV, XLS, XLSX, MD, PPT, PPTX, HTML (max 100MB
each)
</p>
</div>
</div>
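Browsers sometimes report an empty `file.type` for less common extensions such as .md, so an extension-based fallback is a common companion to MIME allow-lists; a sketch under that assumption (not part of this diff):

// Hypothetical helper — maps extensions for the newly accepted formats when file.type is empty.
const EXTENSION_MIME_FALLBACKS: Record<string, string> = {
  md: 'text/markdown',
  ppt: 'application/vnd.ms-powerpoint',
  pptx: 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
  html: 'text/html',
}

function resolveMimeType(file: File): string {
  if (file.type) return file.type
  const ext = file.name.split('.').pop()?.toLowerCase() ?? ''
  return EXTENSION_MIME_FALLBACKS[ext] ?? ''
}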

View File

@@ -29,6 +29,10 @@ const ACCEPTED_FILE_TYPES = [
'text/csv',
'application/vnd.ms-excel',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'text/markdown',
'application/vnd.ms-powerpoint',
'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'text/html',
]
interface FileWithPreview extends File {
@@ -168,7 +172,7 @@ export function CreateModal({ open, onOpenChange, onKnowledgeBaseCreated }: Crea
// Check file type
if (!ACCEPTED_FILE_TYPES.includes(file.type)) {
setFileError(
`File ${file.name} has an unsupported format. Please use PDF, DOC, DOCX, TXT, CSV, XLS, or XLSX.`
`File ${file.name} has an unsupported format. Please use PDF, DOC, DOCX, TXT, CSV, XLS, XLSX, MD, PPT, PPTX, or HTML.`
)
hasError = true
continue
@@ -511,7 +515,8 @@ export function CreateModal({ open, onOpenChange, onKnowledgeBaseCreated }: Crea
: 'Drop files here or click to browse'}
</p>
<p className='text-muted-foreground text-xs'>
Supports PDF, DOC, DOCX, TXT, CSV, XLS, XLSX (max 100MB each)
Supports PDF, DOC, DOCX, TXT, CSV, XLS, XLSX, MD, PPT, PPTX, HTML (max
100MB each)
</p>
</div>
</div>
@@ -552,7 +557,8 @@ export function CreateModal({ open, onOpenChange, onKnowledgeBaseCreated }: Crea
: 'Drop more files or click to browse'}
</p>
<p className='text-muted-foreground text-xs'>
PDF, DOC, DOCX, TXT, CSV, XLS, XLSX (max 100MB each)
PDF, DOC, DOCX, TXT, CSV, XLS, XLSX, MD, PPT, PPTX, HTML (max 100MB
each)
</p>
</div>
</div>

View File

@@ -25,7 +25,7 @@ import {
TooltipProvider,
TooltipTrigger,
} from '@/components/ui'
import { MAX_TAG_SLOTS, type TagSlot } from '@/lib/constants/knowledge'
import { MAX_TAG_SLOTS, type TagSlot } from '@/lib/knowledge/consts'
import { createLogger } from '@/lib/logs/console/logger'
import { useKnowledgeBaseTagDefinitions } from '@/hooks/use-knowledge-base-tag-definitions'
import { useNextAvailableSlot } from '@/hooks/use-next-available-slot'

View File

@@ -6,7 +6,7 @@ import { Button } from '@/components/ui/button'
import { Collapsible, CollapsibleContent, CollapsibleTrigger } from '@/components/ui/collapsible'
import { Input } from '@/components/ui/input'
import { Label } from '@/components/ui/label'
import { TAG_SLOTS, type TagSlot } from '@/lib/constants/knowledge'
import { TAG_SLOTS, type TagSlot } from '@/lib/knowledge/consts'
import { useKnowledgeBaseTagDefinitions } from '@/hooks/use-knowledge-base-tag-definitions'
export type TagData = {

View File

@@ -16,6 +16,7 @@ import { TraceSpansDisplay } from '@/app/workspace/[workspaceId]/logs/components
import { formatDate } from '@/app/workspace/[workspaceId]/logs/utils/format-date'
import { formatCost } from '@/providers/utils'
import type { WorkflowLog } from '@/stores/logs/filters/types'
import { useParams, useRouter } from 'next/navigation'
interface LogSidebarProps {
log: WorkflowLog | null
@@ -199,6 +200,8 @@ export function Sidebar({
const [isModelsExpanded, setIsModelsExpanded] = useState(false)
const [isFrozenCanvasOpen, setIsFrozenCanvasOpen] = useState(false)
const scrollAreaRef = useRef<HTMLDivElement>(null)
const router = useRouter()
const params = useParams() as { workspaceId?: string }
// Update currentLogId when log changes
useEffect(() => {
@@ -529,15 +532,31 @@ export function Sidebar({
<h3 className='mb-1 font-medium text-muted-foreground text-xs'>
Workflow State
</h3>
<Button
variant='outline'
size='sm'
onClick={() => setIsFrozenCanvasOpen(true)}
className='w-full justify-start gap-2'
>
<Eye className='h-4 w-4' />
View Snapshot
</Button>
<div className='flex w-full gap-2'>
<Button
variant='outline'
size='sm'
onClick={() => setIsFrozenCanvasOpen(true)}
className='flex-1 justify-start gap-2'
>
<Eye className='h-4 w-4' />
View Snapshot
</Button>
<Button
variant='secondary'
size='sm'
onClick={() => {
try {
const href = `/workspace/${encodeURIComponent(String(params?.workspaceId || ''))}/w/${encodeURIComponent(String(log.workflowId || ''))}`
router.push(href)
} catch {}
}}
className='flex-1 justify-start gap-2'
>
<Eye className='h-4 w-4' />
Open Live Debug
</Button>
</div>
<p className='mt-1 text-muted-foreground text-xs'>
See the exact workflow state and block inputs/outputs at execution time
</p>

View File

@@ -8,8 +8,6 @@ import {
Layers,
Play,
RefreshCw,
SkipForward,
StepForward,
Store,
Trash2,
WifiOff,
@@ -44,6 +42,8 @@ import {
getKeyboardShortcutText,
useKeyboardShortcuts,
} from '@/app/workspace/[workspaceId]/w/hooks/use-keyboard-shortcuts'
import { useExecutionStore } from '@/stores/execution/store'
import { useDebugCanvasStore } from '@/stores/execution/debug-canvas/store'
import { useFolderStore } from '@/stores/folders/store'
import { usePanelStore } from '@/stores/panel/store'
import { useGeneralStore } from '@/stores/settings/general/store'
@@ -111,6 +111,9 @@ export function ControlBar({ hasValidationErrors = false }: ControlBarProps) {
const [isExpanded, setIsExpanded] = useState(false)
const [isTemplateModalOpen, setIsTemplateModalOpen] = useState(false)
const [isAutoLayouting, setIsAutoLayouting] = useState(false)
// Remove chat modal state
// const [isChatPromptOpen, setIsChatPromptOpen] = useState(false)
// const [chatPrompt, setChatPrompt] = useState('')
// Delete workflow state - grouped for better organization
const [deleteState, setDeleteState] = useState({
@@ -146,6 +149,13 @@ export function ControlBar({ hasValidationErrors = false }: ControlBarProps) {
}
}, [setActiveTab, isOpen, togglePanel])
const openDebugPanel = useCallback(() => {
setActiveTab('debug')
if (!isOpen) {
togglePanel()
}
}, [setActiveTab, isOpen, togglePanel])
// Shared condition for keyboard shortcut and button disabled state
const isWorkflowBlocked = isExecuting || hasValidationErrors
@@ -808,6 +818,7 @@ export function ControlBar({ hasValidationErrors = false }: ControlBarProps) {
if (isDebugging) {
// Stop debugging
try { useDebugCanvasStore.getState().clear() } catch {}
handleCancelDebug()
} else {
// Check if there are executable blocks before starting debug mode
@@ -819,15 +830,31 @@ export function ControlBar({ hasValidationErrors = false }: ControlBarProps) {
return // Do nothing if no executable blocks
}
// Start debugging
// Determine starter id for focus
const starter = Object.values(blocks).find((b) => b.type === 'starter') as any
const starterId = starter?.id as string | undefined
// Enable debug UI but do NOT start execution
if (!isDebugModeEnabled) {
toggleDebugMode()
}
if (usageExceeded) {
openSubscriptionSettings()
} else {
openConsolePanel()
handleRunWorkflow(undefined, true) // Start in debug mode
// Activate debug session state so the panel is active
const execStore = useExecutionStore.getState()
execStore.setIsExecuting(false)
execStore.setIsDebugging(true)
// Set the Start block as pending - it will execute on first Step
execStore.setPendingBlocks(starterId ? [starterId] : [])
// Show Debug tab and mark starter as the current block to execute
openDebugPanel()
if (starterId) {
execStore.setActiveBlocks(new Set([starterId]))
}
// Ensure debug canvas starts in a clean state
try { useDebugCanvasStore.getState().clear() } catch {}
}
}
}, [
@@ -838,8 +865,7 @@ export function ControlBar({ hasValidationErrors = false }: ControlBarProps) {
blocks,
handleCancelDebug,
toggleDebugMode,
handleRunWorkflow,
openConsolePanel,
openDebugPanel,
])
/**
@@ -859,40 +885,7 @@ export function ControlBar({ hasValidationErrors = false }: ControlBarProps) {
return (
<div className='flex items-center gap-1'>
<Tooltip>
<TooltipTrigger asChild>
<Button
onClick={() => {
openConsolePanel()
handleStepDebug()
}}
className={debugButtonClass}
disabled={isControlDisabled}
>
<StepForward className='h-5 w-5' />
<span className='sr-only'>Step Forward</span>
</Button>
</TooltipTrigger>
<TooltipContent>Step Forward</TooltipContent>
</Tooltip>
<Tooltip>
<TooltipTrigger asChild>
<Button
onClick={() => {
openConsolePanel()
handleResumeDebug()
}}
className={debugButtonClass}
disabled={isControlDisabled}
>
<SkipForward className='h-5 w-5' />
<span className='sr-only'>Resume Until End</span>
</Button>
</TooltipTrigger>
<TooltipContent>Resume Until End</TooltipContent>
</Tooltip>
{/* Keep only cancel (X) here; step/resume moved to panel */}
<Tooltip>
<TooltipTrigger asChild>
<Button
@@ -1214,7 +1207,7 @@ export function ControlBar({ hasValidationErrors = false }: ControlBarProps) {
{isExpanded && renderPublishButton()}
{renderDeleteButton()}
{renderDuplicateButton()}
{!isDebugging && renderDebugModeToggle()}
{renderDebugModeToggle()}
{renderDeployButton()}
{isDebugging ? renderDebugControlsBar() : renderRunButton()}
@@ -1226,6 +1219,8 @@ export function ControlBar({ hasValidationErrors = false }: ControlBarProps) {
workflowId={activeWorkflowId}
/>
)}
{/* Removed chat prompt dialog; chat input now lives in DebugPanel */}
</div>
)
}
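The control bar drives the debug session entirely through `useExecutionStore`; a minimal zustand sketch of the slice these calls imply (reconstructed from usage, not the actual store file):

import { create } from 'zustand'

// Assumed slice — only the fields touched by the control bar above.
interface ExecutionDebugSlice {
  isExecuting: boolean
  isDebugging: boolean
  pendingBlocks: string[]
  activeBlockIds: Set<string>
  setIsExecuting: (v: boolean) => void
  setIsDebugging: (v: boolean) => void
  setPendingBlocks: (ids: string[]) => void
  setActiveBlocks: (ids: Set<string>) => void
}

export const useExecutionStore = create<ExecutionDebugSlice>((set) => ({
  isExecuting: false,
  isDebugging: false,
  pendingBlocks: [],
  activeBlockIds: new Set<string>(),
  setIsExecuting: (isExecuting) => set({ isExecuting }),
  setIsDebugging: (isDebugging) => set({ isDebugging }),
  setPendingBlocks: (pendingBlocks) => set({ pendingBlocks }),
  setActiveBlocks: (activeBlockIds) => set({ activeBlockIds }),
}))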

View File

@@ -10,6 +10,7 @@ import {
import { ScrollArea } from '@/components/ui/scroll-area'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
import { useCopilotStore } from '@/stores/copilot/store'
import { useExecutionStore } from '@/stores/execution/store'
import { useChatStore } from '@/stores/panel/chat/store'
import { useConsoleStore } from '@/stores/panel/console/store'
import { usePanelStore } from '@/stores/panel/store'
@@ -17,6 +18,7 @@ import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import { Chat } from './components/chat/chat'
import { Console } from './components/console/console'
import { Copilot } from './components/copilot/copilot'
import { DebugPanel } from './components/debug/debug'
import { Variables } from './components/variables/variables'
export function Panel() {
@@ -44,6 +46,9 @@ export function Panel() {
const exportChatCSV = useChatStore((state) => state.exportChatCSV)
const { activeWorkflowId } = useWorkflowRegistry()
// Get debug state
const isDebugging = useExecutionStore((state) => state.isDebugging)
// Copilot store for chat management
const {
chats,
@@ -216,7 +221,11 @@ export function Panel() {
)
// Handle tab clicks - no loading, just switch tabs
const handleTabClick = async (tab: 'chat' | 'console' | 'variables' | 'copilot') => {
const handleTabClick = async (tab: 'chat' | 'console' | 'variables' | 'copilot' | 'debug') => {
// Don't allow clicking debug tab if not debugging
if (tab === 'debug' && !isDebugging) {
return
}
setActiveTab(tab)
if (!isOpen) {
togglePanel()
@@ -284,10 +293,30 @@ export function Panel() {
}
}, [activeWorkflowId, copilotWorkflowId, ensureCopilotDataLoaded])
// When debug mode ends, switch to a different tab if debug was active
useEffect(() => {
if (!isDebugging && activeTab === 'debug') {
setActiveTab('console')
}
}, [isDebugging, activeTab, setActiveTab])
// When debug mode starts, automatically open the debug panel
useEffect(() => {
if (isDebugging) {
setActiveTab('debug')
if (!isOpen) {
togglePanel()
}
}
}, [isDebugging, setActiveTab, isOpen, togglePanel])
return (
<>
{/* Tab Selector - Always visible */}
<div className='fixed top-[76px] right-4 z-20 flex h-9 w-[308px] items-center gap-1 rounded-[14px] border bg-card px-[2.5px] py-1 shadow-xs'>
<div
className='fixed top-[76px] right-4 z-20 flex h-9 items-center gap-1 rounded-[14px] border bg-card px-[2.5px] py-1 shadow-xs'
style={{ width: isDebugging ? '380px' : '308px' }}
>
<button
onClick={() => handleTabClick('chat')}
className={`panel-tab-base inline-flex flex-1 cursor-pointer items-center justify-center rounded-[10px] border border-transparent py-1 font-[450] text-sm outline-none transition-colors duration-200 ${
@@ -320,6 +349,16 @@ export function Panel() {
>
Variables
</button>
{isDebugging && (
<button
onClick={() => handleTabClick('debug')}
className={`panel-tab-base inline-flex flex-1 cursor-pointer items-center justify-center rounded-[10px] border border-transparent py-1 font-[450] text-sm outline-none transition-colors duration-200 ${
isOpen && activeTab === 'debug' ? 'panel-tab-active' : 'panel-tab-inactive'
}`}
>
Debug
</button>
)}
</div>
{/* Panel Content - Only visible when isOpen is true */}
@@ -512,6 +551,9 @@ export function Panel() {
<div style={{ display: activeTab === 'variables' ? 'block' : 'none', height: '100%' }}>
<Variables />
</div>
<div style={{ display: activeTab === 'debug' ? 'block' : 'none', height: '100%' }}>
<DebugPanel />
</div>
</div>
</div>
)}
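With the new tab, the `'chat' | 'console' | ...` union now appears in several signatures; a hedged sketch of centralizing it plus the guard from `handleTabClick` (names assumed):

// Hypothetical alias — the repo may keep the union inline instead.
export type PanelTab = 'chat' | 'console' | 'variables' | 'copilot' | 'debug'

// Pure guard mirroring the check above: the debug tab is reachable only mid-session.
function canActivateTab(tab: PanelTab, isDebugging: boolean): boolean {
  return tab !== 'debug' || isDebugging
}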

View File

@@ -385,16 +385,16 @@ export function Code({
<div
className={cn(
'group relative min-h-[100px] rounded-md border bg-background font-mono text-sm transition-colors',
'group relative min-h-[100px] rounded-md border border-input bg-background font-mono text-sm transition-colors',
isConnecting && 'ring-2 ring-blue-500 ring-offset-2',
!isValidJson && 'border-2 border-destructive bg-destructive/10'
!isValidJson && 'border-destructive bg-destructive/10'
)}
title={!isValidJson ? 'Invalid JSON' : undefined}
onDragOver={(e) => e.preventDefault()}
onDrop={handleDrop}
>
<div className='absolute top-2 right-3 z-10 flex items-center gap-1 opacity-0 transition-opacity group-hover:opacity-100'>
{!isCollapsed && !isAiStreaming && !isPreview && (
{wandConfig?.enabled && !isCollapsed && !isAiStreaming && !isPreview && (
<Button
variant='ghost'
size='icon'
@@ -486,7 +486,7 @@ export function Code({
outline: 'none',
}}
className={cn(
'code-editor-area caret-primary',
'code-editor-area caret-primary dark:caret-white',
'bg-transparent focus:outline-none',
(isCollapsed || isAiStreaming) && 'cursor-not-allowed opacity-50'
)}

View File

@@ -6,7 +6,7 @@ import { Button } from '@/components/ui/button'
import { formatDisplayText } from '@/components/ui/formatted-text'
import { Input } from '@/components/ui/input'
import { checkTagTrigger, TagDropdown } from '@/components/ui/tag-dropdown'
import { MAX_TAG_SLOTS } from '@/lib/constants/knowledge'
import { MAX_TAG_SLOTS } from '@/lib/knowledge/consts'
import { cn } from '@/lib/utils'
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/hooks/use-sub-block-value'
import type { SubBlockConfig } from '@/blocks/types'

View File

@@ -1,12 +1,16 @@
import { useEffect, useMemo, useRef, useState } from 'react'
import { Wand2 } from 'lucide-react'
import { useReactFlow } from 'reactflow'
import { Button } from '@/components/ui/button'
import { checkEnvVarTrigger, EnvVarDropdown } from '@/components/ui/env-var-dropdown'
import { formatDisplayText } from '@/components/ui/formatted-text'
import { Input } from '@/components/ui/input'
import { checkTagTrigger, TagDropdown } from '@/components/ui/tag-dropdown'
import { createLogger } from '@/lib/logs/console/logger'
import { cn } from '@/lib/utils'
import { WandPromptBar } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/wand-prompt-bar/wand-prompt-bar'
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/hooks/use-sub-block-value'
import { useWand } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-wand'
import type { SubBlockConfig } from '@/blocks/types'
import { useTagSelection } from '@/hooks/use-tag-selection'
import { useOperationQueueStore } from '@/stores/operation-queue/store'
@@ -40,19 +44,39 @@ export function ShortInput({
previewValue,
disabled = false,
}: ShortInputProps) {
// Local state for immediate UI updates during streaming
const [localContent, setLocalContent] = useState<string>('')
const [isFocused, setIsFocused] = useState(false)
const [showEnvVars, setShowEnvVars] = useState(false)
const [showTags, setShowTags] = useState(false)
const validatePropValue = (value: any): string => {
if (value === undefined || value === null) return ''
if (typeof value === 'string') return value
try {
return String(value)
} catch {
return ''
}
}
const [storeValue, setStoreValue] = useSubBlockValue(blockId, subBlockId)
// Wand functionality (only if wandConfig is enabled)
const wandHook = config.wandConfig?.enabled
? useWand({
wandConfig: config.wandConfig,
currentValue: localContent,
onStreamStart: () => {
// Clear the content when streaming starts
setLocalContent('')
},
onStreamChunk: (chunk) => {
// Update local content with each chunk as it arrives
setLocalContent((current) => current + chunk)
},
onGeneratedContent: (content) => {
// Final content update
setLocalContent(content)
},
})
: null
// State management - useSubBlockValue with explicit streaming control
const [storeValue, setStoreValue] = useSubBlockValue(blockId, subBlockId, false, {
isStreaming: wandHook?.isStreaming || false,
onStreamingEnd: () => {
logger.debug('Wand streaming ended, value persisted', { blockId, subBlockId })
},
})
const [searchTerm, setSearchTerm] = useState('')
const [cursorPosition, setCursorPosition] = useState(0)
const inputRef = useRef<HTMLInputElement>(null)
@@ -65,7 +89,29 @@ export function ShortInput({
const reactFlowInstance = useReactFlow()
// Use preview value when in preview mode, otherwise use store value or prop value
const value = isPreview ? previewValue : propValue !== undefined ? propValue : storeValue
const baseValue = isPreview ? previewValue : propValue !== undefined ? propValue : storeValue
// During streaming, use local content; otherwise use base value
const value = wandHook?.isStreaming ? localContent : baseValue
// Sync local content with base value when not streaming
useEffect(() => {
if (!wandHook?.isStreaming) {
const baseValueString = baseValue?.toString() ?? ''
if (baseValueString !== localContent) {
setLocalContent(baseValueString)
}
}
}, [baseValue, wandHook?.isStreaming])
// Update store value during streaming (but won't persist until streaming ends)
useEffect(() => {
if (wandHook?.isStreaming && localContent !== '') {
if (!isPreview && !disabled) {
setStoreValue(localContent)
}
}
}, [localContent, wandHook?.isStreaming, isPreview, disabled, setStoreValue])
// Check if this input is API key related
const isApiKeyField = useMemo(() => {
@@ -297,91 +343,130 @@ export function ShortInput({
}
return (
<div className='relative w-full'>
<Input
ref={inputRef}
className={cn(
'allow-scroll w-full overflow-auto text-transparent caret-foreground placeholder:text-muted-foreground/50',
isConnecting &&
config?.connectionDroppable !== false &&
'ring-2 ring-blue-500 ring-offset-2 focus-visible:ring-blue-500'
)}
placeholder={placeholder ?? ''}
type='text'
value={displayValue}
onChange={handleChange}
onFocus={() => {
setIsFocused(true)
<>
<WandPromptBar
isVisible={wandHook?.isPromptVisible || false}
isLoading={wandHook?.isLoading || false}
isStreaming={wandHook?.isStreaming || false}
promptValue={wandHook?.promptInputValue || ''}
onSubmit={(prompt: string) => wandHook?.generateStream({ prompt }) || undefined}
onCancel={
wandHook?.isStreaming
? wandHook?.cancelGeneration
: wandHook?.hidePromptInline || (() => {})
}
onChange={(value: string) => wandHook?.updatePromptValue?.(value)}
placeholder={config.wandConfig?.placeholder || 'Describe what you want to generate...'}
/>
// If this is an API key field, automatically show env vars dropdown
if (isApiKeyField) {
setShowEnvVars(true)
setSearchTerm('')
<div className='group relative w-full'>
<Input
ref={inputRef}
className={cn(
'allow-scroll w-full overflow-auto text-transparent caret-foreground placeholder:text-muted-foreground/50',
isConnecting &&
config?.connectionDroppable !== false &&
'ring-2 ring-blue-500 ring-offset-2 focus-visible:ring-blue-500'
)}
placeholder={placeholder ?? ''}
type='text'
value={displayValue}
onChange={handleChange}
onFocus={() => {
setIsFocused(true)
// Set cursor position to the end of the input
const inputLength = value?.toString().length ?? 0
setCursorPosition(inputLength)
} else {
// If this is an API key field, automatically show env vars dropdown
if (isApiKeyField) {
setShowEnvVars(true)
setSearchTerm('')
// Set cursor position to the end of the input
const inputLength = value?.toString().length ?? 0
setCursorPosition(inputLength)
} else {
setShowEnvVars(false)
setShowTags(false)
setSearchTerm('')
}
}}
onBlur={() => {
setIsFocused(false)
setShowEnvVars(false)
setShowTags(false)
setSearchTerm('')
}
}}
onBlur={() => {
setIsFocused(false)
setShowEnvVars(false)
try {
useOperationQueueStore.getState().flushDebouncedForBlock(blockId)
} catch {}
}}
onDrop={handleDrop}
onDragOver={handleDragOver}
onScroll={handleScroll}
onPaste={handlePaste}
onWheel={handleWheel}
onKeyDown={handleKeyDown}
autoComplete='off'
style={{ overflowX: 'auto' }}
disabled={disabled}
/>
<div
ref={overlayRef}
className='pointer-events-none absolute inset-0 flex items-center overflow-x-auto bg-transparent px-3 text-sm'
style={{ overflowX: 'auto' }}
>
try {
useOperationQueueStore.getState().flushDebouncedForBlock(blockId)
} catch {}
}}
onDrop={handleDrop}
onDragOver={handleDragOver}
onScroll={handleScroll}
onPaste={handlePaste}
onWheel={handleWheel}
onKeyDown={handleKeyDown}
autoComplete='off'
style={{ overflowX: 'auto' }}
disabled={disabled}
/>
<div
className='w-full whitespace-pre'
style={{ scrollbarWidth: 'none', minWidth: 'fit-content' }}
ref={overlayRef}
className='pointer-events-none absolute inset-0 flex items-center overflow-x-auto bg-transparent px-3 text-sm'
style={{ overflowX: 'auto' }}
>
{password && !isFocused
? '•'.repeat(value?.toString().length ?? 0)
: formatDisplayText(value?.toString() ?? '', true)}
<div
className='w-full whitespace-pre'
style={{ scrollbarWidth: 'none', minWidth: 'fit-content' }}
>
{password && !isFocused
? '•'.repeat(value?.toString().length ?? 0)
: formatDisplayText(value?.toString() ?? '', true)}
</div>
</div>
</div>
<EnvVarDropdown
visible={showEnvVars}
onSelect={handleEnvVarSelect}
searchTerm={searchTerm}
inputValue={value?.toString() ?? ''}
cursorPosition={cursorPosition}
onClose={() => {
setShowEnvVars(false)
setSearchTerm('')
}}
/>
<TagDropdown
visible={showTags}
onSelect={handleEnvVarSelect}
blockId={blockId}
activeSourceBlockId={activeSourceBlockId}
inputValue={value?.toString() ?? ''}
cursorPosition={cursorPosition}
onClose={() => {
setShowTags(false)
setActiveSourceBlockId(null)
}}
/>
</div>
{/* Wand Button */}
{wandHook && !isPreview && !wandHook.isStreaming && (
<div className='-translate-y-1/2 absolute top-1/2 right-3 z-10 flex items-center gap-1 opacity-0 transition-opacity group-hover:opacity-100'>
<Button
variant='ghost'
size='icon'
onClick={
wandHook.isPromptVisible ? wandHook.hidePromptInline : wandHook.showPromptInline
}
disabled={wandHook.isLoading || wandHook.isStreaming || disabled}
aria-label='Generate content with AI'
className='h-8 w-8 rounded-full border border-transparent bg-muted/80 text-muted-foreground shadow-sm transition-all duration-200 hover:border-primary/20 hover:bg-muted hover:text-primary hover:shadow'
>
<Wand2 className='h-4 w-4' />
</Button>
</div>
)}
{!wandHook?.isStreaming && (
<>
<EnvVarDropdown
visible={showEnvVars}
onSelect={handleEnvVarSelect}
searchTerm={searchTerm}
inputValue={value?.toString() ?? ''}
cursorPosition={cursorPosition}
onClose={() => {
setShowEnvVars(false)
setSearchTerm('')
}}
/>
<TagDropdown
visible={showTags}
onSelect={handleEnvVarSelect}
blockId={blockId}
activeSourceBlockId={activeSourceBlockId}
inputValue={value?.toString() ?? ''}
cursorPosition={cursorPosition}
onClose={() => {
setShowTags(false)
setActiveSourceBlockId(null)
}}
/>
</>
)}
</div>
</>
)
}
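The `useWand` hook's surface can be reconstructed from these call sites; a sketch of the assumed result type:

// Assumed shape of useWand's return value — inferred from usage in ShortInput, not the hook's source.
interface UseWandResult {
  isPromptVisible: boolean
  isLoading: boolean
  isStreaming: boolean
  promptInputValue: string
  showPromptInline: () => void
  hidePromptInline: () => void
  updatePromptValue: (value: string) => void
  generateStream: (args: { prompt: string }) => Promise<void>
  cancelGeneration: () => void
}

One caveat visible in the hunk: `useWand` is invoked conditionally (`config.wandConfig?.enabled ? useWand(...) : null`), which is safe only if `wandConfig.enabled` is stable for a mounted block, since React requires hooks to be called in the same order on every render.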

View File

@@ -344,7 +344,13 @@ export function TriggerConfig({
// Check if the trigger is connected
// Both webhook and credential-based triggers now have webhook database entries
const isTriggerConnected = Boolean(triggerId && actualTriggerId)
// In preview, consider it configured if the snapshot contains any trigger fields
const isConfiguredInPreview = isPreview && Boolean(
(propValue?.triggerPath && propValue.triggerPath.length > 0) ||
(propValue?.triggerConfig && Object.keys(propValue.triggerConfig).length > 0) ||
propValue?.triggerId
)
const isTriggerConnected = isConfiguredInPreview || Boolean(triggerId && actualTriggerId)
// Debug logging to help with troubleshooting
useEffect(() => {

View File

@@ -486,10 +486,15 @@ export function SubBlock({
</TooltipContent>
</Tooltip>
)}
{config.id === 'responseFormat' && !isValidJson && (
{config.id === 'responseFormat' && (
<Tooltip>
<TooltipTrigger asChild>
<AlertTriangle className='h-4 w-4 cursor-pointer text-destructive' />
<AlertTriangle
className={cn(
'h-4 w-4 cursor-pointer text-destructive',
!isValidJson ? 'opacity-100' : 'opacity-0'
)}
/>
</TooltipTrigger>
<TooltipContent side='top'>
<p>Invalid JSON</p>

View File

@@ -13,6 +13,8 @@ import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/provide
import type { BlockConfig, SubBlockConfig, SubBlockType } from '@/blocks/types'
import { useCollaborativeWorkflow } from '@/hooks/use-collaborative-workflow'
import { useExecutionStore } from '@/stores/execution/store'
import { usePanelStore } from '@/stores/panel/store'
import { useGeneralStore } from '@/stores/settings/general/store'
import { useWorkflowDiffStore } from '@/stores/workflow-diff'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
@@ -435,8 +437,12 @@ export function WorkflowBlock({ id, data }: NodeProps<WorkflowBlockProps>) {
stateToUse = mergedState?.subBlocks || {}
}
const isAdvancedMode = useWorkflowStore.getState().blocks[blockId]?.advancedMode ?? false
const isTriggerMode = useWorkflowStore.getState().blocks[blockId]?.triggerMode ?? false
const isAdvancedMode = data.isPreview
? ((data.blockState as any)?.advancedMode ?? false)
: useWorkflowStore.getState().blocks[blockId]?.advancedMode ?? false
const isTriggerMode = data.isPreview
? ((data.blockState as any)?.triggerMode ?? false)
: useWorkflowStore.getState().blocks[blockId]?.triggerMode ?? false
const effectiveAdvanced = currentWorkflow.isDiffMode ? displayAdvancedMode : isAdvancedMode
const effectiveTrigger = currentWorkflow.isDiffMode ? displayTriggerMode : isTriggerMode
@@ -580,6 +586,72 @@ export function WorkflowBlock({ id, data }: NodeProps<WorkflowBlockProps>) {
type === 'schedule' && !isLoadingScheduleInfo && scheduleInfo !== null
const userPermissions = useUserPermissionsContext()
// Debug mode and active selection
const isDebugModeEnabled = useGeneralStore((s) => s.isDebugModeEnabled)
const activeBlockIds = useExecutionStore((s) => s.activeBlockIds)
const panelFocusedBlockId = useExecutionStore((s) => s.panelFocusedBlockId)
const setPanelFocusedBlockId = useExecutionStore((s) => s.setPanelFocusedBlockId)
const executingBlockIds = useExecutionStore((s) => s.executingBlockIds)
const setActiveBlocks = useExecutionStore((s) => s.setActiveBlocks)
const setActiveTab = usePanelStore((s) => s.setActiveTab)
const breakpointId = useExecutionStore((s) => s.breakpointId)
const debugContext = useExecutionStore((s) => s.debugContext)
const startPositionIds = useExecutionStore((s) => s.startPositionIds)
const handleDebugOpen = (e: React.MouseEvent) => {
if (!isDebugModeEnabled) return
e.stopPropagation()
setActiveBlocks(new Set([id]))
setActiveTab('debug')
// Always select this block for the debug panel focus
setPanelFocusedBlockId(id)
}
// In debug mode, use executingBlockIds to detect actual executing blocks (not selection);
// outside debug, fall back to activeBlockIds driven by the executor
const isExecutingNow = isDebugModeEnabled ? executingBlockIds.has(id) : activeBlockIds.has(id)
const isCurrentBlock = isDebugModeEnabled && isPending
const isPanelFocused = isDebugModeEnabled && panelFocusedBlockId === id
// Check if block has errored during debug execution
const hasError =
isDebugModeEnabled && debugContext
? (() => {
// Check direct block state for error
const directState = debugContext.blockStates?.get(id)
if (
directState?.output &&
typeof directState.output === 'object' &&
'error' in directState.output
) {
return true
}
// Check virtual executions for errors (for blocks inside parallels)
for (const [key, state] of debugContext.blockStates?.entries() || []) {
// Check if this is a virtual ID for our block
if (typeof key === 'string' && key.startsWith(`${id}_parallel_`)) {
if (state?.output && typeof state.output === 'object' && 'error' in state.output) {
return true
}
}
}
// Also check block logs for this block
const hasErrorLog = debugContext.blockLogs?.some((log: any) => {
if (log.blockId === id && !log.success) return true
// Check if log is for a virtual version of this block
if (
typeof log.blockId === 'string' &&
log.blockId.startsWith(`${id}_parallel_`) &&
!log.success
) {
return true
}
return false
})
return hasErrorLog || false
})()
: false
return (
<div className='group relative'>
<Card
@@ -589,20 +661,54 @@ export function WorkflowBlock({ id, data }: NodeProps<WorkflowBlockProps>) {
'transition-block-bg transition-ring',
displayIsWide ? 'w-[480px]' : 'w-[320px]',
!isEnabled && 'shadow-sm',
isActive && 'animate-pulse-ring ring-2 ring-blue-500',
isPending && 'ring-2 ring-amber-500',
// Diff highlighting
diffStatus === 'new' && 'bg-green-50/50 ring-2 ring-green-500 dark:bg-green-900/10',
diffStatus === 'edited' && 'bg-orange-50/50 ring-2 ring-orange-500 dark:bg-orange-900/10',
// Error state - highest priority (only border, no background)
hasError && 'ring-2 ring-red-500',
// Panel-focused block highlight (unless errored)
!hasError && isPanelFocused && 'bg-blue-50/60 dark:bg-blue-900/5',
// Executing blocks match staging: pulsing blue ring
!hasError && isExecutingNow && 'animate-pulse-ring ring-2 ring-blue-500',
// Pending blocks show blue border when not executing
!hasError && !isExecutingNow && isCurrentBlock && 'ring-2 ring-blue-500',
// Diff highlighting (only if not in debug error state)
!hasError &&
diffStatus === 'new' &&
'bg-green-50/50 ring-2 ring-green-500 dark:bg-green-900/10',
!hasError &&
diffStatus === 'edited' &&
'bg-orange-50/50 ring-2 ring-orange-500 dark:bg-orange-900/10',
// Deleted block highlighting (in original workflow)
isDeletedBlock && 'bg-red-50/50 ring-2 ring-red-500 dark:bg-red-900/10',
'z-[20]'
)}
onClick={handleDebugOpen}
>
{/* Show debug indicator for pending blocks */}
{isPending && (
<div className='-top-6 -translate-x-1/2 absolute left-1/2 z-10 transform rounded-t-md bg-amber-500 px-2 py-0.5 text-white text-xs'>
Next Step
{/* Show error indicator for errored blocks */}
{hasError && (
<div className='-top-6 -translate-x-1/2 absolute left-1/2 z-10 transform rounded-t-md bg-red-500 px-2 py-0.5 text-white text-xs'>
Error
</div>
)}
{/* Show debug indicator for current blocks in debug mode (pending or executing) - but not if errored */}
{!hasError && isDebugModeEnabled && (isPending || executingBlockIds.has(id)) && (
<div className='-top-6 -translate-x-1/2 absolute left-1/2 z-10 transform rounded-t-md bg-blue-500 px-2 py-0.5 text-white text-xs'>
Current
</div>
)}
{/* Bottom indicators: breakpoint and start position side by side */}
{isDebugModeEnabled && (breakpointId === id || startPositionIds.has(id)) && (
<div className='-bottom-6 -translate-x-1/2 absolute left-1/2 z-10 flex transform items-end gap-2'>
{breakpointId === id && (
<div className='rounded-b-md bg-orange-500 px-2 py-0.5 text-white text-xs'>
Breakpoint
</div>
)}
{startPositionIds.has(id) && (
<div className='rounded-b-md bg-purple-600 px-2 py-0.5 text-white text-xs'>
Start Position
</div>
)}
</div>
)}
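The error check above treats debug-context keys of the form `${id}_parallel_...` as virtual executions of the same block (a convention inferred from this hunk); a small predicate capturing it:

// Convention inferred from the code above: blocks inside parallels get virtual IDs
// prefixed with `${blockId}_parallel_` in the debug context's block states.
function isVirtualIdFor(blockId: string, key: unknown): boolean {
  return typeof key === 'string' && key.startsWith(`${blockId}_parallel_`)
}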

View File

@@ -4,36 +4,38 @@ import { useWorkflowDiffStore } from '@/stores/workflow-diff/store'
import type { DeploymentStatus } from '@/stores/workflows/registry/types'
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
import type { BlockState, Loop, Parallel, WorkflowState } from '@/stores/workflows/workflow/types'
import { useDebugCanvasStore } from '@/stores/execution/debug-canvas/store'
/**
* Interface for the current workflow abstraction
*/
export interface CurrentWorkflow {
// Current workflow state properties
blocks: Record<string, BlockState>
edges: Edge[]
loops: Record<string, Loop>
parallels: Record<string, Parallel>
lastSaved?: number
isDeployed?: boolean
deployedAt?: Date
deploymentStatuses?: Record<string, DeploymentStatus>
needsRedeployment?: boolean
hasActiveWebhook?: boolean
// Current workflow state properties
blocks: Record<string, BlockState>
edges: Edge[]
loops: Record<string, Loop>
parallels: Record<string, Parallel>
lastSaved?: number
isDeployed?: boolean
deployedAt?: Date
deploymentStatuses?: Record<string, DeploymentStatus>
needsRedeployment?: boolean
hasActiveWebhook?: boolean
// Mode information
isDiffMode: boolean
isNormalMode: boolean
// Mode information
isDiffMode: boolean
isNormalMode: boolean
isDebugCanvasMode?: boolean
// Full workflow state (for cases that need the complete object)
workflowState: WorkflowState
// Full workflow state (for cases that need the complete object)
workflowState: WorkflowState
// Helper methods
getBlockById: (blockId: string) => BlockState | undefined
getBlockCount: () => number
getEdgeCount: () => number
hasBlocks: () => boolean
hasEdges: () => boolean
// Helper methods
getBlockById: (blockId: string) => BlockState | undefined
getBlockCount: () => number
getEdgeCount: () => number
hasBlocks: () => boolean
hasEdges: () => boolean
}
/**
@@ -41,48 +43,91 @@ export interface CurrentWorkflow {
* Automatically handles diff vs normal mode without exposing the complexity to consumers.
*/
export function useCurrentWorkflow(): CurrentWorkflow {
// Get normal workflow state
const normalWorkflow = useWorkflowStore((state) => state.getWorkflowState())
// Get normal workflow state
const normalWorkflow = useWorkflowStore((state) => state.getWorkflowState())
// Get diff state - now including isDiffReady
const { isShowingDiff, isDiffReady, diffWorkflow } = useWorkflowDiffStore()
// Get diff state - now including isDiffReady
const { isShowingDiff, isDiffReady, diffWorkflow } = useWorkflowDiffStore()
// Create the abstracted interface
const currentWorkflow = useMemo((): CurrentWorkflow => {
// Determine which workflow to use - only use diff if it's ready
const hasDiffBlocks =
!!diffWorkflow && Object.keys((diffWorkflow as any).blocks || {}).length > 0
const shouldUseDiff = isShowingDiff && isDiffReady && hasDiffBlocks
const activeWorkflow = shouldUseDiff ? diffWorkflow : normalWorkflow
// Get debug canvas override
const debugCanvas = useDebugCanvasStore((s) => ({ isActive: s.isActive, workflowState: s.workflowState }))
return {
// Current workflow state
blocks: activeWorkflow.blocks,
edges: activeWorkflow.edges,
loops: activeWorkflow.loops || {},
parallels: activeWorkflow.parallels || {},
lastSaved: activeWorkflow.lastSaved,
isDeployed: activeWorkflow.isDeployed,
deployedAt: activeWorkflow.deployedAt,
deploymentStatuses: activeWorkflow.deploymentStatuses,
needsRedeployment: activeWorkflow.needsRedeployment,
hasActiveWebhook: activeWorkflow.hasActiveWebhook,
// Create the abstracted interface
const currentWorkflow = useMemo((): CurrentWorkflow => {
// Prefer debug canvas if active
const hasDebugCanvas = !!debugCanvas.isActive && !!debugCanvas.workflowState
if (hasDebugCanvas) {
console.log('[useCurrentWorkflow] Using debug canvas state', {
isActive: debugCanvas.isActive,
hasWorkflowState: !!debugCanvas.workflowState,
blockCount: debugCanvas.workflowState ? Object.keys(debugCanvas.workflowState.blocks || {}).length : 0,
edgeCount: debugCanvas.workflowState ? (debugCanvas.workflowState.edges || []).length : 0
})
const activeWorkflow = debugCanvas.workflowState as WorkflowState
return {
blocks: activeWorkflow.blocks,
edges: activeWorkflow.edges,
loops: activeWorkflow.loops || {},
parallels: activeWorkflow.parallels || {},
lastSaved: activeWorkflow.lastSaved,
isDeployed: activeWorkflow.isDeployed,
deployedAt: activeWorkflow.deployedAt,
deploymentStatuses: activeWorkflow.deploymentStatuses,
needsRedeployment: activeWorkflow.needsRedeployment,
hasActiveWebhook: activeWorkflow.hasActiveWebhook,
isDiffMode: false,
isNormalMode: false,
isDebugCanvasMode: true,
workflowState: activeWorkflow,
getBlockById: (blockId: string) => activeWorkflow.blocks[blockId],
getBlockCount: () => Object.keys(activeWorkflow.blocks).length,
getEdgeCount: () => activeWorkflow.edges.length,
hasBlocks: () => Object.keys(activeWorkflow.blocks).length > 0,
hasEdges: () => activeWorkflow.edges.length > 0,
}
}
// Mode information - update to reflect ready state
isDiffMode: shouldUseDiff,
isNormalMode: !shouldUseDiff,
// Determine which workflow to use - only use diff if it's ready
const hasDiffBlocks = !!diffWorkflow && Object.keys((diffWorkflow as any).blocks || {}).length > 0
const shouldUseDiff = isShowingDiff && isDiffReady && hasDiffBlocks
const activeWorkflow = shouldUseDiff ? diffWorkflow : normalWorkflow
console.log('[useCurrentWorkflow] Not using debug canvas', {
debugCanvasIsActive: debugCanvas.isActive,
debugCanvasHasState: !!debugCanvas.workflowState,
usingDiff: shouldUseDiff,
normalBlockCount: Object.keys(normalWorkflow.blocks || {}).length
})
// Full workflow state (for cases that need the complete object)
workflowState: activeWorkflow,
return {
// Current workflow state
blocks: activeWorkflow.blocks,
edges: activeWorkflow.edges,
loops: activeWorkflow.loops || {},
parallels: activeWorkflow.parallels || {},
lastSaved: activeWorkflow.lastSaved,
isDeployed: activeWorkflow.isDeployed,
deployedAt: activeWorkflow.deployedAt,
deploymentStatuses: activeWorkflow.deploymentStatuses,
needsRedeployment: activeWorkflow.needsRedeployment,
hasActiveWebhook: activeWorkflow.hasActiveWebhook,
// Helper methods
getBlockById: (blockId: string) => activeWorkflow.blocks[blockId],
getBlockCount: () => Object.keys(activeWorkflow.blocks).length,
getEdgeCount: () => activeWorkflow.edges.length,
hasBlocks: () => Object.keys(activeWorkflow.blocks).length > 0,
hasEdges: () => activeWorkflow.edges.length > 0,
}
}, [normalWorkflow, isShowingDiff, isDiffReady, diffWorkflow])
// Mode information - update to reflect ready state
isDiffMode: shouldUseDiff,
isNormalMode: !shouldUseDiff,
isDebugCanvasMode: false,
return currentWorkflow
// Full workflow state (for cases that need the complete object)
workflowState: activeWorkflow,
// Helper methods
getBlockById: (blockId: string) => activeWorkflow.blocks[blockId],
getBlockCount: () => Object.keys(activeWorkflow.blocks).length,
getEdgeCount: () => activeWorkflow.edges.length,
hasBlocks: () => Object.keys(activeWorkflow.blocks).length > 0,
hasEdges: () => activeWorkflow.edges.length > 0,
}
}, [normalWorkflow, isShowingDiff, isDiffReady, diffWorkflow, debugCanvas.isActive, debugCanvas.workflowState])
return currentWorkflow
}
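Several hunks in this PR call `useDebugCanvasStore.getState().clear()`; a sketch of the store shape those calls imply (assumed, not the actual store file):

import { create } from 'zustand'
import type { WorkflowState } from '@/stores/workflows/workflow/types'

// Assumed shape — reconstructed from usage across this PR.
interface DebugCanvasStore {
  isActive: boolean
  workflowState: WorkflowState | null
  setWorkflowState: (state: WorkflowState) => void
  clear: () => void
}

export const useDebugCanvasStore = create<DebugCanvasStore>((set) => ({
  isActive: false,
  workflowState: null,
  setWorkflowState: (workflowState) => set({ workflowState, isActive: true }),
  clear: () => set({ workflowState: null, isActive: false }),
}))

Worth noting: the selector above returns a fresh `{ isActive, workflowState }` object on every render, which re-renders on any store change under zustand's default strict-equality check; selecting the two fields separately (or via a shallow comparator) is the usual pattern.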

View File

@@ -9,6 +9,7 @@ import { Executor } from '@/executor'
import type { BlockLog, ExecutionResult, StreamingExecution } from '@/executor/types'
import { Serializer } from '@/serializer'
import type { SerializedWorkflow } from '@/serializer/types'
import { useDebugSnapshotStore } from '@/stores/execution/debug-snapshots/store'
import { useExecutionStore } from '@/stores/execution/store'
import { useConsoleStore } from '@/stores/panel/console/store'
import { useVariablesStore } from '@/stores/panel/variables/store'
@@ -62,6 +63,8 @@ export function useWorkflowExecution() {
setExecutor,
setDebugContext,
setActiveBlocks,
setExecutingBlockIds,
startPositionIds,
} = useExecutionStore()
const [executionResult, setExecutionResult] = useState<ExecutionResult | null>(null)
@@ -70,7 +73,7 @@ export function useWorkflowExecution() {
*/
const validateDebugState = useCallback((): DebugValidationResult => {
if (!executor || !debugContext || pendingBlocks.length === 0) {
const missing = []
const missing = [] as string[]
if (!executor) missing.push('executor')
if (!debugContext) missing.push('debugContext')
if (pendingBlocks.length === 0) missing.push('pendingBlocks')
@@ -93,6 +96,7 @@ export function useWorkflowExecution() {
setExecutor(null)
setPendingBlocks([])
setActiveBlocks(new Set())
setExecutingBlockIds(new Set())
// Reset debug mode setting if it was enabled
if (isDebugModeEnabled) {
@@ -105,6 +109,7 @@ export function useWorkflowExecution() {
setExecutor,
setPendingBlocks,
setActiveBlocks,
setExecutingBlockIds,
isDebugModeEnabled,
])
@@ -120,7 +125,7 @@ export function useWorkflowExecution() {
}, [])
/**
* Handles debug session completion
* Handles debug session completion - keep debug session open for inspection
*/
const handleDebugSessionComplete = useCallback(
async (result: ExecutionResult) => {
@@ -130,10 +135,14 @@ export function useWorkflowExecution() {
// Persist logs
await persistLogs(uuidv4(), result)
// Reset debug state
resetDebugState()
// Keep debug mode open for inspection: stop executing, clear pending
setIsExecuting(false)
setPendingBlocks([])
setExecutingBlockIds(new Set())
// Keep debugContext and executor so the panel can inspect state
// Do not reset isDebugging
},
[activeWorkflowId, resetDebugState]
[activeWorkflowId, setIsExecuting, setPendingBlocks, setExecutingBlockIds]
)
/**
@@ -148,16 +157,30 @@ export function useWorkflowExecution() {
// Update debug context and pending blocks
if (result.metadata?.context) {
setDebugContext(result.metadata.context)
// Capture snapshot for revert/backstep
try {
useDebugSnapshotStore.getState().captureFromContext(result.metadata.context as any)
useDebugSnapshotStore
.getState()
.pushFromContext(result.metadata.context as any, result.metadata?.pendingBlocks || [])
} catch {}
}
if (result.metadata?.pendingBlocks) {
setPendingBlocks(result.metadata.pendingBlocks)
// Filter triggers from next pending
const filtered = (result.metadata.pendingBlocks as string[]).filter((id) => {
const block = currentWorkflow.blocks[id]
if (!block) return false
const cfg = getBlock(block.type)
return cfg?.category !== 'triggers'
})
setPendingBlocks(filtered)
}
},
[setDebugContext, setPendingBlocks]
[setDebugContext, setPendingBlocks, currentWorkflow.blocks]
)
/**
* Handles debug execution errors
* Handles debug execution errors - keep debug open for inspection
*/
const handleDebugExecutionError = useCallback(
async (error: any, operation: string) => {
@@ -176,10 +199,13 @@ export function useWorkflowExecution() {
// Persist logs
await persistLogs(uuidv4(), errorResult)
// Reset debug state
resetDebugState()
// Keep debug session open for inspection
setIsExecuting(false)
setPendingBlocks([])
setExecutingBlockIds(new Set())
// Keep isDebugging, debugContext, and executor intact
},
[debugContext, activeWorkflowId, resetDebugState]
[debugContext, activeWorkflowId, setIsExecuting, setPendingBlocks, setExecutingBlockIds]
)
const persistLogs = async (
@@ -268,8 +294,8 @@ export function useWorkflowExecution() {
const isChatExecution =
workflowInput && typeof workflowInput === 'object' && 'input' in workflowInput
// For chat executions, we'll use a streaming approach
if (isChatExecution) {
// For chat executions, use streaming only when NOT debugging
if (isChatExecution && !enableDebug) {
const stream = new ReadableStream({
async start(controller) {
const encoder = new TextEncoder()
@@ -390,7 +416,7 @@ export function useWorkflowExecution() {
}
try {
const result = await executeWorkflow(workflowInput, onStream, executionId)
const result = await executeWorkflow(workflowInput, onStream, executionId, false)
// Check if execution was cancelled
if (
@@ -448,7 +474,6 @@ export function useWorkflowExecution() {
} catch (error: any) {
controller.error(error)
} finally {
controller.close()
setIsExecuting(false)
setIsDebugging(false)
setActiveBlocks(new Set())
@@ -458,12 +483,23 @@ export function useWorkflowExecution() {
return { success: true, stream }
}
// For manual (non-chat) execution
// For manual (non-streaming) execution including debug and non-chat
const executionId = uuidv4()
try {
const result = await executeWorkflow(workflowInput, undefined, executionId)
const result = await executeWorkflow(workflowInput, undefined, executionId, enableDebug)
if (result && 'metadata' in result && result.metadata?.isDebugSession) {
setDebugContext(result.metadata.context || null)
try {
if (result.metadata?.context) {
useDebugSnapshotStore.getState().captureFromContext(result.metadata.context as any)
useDebugSnapshotStore
.getState()
.pushFromContext(
result.metadata.context as any,
result.metadata?.pendingBlocks || []
)
}
} catch {}
if (result.metadata.pendingBlocks) {
setPendingBlocks(result.metadata.pendingBlocks)
}
@@ -508,13 +544,15 @@ export function useWorkflowExecution() {
setExecutor,
setPendingBlocks,
setActiveBlocks,
startPositionIds,
]
)
const executeWorkflow = async (
workflowInput?: any,
onStream?: (se: StreamingExecution) => Promise<void>,
executionId?: string
executionId?: string,
debugRequested?: boolean
): Promise<ExecutionResult | StreamingExecution> => {
// Use currentWorkflow but check if we're in diff mode
const {
@@ -602,7 +640,7 @@ export function useWorkflowExecution() {
const envVars = getAllVariables()
const envVarValues = Object.entries(envVars).reduce(
(acc, [key, variable]) => {
acc[key] = variable.value
acc[key] = (variable as any).value
return acc
},
{} as Record<string, string>
@@ -672,7 +710,9 @@ export function useWorkflowExecution() {
setExecutor(newExecutor)
// Execute workflow
return newExecutor.execute(activeWorkflowId || '')
const execResult = await newExecutor.execute(activeWorkflowId || '')
return execResult
}
const handleExecutionError = (error: any) => {
@@ -748,14 +788,33 @@ export function useWorkflowExecution() {
// Validate debug state
const validation = validateDebugState()
if (!validation.isValid) {
resetDebugState()
// Keep session open for inspection; simply stop executing
setIsExecuting(false)
return
}
// Compute executable set without triggers
const nonTriggerPending = pendingBlocks.filter((id) => {
const block = currentWorkflow.blocks[id]
if (!block) return false
const cfg = getBlock(block.type)
return cfg?.category !== 'triggers'
})
if (nonTriggerPending.length === 0) {
// Nothing executable
setIsExecuting(false)
return
}
try {
logger.info('Executing debug step with blocks:', pendingBlocks)
const result = await executor!.continueExecution(pendingBlocks, debugContext!)
logger.info('Executing debug step with blocks:', nonTriggerPending)
// Mark current pending blocks as executing for UI pulse
setExecutingBlockIds(new Set(nonTriggerPending))
const result = await executor!.continueExecution(nonTriggerPending, debugContext!)
logger.info('Debug step execution result:', result)
// Clear executing state after step returns
setExecutingBlockIds(new Set())
if (isDebugSessionComplete(result)) {
await handleDebugSessionComplete(result)
@@ -763,6 +822,7 @@ export function useWorkflowExecution() {
handleDebugSessionContinuation(result)
}
} catch (error: any) {
setExecutingBlockIds(new Set())
await handleDebugExecutionError(error, 'step')
}
}, [
@@ -771,11 +831,13 @@ export function useWorkflowExecution() {
pendingBlocks,
activeWorkflowId,
validateDebugState,
resetDebugState,
setIsExecuting,
setExecutingBlockIds,
isDebugSessionComplete,
handleDebugSessionComplete,
handleDebugSessionContinuation,
handleDebugExecutionError,
currentWorkflow.blocks,
])
/**
@@ -791,7 +853,8 @@ export function useWorkflowExecution() {
// Validate debug state
const validation = validateDebugState()
if (!validation.isValid) {
resetDebugState()
// Keep session open for inspection; simply stop executing
setIsExecuting(false)
return
}
@@ -808,6 +871,14 @@ export function useWorkflowExecution() {
let currentContext = { ...debugContext! }
let currentPendingBlocks = [...pendingBlocks]
// Filter initial pending
currentPendingBlocks = currentPendingBlocks.filter((id) => {
const block = currentWorkflow.blocks[id]
if (!block) return false
const cfg = getBlock(block.type)
return cfg?.category !== 'triggers'
})
logger.info('Starting resume execution with blocks:', currentPendingBlocks)
// Continue execution until there are no more pending blocks
@@ -819,7 +890,9 @@ export function useWorkflowExecution() {
`Resume iteration ${iterationCount + 1}, executing ${currentPendingBlocks.length} blocks`
)
setExecutingBlockIds(new Set(currentPendingBlocks))
currentResult = await executor!.continueExecution(currentPendingBlocks, currentContext)
setExecutingBlockIds(new Set())
logger.info('Resume iteration result:', {
success: currentResult.success,
@@ -835,9 +908,14 @@ export function useWorkflowExecution() {
break
}
// Update pending blocks for next iteration
// Update pending blocks for next iteration, filtered
if (currentResult.metadata?.pendingBlocks) {
currentPendingBlocks = currentResult.metadata.pendingBlocks
currentPendingBlocks = (currentResult.metadata.pendingBlocks as string[]).filter((id) => {
const block = currentWorkflow.blocks[id]
if (!block) return false
const cfg = getBlock(block.type)
return cfg?.category !== 'triggers'
})
} else {
logger.info('No pending blocks in result, ending resume')
break
@@ -864,6 +942,7 @@ export function useWorkflowExecution() {
// Handle completion
await handleDebugSessionComplete(currentResult)
} catch (error: any) {
setExecutingBlockIds(new Set())
await handleDebugExecutionError(error, 'resume')
}
}, [
@@ -872,9 +951,11 @@ export function useWorkflowExecution() {
pendingBlocks,
activeWorkflowId,
validateDebugState,
resetDebugState,
setIsExecuting,
setExecutingBlockIds,
handleDebugSessionComplete,
handleDebugExecutionError,
currentWorkflow.blocks,
])
/**

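The `cfg?.category !== 'triggers'` filter recurs at several call sites in this hunk; a hedged extraction (import paths assumed):

import { getBlock } from '@/blocks'
import type { BlockState } from '@/stores/workflows/workflow/types'

// Hypothetical helper — the PR inlines this filter at each call site instead.
function filterNonTriggerBlocks(ids: string[], blocks: Record<string, BlockState>): string[] {
  return ids.filter((id) => {
    const block = blocks[id]
    if (!block) return false
    return getBlock(block.type)?.category !== 'triggers'
  })
}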
View File

@@ -131,6 +131,17 @@ export async function executeWorkflowWithLogging(
// Merge subblock states from the appropriate store
const mergedStates = mergeSubblockState(validBlocks)
// Log the current workflow state before filtering
logger.info('🔍 Current workflow state before filtering:', {
totalBlocks: Object.keys(mergedStates).length,
blocks: Object.entries(mergedStates).map(([id, block]) => ({
id,
type: block.type,
triggerMode: block.triggerMode,
category: block.type ? getBlock(block.type)?.category : undefined,
})),
})
// Filter out trigger blocks for manual execution
const filteredStates = Object.entries(mergedStates).reduce(
(acc, [id, block]) => {
@@ -142,16 +153,29 @@ export async function executeWorkflowWithLogging(
const blockConfig = getBlock(block.type)
const isTriggerBlock = blockConfig?.category === 'triggers'
const isInTriggerMode = block.triggerMode === true
// Skip trigger blocks during manual execution
if (!isTriggerBlock) {
// Skip trigger blocks AND blocks in trigger mode during manual execution
if (!isTriggerBlock && !isInTriggerMode) {
acc[id] = block
} else {
logger.info(`🚫 Filtering out block ${id} - trigger category: ${isTriggerBlock}, trigger mode: ${isInTriggerMode}`)
}
return acc
},
{} as typeof mergedStates
)
// Log the filtered state that will be used for execution (not snapshots)
logger.info('📦 Filtered workflow state for execution:', {
totalBlocks: Object.keys(filteredStates).length,
blocks: Object.entries(filteredStates).map(([id, block]) => ({
id,
type: block.type,
triggerMode: block.triggerMode,
})),
})
const currentBlockStates = Object.entries(filteredStates).reduce(
(acc, [id, block]) => {
acc[id] = Object.entries(block.subBlocks).reduce(

View File

@@ -1635,6 +1635,8 @@ const WorkflowContent = React.memo(() => {
)
}
const isReadOnly = currentWorkflow.isDebugCanvasMode === true || !effectivePermissions.canEdit
return (
<div className='flex h-screen w-full flex-col overflow-hidden'>
<div className='relative h-full w-full flex-1 transition-all duration-200'>
@@ -1650,11 +1652,11 @@ const WorkflowContent = React.memo(() => {
edges={edgesWithSelection}
onNodesChange={onNodesChange}
onEdgesChange={onEdgesChange}
onConnect={effectivePermissions.canEdit ? onConnect : undefined}
onConnect={isReadOnly ? undefined : onConnect}
nodeTypes={nodeTypes}
edgeTypes={edgeTypes}
onDrop={effectivePermissions.canEdit ? onDrop : undefined}
onDragOver={effectivePermissions.canEdit ? onDragOver : undefined}
onDrop={isReadOnly ? undefined : onDrop}
onDragOver={isReadOnly ? undefined : onDragOver}
fitView
minZoom={0.1}
maxZoom={1.3}
@@ -1674,22 +1676,22 @@ const WorkflowContent = React.memo(() => {
onEdgeClick={onEdgeClick}
elementsSelectable={true}
selectNodesOnDrag={false}
nodesConnectable={effectivePermissions.canEdit}
nodesDraggable={effectivePermissions.canEdit}
nodesConnectable={!isReadOnly}
nodesDraggable={!isReadOnly}
draggable={false}
noWheelClassName='allow-scroll'
edgesFocusable={true}
edgesUpdatable={effectivePermissions.canEdit}
edgesUpdatable={!isReadOnly}
className='workflow-container h-full'
onNodeDrag={effectivePermissions.canEdit ? onNodeDrag : undefined}
onNodeDragStop={effectivePermissions.canEdit ? onNodeDragStop : undefined}
onNodeDragStart={effectivePermissions.canEdit ? onNodeDragStart : undefined}
onNodeDrag={isReadOnly ? undefined : onNodeDrag}
onNodeDragStop={isReadOnly ? undefined : onNodeDragStop}
onNodeDragStart={isReadOnly ? undefined : onNodeDragStart}
snapToGrid={false}
snapGrid={[20, 20]}
elevateEdgesOnSelect={true}
elevateNodesOnSelect={true}
autoPanOnConnect={effectivePermissions.canEdit}
autoPanOnNodeDrag={effectivePermissions.canEdit}
autoPanOnConnect={!isReadOnly}
autoPanOnNodeDrag={!isReadOnly}
>
<Background
color='hsl(var(--workflow-dots))'

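Since every interactive ReactFlow prop now keys off `isReadOnly`, the ternaries could be derived once and spread; a sketch of that design choice (not what the PR does):

import type { ReactFlowProps } from 'reactflow'

// Hypothetical helper: compute the interaction props from read-only state, then spread into <ReactFlow />.
function interactionProps(
  isReadOnly: boolean,
  handlers: Pick<
    ReactFlowProps,
    'onConnect' | 'onDrop' | 'onDragOver' | 'onNodeDrag' | 'onNodeDragStop' | 'onNodeDragStart'
  >
): Partial<ReactFlowProps> {
  if (isReadOnly) {
    return { nodesConnectable: false, nodesDraggable: false, edgesUpdatable: false }
  }
  return { nodesConnectable: true, nodesDraggable: true, edgesUpdatable: true, ...handlers }
}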
View File

@@ -26,7 +26,7 @@ import {
AlertDialogTitle,
} from '@/components/ui/alert-dialog'
import { ScrollArea } from '@/components/ui/scroll-area'
import { MAX_TAG_SLOTS } from '@/lib/constants/knowledge'
import { MAX_TAG_SLOTS } from '@/lib/knowledge/consts'
import { createLogger } from '@/lib/logs/console/logger'
import { getDocumentIcon } from '@/app/workspace/[workspaceId]/knowledge/components/icons/document-icons'
import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider'

View File

@@ -17,7 +17,7 @@ import {
SelectValue,
} from '@/components/ui'
import { ScrollArea } from '@/components/ui/scroll-area'
import { MAX_TAG_SLOTS, TAG_SLOTS, type TagSlot } from '@/lib/constants/knowledge'
import { MAX_TAG_SLOTS, TAG_SLOTS, type TagSlot } from '@/lib/knowledge/consts'
import { createLogger } from '@/lib/logs/console/logger'
import type { DocumentTag } from '@/app/workspace/[workspaceId]/knowledge/components/document-tag-entry/document-tag-entry'
import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider'

View File

@@ -1007,8 +1007,11 @@ export function Sidebar() {
>
<UsageIndicator
onClick={() => {
const isBlocked = useSubscriptionStore.getState().getBillingStatus() === 'blocked'
if (isBlocked) {
const subscriptionStore = useSubscriptionStore.getState()
const isBlocked = subscriptionStore.getBillingStatus() === 'blocked'
const canUpgrade = subscriptionStore.canUpgrade()
if (isBlocked || !canUpgrade) {
if (typeof window !== 'undefined') {
window.dispatchEvent(
new CustomEvent('open-settings', { detail: { tab: 'subscription' } })

View File

@@ -1,4 +1,5 @@
import { task } from '@trigger.dev/sdk'
import { env } from '@/lib/env'
import { processDocumentAsync } from '@/lib/knowledge/documents/service'
import { createLogger } from '@/lib/logs/console/logger'
@@ -25,15 +26,15 @@ export type DocumentProcessingPayload = {
export const processDocument = task({
id: 'knowledge-process-document',
maxDuration: 300,
maxDuration: env.KB_CONFIG_MAX_DURATION || 300,
retry: {
maxAttempts: 3,
factor: 2,
minTimeoutInMs: 1000,
maxTimeoutInMs: 10000,
maxAttempts: env.KB_CONFIG_MAX_ATTEMPTS || 3,
factor: env.KB_CONFIG_RETRY_FACTOR || 2,
minTimeoutInMs: env.KB_CONFIG_MIN_TIMEOUT || 1000,
maxTimeoutInMs: env.KB_CONFIG_MAX_TIMEOUT || 10000,
},
queue: {
concurrencyLimit: 20,
concurrencyLimit: env.KB_CONFIG_CONCURRENCY_LIMIT || 20,
name: 'document-processing-queue',
},
run: async (payload: DocumentProcessingPayload) => {
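The KB_CONFIG_* values come from `@/lib/env`, so each `||` falls back to the previous hard-coded default when the variable is unset; a sketch of how such optional numeric envs might be parsed (the repo's env module likely uses a schema library instead):

// Hypothetical parsing — shown only to illustrate why `env.X || default` works above.
function optionalNumber(name: string): number | undefined {
  const raw = process.env[name]
  if (raw === undefined || raw === '') return undefined
  const n = Number(raw)
  return Number.isFinite(n) ? n : undefined
}

export const env = {
  KB_CONFIG_MAX_DURATION: optionalNumber('KB_CONFIG_MAX_DURATION'),
  KB_CONFIG_MAX_ATTEMPTS: optionalNumber('KB_CONFIG_MAX_ATTEMPTS'),
  KB_CONFIG_RETRY_FACTOR: optionalNumber('KB_CONFIG_RETRY_FACTOR'),
  KB_CONFIG_MIN_TIMEOUT: optionalNumber('KB_CONFIG_MIN_TIMEOUT'),
  KB_CONFIG_MAX_TIMEOUT: optionalNumber('KB_CONFIG_MAX_TIMEOUT'),
  KB_CONFIG_CONCURRENCY_LIMIT: optionalNumber('KB_CONFIG_CONCURRENCY_LIMIT'),
}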

View File

@@ -99,6 +99,10 @@ export async function executeWebhookJob(payload: WebhookExecutionPayload) {
userId: payload.userId,
workspaceId: '', // TODO: Get from workflow if needed
variables: decryptedEnvVars,
initialInput: payload.body || {},
triggerData: { provider: payload.provider, blockId: payload.blockId },
startBlockId: payload.blockId,
executionType: 'webhook',
})
// Merge subblock states (matching workflow-execution pattern)

View File

@@ -118,6 +118,72 @@ export const MySQLBlock: BlockConfig<MySQLResponse> = {
placeholder: 'SELECT * FROM users WHERE active = true',
condition: { field: 'operation', value: 'query' },
required: true,
wandConfig: {
enabled: true,
maintainHistory: true,
prompt: `You are an expert MySQL database developer. Write MySQL SQL queries based on the user's request.
### CONTEXT
{context}
### CRITICAL INSTRUCTION
Return ONLY the SQL query. Do not include any explanations, markdown formatting, comments, or additional text. Just the raw SQL query.
### QUERY GUIDELINES
1. **Syntax**: Use MySQL-specific syntax and functions
2. **Performance**: Write efficient queries with proper indexing considerations
3. **Security**: Use parameterized queries when applicable
4. **Readability**: Format queries with proper indentation and spacing
5. **Best Practices**: Follow MySQL naming conventions
### MYSQL FEATURES
- Use MySQL-specific functions (IFNULL, DATE_FORMAT, CONCAT, etc.)
- Leverage MySQL features like GROUP_CONCAT, AUTO_INCREMENT
- Use proper MySQL data types (VARCHAR, DATETIME, DECIMAL, JSON, etc.)
- Include appropriate LIMIT clauses for large result sets
### EXAMPLES
**Simple Select**: "Get all active users"
→ SELECT id, name, email, created_at
FROM users
WHERE active = 1
ORDER BY created_at DESC;
**Complex Join**: "Get users with their order counts and total spent"
→ SELECT
u.id,
u.name,
u.email,
COUNT(o.id) as order_count,
IFNULL(SUM(o.total), 0) as total_spent
FROM users u
LEFT JOIN orders o ON u.id = o.user_id
WHERE u.active = 1
GROUP BY u.id, u.name, u.email
HAVING COUNT(o.id) > 0
ORDER BY total_spent DESC;
**With Subquery**: "Get top 10 products by sales"
→ SELECT
p.id,
p.name,
(SELECT SUM(oi.quantity * oi.price)
FROM order_items oi
JOIN orders o ON oi.order_id = o.id
WHERE oi.product_id = p.id
AND o.created_at >= DATE_SUB(NOW(), INTERVAL 30 DAY)
) as total_sales
FROM products p
WHERE p.active = 1
ORDER BY total_sales DESC
LIMIT 10;
### REMEMBER
Return ONLY the SQL query - no explanations, no markdown, no extra text.`,
placeholder: 'Describe the SQL query you need...',
generationType: 'sql-query',
},
},
{
id: 'query',
@@ -127,6 +193,72 @@ export const MySQLBlock: BlockConfig<MySQLResponse> = {
placeholder: 'SELECT * FROM table_name',
condition: { field: 'operation', value: 'execute' },
required: true,
wandConfig: {
enabled: true,
maintainHistory: true,
prompt: `You are an expert MySQL database developer. Write MySQL SQL queries based on the user's request.
### CONTEXT
{context}
### CRITICAL INSTRUCTION
Return ONLY the SQL query. Do not include any explanations, markdown formatting, comments, or additional text. Just the raw SQL query.
### QUERY GUIDELINES
1. **Syntax**: Use MySQL-specific syntax and functions
2. **Performance**: Write efficient queries with proper indexing considerations
3. **Security**: Use parameterized queries when applicable
4. **Readability**: Format queries with proper indentation and spacing
5. **Best Practices**: Follow MySQL naming conventions
### MYSQL FEATURES
- Use MySQL-specific functions (IFNULL, DATE_FORMAT, CONCAT, etc.)
- Leverage MySQL features like GROUP_CONCAT, AUTO_INCREMENT
- Use proper MySQL data types (VARCHAR, DATETIME, DECIMAL, JSON, etc.)
- Include appropriate LIMIT clauses for large result sets
### EXAMPLES
**Simple Select**: "Get all active users"
→ SELECT id, name, email, created_at
FROM users
WHERE active = 1
ORDER BY created_at DESC;
**Complex Join**: "Get users with their order counts and total spent"
→ SELECT
u.id,
u.name,
u.email,
COUNT(o.id) as order_count,
IFNULL(SUM(o.total), 0) as total_spent
FROM users u
LEFT JOIN orders o ON u.id = o.user_id
WHERE u.active = 1
GROUP BY u.id, u.name, u.email
HAVING COUNT(o.id) > 0
ORDER BY total_spent DESC;
**With Subquery**: "Get top 10 products by sales"
→ SELECT
p.id,
p.name,
(SELECT SUM(oi.quantity * oi.price)
FROM order_items oi
JOIN orders o ON oi.order_id = o.id
WHERE oi.product_id = p.id
AND o.created_at >= DATE_SUB(NOW(), INTERVAL 30 DAY)
) as total_sales
FROM products p
WHERE p.active = 1
ORDER BY total_sales DESC
LIMIT 10;
### REMEMBER
Return ONLY the SQL query - no explanations, no markdown, no extra text.`,
placeholder: 'Describe the SQL query you need...',
generationType: 'sql-query',
},
},
// Data for insert operations
{

View File
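Note: the `{context}` token in the wand prompts above is a template placeholder. A minimal sketch of how such a prompt might be rendered before being sent to the model — the helper name and call site are hypothetical, not taken from this diff:

function renderWandPrompt(template: string, context: string): string {
  // Substitute the block's current context (existing query, schema hints, etc.)
  // into the prompt template before calling the generation endpoint.
  return template.replace('{context}', context)
}

const rendered = renderWandPrompt(
  'You are an expert MySQL developer.\n### CONTEXT\n{context}\n...',
  'Table: users(id, name, email, active)'
)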

@@ -118,6 +118,73 @@ export const PostgreSQLBlock: BlockConfig<PostgresResponse> = {
placeholder: 'SELECT * FROM users WHERE active = true',
condition: { field: 'operation', value: 'query' },
required: true,
wandConfig: {
enabled: true,
maintainHistory: true,
prompt: `You are an expert PostgreSQL database developer. Write PostgreSQL SQL queries based on the user's request.
### CONTEXT
{context}
### CRITICAL INSTRUCTION
Return ONLY the SQL query. Do not include any explanations, markdown formatting, comments, or additional text. Just the raw SQL query.
### QUERY GUIDELINES
1. **Syntax**: Use PostgreSQL-specific syntax and functions
2. **Performance**: Write efficient queries with proper indexing considerations
3. **Security**: Use parameterized queries when applicable
4. **Readability**: Format queries with proper indentation and spacing
5. **Best Practices**: Follow PostgreSQL naming conventions
### POSTGRESQL FEATURES
- Use PostgreSQL-specific functions (COALESCE, EXTRACT, etc.)
- Leverage advanced features like CTEs, window functions, arrays
- Use proper PostgreSQL data types (TEXT, TIMESTAMPTZ, JSONB, etc.)
- Include appropriate LIMIT clauses for large result sets
### EXAMPLES
**Simple Select**: "Get all active users"
→ SELECT id, name, email, created_at
FROM users
WHERE active = true
ORDER BY created_at DESC;
**Complex Join**: "Get users with their order counts and total spent"
→ SELECT
u.id,
u.name,
u.email,
COUNT(o.id) as order_count,
COALESCE(SUM(o.total), 0) as total_spent
FROM users u
LEFT JOIN orders o ON u.id = o.user_id
WHERE u.active = true
GROUP BY u.id, u.name, u.email
HAVING COUNT(o.id) > 0
ORDER BY total_spent DESC;
**With CTE**: "Get top 10 products by sales"
→ WITH product_sales AS (
SELECT
p.id,
p.name,
SUM(oi.quantity * oi.price) as total_sales
FROM products p
JOIN order_items oi ON p.id = oi.product_id
JOIN orders o ON oi.order_id = o.id
WHERE o.created_at >= CURRENT_DATE - INTERVAL '30 days'
GROUP BY p.id, p.name
)
SELECT * FROM product_sales
ORDER BY total_sales DESC
LIMIT 10;
### REMEMBER
Return ONLY the SQL query - no explanations, no markdown, no extra text.`,
placeholder: 'Describe the SQL query you need...',
generationType: 'sql-query',
},
},
{
id: 'query',
@@ -127,6 +194,73 @@ export const PostgreSQLBlock: BlockConfig<PostgresResponse> = {
placeholder: 'SELECT * FROM table_name',
condition: { field: 'operation', value: 'execute' },
required: true,
wandConfig: {
enabled: true,
maintainHistory: true,
prompt: `You are an expert PostgreSQL database developer. Write PostgreSQL SQL queries based on the user's request.
### CONTEXT
{context}
### CRITICAL INSTRUCTION
Return ONLY the SQL query. Do not include any explanations, markdown formatting, comments, or additional text. Just the raw SQL query.
### QUERY GUIDELINES
1. **Syntax**: Use PostgreSQL-specific syntax and functions
2. **Performance**: Write efficient queries with proper indexing considerations
3. **Security**: Use parameterized queries when applicable
4. **Readability**: Format queries with proper indentation and spacing
5. **Best Practices**: Follow PostgreSQL naming conventions
### POSTGRESQL FEATURES
- Use PostgreSQL-specific functions (COALESCE, EXTRACT, etc.)
- Leverage advanced features like CTEs, window functions, arrays
- Use proper PostgreSQL data types (TEXT, TIMESTAMPTZ, JSONB, etc.)
- Include appropriate LIMIT clauses for large result sets
### EXAMPLES
**Simple Select**: "Get all active users"
→ SELECT id, name, email, created_at
FROM users
WHERE active = true
ORDER BY created_at DESC;
**Complex Join**: "Get users with their order counts and total spent"
→ SELECT
u.id,
u.name,
u.email,
COUNT(o.id) as order_count,
COALESCE(SUM(o.total), 0) as total_spent
FROM users u
LEFT JOIN orders o ON u.id = o.user_id
WHERE u.active = true
GROUP BY u.id, u.name, u.email
HAVING COUNT(o.id) > 0
ORDER BY total_spent DESC;
**With CTE**: "Get top 10 products by sales"
→ WITH product_sales AS (
SELECT
p.id,
p.name,
SUM(oi.quantity * oi.price) as total_sales
FROM products p
JOIN order_items oi ON p.id = oi.product_id
JOIN orders o ON oi.order_id = o.id
WHERE o.created_at >= CURRENT_DATE - INTERVAL '30 days'
GROUP BY p.id, p.name
)
SELECT * FROM product_sales
ORDER BY total_sales DESC
LIMIT 10;
### REMEMBER
Return ONLY the SQL query - no explanations, no markdown, no extra text.`,
placeholder: 'Describe the SQL query you need...',
generationType: 'sql-query',
},
},
// Data for insert operations
{

View File

@@ -94,6 +94,66 @@ export const SupabaseBlock: BlockConfig<SupabaseResponse> = {
placeholder: 'id=eq.123',
condition: { field: 'operation', value: 'get_row' },
required: true,
wandConfig: {
enabled: true,
maintainHistory: true,
prompt: `You are an expert in PostgREST API syntax. Generate PostgREST filter expressions based on the user's request.
### CONTEXT
{context}
### CRITICAL INSTRUCTION
Return ONLY the PostgREST filter expression. Do not include any explanations, markdown formatting, or additional text. Just the raw filter expression.
### POSTGREST FILTER SYNTAX
PostgREST uses a specific syntax for filtering data. The format is:
column=operator.value
### OPERATORS
- **eq** - equals: \`id=eq.123\`
- **neq** - not equals: \`status=neq.inactive\`
- **gt** - greater than: \`age=gt.18\`
- **gte** - greater than or equal: \`score=gte.80\`
- **lt** - less than: \`price=lt.100\`
- **lte** - less than or equal: \`rating=lte.5\`
- **like** - pattern matching: \`name=like.*john*\`
- **ilike** - case-insensitive like: \`email=ilike.*@gmail.com\`
- **in** - in list: \`category=in.(tech,science,art)\`
- **is** - is null/not null: \`deleted_at=is.null\`
- **not** - negation: \`not.and=(status.eq.active,verified.eq.true)\`
### COMBINING FILTERS
- **AND**: Use \`&\` or \`and=(...)\`: \`id=eq.123&status=eq.active\`
- **OR**: Use \`or=(...)\`: \`or=(status.eq.active,status.eq.pending)\`
### EXAMPLES
**Simple equality**: "Find user with ID 123"
→ id=eq.123
**Text search**: "Find users with Gmail addresses"
→ email=ilike.*@gmail.com
**Range filter**: "Find products under $50"
→ price=lt.50
**Multiple conditions**: "Find active users over 18"
→ age=gt.18&status=eq.active
**OR condition**: "Find active or pending orders"
→ or=(status.eq.active,status.eq.pending)
**In list**: "Find posts in specific categories"
→ category=in.(tech,science,health)
**Null check**: "Find users without a profile picture"
→ profile_image=is.null
### REMEMBER
Return ONLY the PostgREST filter expression - no explanations, no markdown, no extra text.`,
placeholder: 'Describe the filter condition you need...',
generationType: 'postgrest',
},
},
{
id: 'filter',
@@ -103,6 +163,66 @@ export const SupabaseBlock: BlockConfig<SupabaseResponse> = {
placeholder: 'id=eq.123',
condition: { field: 'operation', value: 'update' },
required: true,
wandConfig: {
enabled: true,
maintainHistory: true,
prompt: `You are an expert in PostgREST API syntax. Generate PostgREST filter expressions based on the user's request.
### CONTEXT
{context}
### CRITICAL INSTRUCTION
Return ONLY the PostgREST filter expression. Do not include any explanations, markdown formatting, or additional text. Just the raw filter expression.
### POSTGREST FILTER SYNTAX
PostgREST uses a specific syntax for filtering data. The format is:
column=operator.value
### OPERATORS
- **eq** - equals: \`id=eq.123\`
- **neq** - not equals: \`status=neq.inactive\`
- **gt** - greater than: \`age=gt.18\`
- **gte** - greater than or equal: \`score=gte.80\`
- **lt** - less than: \`price=lt.100\`
- **lte** - less than or equal: \`rating=lte.5\`
- **like** - pattern matching: \`name=like.*john*\`
- **ilike** - case-insensitive like: \`email=ilike.*@gmail.com\`
- **in** - in list: \`category=in.(tech,science,art)\`
- **is** - is null/not null: \`deleted_at=is.null\`
- **not** - negation: \`not.and=(status.eq.active,verified.eq.true)\`
### COMBINING FILTERS
- **AND**: Use \`&\` or \`and=(...)\`: \`id=eq.123&status=eq.active\`
- **OR**: Use \`or=(...)\`: \`or=(status.eq.active,status.eq.pending)\`
### EXAMPLES
**Simple equality**: "Find user with ID 123"
→ id=eq.123
**Text search**: "Find users with Gmail addresses"
→ email=ilike.*@gmail.com
**Range filter**: "Find products under $50"
→ price=lt.50
**Multiple conditions**: "Find active users over 18"
→ age=gt.18&status=eq.active
**OR condition**: "Find active or pending orders"
→ or=(status.eq.active,status.eq.pending)
**In list**: "Find posts in specific categories"
→ category=in.(tech,science,health)
**Null check**: "Find users without a profile picture"
→ profile_image=is.null
### REMEMBER
Return ONLY the PostgREST filter expression - no explanations, no markdown, no extra text.`,
placeholder: 'Describe the filter condition you need...',
generationType: 'postgrest',
},
},
{
id: 'filter',
@@ -112,6 +232,66 @@ export const SupabaseBlock: BlockConfig<SupabaseResponse> = {
placeholder: 'id=eq.123',
condition: { field: 'operation', value: 'delete' },
required: true,
wandConfig: {
enabled: true,
maintainHistory: true,
prompt: `You are an expert in PostgREST API syntax. Generate PostgREST filter expressions based on the user's request.
### CONTEXT
{context}
### CRITICAL INSTRUCTION
Return ONLY the PostgREST filter expression. Do not include any explanations, markdown formatting, or additional text. Just the raw filter expression.
### POSTGREST FILTER SYNTAX
PostgREST uses a specific syntax for filtering data. The format is:
column=operator.value
### OPERATORS
- **eq** - equals: \`id=eq.123\`
- **neq** - not equals: \`status=neq.inactive\`
- **gt** - greater than: \`age=gt.18\`
- **gte** - greater than or equal: \`score=gte.80\`
- **lt** - less than: \`price=lt.100\`
- **lte** - less than or equal: \`rating=lte.5\`
- **like** - pattern matching: \`name=like.*john*\`
- **ilike** - case-insensitive like: \`email=ilike.*@gmail.com\`
- **in** - in list: \`category=in.(tech,science,art)\`
- **is** - is null/not null: \`deleted_at=is.null\`
- **not** - negation: \`not.and=(status.eq.active,verified.eq.true)\`
### COMBINING FILTERS
- **AND**: Use \`&\` or \`and=(...)\`: \`id=eq.123&status=eq.active\`
- **OR**: Use \`or=(...)\`: \`or=(status.eq.active,status.eq.pending)\`
### EXAMPLES
**Simple equality**: "Find user with ID 123"
→ id=eq.123
**Text search**: "Find users with Gmail addresses"
→ email=ilike.*@gmail.com
**Range filter**: "Find products under $50"
→ price=lt.50
**Multiple conditions**: "Find active users over 18"
→ age=gt.18&status=eq.active
**OR condition**: "Find active or pending orders"
→ or=(status.eq.active,status.eq.pending)
**In list**: "Find posts in specific categories"
→ category=in.(tech,science,health)
**Null check**: "Find users without a profile picture"
→ profile_image=is.null
### REMEMBER
Return ONLY the PostgREST filter expression - no explanations, no markdown, no extra text.`,
placeholder: 'Describe the filter condition you need...',
generationType: 'postgrest',
},
},
// Optional filter for query operation
{
@@ -121,6 +301,66 @@ export const SupabaseBlock: BlockConfig<SupabaseResponse> = {
layout: 'full',
placeholder: 'status=eq.active',
condition: { field: 'operation', value: 'query' },
wandConfig: {
enabled: true,
maintainHistory: true,
prompt: `You are an expert in PostgREST API syntax. Generate PostgREST filter expressions based on the user's request.
### CONTEXT
{context}
### CRITICAL INSTRUCTION
Return ONLY the PostgREST filter expression. Do not include any explanations, markdown formatting, or additional text. Just the raw filter expression.
### POSTGREST FILTER SYNTAX
PostgREST uses a specific syntax for filtering data. The format is:
column=operator.value
### OPERATORS
- **eq** - equals: \`id=eq.123\`
- **neq** - not equals: \`status=neq.inactive\`
- **gt** - greater than: \`age=gt.18\`
- **gte** - greater than or equal: \`score=gte.80\`
- **lt** - less than: \`price=lt.100\`
- **lte** - less than or equal: \`rating=lte.5\`
- **like** - pattern matching: \`name=like.*john*\`
- **ilike** - case-insensitive like: \`email=ilike.*@gmail.com\`
- **in** - in list: \`category=in.(tech,science,art)\`
- **is** - is null/not null: \`deleted_at=is.null\`
- **not** - negation: \`not.and=(status.eq.active,verified.eq.true)\`
### COMBINING FILTERS
- **AND**: Use \`&\` or \`and=(...)\`: \`id=eq.123&status=eq.active\`
- **OR**: Use \`or=(...)\`: \`or=(status.eq.active,status.eq.pending)\`
### EXAMPLES
**Simple equality**: "Find user with ID 123"
→ id=eq.123
**Text search**: "Find users with Gmail addresses"
→ email=ilike.*@gmail.com
**Range filter**: "Find products under $50"
→ price=lt.50
**Multiple conditions**: "Find active users over 18"
→ age=gt.18&status=eq.active
**OR condition**: "Find active or pending orders"
→ or=(status.eq.active,status.eq.pending)
**In list**: "Find posts in specific categories"
→ category=in.(tech,science,health)
**Null check**: "Find users without a profile picture"
→ profile_image=is.null
### REMEMBER
Return ONLY the PostgREST filter expression - no explanations, no markdown, no extra text.`,
placeholder: 'Describe the filter condition...',
generationType: 'postgrest',
},
},
// Optional order by for query operation
{

View File
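Note: the filter strings these prompts generate plug directly into a PostgREST query string. A minimal sketch against the Supabase REST endpoint (project URL and key are placeholders):

// "Find active users over 18" → age=gt.18&status=eq.active
const filter = 'age=gt.18&status=eq.active'
const res = await fetch(`https://YOUR_PROJECT.supabase.co/rest/v1/users?select=*&${filter}`, {
  headers: { apikey: 'YOUR_ANON_KEY', Authorization: 'Bearer YOUR_ANON_KEY' },
})
const rows = await res.json() // matching rows as JSON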

@@ -17,6 +17,8 @@ export type GenerationType =
| 'json-object'
| 'system-prompt'
| 'custom-tool-schema'
| 'sql-query'
| 'postgrest'
// SubBlock types
export type SubBlockType =

View File

@@ -14,9 +14,9 @@ const Slider = React.forwardRef<
{...props}
>
<SliderPrimitive.Track className='relative h-2 w-full grow overflow-hidden rounded-full bg-secondary'>
<SliderPrimitive.Range className='absolute h-full bg-primary' />
<SliderPrimitive.Range className='absolute h-full bg-primary dark:bg-white' />
</SliderPrimitive.Track>
<SliderPrimitive.Thumb className='block h-5 w-5 rounded-full border-2 border-primary bg-background ring-offset-background transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50' />
<SliderPrimitive.Thumb className='block h-5 w-5 rounded-full border-2 border-primary bg-background ring-offset-background transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50 dark:border-white dark:bg-black' />
</SliderPrimitive.Root>
))
Slider.displayName = SliderPrimitive.Root.displayName

View File

@@ -961,9 +961,13 @@ export class Executor {
const connectedToStartBlock = this.actualWorkflow.connections
.filter((conn) => conn.source === initBlock.id)
.map((conn) => conn.target)
// Skip trigger-category targets when seeding from starter (manual/debug runs)
connectedToStartBlock.forEach((blockId) => {
context.activeExecutionPath.add(blockId)
const targetBlock = this.actualWorkflow.blocks.find((b) => b.id === blockId)
const isTriggerCategory = (targetBlock as any)?.metadata?.category === 'triggers'
if (!isTriggerCategory) {
context.activeExecutionPath.add(blockId)
}
})
}

View File

@@ -203,14 +203,18 @@ export class PathTracker {
if (!context.activeExecutionPath.has(conn.target)) {
const targetBlock = this.getBlock(conn.target)
const targetBlockType = targetBlock?.metadata?.id
const isTriggerCategory = (targetBlock as any)?.metadata?.category === 'triggers'
// Use routing strategy to determine if this connection should be activated
if (!Routing.shouldSkipConnection(conn.sourceHandle, targetBlockType || '')) {
context.activeExecutionPath.add(conn.target)
// Do not activate or traverse trigger blocks during downstream activation from manual paths
if (!isTriggerCategory) {
context.activeExecutionPath.add(conn.target)
// Recursively activate downstream paths if the target block should activate downstream
if (Routing.shouldActivateDownstream(targetBlockType || '')) {
this.activateDownstreamPathsSelectively(conn.target, context)
// Recursively activate downstream paths if the target block should activate downstream
if (Routing.shouldActivateDownstream(targetBlockType || '')) {
this.activateDownstreamPathsSelectively(conn.target, context)
}
}
}
}
@@ -233,7 +237,11 @@ export class PathTracker {
)
for (const conn of targetConnections) {
context.activeExecutionPath.add(conn.target)
const targetBlock = this.getBlock(conn.target)
const isTriggerCategory = (targetBlock as any)?.metadata?.category === 'triggers'
if (!isTriggerCategory) {
context.activeExecutionPath.add(conn.target)
}
logger.debug(`Condition ${block.id} activated path to: ${conn.target}`)
// Check if the selected target should activate downstream paths
@@ -282,13 +290,16 @@ export class PathTracker {
if (this.shouldActivateConnection(conn, hasError, isPartOfLoop, blockLoops, context)) {
const targetBlock = this.getBlock(conn.target)
const targetBlockType = targetBlock?.metadata?.id
const isTriggerCategory = (targetBlock as any)?.metadata?.category === 'triggers'
// Use routing strategy to determine if this connection should be activated
if (Routing.shouldSkipConnection(conn.sourceHandle, targetBlockType || '')) {
continue
}
context.activeExecutionPath.add(conn.target)
if (!isTriggerCategory) {
context.activeExecutionPath.add(conn.target)
}
}
}
}

View File
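Note: the Executor and PathTracker hunks above apply the same guard. A condensed sketch of the rule, with the block shape simplified:

// A block whose metadata.category is 'triggers' is never added to the
// active execution path by downstream activation — triggers run only
// when their own event fires.
type BlockLike = { metadata?: { category?: string } }

const isTriggerCategory = (block?: BlockLike): boolean =>
  block?.metadata?.category === 'triggers'

// usage inside PathTracker (conn/context come from the surrounding method):
// if (!isTriggerCategory(targetBlock)) context.activeExecutionPath.add(conn.target)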

@@ -782,6 +782,16 @@ export function useCollaborativeWorkflow() {
const newTriggerMode = !currentBlock.triggerMode
// If enabling trigger mode, proactively remove incoming edges for consistency across clients
if (newTriggerMode) {
const incomingEdges = Object.values(workflowStore.edges).filter((e) => e.target === id)
for (const edge of incomingEdges) {
executeQueuedOperation('remove', 'edge', { id: edge.id }, () =>
workflowStore.removeEdge(edge.id)
)
}
}
executeQueuedOperation(
'update-trigger-mode',
'block',

View File

@@ -1,7 +1,7 @@
'use client'
import { useCallback, useEffect, useState } from 'react'
import type { TagSlot } from '@/lib/constants/knowledge'
import type { TagSlot } from '@/lib/knowledge/consts'
import { createLogger } from '@/lib/logs/console/logger'
const logger = createLogger('useKnowledgeBaseTagDefinitions')

View File

@@ -1,7 +1,7 @@
'use client'
import { useCallback, useEffect, useState } from 'react'
import type { TagSlot } from '@/lib/constants/knowledge'
import type { TagSlot } from '@/lib/knowledge/consts'
import { createLogger } from '@/lib/logs/console/logger'
const logger = createLogger('useTagDefinitions')

View File

@@ -10,7 +10,7 @@ import { createAuthClient } from 'better-auth/react'
import type { auth } from '@/lib/auth'
import { env, getEnv } from '@/lib/env'
import { isProd } from '@/lib/environment'
import { SessionContext, type SessionHookResult } from '@/lib/session-context'
import { SessionContext, type SessionHookResult } from '@/lib/session/session-context'
export function getBaseURL() {
let baseURL

View File

@@ -1,53 +0,0 @@
/**
* Knowledge base and document constants
*/
// Tag slot configuration by field type
// Each field type maps to specific database columns
export const TAG_SLOT_CONFIG = {
text: {
slots: ['tag1', 'tag2', 'tag3', 'tag4', 'tag5', 'tag6', 'tag7'] as const,
maxSlots: 7,
},
// Future field types would be added here with their own database columns
// date: {
// slots: ['tag8', 'tag9'] as const,
// maxSlots: 2,
// },
// number: {
// slots: ['tag10', 'tag11'] as const,
// maxSlots: 2,
// },
} as const
// Currently supported field types
export const SUPPORTED_FIELD_TYPES = Object.keys(TAG_SLOT_CONFIG) as Array<
keyof typeof TAG_SLOT_CONFIG
>
// All tag slots (for backward compatibility)
export const TAG_SLOTS = TAG_SLOT_CONFIG.text.slots
// Maximum number of tag slots for text type (for backward compatibility)
export const MAX_TAG_SLOTS = TAG_SLOT_CONFIG.text.maxSlots
// Type for tag slot names
export type TagSlot = (typeof TAG_SLOTS)[number]
// Helper function to get available slots for a field type
export function getSlotsForFieldType(fieldType: string): readonly string[] {
const config = TAG_SLOT_CONFIG[fieldType as keyof typeof TAG_SLOT_CONFIG]
if (!config) {
return [] // Return empty array for unsupported field types - system will naturally handle this
}
return config.slots
}
// Helper function to get max slots for a field type
export function getMaxSlotsForFieldType(fieldType: string): number {
const config = TAG_SLOT_CONFIG[fieldType as keyof typeof TAG_SLOT_CONFIG]
if (!config) {
return 0 // Return 0 for unsupported field types
}
return config.maxSlots
}

View File
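Note: the file above was moved, not dropped — the same exports now live at '@/lib/knowledge/consts', as the import rewrites earlier in this diff show. Expected behavior of the helpers, for reference:

import { getMaxSlotsForFieldType, getSlotsForFieldType } from '@/lib/knowledge/consts'

getSlotsForFieldType('text') // ['tag1', 'tag2', ..., 'tag7']
getMaxSlotsForFieldType('text') // 7
getSlotsForFieldType('date') // [] — unsupported field types yield no slots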

@@ -139,6 +139,17 @@ export const env = createEnv({
RATE_LIMIT_ENTERPRISE_SYNC: z.string().optional().default('150'), // Enterprise tier sync API executions per minute
RATE_LIMIT_ENTERPRISE_ASYNC: z.string().optional().default('1000'), // Enterprise tier async API executions per minute
// Knowledge Base Processing Configuration - Shared across all processing methods
KB_CONFIG_MAX_DURATION: z.number().optional().default(300), // Max processing duration in s
KB_CONFIG_MAX_ATTEMPTS: z.number().optional().default(3), // Max retry attempts
KB_CONFIG_RETRY_FACTOR: z.number().optional().default(2), // Retry backoff factor
KB_CONFIG_MIN_TIMEOUT: z.number().optional().default(1000), // Min timeout in ms
KB_CONFIG_MAX_TIMEOUT: z.number().optional().default(10000), // Max timeout in ms
KB_CONFIG_CONCURRENCY_LIMIT: z.number().optional().default(20), // Queue concurrency limit
KB_CONFIG_BATCH_SIZE: z.number().optional().default(20), // Processing batch size
KB_CONFIG_DELAY_BETWEEN_BATCHES: z.number().optional().default(100), // Delay between batches in ms
KB_CONFIG_DELAY_BETWEEN_DOCUMENTS: z.number().optional().default(50), // Delay between documents in ms
// Real-time Communication
SOCKET_SERVER_URL: z.string().url().optional(), // WebSocket server URL for real-time features
SOCKET_PORT: z.number().optional(), // Port for WebSocket server

View File
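Note: the new KB_CONFIG_* entries use `z.number()` while the nearby rate-limit entries use `z.string()`, and process.env values arrive as strings, so numeric env vars typically need coercion. A hedged sketch of that pattern — whether this codebase coerces elsewhere is not visible in this diff:

import { z } from 'zod'

// z.coerce.number() parses "300" → 300 before validating,
// so KB_CONFIG_MAX_DURATION=300 in .env would pass the schema.
const KB_CONFIG_MAX_DURATION = z.coerce.number().optional().default(300)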

@@ -1,139 +1,108 @@
import { createReadStream, existsSync } from 'fs'
import { Readable } from 'stream'
import csvParser from 'csv-parser'
import { existsSync, readFileSync } from 'fs'
import * as Papa from 'papaparse'
import type { FileParseResult, FileParser } from '@/lib/file-parsers/types'
import { sanitizeTextForUTF8 } from '@/lib/file-parsers/utils'
import { createLogger } from '@/lib/logs/console/logger'
const logger = createLogger('CsvParser')
const PARSE_OPTIONS = {
header: true,
skipEmptyLines: true,
transformHeader: (header: string) => sanitizeTextForUTF8(String(header)),
transform: (value: string) => sanitizeTextForUTF8(String(value || '')),
}
export class CsvParser implements FileParser {
async parseFile(filePath: string): Promise<FileParseResult> {
return new Promise((resolve, reject) => {
try {
// Validate input
if (!filePath) {
return reject(new Error('No file path provided'))
}
// Check if file exists
if (!existsSync(filePath)) {
return reject(new Error(`File not found: ${filePath}`))
}
const results: Record<string, any>[] = []
const headers: string[] = []
createReadStream(filePath)
.on('error', (error: Error) => {
logger.error('CSV stream error:', error)
reject(new Error(`Failed to read CSV file: ${error.message}`))
})
.pipe(csvParser())
.on('headers', (headerList: string[]) => {
headers.push(...headerList)
})
.on('data', (data: Record<string, any>) => {
results.push(data)
})
.on('end', () => {
// Convert CSV data to a formatted string representation
let content = ''
// Add headers
if (headers.length > 0) {
const cleanHeaders = headers.map((h) => sanitizeTextForUTF8(String(h)))
content += `${cleanHeaders.join(', ')}\n`
}
// Add rows
results.forEach((row) => {
const cleanValues = Object.values(row).map((v) =>
sanitizeTextForUTF8(String(v || ''))
)
content += `${cleanValues.join(', ')}\n`
})
resolve({
content: sanitizeTextForUTF8(content),
metadata: {
rowCount: results.length,
headers: headers,
rawData: results,
},
})
})
.on('error', (error: Error) => {
logger.error('CSV parsing error:', error)
reject(new Error(`Failed to parse CSV file: ${error.message}`))
})
} catch (error) {
logger.error('CSV general error:', error)
reject(new Error(`Failed to process CSV file: ${(error as Error).message}`))
try {
if (!filePath) {
throw new Error('No file path provided')
}
})
if (!existsSync(filePath)) {
throw new Error(`File not found: ${filePath}`)
}
const fileContent = readFileSync(filePath, 'utf8')
const parseResult = Papa.parse(fileContent, PARSE_OPTIONS)
if (parseResult.errors && parseResult.errors.length > 0) {
const errorMessages = parseResult.errors.map((err) => err.message).join(', ')
logger.error('CSV parsing errors:', parseResult.errors)
throw new Error(`Failed to parse CSV file: ${errorMessages}`)
}
const results = parseResult.data as Record<string, any>[]
const headers = parseResult.meta.fields || []
let content = ''
if (headers.length > 0) {
const cleanHeaders = headers.map((h) => sanitizeTextForUTF8(String(h)))
content += `${cleanHeaders.join(', ')}\n`
}
results.forEach((row) => {
const cleanValues = Object.values(row).map((v) => sanitizeTextForUTF8(String(v || '')))
content += `${cleanValues.join(', ')}\n`
})
return {
content: sanitizeTextForUTF8(content),
metadata: {
rowCount: results.length,
headers: headers,
rawData: results,
},
}
} catch (error) {
logger.error('CSV general error:', error)
throw new Error(`Failed to process CSV file: ${(error as Error).message}`)
}
}
async parseBuffer(buffer: Buffer): Promise<FileParseResult> {
return new Promise((resolve, reject) => {
try {
logger.info('Parsing buffer, size:', buffer.length)
try {
logger.info('Parsing buffer, size:', buffer.length)
const results: Record<string, any>[] = []
const headers: string[] = []
const fileContent = buffer.toString('utf8')
// Create a readable stream from the buffer
const bufferStream = new Readable()
bufferStream.push(buffer)
bufferStream.push(null) // Signal the end of the stream
const parseResult = Papa.parse(fileContent, PARSE_OPTIONS)
bufferStream
.on('error', (error: Error) => {
logger.error('CSV buffer stream error:', error)
reject(new Error(`Failed to read CSV buffer: ${error.message}`))
})
.pipe(csvParser())
.on('headers', (headerList: string[]) => {
headers.push(...headerList)
})
.on('data', (data: Record<string, any>) => {
results.push(data)
})
.on('end', () => {
// Convert CSV data to a formatted string representation
let content = ''
// Add headers
if (headers.length > 0) {
const cleanHeaders = headers.map((h) => sanitizeTextForUTF8(String(h)))
content += `${cleanHeaders.join(', ')}\n`
}
// Add rows
results.forEach((row) => {
const cleanValues = Object.values(row).map((v) =>
sanitizeTextForUTF8(String(v || ''))
)
content += `${cleanValues.join(', ')}\n`
})
resolve({
content: sanitizeTextForUTF8(content),
metadata: {
rowCount: results.length,
headers: headers,
rawData: results,
},
})
})
.on('error', (error: Error) => {
logger.error('CSV parsing error:', error)
reject(new Error(`Failed to parse CSV buffer: ${error.message}`))
})
} catch (error) {
logger.error('CSV buffer parsing error:', error)
reject(new Error(`Failed to process CSV buffer: ${(error as Error).message}`))
if (parseResult.errors && parseResult.errors.length > 0) {
const errorMessages = parseResult.errors.map((err) => err.message).join(', ')
logger.error('CSV parsing errors:', parseResult.errors)
throw new Error(`Failed to parse CSV buffer: ${errorMessages}`)
}
})
const results = parseResult.data as Record<string, any>[]
const headers = parseResult.meta.fields || []
let content = ''
if (headers.length > 0) {
const cleanHeaders = headers.map((h) => sanitizeTextForUTF8(String(h)))
content += `${cleanHeaders.join(', ')}\n`
}
results.forEach((row) => {
const cleanValues = Object.values(row).map((v) => sanitizeTextForUTF8(String(v || '')))
content += `${cleanValues.join(', ')}\n`
})
return {
content: sanitizeTextForUTF8(content),
metadata: {
rowCount: results.length,
headers: headers,
rawData: results,
},
}
} catch (error) {
logger.error('CSV buffer parsing error:', error)
throw new Error(`Failed to process CSV buffer: ${(error as Error).message}`)
}
}
}

View File
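Note: a minimal standalone sketch of the papaparse call the rewritten parser relies on, with the options trimmed to the essentials:

import * as Papa from 'papaparse'

const result = Papa.parse('name,age\nAda,36\n', { header: true, skipEmptyLines: true })
result.data // [{ name: 'Ada', age: '36' }] — values stay strings unless dynamicTyping is set
result.meta.fields // ['name', 'age'] — the headers the parser above reads
result.errors // [] on success; the parser above throws when this is non-empty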

@@ -9,19 +9,16 @@ const logger = createLogger('DocParser')
export class DocParser implements FileParser {
async parseFile(filePath: string): Promise<FileParseResult> {
try {
// Validate input
if (!filePath) {
throw new Error('No file path provided')
}
// Check if file exists
if (!existsSync(filePath)) {
throw new Error(`File not found: ${filePath}`)
}
logger.info(`Parsing DOC file: ${filePath}`)
// Read the file
const buffer = await readFile(filePath)
return this.parseBuffer(buffer)
} catch (error) {
@@ -38,45 +35,37 @@ export class DocParser implements FileParser {
throw new Error('Empty buffer provided')
}
// Try to dynamically import officeparser
let WordExtractor
let parseOfficeAsync
try {
WordExtractor = (await import('word-extractor')).default
const officeParser = await import('officeparser')
parseOfficeAsync = officeParser.parseOfficeAsync
} catch (importError) {
logger.warn('word-extractor not available, using fallback extraction')
logger.warn('officeparser not available, using fallback extraction')
return this.fallbackExtraction(buffer)
}
try {
const extractor = new WordExtractor()
const extracted = await extractor.extract(buffer)
const result = await parseOfficeAsync(buffer)
const content = sanitizeTextForUTF8(extracted.getBody())
const headers = extracted.getHeaders()
const footers = extracted.getFooters()
// Combine body with headers/footers if they exist
let fullContent = content
if (headers?.trim()) {
fullContent = `${sanitizeTextForUTF8(headers)}\n\n${fullContent}`
}
if (footers?.trim()) {
fullContent = `${fullContent}\n\n${sanitizeTextForUTF8(footers)}`
if (!result) {
throw new Error('officeparser returned no result')
}
logger.info('DOC parsing completed successfully')
const resultString = typeof result === 'string' ? result : String(result)
const content = sanitizeTextForUTF8(resultString.trim())
logger.info('DOC parsing completed successfully with officeparser')
return {
content: fullContent.trim(),
content: content,
metadata: {
hasHeaders: !!headers?.trim(),
hasFooters: !!footers?.trim(),
characterCount: fullContent.length,
extractionMethod: 'word-extractor',
characterCount: content.length,
extractionMethod: 'officeparser',
},
}
} catch (extractError) {
logger.warn('word-extractor failed, using fallback:', extractError)
logger.warn('officeparser failed, using fallback:', extractError)
return this.fallbackExtraction(buffer)
}
} catch (error) {
@@ -85,25 +74,16 @@ export class DocParser implements FileParser {
}
}
/**
* Fallback extraction method for when officeparser is not available
* This is a very basic extraction that looks for readable text in the binary
*/
private fallbackExtraction(buffer: Buffer): FileParseResult {
logger.info('Using fallback text extraction for DOC file')
// Convert buffer to string and try to extract readable text
// This is very basic and won't work well for complex DOC files
const text = buffer.toString('utf8', 0, Math.min(buffer.length, 100000)) // Limit to first 100KB
const text = buffer.toString('utf8', 0, Math.min(buffer.length, 100000))
// Extract sequences of printable ASCII characters
const readableText = text
.match(/[\x20-\x7E\s]{4,}/g) // Find sequences of 4+ printable characters
.match(/[\x20-\x7E\s]{4,}/g)
?.filter(
(chunk) =>
chunk.trim().length > 10 && // Minimum length
/[a-zA-Z]/.test(chunk) && // Must contain letters
!/^[\x00-\x1F]*$/.test(chunk) // Not just control characters
chunk.trim().length > 10 && /[a-zA-Z]/.test(chunk) && !/^[\x00-\x1F]*$/.test(chunk)
)
.join(' ')
.replace(/\s+/g, ' ')
@@ -118,8 +98,7 @@ export class DocParser implements FileParser {
metadata: {
extractionMethod: 'fallback',
characterCount: content.length,
warning:
'Basic text extraction used. For better results, install word-extractor package or convert to DOCX format.',
warning: 'Basic text extraction used. For better results, convert to DOCX format.',
},
}
}

View File

@@ -14,15 +14,12 @@ interface MammothResult {
export class DocxParser implements FileParser {
async parseFile(filePath: string): Promise<FileParseResult> {
try {
// Validate input
if (!filePath) {
throw new Error('No file path provided')
}
// Read the file
const buffer = await readFile(filePath)
// Use parseBuffer for consistent implementation
return this.parseBuffer(buffer)
} catch (error) {
logger.error('DOCX file error:', error)
@@ -34,10 +31,8 @@ export class DocxParser implements FileParser {
try {
logger.info('Parsing buffer, size:', buffer.length)
// Extract text with mammoth
const result = await mammoth.extractRawText({ buffer })
// Extract HTML for metadata (optional - won't fail if this fails)
let htmlResult: MammothResult = { value: '', messages: [] }
try {
htmlResult = await mammoth.convertToHtml({ buffer })

View File

@@ -0,0 +1,283 @@
import { readFile } from 'fs/promises'
import * as cheerio from 'cheerio'
import type { FileParseResult, FileParser } from '@/lib/file-parsers/types'
import { sanitizeTextForUTF8 } from '@/lib/file-parsers/utils'
import { createLogger } from '@/lib/logs/console/logger'
const logger = createLogger('HtmlParser')
export class HtmlParser implements FileParser {
async parseFile(filePath: string): Promise<FileParseResult> {
try {
if (!filePath) {
throw new Error('No file path provided')
}
const buffer = await readFile(filePath)
return this.parseBuffer(buffer)
} catch (error) {
logger.error('HTML file error:', error)
throw new Error(`Failed to parse HTML file: ${(error as Error).message}`)
}
}
async parseBuffer(buffer: Buffer): Promise<FileParseResult> {
try {
logger.info('Parsing HTML buffer, size:', buffer.length)
const htmlContent = buffer.toString('utf-8')
const $ = cheerio.load(htmlContent)
// Extract meta information before removing tags
const title = $('title').text().trim()
const metaDescription = $('meta[name="description"]').attr('content') || ''
$('script, style, noscript, meta, link, iframe, object, embed, svg').remove()
$.root()
.contents()
.filter(function () {
return this.type === 'comment'
})
.remove()
const content = this.extractStructuredText($)
const sanitizedContent = sanitizeTextForUTF8(content)
const characterCount = sanitizedContent.length
const wordCount = sanitizedContent.split(/\s+/).filter((word) => word.length > 0).length
const estimatedTokenCount = Math.ceil(characterCount / 4)
const headings = this.extractHeadings($)
const links = this.extractLinks($)
return {
content: sanitizedContent,
metadata: {
title,
metaDescription,
characterCount,
wordCount,
tokenCount: estimatedTokenCount,
headings,
links: links.slice(0, 50),
hasImages: $('img').length > 0,
imageCount: $('img').length,
hasTable: $('table').length > 0,
tableCount: $('table').length,
hasList: $('ul, ol').length > 0,
listCount: $('ul, ol').length,
},
}
} catch (error) {
logger.error('HTML buffer parsing error:', error)
throw new Error(`Failed to parse HTML buffer: ${(error as Error).message}`)
}
}
/**
* Extract structured text content preserving document hierarchy
*/
private extractStructuredText($: cheerio.CheerioAPI): string {
const contentParts: string[] = []
const rootElement = $('body').length > 0 ? $('body') : $.root()
this.processElement($, rootElement, contentParts, 0)
return contentParts.join('\n').trim()
}
/**
* Recursively process elements to extract text with structure
*/
private processElement(
$: cheerio.CheerioAPI,
element: cheerio.Cheerio<any>,
contentParts: string[],
depth: number
): void {
element.contents().each((_, node) => {
if (node.type === 'text') {
const text = $(node).text().trim()
if (text) {
contentParts.push(text)
}
} else if (node.type === 'tag') {
const $node = $(node)
const tagName = node.tagName?.toLowerCase()
switch (tagName) {
case 'h1':
case 'h2':
case 'h3':
case 'h4':
case 'h5':
case 'h6': {
const headingText = $node.text().trim()
if (headingText) {
contentParts.push(`\n${headingText}\n`)
}
break
}
case 'p': {
const paragraphText = $node.text().trim()
if (paragraphText) {
contentParts.push(`${paragraphText}\n`)
}
break
}
case 'br':
contentParts.push('\n')
break
case 'hr':
contentParts.push('\n---\n')
break
case 'li': {
const listItemText = $node.text().trim()
if (listItemText) {
const indent = ' '.repeat(Math.min(depth, 3))
contentParts.push(`${indent}${listItemText}`)
}
break
}
case 'ul':
case 'ol':
contentParts.push('\n')
this.processElement($, $node, contentParts, depth + 1)
contentParts.push('\n')
break
case 'table':
this.processTable($, $node, contentParts)
break
case 'blockquote': {
const quoteText = $node.text().trim()
if (quoteText) {
contentParts.push(`\n> ${quoteText}\n`)
}
break
}
case 'pre':
case 'code': {
const codeText = $node.text().trim()
if (codeText) {
contentParts.push(`\n\`\`\`\n${codeText}\n\`\`\`\n`)
}
break
}
case 'div':
case 'section':
case 'article':
case 'main':
case 'aside':
case 'nav':
case 'header':
case 'footer':
this.processElement($, $node, contentParts, depth)
break
case 'a': {
const linkText = $node.text().trim()
const href = $node.attr('href')
if (linkText) {
if (href?.startsWith('http')) {
contentParts.push(`${linkText} (${href})`)
} else {
contentParts.push(linkText)
}
}
break
}
case 'img': {
const alt = $node.attr('alt')
if (alt) {
contentParts.push(`[Image: ${alt}]`)
}
break
}
default:
this.processElement($, $node, contentParts, depth)
}
}
})
}
/**
* Process table elements to extract structured data
*/
private processTable(
$: cheerio.CheerioAPI,
table: cheerio.Cheerio<any>,
contentParts: string[]
): void {
contentParts.push('\n[Table]')
table.find('tr').each((_, row) => {
const $row = $(row)
const cells: string[] = []
$row.find('td, th').each((_, cell) => {
const cellText = $(cell).text().trim()
cells.push(cellText || '')
})
if (cells.length > 0) {
contentParts.push(`| ${cells.join(' | ')} |`)
}
})
contentParts.push('[/Table]\n')
}
/**
* Extract heading structure for metadata
*/
private extractHeadings($: cheerio.CheerioAPI): Array<{ level: number; text: string }> {
const headings: Array<{ level: number; text: string }> = []
$('h1, h2, h3, h4, h5, h6').each((_, element) => {
const $element = $(element)
const tagName = element.tagName?.toLowerCase()
const level = Number.parseInt(tagName?.charAt(1) || '1', 10)
const text = $element.text().trim()
if (text) {
headings.push({ level, text })
}
})
return headings
}
/**
* Extract links from the document
*/
private extractLinks($: cheerio.CheerioAPI): Array<{ text: string; href: string }> {
const links: Array<{ text: string; href: string }> = []
$('a[href]').each((_, element) => {
const $element = $(element)
const href = $element.attr('href')
const text = $element.text().trim()
if (href && text && href.startsWith('http')) {
links.push({ text, href })
}
})
return links
}
}

View File
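Note: illustrative use of the new parser — the output comments reflect what the code above would produce for this input:

import { HtmlParser } from '@/lib/file-parsers/html-parser'

const parser = new HtmlParser()
const html = '<html><head><title>Hi</title></head><body><h1>Hello</h1><p>World</p></body></html>'
const result = await parser.parseBuffer(Buffer.from(html))
// result.metadata.title === 'Hi' (read before tags are stripped)
// result.content contains "Hello" and "World"; scripts/styles would be removed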

@@ -51,6 +51,23 @@ const mockMdParseFile = vi.fn().mockResolvedValue({
},
})
const mockPptxParseFile = vi.fn().mockResolvedValue({
content: 'Parsed PPTX content',
metadata: {
slideCount: 5,
extractionMethod: 'officeparser',
},
})
const mockHtmlParseFile = vi.fn().mockResolvedValue({
content: 'Parsed HTML content',
metadata: {
title: 'Test HTML Document',
headingCount: 3,
linkCount: 2,
},
})
const createMockModule = () => {
const mockParsers: Record<string, FileParser> = {
pdf: { parseFile: mockPdfParseFile },
@@ -58,6 +75,10 @@ const createMockModule = () => {
docx: { parseFile: mockDocxParseFile },
txt: { parseFile: mockTxtParseFile },
md: { parseFile: mockMdParseFile },
pptx: { parseFile: mockPptxParseFile },
ppt: { parseFile: mockPptxParseFile },
html: { parseFile: mockHtmlParseFile },
htm: { parseFile: mockHtmlParseFile },
}
return {
@@ -143,6 +164,18 @@ describe('File Parsers', () => {
})),
}))
vi.doMock('@/lib/file-parsers/pptx-parser', () => ({
PptxParser: vi.fn().mockImplementation(() => ({
parseFile: mockPptxParseFile,
})),
}))
vi.doMock('@/lib/file-parsers/html-parser', () => ({
HtmlParser: vi.fn().mockImplementation(() => ({
parseFile: mockHtmlParseFile,
})),
}))
global.console = {
...console,
log: vi.fn(),
@@ -261,6 +294,82 @@ describe('File Parsers', () => {
const { parseFile } = await import('@/lib/file-parsers/index')
const result = await parseFile('/test/files/document.md')
expect(result).toEqual(expectedResult)
})
it('should parse PPTX files successfully', async () => {
const expectedResult = {
content: 'Parsed PPTX content',
metadata: {
slideCount: 5,
extractionMethod: 'officeparser',
},
}
mockPptxParseFile.mockResolvedValueOnce(expectedResult)
mockExistsSync.mockReturnValue(true)
const { parseFile } = await import('@/lib/file-parsers/index')
const result = await parseFile('/test/files/presentation.pptx')
expect(result).toEqual(expectedResult)
})
it('should parse PPT files successfully', async () => {
const expectedResult = {
content: 'Parsed PPTX content',
metadata: {
slideCount: 5,
extractionMethod: 'officeparser',
},
}
mockPptxParseFile.mockResolvedValueOnce(expectedResult)
mockExistsSync.mockReturnValue(true)
const { parseFile } = await import('@/lib/file-parsers/index')
const result = await parseFile('/test/files/presentation.ppt')
expect(result).toEqual(expectedResult)
})
it('should parse HTML files successfully', async () => {
const expectedResult = {
content: 'Parsed HTML content',
metadata: {
title: 'Test HTML Document',
headingCount: 3,
linkCount: 2,
},
}
mockHtmlParseFile.mockResolvedValueOnce(expectedResult)
mockExistsSync.mockReturnValue(true)
const { parseFile } = await import('@/lib/file-parsers/index')
const result = await parseFile('/test/files/document.html')
expect(result).toEqual(expectedResult)
})
it('should parse HTM files successfully', async () => {
const expectedResult = {
content: 'Parsed HTML content',
metadata: {
title: 'Test HTML Document',
headingCount: 3,
linkCount: 2,
},
}
mockHtmlParseFile.mockResolvedValueOnce(expectedResult)
mockExistsSync.mockReturnValue(true)
const { parseFile } = await import('@/lib/file-parsers/index')
const result = await parseFile('/test/files/document.htm')
expect(result).toEqual(expectedResult)
})
it('should throw error for unsupported file types', async () => {
@@ -292,6 +401,10 @@ describe('File Parsers', () => {
expect(isSupportedFileType('docx')).toBe(true)
expect(isSupportedFileType('txt')).toBe(true)
expect(isSupportedFileType('md')).toBe(true)
expect(isSupportedFileType('pptx')).toBe(true)
expect(isSupportedFileType('ppt')).toBe(true)
expect(isSupportedFileType('html')).toBe(true)
expect(isSupportedFileType('htm')).toBe(true)
})
it('should return false for unsupported file types', async () => {
@@ -308,6 +421,8 @@ describe('File Parsers', () => {
expect(isSupportedFileType('CSV')).toBe(true)
expect(isSupportedFileType('TXT')).toBe(true)
expect(isSupportedFileType('MD')).toBe(true)
expect(isSupportedFileType('PPTX')).toBe(true)
expect(isSupportedFileType('HTML')).toBe(true)
})
it('should handle errors gracefully', async () => {

View File

@@ -7,7 +7,6 @@ import { createLogger } from '@/lib/logs/console/logger'
const logger = createLogger('FileParser')
// Lazy-loaded parsers to avoid initialization issues
let parserInstances: Record<string, FileParser> | null = null
/**
@@ -18,25 +17,20 @@ function getParserInstances(): Record<string, FileParser> {
parserInstances = {}
try {
// Import parsers only when needed - with try/catch for each one
try {
logger.info('Attempting to load PDF parser...')
try {
// First try to use the pdf-parse library
// Import the PdfParser using ES module import to avoid test file access
const { PdfParser } = require('@/lib/file-parsers/pdf-parser')
parserInstances.pdf = new PdfParser()
logger.info('PDF parser loaded successfully')
} catch (pdfParseError) {
// If that fails, fallback to our raw PDF parser
logger.error('Failed to load primary PDF parser:', pdfParseError)
} catch (pdfLibError) {
logger.error('Failed to load primary PDF parser:', pdfLibError)
logger.info('Falling back to raw PDF parser')
parserInstances.pdf = new RawPdfParser()
logger.info('Raw PDF parser loaded successfully')
}
} catch (error) {
logger.error('Failed to load any PDF parser:', error)
// Create a simple fallback that just returns the file size and a message
parserInstances.pdf = {
async parseFile(filePath: string): Promise<FileParseResult> {
const buffer = await readFile(filePath)
@@ -100,10 +94,26 @@ function getParserInstances(): Record<string, FileParser> {
try {
const { XlsxParser } = require('@/lib/file-parsers/xlsx-parser')
parserInstances.xlsx = new XlsxParser()
parserInstances.xls = new XlsxParser() // Both xls and xlsx use the same parser
parserInstances.xls = new XlsxParser()
} catch (error) {
logger.error('Failed to load XLSX parser:', error)
}
try {
const { PptxParser } = require('@/lib/file-parsers/pptx-parser')
parserInstances.pptx = new PptxParser()
parserInstances.ppt = new PptxParser()
} catch (error) {
logger.error('Failed to load PPTX parser:', error)
}
try {
const { HtmlParser } = require('@/lib/file-parsers/html-parser')
parserInstances.html = new HtmlParser()
parserInstances.htm = new HtmlParser()
} catch (error) {
logger.error('Failed to load HTML parser:', error)
}
} catch (error) {
logger.error('Error loading file parsers:', error)
}
@@ -119,12 +129,10 @@ function getParserInstances(): Record<string, FileParser> {
*/
export async function parseFile(filePath: string): Promise<FileParseResult> {
try {
// Validate input
if (!filePath) {
throw new Error('No file path provided')
}
// Check if file exists
if (!existsSync(filePath)) {
throw new Error(`File not found: ${filePath}`)
}
@@ -158,7 +166,6 @@ export async function parseFile(filePath: string): Promise<FileParseResult> {
*/
export async function parseBuffer(buffer: Buffer, extension: string): Promise<FileParseResult> {
try {
// Validate input
if (!buffer || buffer.length === 0) {
throw new Error('Empty buffer provided')
}
@@ -182,7 +189,6 @@ export async function parseBuffer(buffer: Buffer, extension: string): Promise<Fi
logger.info('Using parser for extension:', normalizedExtension)
const parser = parsers[normalizedExtension]
// Check if parser supports buffer parsing
if (parser.parseBuffer) {
return await parser.parseBuffer(buffer)
}
@@ -207,5 +213,4 @@ export function isSupportedFileType(extension: string): extension is SupportedFi
}
}
// Type exports
export type { FileParseResult, FileParser, SupportedFileType }

View File
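Note: usage of the parser registry after this change (file paths are placeholders):

import { isSupportedFileType, parseBuffer, parseFile } from '@/lib/file-parsers/index'

isSupportedFileType('pptx') // true — new in this change, along with ppt/html/htm
const fromDisk = await parseFile('/tmp/deck.pptx')
// parsers that implement parseBuffer are used directly, as shown above
const fromMemory = await parseBuffer(Buffer.from('<p>hi</p>'), 'html')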

@@ -1,5 +1,6 @@
import { readFile } from 'fs/promises'
import type { FileParseResult, FileParser } from '@/lib/file-parsers/types'
import { sanitizeTextForUTF8 } from '@/lib/file-parsers/utils'
import { createLogger } from '@/lib/logs/console/logger'
const logger = createLogger('MdParser')
@@ -7,15 +8,12 @@ const logger = createLogger('MdParser')
export class MdParser implements FileParser {
async parseFile(filePath: string): Promise<FileParseResult> {
try {
// Validate input
if (!filePath) {
throw new Error('No file path provided')
}
// Read the file
const buffer = await readFile(filePath)
// Use parseBuffer for consistent implementation
return this.parseBuffer(buffer)
} catch (error) {
logger.error('MD file error:', error)
@@ -27,14 +25,14 @@ export class MdParser implements FileParser {
try {
logger.info('Parsing buffer, size:', buffer.length)
// Extract content
const result = buffer.toString('utf-8')
const content = sanitizeTextForUTF8(result)
return {
content: result,
content,
metadata: {
characterCount: result.length,
tokenCount: result.length / 4,
characterCount: content.length,
tokenCount: Math.floor(content.length / 4),
},
}
} catch (error) {

View File

@@ -1,22 +1,21 @@
import { readFile } from 'fs/promises'
// @ts-ignore
import * as pdfParseLib from 'pdf-parse/lib/pdf-parse.js'
import { PDFDocument } from 'pdf-lib'
import type { FileParseResult, FileParser } from '@/lib/file-parsers/types'
import { createLogger } from '@/lib/logs/console/logger'
import { RawPdfParser } from './raw-pdf-parser'
const logger = createLogger('PdfParser')
const rawPdfParser = new RawPdfParser()
export class PdfParser implements FileParser {
async parseFile(filePath: string): Promise<FileParseResult> {
try {
logger.info('Starting to parse file:', filePath)
// Make sure we're only parsing the provided file path
if (!filePath) {
throw new Error('No file path provided')
}
// Read the file
logger.info('Reading file...')
const dataBuffer = await readFile(filePath)
logger.info('File read successfully, size:', dataBuffer.length)
@@ -32,93 +31,66 @@ export class PdfParser implements FileParser {
try {
logger.info('Starting to parse buffer, size:', dataBuffer.length)
// Try to parse with pdf-parse library first
try {
logger.info('Attempting to parse with pdf-parse library...')
logger.info('Attempting to parse with pdf-lib library...')
// Parse PDF with direct function call to avoid test file access
logger.info('Starting PDF parsing...')
const data = await pdfParseLib.default(dataBuffer)
logger.info('PDF parsed successfully with pdf-parse, pages:', data.numpages)
const pdfDoc = await PDFDocument.load(dataBuffer)
const pages = pdfDoc.getPages()
const pageCount = pages.length
logger.info('PDF parsed successfully with pdf-lib, pages:', pageCount)
const metadata: Record<string, any> = {
pageCount,
}
try {
const title = pdfDoc.getTitle()
const author = pdfDoc.getAuthor()
const subject = pdfDoc.getSubject()
const creator = pdfDoc.getCreator()
const producer = pdfDoc.getProducer()
const creationDate = pdfDoc.getCreationDate()
const modificationDate = pdfDoc.getModificationDate()
if (title) metadata.title = title
if (author) metadata.author = author
if (subject) metadata.subject = subject
if (creator) metadata.creator = creator
if (producer) metadata.producer = producer
if (creationDate) metadata.creationDate = creationDate.toISOString()
if (modificationDate) metadata.modificationDate = modificationDate.toISOString()
} catch (metadataError) {
logger.warn('Could not extract PDF metadata:', metadataError)
}
logger.info(
'pdf-lib loaded successfully, but text extraction requires fallback to raw parser'
)
const rawResult = await rawPdfParser.parseBuffer(dataBuffer)
return {
content: data.text,
content: rawResult.content,
metadata: {
pageCount: data.numpages,
info: data.info,
version: data.version,
...rawResult.metadata,
...metadata,
source: 'pdf-lib + raw-parser',
},
}
} catch (pdfParseError: unknown) {
logger.error('PDF-parse library failed:', pdfParseError)
} catch (pdfLibError: unknown) {
logger.error('PDF-lib library failed:', pdfLibError)
// Fallback to manual text extraction
logger.info('Falling back to manual text extraction...')
// Extract basic PDF info from raw content
const rawContent = dataBuffer.toString('utf-8', 0, Math.min(10000, dataBuffer.length))
let version = 'Unknown'
let pageCount = 0
// Try to extract PDF version
const versionMatch = rawContent.match(/%PDF-(\d+\.\d+)/)
if (versionMatch?.[1]) {
version = versionMatch[1]
}
// Try to get page count
const pageMatches = rawContent.match(/\/Type\s*\/Page\b/g)
if (pageMatches) {
pageCount = pageMatches.length
}
// Try to extract text by looking for text-related operators in the PDF
let extractedText = ''
// Look for text in the PDF content using common patterns
const textMatches = rawContent.match(/BT[\s\S]*?ET/g)
if (textMatches && textMatches.length > 0) {
extractedText = textMatches
.map((textBlock) => {
// Extract text objects (Tj, TJ) from the text block
const textObjects = textBlock.match(/\([^)]*\)\s*Tj|\[[^\]]*\]\s*TJ/g)
if (textObjects) {
return textObjects
.map((obj) => {
// Clean up text objects
return (
obj
.replace(
/\(([^)]*)\)\s*Tj|\[([^\]]*)\]\s*TJ/g,
(match, p1, p2) => p1 || p2 || ''
)
// Clean up PDF escape sequences
.replace(/\\(\d{3}|[()\\])/g, '')
.replace(/\\\\/g, '\\')
.replace(/\\\(/g, '(')
.replace(/\\\)/g, ')')
)
})
.join(' ')
}
return ''
})
.join('\n')
}
// If we couldn't extract text or the text is too short, return a fallback message
if (!extractedText || extractedText.length < 50) {
extractedText = `This PDF contains ${pageCount} page(s) but text extraction was not successful.`
}
logger.info('Falling back to raw PDF parser...')
const rawResult = await rawPdfParser.parseBuffer(dataBuffer)
return {
content: extractedText,
...rawResult,
metadata: {
pageCount,
version,
...rawResult.metadata,
fallback: true,
error: (pdfParseError as Error).message || 'Unknown error',
source: 'raw-parser-only',
error: (pdfLibError as Error).message || 'Unknown error',
},
}
}

View File
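Note: pdf-lib exposes document structure and metadata but no text extraction, which is why the hunk above pairs it with the raw parser. A minimal sketch of the pdf-lib calls used there:

import { readFile } from 'fs/promises'
import { PDFDocument } from 'pdf-lib'

const dataBuffer = await readFile('/tmp/sample.pdf')
const pdfDoc = await PDFDocument.load(dataBuffer)
const pageCount = pdfDoc.getPages().length
const title = pdfDoc.getTitle() // undefined when the PDF has no title metadata
const created = pdfDoc.getCreationDate()?.toISOString()
// Text content still comes from RawPdfParser.parseBuffer(dataBuffer)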

@@ -0,0 +1,106 @@
import { existsSync } from 'fs'
import { readFile } from 'fs/promises'
import type { FileParseResult, FileParser } from '@/lib/file-parsers/types'
import { sanitizeTextForUTF8 } from '@/lib/file-parsers/utils'
import { createLogger } from '@/lib/logs/console/logger'
const logger = createLogger('PptxParser')
export class PptxParser implements FileParser {
async parseFile(filePath: string): Promise<FileParseResult> {
try {
if (!filePath) {
throw new Error('No file path provided')
}
if (!existsSync(filePath)) {
throw new Error(`File not found: ${filePath}`)
}
logger.info(`Parsing PowerPoint file: ${filePath}`)
const buffer = await readFile(filePath)
return this.parseBuffer(buffer)
} catch (error) {
logger.error('PowerPoint file parsing error:', error)
throw new Error(`Failed to parse PowerPoint file: ${(error as Error).message}`)
}
}
async parseBuffer(buffer: Buffer): Promise<FileParseResult> {
try {
logger.info('Parsing PowerPoint buffer, size:', buffer.length)
if (!buffer || buffer.length === 0) {
throw new Error('Empty buffer provided')
}
let parseOfficeAsync
try {
const officeParser = await import('officeparser')
parseOfficeAsync = officeParser.parseOfficeAsync
} catch (_importError) {
logger.warn('officeparser not available, using fallback extraction')
return this.fallbackExtraction(buffer)
}
try {
const result = await parseOfficeAsync(buffer)
if (!result || typeof result !== 'string') {
throw new Error('officeparser returned invalid result')
}
const content = sanitizeTextForUTF8(result.trim())
logger.info('PowerPoint parsing completed successfully with officeparser')
return {
content: content,
metadata: {
characterCount: content.length,
extractionMethod: 'officeparser',
},
}
} catch (extractError) {
logger.warn('officeparser failed, using fallback:', extractError)
return this.fallbackExtraction(buffer)
}
} catch (error) {
logger.error('PowerPoint buffer parsing error:', error)
throw new Error(`Failed to parse PowerPoint buffer: ${(error as Error).message}`)
}
}
private fallbackExtraction(buffer: Buffer): FileParseResult {
logger.info('Using fallback text extraction for PowerPoint file')
const text = buffer.toString('utf8', 0, Math.min(buffer.length, 200000))
const readableText = text
.match(/[\x20-\x7E\s]{4,}/g)
?.filter(
(chunk) =>
chunk.trim().length > 10 &&
/[a-zA-Z]/.test(chunk) &&
!/^[\x00-\x1F]*$/.test(chunk) &&
!/^[^\w\s]*$/.test(chunk)
)
.join(' ')
.replace(/\s+/g, ' ')
.trim()
const content = readableText
? sanitizeTextForUTF8(readableText)
: 'Unable to extract text from PowerPoint file. Please ensure the file contains readable text content.'
return {
content,
metadata: {
extractionMethod: 'fallback',
characterCount: content.length,
warning: 'Basic text extraction used',
},
}
}
}
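// Usage sketch (hypothetical caller; the .pptx path is an assumption, not from this diff):
async function demoPptxParse(): Promise<void> {
const parser = new PptxParser()
const result = await parser.parseFile('/tmp/deck.pptx')
// extractionMethod is 'officeparser', or 'fallback' when the optional dependency is missing
console.log(result.metadata?.extractionMethod, result.content.length)
}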

View File

@@ -6,14 +6,9 @@ import { createLogger } from '@/lib/logs/console/logger'
const logger = createLogger('RawPdfParser')
// Promisify zlib functions
const inflateAsync = promisify(zlib.inflate)
const unzipAsync = promisify(zlib.unzip)
/**
* A simple PDF parser that extracts readable text from a PDF file.
* This is used as a fallback when the pdf-parse library fails.
*/
export class RawPdfParser implements FileParser {
async parseFile(filePath: string): Promise<FileParseResult> {
try {
@@ -23,7 +18,6 @@ export class RawPdfParser implements FileParser {
throw new Error('No file path provided')
}
// Read the file
logger.info('Reading file...')
const dataBuffer = await readFile(filePath)
logger.info('File read successfully, size:', dataBuffer.length)
@@ -46,31 +40,22 @@ export class RawPdfParser implements FileParser {
try {
logger.info('Starting to parse buffer, size:', dataBuffer.length)
// Instead of trying to parse the binary PDF data directly,
// we'll extract only the text sections that are readable
// First convert to string but only for pattern matching, not for display
const rawContent = dataBuffer.toString('utf-8')
// Extract basic PDF info
let version = 'Unknown'
let pageCount = 0
// Try to extract PDF version
const versionMatch = rawContent.match(/%PDF-(\d+\.\d+)/)
if (versionMatch?.[1]) {
version = versionMatch[1]
}
// Count pages using multiple methods for redundancy
// Method 1: Count "/Type /Page" occurrences (most reliable)
const typePageMatches = rawContent.match(/\/Type\s*\/Page\b/gi)
if (typePageMatches) {
pageCount = typePageMatches.length
logger.info('Found page count using /Type /Page:', pageCount)
}
// Method 2: Look for "/Page" dictionary references
if (pageCount === 0) {
const pageMatches = rawContent.match(/\/Page\s*\//gi)
if (pageMatches) {
@@ -79,19 +64,15 @@ export class RawPdfParser implements FileParser {
}
}
// Method 3: Look for "/Pages" object references
if (pageCount === 0) {
const pagesObjMatches = rawContent.match(/\/Pages\s+\d+\s+\d+\s+R/gi)
if (pagesObjMatches && pagesObjMatches.length > 0) {
// Extract the object reference
const pagesObjRef = pagesObjMatches[0].match(/\/Pages\s+(\d+)\s+\d+\s+R/i)
if (pagesObjRef?.[1]) {
const objNum = pagesObjRef[1]
// Find the referenced object
const objRegex = new RegExp(`${objNum}\\s+0\\s+obj[\\s\\S]*?endobj`, 'i')
const objMatch = rawContent.match(objRegex)
if (objMatch) {
// Look for /Count within the Pages object
const countMatch = objMatch[0].match(/\/Count\s+(\d+)/i)
if (countMatch?.[1]) {
pageCount = Number.parseInt(countMatch[1], 10)
@@ -102,50 +83,40 @@ export class RawPdfParser implements FileParser {
}
}
// Method 4: Count trailer references to get an approximate count
if (pageCount === 0) {
const trailerMatches = rawContent.match(/trailer/gi)
if (trailerMatches) {
// This is just a rough estimate, not accurate
pageCount = Math.max(1, Math.ceil(trailerMatches.length / 2))
logger.info('Estimated page count using trailer references:', pageCount)
}
}
// Default to at least 1 page if we couldn't find any
if (pageCount === 0) {
pageCount = 1
logger.info('Defaulting to 1 page as no count was found')
}
// Extract text content using text markers commonly found in PDFs
let extractedText = ''
// Method 1: Extract text between BT (Begin Text) and ET (End Text) markers
const textMatches = rawContent.match(/BT[\s\S]*?ET/g)
if (textMatches && textMatches.length > 0) {
logger.info('Found', textMatches.length, 'text blocks')
extractedText = textMatches
.map((textBlock) => {
// Extract text objects (Tj, TJ) from the text block
const textObjects = textBlock.match(/(\([^)]*\)|\[[^\]]*\])\s*(Tj|TJ)/g)
if (textObjects && textObjects.length > 0) {
return textObjects
.map((obj) => {
// Clean up text objects
let text = ''
if (obj.includes('Tj')) {
// Handle Tj operator (simple string)
const match = obj.match(/\(([^)]*)\)\s*Tj/)
if (match?.[1]) {
text = match[1]
}
} else if (obj.includes('TJ')) {
// Handle TJ operator (array of strings and positioning)
const match = obj.match(/\[(.*)\]\s*TJ/)
if (match?.[1]) {
// Extract only the string parts from the array
const parts = match[1].match(/\([^)]*\)/g)
if (parts) {
text = parts.map((p) => p.slice(1, -1)).join(' ')
@@ -153,7 +124,6 @@ export class RawPdfParser implements FileParser {
}
}
// Clean up PDF escape sequences
return text
.replace(/\\(\d{3})/g, (_, octal) =>
String.fromCharCode(Number.parseInt(octal, 8))
@@ -170,50 +140,42 @@ export class RawPdfParser implements FileParser {
.trim()
}
// Try to extract metadata from XML
let metadataText = ''
const xmlMatch = rawContent.match(/<x:xmpmeta[\s\S]*?<\/x:xmpmeta>/)
if (xmlMatch) {
const xmlContent = xmlMatch[0]
logger.info('Found XML metadata')
// Extract document title
const titleMatch = xmlContent.match(/<dc:title>[\s\S]*?<rdf:li[^>]*>(.*?)<\/rdf:li>/i)
if (titleMatch?.[1]) {
const title = titleMatch[1].replace(/<[^>]+>/g, '').trim()
metadataText += `Document Title: ${title}\n\n`
}
// Extract creator/author
const creatorMatch = xmlContent.match(/<dc:creator>[\s\S]*?<rdf:li[^>]*>(.*?)<\/rdf:li>/i)
if (creatorMatch?.[1]) {
const creator = creatorMatch[1].replace(/<[^>]+>/g, '').trim()
metadataText += `Author: ${creator}\n`
}
// Extract creation date
const dateMatch = xmlContent.match(/<xmp:CreateDate>(.*?)<\/xmp:CreateDate>/i)
if (dateMatch?.[1]) {
metadataText += `Created: ${dateMatch[1].trim()}\n`
}
// Extract producer
const producerMatch = xmlContent.match(/<pdf:Producer>(.*?)<\/pdf:Producer>/i)
if (producerMatch?.[1]) {
metadataText += `Producer: ${producerMatch[1].trim()}\n`
}
}
// Try to extract actual text content from content streams
if (!extractedText || extractedText.length < 100 || extractedText.includes('/Type /Page')) {
logger.info('Trying advanced text extraction from content streams')
// Find content stream references
const contentRefs = rawContent.match(/\/Contents\s+\[?\s*(\d+)\s+\d+\s+R\s*\]?/g)
if (contentRefs && contentRefs.length > 0) {
logger.info('Found', contentRefs.length, 'content stream references')
// Extract object numbers from content references
const objNumbers = contentRefs
.map((ref) => {
const match = ref.match(/\/Contents\s+\[?\s*(\d+)\s+\d+\s+R\s*\]?/)
@@ -223,7 +185,6 @@ export class RawPdfParser implements FileParser {
logger.info('Content stream object numbers:', objNumbers)
// Try to find those objects in the content
if (objNumbers.length > 0) {
let textFromStreams = ''
@@ -232,12 +193,10 @@ export class RawPdfParser implements FileParser {
const objMatch = rawContent.match(objRegex)
if (objMatch) {
// Look for stream content within the object
const streamMatch = objMatch[0].match(/stream\r?\n([\s\S]*?)\r?\nendstream/)
if (streamMatch?.[1]) {
const streamContent = streamMatch[1]
// Look for text operations in the stream (Tj, TJ, etc.)
const textFragments = streamContent.match(/\([^)]+\)\s*Tj|\[[^\]]*\]\s*TJ/g)
if (textFragments && textFragments.length > 0) {
const extractedFragments = textFragments
@@ -290,35 +249,27 @@ export class RawPdfParser implements FileParser {
}
}
// Try to decompress PDF streams
// This is especially helpful for PDFs with compressed content
if (!extractedText || extractedText.length < 100) {
logger.info('Trying to decompress PDF streams')
// Find compressed streams (FlateDecode)
const compressedStreams = rawContent.match(
/\/Filter\s*\/FlateDecode[\s\S]*?stream[\s\S]*?endstream/g
)
if (compressedStreams && compressedStreams.length > 0) {
logger.info('Found', compressedStreams.length, 'compressed streams')
// Process each stream
const decompressedContents = await Promise.all(
compressedStreams.map(async (stream) => {
try {
// Extract stream content between stream and endstream
const streamMatch = stream.match(/stream\r?\n([\s\S]*?)\r?\nendstream/)
if (!streamMatch || !streamMatch[1]) return ''
const compressedData = Buffer.from(streamMatch[1], 'binary')
// Try different decompression methods
try {
// Try inflate (most common)
const decompressed = await inflateAsync(compressedData)
const content = decompressed.toString('utf-8')
// Check if it contains readable text
const readable = content.replace(/[^\x20-\x7E\r\n]/g, ' ').trim()
if (
readable.length > 50 &&
@@ -329,12 +280,10 @@ export class RawPdfParser implements FileParser {
return readable
}
} catch (_inflateErr) {
// Try unzip as fallback
try {
const decompressed = await unzipAsync(compressedData)
const content = decompressed.toString('utf-8')
// Check if it contains readable text
const readable = content.replace(/[^\x20-\x7E\r\n]/g, ' ').trim()
if (
readable.length > 50 &&
@@ -345,12 +294,10 @@ export class RawPdfParser implements FileParser {
return readable
}
} catch (_unzipErr) {
// Both methods failed, continue to next stream
return ''
}
}
} catch (_error) {
// Error processing this stream, skip it
return ''
}
@@ -358,7 +305,6 @@ export class RawPdfParser implements FileParser {
})
)
// Filter out empty results and combine
const decompressedText = decompressedContents
.filter((text) => text && text.length > 0)
.join('\n\n')
@@ -370,26 +316,19 @@ export class RawPdfParser implements FileParser {
}
}
// Method 2: Look for text stream data
if (!extractedText || extractedText.length < 50) {
logger.info('Trying alternative text extraction method with streams')
// Find text streams
const streamMatches = rawContent.match(/stream[\s\S]*?endstream/g)
if (streamMatches && streamMatches.length > 0) {
logger.info('Found', streamMatches.length, 'streams')
// Process each stream to look for text content
const textContent = streamMatches
.map((stream) => {
// Remove 'stream' and 'endstream' markers
const content = stream.replace(/^stream\r?\n|\r?\nendstream$/g, '')
// Look for readable ASCII text (more strict heuristic)
// Only keep ASCII printable characters
const readable = content.replace(/[^\x20-\x7E\r\n]/g, ' ').trim()
// Only keep content that looks like real text (has spaces, periods, etc.)
if (
readable.length > 20 &&
readable.includes(' ') &&
@@ -400,7 +339,7 @@ export class RawPdfParser implements FileParser {
}
return ''
})
.filter((text) => text.length > 0 && text.split(' ').length > 5)
.join('\n\n')
if (textContent.length > 0) {
@@ -409,22 +348,17 @@ export class RawPdfParser implements FileParser {
}
}
// Method 3: Look for object streams
if (!extractedText || extractedText.length < 50) {
logger.info('Trying object streams for text')
// Find object stream content
const objMatches = rawContent.match(/\d+\s+\d+\s+obj[\s\S]*?endobj/g)
if (objMatches && objMatches.length > 0) {
logger.info('Found', objMatches.length, 'objects')
// Process objects looking for text content
const textContent = objMatches
.map((obj) => {
// Find readable text in the object - only keep ASCII printable characters
const readable = obj.replace(/[^\x20-\x7E\r\n]/g, ' ').trim()
// Only include if it looks like actual text (strict heuristic)
if (
readable.length > 50 &&
readable.includes(' ') &&
@@ -445,8 +379,6 @@ export class RawPdfParser implements FileParser {
}
}
// If what we extracted is just PDF structure information rather than readable text,
// provide a clearer message
if (
extractedText &&
(extractedText.includes('endobj') ||
@@ -459,53 +391,41 @@ export class RawPdfParser implements FileParser {
)
extractedText = metadataText
} else if (metadataText && !extractedText.includes('Document Title:')) {
// Prepend metadata to extracted text if available
extractedText = metadataText + (extractedText ? `\n\n${extractedText}` : '')
}
// Validate that the extracted text looks meaningful
// Count how many recognizable words/characters it contains
const validCharCount = (extractedText || '').replace(/[^\x20-\x7E\r\n]/g, '').length
const totalCharCount = (extractedText || '').length
const validRatio = validCharCount / (totalCharCount || 1)
// Check for common PDF artifacts that indicate binary corruption
const hasBinaryArtifacts =
extractedText &&
(extractedText.includes('\\u') ||
extractedText.includes('\\x') ||
extractedText.includes('\0') ||
/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F-\xFF]{10,}/g.test(extractedText) ||
validRatio < 0.7) // Less than 70% valid characters
validRatio < 0.7)
// Check if the content looks like gibberish
const looksLikeGibberish =
extractedText &&
// Too many special characters
(extractedText.replace(/[a-zA-Z0-9\s.,:'"()[\]{}]/g, '').length / extractedText.length >
0.3 ||
// Not enough spaces (real text has spaces between words)
extractedText.split(' ').length < extractedText.length / 20)
// If no text was extracted, or if it's binary/gibberish,
// provide a helpful message instead
if (!extractedText || extractedText.length < 50 || hasBinaryArtifacts || looksLikeGibberish) {
logger.info('Could not extract meaningful text, providing fallback message')
logger.info('Valid character ratio:', validRatio)
logger.info('Has binary artifacts:', hasBinaryArtifacts)
logger.info('Looks like gibberish:', looksLikeGibberish)
// Start with metadata if available
if (metadataText) {
extractedText = `${metadataText}\n`
} else {
extractedText = ''
}
// Add basic PDF info
extractedText += `This is a PDF document with ${pageCount} page(s) and version ${version}.\n\n`
// Try to find a title in the PDF structure that we might have missed
const titleInStructure =
rawContent.match(/title\s*:\s*([^\n]+)/i) ||
rawContent.match(/Microsoft Word -\s*([^\n]+)/i)

View File

@@ -8,15 +8,12 @@ const logger = createLogger('TxtParser')
export class TxtParser implements FileParser {
async parseFile(filePath: string): Promise<FileParseResult> {
try {
// Validate input
if (!filePath) {
throw new Error('No file path provided')
}
// Read the file
const buffer = await readFile(filePath)
// Use parseBuffer for consistent implementation
return this.parseBuffer(buffer)
} catch (error) {
logger.error('TXT file error:', error)
@@ -28,7 +25,6 @@ export class TxtParser implements FileParser {
try {
logger.info('Parsing buffer, size:', buffer.length)
// Extract content and sanitize for UTF-8 storage
const rawContent = buffer.toString('utf-8')
const result = sanitizeTextForUTF8(rawContent)

View File

@@ -8,4 +8,16 @@ export interface FileParser {
parseBuffer?(buffer: Buffer): Promise<FileParseResult>
}
export type SupportedFileType =
| 'pdf'
| 'csv'
| 'doc'
| 'docx'
| 'txt'
| 'md'
| 'xlsx'
| 'xls'
| 'html'
| 'htm'
| 'pptx'
| 'ppt'

View File

@@ -9,19 +9,16 @@ const logger = createLogger('XlsxParser')
export class XlsxParser implements FileParser {
async parseFile(filePath: string): Promise<FileParseResult> {
try {
// Validate input
if (!filePath) {
throw new Error('No file path provided')
}
// Check if file exists
if (!existsSync(filePath)) {
throw new Error(`File not found: ${filePath}`)
}
logger.info(`Parsing XLSX file: ${filePath}`)
// Read the workbook
const workbook = XLSX.readFile(filePath)
return this.processWorkbook(workbook)
} catch (error) {
@@ -38,7 +35,6 @@ export class XlsxParser implements FileParser {
throw new Error('Empty buffer provided')
}
// Read the workbook from buffer
const workbook = XLSX.read(buffer, { type: 'buffer' })
return this.processWorkbook(workbook)
} catch (error) {
@@ -53,25 +49,20 @@ export class XlsxParser implements FileParser {
let content = ''
let totalRows = 0
// Process each worksheet
for (const sheetName of sheetNames) {
const worksheet = workbook.Sheets[sheetName]
// Convert to array of objects
const sheetData = XLSX.utils.sheet_to_json(worksheet, { header: 1 })
sheets[sheetName] = sheetData
totalRows += sheetData.length
// Add sheet content to the overall content string (clean sheet name)
const cleanSheetName = sanitizeTextForUTF8(sheetName)
content += `Sheet: ${cleanSheetName}\n`
content += `=${'='.repeat(cleanSheetName.length + 6)}\n\n`
if (sheetData.length > 0) {
// Process each row
sheetData.forEach((row: unknown, rowIndex: number) => {
if (Array.isArray(row) && row.length > 0) {
// Convert row to string, handling undefined/null values and cleaning non-UTF8 characters
const rowString = row
.map((cell) => {
if (cell === null || cell === undefined) {
@@ -93,7 +84,6 @@ export class XlsxParser implements FileParser {
logger.info(`XLSX parsing completed: ${sheetNames.length} sheets, ${totalRows} total rows`)
// Final cleanup of the entire content to ensure UTF-8 compatibility
const cleanContent = sanitizeTextForUTF8(content).trim()
return {

View File

@@ -0,0 +1,24 @@
export const TAG_SLOT_CONFIG = {
text: {
slots: ['tag1', 'tag2', 'tag3', 'tag4', 'tag5', 'tag6', 'tag7'] as const,
maxSlots: 7,
},
} as const
export const SUPPORTED_FIELD_TYPES = Object.keys(TAG_SLOT_CONFIG) as Array<
keyof typeof TAG_SLOT_CONFIG
>
export const TAG_SLOTS = TAG_SLOT_CONFIG.text.slots
export const MAX_TAG_SLOTS = TAG_SLOT_CONFIG.text.maxSlots
export type TagSlot = (typeof TAG_SLOTS)[number]
export function getSlotsForFieldType(fieldType: string): readonly string[] {
const config = TAG_SLOT_CONFIG[fieldType as keyof typeof TAG_SLOT_CONFIG]
if (!config) {
return []
}
return config.slots
}
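// Behavior sketch (illustrative, not part of the diff):
const textSlots = getSlotsForFieldType('text') // the seven text tag slots
const unknownSlots = getSlotsForFieldType('number') // [] (no config registered for 'number')
console.log(textSlots.length === MAX_TAG_SLOTS, unknownSlots.length) // true 0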

View File

@@ -1,9 +1,9 @@
import crypto, { randomUUID } from 'crypto'
import { tasks } from '@trigger.dev/sdk'
import { and, asc, desc, eq, inArray, isNull, sql } from 'drizzle-orm'
import { generateEmbeddings } from '@/lib/embeddings/utils'
import { env } from '@/lib/env'
import { getSlotsForFieldType, type TAG_SLOT_CONFIG } from '@/lib/knowledge/consts'
import { processDocument } from '@/lib/knowledge/documents/document-processor'
import { getNextAvailableSlot } from '@/lib/knowledge/tags/service'
import { createLogger } from '@/lib/logs/console/logger'
@@ -17,8 +17,8 @@ import type { DocumentSortField, SortOrder } from './types'
const logger = createLogger('DocumentService')
const TIMEOUTS = {
OVERALL_PROCESSING: (env.KB_CONFIG_MAX_DURATION || 300) * 1000,
EMBEDDINGS_API: (env.KB_CONFIG_MAX_TIMEOUT || 10000) * 18,
} as const
/**
@@ -38,17 +38,17 @@ function withTimeout<T>(
}
const PROCESSING_CONFIG = {
maxConcurrentDocuments: Math.max(1, Math.floor((env.KB_CONFIG_CONCURRENCY_LIMIT || 20) / 5)) || 4,
batchSize: Math.max(1, Math.floor((env.KB_CONFIG_BATCH_SIZE || 20) / 2)) || 10,
delayBetweenBatches: (env.KB_CONFIG_DELAY_BETWEEN_BATCHES || 100) * 2,
delayBetweenDocuments: (env.KB_CONFIG_DELAY_BETWEEN_DOCUMENTS || 50) * 2,
}
const REDIS_PROCESSING_CONFIG = {
maxConcurrentDocuments: 12,
batchSize: 20,
delayBetweenBatches: 100,
delayBetweenDocuments: 50,
maxConcurrentDocuments: env.KB_CONFIG_CONCURRENCY_LIMIT || 20,
batchSize: env.KB_CONFIG_BATCH_SIZE || 20,
delayBetweenBatches: env.KB_CONFIG_DELAY_BETWEEN_BATCHES || 100,
delayBetweenDocuments: env.KB_CONFIG_DELAY_BETWEEN_DOCUMENTS || 50,
}
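// With the KB_CONFIG_* env vars unset, these fallbacks reproduce the previous
// hardcoded values: non-Redis resolves to floor(20 / 5) = 4 concurrent documents,
// floor(20 / 2) = 10 per batch, and 100 * 2 = 200ms / 50 * 2 = 100ms delays;
// the Redis config takes the defaults directly (20 / 20 / 100ms / 50ms).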
let documentQueue: DocumentProcessingQueue | null = null
@@ -59,8 +59,8 @@ export function getDocumentQueue(): DocumentProcessingQueue {
const config = redisClient ? REDIS_PROCESSING_CONFIG : PROCESSING_CONFIG
documentQueue = new DocumentProcessingQueue({
maxConcurrent: config.maxConcurrentDocuments,
retryDelay: env.KB_CONFIG_MIN_TIMEOUT || 1000,
maxRetries: env.KB_CONFIG_MAX_ATTEMPTS || 3,
})
}
return documentQueue

View File

@@ -4,7 +4,7 @@ import {
getSlotsForFieldType,
SUPPORTED_FIELD_TYPES,
type TAG_SLOT_CONFIG,
} from '@/lib/knowledge/consts'
import type { BulkTagDefinitionsData, DocumentTagDefinition } from '@/lib/knowledge/tags/types'
import type {
CreateTagDefinitionData,

View File

@@ -36,11 +36,14 @@ export class ExecutionLogger implements IExecutionLoggerService {
trigger: ExecutionTrigger
environment: ExecutionEnvironment
workflowState: WorkflowState
initialInput?: Record<string, unknown>
startedFromBlockId?: string
executionType?: 'api' | 'webhook' | 'schedule' | 'manual' | 'chat'
}): Promise<{
workflowLog: WorkflowExecutionLog
snapshot: WorkflowExecutionSnapshot
}> {
const { workflowId, executionId, trigger, environment, workflowState, initialInput, startedFromBlockId, executionType } = params
logger.debug(`Starting workflow execution ${executionId} for workflow ${workflowId}`)
@@ -66,6 +69,9 @@ export class ExecutionLogger implements IExecutionLoggerService {
executionData: {
environment,
trigger,
initialInput: initialInput || {},
startedFromBlockId: startedFromBlockId || undefined,
executionType: executionType || trigger.type,
},
})
.returning()
@@ -137,6 +143,39 @@ export class ExecutionLogger implements IExecutionLoggerService {
// Extract files from trace spans and final output
const executionFiles = this.extractFilesFromExecution(traceSpans, finalOutput)
// Read the existing executionData so we can merge new fields without losing initialInput/environment/trigger
const [existing] = await db
.select({ executionData: workflowExecutionLogs.executionData })
.from(workflowExecutionLogs)
.where(eq(workflowExecutionLogs.executionId, executionId))
.limit(1)
const existingExecutionData = (existing?.executionData as any) || {}
// Build simple block execution summaries from trace spans (flat list)
const blockExecutions: any[] = []
const collectBlocks = (spans?: any[]) => {
if (!Array.isArray(spans)) return
spans.forEach((span) => {
if (span?.blockId) {
blockExecutions.push({
id: span.id,
blockId: span.blockId,
blockName: span.name,
blockType: span.type,
startedAt: span.startTime,
endedAt: span.endTime,
durationMs: span.duration,
status: span.status || 'success',
inputData: span.input || {},
outputData: span.output || {},
})
}
if (span?.children && Array.isArray(span.children)) collectBlocks(span.children)
})
}
collectBlocks(traceSpans)
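// e.g. spans [{ id: 's1', blockId: 'b1', children: [{ id: 's2', blockId: 'b2' }] }]
// yield two flat blockExecutions entries, one per blockId, in depth-first order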
const [updatedLog] = await db
.update(workflowExecutionLogs)
.set({
@@ -145,8 +184,10 @@ export class ExecutionLogger implements IExecutionLoggerService {
totalDurationMs,
files: executionFiles.length > 0 ? executionFiles : null,
executionData: {
...existingExecutionData,
traceSpans,
finalOutput,
blockExecutions: blockExecutions,
tokenBreakdown: {
prompt: costSummary.totalPromptTokens,
completion: costSummary.totalCompletionTokens,

View File

@@ -21,6 +21,9 @@ export interface SessionStartParams {
workspaceId?: string
variables?: Record<string, string>
triggerData?: Record<string, unknown>
initialInput?: Record<string, unknown>
startBlockId?: string
executionType?: 'api' | 'webhook' | 'schedule' | 'manual' | 'chat'
}
export interface SessionCompleteParams {
@@ -61,7 +64,7 @@ export class LoggingSession {
}
async start(params: SessionStartParams = {}): Promise<void> {
const { userId, workspaceId, variables, triggerData, initialInput, startBlockId, executionType } = params
try {
this.trigger = createTriggerObject(this.triggerType, triggerData)
@@ -80,6 +83,9 @@ export class LoggingSession {
trigger: this.trigger,
environment: this.environment,
workflowState: this.workflowState,
initialInput,
startedFromBlockId: startBlockId,
executionType: executionType || this.triggerType,
})
if (this.requestId) {

View File

@@ -11,6 +11,7 @@ import type {
} from '@/lib/logs/types'
import { db } from '@/db'
import { workflowExecutionSnapshots } from '@/db/schema'
import { filterEdgesForTriggers } from '@/lib/workflows/trigger-rules'
const logger = createLogger('SnapshotService')
@@ -27,8 +28,27 @@ export class SnapshotService implements ISnapshotService {
workflowId: string,
state: WorkflowState
): Promise<SnapshotCreationResult> {
// Ensure consistency: apply the same trigger-edge filtering used by the editor/execution
const filteredState = filterEdgesForTriggers(state)
// Hash the position-less state for deduplication (functional equivalence)
const stateHash = this.computeStateHash(filteredState)
// Log a concise preview of the state being considered for snapshot
try {
logger.info('📸 Preparing workflow snapshot', {
workflowId,
stateHash,
blocks: Object.entries(filteredState.blocks || {}).map(([id, b]: [string, any]) => ({
id,
type: (b as any)?.type,
name: (b as any)?.name,
triggerMode: (b as any)?.triggerMode === true,
enabled: (b as any)?.enabled !== false,
})),
edgesCount: (filteredState.edges || []).length,
})
} catch {}
const existingSnapshot = await this.getSnapshotByHash(workflowId, stateHash)
if (existingSnapshot) {
@@ -45,7 +65,7 @@ export class SnapshotService implements ISnapshotService {
id: uuidv4(),
workflowId,
stateHash,
stateData: filteredState, // Full state with positions, subblock values, etc., after consistent filtering
}
const [newSnapshot] = await db
@@ -53,8 +73,24 @@ export class SnapshotService implements ISnapshotService {
.values(snapshotData)
.returning()
logger.info('✅ Saved workflow snapshot', {
workflowId,
snapshotId: newSnapshot.id,
stateHash,
blocksCount: Object.keys(filteredState.blocks || {}).length,
edgesCount: (filteredState.edges || []).length,
})
// Emit the exact state saved (debug level to avoid log noise); redact sensitive values if needed
try {
// Lazy import to avoid cycles
const utils = await import('@/lib/utils')
const redactedState = utils.redactApiKeys(newSnapshot.stateData as any)
logger.debug('🧩 Snapshot state data (exact):', redactedState)
} catch {}
logger.debug(`Created new snapshot for workflow ${workflowId} with hash ${stateHash}`)
logger.debug(`Stored full state with ${Object.keys(filteredState.blocks || {}).length} blocks`)
return {
snapshot: {
...newSnapshot,

View File

@@ -109,6 +109,13 @@ export interface WorkflowExecutionLog {
error: string
stackTrace?: string
}
// Newly added: persist the original triggering input (starter/chat/api/webhook)
initialInput?: Record<string, unknown>
// Newly added: where execution began and type metadata
startedFromBlockId?: string
executionType?: 'api' | 'webhook' | 'schedule' | 'manual' | 'chat'
// Optional precomputed block execution summaries
blockExecutions?: any[]
}
// Top-level cost information
cost?: {

View File

@@ -4,7 +4,6 @@ import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/sim-agent'
const logger = createLogger('SimAgentClient')
// Base URL for the sim-agent service
const SIM_AGENT_BASE_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT
export interface SimAgentRequest {
@@ -45,7 +44,6 @@ class SimAgentClient {
try {
const url = `${this.baseUrl}${endpoint}`
// Use provided API key or try to get it from environment
const requestHeaders: Record<string, string> = {
'Content-Type': 'application/json',
...headers,

View File

@@ -0,0 +1,43 @@
import { getBlock } from '@/blocks'
import type { WorkflowState } from '@/stores/workflows/workflow/types'
/**
* Decide whether incoming edges should be blocked for a target block.
* - Block if the block is a pure trigger category (webhook, etc.)
* - Block if the block is currently in triggerMode
* - Block if the block is the starter block
*/
export function shouldBlockIncomingEdgesForTarget(blockType: string, triggerMode: boolean | undefined): boolean {
// Starter blocks should never have incoming edges
if (blockType === 'starter') return true
// Runtime toggle
if (triggerMode === true) return true
// Pure trigger categories
try {
const config = getBlock(blockType)
if (config?.category === 'triggers') return true
} catch {}
return false
}
/**
* Return a copy of state with edges to trigger-like targets removed.
*/
export function filterEdgesForTriggers(state: WorkflowState): WorkflowState {
const blocks = state.blocks || {}
const edges = state.edges || []
const filteredEdges = edges.filter((edge) => {
const target = blocks[edge.target]
if (!target) return false // Drop dangling edges defensively
return !shouldBlockIncomingEdgesForTarget(target.type, target.triggerMode)
})
return {
...state,
edges: filteredEdges,
}
}
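// Behavior sketch (hypothetical two-block state): with blocks
// { start: { type: 'starter' }, agent: { type: 'agent' } } and edges
// [start -> agent, agent -> start], filterEdgesForTriggers keeps start -> agent
// and drops agent -> start, since starter blocks never accept incoming edges.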

View File

@@ -69,19 +69,18 @@
"@react-email/components": "^0.0.34",
"@sentry/nextjs": "^9.15.0",
"@trigger.dev/sdk": "4.0.1",
"@types/pg": "8.15.5",
"@types/three": "0.177.0",
"@vercel/og": "^0.6.5",
"@vercel/speed-insights": "^1.2.0",
"ai": "^4.3.2",
"better-auth": "^1.2.9",
"browser-image-compression": "^2.0.2",
"cheerio": "1.1.2",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"cmdk": "^1.0.0",
"croner": "^9.0.0",
"csv-parse": "^5.6.0",
"csv-parser": "^3.2.0",
"dat.gui": "0.7.9",
"date-fns": "4.1.0",
"drizzle-orm": "^0.41.0",
@@ -90,6 +89,7 @@
"geist": "1.4.2",
"groq-sdk": "^0.15.0",
"html-to-text": "^9.0.5",
"iconv-lite": "0.7.0",
"input-otp": "^1.4.2",
"ioredis": "^5.6.0",
"jose": "6.0.11",
@@ -99,12 +99,13 @@
"lucide-react": "^0.479.0",
"mammoth": "^1.9.0",
"mysql2": "3.14.3",
"next": "^15.3.2",
"next": "^15.4.1",
"next-runtime-env": "3.3.0",
"next-themes": "^0.4.6",
"officeparser": "^5.2.0",
"openai": "^4.91.1",
"pdf-parse": "^1.1.1",
"pg": "8.16.3",
"papaparse": "5.5.3",
"pdf-lib": "^1.17.1",
"postgres": "^3.4.5",
"prismjs": "^1.30.0",
"react": "19.1.0",
@@ -119,13 +120,14 @@
"rehype-highlight": "7.0.2",
"remark-gfm": "4.0.1",
"resend": "^4.1.2",
"rtf-parser": "1.3.3",
"rtf-stream-parser": "3.8.0",
"socket.io": "^4.8.1",
"stripe": "^17.7.0",
"tailwind-merge": "^2.6.0",
"tailwindcss-animate": "^1.0.7",
"three": "0.177.0",
"uuid": "^11.1.0",
"word-extractor": "1.0.4",
"xlsx": "0.18.5",
"zod": "^3.24.2"
},
@@ -136,10 +138,12 @@
"@testing-library/user-event": "^14.6.1",
"@trigger.dev/build": "4.0.1",
"@types/html-to-text": "^9.0.4",
"@types/iconv-lite": "0.0.1",
"@types/js-yaml": "4.0.9",
"@types/jsdom": "21.1.7",
"@types/lodash": "^4.17.16",
"@types/node": "24.2.1",
"@types/papaparse": "5.3.16",
"@types/prismjs": "^1.26.5",
"@types/react": "^19",
"@types/react-dom": "^19",
@@ -162,5 +166,9 @@
"canvas",
"better-sqlite3",
"sharp"
]
],
"overrides": {
"next": "^15.4.1",
"@next/env": "^15.4.1"
}
}

View File

@@ -6,6 +6,7 @@ import { createLogger } from '@/lib/logs/console/logger'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/db-helpers'
import * as schema from '@/db/schema'
import { workflow, workflowBlocks, workflowEdges, workflowSubflows } from '@/db/schema'
import { shouldBlockIncomingEdgesForTarget } from '@/lib/workflows/trigger-rules'
const logger = createLogger('SocketDatabase')
@@ -597,7 +598,21 @@ async function handleBlockOperationTx(
throw new Error(`Block ${payload.id} not found in workflow ${workflowId}`)
}
// When enabling trigger mode, remove all incoming edges to this block at the database level
if (payload.triggerMode === true) {
const removed = await tx
.delete(workflowEdges)
.where(
and(eq(workflowEdges.workflowId, workflowId), eq(workflowEdges.targetBlockId, payload.id))
)
.returning({ id: workflowEdges.id })
logger.debug(
`Updated block trigger mode: ${payload.id} -> ${payload.triggerMode}. Removed ${removed.length} incoming edges for trigger mode.`
)
} else {
logger.debug(`Updated block trigger mode: ${payload.id} -> ${payload.triggerMode}`)
}
break
}
@@ -743,6 +758,24 @@ async function handleEdgeOperationTx(
throw new Error('Missing required fields for add edge operation')
}
// Guard: do not allow incoming edges to trigger-like targets
const [targetBlock] = await tx
.select({ id: workflowBlocks.id, type: workflowBlocks.type, triggerMode: workflowBlocks.triggerMode })
.from(workflowBlocks)
.where(and(eq(workflowBlocks.workflowId, workflowId), eq(workflowBlocks.id, payload.target)))
.limit(1)
if (!targetBlock) {
throw new Error(`Target block ${payload.target} not found in workflow ${workflowId}`)
}
if (shouldBlockIncomingEdgesForTarget(targetBlock.type as string, targetBlock.triggerMode as boolean)) {
logger.debug(
`Rejected edge add ${payload.id}: incoming edges not allowed to ${payload.target} (type=${targetBlock.type}, triggerMode=${Boolean(targetBlock.triggerMode)})`
)
return
}
await tx.insert(workflowEdges).values({
id: payload.id,
workflowId,

View File

@@ -0,0 +1,24 @@
import { create } from 'zustand'
import type { WorkflowState } from '@/stores/workflows/workflow/types'
interface DebugCanvasState {
isActive: boolean
workflowState: WorkflowState | null
}
interface DebugCanvasActions {
activate: (workflowState: WorkflowState) => void
deactivate: () => void
setWorkflowState: (workflowState: WorkflowState | null) => void
clear: () => void
}
export const useDebugCanvasStore = create<DebugCanvasState & DebugCanvasActions>()((set) => ({
isActive: false,
workflowState: null,
activate: (workflowState) => set({ isActive: true, workflowState }),
deactivate: () => set({ isActive: false, workflowState: null }),
setWorkflowState: (workflowState) => set({ workflowState }),
clear: () => set({ isActive: false, workflowState: null }),
}))
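// Usage sketch (assumed component wiring, not from this diff): snapshot the
// executed state into the debug canvas when stepping begins, clear on exit.
// useDebugCanvasStore.getState().activate(snapshotWorkflowState)
// useDebugCanvasStore.getState().clear()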

View File

@@ -0,0 +1,95 @@
import { create } from 'zustand'
import type { ExecutionContext } from '@/executor/types'
interface BlockSnapshot {
output: any
executed: boolean
executionTime?: number
}
interface SnapshotEntry {
blockSnapshots: Map<string, BlockSnapshot>
envVarValues?: Record<string, string>
workflowVariables?: Record<string, any>
pendingBlocks: string[]
createdAt: number
}
interface DebugSnapshotState {
blockSnapshots: Map<string, BlockSnapshot>
envVarValues?: Record<string, string>
workflowVariables?: Record<string, any>
history: SnapshotEntry[]
}
interface DebugSnapshotActions {
captureFromContext: (ctx: ExecutionContext) => void
pushFromContext: (ctx: ExecutionContext, pendingBlocks: string[]) => void
stepBack: () => SnapshotEntry | null
clear: () => void
}
function buildBlockSnapshots(ctx: ExecutionContext): Map<string, BlockSnapshot> {
const next = new Map<string, BlockSnapshot>()
try {
ctx.blockStates.forEach((state, key) => {
next.set(String(key), {
output: state?.output ?? {},
executed: !!state?.executed,
executionTime: state?.executionTime,
})
})
} catch {}
return next
}
export const useDebugSnapshotStore = create<DebugSnapshotState & DebugSnapshotActions>()(
(set, get) => ({
blockSnapshots: new Map<string, BlockSnapshot>(),
envVarValues: undefined,
workflowVariables: undefined,
history: [],
captureFromContext: (ctx: ExecutionContext) => {
const next = buildBlockSnapshots(ctx)
set({
blockSnapshots: next,
envVarValues: ctx.environmentVariables || undefined,
workflowVariables: ctx.workflowVariables || undefined,
})
},
pushFromContext: (ctx: ExecutionContext, pendingBlocks: string[]) => {
const entry: SnapshotEntry = {
blockSnapshots: buildBlockSnapshots(ctx),
envVarValues: ctx.environmentVariables || undefined,
workflowVariables: ctx.workflowVariables || undefined,
pendingBlocks: [...pendingBlocks],
createdAt: Date.now(),
}
set((state) => ({ history: [...state.history, entry] }))
},
stepBack: () => {
const { history } = get()
if (history.length <= 1) return null
const nextHistory = history.slice(0, -1)
const prev = nextHistory[nextHistory.length - 1]
set({
history: nextHistory,
blockSnapshots: prev.blockSnapshots,
envVarValues: prev.envVarValues,
workflowVariables: prev.workflowVariables,
})
return prev
},
clear: () =>
set({
blockSnapshots: new Map(),
envVarValues: undefined,
workflowVariables: undefined,
history: [],
}),
})
)
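// Usage sketch (hypothetical debug loop): push a snapshot after every step so
// stepBack() can rewind; it returns null once only the initial entry remains.
// useDebugSnapshotStore.getState().pushFromContext(ctx, pendingBlockIds)
// const previous = useDebugSnapshotStore.getState().stepBack()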

View File

@@ -61,5 +61,20 @@ export const useExecutionStore = create<ExecutionState & ExecutionActions>()((se
setExecutor: (executor) => set({ executor }),
setDebugContext: (debugContext) => set({ debugContext }),
setAutoPanDisabled: (disabled) => set({ autoPanDisabled: disabled }),
setPanelFocusedBlockId: (id) => set({ panelFocusedBlockId: id }),
setExecutingBlockIds: (ids) => set({ executingBlockIds: new Set(ids) }),
setBreakpointId: (id) => set({ breakpointId: id }),
setStartPositions: (ids) => set({ startPositionIds: new Set(Array.from(ids).slice(0, 1)) }),
toggleStartPosition: (id) => {
set((state) => {
const isActive = state.startPositionIds.has(id)
// Enforce single selection
const next = isActive ? new Set<string>() : new Set<string>([id])
return { startPositionIds: next }
})
},
clearStartPositions: () => set({ startPositionIds: new Set() }),
reset: () => set(initialState),
}))
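// Single-selection sketch: toggling the active id clears it, toggling another
// id replaces it (startPositionIds never holds more than one entry).
// toggleStartPosition('a') // -> {'a'}
// toggleStartPosition('a') // -> {}
// toggleStartPosition('a'); toggleStartPosition('b') // -> {'b'}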

View File

@@ -9,6 +9,10 @@ export interface ExecutionState {
executor: Executor | null
debugContext: ExecutionContext | null
autoPanDisabled: boolean
panelFocusedBlockId?: string | null
executingBlockIds: Set<string>
breakpointId: string | null
startPositionIds: Set<string>
}
export interface ExecutionActions {
@@ -19,6 +23,12 @@ export interface ExecutionActions {
setExecutor: (executor: Executor | null) => void
setDebugContext: (context: ExecutionContext | null) => void
setAutoPanDisabled: (disabled: boolean) => void
setPanelFocusedBlockId: (id: string | null) => void
setExecutingBlockIds: (ids: Set<string>) => void
setBreakpointId: (id: string | null) => void
setStartPositions: (ids: Set<string>) => void
toggleStartPosition: (id: string) => void
clearStartPositions: () => void
reset: () => void
}
@@ -30,6 +40,10 @@ export const initialState: ExecutionState = {
executor: null,
debugContext: null,
autoPanDisabled: false,
panelFocusedBlockId: null,
executingBlockIds: new Set(),
breakpointId: null,
startPositionIds: new Set(),
}
// Types for panning functionality

View File

@@ -1,4 +1,4 @@
export type PanelTab = 'console' | 'variables' | 'chat' | 'copilot' | 'debug'
export interface PanelStore {
isOpen: boolean

View File

@@ -70,7 +70,7 @@ export const deleteTool: ToolConfig<MySQLDeleteParams, MySQLResponse> = {
database: params.database,
username: params.username,
password: params.password,
ssl: params.ssl || 'required',
table: params.table,
where: params.where,
}),

View File

@@ -64,7 +64,7 @@ export const executeTool: ToolConfig<MySQLExecuteParams, MySQLResponse> = {
database: params.database,
username: params.username,
password: params.password,
ssl: params.ssl || 'required',
query: params.query,
}),
},

View File

@@ -70,7 +70,7 @@ export const insertTool: ToolConfig<MySQLInsertParams, MySQLResponse> = {
database: params.database,
username: params.username,
password: params.password,
ssl: params.ssl || 'required',
table: params.table,
data: params.data,
}),

View File

@@ -64,7 +64,7 @@ export const queryTool: ToolConfig<MySQLQueryParams, MySQLResponse> = {
database: params.database,
username: params.username,
password: params.password,
ssl: params.ssl || 'required',
query: params.query,
}),
},

View File

@@ -76,7 +76,7 @@ export const updateTool: ToolConfig<MySQLUpdateParams, MySQLResponse> = {
database: params.database,
username: params.username,
password: params.password,
ssl: params.ssl || 'required',
table: params.table,
data: params.data,
where: params.where,

422
bun.lock

File diff suppressed because it is too large

View File

@@ -27,24 +27,22 @@
},
"overrides": {
"react": "19.1.0",
"react-dom": "19.1.0"
"react-dom": "19.1.0",
"next": "^15.4.1",
"@next/env": "^15.4.1"
},
"dependencies": {
"@linear/sdk": "40.0.0",
"@t3-oss/env-nextjs": "0.13.4",
"@vercel/analytics": "1.5.0",
"drizzle-orm": "0.44.5",
"geist": "^1.4.2",
"pg": "8.16.3",
"react-colorful": "5.6.1",
"remark-gfm": "4.0.1",
"socket.io-client": "4.8.1"
},
"devDependencies": {
"@biomejs/biome": "2.0.0-beta.5",
"@next/env": "^15.3.2",
"@types/word-extractor": "1.0.6",
"dotenv-cli": "^8.0.0",
"@next/env": "^15.4.1",
"husky": "9.1.7",
"lint-staged": "16.0.0",
"turbo": "2.5.6"