feat(native-bg-tasks): support webhooks and async workflow executions without trigger.dev (#1106)

* feat(native-bg-tasks): support webhooks and async workflow executions without trigger.dev

* fix tests

* fix env var defaults and revert async workflow execution to always use trigger

* fix UI for hiding async

* hide entire toggle
This commit is contained in:
Vikhyath Mondreti
2025-08-22 14:43:21 -07:00
committed by GitHub
parent 1ee4263e60
commit be810013c7
8 changed files with 670 additions and 624 deletions

View File

@@ -354,6 +354,18 @@ export function mockExecutionDependencies() {
}))
}
/**
 * Mock Trigger.dev SDK (tasks.trigger and task factory) for tests that import background modules.
 * `tasks.trigger` resolves to a stub handle and `task` returns an empty object so no real
 * Trigger.dev client is ever constructed during tests.
 */
export function mockTriggerDevSdk() {
  const sdkStubFactory = () => {
    const triggerStub = vi.fn().mockResolvedValue({ id: 'mock-task-id' })
    return {
      tasks: { trigger: triggerStub },
      task: vi.fn().mockReturnValue({}),
    }
  }
  vi.mock('@trigger.dev/sdk', sdkStubFactory)
}
export function mockWorkflowAccessValidation(shouldSucceed = true) {
if (shouldSucceed) {
vi.mock('@/app/api/workflows/middleware', () => ({

View File

@@ -5,7 +5,22 @@ import { NextRequest } from 'next/server'
* @vitest-environment node
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest, mockExecutionDependencies } from '@/app/api/__test-utils__/utils'
import {
createMockRequest,
mockExecutionDependencies,
mockTriggerDevSdk,
} from '@/app/api/__test-utils__/utils'
// Prefer mocking the background module to avoid loading Trigger.dev at all during tests
vi.mock('@/background/webhook-execution', () => ({
executeWebhookJob: vi.fn().mockResolvedValue({
success: true,
workflowId: 'test-workflow-id',
executionId: 'test-exec-id',
output: {},
executedAt: new Date().toISOString(),
}),
}))
const hasProcessedMessageMock = vi.fn().mockResolvedValue(false)
const markMessageAsProcessedMock = vi.fn().mockResolvedValue(true)
@@ -111,6 +126,7 @@ describe('Webhook Trigger API Route', () => {
vi.resetAllMocks()
mockExecutionDependencies()
mockTriggerDevSdk()
vi.doMock('@/services/queue', () => ({
RateLimiter: vi.fn().mockImplementation(() => ({
@@ -309,11 +325,7 @@ describe('Webhook Trigger API Route', () => {
const req = createMockRequest('POST', { event: 'test', id: 'test-123' })
const params = Promise.resolve({ path: 'test-path' })
vi.doMock('@trigger.dev/sdk', () => ({
tasks: {
trigger: vi.fn().mockResolvedValue({ id: 'mock-task-id' }),
},
}))
mockTriggerDevSdk()
const { POST } = await import('@/app/api/webhooks/trigger/[path]/route')
const response = await POST(req, { params })
@@ -339,11 +351,7 @@ describe('Webhook Trigger API Route', () => {
const req = createMockRequest('POST', { event: 'bearer.test' }, headers)
const params = Promise.resolve({ path: 'test-path' })
vi.doMock('@trigger.dev/sdk', () => ({
tasks: {
trigger: vi.fn().mockResolvedValue({ id: 'mock-task-id' }),
},
}))
mockTriggerDevSdk()
const { POST } = await import('@/app/api/webhooks/trigger/[path]/route')
const response = await POST(req, { params })
@@ -369,11 +377,7 @@ describe('Webhook Trigger API Route', () => {
const req = createMockRequest('POST', { event: 'custom.header.test' }, headers)
const params = Promise.resolve({ path: 'test-path' })
vi.doMock('@trigger.dev/sdk', () => ({
tasks: {
trigger: vi.fn().mockResolvedValue({ id: 'mock-task-id' }),
},
}))
mockTriggerDevSdk()
const { POST } = await import('@/app/api/webhooks/trigger/[path]/route')
const response = await POST(req, { params })

View File

@@ -2,12 +2,14 @@ import { tasks } from '@trigger.dev/sdk'
import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { checkServerSideUsageLimits } from '@/lib/billing'
import { env, isTruthy } from '@/lib/env'
import { createLogger } from '@/lib/logs/console/logger'
import {
handleSlackChallenge,
handleWhatsAppVerification,
validateMicrosoftTeamsSignature,
} from '@/lib/webhooks/utils'
import { executeWebhookJob } from '@/background/webhook-execution'
import { db } from '@/db'
import { subscription, webhook, workflow } from '@/db/schema'
import { RateLimiter } from '@/services/queue'
@@ -17,6 +19,7 @@ const logger = createLogger('WebhookTriggerAPI')
export const dynamic = 'force-dynamic'
export const maxDuration = 300
export const runtime = 'nodejs'
/**
* Webhook Verification Handler (GET)
@@ -330,10 +333,9 @@ export async function POST(
// Continue processing - better to risk usage limit bypass than fail webhook
}
// --- PHASE 5: Queue webhook execution via trigger.dev ---
// --- PHASE 5: Queue webhook execution (trigger.dev or direct based on env) ---
try {
// Queue the webhook execution task
const handle = await tasks.trigger('webhook-execution', {
const payload = {
webhookId: foundWebhook.id,
workflowId: foundWorkflow.id,
userId: foundWorkflow.userId,
@@ -342,11 +344,24 @@ export async function POST(
headers: Object.fromEntries(request.headers.entries()),
path,
blockId: foundWebhook.blockId,
})
}
logger.info(
`[${requestId}] Queued webhook execution task ${handle.id} for ${foundWebhook.provider} webhook`
)
const useTrigger = isTruthy(env.TRIGGER_DEV_ENABLED)
if (useTrigger) {
const handle = await tasks.trigger('webhook-execution', payload)
logger.info(
`[${requestId}] Queued webhook execution task ${handle.id} for ${foundWebhook.provider} webhook`
)
} else {
// Fire-and-forget direct execution to avoid blocking webhook response
void executeWebhookJob(payload).catch((error) => {
logger.error(`[${requestId}] Direct webhook execution failed`, error)
})
logger.info(
`[${requestId}] Queued direct webhook execution for ${foundWebhook.provider} webhook (Trigger.dev disabled)`
)
}
// Return immediate acknowledgment with provider-specific format
if (foundWebhook.provider === 'microsoftteams') {

View File

@@ -540,7 +540,7 @@ export async function POST(
)
}
// Rate limit passed - trigger the task
// Rate limit passed - always use Trigger.dev for async executions
const handle = await tasks.trigger('workflow-execution', {
workflowId,
userId: authenticatedUserId,

View File

@@ -11,6 +11,7 @@ import {
DropdownMenuTrigger,
} from '@/components/ui/dropdown-menu'
import { Label } from '@/components/ui/label'
import { getEnv, isTruthy } from '@/lib/env'
interface ExampleCommandProps {
command: string
@@ -32,6 +33,7 @@ export function ExampleCommand({
}: ExampleCommandProps) {
const [mode, setMode] = useState<ExampleMode>('sync')
const [exampleType, setExampleType] = useState<ExampleType>('execute')
const isAsyncEnabled = isTruthy(getEnv('NEXT_PUBLIC_TRIGGER_DEV_ENABLED'))
// Format the curl command to use a placeholder for the API key
const formatCurlCommand = (command: string, apiKey: string) => {
@@ -146,62 +148,67 @@ export function ExampleCommand({
<div className='space-y-1.5'>
<div className='flex items-center justify-between'>
{showLabel && <Label className='font-medium text-sm'>Example</Label>}
<div className='flex items-center gap-1'>
<Button
variant='outline'
size='sm'
onClick={() => setMode('sync')}
className={`h-6 min-w-[50px] px-2 py-1 text-xs transition-none ${
mode === 'sync'
? 'border-primary bg-primary text-primary-foreground hover:border-primary hover:bg-primary hover:text-primary-foreground'
: ''
}`}
>
Sync
</Button>
<Button
variant='outline'
size='sm'
onClick={() => setMode('async')}
className={`h-6 min-w-[50px] px-2 py-1 text-xs transition-none ${
mode === 'async'
? 'border-primary bg-primary text-primary-foreground hover:border-primary hover:bg-primary hover:text-primary-foreground'
: ''
}`}
>
Async
</Button>
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button
variant='outline'
size='sm'
className='h-6 min-w-[140px] justify-between px-2 py-1 text-xs'
disabled={mode === 'sync'}
>
<span className='truncate'>{getExampleTitle()}</span>
<ChevronDown className='ml-1 h-3 w-3 flex-shrink-0' />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align='end'>
<DropdownMenuItem
className='cursor-pointer'
onClick={() => setExampleType('execute')}
>
Async Execution
</DropdownMenuItem>
<DropdownMenuItem className='cursor-pointer' onClick={() => setExampleType('status')}>
Check Job Status
</DropdownMenuItem>
<DropdownMenuItem
className='cursor-pointer'
onClick={() => setExampleType('rate-limits')}
>
Rate Limits & Usage
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</div>
{isAsyncEnabled && (
<div className='flex items-center gap-1'>
<Button
variant='outline'
size='sm'
onClick={() => setMode('sync')}
className={`h-6 min-w-[50px] px-2 py-1 text-xs transition-none ${
mode === 'sync'
? 'border-primary bg-primary text-primary-foreground hover:border-primary hover:bg-primary hover:text-primary-foreground'
: ''
}`}
>
Sync
</Button>
<Button
variant='outline'
size='sm'
onClick={() => setMode('async')}
className={`h-6 min-w-[50px] px-2 py-1 text-xs transition-none ${
mode === 'async'
? 'border-primary bg-primary text-primary-foreground hover:border-primary hover:bg-primary hover:text-primary-foreground'
: ''
}`}
>
Async
</Button>
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button
variant='outline'
size='sm'
className='h-6 min-w-[140px] justify-between px-2 py-1 text-xs'
disabled={mode === 'sync'}
>
<span className='truncate'>{getExampleTitle()}</span>
<ChevronDown className='ml-1 h-3 w-3 flex-shrink-0' />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align='end'>
<DropdownMenuItem
className='cursor-pointer'
onClick={() => setExampleType('execute')}
>
Async Execution
</DropdownMenuItem>
<DropdownMenuItem
className='cursor-pointer'
onClick={() => setExampleType('status')}
>
Check Job Status
</DropdownMenuItem>
<DropdownMenuItem
className='cursor-pointer'
onClick={() => setExampleType('rate-limits')}
>
Rate Limits & Usage
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</div>
)}
</div>
<div className='group relative h-[120px] rounded-md border bg-background transition-colors hover:bg-muted/50'>

View File

@@ -17,362 +17,363 @@ import { mergeSubblockState } from '@/stores/workflows/server-utils'
const logger = createLogger('TriggerWebhookExecution')
// Payload accepted by executeWebhookJob (and the 'webhook-execution' Trigger.dev task).
export type WebhookExecutionPayload = {
  webhookId: string
  workflowId: string
  userId: string
  // Provider slug; 'airtable' and 'whatsapp' are special-cased in executeWebhookJob
  provider: string
  // Raw request body exactly as received from the provider
  body: any
  // Request headers, flattened to a plain string map
  headers: Record<string, string>
  // Webhook path segment the request arrived on
  path: string
  // Optional starting block id, forwarded to executor.execute()
  blockId?: string
}
/**
 * Execute a webhook-triggered workflow run directly (shared by the Trigger.dev task and
 * the direct, non-Trigger execution path).
 *
 * Flow: usage-limit check -> load workflow from normalized tables -> decrypt user env vars
 * -> start logging session -> serialize workflow -> provider-specific input handling
 * (Airtable and WhatsApp short-circuits) -> execute -> update run counts / user stats
 * -> complete logging session.
 *
 * @param payload - Webhook execution details (ids, provider, raw body/headers, optional blockId).
 * @returns Summary object: success flag, workflowId, executionId, output, executedAt (and
 *          provider on the paths that executed a workflow).
 * @throws When usage limits are exceeded, the workflow or webhook record is missing,
 *         env-var decryption fails, or workflow execution itself throws; the error is
 *         recorded in the logging session before being rethrown.
 */
export async function executeWebhookJob(payload: WebhookExecutionPayload) {
  const executionId = uuidv4()
  // Short prefix of the execution id, used purely for log correlation
  const requestId = executionId.slice(0, 8)
  logger.info(`[${requestId}] Starting webhook execution`, {
    webhookId: payload.webhookId,
    workflowId: payload.workflowId,
    provider: payload.provider,
    userId: payload.userId,
    executionId,
  })
  // Initialize logging session outside try block so it's available in catch
  const loggingSession = new LoggingSession(payload.workflowId, executionId, 'webhook', requestId)
  try {
    // Check usage limits first
    const usageCheck = await checkServerSideUsageLimits(payload.userId)
    if (usageCheck.isExceeded) {
      logger.warn(
        `[${requestId}] User ${payload.userId} has exceeded usage limits. Skipping webhook execution.`,
        {
          currentUsage: usageCheck.currentUsage,
          limit: usageCheck.limit,
          workflowId: payload.workflowId,
        }
      )
      throw new Error(
        usageCheck.message ||
          'Usage limit exceeded. Please upgrade your plan to continue using webhooks.'
      )
    }
    // Load workflow from normalized tables
    const workflowData = await loadWorkflowFromNormalizedTables(payload.workflowId)
    if (!workflowData) {
      throw new Error(`Workflow not found: ${payload.workflowId}`)
    }
    const { blocks, edges, loops, parallels } = workflowData
    // Get environment variables (matching workflow-execution pattern)
    const [userEnv] = await db
      .select()
      .from(environmentTable)
      .where(eq(environmentTable.userId, payload.userId))
      .limit(1)
    let decryptedEnvVars: Record<string, string> = {}
    if (userEnv) {
      // Decrypt all user env vars in parallel; any single failure aborts the run
      const decryptionPromises = Object.entries((userEnv.variables as any) || {}).map(
        async ([key, encryptedValue]) => {
          try {
            const { decrypted } = await decryptSecret(encryptedValue as string)
            return [key, decrypted] as const
          } catch (error: any) {
            logger.error(`[${requestId}] Failed to decrypt environment variable "${key}":`, error)
            throw new Error(`Failed to decrypt environment variable "${key}": ${error.message}`)
          }
        }
      )
      const decryptedPairs = await Promise.all(decryptionPromises)
      decryptedEnvVars = Object.fromEntries(decryptedPairs)
    }
    // Start logging session
    await loggingSession.safeStart({
      userId: payload.userId,
      workspaceId: '', // TODO: Get from workflow if needed
      variables: decryptedEnvVars,
    })
    // Merge subblock states (matching workflow-execution pattern)
    const mergedStates = mergeSubblockState(blocks, {})
    // Process block states for execution
    // (flattens each block's subBlocks down to a plain { key: value } map)
    const processedBlockStates = Object.entries(mergedStates).reduce(
      (acc, [blockId, blockState]) => {
        acc[blockId] = Object.entries(blockState.subBlocks).reduce(
          (subAcc, [key, subBlock]) => {
            subAcc[key] = subBlock.value
            return subAcc
          },
          {} as Record<string, any>
        )
        return acc
      },
      {} as Record<string, Record<string, any>>
    )
    // Handle workflow variables (for now, use empty object since we don't have workflow metadata)
    const workflowVariables = {}
    // Create serialized workflow
    const serializer = new Serializer()
    const serializedWorkflow = serializer.serializeWorkflow(
      mergedStates,
      edges,
      loops || {},
      parallels || {},
      true // Enable validation during execution
    )
    // Handle special Airtable case
    if (payload.provider === 'airtable') {
      logger.info(`[${requestId}] Processing Airtable webhook via fetchAndProcessAirtablePayloads`)
      // Load the actual webhook record from database to get providerConfig
      const [webhookRecord] = await db
        .select()
        .from(webhook)
        .where(eq(webhook.id, payload.webhookId))
        .limit(1)
      if (!webhookRecord) {
        throw new Error(`Webhook record not found: ${payload.webhookId}`)
      }
      const webhookData = {
        id: payload.webhookId,
        provider: payload.provider,
        providerConfig: webhookRecord.providerConfig,
      }
      // Create a mock workflow object for Airtable processing
      const mockWorkflow = {
        id: payload.workflowId,
        userId: payload.userId,
      }
      // Get the processed Airtable input
      const airtableInput = await fetchAndProcessAirtablePayloads(
        webhookData,
        mockWorkflow,
        requestId
      )
      // If we got input (changes), execute the workflow like other providers
      if (airtableInput) {
        logger.info(`[${requestId}] Executing workflow with Airtable changes`)
        // Create executor and execute (same as standard webhook flow)
        const executor = new Executor({
          workflow: serializedWorkflow,
          currentBlockStates: processedBlockStates,
          envVarValues: decryptedEnvVars,
          workflowInput: airtableInput,
          workflowVariables,
          contextExtensions: {
            executionId,
            workspaceId: '',
          },
        })
        // Set up logging on the executor
        loggingSession.setupExecutor(executor)
        // Execute the workflow
        const result = await executor.execute(payload.workflowId, payload.blockId)
        // Check if we got a StreamingExecution result
        const executionResult =
          'stream' in result && 'execution' in result ? result.execution : result
        logger.info(`[${requestId}] Airtable webhook execution completed`, {
          success: executionResult.success,
          workflowId: payload.workflowId,
        })
        // Update workflow run counts on success
        if (executionResult.success) {
          await updateWorkflowRunCounts(payload.workflowId)
          // Track execution in user stats
          await db
            .update(userStats)
            .set({
              totalWebhookTriggers: sql`total_webhook_triggers + 1`,
              lastActive: sql`now()`,
            })
            .where(eq(userStats.userId, payload.userId))
        }
        // Build trace spans and complete logging session
        const { traceSpans, totalDuration } = buildTraceSpans(executionResult)
        await loggingSession.safeComplete({
          endedAt: new Date().toISOString(),
          totalDurationMs: totalDuration || 0,
          finalOutput: executionResult.output || {},
          traceSpans: traceSpans as any,
        })
        return {
          success: executionResult.success,
          workflowId: payload.workflowId,
          executionId,
          output: executionResult.output,
          executedAt: new Date().toISOString(),
          provider: payload.provider,
        }
      }
      // No changes to process
      logger.info(`[${requestId}] No Airtable changes to process`)
      await loggingSession.safeComplete({
        endedAt: new Date().toISOString(),
        totalDurationMs: 0,
        finalOutput: { message: 'No Airtable changes to process' },
        traceSpans: [],
      })
      return {
        success: true,
        workflowId: payload.workflowId,
        executionId,
        output: { message: 'No Airtable changes to process' },
        executedAt: new Date().toISOString(),
      }
    }
    // Format input for standard webhooks
    // (minimal stand-ins satisfying formatWebhookInput's expected shapes)
    const mockWebhook = {
      provider: payload.provider,
      blockId: payload.blockId,
    }
    const mockWorkflow = {
      id: payload.workflowId,
      userId: payload.userId,
    }
    const mockRequest = {
      headers: new Map(Object.entries(payload.headers)),
    } as any
    const input = formatWebhookInput(mockWebhook, mockWorkflow, payload.body, mockRequest)
    if (!input && payload.provider === 'whatsapp') {
      logger.info(`[${requestId}] No messages in WhatsApp payload, skipping execution`)
      await loggingSession.safeComplete({
        endedAt: new Date().toISOString(),
        totalDurationMs: 0,
        finalOutput: { message: 'No messages in WhatsApp payload' },
        traceSpans: [],
      })
      return {
        success: true,
        workflowId: payload.workflowId,
        executionId,
        output: { message: 'No messages in WhatsApp payload' },
        executedAt: new Date().toISOString(),
      }
    }
    // Create executor and execute
    const executor = new Executor({
      workflow: serializedWorkflow,
      currentBlockStates: processedBlockStates,
      envVarValues: decryptedEnvVars,
      workflowInput: input || {},
      workflowVariables,
      contextExtensions: {
        executionId,
        workspaceId: '', // TODO: Get from workflow if needed - see comment on line 103
      },
    })
    // Set up logging on the executor
    loggingSession.setupExecutor(executor)
    logger.info(`[${requestId}] Executing workflow for ${payload.provider} webhook`)
    // Execute the workflow
    const result = await executor.execute(payload.workflowId, payload.blockId)
    // Check if we got a StreamingExecution result
    const executionResult = 'stream' in result && 'execution' in result ? result.execution : result
    logger.info(`[${requestId}] Webhook execution completed`, {
      success: executionResult.success,
      workflowId: payload.workflowId,
      provider: payload.provider,
    })
    // Update workflow run counts on success
    if (executionResult.success) {
      await updateWorkflowRunCounts(payload.workflowId)
      // Track execution in user stats
      await db
        .update(userStats)
        .set({
          totalWebhookTriggers: sql`total_webhook_triggers + 1`,
          lastActive: sql`now()`,
        })
        .where(eq(userStats.userId, payload.userId))
    }
    // Build trace spans and complete logging session
    const { traceSpans, totalDuration } = buildTraceSpans(executionResult)
    await loggingSession.safeComplete({
      endedAt: new Date().toISOString(),
      totalDurationMs: totalDuration || 0,
      finalOutput: executionResult.output || {},
      traceSpans: traceSpans as any,
    })
    return {
      success: executionResult.success,
      workflowId: payload.workflowId,
      executionId,
      output: executionResult.output,
      executedAt: new Date().toISOString(),
      provider: payload.provider,
    }
  } catch (error: any) {
    logger.error(`[${requestId}] Webhook execution failed`, {
      error: error.message,
      stack: error.stack,
      workflowId: payload.workflowId,
      provider: payload.provider,
    })
    // Complete logging session with error (matching workflow-execution pattern)
    try {
      await loggingSession.safeCompleteWithError({
        endedAt: new Date().toISOString(),
        totalDurationMs: 0,
        error: {
          message: error.message || 'Webhook execution failed',
          stackTrace: error.stack,
        },
      })
    } catch (loggingError) {
      logger.error(`[${requestId}] Failed to complete logging session`, loggingError)
    }
    throw error
  }
}
/**
 * Trigger.dev task wrapper for webhook-triggered workflow executions.
 *
 * Defect fixed: the object literal previously contained TWO `run` properties —
 * a 300-line inline implementation that duplicated executeWebhookJob verbatim,
 * followed by the delegating arrow. Duplicate properties are a TypeScript error
 * and left two diverging copies of the same logic. The task now has a single
 * `run` that delegates to executeWebhookJob, so both the Trigger.dev path and
 * the direct (non-Trigger) path share one implementation.
 */
export const webhookExecution = task({
  id: 'webhook-execution',
  retry: {
    // At most one attempt; failures are surfaced rather than retried by Trigger.dev
    maxAttempts: 1,
  },
  run: async (payload: WebhookExecutionPayload) => executeWebhookJob(payload),
})

View File

@@ -16,200 +16,202 @@ import { mergeSubblockState } from '@/stores/workflows/server-utils'
const logger = createLogger('TriggerWorkflowExecution')
// Payload accepted by executeWorkflowJob (and the 'workflow-execution' task).
export type WorkflowExecutionPayload = {
  workflowId: string
  userId: string
  // Raw input forwarded to the Executor as workflowInput (defaults to {})
  input?: any
  // Defaults to 'api' when omitted (see executeWorkflowJob); also selects which userStats counter is incremented
  triggerType?: 'api' | 'webhook' | 'schedule' | 'manual' | 'chat'
  // Opaque caller metadata, echoed back unchanged in the execution result
  metadata?: Record<string, any>
}
export async function executeWorkflowJob(payload: WorkflowExecutionPayload) {
const workflowId = payload.workflowId
const executionId = uuidv4()
const requestId = executionId.slice(0, 8)
logger.info(`[${requestId}] Starting workflow execution: ${workflowId}`, {
userId: payload.userId,
triggerType: payload.triggerType,
executionId,
})
// Initialize logging session
const triggerType = payload.triggerType || 'api'
const loggingSession = new LoggingSession(workflowId, executionId, triggerType, requestId)
try {
const usageCheck = await checkServerSideUsageLimits(payload.userId)
if (usageCheck.isExceeded) {
logger.warn(
`[${requestId}] User ${payload.userId} has exceeded usage limits. Skipping workflow execution.`,
{
currentUsage: usageCheck.currentUsage,
limit: usageCheck.limit,
workflowId: payload.workflowId,
}
)
throw new Error(
usageCheck.message ||
'Usage limit exceeded. Please upgrade your plan to continue using workflows.'
)
}
// Load workflow data from deployed state (this task is only used for API executions right now)
const workflowData = await loadDeployedWorkflowState(workflowId)
const { blocks, edges, loops, parallels } = workflowData
// Merge subblock states (server-safe version doesn't need workflowId)
const mergedStates = mergeSubblockState(blocks, {})
// Process block states for execution
const processedBlockStates = Object.entries(mergedStates).reduce(
(acc, [blockId, blockState]) => {
acc[blockId] = Object.entries(blockState.subBlocks).reduce(
(subAcc, [key, subBlock]) => {
subAcc[key] = subBlock.value
return subAcc
},
{} as Record<string, any>
)
return acc
},
{} as Record<string, Record<string, any>>
)
// Get environment variables
const [userEnv] = await db
.select()
.from(environmentTable)
.where(eq(environmentTable.userId, payload.userId))
.limit(1)
let decryptedEnvVars: Record<string, string> = {}
if (userEnv) {
const decryptionPromises = Object.entries((userEnv.variables as any) || {}).map(
async ([key, encryptedValue]) => {
try {
const { decrypted } = await decryptSecret(encryptedValue as string)
return [key, decrypted] as const
} catch (error: any) {
logger.error(`[${requestId}] Failed to decrypt environment variable "${key}":`, error)
throw new Error(`Failed to decrypt environment variable "${key}": ${error.message}`)
}
}
)
const decryptedPairs = await Promise.all(decryptionPromises)
decryptedEnvVars = Object.fromEntries(decryptedPairs)
}
// Start logging session
await loggingSession.safeStart({
userId: payload.userId,
workspaceId: '', // TODO: Get from workflow if needed
variables: decryptedEnvVars,
})
// Create serialized workflow
const serializer = new Serializer()
const serializedWorkflow = serializer.serializeWorkflow(
mergedStates,
edges,
loops || {},
parallels || {},
true // Enable validation during execution
)
// Create executor and execute
const executor = new Executor({
workflow: serializedWorkflow,
currentBlockStates: processedBlockStates,
envVarValues: decryptedEnvVars,
workflowInput: payload.input || {},
workflowVariables: {},
contextExtensions: {
executionId,
workspaceId: '', // TODO: Get from workflow if needed - see comment on line 120
},
})
// Set up logging on the executor
loggingSession.setupExecutor(executor)
const result = await executor.execute(workflowId)
// Handle streaming vs regular result
const executionResult = 'stream' in result && 'execution' in result ? result.execution : result
logger.info(`[${requestId}] Workflow execution completed: ${workflowId}`, {
success: executionResult.success,
executionTime: executionResult.metadata?.duration,
executionId,
})
// Update workflow run counts on success
if (executionResult.success) {
await updateWorkflowRunCounts(workflowId)
// Track execution in user stats
const statsUpdate =
triggerType === 'api'
? { totalApiCalls: sql`total_api_calls + 1` }
: triggerType === 'webhook'
? { totalWebhookTriggers: sql`total_webhook_triggers + 1` }
: triggerType === 'schedule'
? { totalScheduledExecutions: sql`total_scheduled_executions + 1` }
: { totalManualExecutions: sql`total_manual_executions + 1` }
await db
.update(userStats)
.set({
...statsUpdate,
lastActive: sql`now()`,
})
.where(eq(userStats.userId, payload.userId))
}
// Build trace spans and complete logging session (for both success and failure)
const { traceSpans, totalDuration } = buildTraceSpans(executionResult)
await loggingSession.safeComplete({
endedAt: new Date().toISOString(),
totalDurationMs: totalDuration || 0,
finalOutput: executionResult.output || {},
traceSpans: traceSpans as any,
})
return {
success: executionResult.success,
workflowId: payload.workflowId,
executionId,
output: executionResult.output,
executedAt: new Date().toISOString(),
metadata: payload.metadata,
}
} catch (error: any) {
logger.error(`[${requestId}] Workflow execution failed: ${workflowId}`, {
error: error.message,
stack: error.stack,
})
await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: 0,
error: {
message: error.message || 'Workflow execution failed',
stackTrace: error.stack,
},
})
throw error
}
}
/**
 * Trigger.dev task definition for asynchronous workflow executions.
 *
 * All execution logic lives in `executeWorkflowJob` (loading the deployed
 * workflow state, decrypting env vars, running the Executor, logging via
 * LoggingSession, and updating usage stats), so the same code path can be
 * reused by the native background-task runner when Trigger.dev is disabled.
 *
 * NOTE(review): the previous version declared `run` twice in this object
 * literal — an inline implementation followed by the delegating arrow. A
 * duplicate property is a TypeScript compile error and at runtime the second
 * key silently overrides the first; only the delegating `run` is kept here.
 */
export const workflowExecution = task({
  id: 'workflow-execution',
  retry: {
    // Single attempt: executeWorkflowJob records failures through its own
    // logging session, and a retry would double-count run/usage stats.
    maxAttempts: 1,
  },
  run: async (payload: WorkflowExecutionPayload) => executeWorkflowJob(payload),
})

View File

@@ -97,6 +97,7 @@ export const env = createEnv({
// Background Jobs & Scheduling
TRIGGER_SECRET_KEY: z.string().min(1).optional(), // Trigger.dev secret key for background jobs
TRIGGER_DEV_ENABLED: z.boolean().optional(), // Toggle to enable/disable Trigger.dev for async jobs
CRON_SECRET: z.string().optional(), // Secret for authenticating cron job requests
JOB_RETENTION_DAYS: z.string().optional().default('1'), // Days to retain job logs/data
@@ -217,6 +218,9 @@ export const env = createEnv({
NEXT_PUBLIC_BRAND_ACCENT_COLOR: z.string().regex(/^#[0-9A-Fa-f]{6}$/).optional(), // Accent brand color (hex format)
NEXT_PUBLIC_BRAND_ACCENT_HOVER_COLOR: z.string().regex(/^#[0-9A-Fa-f]{6}$/).optional(), // Accent brand hover state (hex format)
NEXT_PUBLIC_BRAND_BACKGROUND_COLOR: z.string().regex(/^#[0-9A-Fa-f]{6}$/).optional(), // Brand background color (hex format)
// Feature Flags
NEXT_PUBLIC_TRIGGER_DEV_ENABLED: z.boolean().optional(), // Client-side gate for async executions UI
},
// Variables available on both server and client
@@ -250,6 +254,7 @@ export const env = createEnv({
NEXT_PUBLIC_BRAND_ACCENT_COLOR: process.env.NEXT_PUBLIC_BRAND_ACCENT_COLOR,
NEXT_PUBLIC_BRAND_ACCENT_HOVER_COLOR: process.env.NEXT_PUBLIC_BRAND_ACCENT_HOVER_COLOR,
NEXT_PUBLIC_BRAND_BACKGROUND_COLOR: process.env.NEXT_PUBLIC_BRAND_BACKGROUND_COLOR,
NEXT_PUBLIC_TRIGGER_DEV_ENABLED: process.env.NEXT_PUBLIC_TRIGGER_DEV_ENABLED,
NODE_ENV: process.env.NODE_ENV,
NEXT_TELEMETRY_DISABLED: process.env.NEXT_TELEMETRY_DISABLED,
},