mirror of https://github.com/simstudioai/sim.git
synced 2026-02-04 19:55:08 -05:00
Compare commits
26 Commits
improvemen...feat/copil
| SHA1 |
|---|
| 8b87a07508 |
| 728463ace7 |
| 8cea43d926 |
| 2198a6caae |
| 0ba8c0ad29 |
| a9e2f4a82e |
| 1e99f45590 |
| 1ec0ec4c1a |
| 2ce78e8c60 |
| ea759b2d00 |
| 09d7fc671f |
| ea210a56a8 |
| c05b70be1e |
| 413c53208e |
| b8ccd71423 |
| 8f556684a6 |
| a391019995 |
| d6179e7691 |
| e68e653d5c |
| 36ec68d93e |
| fce566cc2f |
| 1933e1aad5 |
| 793adda986 |
| 8d846c5983 |
| 362f4c2918 |
| c77e351067 |
@@ -220,9 +220,9 @@ Workflows have maximum execution time limits based on your subscription plan:
| Plan | Sync Execution | Async Execution |
|------|----------------|-----------------|
| **Free** | 5 minutes | 10 minutes |
-| **Pro** | 60 minutes | 90 minutes |
-| **Team** | 60 minutes | 90 minutes |
-| **Enterprise** | 60 minutes | 90 minutes |
+| **Pro** | 50 minutes | 90 minutes |
+| **Team** | 50 minutes | 90 minutes |
+| **Enterprise** | 50 minutes | 90 minutes |

**Sync executions** run immediately and return results directly. These are triggered via the API with `async: false` (default) or through the UI.

**Async executions** (triggered via API with `async: true`, webhooks, or schedules) run in the background. Async time limits are up to 2x the sync limit, capped at 90 minutes.
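For illustration, a minimal sketch of triggering an async execution. Only the `async: true` flag is documented above; the endpoint path, auth header, and response shape here are assumptions:

```typescript
// Hypothetical client sketch (endpoint path and response shape are assumptions).
// Passing async: true queues a background execution subject to the async limit.
const res = await fetch(`https://sim.example.com/api/workflows/${workflowId}/execute`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json', 'x-api-key': apiKey },
  body: JSON.stringify({ async: true, input: { message: 'hello' } }),
})
const { taskId } = await res.json() // poll a task-status endpoint with this id
```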
@@ -7,8 +7,14 @@ import { z } from 'zod'
 import { getSession } from '@/lib/auth'
 import { generateChatTitle } from '@/lib/copilot/chat-title'
 import { getCopilotModel } from '@/lib/copilot/config'
-import { SIM_AGENT_API_URL_DEFAULT, SIM_AGENT_VERSION } from '@/lib/copilot/constants'
+import { SIM_AGENT_VERSION } from '@/lib/copilot/constants'
 import { COPILOT_MODEL_IDS, COPILOT_REQUEST_MODES } from '@/lib/copilot/models'
+import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
+import {
+  createStreamEventWriter,
+  resetStreamBuffer,
+  setStreamMeta,
+} from '@/lib/copilot/orchestrator/stream-buffer'
 import {
   authenticateCopilotRequestSessionOnly,
   createBadRequestResponse,
@@ -21,13 +27,12 @@ import type { CopilotProviderConfig } from '@/lib/copilot/types'
 import { env } from '@/lib/core/config/env'
 import { CopilotFiles } from '@/lib/uploads'
 import { createFileContent } from '@/lib/uploads/utils/file-utils'
+import { resolveWorkflowIdForUser } from '@/lib/workflows/utils'
 import { tools } from '@/tools/registry'
 import { getLatestVersionTools, stripVersionSuffix } from '@/tools/utils'

 const logger = createLogger('CopilotChatAPI')

-const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT
-
 const FileAttachmentSchema = z.object({
   id: z.string(),
   key: z.string(),
@@ -40,7 +45,8 @@ const ChatMessageSchema = z.object({
   message: z.string().min(1, 'Message is required'),
   userMessageId: z.string().optional(), // ID from frontend for the user message
   chatId: z.string().optional(),
-  workflowId: z.string().min(1, 'Workflow ID is required'),
+  workflowId: z.string().optional(),
+  workflowName: z.string().optional(),
   model: z.enum(COPILOT_MODEL_IDS).optional().default('claude-4.5-opus'),
   mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'),
   prefetch: z.boolean().optional(),
@@ -100,7 +106,8 @@ export async function POST(req: NextRequest) {
       message,
       userMessageId,
       chatId,
-      workflowId,
+      workflowId: providedWorkflowId,
+      workflowName,
       model,
       mode,
       prefetch,
@@ -113,6 +120,20 @@ export async function POST(req: NextRequest) {
      contexts,
      commands,
    } = ChatMessageSchema.parse(body)

+    // Resolve workflowId - if not provided, use first workflow or find by name
+    const resolved = await resolveWorkflowIdForUser(
+      authenticatedUserId,
+      providedWorkflowId,
+      workflowName
+    )
+    if (!resolved) {
+      return createBadRequestResponse(
+        'No workflows found. Create a workflow first or provide a valid workflowId.'
+      )
+    }
+    const workflowId = resolved.workflowId
+
    // Ensure we have a consistent user message ID for this request
    const userMessageIdToUse = userMessageId || crypto.randomUUID()
    try {
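With `workflowId` now optional, a client can address a workflow by name. A hedged sketch of the request body (the route path follows this file's location under apps/sim/app/api/copilot/chat; the field values are illustrative):

```typescript
// workflowName is resolved server-side via resolveWorkflowIdForUser;
// omitting both workflowId and workflowName falls back to the user's first workflow.
await fetch('/api/copilot/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    message: 'Add a Slack notification step',
    workflowName: 'My Workflow',
    stream: true,
  }),
})
```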
@@ -465,77 +486,53 @@
        })
      } catch {}

-    const simAgentResponse = await fetch(`${SIM_AGENT_API_URL}/api/chat-completion-streaming`, {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/json',
-        ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}),
-      },
-      body: JSON.stringify(requestPayload),
-    })
-
-    if (!simAgentResponse.ok) {
-      if (simAgentResponse.status === 401 || simAgentResponse.status === 402) {
-        // Rethrow status only; client will render appropriate assistant message
-        return new NextResponse(null, { status: simAgentResponse.status })
-      }
-
-      const errorText = await simAgentResponse.text().catch(() => '')
-      logger.error(`[${tracker.requestId}] Sim agent API error:`, {
-        status: simAgentResponse.status,
-        error: errorText,
-      })
-
-      return NextResponse.json(
-        { error: `Sim agent API error: ${simAgentResponse.statusText}` },
-        { status: simAgentResponse.status }
-      )
-    }
-
-    // If streaming is requested, forward the stream and update chat later
-    if (stream && simAgentResponse.body) {
-      // Create user message to save
-      const userMessage = {
-        id: userMessageIdToUse, // Consistent ID used for request and persistence
-        role: 'user',
-        content: message,
-        timestamp: new Date().toISOString(),
-        ...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }),
-        ...(Array.isArray(contexts) && contexts.length > 0 && { contexts }),
-        ...(Array.isArray(contexts) &&
-          contexts.length > 0 && {
-            contentBlocks: [{ type: 'contexts', contexts: contexts as any, timestamp: Date.now() }],
-          }),
-      }
-
-      // Create a pass-through stream that captures the response
+    if (stream) {
+      const streamId = userMessageIdToUse
+      let eventWriter: ReturnType<typeof createStreamEventWriter> | null = null
+      let clientDisconnected = false
      const transformedStream = new ReadableStream({
        async start(controller) {
          const encoder = new TextEncoder()
-          let assistantContent = ''
-          const toolCalls: any[] = []
-          let buffer = ''
-          const isFirstDone = true
-          let responseIdFromStart: string | undefined
-          let responseIdFromDone: string | undefined
-          // Track tool call progress to identify a safe done event
-          const announcedToolCallIds = new Set<string>()
-          const startedToolExecutionIds = new Set<string>()
-          const completedToolExecutionIds = new Set<string>()
-          let lastDoneResponseId: string | undefined
-          let lastSafeDoneResponseId: string | undefined
-
-          // Send chatId as first event
-          if (actualChatId) {
-            const chatIdEvent = `data: ${JSON.stringify({
-              type: 'chat_id',
-              chatId: actualChatId,
-            })}\n\n`
-            controller.enqueue(encoder.encode(chatIdEvent))
-            logger.debug(`[${tracker.requestId}] Sent initial chatId event to client`)
+          await resetStreamBuffer(streamId)
+          await setStreamMeta(streamId, { status: 'active', userId: authenticatedUserId })
+          eventWriter = createStreamEventWriter(streamId)
+
+          const shouldFlushEvent = (event: Record<string, any>) =>
+            event.type === 'tool_call' ||
+            event.type === 'tool_result' ||
+            event.type === 'tool_error' ||
+            event.type === 'subagent_end' ||
+            event.type === 'structured_result' ||
+            event.type === 'subagent_result' ||
+            event.type === 'done' ||
+            event.type === 'error'
+
+          const pushEvent = async (event: Record<string, any>) => {
+            if (!eventWriter) return
+            const entry = await eventWriter.write(event)
+            if (shouldFlushEvent(event)) {
+              await eventWriter.flush()
+            }
+            const payload = {
+              ...event,
+              eventId: entry.eventId,
+              streamId,
+            }
+            try {
+              if (!clientDisconnected) {
+                controller.enqueue(encoder.encode(`data: ${JSON.stringify(payload)}\n\n`))
+              }
+            } catch {
+              clientDisconnected = true
+              await eventWriter.flush()
+            }
+          }
+
+          if (actualChatId) {
+            await pushEvent({ type: 'chat_id', chatId: actualChatId })
          }

          // Start title generation in parallel if needed
          if (actualChatId && !currentChat?.title && conversationHistory.length === 0) {
            generateChatTitle(message)
              .then(async (title) => {
@@ -547,311 +544,64 @@ export async function POST(req: NextRequest) {
                    updatedAt: new Date(),
                  })
                  .where(eq(copilotChats.id, actualChatId!))

-                const titleEvent = `data: ${JSON.stringify({
-                  type: 'title_updated',
-                  title: title,
-                })}\n\n`
-                controller.enqueue(encoder.encode(titleEvent))
-                logger.info(`[${tracker.requestId}] Generated and saved title: ${title}`)
+                await pushEvent({ type: 'title_updated', title })
              }
            })
            .catch((error) => {
              logger.error(`[${tracker.requestId}] Title generation failed:`, error)
            })
        } else {
          logger.debug(`[${tracker.requestId}] Skipping title generation`)
        }

-          // Forward the sim agent stream and capture assistant response
-          const reader = simAgentResponse.body!.getReader()
-          const decoder = new TextDecoder()
-
          try {
-            while (true) {
-              const { done, value } = await reader.read()
-              if (done) {
-                break
-              }
-
-              // Decode and parse SSE events for logging and capturing content
-              const decodedChunk = decoder.decode(value, { stream: true })
-              buffer += decodedChunk
-
-              const lines = buffer.split('\n')
-              buffer = lines.pop() || '' // Keep incomplete line in buffer
-
-              for (const line of lines) {
-                if (line.trim() === '') continue // Skip empty lines
-
-                if (line.startsWith('data: ') && line.length > 6) {
-                  try {
-                    const jsonStr = line.slice(6)
-
-                    // Check if the JSON string is unusually large (potential streaming issue)
-                    if (jsonStr.length > 50000) {
-                      // 50KB limit
-                      logger.warn(`[${tracker.requestId}] Large SSE event detected`, {
-                        size: jsonStr.length,
-                        preview: `${jsonStr.substring(0, 100)}...`,
-                      })
-                    }
-
-                    const event = JSON.parse(jsonStr)
-
-                    // Log different event types comprehensively
-                    switch (event.type) {
-                      case 'content':
-                        if (event.data) {
-                          assistantContent += event.data
-                        }
-                        break
-
-                      case 'reasoning':
-                        logger.debug(
-                          `[${tracker.requestId}] Reasoning chunk received (${(event.data || event.content || '').length} chars)`
-                        )
-                        break
-
-                      case 'tool_call':
-                        if (!event.data?.partial) {
-                          toolCalls.push(event.data)
-                          if (event.data?.id) {
-                            announcedToolCallIds.add(event.data.id)
-                          }
-                        }
-                        break
-
-                      case 'tool_generating':
-                        if (event.toolCallId) {
-                          startedToolExecutionIds.add(event.toolCallId)
-                        }
-                        break
-
-                      case 'tool_result':
-                        if (event.toolCallId) {
-                          completedToolExecutionIds.add(event.toolCallId)
-                        }
-                        break
-
-                      case 'tool_error':
-                        logger.error(`[${tracker.requestId}] Tool error:`, {
-                          toolCallId: event.toolCallId,
-                          toolName: event.toolName,
-                          error: event.error,
-                          success: event.success,
-                        })
-                        if (event.toolCallId) {
-                          completedToolExecutionIds.add(event.toolCallId)
-                        }
-                        break
-
-                      case 'start':
-                        if (event.data?.responseId) {
-                          responseIdFromStart = event.data.responseId
-                        }
-                        break
-
-                      case 'done':
-                        if (event.data?.responseId) {
-                          responseIdFromDone = event.data.responseId
-                          lastDoneResponseId = responseIdFromDone
-
-                          // Mark this done as safe only if no tool call is currently in progress or pending
-                          const announced = announcedToolCallIds.size
-                          const completed = completedToolExecutionIds.size
-                          const started = startedToolExecutionIds.size
-                          const hasToolInProgress = announced > completed || started > completed
-                          if (!hasToolInProgress) {
-                            lastSafeDoneResponseId = responseIdFromDone
-                          }
-                        }
-                        break
-
-                      case 'error':
-                        break
-
-                      default:
-                    }
-
-                    // Emit to client: rewrite 'error' events into user-friendly assistant message
-                    if (event?.type === 'error') {
-                      try {
-                        const displayMessage: string =
-                          (event?.data && (event.data.displayMessage as string)) ||
-                          'Sorry, I encountered an error. Please try again.'
-                        const formatted = `_${displayMessage}_`
-                        // Accumulate so it persists to DB as assistant content
-                        assistantContent += formatted
-                        // Send as content chunk
-                        try {
-                          controller.enqueue(
-                            encoder.encode(
-                              `data: ${JSON.stringify({ type: 'content', data: formatted })}\n\n`
-                            )
-                          )
-                        } catch (enqueueErr) {
-                          reader.cancel()
-                          break
-                        }
-                        // Then close this response cleanly for the client
-                        try {
-                          controller.enqueue(
-                            encoder.encode(`data: ${JSON.stringify({ type: 'done' })}\n\n`)
-                          )
-                        } catch (enqueueErr) {
-                          reader.cancel()
-                          break
-                        }
-                      } catch {}
-                      // Do not forward the original error event
-                    } else {
-                      // Forward original event to client
-                      try {
-                        controller.enqueue(encoder.encode(`data: ${jsonStr}\n\n`))
-                      } catch (enqueueErr) {
-                        reader.cancel()
-                        break
-                      }
-                    }
-                  } catch (e) {
-                    // Enhanced error handling for large payloads and parsing issues
-                    const lineLength = line.length
-                    const isLargePayload = lineLength > 10000
-
-                    if (isLargePayload) {
-                      logger.error(
-                        `[${tracker.requestId}] Failed to parse large SSE event (${lineLength} chars)`,
-                        {
-                          error: e,
-                          preview: `${line.substring(0, 200)}...`,
-                          size: lineLength,
-                        }
-                      )
-                    } else {
-                      logger.warn(
-                        `[${tracker.requestId}] Failed to parse SSE event: "${line.substring(0, 200)}..."`,
-                        e
-                      )
-                    }
-                  }
-                } else if (line.trim() && line !== 'data: [DONE]') {
-                  logger.debug(`[${tracker.requestId}] Non-SSE line from sim agent: "${line}"`)
-                }
-              }
-            }
-
-            // Process any remaining buffer
-            if (buffer.trim()) {
-              logger.debug(`[${tracker.requestId}] Processing remaining buffer: "${buffer}"`)
-              if (buffer.startsWith('data: ')) {
-                try {
-                  const jsonStr = buffer.slice(6)
-                  const event = JSON.parse(jsonStr)
-                  if (event.type === 'content' && event.data) {
-                    assistantContent += event.data
-                  }
-                  // Forward remaining event, applying same error rewrite behavior
-                  if (event?.type === 'error') {
-                    const displayMessage: string =
-                      (event?.data && (event.data.displayMessage as string)) ||
-                      'Sorry, I encountered an error. Please try again.'
-                    const formatted = `_${displayMessage}_`
-                    assistantContent += formatted
-                    try {
-                      controller.enqueue(
-                        encoder.encode(
-                          `data: ${JSON.stringify({ type: 'content', data: formatted })}\n\n`
-                        )
-                      )
-                      controller.enqueue(
-                        encoder.encode(`data: ${JSON.stringify({ type: 'done' })}\n\n`)
-                      )
-                    } catch (enqueueErr) {
-                      reader.cancel()
-                    }
-                  } else {
-                    try {
-                      controller.enqueue(encoder.encode(`data: ${jsonStr}\n\n`))
-                    } catch (enqueueErr) {
-                      reader.cancel()
-                    }
-                  }
-                } catch (e) {
-                  logger.warn(`[${tracker.requestId}] Failed to parse final buffer: "${buffer}"`)
-                }
-              }
-            }
-
-            // Log final streaming summary
-            logger.info(`[${tracker.requestId}] Streaming complete summary:`, {
-              totalContentLength: assistantContent.length,
-              toolCallsCount: toolCalls.length,
-              hasContent: assistantContent.length > 0,
-              toolNames: toolCalls.map((tc) => tc?.name).filter(Boolean),
+            const result = await orchestrateCopilotStream(requestPayload, {
+              userId: authenticatedUserId,
+              workflowId,
+              chatId: actualChatId,
+              autoExecuteTools: true,
+              interactive: true,
+              onEvent: async (event) => {
+                await pushEvent(event)
+              },
+            })

+            // NOTE: Messages are saved by the client via update-messages endpoint with full contentBlocks.
+            // Server only updates conversationId here to avoid overwriting client's richer save.
-            if (currentChat) {
-              // Persist only a safe conversationId to avoid continuing from a state that expects tool outputs
-              const previousConversationId = currentChat?.conversationId as string | undefined
-              const responseId = lastSafeDoneResponseId || previousConversationId || undefined
-
-              if (responseId) {
-                await db
-                  .update(copilotChats)
-                  .set({
-                    updatedAt: new Date(),
-                    conversationId: responseId,
-                  })
-                  .where(eq(copilotChats.id, actualChatId!))
-
-                logger.info(
-                  `[${tracker.requestId}] Updated conversationId for chat ${actualChatId}`,
-                  {
-                    updatedConversationId: responseId,
-                  }
-                )
-              }
+            if (currentChat && result.conversationId) {
+              await db
+                .update(copilotChats)
+                .set({
+                  updatedAt: new Date(),
+                  conversationId: result.conversationId,
+                })
+                .where(eq(copilotChats.id, actualChatId!))
            }
+            await eventWriter.close()
+            await setStreamMeta(streamId, { status: 'complete', userId: authenticatedUserId })
          } catch (error) {
-            logger.error(`[${tracker.requestId}] Error processing stream:`, error)
-
-            // Send an error event to the client before closing so it knows what happened
-            try {
-              const errorMessage =
-                error instanceof Error && error.message === 'terminated'
-                  ? 'Connection to AI service was interrupted. Please try again.'
-                  : 'An unexpected error occurred while processing the response.'
-              const encoder = new TextEncoder()
-
-              // Send error as content so it shows in the chat
-              controller.enqueue(
-                encoder.encode(
-                  `data: ${JSON.stringify({ type: 'content', data: `\n\n_${errorMessage}_` })}\n\n`
-                )
-              )
-              // Send done event to properly close the stream on client
-              controller.enqueue(encoder.encode(`data: ${JSON.stringify({ type: 'done' })}\n\n`))
-            } catch (enqueueError) {
-              // Stream might already be closed, that's ok
-              logger.warn(
-                `[${tracker.requestId}] Could not send error event to client:`,
-                enqueueError
-              )
-            }
+            logger.error(`[${tracker.requestId}] Orchestration error:`, error)
+            await eventWriter.close()
+            await setStreamMeta(streamId, {
+              status: 'error',
+              userId: authenticatedUserId,
+              error: error instanceof Error ? error.message : 'Stream error',
+            })
+            await pushEvent({
+              type: 'error',
+              data: {
+                displayMessage: 'An unexpected error occurred while processing the response.',
+              },
+            })
          } finally {
-            try {
-              controller.close()
-            } catch {
-              // Controller might already be closed
-            }
+            controller.close()
          }
        },
+        async cancel() {
+          clientDisconnected = true
+          if (eventWriter) {
+            await eventWriter.flush()
+          }
+        },
      })

-      const response = new Response(transformedStream, {
+      return new Response(transformedStream, {
        headers: {
          'Content-Type': 'text/event-stream',
          'Cache-Control': 'no-cache',
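Each event forwarded to the client by `pushEvent` above carries the buffer-assigned `eventId` and the `streamId`, which is what makes resume possible. A sketch of one SSE frame on the wire (field values illustrative):

```typescript
// Illustrative frame only; the envelope shape follows pushEvent's payload construction.
const exampleFrame =
  'data: {"type":"tool_result","toolCallId":"tc_1","eventId":42,"streamId":"<userMessageId>"}\n\n'
```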
@@ -859,43 +609,31 @@
        'X-Accel-Buffering': 'no',
      },
    })
-
-      logger.info(`[${tracker.requestId}] Returning streaming response to client`, {
-        duration: tracker.getDuration(),
-        chatId: actualChatId,
-        headers: {
-          'Content-Type': 'text/event-stream',
-          'Cache-Control': 'no-cache',
-          Connection: 'keep-alive',
-        },
-      })
-
-      return response
    }

    // For non-streaming responses
-    const responseData = await simAgentResponse.json()
-    logger.info(`[${tracker.requestId}] Non-streaming response from sim agent:`, {
+    const nonStreamingResult = await orchestrateCopilotStream(requestPayload, {
+      userId: authenticatedUserId,
+      workflowId,
+      chatId: actualChatId,
+      autoExecuteTools: true,
+      interactive: true,
+    })
+
+    const responseData = {
+      content: nonStreamingResult.content,
+      toolCalls: nonStreamingResult.toolCalls,
+      model: selectedModel,
+      provider: providerConfig?.provider || env.COPILOT_PROVIDER || 'openai',
+    }
+
+    logger.info(`[${tracker.requestId}] Non-streaming response from orchestrator:`, {
      hasContent: !!responseData.content,
      contentLength: responseData.content?.length || 0,
      model: responseData.model,
      provider: responseData.provider,
      toolCallsCount: responseData.toolCalls?.length || 0,
      hasTokens: !!responseData.tokens,
    })

    // Log tool calls if present
    if (responseData.toolCalls?.length > 0) {
      responseData.toolCalls.forEach((toolCall: any) => {
        logger.info(`[${tracker.requestId}] Tool call in response:`, {
          id: toolCall.id,
          name: toolCall.name,
          success: toolCall.success,
          result: `${JSON.stringify(toolCall.result).substring(0, 200)}...`,
        })
      })
    }

    // Save messages if we have a chat
    if (currentChat && responseData.content) {
      const userMessage = {
@@ -947,6 +685,9 @@ export async function POST(req: NextRequest) {
        .set({
          messages: updatedMessages,
          updatedAt: new Date(),
+          ...(nonStreamingResult.conversationId
+            ? { conversationId: nonStreamingResult.conversationId }
+            : {}),
        })
        .where(eq(copilotChats.id, actualChatId!))
    }
@@ -998,10 +739,7 @@ export async function GET(req: NextRequest) {
  try {
    const { searchParams } = new URL(req.url)
    const workflowId = searchParams.get('workflowId')
-
-    if (!workflowId) {
-      return createBadRequestResponse('workflowId is required')
-    }
+    const chatId = searchParams.get('chatId')

    // Get authenticated user using consolidated helper
    const { userId: authenticatedUserId, isAuthenticated } =
@@ -1010,6 +748,47 @@
      return createUnauthorizedResponse()
    }

+    // If chatId is provided, fetch a single chat
+    if (chatId) {
+      const [chat] = await db
+        .select({
+          id: copilotChats.id,
+          title: copilotChats.title,
+          model: copilotChats.model,
+          messages: copilotChats.messages,
+          planArtifact: copilotChats.planArtifact,
+          config: copilotChats.config,
+          createdAt: copilotChats.createdAt,
+          updatedAt: copilotChats.updatedAt,
+        })
+        .from(copilotChats)
+        .where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, authenticatedUserId)))
+        .limit(1)
+
+      if (!chat) {
+        return NextResponse.json({ success: false, error: 'Chat not found' }, { status: 404 })
+      }
+
+      const transformedChat = {
+        id: chat.id,
+        title: chat.title,
+        model: chat.model,
+        messages: Array.isArray(chat.messages) ? chat.messages : [],
+        messageCount: Array.isArray(chat.messages) ? chat.messages.length : 0,
+        planArtifact: chat.planArtifact || null,
+        config: chat.config || null,
+        createdAt: chat.createdAt,
+        updatedAt: chat.updatedAt,
+      }
+
+      logger.info(`Retrieved chat ${chatId}`)
+      return NextResponse.json({ success: true, chat: transformedChat })
+    }
+
+    if (!workflowId) {
+      return createBadRequestResponse('workflowId or chatId is required')
+    }
+
    // Fetch chats for this user and workflow
    const chats = await db
      .select({
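A hedged usage sketch of the new single-chat lookup (route path follows this file's location; response shape follows the handler above):

```typescript
// Fetch one chat by id instead of listing chats by workflow.
const res = await fetch(`/api/copilot/chat?chatId=${chatId}`)
const { success, chat } = await res.json()
if (success) console.log(chat.title, chat.messageCount)
```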
130  apps/sim/app/api/copilot/chat/stream/route.ts  Normal file
@@ -0,0 +1,130 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import {
  getStreamMeta,
  readStreamEvents,
  type StreamMeta,
} from '@/lib/copilot/orchestrator/stream-buffer'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import { SSE_HEADERS } from '@/lib/core/utils/sse'

const logger = createLogger('CopilotChatStreamAPI')
const POLL_INTERVAL_MS = 250
const MAX_STREAM_MS = 10 * 60 * 1000

function encodeEvent(event: Record<string, any>): Uint8Array {
  return new TextEncoder().encode(`data: ${JSON.stringify(event)}\n\n`)
}

export async function GET(request: NextRequest) {
  const { userId: authenticatedUserId, isAuthenticated } =
    await authenticateCopilotRequestSessionOnly()

  if (!isAuthenticated || !authenticatedUserId) {
    return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
  }

  const url = new URL(request.url)
  const streamId = url.searchParams.get('streamId') || ''
  const fromParam = url.searchParams.get('from') || '0'
  const fromEventId = Number(fromParam || 0)
  // If batch=true, return buffered events as JSON instead of SSE
  const batchMode = url.searchParams.get('batch') === 'true'
  const toParam = url.searchParams.get('to')
  const toEventId = toParam ? Number(toParam) : undefined

  if (!streamId) {
    return NextResponse.json({ error: 'streamId is required' }, { status: 400 })
  }

  const meta = (await getStreamMeta(streamId)) as StreamMeta | null
  logger.info('[Resume] Stream lookup', {
    streamId,
    fromEventId,
    toEventId,
    batchMode,
    hasMeta: !!meta,
    metaStatus: meta?.status,
  })
  if (!meta) {
    return NextResponse.json({ error: 'Stream not found' }, { status: 404 })
  }
  if (meta.userId && meta.userId !== authenticatedUserId) {
    return NextResponse.json({ error: 'Unauthorized' }, { status: 403 })
  }

  // Batch mode: return all buffered events as JSON
  if (batchMode) {
    const events = await readStreamEvents(streamId, fromEventId)
    const filteredEvents = toEventId ? events.filter((e) => e.eventId <= toEventId) : events
    logger.info('[Resume] Batch response', {
      streamId,
      fromEventId,
      toEventId,
      eventCount: filteredEvents.length,
    })
    return NextResponse.json({
      success: true,
      events: filteredEvents,
      status: meta.status,
    })
  }

  const startTime = Date.now()

  const stream = new ReadableStream({
    async start(controller) {
      let lastEventId = Number.isFinite(fromEventId) ? fromEventId : 0

      const flushEvents = async () => {
        const events = await readStreamEvents(streamId, lastEventId)
        if (events.length > 0) {
          logger.info('[Resume] Flushing events', {
            streamId,
            fromEventId: lastEventId,
            eventCount: events.length,
          })
        }
        for (const entry of events) {
          lastEventId = entry.eventId
          const payload = {
            ...entry.event,
            eventId: entry.eventId,
            streamId: entry.streamId,
          }
          controller.enqueue(encodeEvent(payload))
        }
      }

      try {
        await flushEvents()

        while (Date.now() - startTime < MAX_STREAM_MS) {
          const currentMeta = await getStreamMeta(streamId)
          if (!currentMeta) break

          await flushEvents()

          if (currentMeta.status === 'complete' || currentMeta.status === 'error') {
            break
          }

          if (request.signal.aborted) {
            break
          }

          await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS))
        }
      } catch (error) {
        logger.warn('Stream replay failed', {
          streamId,
          error: error instanceof Error ? error.message : String(error),
        })
      } finally {
        controller.close()
      }
    },
  })

  return new Response(stream, { headers: SSE_HEADERS })
}
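A hedged client-side sketch of resuming a dropped stream with this endpoint. The route path matches the file above; the event fields follow the code, and the persistence strategy is an assumption:

```typescript
// Resume from the last eventId the client saw; the server replays buffered
// events and then live-polls until the stream completes or errors.
const es = new EventSource(`/api/copilot/chat/stream?streamId=${streamId}&from=${lastEventId}`)
es.onmessage = (msg) => {
  const event = JSON.parse(msg.data)
  lastEventId = event.eventId // persist this for the next resume
  if (event.type === 'done' || event.type === 'error') es.close()
}
```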
@@ -21,6 +21,7 @@ const UpdateCreatorProfileSchema = z.object({
  name: z.string().min(1, 'Name is required').max(100, 'Max 100 characters').optional(),
  profileImageUrl: z.string().optional().or(z.literal('')),
  details: CreatorProfileDetailsSchema.optional(),
+  verified: z.boolean().optional(), // Verification status (super users only)
})

// Helper to check if user has permission to manage profile
@@ -97,11 +98,29 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
      return NextResponse.json({ error: 'Profile not found' }, { status: 404 })
    }

-    // Check permissions
-    const canEdit = await hasPermission(session.user.id, existing[0])
-    if (!canEdit) {
-      logger.warn(`[${requestId}] User denied permission to update profile: ${id}`)
-      return NextResponse.json({ error: 'Access denied' }, { status: 403 })
+    // Verification changes require super user permission
+    if (data.verified !== undefined) {
+      const { verifyEffectiveSuperUser } = await import('@/lib/templates/permissions')
+      const { effectiveSuperUser } = await verifyEffectiveSuperUser(session.user.id)
+      if (!effectiveSuperUser) {
+        logger.warn(`[${requestId}] Non-super user attempted to change creator verification: ${id}`)
+        return NextResponse.json(
+          { error: 'Only super users can change verification status' },
+          { status: 403 }
+        )
+      }
+    }
+
+    // For non-verified updates, check regular permissions
+    const hasNonVerifiedUpdates =
+      data.name !== undefined || data.profileImageUrl !== undefined || data.details !== undefined
+
+    if (hasNonVerifiedUpdates) {
+      const canEdit = await hasPermission(session.user.id, existing[0])
+      if (!canEdit) {
+        logger.warn(`[${requestId}] User denied permission to update profile: ${id}`)
+        return NextResponse.json({ error: 'Access denied' }, { status: 403 })
+      }
    }

    const updateData: any = {
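With the dedicated verify route removed (see the deleted file below), verification now flows through this consolidated PUT handler. A hedged sketch of a super user toggling it (the exact mount point of the creators route is an assumption):

```typescript
// Only super users may send `verified`; other fields use the regular
// hasPermission check. Route path is an assumption for illustration.
await fetch(`/api/creators/${creatorId}`, {
  method: 'PUT',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ verified: true }),
})
```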
@@ -111,6 +130,7 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
    if (data.name !== undefined) updateData.name = data.name
    if (data.profileImageUrl !== undefined) updateData.profileImageUrl = data.profileImageUrl
    if (data.details !== undefined) updateData.details = data.details
+    if (data.verified !== undefined) updateData.verified = data.verified

    const updated = await db
      .update(templateCreators)
@@ -1,113 +0,0 @@
import { db } from '@sim/db'
import { templateCreators } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { generateRequestId } from '@/lib/core/utils/request'
import { verifyEffectiveSuperUser } from '@/lib/templates/permissions'

const logger = createLogger('CreatorVerificationAPI')

export const revalidate = 0

// POST /api/creators/[id]/verify - Verify a creator (super users only)
export async function POST(request: NextRequest, { params }: { params: Promise<{ id: string }> }) {
  const requestId = generateRequestId()
  const { id } = await params

  try {
    const session = await getSession()
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthorized verification attempt for creator: ${id}`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    // Check if user is a super user
    const { effectiveSuperUser } = await verifyEffectiveSuperUser(session.user.id)
    if (!effectiveSuperUser) {
      logger.warn(`[${requestId}] Non-super user attempted to verify creator: ${id}`)
      return NextResponse.json({ error: 'Only super users can verify creators' }, { status: 403 })
    }

    // Check if creator exists
    const existingCreator = await db
      .select()
      .from(templateCreators)
      .where(eq(templateCreators.id, id))
      .limit(1)

    if (existingCreator.length === 0) {
      logger.warn(`[${requestId}] Creator not found for verification: ${id}`)
      return NextResponse.json({ error: 'Creator not found' }, { status: 404 })
    }

    // Update creator verified status to true
    await db
      .update(templateCreators)
      .set({ verified: true, updatedAt: new Date() })
      .where(eq(templateCreators.id, id))

    logger.info(`[${requestId}] Creator verified: ${id} by super user: ${session.user.id}`)

    return NextResponse.json({
      message: 'Creator verified successfully',
      creatorId: id,
    })
  } catch (error) {
    logger.error(`[${requestId}] Error verifying creator ${id}`, error)
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}

// DELETE /api/creators/[id]/verify - Unverify a creator (super users only)
export async function DELETE(
  request: NextRequest,
  { params }: { params: Promise<{ id: string }> }
) {
  const requestId = generateRequestId()
  const { id } = await params

  try {
    const session = await getSession()
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthorized unverification attempt for creator: ${id}`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    // Check if user is a super user
    const { effectiveSuperUser } = await verifyEffectiveSuperUser(session.user.id)
    if (!effectiveSuperUser) {
      logger.warn(`[${requestId}] Non-super user attempted to unverify creator: ${id}`)
      return NextResponse.json({ error: 'Only super users can unverify creators' }, { status: 403 })
    }

    // Check if creator exists
    const existingCreator = await db
      .select()
      .from(templateCreators)
      .where(eq(templateCreators.id, id))
      .limit(1)

    if (existingCreator.length === 0) {
      logger.warn(`[${requestId}] Creator not found for unverification: ${id}`)
      return NextResponse.json({ error: 'Creator not found' }, { status: 404 })
    }

    // Update creator verified status to false
    await db
      .update(templateCreators)
      .set({ verified: false, updatedAt: new Date() })
      .where(eq(templateCreators.id, id))

    logger.info(`[${requestId}] Creator unverified: ${id} by super user: ${session.user.id}`)

    return NextResponse.json({
      message: 'Creator unverified successfully',
      creatorId: id,
    })
  } catch (error) {
    logger.error(`[${requestId}] Error unverifying creator ${id}`, error)
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}
@@ -1,9 +1,10 @@
-import { db } from '@sim/db'
+import { asyncJobs, db } from '@sim/db'
 import { workflowExecutionLogs } from '@sim/db/schema'
 import { createLogger } from '@sim/logger'
-import { and, eq, lt, sql } from 'drizzle-orm'
+import { and, eq, inArray, lt, sql } from 'drizzle-orm'
 import { type NextRequest, NextResponse } from 'next/server'
 import { verifyCronAuth } from '@/lib/auth/internal'
+import { JOB_RETENTION_HOURS, JOB_STATUS } from '@/lib/core/async-jobs'
 import { getMaxExecutionTimeout } from '@/lib/core/execution-limits'

 const logger = createLogger('CleanupStaleExecutions')
@@ -80,12 +81,102 @@ export async function GET(request: NextRequest) {

    logger.info(`Stale execution cleanup completed. Cleaned: ${cleaned}, Failed: ${failed}`)

+    // Clean up stale async jobs (stuck in processing)
+    let asyncJobsMarkedFailed = 0
+
+    try {
+      const staleAsyncJobs = await db
+        .update(asyncJobs)
+        .set({
+          status: JOB_STATUS.FAILED,
+          completedAt: new Date(),
+          error: `Job terminated: stuck in processing for more than ${STALE_THRESHOLD_MINUTES} minutes`,
+          updatedAt: new Date(),
+        })
+        .where(
+          and(eq(asyncJobs.status, JOB_STATUS.PROCESSING), lt(asyncJobs.startedAt, staleThreshold))
+        )
+        .returning({ id: asyncJobs.id })
+
+      asyncJobsMarkedFailed = staleAsyncJobs.length
+      if (asyncJobsMarkedFailed > 0) {
+        logger.info(`Marked ${asyncJobsMarkedFailed} stale async jobs as failed`)
+      }
+    } catch (error) {
+      logger.error('Failed to clean up stale async jobs:', {
+        error: error instanceof Error ? error.message : String(error),
+      })
+    }
+
+    // Clean up stale pending jobs (never started, e.g., due to server crash before startJob())
+    let stalePendingJobsMarkedFailed = 0
+
+    try {
+      const stalePendingJobs = await db
+        .update(asyncJobs)
+        .set({
+          status: JOB_STATUS.FAILED,
+          completedAt: new Date(),
+          error: `Job terminated: stuck in pending state for more than ${STALE_THRESHOLD_MINUTES} minutes (never started)`,
+          updatedAt: new Date(),
+        })
+        .where(
+          and(eq(asyncJobs.status, JOB_STATUS.PENDING), lt(asyncJobs.createdAt, staleThreshold))
+        )
+        .returning({ id: asyncJobs.id })
+
+      stalePendingJobsMarkedFailed = stalePendingJobs.length
+      if (stalePendingJobsMarkedFailed > 0) {
+        logger.info(`Marked ${stalePendingJobsMarkedFailed} stale pending jobs as failed`)
+      }
+    } catch (error) {
+      logger.error('Failed to clean up stale pending jobs:', {
+        error: error instanceof Error ? error.message : String(error),
+      })
+    }
+
+    // Delete completed/failed jobs older than retention period
+    const retentionThreshold = new Date(Date.now() - JOB_RETENTION_HOURS * 60 * 60 * 1000)
+    let asyncJobsDeleted = 0
+
+    try {
+      const deletedJobs = await db
+        .delete(asyncJobs)
+        .where(
+          and(
+            inArray(asyncJobs.status, [JOB_STATUS.COMPLETED, JOB_STATUS.FAILED]),
+            lt(asyncJobs.completedAt, retentionThreshold)
+          )
+        )
+        .returning({ id: asyncJobs.id })
+
+      asyncJobsDeleted = deletedJobs.length
+      if (asyncJobsDeleted > 0) {
+        logger.info(
+          `Deleted ${asyncJobsDeleted} old async jobs (retention: ${JOB_RETENTION_HOURS}h)`
+        )
+      }
+    } catch (error) {
+      logger.error('Failed to delete old async jobs:', {
+        error: error instanceof Error ? error.message : String(error),
+      })
+    }
+
    return NextResponse.json({
      success: true,
-      found: staleExecutions.length,
-      cleaned,
-      failed,
-      thresholdMinutes: STALE_THRESHOLD_MINUTES,
+      executions: {
+        found: staleExecutions.length,
+        cleaned,
+        failed,
+        thresholdMinutes: STALE_THRESHOLD_MINUTES,
+      },
+      asyncJobs: {
+        staleProcessingMarkedFailed: asyncJobsMarkedFailed,
+        stalePendingMarkedFailed: stalePendingJobsMarkedFailed,
+        oldDeleted: asyncJobsDeleted,
+        staleThresholdMinutes: STALE_THRESHOLD_MINUTES,
+        retentionHours: JOB_RETENTION_HOURS,
+      },
    })
  } catch (error) {
    logger.error('Error in stale execution cleanup job:', error)
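For reference, a sketch of the summary the cron endpoint now returns. The shape mirrors the response object above; counts and threshold values are illustrative, since the runtime constants are not shown in this hunk:

```typescript
// Illustrative shape only; values depend on runtime constants and job state.
const exampleCleanupSummary = {
  success: true,
  executions: { found: 3, cleaned: 3, failed: 0, thresholdMinutes: 60 },
  asyncJobs: {
    staleProcessingMarkedFailed: 1,
    stalePendingMarkedFailed: 0,
    oldDeleted: 12,
    staleThresholdMinutes: 60,
    retentionHours: 24,
  },
}
```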
@@ -1,7 +1,7 @@
 import { createLogger } from '@sim/logger'
-import { runs } from '@trigger.dev/sdk'
 import { type NextRequest, NextResponse } from 'next/server'
 import { checkHybridAuth } from '@/lib/auth/hybrid'
+import { getJobQueue, JOB_STATUS } from '@/lib/core/async-jobs'
 import { generateRequestId } from '@/lib/core/utils/request'
 import { createErrorResponse } from '@/app/api/workflows/utils'
@@ -15,8 +15,6 @@ export async function GET(
  const requestId = generateRequestId()

  try {
-    logger.debug(`[${requestId}] Getting status for task: ${taskId}`)
-
    const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
    if (!authResult.success || !authResult.userId) {
      logger.warn(`[${requestId}] Unauthorized task status request`)
@@ -25,76 +23,60 @@

    const authenticatedUserId = authResult.userId

-    const run = await runs.retrieve(taskId)
+    const jobQueue = await getJobQueue()
+    const job = await jobQueue.getJob(taskId)

-    logger.debug(`[${requestId}] Task ${taskId} status: ${run.status}`)
-
-    const payload = run.payload as any
-    if (payload?.workflowId) {
-      const { verifyWorkflowAccess } = await import('@/socket/middleware/permissions')
-      const accessCheck = await verifyWorkflowAccess(authenticatedUserId, payload.workflowId)
-      if (!accessCheck.hasAccess) {
-        logger.warn(`[${requestId}] User ${authenticatedUserId} denied access to task ${taskId}`, {
-          workflowId: payload.workflowId,
-        })
-        return createErrorResponse('Access denied', 403)
-      }
-      logger.debug(`[${requestId}] User ${authenticatedUserId} has access to task ${taskId}`)
-    } else {
-      if (payload?.userId && payload.userId !== authenticatedUserId) {
-        logger.warn(
-          `[${requestId}] User ${authenticatedUserId} attempted to access task ${taskId} owned by ${payload.userId}`
-        )
-        return createErrorResponse('Access denied', 403)
-      }
-      if (!payload?.userId) {
-        logger.warn(
-          `[${requestId}] Task ${taskId} has no ownership information in payload. Denying access for security.`
-        )
-        return createErrorResponse('Access denied', 403)
-      }
+    if (!job) {
+      return createErrorResponse('Task not found', 404)
    }

-    const statusMap = {
-      QUEUED: 'queued',
-      WAITING_FOR_DEPLOY: 'queued',
-      EXECUTING: 'processing',
-      RESCHEDULED: 'processing',
-      FROZEN: 'processing',
-      COMPLETED: 'completed',
-      CANCELED: 'cancelled',
-      FAILED: 'failed',
-      CRASHED: 'failed',
-      INTERRUPTED: 'failed',
-      SYSTEM_FAILURE: 'failed',
-      EXPIRED: 'failed',
-    } as const
+    if (job.metadata?.workflowId) {
+      const { verifyWorkflowAccess } = await import('@/socket/middleware/permissions')
+      const accessCheck = await verifyWorkflowAccess(
+        authenticatedUserId,
+        job.metadata.workflowId as string
+      )
+      if (!accessCheck.hasAccess) {
+        logger.warn(`[${requestId}] Access denied to workflow ${job.metadata.workflowId}`)
+        return createErrorResponse('Access denied', 403)
+      }
+    } else if (job.metadata?.userId && job.metadata.userId !== authenticatedUserId) {
+      logger.warn(`[${requestId}] Access denied to user ${job.metadata.userId}`)
+      return createErrorResponse('Access denied', 403)
+    } else if (!job.metadata?.userId && !job.metadata?.workflowId) {
+      logger.warn(`[${requestId}] Access denied to job ${taskId}`)
+      return createErrorResponse('Access denied', 403)
+    }

-    const mappedStatus = statusMap[run.status as keyof typeof statusMap] || 'unknown'
+    const mappedStatus = job.status === JOB_STATUS.PENDING ? 'queued' : job.status

    const response: any = {
      success: true,
      taskId,
      status: mappedStatus,
      metadata: {
-        startedAt: run.startedAt,
+        startedAt: job.startedAt,
      },
    }

-    if (mappedStatus === 'completed') {
-      response.output = run.output // This contains the workflow execution results
-      response.metadata.completedAt = run.finishedAt
-      response.metadata.duration = run.durationMs
+    if (job.status === JOB_STATUS.COMPLETED) {
+      response.output = job.output
+      response.metadata.completedAt = job.completedAt
+      if (job.startedAt && job.completedAt) {
+        response.metadata.duration = job.completedAt.getTime() - job.startedAt.getTime()
+      }
    }

-    if (mappedStatus === 'failed') {
-      response.error = run.error
-      response.metadata.completedAt = run.finishedAt
-      response.metadata.duration = run.durationMs
+    if (job.status === JOB_STATUS.FAILED) {
+      response.error = job.error
+      response.metadata.completedAt = job.completedAt
+      if (job.startedAt && job.completedAt) {
+        response.metadata.duration = job.completedAt.getTime() - job.startedAt.getTime()
+      }
    }

-    if (mappedStatus === 'processing' || mappedStatus === 'queued') {
-      response.estimatedDuration = 180000 // 3 minutes max from our config
+    if (job.status === JOB_STATUS.PROCESSING || job.status === JOB_STATUS.PENDING) {
+      response.estimatedDuration = 180000
    }

    return NextResponse.json(response)
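An illustrative response for a completed job, following the shape assembled above (values are examples, not captured output):

```typescript
// Example only; `output` carries the workflow execution results.
const exampleStatusResponse = {
  success: true,
  taskId: 'job_123',
  status: 'completed',
  output: { result: '...' },
  metadata: {
    startedAt: '2026-02-04T19:00:00.000Z',
    completedAt: '2026-02-04T19:00:42.000Z',
    duration: 42000,
  },
}
```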
413  apps/sim/app/api/mcp/copilot/route.ts  Normal file
@@ -0,0 +1,413 @@
|
||||
import {
|
||||
type CallToolResult,
|
||||
ErrorCode,
|
||||
type InitializeResult,
|
||||
isJSONRPCNotification,
|
||||
isJSONRPCRequest,
|
||||
type JSONRPCError,
|
||||
type JSONRPCMessage,
|
||||
type JSONRPCResponse,
|
||||
type ListToolsResult,
|
||||
type RequestId,
|
||||
} from '@modelcontextprotocol/sdk/types.js'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { checkHybridAuth } from '@/lib/auth/hybrid'
|
||||
import { getCopilotModel } from '@/lib/copilot/config'
|
||||
import { orchestrateSubagentStream } from '@/lib/copilot/orchestrator/subagent'
|
||||
import {
|
||||
executeToolServerSide,
|
||||
prepareExecutionContext,
|
||||
} from '@/lib/copilot/orchestrator/tool-executor'
|
||||
import { DIRECT_TOOL_DEFS, SUBAGENT_TOOL_DEFS } from '@/lib/copilot/tools/mcp/definitions'
|
||||
|
||||
const logger = createLogger('CopilotMcpAPI')
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
|
||||
/**
|
||||
* MCP Server instructions that guide LLMs on how to use the Sim copilot tools.
|
||||
* This is included in the initialize response to help external LLMs understand
|
||||
* the workflow lifecycle and best practices.
|
||||
*/
|
||||
const MCP_SERVER_INSTRUCTIONS = `
|
||||
## Sim Workflow Copilot - Usage Guide
|
||||
|
||||
You are interacting with Sim's workflow automation platform. These tools orchestrate specialized AI agents that build workflows. Follow these guidelines carefully.
|
||||
|
||||
---
|
||||
|
||||
## Platform Knowledge
|
||||
|
||||
Sim is a workflow automation platform. Workflows are visual pipelines of blocks.
|
||||
|
||||
### Block Types
|
||||
|
||||
**Core Logic:**
|
||||
- **Agent** - The heart of Sim (LLM block with tools, memory, structured output, knowledge bases)
|
||||
- **Function** - JavaScript code execution
|
||||
- **Condition** - If/else branching
|
||||
- **Router** - AI-powered content-based routing
|
||||
- **Loop** - While/do-while iteration
|
||||
- **Parallel** - Simultaneous execution
|
||||
- **API** - HTTP requests
|
||||
|
||||
**Integrations (3rd Party):**
|
||||
- OAuth: Slack, Gmail, Google Calendar, Sheets, Outlook, Linear, GitHub, Notion
|
||||
- API: Stripe, Twilio, SendGrid, any REST API
|
||||
|
||||
### The Agent Block
|
||||
|
||||
The Agent block is the core of intelligent workflows:
|
||||
- **Tools** - Add integrations, custom tools, web search to give it capabilities
|
||||
- **Memory** - Multi-turn conversations with persistent context
|
||||
- **Structured Output** - JSON schema for reliable parsing
|
||||
- **Knowledge Bases** - RAG-powered document retrieval
|
||||
|
||||
**Design principle:** Put tools INSIDE agents rather than using standalone tool blocks.
|
||||
|
||||
### Triggers
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| Manual/Chat | User sends message in UI (start block: input, files, conversationId) |
|
||||
| API | REST endpoint with custom input schema |
|
||||
| Webhook | External services POST to trigger URL |
|
||||
| Schedule | Cron-based (hourly, daily, weekly) |
|
||||
|
||||
### Deployments
|
||||
|
||||
| Type | Trigger | Use Case |
|
||||
|------|---------|----------|
|
||||
| API | Start block | REST endpoint for programmatic access |
|
||||
| Chat | Start block | Managed chat UI with auth options |
|
||||
| MCP | Start block | Expose as MCP tool for AI agents |
|
||||
| General | Schedule/Webhook | Activate triggers to run automatically |
|
||||
|
||||
**Undeployed workflows only run in the builder UI.**
|
||||
|
||||
### Variable Syntax
|
||||
|
||||
Reference outputs from previous blocks: \`<blockname.field>\`
|
||||
Reference environment variables: \`{{ENV_VAR_NAME}}\`
|
||||
|
||||
Rules:
|
||||
- Block names must be lowercase, no spaces, no special characters
|
||||
- Use dot notation for nested fields: \`<blockname.field.subfield>\`
|
||||
|
||||
---
|
||||
|
||||
## Workflow Lifecycle
|
||||
|
||||
1. **Create**: For NEW workflows, FIRST call create_workflow to get a workflowId
|
||||
2. **Plan**: Use copilot_plan with the workflowId to plan the workflow
|
||||
3. **Edit**: Use copilot_edit with the workflowId AND the plan to build the workflow
|
||||
4. **Deploy**: ALWAYS deploy after building using copilot_deploy before testing/running
|
||||
5. **Test**: Use copilot_test to verify the workflow works correctly
|
||||
6. **Share**: Provide the user with the workflow URL after completion
|
||||
|
||||
---
|
||||
|
||||
## CRITICAL: Always Pass workflowId
|
||||
|
||||
- For NEW workflows: Call create_workflow FIRST, then use the returned workflowId
|
||||
- For EXISTING workflows: Pass the workflowId to all copilot tools
|
||||
- copilot_plan, copilot_edit, copilot_deploy, copilot_test, copilot_debug all REQUIRE workflowId
|
||||
|
||||
---
|
||||
|
||||
## CRITICAL: How to Handle Plans
|
||||
|
||||
The copilot_plan tool returns a structured plan object. You MUST:
|
||||
|
||||
1. **Do NOT modify the plan**: Pass the plan object EXACTLY as returned to copilot_edit
|
||||
2. **Do NOT interpret or summarize the plan**: The edit agent needs the raw plan data
|
||||
3. **Pass the plan in the context.plan field**: \`{ "context": { "plan": <plan_object> } }\`
|
||||
4. **Include ALL plan data**: Block configurations, connections, credentials, everything
|
||||
|
||||
Example flow:
|
||||
\`\`\`
|
||||
1. copilot_plan({ request: "build a workflow...", workflowId: "abc123" })
|
||||
-> Returns: { "plan": { "blocks": [...], "connections": [...], ... } }
|
||||
|
||||
2. copilot_edit({
|
||||
workflowId: "abc123",
|
||||
message: "Execute the plan",
|
||||
context: { "plan": <EXACT plan object from step 1> }
|
||||
})
|
||||
\`\`\`
|
||||
|
||||
**Why this matters**: The plan contains technical details (block IDs, field mappings, API schemas) that the edit agent needs verbatim. Summarizing or rephrasing loses critical information.
|
||||
|
||||
---
|
||||
|
||||
## CRITICAL: Error Handling
|
||||
|
||||
**If the user says "doesn't work", "broke", "failed", "error" → ALWAYS use copilot_debug FIRST.**
|
||||
|
||||
Don't guess. Don't plan. Debug first to find the actual problem.
|
||||
|
||||
---
|
||||
|
||||
## Important Rules
|
||||
|
||||
- ALWAYS deploy a workflow before attempting to run or test it
|
||||
- Workflows must be deployed to have an "active deployment" for execution
|
||||
- After building, call copilot_deploy with the appropriate deployment type (api, chat, or mcp)
|
||||
- Return the workflow URL to the user so they can access it in Sim
|
||||
|
||||
---
|
||||
|
||||
## Quick Operations (use direct tools)
|
||||
- list_workflows, list_workspaces, list_folders, get_workflow: Fast database queries
|
||||
- create_workflow: Create new workflow and get workflowId (CALL THIS FIRST for new workflows)
|
||||
- create_folder: Create new resources
|
||||
|
||||
## Workflow Building (use copilot tools)
|
||||
- copilot_plan: Plan workflow changes (REQUIRES workflowId) - returns a plan object
|
||||
- copilot_edit: Execute the plan (REQUIRES workflowId AND plan from copilot_plan)
|
||||
- copilot_deploy: Deploy workflows (REQUIRES workflowId)
|
||||
- copilot_test: Test workflow execution (REQUIRES workflowId)
|
||||
- copilot_debug: Diagnose errors (REQUIRES workflowId) - USE THIS FIRST for issues
|
||||
`
|
||||
|
||||
function createResponse(id: RequestId, result: unknown): JSONRPCResponse {
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id,
|
||||
result: result as JSONRPCResponse['result'],
|
||||
}
|
||||
}
|
||||
|
||||
function createError(id: RequestId, code: ErrorCode | number, message: string): JSONRPCError {
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id,
|
||||
error: { code, message },
|
||||
}
|
||||
}
|
||||
|
||||
export async function GET() {
|
||||
return NextResponse.json({
|
||||
name: 'copilot-subagents',
|
||||
version: '1.0.0',
|
||||
protocolVersion: '2024-11-05',
|
||||
capabilities: { tools: {} },
|
||||
})
|
||||
}
|
||||
|
||||
export async function POST(request: NextRequest) {
|
||||
try {
|
||||
const auth = await checkHybridAuth(request, { requireWorkflowId: false })
|
||||
if (!auth.success || !auth.userId) {
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
|
||||
const body = (await request.json()) as JSONRPCMessage
|
||||
|
||||
if (isJSONRPCNotification(body)) {
|
||||
return new NextResponse(null, { status: 202 })
|
||||
}
|
||||
|
||||
if (!isJSONRPCRequest(body)) {
|
||||
return NextResponse.json(
|
||||
createError(0, ErrorCode.InvalidRequest, 'Invalid JSON-RPC message'),
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
const { id, method, params } = body
|
||||
|
||||
switch (method) {
|
||||
case 'initialize': {
|
||||
const result: InitializeResult = {
|
||||
protocolVersion: '2024-11-05',
|
||||
capabilities: { tools: {} },
|
||||
serverInfo: { name: 'sim-copilot', version: '1.0.0' },
|
||||
instructions: MCP_SERVER_INSTRUCTIONS,
|
||||
}
|
||||
return NextResponse.json(createResponse(id, result))
|
||||
}
|
||||
case 'ping':
|
||||
return NextResponse.json(createResponse(id, {}))
|
||||
case 'tools/list':
|
||||
return handleToolsList(id)
|
||||
case 'tools/call':
|
||||
return handleToolsCall(
|
||||
id,
|
||||
params as { name: string; arguments?: Record<string, unknown> },
|
||||
auth.userId
|
||||
)
|
||||
default:
|
||||
return NextResponse.json(
|
||||
createError(id, ErrorCode.MethodNotFound, `Method not found: ${method}`),
|
||||
{ status: 404 }
|
||||
)
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Error handling MCP request', { error })
|
||||
return NextResponse.json(createError(0, ErrorCode.InternalError, 'Internal error'), {
|
||||
status: 500,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
async function handleToolsList(id: RequestId): Promise<NextResponse> {
|
||||
const directTools = DIRECT_TOOL_DEFS.map((tool) => ({
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
inputSchema: tool.inputSchema,
|
||||
}))
|
||||
|
||||
const subagentTools = SUBAGENT_TOOL_DEFS.map((tool) => ({
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
inputSchema: tool.inputSchema,
|
||||
}))
|
||||
|
||||
const result: ListToolsResult = {
|
||||
tools: [...directTools, ...subagentTools],
|
||||
}
|
||||
|
||||
return NextResponse.json(createResponse(id, result))
|
||||
}
|
||||
|
||||
async function handleToolsCall(
|
||||
id: RequestId,
|
||||
params: { name: string; arguments?: Record<string, unknown> },
|
||||
userId: string
|
||||
): Promise<NextResponse> {
|
||||
const args = params.arguments || {}
|
||||
|
||||
// Check if this is a direct tool (fast, no LLM)
|
||||
const directTool = DIRECT_TOOL_DEFS.find((tool) => tool.name === params.name)
|
||||
if (directTool) {
|
||||
return handleDirectToolCall(id, directTool, args, userId)
|
||||
}
|
||||
|
||||
// Check if this is a subagent tool (uses LLM orchestration)
|
||||
const subagentTool = SUBAGENT_TOOL_DEFS.find((tool) => tool.name === params.name)
|
||||
if (subagentTool) {
|
||||
return handleSubagentToolCall(id, subagentTool, args, userId)
|
||||
}
|
||||
|
||||
return NextResponse.json(
|
||||
createError(id, ErrorCode.MethodNotFound, `Tool not found: ${params.name}`),
|
||||
{ status: 404 }
|
||||
)
|
||||
}
|
||||
|
||||
async function handleDirectToolCall(
|
||||
id: RequestId,
|
||||
toolDef: (typeof DIRECT_TOOL_DEFS)[number],
|
||||
args: Record<string, unknown>,
|
||||
userId: string
|
||||
): Promise<NextResponse> {
|
||||
try {
|
||||
const execContext = await prepareExecutionContext(userId, (args.workflowId as string) || '')
|
||||
|
||||
const toolCall = {
|
||||
id: crypto.randomUUID(),
|
||||
name: toolDef.toolId,
|
||||
status: 'pending' as const,
|
||||
params: args as Record<string, any>,
|
||||
startTime: Date.now(),
|
||||
}
|
||||
|
||||
const result = await executeToolServerSide(toolCall, execContext)
|
||||
|
||||
const response: CallToolResult = {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: JSON.stringify(result.output ?? result, null, 2),
|
||||
},
|
||||
],
|
||||
isError: !result.success,
|
||||
}
|
||||
|
||||
return NextResponse.json(createResponse(id, response))
|
||||
} catch (error) {
|
||||
logger.error('Direct tool execution failed', { tool: toolDef.name, error })
|
||||
return NextResponse.json(
|
||||
createError(id, ErrorCode.InternalError, `Tool execution failed: ${error}`),
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
async function handleSubagentToolCall(
|
||||
id: RequestId,
|
||||
toolDef: (typeof SUBAGENT_TOOL_DEFS)[number],
|
||||
args: Record<string, unknown>,
|
||||
userId: string
|
||||
): Promise<NextResponse> {
|
||||
const requestText =
|
||||
(args.request as string) ||
|
||||
(args.message as string) ||
|
||||
(args.error as string) ||
|
||||
JSON.stringify(args)
|
||||
|
||||
const context = (args.context as Record<string, unknown>) || {}
|
||||
if (args.plan && !context.plan) {
|
||||
context.plan = args.plan
|
||||
}
|
||||
|
||||
const { model } = getCopilotModel('chat')
|
||||
|
||||
const result = await orchestrateSubagentStream(
|
||||
toolDef.agentId,
|
||||
{
|
||||
message: requestText,
|
||||
workflowId: args.workflowId,
|
||||
workspaceId: args.workspaceId,
|
||||
context,
|
||||
model,
|
||||
// Signal to the copilot backend that this is a headless request
|
||||
// so it can enforce workflowId requirements on tools
|
||||
headless: true,
|
||||
},
|
||||
{
|
||||
userId,
|
||||
workflowId: args.workflowId as string | undefined,
|
||||
workspaceId: args.workspaceId as string | undefined,
|
||||
}
|
||||
)
|
||||
|
||||
// When a respond tool (plan_respond, edit_respond, etc.) was used,
|
||||
// return only the structured result - not the full result with all internal tool calls.
|
||||
// This provides clean output for MCP consumers.
|
||||
let responseData: unknown
|
||||
if (result.structuredResult) {
|
||||
responseData = {
|
||||
success: result.structuredResult.success ?? result.success,
|
||||
type: result.structuredResult.type,
|
||||
summary: result.structuredResult.summary,
|
||||
data: result.structuredResult.data,
|
||||
}
|
||||
} else if (result.error) {
|
||||
responseData = {
|
||||
success: false,
|
||||
error: result.error,
|
||||
errors: result.errors,
|
||||
}
|
||||
} else {
|
||||
// Fallback: return content if no structured result
|
||||
responseData = {
|
||||
success: result.success,
|
||||
content: result.content,
|
||||
}
|
||||
}
|
||||
|
||||
const response: CallToolResult = {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: JSON.stringify(responseData, null, 2),
|
||||
},
|
||||
],
|
||||
isError: !result.success,
|
||||
}
|
||||
|
||||
return NextResponse.json(createResponse(id, response))
|
||||
}
|
||||
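For orientation, a client round-trip against this route is plain JSON-RPC 2.0 over a single POST. A minimal sketch, assuming the route's mount path and the `copilot_plan` argument values (both illustrative, not taken from the diff):

```ts
// Hypothetical caller for the MCP subagents route above. Assumes the route is
// mounted at /api/copilot/subagents and the request carries credentials that
// checkHybridAuth accepts.
async function callCopilotPlan(workflowId: string): Promise<unknown> {
  const res = await fetch('/api/copilot/subagents', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      jsonrpc: '2.0',
      id: 1,
      method: 'tools/call',
      params: {
        name: 'copilot_plan',
        // Subagent tools require workflowId, per the server instructions above.
        arguments: { workflowId, request: 'Add a Slack notification step' },
      },
    }),
  })
  // handleSubagentToolCall wraps its output as CallToolResult, so the payload
  // sits in result.content[0].text as pretty-printed JSON.
  const rpc = await res.json()
  return JSON.parse(rpc.result.content[0].text)
}
```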
@@ -1,10 +1,9 @@
import { db, workflowDeploymentVersion, workflowSchedule } from '@sim/db'
import { createLogger } from '@sim/logger'
import { tasks } from '@trigger.dev/sdk'
import { and, eq, isNull, lt, lte, not, or, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { verifyCronAuth } from '@/lib/auth/internal'
import { isTriggerDevEnabled } from '@/lib/core/config/feature-flags'
import { getJobQueue, shouldExecuteInline } from '@/lib/core/async-jobs'
import { generateRequestId } from '@/lib/core/utils/request'
import { executeScheduleJob } from '@/background/schedule-execution'

@@ -55,72 +54,67 @@ export async function GET(request: NextRequest) {
    logger.debug(`[${requestId}] Successfully queried schedules: ${dueSchedules.length} found`)
    logger.info(`[${requestId}] Processing ${dueSchedules.length} due scheduled workflows`)

    if (isTriggerDevEnabled) {
      const triggerPromises = dueSchedules.map(async (schedule) => {
        const queueTime = schedule.lastQueuedAt ?? queuedAt
    const jobQueue = await getJobQueue()

        try {
          const payload = {
            scheduleId: schedule.id,
            workflowId: schedule.workflowId,
            blockId: schedule.blockId || undefined,
            cronExpression: schedule.cronExpression || undefined,
            lastRanAt: schedule.lastRanAt?.toISOString(),
            failedCount: schedule.failedCount || 0,
            now: queueTime.toISOString(),
            scheduledFor: schedule.nextRunAt?.toISOString(),
          }
    const queuePromises = dueSchedules.map(async (schedule) => {
      const queueTime = schedule.lastQueuedAt ?? queuedAt

          const handle = await tasks.trigger('schedule-execution', payload)
          logger.info(
            `[${requestId}] Queued schedule execution task ${handle.id} for workflow ${schedule.workflowId}`
          )
          return handle
        } catch (error) {
          logger.error(
            `[${requestId}] Failed to trigger schedule execution for workflow ${schedule.workflowId}`,
            error
          )
          return null
        }
      })
      const payload = {
        scheduleId: schedule.id,
        workflowId: schedule.workflowId,
        blockId: schedule.blockId || undefined,
        cronExpression: schedule.cronExpression || undefined,
        lastRanAt: schedule.lastRanAt?.toISOString(),
        failedCount: schedule.failedCount || 0,
        now: queueTime.toISOString(),
        scheduledFor: schedule.nextRunAt?.toISOString(),
      }

      await Promise.allSettled(triggerPromises)

      logger.info(`[${requestId}] Queued ${dueSchedules.length} schedule executions to Trigger.dev`)
    } else {
      const directExecutionPromises = dueSchedules.map(async (schedule) => {
        const queueTime = schedule.lastQueuedAt ?? queuedAt

        const payload = {
          scheduleId: schedule.id,
          workflowId: schedule.workflowId,
          blockId: schedule.blockId || undefined,
          cronExpression: schedule.cronExpression || undefined,
          lastRanAt: schedule.lastRanAt?.toISOString(),
          failedCount: schedule.failedCount || 0,
          now: queueTime.toISOString(),
          scheduledFor: schedule.nextRunAt?.toISOString(),
        }

        void executeScheduleJob(payload).catch((error) => {
          logger.error(
            `[${requestId}] Direct schedule execution failed for workflow ${schedule.workflowId}`,
            error
          )
      try {
        const jobId = await jobQueue.enqueue('schedule-execution', payload, {
          metadata: { workflowId: schedule.workflowId },
        })

          logger.info(
            `[${requestId}] Queued direct schedule execution for workflow ${schedule.workflowId} (Trigger.dev disabled)`
          `[${requestId}] Queued schedule execution task ${jobId} for workflow ${schedule.workflowId}`
          )
        })

      await Promise.allSettled(directExecutionPromises)
        if (shouldExecuteInline()) {
          void (async () => {
            try {
              await jobQueue.startJob(jobId)
              const output = await executeScheduleJob(payload)
              await jobQueue.completeJob(jobId, output)
            } catch (error) {
              const errorMessage = error instanceof Error ? error.message : String(error)
              logger.error(
                `[${requestId}] Schedule execution failed for workflow ${schedule.workflowId}`,
                { jobId, error: errorMessage }
              )
              try {
                await jobQueue.markJobFailed(jobId, errorMessage)
              } catch (markFailedError) {
                logger.error(`[${requestId}] Failed to mark job as failed`, {
                  jobId,
                  error:
                    markFailedError instanceof Error
                      ? markFailedError.message
                      : String(markFailedError),
                })
              }
            }
          })()
        }
      } catch (error) {
        logger.error(
          `[${requestId}] Failed to queue schedule execution for workflow ${schedule.workflowId}`,
          error
        )
      }
    })

      logger.info(
        `[${requestId}] Queued ${dueSchedules.length} direct schedule executions (Trigger.dev disabled)`
      )
    }
    await Promise.allSettled(queuePromises)

    logger.info(`[${requestId}] Queued ${dueSchedules.length} schedule executions`)

    return NextResponse.json({
      message: 'Scheduled workflow executions processed',

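Stripped of logging, the queueing path in this hunk reduces to the sketch below. The `JobQueue` method names come from the hunk itself; the interfaces and the `runInline` callback are simplified assumptions for illustration:

```ts
// Minimal shape of the queue-then-maybe-run-inline pattern used above.
// SchedulePayload and JobQueue are assumed interfaces, not the real types.
interface SchedulePayload { scheduleId: string; workflowId: string }
interface JobQueue {
  enqueue(kind: string, payload: SchedulePayload, opts: { metadata: object }): Promise<string>
  startJob(jobId: string): Promise<void>
  completeJob(jobId: string, output: unknown): Promise<void>
  markJobFailed(jobId: string, message: string): Promise<void>
}

async function queueSchedule(
  jobQueue: JobQueue,
  payload: SchedulePayload,
  runInline: () => Promise<unknown>,
  inline: boolean // result of shouldExecuteInline() in the route above
): Promise<string> {
  // Every due schedule becomes a queued job first, so there is always a record.
  const jobId = await jobQueue.enqueue('schedule-execution', payload, {
    metadata: { workflowId: payload.workflowId },
  })
  // With no external worker (Trigger.dev disabled), execute in-process,
  // fire-and-forget, recording completion or failure on the queue.
  if (inline) {
    void (async () => {
      try {
        await jobQueue.startJob(jobId)
        await jobQueue.completeJob(jobId, await runInline())
      } catch (error) {
        await jobQueue.markJobFailed(jobId, error instanceof Error ? error.message : String(error))
      }
    })()
  }
  return jobId
}
```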
@@ -1,101 +0,0 @@
import { db } from '@sim/db'
import { templates } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { generateRequestId } from '@/lib/core/utils/request'
import { verifyEffectiveSuperUser } from '@/lib/templates/permissions'

const logger = createLogger('TemplateApprovalAPI')

export const revalidate = 0

/**
 * POST /api/templates/[id]/approve - Approve a template (super users only)
 */
export async function POST(request: NextRequest, { params }: { params: Promise<{ id: string }> }) {
  const requestId = generateRequestId()
  const { id } = await params

  try {
    const session = await getSession()
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthorized template approval attempt for ID: ${id}`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    const { effectiveSuperUser } = await verifyEffectiveSuperUser(session.user.id)
    if (!effectiveSuperUser) {
      logger.warn(`[${requestId}] Non-super user attempted to approve template: ${id}`)
      return NextResponse.json({ error: 'Only super users can approve templates' }, { status: 403 })
    }

    const existingTemplate = await db.select().from(templates).where(eq(templates.id, id)).limit(1)
    if (existingTemplate.length === 0) {
      logger.warn(`[${requestId}] Template not found for approval: ${id}`)
      return NextResponse.json({ error: 'Template not found' }, { status: 404 })
    }

    await db
      .update(templates)
      .set({ status: 'approved', updatedAt: new Date() })
      .where(eq(templates.id, id))

    logger.info(`[${requestId}] Template approved: ${id} by super user: ${session.user.id}`)

    return NextResponse.json({
      message: 'Template approved successfully',
      templateId: id,
    })
  } catch (error) {
    logger.error(`[${requestId}] Error approving template ${id}`, error)
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}

/**
 * DELETE /api/templates/[id]/approve - Unapprove a template (super users only)
 */
export async function DELETE(
  _request: NextRequest,
  { params }: { params: Promise<{ id: string }> }
) {
  const requestId = generateRequestId()
  const { id } = await params

  try {
    const session = await getSession()
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthorized template rejection attempt for ID: ${id}`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    const { effectiveSuperUser } = await verifyEffectiveSuperUser(session.user.id)
    if (!effectiveSuperUser) {
      logger.warn(`[${requestId}] Non-super user attempted to reject template: ${id}`)
      return NextResponse.json({ error: 'Only super users can reject templates' }, { status: 403 })
    }

    const existingTemplate = await db.select().from(templates).where(eq(templates.id, id)).limit(1)
    if (existingTemplate.length === 0) {
      logger.warn(`[${requestId}] Template not found for rejection: ${id}`)
      return NextResponse.json({ error: 'Template not found' }, { status: 404 })
    }

    await db
      .update(templates)
      .set({ status: 'rejected', updatedAt: new Date() })
      .where(eq(templates.id, id))

    logger.info(`[${requestId}] Template rejected: ${id} by super user: ${session.user.id}`)

    return NextResponse.json({
      message: 'Template rejected successfully',
      templateId: id,
    })
  } catch (error) {
    logger.error(`[${requestId}] Error rejecting template ${id}`, error)
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}
@@ -1,55 +0,0 @@
import { db } from '@sim/db'
import { templates } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { generateRequestId } from '@/lib/core/utils/request'
import { verifyEffectiveSuperUser } from '@/lib/templates/permissions'

const logger = createLogger('TemplateRejectionAPI')

export const revalidate = 0

/**
 * POST /api/templates/[id]/reject - Reject a template (super users only)
 */
export async function POST(request: NextRequest, { params }: { params: Promise<{ id: string }> }) {
  const requestId = generateRequestId()
  const { id } = await params

  try {
    const session = await getSession()
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthorized template rejection attempt for ID: ${id}`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    const { effectiveSuperUser } = await verifyEffectiveSuperUser(session.user.id)
    if (!effectiveSuperUser) {
      logger.warn(`[${requestId}] Non-super user attempted to reject template: ${id}`)
      return NextResponse.json({ error: 'Only super users can reject templates' }, { status: 403 })
    }

    const existingTemplate = await db.select().from(templates).where(eq(templates.id, id)).limit(1)
    if (existingTemplate.length === 0) {
      logger.warn(`[${requestId}] Template not found for rejection: ${id}`)
      return NextResponse.json({ error: 'Template not found' }, { status: 404 })
    }

    await db
      .update(templates)
      .set({ status: 'rejected', updatedAt: new Date() })
      .where(eq(templates.id, id))

    logger.info(`[${requestId}] Template rejected: ${id} by super user: ${session.user.id}`)

    return NextResponse.json({
      message: 'Template rejected successfully',
      templateId: id,
    })
  } catch (error) {
    logger.error(`[${requestId}] Error rejecting template ${id}`, error)
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}
@@ -106,6 +106,7 @@ const updateTemplateSchema = z.object({
  creatorId: z.string().optional(), // Creator profile ID
  tags: z.array(z.string()).max(10, 'Maximum 10 tags allowed').optional(),
  updateState: z.boolean().optional(), // Explicitly request state update from current workflow
  status: z.enum(['approved', 'rejected', 'pending']).optional(), // Status change (super users only)
})

// PUT /api/templates/[id] - Update a template
@@ -131,7 +132,7 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
      )
    }

    const { name, details, creatorId, tags, updateState } = validationResult.data
    const { name, details, creatorId, tags, updateState, status } = validationResult.data

    const existingTemplate = await db.select().from(templates).where(eq(templates.id, id)).limit(1)

@@ -142,21 +143,44 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{

    const template = existingTemplate[0]

    if (!template.creatorId) {
      logger.warn(`[${requestId}] Template ${id} has no creator, denying update`)
      return NextResponse.json({ error: 'Access denied' }, { status: 403 })
    // Status changes require super user permission
    if (status !== undefined) {
      const { verifyEffectiveSuperUser } = await import('@/lib/templates/permissions')
      const { effectiveSuperUser } = await verifyEffectiveSuperUser(session.user.id)
      if (!effectiveSuperUser) {
        logger.warn(`[${requestId}] Non-super user attempted to change template status: ${id}`)
        return NextResponse.json(
          { error: 'Only super users can change template status' },
          { status: 403 }
        )
      }
    }

    const { verifyCreatorPermission } = await import('@/lib/templates/permissions')
    const { hasPermission, error: permissionError } = await verifyCreatorPermission(
      session.user.id,
      template.creatorId,
      'admin'
    )
    // For non-status updates, verify creator permission
    const hasNonStatusUpdates =
      name !== undefined ||
      details !== undefined ||
      creatorId !== undefined ||
      tags !== undefined ||
      updateState

    if (!hasPermission) {
      logger.warn(`[${requestId}] User denied permission to update template ${id}`)
      return NextResponse.json({ error: permissionError || 'Access denied' }, { status: 403 })
    if (hasNonStatusUpdates) {
      if (!template.creatorId) {
        logger.warn(`[${requestId}] Template ${id} has no creator, denying update`)
        return NextResponse.json({ error: 'Access denied' }, { status: 403 })
      }

      const { verifyCreatorPermission } = await import('@/lib/templates/permissions')
      const { hasPermission, error: permissionError } = await verifyCreatorPermission(
        session.user.id,
        template.creatorId,
        'admin'
      )

      if (!hasPermission) {
        logger.warn(`[${requestId}] User denied permission to update template ${id}`)
        return NextResponse.json({ error: permissionError || 'Access denied' }, { status: 403 })
      }
    }

    const updateData: any = {
@@ -167,6 +191,7 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
    if (details !== undefined) updateData.details = details
    if (tags !== undefined) updateData.tags = tags
    if (creatorId !== undefined) updateData.creatorId = creatorId
    if (status !== undefined) updateData.status = status

    if (updateState && template.workflowId) {
      const { verifyWorkflowAccess } = await import('@/socket/middleware/permissions')

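Under the updated schema, a status-only update and a metadata update exercise different permission checks. A status-only request could look like this (the fetch call is illustrative; the path comes from the route comment above):

```ts
// Approve a template through the consolidated PUT route. Only status is sent,
// so verifyEffectiveSuperUser is the only gate exercised; the creator
// permission check is skipped because hasNonStatusUpdates is false.
await fetch(`/api/templates/${templateId}`, {
  method: 'PUT',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ status: 'approved' }),
})
```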
apps/sim/app/api/v1/copilot/chat/route.ts (new file, 116 lines)
@@ -0,0 +1,116 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getCopilotModel } from '@/lib/copilot/config'
import { SIM_AGENT_VERSION } from '@/lib/copilot/constants'
import { COPILOT_REQUEST_MODES } from '@/lib/copilot/models'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import { resolveWorkflowIdForUser } from '@/lib/workflows/utils'
import { authenticateV1Request } from '@/app/api/v1/auth'

const logger = createLogger('CopilotHeadlessAPI')

const RequestSchema = z.object({
  message: z.string().min(1, 'message is required'),
  workflowId: z.string().optional(),
  workflowName: z.string().optional(),
  chatId: z.string().optional(),
  mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'),
  model: z.string().optional(),
  autoExecuteTools: z.boolean().optional().default(true),
  timeout: z.number().optional().default(300000),
})

/**
 * POST /api/v1/copilot/chat
 * Headless copilot endpoint for server-side orchestration.
 *
 * workflowId is optional - if not provided:
 * - If workflowName is provided, finds that workflow
 * - Otherwise uses the user's first workflow as context
 * - The copilot can still operate on any workflow using list_user_workflows
 */
export async function POST(req: NextRequest) {
  const auth = await authenticateV1Request(req)
  if (!auth.authenticated || !auth.userId) {
    return NextResponse.json(
      { success: false, error: auth.error || 'Unauthorized' },
      { status: 401 }
    )
  }

  try {
    const body = await req.json()
    const parsed = RequestSchema.parse(body)
    const defaults = getCopilotModel('chat')
    const selectedModel = parsed.model || defaults.model

    // Resolve workflow ID
    const resolved = await resolveWorkflowIdForUser(
      auth.userId,
      parsed.workflowId,
      parsed.workflowName
    )
    if (!resolved) {
      return NextResponse.json(
        {
          success: false,
          error: 'No workflows found. Create a workflow first or provide a valid workflowId.',
        },
        { status: 400 }
      )
    }

    // Transform mode to transport mode (same as client API)
    // build and agent both map to 'agent' on the backend
    const effectiveMode = parsed.mode === 'agent' ? 'build' : parsed.mode
    const transportMode = effectiveMode === 'build' ? 'agent' : effectiveMode

    // Always generate a chatId - required for artifacts system to work with subagents
    const chatId = parsed.chatId || crypto.randomUUID()

    const requestPayload = {
      message: parsed.message,
      workflowId: resolved.workflowId,
      userId: auth.userId,
      stream: true,
      streamToolCalls: true,
      model: selectedModel,
      mode: transportMode,
      messageId: crypto.randomUUID(),
      version: SIM_AGENT_VERSION,
      headless: true, // Enable cross-workflow operations via workflowId params
      chatId,
    }

    const result = await orchestrateCopilotStream(requestPayload, {
      userId: auth.userId,
      workflowId: resolved.workflowId,
      chatId,
      autoExecuteTools: parsed.autoExecuteTools,
      timeout: parsed.timeout,
      interactive: false,
    })

    return NextResponse.json({
      success: result.success,
      content: result.content,
      toolCalls: result.toolCalls,
      chatId: result.chatId || chatId, // Return the chatId for conversation continuity
      conversationId: result.conversationId,
      error: result.error,
    })
  } catch (error) {
    if (error instanceof z.ZodError) {
      return NextResponse.json(
        { success: false, error: 'Invalid request', details: error.errors },
        { status: 400 }
      )
    }

    logger.error('Headless copilot request failed', {
      error: error instanceof Error ? error.message : String(error),
    })
    return NextResponse.json({ success: false, error: 'Internal server error' }, { status: 500 })
  }
}
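A minimal call against this endpoint, assuming an API key that authenticateV1Request accepts (the header name and base URL are assumptions):

```ts
// Sketch of a headless copilot request. The response shape mirrors the
// NextResponse.json() payload in the handler above.
const res = await fetch('https://example.com/api/v1/copilot/chat', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    'x-api-key': process.env.SIM_API_KEY ?? '', // assumed auth header
  },
  body: JSON.stringify({
    message: 'Add error handling to the webhook step',
    workflowName: 'My Workflow', // optional; a workflowId also works
  }),
})
const { success, content, chatId } = await res.json()
// Send chatId back on the next request for conversation continuity.
```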
@@ -3,7 +3,6 @@ import { userStats, workflow } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import OpenAI, { AzureOpenAI } from 'openai'
import { getBYOKKey } from '@/lib/api-key/byok'
import { getSession } from '@/lib/auth'
import { logModelUsage } from '@/lib/billing/core/usage-log'
@@ -12,6 +11,7 @@ import { env } from '@/lib/core/config/env'
import { getCostMultiplier, isBillingEnabled } from '@/lib/core/config/feature-flags'
import { generateRequestId } from '@/lib/core/utils/request'
import { verifyWorkspaceMembership } from '@/app/api/workflows/utils'
import { extractResponseText, parseResponsesUsage } from '@/providers/openai/utils'
import { getModelPricing } from '@/providers/utils'

export const dynamic = 'force-dynamic'
@@ -28,18 +28,6 @@ const openaiApiKey = env.OPENAI_API_KEY

const useWandAzure = azureApiKey && azureEndpoint && azureApiVersion

const client = useWandAzure
  ? new AzureOpenAI({
      apiKey: azureApiKey,
      apiVersion: azureApiVersion,
      endpoint: azureEndpoint,
    })
  : openaiApiKey
    ? new OpenAI({
        apiKey: openaiApiKey,
      })
    : null

if (!useWandAzure && !openaiApiKey) {
  logger.warn(
    'Neither Azure OpenAI nor OpenAI API key found. Wand generation API will not function.'
@@ -202,20 +190,18 @@ export async function POST(req: NextRequest) {
    }

    let isBYOK = false
    let activeClient = client
    let byokApiKey: string | null = null
    let activeOpenAIKey = openaiApiKey

    if (workspaceId && !useWandAzure) {
      const byokResult = await getBYOKKey(workspaceId, 'openai')
      if (byokResult) {
        isBYOK = true
        byokApiKey = byokResult.apiKey
        activeClient = new OpenAI({ apiKey: byokResult.apiKey })
        activeOpenAIKey = byokResult.apiKey
        logger.info(`[${requestId}] Using BYOK OpenAI key for wand generation`)
      }
    }

    if (!activeClient) {
    if (!useWandAzure && !activeOpenAIKey) {
      logger.error(`[${requestId}] AI client not initialized. Missing API key.`)
      return NextResponse.json(
        { success: false, error: 'Wand generation service is not configured.' },
@@ -276,17 +262,18 @@ Use this context to calculate relative dates like "yesterday", "last week", "beg
    )

    const apiUrl = useWandAzure
      ? `${azureEndpoint}/openai/deployments/${wandModelName}/chat/completions?api-version=${azureApiVersion}`
      : 'https://api.openai.com/v1/chat/completions'
      ? `${azureEndpoint?.replace(/\/$/, '')}/openai/v1/responses?api-version=${azureApiVersion}`
      : 'https://api.openai.com/v1/responses'

    const headers: Record<string, string> = {
      'Content-Type': 'application/json',
      'OpenAI-Beta': 'responses=v1',
    }

    if (useWandAzure) {
      headers['api-key'] = azureApiKey!
    } else {
      headers.Authorization = `Bearer ${byokApiKey || openaiApiKey}`
      headers.Authorization = `Bearer ${activeOpenAIKey}`
    }

    logger.debug(`[${requestId}] Making streaming request to: ${apiUrl}`)
@@ -296,11 +283,10 @@ Use this context to calculate relative dates like "yesterday", "last week", "beg
      headers,
      body: JSON.stringify({
        model: useWandAzure ? wandModelName : 'gpt-4o',
        messages: messages,
        input: messages,
        temperature: 0.2,
        max_tokens: 10000,
        max_output_tokens: 10000,
        stream: true,
        stream_options: { include_usage: true },
      }),
    })

@@ -327,16 +313,29 @@ Use this context to calculate relative dates like "yesterday", "last week", "beg
      return
    }

    let finalUsage: any = null
    let usageRecorded = false

    const recordUsage = async () => {
      if (usageRecorded || !finalUsage) {
        return
      }

      usageRecorded = true
      await updateUserStatsForWand(session.user.id, finalUsage, requestId, isBYOK)
    }

        try {
          let buffer = ''
          let chunkCount = 0
          let finalUsage: any = null
          let activeEventType: string | undefined

          while (true) {
            const { done, value } = await reader.read()

            if (done) {
              logger.info(`[${requestId}] Stream completed. Total chunks: ${chunkCount}`)
              await recordUsage()
              controller.enqueue(encoder.encode(`data: ${JSON.stringify({ done: true })}\n\n`))
              controller.close()
              break
@@ -348,47 +347,90 @@ Use this context to calculate relative dates like "yesterday", "last week", "beg
            buffer = lines.pop() || ''

            for (const line of lines) {
              if (line.startsWith('data: ')) {
                const data = line.slice(6).trim()
              const trimmed = line.trim()
              if (!trimmed) {
                continue
              }

                if (data === '[DONE]') {
                  logger.info(`[${requestId}] Received [DONE] signal`)
              if (trimmed.startsWith('event:')) {
                activeEventType = trimmed.slice(6).trim()
                continue
              }

                if (finalUsage) {
                  await updateUserStatsForWand(session.user.id, finalUsage, requestId, isBYOK)
              if (!trimmed.startsWith('data:')) {
                continue
              }

              const data = trimmed.slice(5).trim()
              if (data === '[DONE]') {
                logger.info(`[${requestId}] Received [DONE] signal`)

                await recordUsage()

                controller.enqueue(
                  encoder.encode(`data: ${JSON.stringify({ done: true })}\n\n`)
                )
                controller.close()
                return
              }

              let parsed: any
              try {
                parsed = JSON.parse(data)
              } catch (parseError) {
                logger.debug(`[${requestId}] Skipped non-JSON line: ${data.substring(0, 100)}`)
                continue
              }

              const eventType = parsed?.type ?? activeEventType

              if (
                eventType === 'response.error' ||
                eventType === 'error' ||
                eventType === 'response.failed'
              ) {
                throw new Error(parsed?.error?.message || 'Responses stream error')
              }

              if (
                eventType === 'response.output_text.delta' ||
                eventType === 'response.output_json.delta'
              ) {
                let content = ''
                if (typeof parsed.delta === 'string') {
                  content = parsed.delta
                } else if (parsed.delta && typeof parsed.delta.text === 'string') {
                  content = parsed.delta.text
                } else if (parsed.delta && parsed.delta.json !== undefined) {
                  content = JSON.stringify(parsed.delta.json)
                } else if (parsed.json !== undefined) {
                  content = JSON.stringify(parsed.json)
                } else if (typeof parsed.text === 'string') {
                  content = parsed.text
                }

                if (content) {
                  chunkCount++
                  if (chunkCount === 1) {
                    logger.info(`[${requestId}] Received first content chunk`)
                  }

                  controller.enqueue(
                    encoder.encode(`data: ${JSON.stringify({ done: true })}\n\n`)
                    encoder.encode(`data: ${JSON.stringify({ chunk: content })}\n\n`)
                  )
                  controller.close()
                  return
                }
              }

              try {
                const parsed = JSON.parse(data)
                const content = parsed.choices?.[0]?.delta?.content

                if (content) {
                  chunkCount++
                  if (chunkCount === 1) {
                    logger.info(`[${requestId}] Received first content chunk`)
                  }

                  controller.enqueue(
                    encoder.encode(`data: ${JSON.stringify({ chunk: content })}\n\n`)
                  )
              if (eventType === 'response.completed') {
                const usage = parseResponsesUsage(parsed?.response?.usage ?? parsed?.usage)
                if (usage) {
                  finalUsage = {
                    prompt_tokens: usage.promptTokens,
                    completion_tokens: usage.completionTokens,
                    total_tokens: usage.totalTokens,
                  }

                  if (parsed.usage) {
                    finalUsage = parsed.usage
                    logger.info(
                      `[${requestId}] Received usage data: ${JSON.stringify(parsed.usage)}`
                    )
                  }
                } catch (parseError) {
                  logger.debug(
                    `[${requestId}] Skipped non-JSON line: ${data.substring(0, 100)}`
                  logger.info(
                    `[${requestId}] Received usage data: ${JSON.stringify(finalUsage)}`
                  )
                }
              }
@@ -401,6 +443,12 @@ Use this context to calculate relative dates like "yesterday", "last week", "beg
              stack: streamError?.stack,
            })

            try {
              await recordUsage()
            } catch (usageError) {
              logger.warn(`[${requestId}] Failed to record usage after stream error`, usageError)
            }

            const errorData = `data: ${JSON.stringify({ error: 'Streaming failed', done: true })}\n\n`
            controller.enqueue(encoder.encode(errorData))
            controller.close()
@@ -424,8 +472,6 @@ Use this context to calculate relative dates like "yesterday", "last week", "beg
        message: error?.message || 'Unknown error',
        code: error?.code,
        status: error?.status,
        responseStatus: error?.response?.status,
        responseData: error?.response?.data ? safeStringify(error.response.data) : undefined,
        stack: error?.stack,
        useWandAzure,
        model: useWandAzure ? wandModelName : 'gpt-4o',
@@ -440,14 +486,43 @@ Use this context to calculate relative dates like "yesterday", "last week", "beg
      }
    }

    const completion = await activeClient.chat.completions.create({
      model: useWandAzure ? wandModelName : 'gpt-4o',
      messages: messages,
      temperature: 0.3,
      max_tokens: 10000,
    const apiUrl = useWandAzure
      ? `${azureEndpoint?.replace(/\/$/, '')}/openai/v1/responses?api-version=${azureApiVersion}`
      : 'https://api.openai.com/v1/responses'

    const headers: Record<string, string> = {
      'Content-Type': 'application/json',
      'OpenAI-Beta': 'responses=v1',
    }

    if (useWandAzure) {
      headers['api-key'] = azureApiKey!
    } else {
      headers.Authorization = `Bearer ${activeOpenAIKey}`
    }

    const response = await fetch(apiUrl, {
      method: 'POST',
      headers,
      body: JSON.stringify({
        model: useWandAzure ? wandModelName : 'gpt-4o',
        input: messages,
        temperature: 0.2,
        max_output_tokens: 10000,
      }),
    })

    const generatedContent = completion.choices[0]?.message?.content?.trim()
    if (!response.ok) {
      const errorText = await response.text()
      const apiError = new Error(
        `API request failed: ${response.status} ${response.statusText} - ${errorText}`
      )
      ;(apiError as any).status = response.status
      throw apiError
    }

    const completion = await response.json()
    const generatedContent = extractResponseText(completion.output)?.trim()

    if (!generatedContent) {
      logger.error(
@@ -461,8 +536,18 @@ Use this context to calculate relative dates like "yesterday", "last week", "beg

    logger.info(`[${requestId}] Wand generation successful`)

    if (completion.usage) {
      await updateUserStatsForWand(session.user.id, completion.usage, requestId, isBYOK)
    const usage = parseResponsesUsage(completion.usage)
    if (usage) {
      await updateUserStatsForWand(
        session.user.id,
        {
          prompt_tokens: usage.promptTokens,
          completion_tokens: usage.completionTokens,
          total_tokens: usage.totalTokens,
        },
        requestId,
        isBYOK
      )
    }

    return NextResponse.json({ success: true, content: generatedContent })
@@ -472,10 +557,6 @@ Use this context to calculate relative dates like "yesterday", "last week", "beg
      message: error?.message || 'Unknown error',
      code: error?.code,
      status: error?.status,
      responseStatus: error instanceof OpenAI.APIError ? error.status : error?.response?.status,
      responseData: (error as any)?.response?.data
        ? safeStringify((error as any).response.data)
        : undefined,
      stack: error?.stack,
      useWandAzure,
      model: useWandAzure ? wandModelName : 'gpt-4o',
@@ -484,26 +565,19 @@ Use this context to calculate relative dates like "yesterday", "last week", "beg
    })

    let clientErrorMessage = 'Wand generation failed. Please try again later.'
    let status = 500
    let status = typeof (error as any)?.status === 'number' ? (error as any).status : 500

    if (error instanceof OpenAI.APIError) {
      status = error.status || 500
      logger.error(
        `[${requestId}] ${useWandAzure ? 'Azure OpenAI' : 'OpenAI'} API Error: ${status} - ${error.message}`
      )

      if (status === 401) {
        clientErrorMessage = 'Authentication failed. Please check your API key configuration.'
      } else if (status === 429) {
        clientErrorMessage = 'Rate limit exceeded. Please try again later.'
      } else if (status >= 500) {
        clientErrorMessage =
          'The wand generation service is currently unavailable. Please try again later.'
      }
    } else if (useWandAzure && error.message?.includes('DeploymentNotFound')) {
    if (useWandAzure && error?.message?.includes('DeploymentNotFound')) {
      clientErrorMessage =
        'Azure OpenAI deployment not found. Please check your model deployment configuration.'
      status = 404
    } else if (status === 401) {
      clientErrorMessage = 'Authentication failed. Please check your API key configuration.'
    } else if (status === 429) {
      clientErrorMessage = 'Rate limit exceeded. Please try again later.'
    } else if (status >= 500) {
      clientErrorMessage =
        'The wand generation service is currently unavailable. Please try again later.'
    }

    return NextResponse.json(

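The rewritten parser walks the Responses API SSE framing, where each frame is an `event:` line followed by a `data:` line carrying JSON. As a rough illustration of the wire format it expects (event names from the code above; payload fields are abbreviated examples, not captured output):

```ts
// Example frames in the order the stream loop above would consume them.
const exampleFrames = [
  'event: response.output_text.delta',
  'data: {"type":"response.output_text.delta","delta":"Hello"}',
  'event: response.completed',
  'data: {"type":"response.completed","response":{"usage":{"input_tokens":12,"output_tokens":5,"total_tokens":17}}}',
  'data: [DONE]',
]
```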
@@ -1,190 +0,0 @@
import { db, workflowDeploymentVersion } from '@sim/db'
import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
import type { NextRequest } from 'next/server'
import { generateRequestId } from '@/lib/core/utils/request'
import { syncMcpToolsForWorkflow } from '@/lib/mcp/workflow-mcp-sync'
import { restorePreviousVersionWebhooks, saveTriggerWebhooksForDeploy } from '@/lib/webhooks/deploy'
import { activateWorkflowVersion } from '@/lib/workflows/persistence/utils'
import {
  cleanupDeploymentVersion,
  createSchedulesForDeploy,
  validateWorkflowSchedules,
} from '@/lib/workflows/schedules'
import { validateWorkflowPermissions } from '@/lib/workflows/utils'
import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/utils'
import type { BlockState } from '@/stores/workflows/workflow/types'

const logger = createLogger('WorkflowActivateDeploymentAPI')

export const dynamic = 'force-dynamic'
export const runtime = 'nodejs'

export async function POST(
  request: NextRequest,
  { params }: { params: Promise<{ id: string; version: string }> }
) {
  const requestId = generateRequestId()
  const { id, version } = await params

  try {
    const {
      error,
      session,
      workflow: workflowData,
    } = await validateWorkflowPermissions(id, requestId, 'admin')
    if (error) {
      return createErrorResponse(error.message, error.status)
    }

    const actorUserId = session?.user?.id
    if (!actorUserId) {
      logger.warn(`[${requestId}] Unable to resolve actor user for deployment activation: ${id}`)
      return createErrorResponse('Unable to determine activating user', 400)
    }

    const versionNum = Number(version)
    if (!Number.isFinite(versionNum)) {
      return createErrorResponse('Invalid version number', 400)
    }

    const [versionRow] = await db
      .select({
        id: workflowDeploymentVersion.id,
        state: workflowDeploymentVersion.state,
      })
      .from(workflowDeploymentVersion)
      .where(
        and(
          eq(workflowDeploymentVersion.workflowId, id),
          eq(workflowDeploymentVersion.version, versionNum)
        )
      )
      .limit(1)

    if (!versionRow?.state) {
      return createErrorResponse('Deployment version not found', 404)
    }

    const [currentActiveVersion] = await db
      .select({ id: workflowDeploymentVersion.id })
      .from(workflowDeploymentVersion)
      .where(
        and(
          eq(workflowDeploymentVersion.workflowId, id),
          eq(workflowDeploymentVersion.isActive, true)
        )
      )
      .limit(1)

    const previousVersionId = currentActiveVersion?.id

    const deployedState = versionRow.state as { blocks?: Record<string, BlockState> }
    const blocks = deployedState.blocks
    if (!blocks || typeof blocks !== 'object') {
      return createErrorResponse('Invalid deployed state structure', 500)
    }

    const scheduleValidation = validateWorkflowSchedules(blocks)
    if (!scheduleValidation.isValid) {
      return createErrorResponse(`Invalid schedule configuration: ${scheduleValidation.error}`, 400)
    }

    const triggerSaveResult = await saveTriggerWebhooksForDeploy({
      request,
      workflowId: id,
      workflow: workflowData as Record<string, unknown>,
      userId: actorUserId,
      blocks,
      requestId,
      deploymentVersionId: versionRow.id,
      previousVersionId,
      forceRecreateSubscriptions: true,
    })

    if (!triggerSaveResult.success) {
      return createErrorResponse(
        triggerSaveResult.error?.message || 'Failed to sync trigger configuration',
        triggerSaveResult.error?.status || 500
      )
    }

    const scheduleResult = await createSchedulesForDeploy(id, blocks, db, versionRow.id)

    if (!scheduleResult.success) {
      await cleanupDeploymentVersion({
        workflowId: id,
        workflow: workflowData as Record<string, unknown>,
        requestId,
        deploymentVersionId: versionRow.id,
      })
      if (previousVersionId) {
        await restorePreviousVersionWebhooks({
          request,
          workflow: workflowData as Record<string, unknown>,
          userId: actorUserId,
          previousVersionId,
          requestId,
        })
      }
      return createErrorResponse(scheduleResult.error || 'Failed to sync schedules', 500)
    }

    const result = await activateWorkflowVersion({ workflowId: id, version: versionNum })
    if (!result.success) {
      await cleanupDeploymentVersion({
        workflowId: id,
        workflow: workflowData as Record<string, unknown>,
        requestId,
        deploymentVersionId: versionRow.id,
      })
      if (previousVersionId) {
        await restorePreviousVersionWebhooks({
          request,
          workflow: workflowData as Record<string, unknown>,
          userId: actorUserId,
          previousVersionId,
          requestId,
        })
      }
      return createErrorResponse(result.error || 'Failed to activate deployment', 400)
    }

    if (previousVersionId && previousVersionId !== versionRow.id) {
      try {
        logger.info(
          `[${requestId}] Cleaning up previous version ${previousVersionId} webhooks/schedules`
        )
        await cleanupDeploymentVersion({
          workflowId: id,
          workflow: workflowData as Record<string, unknown>,
          requestId,
          deploymentVersionId: previousVersionId,
          skipExternalCleanup: true,
        })
        logger.info(`[${requestId}] Previous version cleanup completed`)
      } catch (cleanupError) {
        logger.error(
          `[${requestId}] Failed to clean up previous version ${previousVersionId}`,
          cleanupError
        )
      }
    }

    await syncMcpToolsForWorkflow({
      workflowId: id,
      requestId,
      state: versionRow.state,
      context: 'activate',
    })

    return createSuccessResponse({
      success: true,
      deployedAt: result.deployedAt,
      warnings: triggerSaveResult.warnings,
    })
  } catch (error: any) {
    logger.error(`[${requestId}] Error activating deployment for workflow: ${id}`, error)
    return createErrorResponse(error.message || 'Failed to activate deployment', 500)
  }
}
@@ -4,8 +4,17 @@ import { and, eq } from 'drizzle-orm'
import type { NextRequest } from 'next/server'
import { z } from 'zod'
import { generateRequestId } from '@/lib/core/utils/request'
import { syncMcpToolsForWorkflow } from '@/lib/mcp/workflow-mcp-sync'
import { restorePreviousVersionWebhooks, saveTriggerWebhooksForDeploy } from '@/lib/webhooks/deploy'
import { activateWorkflowVersion } from '@/lib/workflows/persistence/utils'
import {
  cleanupDeploymentVersion,
  createSchedulesForDeploy,
  validateWorkflowSchedules,
} from '@/lib/workflows/schedules'
import { validateWorkflowPermissions } from '@/lib/workflows/utils'
import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/utils'
import type { BlockState } from '@/stores/workflows/workflow/types'

const logger = createLogger('WorkflowDeploymentVersionAPI')

@@ -23,10 +32,14 @@ const patchBodySchema = z
      .max(500, 'Description must be 500 characters or less')
      .nullable()
      .optional(),
    isActive: z.literal(true).optional(), // Set to true to activate this version
  })
  .refine((data) => data.name !== undefined || data.description !== undefined, {
    message: 'At least one of name or description must be provided',
  })
  .refine(
    (data) => data.name !== undefined || data.description !== undefined || data.isActive === true,
    {
      message: 'At least one of name, description, or isActive must be provided',
    }
  )

export const dynamic = 'force-dynamic'
export const runtime = 'nodejs'
@@ -82,7 +95,22 @@ export async function PATCH(
  const { id, version } = await params

  try {
    const { error } = await validateWorkflowPermissions(id, requestId, 'write')
    const body = await request.json()
    const validation = patchBodySchema.safeParse(body)

    if (!validation.success) {
      return createErrorResponse(validation.error.errors[0]?.message || 'Invalid request body', 400)
    }

    const { name, description, isActive } = validation.data

    // Activation requires admin permission, other updates require write
    const requiredPermission = isActive ? 'admin' : 'write'
    const {
      error,
      session,
      workflow: workflowData,
    } = await validateWorkflowPermissions(id, requestId, requiredPermission)
    if (error) {
      return createErrorResponse(error.message, error.status)
    }
@@ -92,15 +120,193 @@ export async function PATCH(
      return createErrorResponse('Invalid version', 400)
    }

    const body = await request.json()
    const validation = patchBodySchema.safeParse(body)
    // Handle activation
    if (isActive) {
      const actorUserId = session?.user?.id
      if (!actorUserId) {
        logger.warn(`[${requestId}] Unable to resolve actor user for deployment activation: ${id}`)
        return createErrorResponse('Unable to determine activating user', 400)
      }

    if (!validation.success) {
      return createErrorResponse(validation.error.errors[0]?.message || 'Invalid request body', 400)
      const [versionRow] = await db
        .select({
          id: workflowDeploymentVersion.id,
          state: workflowDeploymentVersion.state,
        })
        .from(workflowDeploymentVersion)
        .where(
          and(
            eq(workflowDeploymentVersion.workflowId, id),
            eq(workflowDeploymentVersion.version, versionNum)
          )
        )
        .limit(1)

      if (!versionRow?.state) {
        return createErrorResponse('Deployment version not found', 404)
      }

      const [currentActiveVersion] = await db
        .select({ id: workflowDeploymentVersion.id })
        .from(workflowDeploymentVersion)
        .where(
          and(
            eq(workflowDeploymentVersion.workflowId, id),
            eq(workflowDeploymentVersion.isActive, true)
          )
        )
        .limit(1)

      const previousVersionId = currentActiveVersion?.id

      const deployedState = versionRow.state as { blocks?: Record<string, BlockState> }
      const blocks = deployedState.blocks
      if (!blocks || typeof blocks !== 'object') {
        return createErrorResponse('Invalid deployed state structure', 500)
      }

      const scheduleValidation = validateWorkflowSchedules(blocks)
      if (!scheduleValidation.isValid) {
        return createErrorResponse(
          `Invalid schedule configuration: ${scheduleValidation.error}`,
          400
        )
      }

      const triggerSaveResult = await saveTriggerWebhooksForDeploy({
        request,
        workflowId: id,
        workflow: workflowData as Record<string, unknown>,
        userId: actorUserId,
        blocks,
        requestId,
        deploymentVersionId: versionRow.id,
        previousVersionId,
        forceRecreateSubscriptions: true,
      })

      if (!triggerSaveResult.success) {
        return createErrorResponse(
          triggerSaveResult.error?.message || 'Failed to sync trigger configuration',
          triggerSaveResult.error?.status || 500
        )
      }

      const scheduleResult = await createSchedulesForDeploy(id, blocks, db, versionRow.id)

      if (!scheduleResult.success) {
        await cleanupDeploymentVersion({
          workflowId: id,
          workflow: workflowData as Record<string, unknown>,
          requestId,
          deploymentVersionId: versionRow.id,
        })
        if (previousVersionId) {
          await restorePreviousVersionWebhooks({
            request,
            workflow: workflowData as Record<string, unknown>,
            userId: actorUserId,
            previousVersionId,
            requestId,
          })
        }
        return createErrorResponse(scheduleResult.error || 'Failed to sync schedules', 500)
      }

      const result = await activateWorkflowVersion({ workflowId: id, version: versionNum })
      if (!result.success) {
        await cleanupDeploymentVersion({
          workflowId: id,
          workflow: workflowData as Record<string, unknown>,
          requestId,
          deploymentVersionId: versionRow.id,
        })
        if (previousVersionId) {
          await restorePreviousVersionWebhooks({
            request,
            workflow: workflowData as Record<string, unknown>,
            userId: actorUserId,
            previousVersionId,
            requestId,
          })
        }
        return createErrorResponse(result.error || 'Failed to activate deployment', 400)
      }

      if (previousVersionId && previousVersionId !== versionRow.id) {
        try {
          logger.info(
            `[${requestId}] Cleaning up previous version ${previousVersionId} webhooks/schedules`
          )
          await cleanupDeploymentVersion({
            workflowId: id,
            workflow: workflowData as Record<string, unknown>,
            requestId,
            deploymentVersionId: previousVersionId,
            skipExternalCleanup: true,
          })
          logger.info(`[${requestId}] Previous version cleanup completed`)
        } catch (cleanupError) {
          logger.error(
            `[${requestId}] Failed to clean up previous version ${previousVersionId}`,
            cleanupError
          )
        }
      }

      await syncMcpToolsForWorkflow({
        workflowId: id,
        requestId,
        state: versionRow.state,
        context: 'activate',
      })

      // Apply name/description updates if provided alongside activation
      let updatedName: string | null | undefined
      let updatedDescription: string | null | undefined
      if (name !== undefined || description !== undefined) {
        const activationUpdateData: { name?: string; description?: string | null } = {}
        if (name !== undefined) {
          activationUpdateData.name = name
        }
        if (description !== undefined) {
          activationUpdateData.description = description
        }

        const [updated] = await db
          .update(workflowDeploymentVersion)
          .set(activationUpdateData)
          .where(
            and(
              eq(workflowDeploymentVersion.workflowId, id),
              eq(workflowDeploymentVersion.version, versionNum)
            )
          )
          .returning({
            name: workflowDeploymentVersion.name,
            description: workflowDeploymentVersion.description,
          })

        if (updated) {
          updatedName = updated.name
          updatedDescription = updated.description
          logger.info(
            `[${requestId}] Updated deployment version ${version} metadata during activation`,
            { name: activationUpdateData.name, description: activationUpdateData.description }
          )
        }
      }

      return createSuccessResponse({
        success: true,
        deployedAt: result.deployedAt,
        warnings: triggerSaveResult.warnings,
        ...(updatedName !== undefined && { name: updatedName }),
        ...(updatedDescription !== undefined && { description: updatedDescription }),
      })
    }

    const { name, description } = validation.data

    // Handle name/description updates
    const updateData: { name?: string; description?: string | null } = {}
    if (name !== undefined) {
      updateData.name = name

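With the relaxed refine, one PATCH can now activate a version and rename it in the same call. A sketch (the route path is an assumption inferred from the `{ id, version }` params, not stated in the diff):

```ts
// Hypothetical request: activate deployment version 3 and label it.
// isActive: true routes through the admin-permission activation branch above.
await fetch(`/api/workflows/${workflowId}/deployments/3`, {
  method: 'PATCH',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ isActive: true, name: 'v3 rollout' }),
})
```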
@@ -1,286 +0,0 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { z } from 'zod'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { getTimeoutErrorMessage, isTimeoutError } from '@/lib/core/execution-limits'
import { generateRequestId } from '@/lib/core/utils/request'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
import { markExecutionCancelled } from '@/lib/execution/cancellation'
import { preprocessExecution } from '@/lib/execution/preprocessing'
import { LoggingSession } from '@/lib/logs/execution/logging-session'
import { buildTraceSpans } from '@/lib/logs/execution/trace-spans/trace-spans'
import { executeWorkflowCore } from '@/lib/workflows/executor/execution-core'
import { createSSECallbacks } from '@/lib/workflows/executor/execution-events'
import { ExecutionSnapshot } from '@/executor/execution/snapshot'
import type { ExecutionMetadata, SerializableExecutionState } from '@/executor/execution/types'
import { hasExecutionResult } from '@/executor/utils/errors'

const logger = createLogger('ExecuteFromBlockAPI')

const ExecuteFromBlockSchema = z.object({
  startBlockId: z.string().min(1, 'Start block ID is required'),
  sourceSnapshot: z.object({
    blockStates: z.record(z.any()),
    executedBlocks: z.array(z.string()),
    blockLogs: z.array(z.any()),
    decisions: z.object({
      router: z.record(z.string()),
      condition: z.record(z.string()),
    }),
    completedLoops: z.array(z.string()),
    loopExecutions: z.record(z.any()).optional(),
    parallelExecutions: z.record(z.any()).optional(),
    parallelBlockMapping: z.record(z.any()).optional(),
    activeExecutionPath: z.array(z.string()),
  }),
  input: z.any().optional(),
})
|
||||
export const runtime = 'nodejs'
|
||||
export const dynamic = 'force-dynamic'
|
||||
|
||||
export async function POST(req: NextRequest, { params }: { params: Promise<{ id: string }> }) {
|
||||
const requestId = generateRequestId()
|
||||
const { id: workflowId } = await params
|
||||
|
||||
try {
|
||||
const auth = await checkHybridAuth(req, { requireWorkflowId: false })
|
||||
if (!auth.success || !auth.userId) {
|
||||
return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
const userId = auth.userId
|
||||
|
||||
let body: unknown
|
||||
try {
|
||||
body = await req.json()
|
||||
} catch {
|
||||
return NextResponse.json({ error: 'Invalid JSON body' }, { status: 400 })
|
||||
}
|
||||
|
||||
const validation = ExecuteFromBlockSchema.safeParse(body)
|
||||
if (!validation.success) {
|
||||
logger.warn(`[${requestId}] Invalid request body:`, validation.error.errors)
|
||||
return NextResponse.json(
|
||||
{
|
||||
error: 'Invalid request body',
|
||||
details: validation.error.errors.map((e) => ({
|
||||
path: e.path.join('.'),
|
||||
message: e.message,
|
||||
})),
|
||||
},
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
const { startBlockId, sourceSnapshot, input } = validation.data
|
||||
const executionId = uuidv4()
|
||||
|
||||
// Run preprocessing checks (billing, rate limits, usage limits)
|
||||
const preprocessResult = await preprocessExecution({
|
||||
workflowId,
|
||||
userId,
|
||||
triggerType: 'manual',
|
||||
executionId,
|
||||
requestId,
|
||||
checkRateLimit: false, // Manual executions don't rate limit
|
||||
checkDeployment: false, // Run-from-block doesn't require deployment
|
||||
})
|
||||
|
||||
if (!preprocessResult.success) {
|
||||
const { error } = preprocessResult
|
||||
logger.warn(`[${requestId}] Preprocessing failed for run-from-block`, {
|
||||
workflowId,
|
||||
error: error?.message,
|
||||
statusCode: error?.statusCode,
|
||||
})
|
||||
return NextResponse.json(
|
||||
{ error: error?.message || 'Execution blocked' },
|
||||
{ status: error?.statusCode || 500 }
|
||||
)
|
||||
}
|
||||
|
||||
const workflowRecord = preprocessResult.workflowRecord
|
||||
if (!workflowRecord?.workspaceId) {
|
||||
return NextResponse.json({ error: 'Workflow not found or has no workspace' }, { status: 404 })
|
||||
}
|
||||
|
||||
const workspaceId = workflowRecord.workspaceId
|
||||
const workflowUserId = workflowRecord.userId
|
||||
|
||||
logger.info(`[${requestId}] Starting run-from-block execution`, {
|
||||
workflowId,
|
||||
startBlockId,
|
||||
executedBlocksCount: sourceSnapshot.executedBlocks.length,
|
||||
billingActorUserId: preprocessResult.actorUserId,
|
||||
})
|
||||
|
||||
const loggingSession = new LoggingSession(workflowId, executionId, 'manual', requestId)
|
||||
const abortController = new AbortController()
|
||||
let isStreamClosed = false
|
||||
let isTimedOut = false
|
||||
|
||||
const syncTimeout = preprocessResult.executionTimeout?.sync
|
||||
let timeoutId: NodeJS.Timeout | undefined
|
||||
if (syncTimeout) {
|
||||
timeoutId = setTimeout(() => {
|
||||
isTimedOut = true
|
||||
abortController.abort()
|
||||
}, syncTimeout)
|
||||
}
|
||||
|
||||
const stream = new ReadableStream<Uint8Array>({
|
||||
async start(controller) {
|
||||
const { sendEvent, onBlockStart, onBlockComplete, onStream } = createSSECallbacks({
|
||||
executionId,
|
||||
workflowId,
|
||||
controller,
|
||||
isStreamClosed: () => isStreamClosed,
|
||||
setStreamClosed: () => {
|
||||
isStreamClosed = true
|
||||
},
|
||||
})
|
||||
|
||||
const metadata: ExecutionMetadata = {
|
||||
requestId,
|
||||
workflowId,
|
||||
userId,
|
||||
executionId,
|
||||
triggerType: 'manual',
|
||||
workspaceId,
|
||||
workflowUserId,
|
||||
useDraftState: true,
|
||||
isClientSession: true,
|
||||
startTime: new Date().toISOString(),
|
||||
}
|
||||
|
||||
const snapshot = new ExecutionSnapshot(metadata, {}, input || {}, {})
|
||||
|
||||
try {
|
||||
const startTime = new Date()
|
||||
|
||||
sendEvent({
|
||||
type: 'execution:started',
|
||||
timestamp: startTime.toISOString(),
|
||||
executionId,
|
||||
workflowId,
|
||||
data: { startTime: startTime.toISOString() },
|
||||
})
|
||||
|
||||
const result = await executeWorkflowCore({
|
||||
snapshot,
|
||||
loggingSession,
|
||||
abortSignal: abortController.signal,
|
||||
runFromBlock: {
|
||||
startBlockId,
|
||||
sourceSnapshot: sourceSnapshot as SerializableExecutionState,
|
||||
},
|
||||
callbacks: { onBlockStart, onBlockComplete, onStream },
|
||||
})
|
||||
|
||||
if (result.status === 'cancelled') {
|
||||
if (isTimedOut && syncTimeout) {
|
||||
const timeoutErrorMessage = getTimeoutErrorMessage(null, syncTimeout)
|
||||
logger.info(`[${requestId}] Run-from-block execution timed out`, {
|
||||
timeoutMs: syncTimeout,
|
||||
})
|
||||
|
||||
await loggingSession.markAsFailed(timeoutErrorMessage)
|
||||
|
||||
sendEvent({
|
||||
type: 'execution:error',
|
||||
timestamp: new Date().toISOString(),
|
||||
executionId,
|
||||
workflowId,
|
||||
data: {
|
||||
error: timeoutErrorMessage,
|
||||
duration: result.metadata?.duration || 0,
|
||||
},
|
||||
})
|
||||
} else {
|
||||
sendEvent({
|
||||
type: 'execution:cancelled',
|
||||
timestamp: new Date().toISOString(),
|
||||
executionId,
|
||||
workflowId,
|
||||
data: { duration: result.metadata?.duration || 0 },
|
||||
})
|
||||
}
|
||||
} else {
|
||||
sendEvent({
|
||||
type: 'execution:completed',
|
||||
timestamp: new Date().toISOString(),
|
||||
executionId,
|
||||
workflowId,
|
||||
data: {
|
||||
success: result.success,
|
||||
output: result.output,
|
||||
duration: result.metadata?.duration || 0,
|
||||
startTime: result.metadata?.startTime || startTime.toISOString(),
|
||||
endTime: result.metadata?.endTime || new Date().toISOString(),
|
||||
},
|
||||
})
|
||||
}
|
||||
} catch (error: unknown) {
|
||||
const isTimeout = isTimeoutError(error) || isTimedOut
|
||||
const errorMessage = isTimeout
|
||||
? getTimeoutErrorMessage(error, syncTimeout)
|
||||
: error instanceof Error
|
||||
? error.message
|
||||
: 'Unknown error'
|
||||
|
||||
logger.error(`[${requestId}] Run-from-block execution failed: ${errorMessage}`, {
|
||||
isTimeout,
|
||||
})
|
||||
|
||||
const executionResult = hasExecutionResult(error) ? error.executionResult : undefined
|
||||
const { traceSpans, totalDuration } = executionResult
|
||||
? buildTraceSpans(executionResult)
|
||||
: { traceSpans: [], totalDuration: 0 }
|
||||
|
||||
await loggingSession.safeCompleteWithError({
|
||||
totalDurationMs: totalDuration || executionResult?.metadata?.duration,
|
||||
error: { message: errorMessage },
|
||||
traceSpans,
|
||||
})
|
||||
|
||||
sendEvent({
|
||||
type: 'execution:error',
|
||||
timestamp: new Date().toISOString(),
|
||||
executionId,
|
||||
workflowId,
|
||||
data: {
|
||||
error: executionResult?.error || errorMessage,
|
||||
duration: executionResult?.metadata?.duration || 0,
|
||||
},
|
||||
})
|
||||
} finally {
|
||||
if (timeoutId) clearTimeout(timeoutId)
|
||||
if (!isStreamClosed) {
|
||||
try {
|
||||
controller.enqueue(new TextEncoder().encode('data: [DONE]\n\n'))
|
||||
controller.close()
|
||||
} catch {}
|
||||
}
|
||||
}
|
||||
},
|
||||
cancel() {
|
||||
isStreamClosed = true
|
||||
if (timeoutId) clearTimeout(timeoutId)
|
||||
abortController.abort()
|
||||
markExecutionCancelled(executionId).catch(() => {})
|
||||
},
|
||||
})
|
||||
|
||||
return new NextResponse(stream, {
|
||||
headers: { ...SSE_HEADERS, 'X-Execution-Id': executionId },
|
||||
})
|
||||
} catch (error: unknown) {
|
||||
const errorMessage = error instanceof Error ? error.message : 'Unknown error'
|
||||
logger.error(`[${requestId}] Failed to start run-from-block execution:`, error)
|
||||
return NextResponse.json(
|
||||
{ error: errorMessage || 'Failed to start execution' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
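
// The deleted route above is superseded by a `runFromBlock` option on the main
// execute endpoint (see the schema and client changes later in this diff). A
// minimal sketch of the replacement call; ids and the snapshot are placeholders:
async function rerunFromBlock(workflowId: string, startBlockId: string, sourceSnapshot: unknown) {
  return fetch(`/api/workflows/${workflowId}/execute`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ stream: true, runFromBlock: { startBlockId, sourceSnapshot } }),
  })
}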
@@ -1,10 +1,9 @@
import { createLogger } from '@sim/logger'
import { tasks } from '@trigger.dev/sdk'
import { type NextRequest, NextResponse } from 'next/server'
import { validate as uuidValidate, v4 as uuidv4 } from 'uuid'
import { z } from 'zod'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { isTriggerDevEnabled } from '@/lib/core/config/feature-flags'
import { getJobQueue, shouldExecuteInline } from '@/lib/core/async-jobs'
import {
createTimeoutAbortController,
getTimeoutErrorMessage,
@@ -31,7 +30,7 @@ import {
} from '@/lib/workflows/persistence/utils'
import { createStreamingResponse } from '@/lib/workflows/streaming/streaming'
import { createHttpResponseFromBlock, workflowHasResponseBlock } from '@/lib/workflows/utils'
import type { WorkflowExecutionPayload } from '@/background/workflow-execution'
import { executeWorkflowJob, type WorkflowExecutionPayload } from '@/background/workflow-execution'
import { normalizeName } from '@/executor/constants'
import { ExecutionSnapshot } from '@/executor/execution/snapshot'
import type { ExecutionMetadata, IterationContext } from '@/executor/execution/types'
@@ -60,6 +59,25 @@ const ExecuteWorkflowSchema = z.object({
})
.optional(),
stopAfterBlockId: z.string().optional(),
runFromBlock: z
.object({
startBlockId: z.string().min(1, 'Start block ID is required'),
sourceSnapshot: z.object({
blockStates: z.record(z.any()),
executedBlocks: z.array(z.string()),
blockLogs: z.array(z.any()),
decisions: z.object({
router: z.record(z.string()),
condition: z.record(z.string()),
}),
completedLoops: z.array(z.string()),
loopExecutions: z.record(z.any()).optional(),
parallelExecutions: z.record(z.any()).optional(),
parallelBlockMapping: z.record(z.any()).optional(),
activeExecutionPath: z.array(z.string()),
}),
})
.optional(),
})
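
// Illustrative only: a request body that satisfies the `runFromBlock` shape
// declared above (all values are placeholders, not from a real execution):
const exampleRunFromBlockBody = {
  runFromBlock: {
    startBlockId: 'block_123',
    sourceSnapshot: {
      blockStates: {},
      executedBlocks: ['block_start'],
      blockLogs: [],
      decisions: { router: {}, condition: {} },
      completedLoops: [],
      activeExecutionPath: ['block_start', 'block_123'],
    },
  },
}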

export const runtime = 'nodejs'
@@ -124,41 +142,66 @@ type AsyncExecutionParams = {
userId: string
input: any
triggerType: CoreTriggerType
executionId: string
}

async function handleAsyncExecution(params: AsyncExecutionParams): Promise<NextResponse> {
const { requestId, workflowId, userId, input, triggerType } = params

if (!isTriggerDevEnabled) {
logger.warn(`[${requestId}] Async mode requested but TRIGGER_DEV_ENABLED is false`)
return NextResponse.json(
{ error: 'Async execution is not enabled. Set TRIGGER_DEV_ENABLED=true to use async mode.' },
{ status: 400 }
)
}
const { requestId, workflowId, userId, input, triggerType, executionId } = params

const payload: WorkflowExecutionPayload = {
workflowId,
userId,
input,
triggerType,
executionId,
}

try {
const handle = await tasks.trigger('workflow-execution', payload)
const jobQueue = await getJobQueue()
const jobId = await jobQueue.enqueue('workflow-execution', payload, {
metadata: { workflowId, userId },
})

logger.info(`[${requestId}] Queued async workflow execution`, {
workflowId,
jobId: handle.id,
jobId,
})

if (shouldExecuteInline()) {
void (async () => {
try {
await jobQueue.startJob(jobId)
const output = await executeWorkflowJob(payload)
await jobQueue.completeJob(jobId, output)
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error)
logger.error(`[${requestId}] Async workflow execution failed`, {
jobId,
error: errorMessage,
})
try {
await jobQueue.markJobFailed(jobId, errorMessage)
} catch (markFailedError) {
logger.error(`[${requestId}] Failed to mark job as failed`, {
jobId,
error:
markFailedError instanceof Error
? markFailedError.message
: String(markFailedError),
})
}
}
})()
}

return NextResponse.json(
{
success: true,
async: true,
jobId: handle.id,
jobId,
executionId,
message: 'Workflow execution queued',
statusUrl: `${getBaseUrl()}/api/jobs/${handle.id}`,
statusUrl: `${getBaseUrl()}/api/jobs/${jobId}`,
},
{ status: 202 }
)
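
// A rough sketch of polling the statusUrl returned by the 202 response above.
// The `status` values checked here are assumptions about the job record, not a
// documented contract:
async function waitForJob(statusUrl: string, intervalMs = 2000): Promise<unknown> {
  for (;;) {
    const res = await fetch(statusUrl)
    const job = await res.json()
    if (job.status === 'completed' || job.status === 'failed') return job
    await new Promise((resolve) => setTimeout(resolve, intervalMs))
  }
}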
@@ -226,6 +269,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
base64MaxBytes,
workflowStateOverride,
stopAfterBlockId,
runFromBlock,
} = validation.data

// For API key and internal JWT auth, the entire body is the input (except for our control fields)
@@ -242,6 +286,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
base64MaxBytes,
workflowStateOverride,
stopAfterBlockId: _stopAfterBlockId,
runFromBlock: _runFromBlock,
workflowId: _workflowId, // Also exclude workflowId used for internal JWT auth
...rest
} = body
@@ -320,6 +365,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
userId: actorUserId,
input,
triggerType: loggingTriggerType,
executionId,
})
}

@@ -444,6 +490,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
includeFileBase64,
base64MaxBytes,
stopAfterBlockId,
runFromBlock,
abortSignal: timeoutController.signal,
})

@@ -492,6 +539,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:

const filteredResult = {
success: result.success,
executionId,
output: outputWithBase64,
error: result.error,
metadata: result.metadata
@@ -783,6 +831,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
includeFileBase64,
base64MaxBytes,
stopAfterBlockId,
runFromBlock,
})

if (result.status === 'paused') {

@@ -508,8 +508,10 @@ export default function TemplateDetails({ isWorkspaceContext = false }: Template

setIsApproving(true)
try {
const response = await fetch(`/api/templates/${template.id}/approve`, {
method: 'POST',
const response = await fetch(`/api/templates/${template.id}`, {
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ status: 'approved' }),
})

if (response.ok) {
@@ -531,8 +533,10 @@ export default function TemplateDetails({ isWorkspaceContext = false }: Template

setIsRejecting(true)
try {
const response = await fetch(`/api/templates/${template.id}/reject`, {
method: 'POST',
const response = await fetch(`/api/templates/${template.id}`, {
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ status: 'rejected' }),
})

if (response.ok) {
@@ -554,10 +558,11 @@ export default function TemplateDetails({ isWorkspaceContext = false }: Template

setIsVerifying(true)
try {
const endpoint = `/api/creators/${template.creator.id}/verify`
const method = template.creator.verified ? 'DELETE' : 'POST'

const response = await fetch(endpoint, { method })
const response = await fetch(`/api/creators/${template.creator.id}`, {
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ verified: !template.creator.verified }),
})

if (response.ok) {
// Refresh page to show updated verification status
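
// The three moderation actions above now go through resource-style PUT updates
// instead of per-action POST endpoints. A condensed sketch of the shared
// pattern (the `putJson` helper is ours, not part of the codebase):
async function putJson(url: string, body: Record<string, unknown>) {
  return fetch(url, {
    method: 'PUT',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
  })
}
// e.g. await putJson(`/api/templates/${template.id}`, { status: 'approved' })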

@@ -1,6 +1,7 @@
'use client'

import { memo, useEffect, useMemo, useRef, useState } from 'react'
import { createLogger } from '@sim/logger'
import clsx from 'clsx'
import { ChevronUp, LayoutList } from 'lucide-react'
import Editor from 'react-simple-code-editor'
@@ -1257,99 +1258,42 @@ function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean {
return false
}

const toolCallLogger = createLogger('CopilotToolCall')

async function sendToolDecision(
toolCallId: string,
status: 'accepted' | 'rejected' | 'background'
) {
try {
await fetch('/api/copilot/confirm', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ toolCallId, status }),
})
} catch (error) {
toolCallLogger.warn('Failed to send tool decision', {
toolCallId,
status,
error: error instanceof Error ? error.message : String(error),
})
}
}
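
// Usage is fire-and-forget from the handlers that follow; failures are logged
// rather than thrown, so callers need no try/catch of their own. For example
// (the id is a placeholder):
// await sendToolDecision('toolcall_abc123', 'accepted')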

async function handleRun(
toolCall: CopilotToolCall,
setToolCallState: any,
onStateChange?: any,
editedParams?: any
) {
const instance = getClientTool(toolCall.id)

if (!instance && isIntegrationTool(toolCall.name)) {
onStateChange?.('executing')
try {
await useCopilotStore.getState().executeIntegrationTool(toolCall.id)
} catch (e) {
setToolCallState(toolCall, 'error', { error: e instanceof Error ? e.message : String(e) })
onStateChange?.('error')
try {
await fetch('/api/copilot/tools/mark-complete', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
id: toolCall.id,
name: toolCall.name,
status: 500,
message: e instanceof Error ? e.message : 'Tool execution failed',
data: { error: e instanceof Error ? e.message : String(e) },
}),
})
} catch {
console.error('[handleRun] Failed to notify backend of tool error:', toolCall.id)
}
}
return
}

if (!instance) return
try {
const mergedParams =
editedParams ||
(toolCall as any).params ||
(toolCall as any).parameters ||
(toolCall as any).input ||
{}
await instance.handleAccept?.(mergedParams)
onStateChange?.('executing')
} catch (e) {
setToolCallState(toolCall, 'error', { error: e instanceof Error ? e.message : String(e) })
}
setToolCallState(toolCall, 'executing', editedParams ? { params: editedParams } : undefined)
onStateChange?.('executing')
await sendToolDecision(toolCall.id, 'accepted')
}

async function handleSkip(toolCall: CopilotToolCall, setToolCallState: any, onStateChange?: any) {
const instance = getClientTool(toolCall.id)

if (!instance && isIntegrationTool(toolCall.name)) {
setToolCallState(toolCall, 'rejected')
onStateChange?.('rejected')

let notified = false
for (let attempt = 0; attempt < 3 && !notified; attempt++) {
try {
const res = await fetch('/api/copilot/tools/mark-complete', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
id: toolCall.id,
name: toolCall.name,
status: 400,
message: 'Tool execution skipped by user',
data: { skipped: true, reason: 'user_skipped' },
}),
})
if (res.ok) {
notified = true
}
} catch (e) {
if (attempt < 2) {
await new Promise((resolve) => setTimeout(resolve, 500))
}
}
}

if (!notified) {
console.error('[handleSkip] Failed to notify backend after 3 attempts:', toolCall.id)
}
return
}

if (instance) {
try {
await instance.handleReject?.()
} catch {}
}
setToolCallState(toolCall, 'rejected')
onStateChange?.('rejected')
await sendToolDecision(toolCall.id, 'rejected')
}

function getDisplayName(toolCall: CopilotToolCall): string {
@@ -1509,7 +1453,7 @@ export function ToolCall({
// Check if this integration tool is auto-allowed
// Subscribe to autoAllowedTools so we re-render when it changes
const autoAllowedTools = useCopilotStore((s) => s.autoAllowedTools)
const { removeAutoAllowedTool } = useCopilotStore()
const { removeAutoAllowedTool, setToolCallState } = useCopilotStore()
const isAutoAllowed = isIntegrationTool(toolCall.name) && autoAllowedTools.includes(toolCall.name)

// Update edited params when toolCall params change (deep comparison to avoid resetting user edits on ref change)
@@ -2211,16 +2155,9 @@ export function ToolCall({
<div className='mt-[10px]'>
<Button
onClick={async () => {
try {
const instance = getClientTool(toolCall.id)
instance?.setState?.((ClientToolCallState as any).background)
await instance?.markToolComplete?.(
200,
'The user has chosen to move the workflow execution to the background. Check back with them later to know when the workflow execution is complete'
)
forceUpdate({})
onStateChange?.('background')
} catch {}
setToolCallState(toolCall, ClientToolCallState.background)
onStateChange?.('background')
await sendToolDecision(toolCall.id, 'background')
}}
variant='tertiary'
title='Move to Background'
@@ -2232,21 +2169,9 @@ export function ToolCall({
<div className='mt-[10px]'>
<Button
onClick={async () => {
try {
const instance = getClientTool(toolCall.id)
const elapsedSeconds = instance?.getElapsedSeconds?.() || 0
instance?.setState?.((ClientToolCallState as any).background, {
result: { _elapsedSeconds: elapsedSeconds },
})
const { updateToolCallParams } = useCopilotStore.getState()
updateToolCallParams?.(toolCall.id, { _elapsedSeconds: Math.round(elapsedSeconds) })
await instance?.markToolComplete?.(
200,
`User woke you up after ${Math.round(elapsedSeconds)} seconds`
)
forceUpdate({})
onStateChange?.('background')
} catch {}
setToolCallState(toolCall, ClientToolCallState.background)
onStateChange?.('background')
await sendToolDecision(toolCall.id, 'background')
}}
variant='tertiary'
title='Wake'

@@ -114,6 +114,7 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
clearPlanArtifact,
savePlanArtifact,
loadAutoAllowedTools,
resumeActiveStream,
} = useCopilotStore()

// Initialize copilot
@@ -126,6 +127,7 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
loadAutoAllowedTools,
currentChat,
isSendingMessage,
resumeActiveStream,
})

// Handle scroll management (80px stickiness for copilot)
@@ -421,8 +423,8 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
</div>
</div>

{/* Show loading state until fully initialized */}
{!isInitialized ? (
{/* Show loading state until fully initialized, but skip if actively streaming (resume case) */}
{!isInitialized && !isSendingMessage ? (
<div className='flex h-full w-full items-center justify-center'>
<div className='flex flex-col items-center gap-3'>
<p className='text-muted-foreground text-sm'>Loading copilot</p>

@@ -14,6 +14,7 @@ interface UseCopilotInitializationProps {
loadAutoAllowedTools: () => Promise<void>
currentChat: any
isSendingMessage: boolean
resumeActiveStream: () => Promise<boolean>
}

/**
@@ -32,11 +33,13 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) {
loadAutoAllowedTools,
currentChat,
isSendingMessage,
resumeActiveStream,
} = props

const [isInitialized, setIsInitialized] = useState(false)
const lastWorkflowIdRef = useRef<string | null>(null)
const hasMountedRef = useRef(false)
const hasResumedRef = useRef(false)

/** Initialize on mount - loads chats if needed. Never loads during streaming */
useEffect(() => {
@@ -105,6 +108,16 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) {
isSendingMessage,
])

/** Try to resume active stream on mount - runs early, before waiting for chats */
useEffect(() => {
if (hasResumedRef.current || isSendingMessage) return
hasResumedRef.current = true
// Resume immediately on mount - don't wait for isInitialized
resumeActiveStream().catch((err) => {
logger.warn('[Copilot] Failed to resume active stream', err)
})
}, [isSendingMessage, resumeActiveStream])
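
// The ref above implements a "run once per mount" guard. A minimal standalone
// sketch of the same pattern, separate from this hook's streaming checks
// (assumes React; the hook name is ours):
// function useRunOnce(fn: () => void) {
//   const ran = useRef(false)
//   useEffect(() => {
//     if (ran.current) return
//     ran.current = true
//     fn()
//   }, [fn])
// }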

/** Load auto-allowed tools once on mount - runs immediately, independent of workflow */
const hasLoadedAutoAllowedToolsRef = useRef(false)
useEffect(() => {

@@ -12,7 +12,6 @@ import {
Tooltip,
} from '@/components/emcn'
import { Skeleton } from '@/components/ui'
import { getEnv, isTruthy } from '@/lib/core/config/env'
import { OutputSelect } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/chat/components/output-select/output-select'

interface WorkflowDeploymentInfo {
@@ -78,7 +77,6 @@ export function ApiDeploy({
async: false,
})

const isAsyncEnabled = isTruthy(getEnv('NEXT_PUBLIC_TRIGGER_DEV_ENABLED'))
const info = deploymentInfo ? { ...deploymentInfo, needsRedeployment } : null

const getBaseEndpoint = () => {
@@ -272,7 +270,7 @@ response = requests.post(
)

job = response.json()
print(job) # Contains job_id for status checking`
print(job) # Contains jobId and executionId`

case 'javascript':
return `const response = await fetch("${endpoint}", {
@@ -286,7 +284,7 @@ print(job) # Contains job_id for status checking`
});

const job = await response.json();
console.log(job); // Contains job_id for status checking`
console.log(job); // Contains jobId and executionId`

case 'typescript':
return `const response = await fetch("${endpoint}", {
@@ -299,8 +297,8 @@ console.log(job); // Contains job_id for status checking`
body: JSON.stringify(${JSON.stringify(payload)})
});

const job: { job_id: string } = await response.json();
console.log(job); // Contains job_id for status checking`
const job: { jobId: string; executionId: string } = await response.json();
console.log(job); // Contains jobId and executionId`

default:
return ''
@@ -539,55 +537,49 @@ console.log(limits);`
/>
</div>

{isAsyncEnabled && (
<div>
<div className='mb-[6.5px] flex items-center justify-between'>
<Label className='block pl-[2px] font-medium text-[13px] text-[var(--text-primary)]'>
Run workflow (async)
</Label>
<div className='flex items-center gap-[6px]'>
<Tooltip.Root>
<Tooltip.Trigger asChild>
<Button
variant='ghost'
onClick={() => handleCopy('async', getAsyncCommand())}
aria-label='Copy command'
className='!p-1.5 -my-1.5'
>
{copied.async ? (
<Check className='h-3 w-3' />
) : (
<Clipboard className='h-3 w-3' />
)}
</Button>
</Tooltip.Trigger>
<Tooltip.Content>
<span>{copied.async ? 'Copied' : 'Copy'}</span>
</Tooltip.Content>
</Tooltip.Root>
<Combobox
size='sm'
className='!w-fit !py-[2px] min-w-[100px] rounded-[6px] px-[9px]'
options={[
{ label: 'Execute Job', value: 'execute' },
{ label: 'Check Status', value: 'status' },
{ label: 'Rate Limits', value: 'rate-limits' },
]}
value={asyncExampleType}
onChange={(value) => setAsyncExampleType(value as AsyncExampleType)}
align='end'
dropdownWidth={160}
/>
</div>
<div>
<div className='mb-[6.5px] flex items-center justify-between'>
<Label className='block pl-[2px] font-medium text-[13px] text-[var(--text-primary)]'>
Run workflow (async)
</Label>
<div className='flex items-center gap-[6px]'>
<Tooltip.Root>
<Tooltip.Trigger asChild>
<Button
variant='ghost'
onClick={() => handleCopy('async', getAsyncCommand())}
aria-label='Copy command'
className='!p-1.5 -my-1.5'
>
{copied.async ? <Check className='h-3 w-3' /> : <Clipboard className='h-3 w-3' />}
</Button>
</Tooltip.Trigger>
<Tooltip.Content>
<span>{copied.async ? 'Copied' : 'Copy'}</span>
</Tooltip.Content>
</Tooltip.Root>
<Combobox
size='sm'
className='!w-fit !py-[2px] min-w-[100px] rounded-[6px] px-[9px]'
options={[
{ label: 'Execute Job', value: 'execute' },
{ label: 'Check Status', value: 'status' },
{ label: 'Rate Limits', value: 'rate-limits' },
]}
value={asyncExampleType}
onChange={(value) => setAsyncExampleType(value as AsyncExampleType)}
align='end'
dropdownWidth={160}
/>
</div>
<Code.Viewer
code={getAsyncCommand()}
language={LANGUAGE_SYNTAX[language]}
wrapText
className='!min-h-0 rounded-[4px] border border-[var(--border-1)]'
/>
</div>
)}
<Code.Viewer
code={getAsyncCommand()}
language={LANGUAGE_SYNTAX[language]}
wrapText
className='!min-h-0 rounded-[4px] border border-[var(--border-1)]'
/>
</div>
</div>
)
}

@@ -15,7 +15,7 @@ import type { PlanFeature } from '@/app/workspace/[workspaceId]/w/components/sid
export const PRO_PLAN_FEATURES: PlanFeature[] = [
{ icon: Zap, text: '150 runs per minute (sync)' },
{ icon: Clock, text: '1,000 runs per minute (async)' },
{ icon: Timer, text: '60 min sync execution limit' },
{ icon: Timer, text: '50 min sync execution limit' },
{ icon: HardDrive, text: '50GB file storage' },
{ icon: Users, text: 'Unlimited invites' },
{ icon: Database, text: 'Unlimited log retention' },
@@ -24,7 +24,7 @@ export const PRO_PLAN_FEATURES: PlanFeature[] = [
export const TEAM_PLAN_FEATURES: PlanFeature[] = [
{ icon: Zap, text: '300 runs per minute (sync)' },
{ icon: Clock, text: '2,500 runs per minute (async)' },
{ icon: Timer, text: '60 min sync execution limit' },
{ icon: Timer, text: '50 min sync execution limit' },
{ icon: HardDrive, text: '500GB file storage (pooled)' },
{ icon: Users, text: 'Unlimited invites' },
{ icon: Database, text: 'Unlimited log retention' },

@@ -20,6 +20,7 @@ export type WorkflowExecutionPayload = {
userId: string
input?: any
triggerType?: CoreTriggerType
executionId?: string
metadata?: Record<string, any>
}

@@ -30,7 +31,7 @@ export type WorkflowExecutionPayload = {
*/
export async function executeWorkflowJob(payload: WorkflowExecutionPayload) {
const workflowId = payload.workflowId
const executionId = uuidv4()
const executionId = payload.executionId || uuidv4()
const requestId = executionId.slice(0, 8)

logger.info(`[${requestId}] Starting workflow execution job: ${workflowId}`, {

@@ -143,11 +143,147 @@ export const MistralParseBlock: BlockConfig<MistralParserOutput> = {
},
}

/**
* V2 Block - Restored from main branch for backwards compatibility
* Hidden from toolbar, uses filePath subblock ID for advanced mode
*/
export const MistralParseV2Block: BlockConfig<MistralParserOutput> = {
...MistralParseBlock,
type: 'mistral_parse_v2',
name: 'Mistral Parser',
description: 'Extract text from PDF documents',
hideFromToolbar: true,
subBlocks: [
{
id: 'fileUpload',
title: 'PDF Document',
type: 'file-upload' as SubBlockType,
canonicalParamId: 'document',
acceptedTypes: 'application/pdf',
placeholder: 'Upload a PDF document',
mode: 'basic',
maxSize: 50,
},
{
id: 'filePath',
title: 'PDF Document',
type: 'short-input' as SubBlockType,
canonicalParamId: 'document',
placeholder: 'Document URL',
mode: 'advanced',
},
{
id: 'resultType',
title: 'Output Format',
type: 'dropdown',
options: [
{ id: 'markdown', label: 'Markdown' },
{ id: 'text', label: 'Plain Text' },
{ id: 'json', label: 'JSON' },
],
},
{
id: 'pages',
title: 'Specific Pages',
type: 'short-input',
placeholder: 'e.g. 0,1,2 (leave empty for all pages)',
},
{
id: 'apiKey',
title: 'API Key',
type: 'short-input' as SubBlockType,
placeholder: 'Enter your Mistral API key',
password: true,
required: true,
},
],
tools: {
access: ['mistral_parser_v2'],
config: {
tool: createVersionedToolSelector({
baseToolSelector: () => 'mistral_parser',
suffix: '_v2',
fallbackToolId: 'mistral_parser_v2',
}),
params: (params) => {
if (!params || !params.apiKey || params.apiKey.trim() === '') {
throw new Error('Mistral API key is required')
}

const parameters: Record<string, unknown> = {
apiKey: params.apiKey.trim(),
resultType: params.resultType || 'markdown',
}

// Original V2 pattern: fileUpload (basic) or filePath (advanced) or document (wired)
const documentInput = params.fileUpload || params.filePath || params.document
if (!documentInput) {
throw new Error('PDF document is required')
}
// Smart handling: object → fileUpload param, string → filePath param
if (typeof documentInput === 'object') {
parameters.fileUpload = documentInput
} else if (typeof documentInput === 'string') {
parameters.filePath = documentInput.trim()
}

let pagesArray: number[] | undefined
if (params.pages && params.pages.trim() !== '') {
try {
pagesArray = params.pages
.split(',')
.map((p: string) => p.trim())
.filter((p: string) => p.length > 0)
.map((p: string) => {
const num = Number.parseInt(p, 10)
if (Number.isNaN(num) || num < 0) {
throw new Error(`Invalid page number: ${p}`)
}
return num
})

if (pagesArray && pagesArray.length === 0) {
pagesArray = undefined
}
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : String(error)
throw new Error(`Page number format error: ${errorMessage}`)
}
}

if (pagesArray && pagesArray.length > 0) {
parameters.pages = pagesArray
}

return parameters
},
},
},
inputs: {
document: { type: 'json', description: 'Document input (file upload or URL reference)' },
filePath: { type: 'string', description: 'PDF document URL (advanced mode)' },
fileUpload: { type: 'json', description: 'Uploaded PDF file (basic mode)' },
apiKey: { type: 'string', description: 'Mistral API key' },
resultType: { type: 'string', description: 'Output format type' },
pages: { type: 'string', description: 'Page selection' },
},
outputs: {
pages: { type: 'array', description: 'Array of page objects from Mistral OCR' },
model: { type: 'string', description: 'Mistral OCR model identifier' },
usage_info: { type: 'json', description: 'Usage statistics from the API' },
document_annotation: { type: 'string', description: 'Structured annotation data' },
},
}
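
// The params mapper above routes one logical document input to two different
// tool params. A distilled sketch of that branching (function name and types
// are illustrative):
function mapDocumentInput(input: unknown): { fileUpload?: unknown; filePath?: string } {
  if (input && typeof input === 'object') return { fileUpload: input }
  if (typeof input === 'string') return { filePath: input.trim() }
  throw new Error('PDF document is required')
}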

/**
* V3 Block - New file handling pattern with UserFile normalization
* Uses fileReference subblock ID with canonicalParamId for proper file handling
*/
export const MistralParseV3Block: BlockConfig<MistralParserOutput> = {
...MistralParseBlock,
type: 'mistral_parse_v3',
name: 'Mistral Parser',
description: 'Extract text from PDF documents',
hideFromToolbar: false,
subBlocks: [
{
@@ -196,13 +332,9 @@ export const MistralParseV2Block: BlockConfig<MistralParserOutput> = {
},
],
tools: {
access: ['mistral_parser_v2'],
access: ['mistral_parser_v3'],
config: {
tool: createVersionedToolSelector({
baseToolSelector: () => 'mistral_parser',
suffix: '_v2',
fallbackToolId: 'mistral_parser_v2',
}),
tool: () => 'mistral_parser_v3',
params: (params) => {
if (!params || !params.apiKey || params.apiKey.trim() === '') {
throw new Error('Mistral API key is required')
@@ -213,6 +345,7 @@ export const MistralParseV2Block: BlockConfig<MistralParserOutput> = {
resultType: params.resultType || 'markdown',
}

// V3 pattern: normalize file inputs from basic/advanced modes
const documentInput = normalizeFileInput(
params.fileUpload || params.fileReference || params.document,
{ single: true }

@@ -79,7 +79,11 @@ import { MemoryBlock } from '@/blocks/blocks/memory'
import { MicrosoftExcelBlock, MicrosoftExcelV2Block } from '@/blocks/blocks/microsoft_excel'
import { MicrosoftPlannerBlock } from '@/blocks/blocks/microsoft_planner'
import { MicrosoftTeamsBlock } from '@/blocks/blocks/microsoft_teams'
import { MistralParseBlock, MistralParseV2Block } from '@/blocks/blocks/mistral_parse'
import {
MistralParseBlock,
MistralParseV2Block,
MistralParseV3Block,
} from '@/blocks/blocks/mistral_parse'
import { MongoDBBlock } from '@/blocks/blocks/mongodb'
import { MySQLBlock } from '@/blocks/blocks/mysql'
import { Neo4jBlock } from '@/blocks/blocks/neo4j'
@@ -255,6 +259,7 @@ export const registry: Record<string, BlockConfig> = {
microsoft_teams: MicrosoftTeamsBlock,
mistral_parse: MistralParseBlock,
mistral_parse_v2: MistralParseV2Block,
mistral_parse_v3: MistralParseV3Block,
mongodb: MongoDBBlock,
mysql: MySQLBlock,
neo4j: Neo4jBlock,

@@ -549,11 +549,12 @@ export function useActivateDeploymentVersion() {
workflowId,
version,
}: ActivateVersionVariables): Promise<ActivateVersionResult> => {
const response = await fetch(`/api/workflows/${workflowId}/deployments/${version}/activate`, {
method: 'POST',
const response = await fetch(`/api/workflows/${workflowId}/deployments/${version}`, {
method: 'PATCH',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({ isActive: true }),
})

if (!response.ok) {

@@ -211,12 +211,16 @@ export function useExecutionStream() {
currentExecutionRef.current = null

try {
const response = await fetch(`/api/workflows/${workflowId}/execute-from-block`, {
const response = await fetch(`/api/workflows/${workflowId}/execute`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({ startBlockId, sourceSnapshot, input }),
body: JSON.stringify({
stream: true,
input,
runFromBlock: { startBlockId, sourceSnapshot },
}),
signal: abortController.signal,
})
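
// A hedged sketch of draining the SSE body returned by the call above, using
// the `data: ...\n\n` framing the server code earlier in this diff emits:
async function drainSse(res: Response, onData: (payload: string) => void) {
  const reader = res.body!.getReader()
  const decoder = new TextDecoder()
  let buffer = ''
  for (;;) {
    const { done, value } = await reader.read()
    if (done) break
    buffer += decoder.decode(value, { stream: true })
    let idx = buffer.indexOf('\n\n')
    while (idx !== -1) {
      const chunk = buffer.slice(0, idx)
      buffer = buffer.slice(idx + 2)
      if (chunk.startsWith('data: ')) onData(chunk.slice(6))
      idx = buffer.indexOf('\n\n')
    }
  }
}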

@@ -82,6 +82,7 @@ export interface SendMessageRequest {
executionId?: string
}>
commands?: string[]
resumeFromEventId?: number
}

/**
@@ -120,7 +121,7 @@ export async function sendStreamingMessage(
request: SendMessageRequest
): Promise<StreamingResponse> {
try {
const { abortSignal, ...requestBody } = request
const { abortSignal, resumeFromEventId, ...requestBody } = request
try {
const preview = Array.isArray((requestBody as any).contexts)
? (requestBody as any).contexts.map((c: any) => ({
@@ -136,8 +137,51 @@ export async function sendStreamingMessage(
? (requestBody as any).contexts.length
: 0,
contextsPreview: preview,
resumeFromEventId,
})
} catch {}

const streamId = request.userMessageId
if (typeof resumeFromEventId === 'number') {
if (!streamId) {
return {
success: false,
error: 'streamId is required to resume a stream',
status: 400,
}
}
const url = `/api/copilot/chat/stream?streamId=${encodeURIComponent(
streamId
)}&from=${encodeURIComponent(String(resumeFromEventId))}`
const response = await fetch(url, {
method: 'GET',
signal: abortSignal,
credentials: 'include',
})

if (!response.ok) {
const errorMessage = await handleApiError(response, 'Failed to resume streaming message')
return {
success: false,
error: errorMessage,
status: response.status,
}
}

if (!response.body) {
return {
success: false,
error: 'No response body received',
status: 500,
}
}

return {
success: true,
stream: response.body,
}
}

const response = await fetch('/api/copilot/chat', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },

@@ -1,6 +1,6 @@
import { createLogger } from '@sim/logger'
import OpenAI, { AzureOpenAI } from 'openai'
import { env } from '@/lib/core/config/env'
import { extractResponseText } from '@/providers/openai/utils'

const logger = createLogger('SimAgentUtils')

@@ -12,47 +12,65 @@ const openaiApiKey = env.OPENAI_API_KEY

const useChatTitleAzure = azureApiKey && azureEndpoint && azureApiVersion

const client = useChatTitleAzure
? new AzureOpenAI({
apiKey: azureApiKey,
apiVersion: azureApiVersion,
endpoint: azureEndpoint,
})
: openaiApiKey
? new OpenAI({
apiKey: openaiApiKey,
})
: null

/**
* Generates a short title for a chat based on the first message
* @param message First user message in the chat
* @returns A short title or null if API key is not available
*/
export async function generateChatTitle(message: string): Promise<string | null> {
if (!client) {
if (!useChatTitleAzure && !openaiApiKey) {
return null
}

try {
const response = await client.chat.completions.create({
model: useChatTitleAzure ? chatTitleModelName : 'gpt-4o',
messages: [
{
role: 'system',
content:
'Generate a very short title (3-5 words max) for a chat that starts with this message. The title should be concise and descriptive. Do not wrap the title in quotes.',
},
{
role: 'user',
content: message,
},
],
max_tokens: 20,
temperature: 0.2,
const apiUrl = useChatTitleAzure
? `${azureEndpoint?.replace(/\/$/, '')}/openai/v1/responses?api-version=${azureApiVersion}`
: 'https://api.openai.com/v1/responses'

const headers: Record<string, string> = {
'Content-Type': 'application/json',
'OpenAI-Beta': 'responses=v1',
}

if (useChatTitleAzure) {
headers['api-key'] = azureApiKey!
} else {
headers.Authorization = `Bearer ${openaiApiKey}`
}

const response = await fetch(apiUrl, {
method: 'POST',
headers,
body: JSON.stringify({
model: useChatTitleAzure ? chatTitleModelName : 'gpt-4o',
input: [
{
role: 'system',
content:
'Generate a very short title (3-5 words max) for a chat that starts with this message. The title should be concise and descriptive. Do not wrap the title in quotes.',
},
{
role: 'user',
content: message,
},
],
max_output_tokens: 20,
temperature: 0.2,
}),
})

const title = response.choices[0]?.message?.content?.trim() || null
if (!response.ok) {
const errorText = await response.text()
logger.error('Error generating chat title:', {
status: response.status,
statusText: response.statusText,
error: errorText,
})
return null
}

const data = await response.json()
const title = extractResponseText(data.output)?.trim() || null
return title
} catch (error) {
logger.error('Error generating chat title:', error)
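
// Callers treat a null return as "no title available" (no API key configured,
// or the request failed). Example usage; persisting the title is up to the caller:
// const title = await generateChatTitle('How do I schedule this workflow?')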

apps/sim/lib/copilot/orchestrator/config.ts (new file, 36 lines)
@@ -0,0 +1,36 @@
export const INTERRUPT_TOOL_NAMES = [
'set_global_workflow_variables',
'run_workflow',
'manage_mcp_tool',
'manage_custom_tool',
'deploy_mcp',
'deploy_chat',
'deploy_api',
'create_workspace_mcp_server',
'set_environment_variables',
'make_api_request',
'oauth_request_access',
'navigate_ui',
'knowledge_base',
] as const

export const INTERRUPT_TOOL_SET = new Set<string>(INTERRUPT_TOOL_NAMES)

export const SUBAGENT_TOOL_NAMES = [
'debug',
'edit',
'plan',
'test',
'deploy',
'auth',
'research',
'knowledge',
'custom_tool',
'tour',
'info',
'workflow',
'evaluate',
'superagent',
] as const
export const SUBAGENT_TOOL_SET = new Set<string>(SUBAGENT_TOOL_NAMES)
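
// These Sets exist for O(1) membership checks while handling the stream, e.g.:
// if (INTERRUPT_TOOL_SET.has(toolName)) { /* pause for user confirmation */ }
// if (SUBAGENT_TOOL_SET.has(toolName)) { /* route nested subagent events */ }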
apps/sim/lib/copilot/orchestrator/index.ts (new file, 224 lines)
@@ -0,0 +1,224 @@
import { createLogger } from '@sim/logger'
import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants'
import {
getToolCallIdFromEvent,
handleSubagentRouting,
markToolCallSeen,
markToolResultSeen,
normalizeSseEvent,
sseHandlers,
subAgentHandlers,
wasToolCallSeen,
wasToolResultSeen,
} from '@/lib/copilot/orchestrator/sse-handlers'
import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser'
import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor'
import type {
OrchestratorOptions,
OrchestratorResult,
SSEEvent,
StreamingContext,
ToolCallSummary,
} from '@/lib/copilot/orchestrator/types'
import { env } from '@/lib/core/config/env'

const logger = createLogger('CopilotOrchestrator')
const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT

export interface OrchestrateStreamOptions extends OrchestratorOptions {
userId: string
workflowId: string
chatId?: string
}

/**
* Orchestrate a copilot SSE stream and execute tool calls server-side.
*/
export async function orchestrateCopilotStream(
requestPayload: Record<string, any>,
options: OrchestrateStreamOptions
): Promise<OrchestratorResult> {
const { userId, workflowId, chatId, timeout = 300000, abortSignal } = options
const execContext = await prepareExecutionContext(userId, workflowId)

const context: StreamingContext = {
chatId,
conversationId: undefined,
messageId: requestPayload?.messageId || crypto.randomUUID(),
accumulatedContent: '',
contentBlocks: [],
toolCalls: new Map(),
currentThinkingBlock: null,
isInThinkingBlock: false,
subAgentParentToolCallId: undefined,
subAgentContent: {},
subAgentToolCalls: {},
pendingContent: '',
streamComplete: false,
wasAborted: false,
errors: [],
}

try {
const response = await fetch(`${SIM_AGENT_API_URL}/api/chat-completion-streaming`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}),
},
body: JSON.stringify(requestPayload),
signal: abortSignal,
})

if (!response.ok) {
const errorText = await response.text().catch(() => '')
throw new Error(
`Copilot backend error (${response.status}): ${errorText || response.statusText}`
)
}

if (!response.body) {
throw new Error('Copilot backend response missing body')
}

const reader = response.body.getReader()
const decoder = new TextDecoder()

const timeoutId = setTimeout(() => {
context.errors.push('Request timed out')
context.streamComplete = true
reader.cancel().catch(() => {})
}, timeout)

try {
for await (const event of parseSSEStream(reader, decoder, abortSignal)) {
if (abortSignal?.aborted) {
context.wasAborted = true
break
}

const normalizedEvent = normalizeSseEvent(event)

// Skip tool_result events for tools the sim-side already executed.
// The sim-side emits its own tool_result with complete data.
// For server-side tools (not executed by sim), we still forward the Go backend's tool_result.
const toolCallId = getToolCallIdFromEvent(normalizedEvent)
const eventData = normalizedEvent.data

const isPartialToolCall =
normalizedEvent.type === 'tool_call' && eventData?.partial === true

const shouldSkipToolCall =
normalizedEvent.type === 'tool_call' &&
!!toolCallId &&
!isPartialToolCall &&
(wasToolResultSeen(toolCallId) || wasToolCallSeen(toolCallId))

if (
normalizedEvent.type === 'tool_call' &&
toolCallId &&
!isPartialToolCall &&
!shouldSkipToolCall
) {
markToolCallSeen(toolCallId)
}

const shouldSkipToolResult =
normalizedEvent.type === 'tool_result' &&
(() => {
if (!toolCallId) return false
if (wasToolResultSeen(toolCallId)) return true
markToolResultSeen(toolCallId)
return false
})()

if (!shouldSkipToolCall && !shouldSkipToolResult) {
await forwardEvent(normalizedEvent, options)
}

if (normalizedEvent.type === 'subagent_start') {
const toolCallId = normalizedEvent.data?.tool_call_id
if (toolCallId) {
context.subAgentParentToolCallId = toolCallId
context.subAgentContent[toolCallId] = ''
context.subAgentToolCalls[toolCallId] = []
}
continue
}

if (normalizedEvent.type === 'subagent_end') {
context.subAgentParentToolCallId = undefined
continue
}

if (handleSubagentRouting(normalizedEvent, context)) {
const handler = subAgentHandlers[normalizedEvent.type]
if (handler) {
await handler(normalizedEvent, context, execContext, options)
}
if (context.streamComplete) break
continue
}

const handler = sseHandlers[normalizedEvent.type]
if (handler) {
await handler(normalizedEvent, context, execContext, options)
}
if (context.streamComplete) break
}
} finally {
clearTimeout(timeoutId)
}

const result = buildResult(context)
await options.onComplete?.(result)
return result
} catch (error) {
const err = error instanceof Error ? error : new Error('Copilot orchestration failed')
logger.error('Copilot orchestration failed', { error: err.message })
await options.onError?.(err)
return {
success: false,
content: '',
contentBlocks: [],
toolCalls: [],
chatId: context.chatId,
conversationId: context.conversationId,
error: err.message,
}
}
}

async function forwardEvent(event: SSEEvent, options: OrchestratorOptions): Promise<void> {
try {
await options.onEvent?.(event)
} catch (error) {
logger.warn('Failed to forward SSE event', {
type: event.type,
error: error instanceof Error ? error.message : String(error),
})
}
}

function buildResult(context: StreamingContext): OrchestratorResult {
const toolCalls: ToolCallSummary[] = Array.from(context.toolCalls.values()).map((toolCall) => ({
id: toolCall.id,
name: toolCall.name,
status: toolCall.status,
params: toolCall.params,
result: toolCall.result?.output,
error: toolCall.error,
durationMs:
toolCall.endTime && toolCall.startTime ? toolCall.endTime - toolCall.startTime : undefined,
}))

return {
success: context.errors.length === 0,
content: context.accumulatedContent,
contentBlocks: context.contentBlocks,
toolCalls,
chatId: context.chatId,
conversationId: context.conversationId,
errors: context.errors.length ? context.errors : undefined,
}
}
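
// A minimal sketch of driving the orchestrator; payload fields and ids are
// placeholders, and only options declared in the types above are used:
async function runOrchestrationExample() {
  const result = await orchestrateCopilotStream(
    { messageId: crypto.randomUUID(), message: 'Add a webhook trigger' },
    {
      userId: 'user_123',
      workflowId: 'wf_123',
      onEvent: async (event) => console.log(event.type),
      onError: async (err) => console.error(err.message),
    }
  )
  if (!result.success) console.error(result.error ?? result.errors)
}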
apps/sim/lib/copilot/orchestrator/persistence.ts (new file, 140 lines)
@@ -0,0 +1,140 @@
import { db } from '@sim/db'
import { copilotChats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
import { getRedisClient } from '@/lib/core/config/redis'

const logger = createLogger('CopilotOrchestratorPersistence')

/**
* Create a new copilot chat record.
*/
export async function createChat(params: {
userId: string
workflowId: string
model: string
}): Promise<{ id: string }> {
const [chat] = await db
.insert(copilotChats)
.values({
userId: params.userId,
workflowId: params.workflowId,
model: params.model,
messages: [],
})
.returning({ id: copilotChats.id })

return { id: chat.id }
}

/**
* Load an existing chat for a user.
*/
export async function loadChat(chatId: string, userId: string) {
const [chat] = await db
.select()
.from(copilotChats)
.where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, userId)))
.limit(1)

return chat || null
}

/**
* Save chat messages and metadata.
*/
export async function saveMessages(
chatId: string,
messages: any[],
options?: {
title?: string
conversationId?: string
planArtifact?: string | null
config?: { mode?: string; model?: string }
}
): Promise<void> {
await db
.update(copilotChats)
.set({
messages,
updatedAt: new Date(),
...(options?.title ? { title: options.title } : {}),
...(options?.conversationId ? { conversationId: options.conversationId } : {}),
...(options?.planArtifact !== undefined ? { planArtifact: options.planArtifact } : {}),
...(options?.config ? { config: options.config } : {}),
})
.where(eq(copilotChats.id, chatId))
}

/**
* Update the conversationId for a chat without overwriting messages.
*/
export async function updateChatConversationId(
chatId: string,
conversationId: string
): Promise<void> {
await db
.update(copilotChats)
.set({
conversationId,
updatedAt: new Date(),
})
.where(eq(copilotChats.id, chatId))
}

/**
* Set a tool call confirmation status in Redis.
*/
export async function setToolConfirmation(
toolCallId: string,
status: 'accepted' | 'rejected' | 'background' | 'pending',
message?: string
): Promise<boolean> {
const redis = getRedisClient()
if (!redis) {
logger.warn('Redis client not available for tool confirmation')
return false
}

const key = `tool_call:${toolCallId}`
const payload = {
status,
message: message || null,
timestamp: new Date().toISOString(),
}

try {
await redis.set(key, JSON.stringify(payload), 'EX', 86400)
return true
} catch (error) {
logger.error('Failed to set tool confirmation', {
toolCallId,
error: error instanceof Error ? error.message : String(error),
})
return false
}
}

/**
* Get a tool call confirmation status from Redis.
*/
export async function getToolConfirmation(toolCallId: string): Promise<{
status: string
message?: string
timestamp?: string
} | null> {
const redis = getRedisClient()
if (!redis) return null

try {
const data = await redis.get(`tool_call:${toolCallId}`)
if (!data) return null
return JSON.parse(data) as { status: string; message?: string; timestamp?: string }
} catch (error) {
logger.error('Failed to read tool confirmation', {
toolCallId,
error: error instanceof Error ? error.message : String(error),
})
return null
}
}
|
||||
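
A minimal round-trip of the confirmation helpers above, as a UI approval handler might call them; waitForToolDecision in sse-handlers.ts polls the same Redis key:

```ts
import {
  getToolConfirmation,
  setToolConfirmation,
} from '@/lib/copilot/orchestrator/persistence'

async function approveToolCall(toolCallId: string): Promise<void> {
  // Record the user's decision with a 24h TTL (86400s in setToolConfirmation).
  const stored = await setToolConfirmation(toolCallId, 'accepted', 'Approved in UI')
  if (!stored) {
    throw new Error('Redis unavailable - confirmation not recorded')
  }

  const decision = await getToolConfirmation(toolCallId)
  console.log(decision?.status) // 'accepted'
}
```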
apps/sim/lib/copilot/orchestrator/sse-handlers.ts (new file, 589 lines)
@@ -0,0 +1,589 @@
import { createLogger } from '@sim/logger'
import { INTERRUPT_TOOL_SET, SUBAGENT_TOOL_SET } from '@/lib/copilot/orchestrator/config'
import { getToolConfirmation } from '@/lib/copilot/orchestrator/persistence'
import { executeToolServerSide, markToolComplete } from '@/lib/copilot/orchestrator/tool-executor'
import type {
  ContentBlock,
  ExecutionContext,
  OrchestratorOptions,
  SSEEvent,
  StreamingContext,
  ToolCallState,
} from '@/lib/copilot/orchestrator/types'

const logger = createLogger('CopilotSseHandlers')

/**
 * Tracks tool call IDs for which a tool_call has already been forwarded/emitted (non-partial).
 */
const seenToolCalls = new Set<string>()

/**
 * Tracks tool call IDs for which a tool_result has already been emitted or forwarded.
 */
const seenToolResults = new Set<string>()

export function markToolCallSeen(toolCallId: string): void {
  seenToolCalls.add(toolCallId)
  setTimeout(
    () => {
      seenToolCalls.delete(toolCallId)
    },
    5 * 60 * 1000
  )
}

export function wasToolCallSeen(toolCallId: string): boolean {
  return seenToolCalls.has(toolCallId)
}

type EventDataObject = Record<string, any> | undefined

const parseEventData = (data: unknown): EventDataObject => {
  if (!data) return undefined
  if (typeof data !== 'string') {
    return data as EventDataObject
  }
  try {
    return JSON.parse(data) as EventDataObject
  } catch {
    return undefined
  }
}

const hasToolFields = (data: EventDataObject): boolean => {
  if (!data) return false
  return (
    data.id !== undefined ||
    data.toolCallId !== undefined ||
    data.name !== undefined ||
    data.success !== undefined ||
    data.result !== undefined ||
    data.arguments !== undefined
  )
}

const getEventData = (event: SSEEvent): EventDataObject => {
  const topLevel = parseEventData(event.data)
  if (!topLevel) return undefined
  if (hasToolFields(topLevel)) return topLevel
  const nested = parseEventData(topLevel.data)
  return nested || topLevel
}

export function getToolCallIdFromEvent(event: SSEEvent): string | undefined {
  const data = getEventData(event)
  return event.toolCallId || data?.id || data?.toolCallId
}

/** Normalizes SSE events so tool metadata is available at the top level. */
export function normalizeSseEvent(event: SSEEvent): SSEEvent {
  if (!event) return event
  const data = getEventData(event)
  if (!data) return event
  const toolCallId = event.toolCallId || data.id || data.toolCallId
  const toolName = event.toolName || data.name || data.toolName
  const success = event.success ?? data.success
  const result = event.result ?? data.result
  const normalizedData = typeof event.data === 'string' ? data : event.data
  return {
    ...event,
    data: normalizedData,
    toolCallId,
    toolName,
    success,
    result,
  }
}

/**
 * Mark a tool call as executed by the sim side.
 * This prevents the Go backend's duplicate tool_result from being forwarded.
 */
export function markToolResultSeen(toolCallId: string): void {
  seenToolResults.add(toolCallId)
  setTimeout(
    () => {
      seenToolResults.delete(toolCallId)
    },
    5 * 60 * 1000
  )
}

/**
 * Check if a tool call was executed by the sim side.
 */
export function wasToolResultSeen(toolCallId: string): boolean {
  return seenToolResults.has(toolCallId)
}

/**
 * Respond tools are internal to the copilot's subagent system.
 * They're used by subagents to signal completion and should NOT be executed by the sim side.
 * The copilot backend handles these internally.
 */
const RESPOND_TOOL_SET = new Set([
  'plan_respond',
  'edit_respond',
  'debug_respond',
  'info_respond',
  'research_respond',
  'deploy_respond',
  'superagent_respond',
  'discovery_respond',
])

export type SSEHandler = (
  event: SSEEvent,
  context: StreamingContext,
  execContext: ExecutionContext,
  options: OrchestratorOptions
) => void | Promise<void>

function addContentBlock(context: StreamingContext, block: Omit<ContentBlock, 'timestamp'>): void {
  context.contentBlocks.push({
    ...block,
    timestamp: Date.now(),
  })
}

async function executeToolAndReport(
  toolCallId: string,
  context: StreamingContext,
  execContext: ExecutionContext,
  options?: OrchestratorOptions
): Promise<void> {
  const toolCall = context.toolCalls.get(toolCallId)
  if (!toolCall) return

  if (toolCall.status === 'executing') return
  if (wasToolResultSeen(toolCall.id)) return

  toolCall.status = 'executing'
  try {
    const result = await executeToolServerSide(toolCall, execContext)
    toolCall.status = result.success ? 'success' : 'error'
    toolCall.result = result
    toolCall.error = result.error
    toolCall.endTime = Date.now()

    // If create_workflow was successful, update the execution context with the new workflowId.
    // This ensures subsequent tools in the same stream have access to the workflowId.
    if (
      toolCall.name === 'create_workflow' &&
      result.success &&
      result.output?.workflowId &&
      !execContext.workflowId
    ) {
      execContext.workflowId = result.output.workflowId
      if (result.output.workspaceId) {
        execContext.workspaceId = result.output.workspaceId
      }
    }

    markToolResultSeen(toolCall.id)

    await markToolComplete(
      toolCall.id,
      toolCall.name,
      result.success ? 200 : 500,
      result.error || (result.success ? 'Tool completed' : 'Tool failed'),
      result.output
    )

    await options?.onEvent?.({
      type: 'tool_result',
      toolCallId: toolCall.id,
      toolName: toolCall.name,
      success: result.success,
      result: result.output,
      data: {
        id: toolCall.id,
        name: toolCall.name,
        success: result.success,
        result: result.output,
      },
    })
  } catch (error) {
    toolCall.status = 'error'
    toolCall.error = error instanceof Error ? error.message : String(error)
    toolCall.endTime = Date.now()

    markToolResultSeen(toolCall.id)

    await markToolComplete(toolCall.id, toolCall.name, 500, toolCall.error)

    await options?.onEvent?.({
      type: 'tool_error',
      toolCallId: toolCall.id,
      data: {
        id: toolCall.id,
        name: toolCall.name,
        error: toolCall.error,
      },
    })
  }
}

async function waitForToolDecision(
  toolCallId: string,
  timeoutMs: number
): Promise<{ status: string; message?: string } | null> {
  const start = Date.now()
  while (Date.now() - start < timeoutMs) {
    const decision = await getToolConfirmation(toolCallId)
    if (decision?.status) {
      return decision
    }
    await new Promise((resolve) => setTimeout(resolve, 100))
  }
  return null
}

export const sseHandlers: Record<string, SSEHandler> = {
  chat_id: (event, context) => {
    context.chatId = event.data?.chatId
  },
  title_updated: () => {},
  tool_result: (event, context) => {
    const data = getEventData(event)
    const toolCallId = event.toolCallId || data?.id
    if (!toolCallId) return
    const current = context.toolCalls.get(toolCallId)
    if (!current) return

    // Determine success: explicit success field, or result data without explicit failure
    const hasExplicitSuccess = data?.success !== undefined || data?.result?.success !== undefined
    const explicitSuccess = data?.success ?? data?.result?.success
    const hasResultData = data?.result !== undefined || data?.data !== undefined
    const hasError = !!data?.error || !!data?.result?.error

    // If explicitly set, use that; otherwise infer from data presence
    const success = hasExplicitSuccess ? !!explicitSuccess : hasResultData && !hasError

    current.status = success ? 'success' : 'error'
    current.endTime = Date.now()
    if (hasResultData) {
      current.result = {
        success,
        output: data?.result || data?.data,
      }
    }
    if (hasError) {
      current.error = data?.error || data?.result?.error
    }
  },
  tool_error: (event, context) => {
    const data = getEventData(event)
    const toolCallId = event.toolCallId || data?.id
    if (!toolCallId) return
    const current = context.toolCalls.get(toolCallId)
    if (!current) return
    current.status = 'error'
    current.error = data?.error || 'Tool execution failed'
    current.endTime = Date.now()
  },
  tool_generating: (event, context) => {
    const data = getEventData(event)
    const toolCallId = event.toolCallId || data?.toolCallId || data?.id
    const toolName = event.toolName || data?.toolName || data?.name
    if (!toolCallId || !toolName) return
    if (!context.toolCalls.has(toolCallId)) {
      context.toolCalls.set(toolCallId, {
        id: toolCallId,
        name: toolName,
        status: 'pending',
        startTime: Date.now(),
      })
    }
  },
  tool_call: async (event, context, execContext, options) => {
    const toolData = getEventData(event) || {}
    const toolCallId = toolData.id || event.toolCallId
    const toolName = toolData.name || event.toolName
    if (!toolCallId || !toolName) return

    const args = toolData.arguments || toolData.input || event.data?.input
    const isPartial = toolData.partial === true
    const existing = context.toolCalls.get(toolCallId)

    // If we've already completed this tool call, ignore late/duplicate tool_call events
    // to avoid resetting UI/state back to pending and re-executing.
    if (
      existing?.endTime ||
      (existing && existing.status !== 'pending' && existing.status !== 'executing')
    ) {
      if (!existing.params && args) {
        existing.params = args
      }
      return
    }

    if (existing) {
      if (args && !existing.params) existing.params = args
    } else {
      context.toolCalls.set(toolCallId, {
        id: toolCallId,
        name: toolName,
        status: 'pending',
        params: args,
        startTime: Date.now(),
      })
      const created = context.toolCalls.get(toolCallId)!
      addContentBlock(context, { type: 'tool_call', toolCall: created })
    }

    if (isPartial) return
    if (wasToolResultSeen(toolCallId)) return

    const toolCall = context.toolCalls.get(toolCallId)
    if (!toolCall) return

    // Subagent tools are executed by the copilot backend, not the sim side
    if (SUBAGENT_TOOL_SET.has(toolName)) {
      return
    }

    // Respond tools are internal to copilot's subagent system - skip execution.
    // The copilot backend handles these internally to signal subagent completion.
    if (RESPOND_TOOL_SET.has(toolName)) {
      toolCall.status = 'success'
      toolCall.endTime = Date.now()
      toolCall.result = {
        success: true,
        output: 'Internal respond tool - handled by copilot backend',
      }
      return
    }

    const isInterruptTool = INTERRUPT_TOOL_SET.has(toolName)
    const isInteractive = options.interactive === true

    if (isInterruptTool && isInteractive) {
      const decision = await waitForToolDecision(toolCallId, options.timeout || 600000)
      if (decision?.status === 'accepted' || decision?.status === 'success') {
        await executeToolAndReport(toolCallId, context, execContext, options)
        return
      }

      if (decision?.status === 'rejected' || decision?.status === 'error') {
        toolCall.status = 'rejected'
        toolCall.endTime = Date.now()
        await markToolComplete(
          toolCall.id,
          toolCall.name,
          400,
          decision.message || 'Tool execution rejected',
          { skipped: true, reason: 'user_rejected' }
        )
        markToolResultSeen(toolCall.id)
        await options.onEvent?.({
          type: 'tool_result',
          toolCallId: toolCall.id,
          data: {
            id: toolCall.id,
            name: toolCall.name,
            success: false,
            result: { skipped: true, reason: 'user_rejected' },
          },
        })
        return
      }

      if (decision?.status === 'background') {
        toolCall.status = 'skipped'
        toolCall.endTime = Date.now()
        await markToolComplete(
          toolCall.id,
          toolCall.name,
          202,
          decision.message || 'Tool execution moved to background',
          { background: true }
        )
        markToolResultSeen(toolCall.id)
        await options.onEvent?.({
          type: 'tool_result',
          toolCallId: toolCall.id,
          data: {
            id: toolCall.id,
            name: toolCall.name,
            success: true,
            result: { background: true },
          },
        })
        return
      }
    }

    if (options.autoExecuteTools !== false) {
      await executeToolAndReport(toolCallId, context, execContext, options)
    }
  },
  reasoning: (event, context) => {
    const phase = event.data?.phase || event.data?.data?.phase
    if (phase === 'start') {
      context.isInThinkingBlock = true
      context.currentThinkingBlock = {
        type: 'thinking',
        content: '',
        timestamp: Date.now(),
      }
      return
    }
    if (phase === 'end') {
      if (context.currentThinkingBlock) {
        context.contentBlocks.push(context.currentThinkingBlock)
      }
      context.isInThinkingBlock = false
      context.currentThinkingBlock = null
      return
    }
    const chunk =
      typeof event.data === 'string' ? event.data : event.data?.data || event.data?.content
    if (!chunk || !context.currentThinkingBlock) return
    context.currentThinkingBlock.content = `${context.currentThinkingBlock.content || ''}${chunk}`
  },
  content: (event, context) => {
    const chunk =
      typeof event.data === 'string' ? event.data : event.data?.content || event.data?.data
    if (!chunk) return
    context.accumulatedContent += chunk
    addContentBlock(context, { type: 'text', content: chunk })
  },
  done: (event, context) => {
    if (event.data?.responseId) {
      context.conversationId = event.data.responseId
    }
    context.streamComplete = true
  },
  start: (event, context) => {
    if (event.data?.responseId) {
      context.conversationId = event.data.responseId
    }
  },
  error: (event, context) => {
    const message =
      event.data?.message ||
      event.data?.error ||
      (typeof event.data === 'string' ? event.data : null)
    if (message) {
      context.errors.push(message)
    }
    context.streamComplete = true
  },
}

export const subAgentHandlers: Record<string, SSEHandler> = {
  content: (event, context) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId || !event.data) return
    const chunk = typeof event.data === 'string' ? event.data : event.data?.content || ''
    if (!chunk) return
    context.subAgentContent[parentToolCallId] =
      (context.subAgentContent[parentToolCallId] || '') + chunk
    addContentBlock(context, { type: 'subagent_text', content: chunk })
  },
  tool_call: async (event, context, execContext, options) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) return
    const toolData = getEventData(event) || {}
    const toolCallId = toolData.id || event.toolCallId
    const toolName = toolData.name || event.toolName
    if (!toolCallId || !toolName) return
    const isPartial = toolData.partial === true
    const args = toolData.arguments || toolData.input || event.data?.input

    const existing = context.toolCalls.get(toolCallId)
    // Ignore late/duplicate tool_call events once we already have a result
    if (wasToolResultSeen(toolCallId) || existing?.endTime) {
      return
    }

    const toolCall: ToolCallState = {
      id: toolCallId,
      name: toolName,
      status: 'pending',
      params: args,
      startTime: Date.now(),
    }

    // Store in both places - but do NOT overwrite existing tool call state for the same id
    if (!context.subAgentToolCalls[parentToolCallId]) {
      context.subAgentToolCalls[parentToolCallId] = []
    }
    if (!context.subAgentToolCalls[parentToolCallId].some((tc) => tc.id === toolCallId)) {
      context.subAgentToolCalls[parentToolCallId].push(toolCall)
    }
    if (!context.toolCalls.has(toolCallId)) {
      context.toolCalls.set(toolCallId, toolCall)
    }

    if (isPartial) return

    // Respond tools are internal to copilot's subagent system - skip execution
    if (RESPOND_TOOL_SET.has(toolName)) {
      toolCall.status = 'success'
      toolCall.endTime = Date.now()
      toolCall.result = {
        success: true,
        output: 'Internal respond tool - handled by copilot backend',
      }
      return
    }

    if (options.autoExecuteTools !== false) {
      await executeToolAndReport(toolCallId, context, execContext, options)
    }
  },
  tool_result: (event, context) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) return
    const data = getEventData(event)
    const toolCallId = event.toolCallId || data?.id
    if (!toolCallId) return

    // Update in subAgentToolCalls
    const toolCalls = context.subAgentToolCalls[parentToolCallId] || []
    const subAgentToolCall = toolCalls.find((tc) => tc.id === toolCallId)

    // Also update in main toolCalls (where we added it for execution)
    const mainToolCall = context.toolCalls.get(toolCallId)

    // Use same success inference logic as the main handler
    const hasExplicitSuccess = data?.success !== undefined || data?.result?.success !== undefined
    const explicitSuccess = data?.success ?? data?.result?.success
    const hasResultData = data?.result !== undefined || data?.data !== undefined
    const hasError = !!data?.error || !!data?.result?.error
    const success = hasExplicitSuccess ? !!explicitSuccess : hasResultData && !hasError

    const status = success ? 'success' : 'error'
    const endTime = Date.now()
    const result = hasResultData ? { success, output: data?.result || data?.data } : undefined

    if (subAgentToolCall) {
      subAgentToolCall.status = status
      subAgentToolCall.endTime = endTime
      if (result) subAgentToolCall.result = result
      if (hasError) subAgentToolCall.error = data?.error || data?.result?.error
    }

    if (mainToolCall) {
      mainToolCall.status = status
      mainToolCall.endTime = endTime
      if (result) mainToolCall.result = result
      if (hasError) mainToolCall.error = data?.error || data?.result?.error
    }
  },
}

export function handleSubagentRouting(event: SSEEvent, context: StreamingContext): boolean {
  if (!event.subagent) return false
  if (!context.subAgentParentToolCallId) {
    logger.warn('Subagent event missing parent tool call', {
      type: event.type,
      subagent: event.subagent,
    })
    return false
  }
  return true
}
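
A minimal sketch of dispatching one parsed event through the handler table above; the normalize-then-lookup pattern mirrors what the orchestrators in this diff do:

```ts
import { normalizeSseEvent, sseHandlers } from '@/lib/copilot/orchestrator/sse-handlers'
import type {
  ExecutionContext,
  OrchestratorOptions,
  SSEEvent,
  StreamingContext,
} from '@/lib/copilot/orchestrator/types'

async function dispatch(
  event: SSEEvent,
  context: StreamingContext,
  execContext: ExecutionContext,
  options: OrchestratorOptions
): Promise<void> {
  // Lift tool metadata (id, name, success, result) to the top level first.
  const normalized = normalizeSseEvent(event)
  const handler = sseHandlers[normalized.type]
  if (handler) {
    await handler(normalized, context, execContext, options)
  }
}
```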
apps/sim/lib/copilot/orchestrator/sse-parser.ts (new file, 71 lines)
@@ -0,0 +1,71 @@
import { createLogger } from '@sim/logger'
import type { SSEEvent } from '@/lib/copilot/orchestrator/types'

const logger = createLogger('CopilotSseParser')

/**
 * Parses SSE streams from the copilot backend into typed events.
 */
export async function* parseSSEStream(
  reader: ReadableStreamDefaultReader<Uint8Array>,
  decoder: TextDecoder,
  abortSignal?: AbortSignal
): AsyncGenerator<SSEEvent> {
  let buffer = ''

  try {
    while (true) {
      if (abortSignal?.aborted) {
        logger.info('SSE stream aborted by signal')
        break
      }

      const { done, value } = await reader.read()
      if (done) break

      buffer += decoder.decode(value, { stream: true })
      const lines = buffer.split('\n')
      buffer = lines.pop() || ''

      for (const line of lines) {
        if (!line.trim()) continue
        if (!line.startsWith('data: ')) continue

        const jsonStr = line.slice(6)
        if (jsonStr === '[DONE]') continue

        try {
          const event = JSON.parse(jsonStr) as SSEEvent
          if (event?.type) {
            yield event
          }
        } catch (error) {
          logger.warn('Failed to parse SSE event', {
            preview: jsonStr.slice(0, 200),
            error: error instanceof Error ? error.message : String(error),
          })
        }
      }
    }

    if (buffer.trim() && buffer.startsWith('data: ')) {
      try {
        const event = JSON.parse(buffer.slice(6)) as SSEEvent
        if (event?.type) {
          yield event
        }
      } catch (error) {
        logger.warn('Failed to parse final SSE buffer', {
          preview: buffer.slice(0, 200),
          error: error instanceof Error ? error.message : String(error),
        })
      }
    }
  } finally {
    try {
      reader.releaseLock()
    } catch {
      logger.warn('Failed to release SSE reader lock')
    }
  }
}
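
A minimal sketch of wiring the parser to a fetch response; the endpoint URL is a placeholder, not a real route:

```ts
import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser'

async function streamEvents(url: string, signal?: AbortSignal): Promise<void> {
  const response = await fetch(url, { signal })
  if (!response.body) throw new Error('Response has no body')

  const reader = response.body.getReader()
  const decoder = new TextDecoder()

  // Each yielded event is a typed SSEEvent with at least a `type` field.
  for await (const event of parseSSEStream(reader, decoder, signal)) {
    console.log(event.type)
  }
}
```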
apps/sim/lib/copilot/orchestrator/stream-buffer.ts (new file, 262 lines)
@@ -0,0 +1,262 @@
import { createLogger } from '@sim/logger'
import { getRedisClient } from '@/lib/core/config/redis'

const logger = createLogger('CopilotStreamBuffer')

const STREAM_TTL_SECONDS = 60 * 60
const STREAM_EVENT_LIMIT = 5000
const STREAM_RESERVE_BATCH = 200
const STREAM_FLUSH_INTERVAL_MS = 15
const STREAM_FLUSH_MAX_BATCH = 200

const APPEND_STREAM_EVENT_LUA = `
local seqKey = KEYS[1]
local eventsKey = KEYS[2]
local ttl = tonumber(ARGV[1])
local limit = tonumber(ARGV[2])
local streamId = ARGV[3]
local eventJson = ARGV[4]

local id = redis.call('INCR', seqKey)
local entry = '{"eventId":' .. id .. ',"streamId":' .. cjson.encode(streamId) .. ',"event":' .. eventJson .. '}'
redis.call('ZADD', eventsKey, id, entry)
redis.call('EXPIRE', eventsKey, ttl)
redis.call('EXPIRE', seqKey, ttl)
if limit > 0 then
  redis.call('ZREMRANGEBYRANK', eventsKey, 0, -limit-1)
end
return id
`

function getStreamKeyPrefix(streamId: string) {
  return `copilot_stream:${streamId}`
}

function getEventsKey(streamId: string) {
  return `${getStreamKeyPrefix(streamId)}:events`
}

function getSeqKey(streamId: string) {
  return `${getStreamKeyPrefix(streamId)}:seq`
}

function getMetaKey(streamId: string) {
  return `${getStreamKeyPrefix(streamId)}:meta`
}

export type StreamStatus = 'active' | 'complete' | 'error'

export type StreamMeta = {
  status: StreamStatus
  userId?: string
  updatedAt?: string
  error?: string
}

export type StreamEventEntry = {
  eventId: number
  streamId: string
  event: Record<string, any>
}

export type StreamEventWriter = {
  write: (event: Record<string, any>) => Promise<StreamEventEntry>
  flush: () => Promise<void>
  close: () => Promise<void>
}

export async function resetStreamBuffer(streamId: string): Promise<void> {
  const redis = getRedisClient()
  if (!redis) return
  try {
    await redis.del(getEventsKey(streamId), getSeqKey(streamId), getMetaKey(streamId))
  } catch (error) {
    logger.warn('Failed to reset stream buffer', {
      streamId,
      error: error instanceof Error ? error.message : String(error),
    })
  }
}

export async function setStreamMeta(streamId: string, meta: StreamMeta): Promise<void> {
  const redis = getRedisClient()
  if (!redis) return
  try {
    const payload: Record<string, string> = {
      status: meta.status,
      updatedAt: meta.updatedAt || new Date().toISOString(),
    }
    if (meta.userId) payload.userId = meta.userId
    if (meta.error) payload.error = meta.error
    await redis.hset(getMetaKey(streamId), payload)
    await redis.expire(getMetaKey(streamId), STREAM_TTL_SECONDS)
  } catch (error) {
    logger.warn('Failed to update stream meta', {
      streamId,
      error: error instanceof Error ? error.message : String(error),
    })
  }
}

export async function getStreamMeta(streamId: string): Promise<StreamMeta | null> {
  const redis = getRedisClient()
  if (!redis) return null
  try {
    const meta = await redis.hgetall(getMetaKey(streamId))
    if (!meta || Object.keys(meta).length === 0) return null
    return meta as StreamMeta
  } catch (error) {
    logger.warn('Failed to read stream meta', {
      streamId,
      error: error instanceof Error ? error.message : String(error),
    })
    return null
  }
}

export async function appendStreamEvent(
  streamId: string,
  event: Record<string, any>
): Promise<StreamEventEntry> {
  const redis = getRedisClient()
  if (!redis) {
    return { eventId: 0, streamId, event }
  }

  try {
    const eventJson = JSON.stringify(event)
    const nextId = await redis.eval(
      APPEND_STREAM_EVENT_LUA,
      2,
      getSeqKey(streamId),
      getEventsKey(streamId),
      STREAM_TTL_SECONDS,
      STREAM_EVENT_LIMIT,
      streamId,
      eventJson
    )
    const eventId = typeof nextId === 'number' ? nextId : Number(nextId)
    return { eventId, streamId, event }
  } catch (error) {
    logger.warn('Failed to append stream event', {
      streamId,
      error: error instanceof Error ? error.message : String(error),
    })
    return { eventId: 0, streamId, event }
  }
}

export function createStreamEventWriter(streamId: string): StreamEventWriter {
  const redis = getRedisClient()
  if (!redis) {
    return {
      write: async (event) => ({ eventId: 0, streamId, event }),
      flush: async () => {},
      close: async () => {},
    }
  }

  let pending: StreamEventEntry[] = []
  let nextEventId = 0
  let maxReservedId = 0
  let flushTimer: ReturnType<typeof setTimeout> | null = null
  let isFlushing = false

  const scheduleFlush = () => {
    if (flushTimer) return
    flushTimer = setTimeout(() => {
      flushTimer = null
      void flush()
    }, STREAM_FLUSH_INTERVAL_MS)
  }

  const reserveIds = async (minCount: number) => {
    const reserveCount = Math.max(STREAM_RESERVE_BATCH, minCount)
    const newMax = await redis.incrby(getSeqKey(streamId), reserveCount)
    const startId = newMax - reserveCount + 1
    if (nextEventId === 0 || nextEventId > maxReservedId) {
      nextEventId = startId
      maxReservedId = newMax
    }
  }

  const flush = async () => {
    if (isFlushing || pending.length === 0) return
    isFlushing = true
    const batch = pending
    pending = []
    try {
      const key = getEventsKey(streamId)
      const zaddArgs: (string | number)[] = []
      for (const entry of batch) {
        zaddArgs.push(entry.eventId, JSON.stringify(entry))
      }
      const pipeline = redis.pipeline()
      pipeline.zadd(key, ...(zaddArgs as any))
      pipeline.expire(key, STREAM_TTL_SECONDS)
      pipeline.expire(getSeqKey(streamId), STREAM_TTL_SECONDS)
      pipeline.zremrangebyrank(key, 0, -STREAM_EVENT_LIMIT - 1)
      await pipeline.exec()
    } catch (error) {
      logger.warn('Failed to flush stream events', {
        streamId,
        error: error instanceof Error ? error.message : String(error),
      })
      pending = batch.concat(pending)
    } finally {
      isFlushing = false
      if (pending.length > 0) scheduleFlush()
    }
  }

  const write = async (event: Record<string, any>) => {
    if (nextEventId === 0 || nextEventId > maxReservedId) {
      await reserveIds(1)
    }
    const eventId = nextEventId++
    const entry: StreamEventEntry = { eventId, streamId, event }
    pending.push(entry)
    if (pending.length >= STREAM_FLUSH_MAX_BATCH) {
      await flush()
    } else {
      scheduleFlush()
    }
    return entry
  }

  const close = async () => {
    if (flushTimer) {
      clearTimeout(flushTimer)
      flushTimer = null
    }
    await flush()
  }

  return { write, flush, close }
}

export async function readStreamEvents(
  streamId: string,
  afterEventId: number
): Promise<StreamEventEntry[]> {
  const redis = getRedisClient()
  if (!redis) return []
  try {
    const raw = await redis.zrangebyscore(getEventsKey(streamId), afterEventId + 1, '+inf')
    return raw
      .map((entry) => {
        try {
          return JSON.parse(entry) as StreamEventEntry
        } catch {
          return null
        }
      })
      .filter((entry): entry is StreamEventEntry => Boolean(entry))
  } catch (error) {
    logger.warn('Failed to read stream events', {
      streamId,
      error: error instanceof Error ? error.message : String(error),
    })
    return []
  }
}
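
A minimal write-then-replay sketch over the buffer above, the pattern a reconnecting client would use to catch up from the last event ID it saw:

```ts
import {
  createStreamEventWriter,
  readStreamEvents,
  setStreamMeta,
} from '@/lib/copilot/orchestrator/stream-buffer'

async function bufferAndReplay(streamId: string): Promise<void> {
  await setStreamMeta(streamId, { status: 'active' })

  const writer = createStreamEventWriter(streamId)
  await writer.write({ type: 'content', data: 'hello' })
  await writer.write({ type: 'done' })
  await writer.close() // flushes any batched events

  // Replay everything after event ID 0, i.e. the full buffered stream.
  const missed = await readStreamEvents(streamId, 0)
  for (const entry of missed) {
    console.log(entry.eventId, entry.event.type)
  }

  await setStreamMeta(streamId, { status: 'complete' })
}
```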
apps/sim/lib/copilot/orchestrator/subagent.ts (new file, 286 lines)
@@ -0,0 +1,286 @@
import { createLogger } from '@sim/logger'
import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants'
import {
  getToolCallIdFromEvent,
  handleSubagentRouting,
  markToolCallSeen,
  markToolResultSeen,
  normalizeSseEvent,
  sseHandlers,
  subAgentHandlers,
  wasToolCallSeen,
  wasToolResultSeen,
} from '@/lib/copilot/orchestrator/sse-handlers'
import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser'
import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor'
import type {
  ExecutionContext,
  OrchestratorOptions,
  SSEEvent,
  StreamingContext,
  ToolCallSummary,
} from '@/lib/copilot/orchestrator/types'
import { env } from '@/lib/core/config/env'
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'

const logger = createLogger('CopilotSubagentOrchestrator')
const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT

export interface SubagentOrchestratorOptions extends Omit<OrchestratorOptions, 'onComplete'> {
  userId: string
  workflowId?: string
  workspaceId?: string
  onComplete?: (result: SubagentOrchestratorResult) => void | Promise<void>
}

export interface SubagentOrchestratorResult {
  success: boolean
  content: string
  toolCalls: ToolCallSummary[]
  structuredResult?: {
    type?: string
    summary?: string
    data?: any
    success?: boolean
  }
  error?: string
  errors?: string[]
}

export async function orchestrateSubagentStream(
  agentId: string,
  requestPayload: Record<string, any>,
  options: SubagentOrchestratorOptions
): Promise<SubagentOrchestratorResult> {
  const { userId, workflowId, workspaceId, timeout = 300000, abortSignal } = options
  const execContext = await buildExecutionContext(userId, workflowId, workspaceId)

  const context: StreamingContext = {
    chatId: undefined,
    conversationId: undefined,
    messageId: requestPayload?.messageId || crypto.randomUUID(),
    accumulatedContent: '',
    contentBlocks: [],
    toolCalls: new Map(),
    currentThinkingBlock: null,
    isInThinkingBlock: false,
    subAgentParentToolCallId: undefined,
    subAgentContent: {},
    subAgentToolCalls: {},
    pendingContent: '',
    streamComplete: false,
    wasAborted: false,
    errors: [],
  }

  let structuredResult: SubagentOrchestratorResult['structuredResult']

  try {
    const response = await fetch(`${SIM_AGENT_API_URL}/api/subagent/${agentId}`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}),
      },
      body: JSON.stringify({ ...requestPayload, stream: true, userId }),
      signal: abortSignal,
    })

    if (!response.ok) {
      const errorText = await response.text().catch(() => '')
      throw new Error(
        `Copilot backend error (${response.status}): ${errorText || response.statusText}`
      )
    }

    if (!response.body) {
      throw new Error('Copilot backend response missing body')
    }

    const reader = response.body.getReader()
    const decoder = new TextDecoder()

    const timeoutId = setTimeout(() => {
      context.errors.push('Request timed out')
      context.streamComplete = true
      reader.cancel().catch(() => {})
    }, timeout)

    try {
      for await (const event of parseSSEStream(reader, decoder, abortSignal)) {
        if (abortSignal?.aborted) {
          context.wasAborted = true
          break
        }

        const normalizedEvent = normalizeSseEvent(event)

        // Skip tool_result events for tools the sim side already executed.
        // The sim side emits its own tool_result with complete data.
        // For server-side tools (not executed by sim), we still forward the Go backend's tool_result.
        const toolCallId = getToolCallIdFromEvent(normalizedEvent)
        const eventData = normalizedEvent.data

        const isPartialToolCall =
          normalizedEvent.type === 'tool_call' && eventData?.partial === true

        const shouldSkipToolCall =
          normalizedEvent.type === 'tool_call' &&
          !!toolCallId &&
          !isPartialToolCall &&
          (wasToolResultSeen(toolCallId) || wasToolCallSeen(toolCallId))

        if (
          normalizedEvent.type === 'tool_call' &&
          toolCallId &&
          !isPartialToolCall &&
          !shouldSkipToolCall
        ) {
          markToolCallSeen(toolCallId)
        }

        const shouldSkipToolResult =
          normalizedEvent.type === 'tool_result' &&
          (() => {
            if (!toolCallId) return false
            if (wasToolResultSeen(toolCallId)) return true
            markToolResultSeen(toolCallId)
            return false
          })()

        if (!shouldSkipToolCall && !shouldSkipToolResult) {
          await forwardEvent(normalizedEvent, options)
        }

        if (
          normalizedEvent.type === 'structured_result' ||
          normalizedEvent.type === 'subagent_result'
        ) {
          structuredResult = normalizeStructuredResult(normalizedEvent.data)
          context.streamComplete = true
          continue
        }

        // Handle subagent_start/subagent_end events to track nested subagent calls
        if (normalizedEvent.type === 'subagent_start') {
          const toolCallId = normalizedEvent.data?.tool_call_id
          if (toolCallId) {
            context.subAgentParentToolCallId = toolCallId
            context.subAgentContent[toolCallId] = ''
            context.subAgentToolCalls[toolCallId] = []
          }
          continue
        }

        if (normalizedEvent.type === 'subagent_end') {
          context.subAgentParentToolCallId = undefined
          continue
        }

        // For direct subagent calls, events may have the subagent field set (e.g., subagent: "discovery")
        // but no subagent_start event because this IS the top-level agent. Skip subagent routing
        // for events where the subagent field matches the current agentId - these are top-level events.
        const isTopLevelSubagentEvent =
          normalizedEvent.subagent === agentId && !context.subAgentParentToolCallId

        // Only route to subagent handlers for nested subagent events (not matching the current agentId)
        if (!isTopLevelSubagentEvent && handleSubagentRouting(normalizedEvent, context)) {
          const handler = subAgentHandlers[normalizedEvent.type]
          if (handler) {
            await handler(normalizedEvent, context, execContext, options)
          }
          if (context.streamComplete) break
          continue
        }

        // Process as a regular SSE event (including top-level subagent events)
        const handler = sseHandlers[normalizedEvent.type]
        if (handler) {
          await handler(normalizedEvent, context, execContext, options)
        }
        if (context.streamComplete) break
      }
    } finally {
      clearTimeout(timeoutId)
    }

    const result = buildResult(context, structuredResult)
    await options.onComplete?.(result)
    return result
  } catch (error) {
    const err = error instanceof Error ? error : new Error('Subagent orchestration failed')
    logger.error('Subagent orchestration failed', { error: err.message, agentId })
    await options.onError?.(err)
    return {
      success: false,
      content: context.accumulatedContent,
      toolCalls: [],
      error: err.message,
    }
  }
}

async function forwardEvent(event: SSEEvent, options: OrchestratorOptions): Promise<void> {
  try {
    await options.onEvent?.(event)
  } catch (error) {
    logger.warn('Failed to forward SSE event', {
      type: event.type,
      error: error instanceof Error ? error.message : String(error),
    })
  }
}

function normalizeStructuredResult(data: any): SubagentOrchestratorResult['structuredResult'] {
  if (!data || typeof data !== 'object') {
    return undefined
  }
  return {
    type: data.result_type || data.type,
    summary: data.summary,
    data: data.data ?? data,
    success: data.success,
  }
}

async function buildExecutionContext(
  userId: string,
  workflowId?: string,
  workspaceId?: string
): Promise<ExecutionContext> {
  if (workflowId) {
    return prepareExecutionContext(userId, workflowId)
  }

  const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId)
  return {
    userId,
    workflowId: workflowId || '',
    workspaceId,
    decryptedEnvVars,
  }
}

function buildResult(
  context: StreamingContext,
  structuredResult?: SubagentOrchestratorResult['structuredResult']
): SubagentOrchestratorResult {
  const toolCalls: ToolCallSummary[] = Array.from(context.toolCalls.values()).map((toolCall) => ({
    id: toolCall.id,
    name: toolCall.name,
    status: toolCall.status,
    params: toolCall.params,
    result: toolCall.result?.output,
    error: toolCall.error,
    durationMs:
      toolCall.endTime && toolCall.startTime ? toolCall.endTime - toolCall.startTime : undefined,
  }))

  return {
    success: context.errors.length === 0 && !context.wasAborted,
    content: context.accumulatedContent,
    toolCalls,
    structuredResult,
    errors: context.errors.length ? context.errors : undefined,
  }
}
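
A minimal call sketch for the subagent orchestrator above; the request text is illustrative:

```ts
import { orchestrateSubagentStream } from '@/lib/copilot/orchestrator/subagent'

async function runDiscovery(userId: string): Promise<void> {
  const result = await orchestrateSubagentStream(
    'discovery',
    { request: 'Find the workflow that sends Slack notifications' },
    {
      userId,
      timeout: 120000,
      onEvent: (event) => console.log(event.type),
    }
  )

  if (result.success) {
    // Prefer the structured summary when the backend emits one.
    console.log(result.structuredResult?.summary ?? result.content)
  }
}
```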
apps/sim/lib/copilot/orchestrator/tool-executor.ts (new file, 1707 lines)
(Diff suppressed because the file is too large.)
apps/sim/lib/copilot/orchestrator/types.ts (new file, 130 lines)
@@ -0,0 +1,130 @@
import type { CopilotProviderConfig } from '@/lib/copilot/types'

export type SSEEventType =
  | 'chat_id'
  | 'title_updated'
  | 'content'
  | 'reasoning'
  | 'tool_call'
  | 'tool_generating'
  | 'tool_result'
  | 'tool_error'
  | 'subagent_start'
  | 'subagent_end'
  | 'structured_result'
  | 'subagent_result'
  | 'done'
  | 'error'
  | 'start'

export interface SSEEvent {
  type: SSEEventType
  data?: any
  subagent?: string
  toolCallId?: string
  toolName?: string
  success?: boolean
  result?: any
}

export type ToolCallStatus = 'pending' | 'executing' | 'success' | 'error' | 'skipped' | 'rejected'

export interface ToolCallState {
  id: string
  name: string
  status: ToolCallStatus
  params?: Record<string, any>
  result?: ToolCallResult
  error?: string
  startTime?: number
  endTime?: number
}

export interface ToolCallResult {
  success: boolean
  output?: any
  error?: string
}

export type ContentBlockType = 'text' | 'thinking' | 'tool_call' | 'subagent_text'

export interface ContentBlock {
  type: ContentBlockType
  content?: string
  toolCall?: ToolCallState
  timestamp: number
}

export interface StreamingContext {
  chatId?: string
  conversationId?: string
  messageId: string
  accumulatedContent: string
  contentBlocks: ContentBlock[]
  toolCalls: Map<string, ToolCallState>
  currentThinkingBlock: ContentBlock | null
  isInThinkingBlock: boolean
  subAgentParentToolCallId?: string
  subAgentContent: Record<string, string>
  subAgentToolCalls: Record<string, ToolCallState[]>
  pendingContent: string
  streamComplete: boolean
  wasAborted: boolean
  errors: string[]
}

export interface OrchestratorRequest {
  message: string
  workflowId: string
  userId: string
  chatId?: string
  mode?: 'agent' | 'ask' | 'plan'
  model?: string
  conversationId?: string
  contexts?: Array<{ type: string; content: string }>
  fileAttachments?: any[]
  commands?: string[]
  provider?: CopilotProviderConfig
  streamToolCalls?: boolean
  version?: string
  prefetch?: boolean
  userName?: string
}

export interface OrchestratorOptions {
  autoExecuteTools?: boolean
  timeout?: number
  onEvent?: (event: SSEEvent) => void | Promise<void>
  onComplete?: (result: OrchestratorResult) => void | Promise<void>
  onError?: (error: Error) => void | Promise<void>
  abortSignal?: AbortSignal
  interactive?: boolean
}

export interface OrchestratorResult {
  success: boolean
  content: string
  contentBlocks: ContentBlock[]
  toolCalls: ToolCallSummary[]
  chatId?: string
  conversationId?: string
  error?: string
  errors?: string[]
}

export interface ToolCallSummary {
  id: string
  name: string
  status: ToolCallStatus
  params?: Record<string, any>
  result?: any
  error?: string
  durationMs?: number
}

export interface ExecutionContext {
  userId: string
  workflowId: string
  workspaceId?: string
  decryptedEnvVars?: Record<string, string>
}
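
For reference, a well-formed tool_result event and the tool-call state it would update look like this under the types above (IDs illustrative):

```ts
import type { SSEEvent, ToolCallState } from '@/lib/copilot/orchestrator/types'

const event: SSEEvent = {
  type: 'tool_result',
  toolCallId: 'call_abc', // illustrative ID
  toolName: 'create_workflow',
  success: true,
  result: { workflowId: 'wf_123' }, // illustrative ID
}

const state: ToolCallState = {
  id: 'call_abc',
  name: 'create_workflow',
  status: 'success',
  result: { success: true, output: { workflowId: 'wf_123' } },
  startTime: Date.now() - 1200,
  endTime: Date.now(),
}
```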
@@ -5,7 +5,10 @@ import {
   type BaseClientToolMetadata,
   ClientToolCallState,
 } from '@/lib/copilot/tools/client/base-tool'
-import { sanitizeForCopilot } from '@/lib/workflows/sanitization/json-sanitizer'
+import {
+  formatWorkflowStateForCopilot,
+  normalizeWorkflowName,
+} from '@/lib/copilot/tools/shared/workflow-utils'
 import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
 
 const logger = createLogger('GetWorkflowFromNameClientTool')
@@ -67,11 +70,9 @@ export class GetWorkflowFromNameClientTool extends BaseClientTool {
 
     // Try to find by name from registry first to get ID
     const registry = useWorkflowRegistry.getState()
+    const targetName = normalizeWorkflowName(workflowName)
     const match = Object.values((registry as any).workflows || {}).find(
-      (w: any) =>
-        String(w?.name || '')
-          .trim()
-          .toLowerCase() === workflowName.toLowerCase()
+      (w: any) => normalizeWorkflowName(w?.name) === targetName
     ) as any
 
     if (!match?.id) {
@@ -98,15 +99,12 @@ export class GetWorkflowFromNameClientTool extends BaseClientTool {
     }
 
     // Convert state to the same string format as get_user_workflow
-    const workflowState = {
+    const userWorkflow = formatWorkflowStateForCopilot({
       blocks: wf.state.blocks || {},
       edges: wf.state.edges || [],
       loops: wf.state.loops || {},
       parallels: wf.state.parallels || {},
-    }
-    // Sanitize workflow state for copilot (remove UI-specific data)
-    const sanitizedState = sanitizeForCopilot(workflowState)
-    const userWorkflow = JSON.stringify(sanitizedState, null, 2)
+    })
 
     await this.markToolComplete(200, `Retrieved workflow ${workflowName}`, { userWorkflow })
     this.setState(ClientToolCallState.success)
@@ -5,6 +5,7 @@ import {
   type BaseClientToolMetadata,
   ClientToolCallState,
 } from '@/lib/copilot/tools/client/base-tool'
+import { extractWorkflowNames } from '@/lib/copilot/tools/shared/workflow-utils'
 
 const logger = createLogger('ListUserWorkflowsClientTool')
 
@@ -41,9 +42,7 @@ export class ListUserWorkflowsClientTool extends BaseClientTool {
 
     const json = await res.json()
     const workflows = Array.isArray(json?.data) ? json.data : []
-    const names = workflows
-      .map((w: any) => (typeof w?.name === 'string' ? w.name : null))
-      .filter((n: string | null) => !!n)
+    const names = extractWorkflowNames(workflows)
 
     logger.info('Found workflows', { count: names.length })
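
The shared helpers these two diffs factor the logic into are not shown in this comparison; a plausible sketch of their shapes, inferred purely from the call sites above (the real implementations in workflow-utils may differ):

```ts
// Hypothetical reconstruction of '@/lib/copilot/tools/shared/workflow-utils'.
import { sanitizeForCopilot } from '@/lib/workflows/sanitization/json-sanitizer'

export function normalizeWorkflowName(name?: string): string {
  // Mirrors the inlined comparison the diff replaced.
  return String(name || '').trim().toLowerCase()
}

export function extractWorkflowNames(workflows: any[]): string[] {
  // Mirrors the map/filter chain the diff replaced.
  return workflows
    .map((w: any) => (typeof w?.name === 'string' ? w.name : null))
    .filter((n: string | null): n is string => !!n)
}

export function formatWorkflowStateForCopilot(state: {
  blocks: Record<string, any>
  edges: any[]
  loops: Record<string, any>
  parallels: Record<string, any>
}): string {
  // Sanitize workflow state for copilot (remove UI-specific data), then stringify.
  return JSON.stringify(sanitizeForCopilot(state), null, 2)
}
```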
apps/sim/lib/copilot/tools/mcp/definitions.ts (new file, 474 lines)
@@ -0,0 +1,474 @@
export type DirectToolDef = {
  name: string
  description: string
  inputSchema: { type: 'object'; properties?: Record<string, unknown>; required?: string[] }
  toolId: string
}

export type SubagentToolDef = {
  name: string
  description: string
  inputSchema: { type: 'object'; properties?: Record<string, unknown>; required?: string[] }
  agentId: string
}

/**
 * Direct tools that execute immediately without LLM orchestration.
 * These are fast database queries that don't need AI reasoning.
 */
export const DIRECT_TOOL_DEFS: DirectToolDef[] = [
  {
    name: 'list_workflows',
    toolId: 'list_user_workflows',
    description:
      'List all workflows the user has access to. Returns workflow IDs, names, and workspace info.',
    inputSchema: {
      type: 'object',
      properties: {
        workspaceId: {
          type: 'string',
          description: 'Optional workspace ID to filter workflows.',
        },
        folderId: {
          type: 'string',
          description: 'Optional folder ID to filter workflows.',
        },
      },
    },
  },
  {
    name: 'list_workspaces',
    toolId: 'list_user_workspaces',
    description:
      'List all workspaces the user has access to. Returns workspace IDs, names, and roles.',
    inputSchema: {
      type: 'object',
      properties: {},
    },
  },
  {
    name: 'list_folders',
    toolId: 'list_folders',
    description: 'List all folders in a workspace.',
    inputSchema: {
      type: 'object',
      properties: {
        workspaceId: {
          type: 'string',
          description: 'Workspace ID to list folders from.',
        },
      },
      required: ['workspaceId'],
    },
  },
  {
    name: 'get_workflow',
    toolId: 'get_workflow_from_name',
    description: 'Get a workflow by name or ID. Returns the full workflow definition.',
    inputSchema: {
      type: 'object',
      properties: {
        name: {
          type: 'string',
          description: 'Workflow name to search for.',
        },
        workflowId: {
          type: 'string',
          description: 'Workflow ID to retrieve directly.',
        },
      },
    },
  },
  {
    name: 'create_workflow',
    toolId: 'create_workflow',
    description: 'Create a new workflow. Returns the new workflow ID.',
    inputSchema: {
      type: 'object',
      properties: {
        name: {
          type: 'string',
          description: 'Name for the new workflow.',
        },
        workspaceId: {
          type: 'string',
          description: 'Optional workspace ID. Uses default workspace if not provided.',
        },
        folderId: {
          type: 'string',
          description: 'Optional folder ID to place the workflow in.',
        },
        description: {
          type: 'string',
          description: 'Optional description for the workflow.',
        },
      },
      required: ['name'],
    },
  },
  {
    name: 'create_folder',
    toolId: 'create_folder',
    description: 'Create a new folder in a workspace.',
    inputSchema: {
      type: 'object',
      properties: {
        name: {
          type: 'string',
          description: 'Name for the new folder.',
        },
        workspaceId: {
          type: 'string',
          description: 'Optional workspace ID. Uses default workspace if not provided.',
        },
        parentId: {
          type: 'string',
          description: 'Optional parent folder ID for nested folders.',
        },
      },
      required: ['name'],
    },
  },
]

export const SUBAGENT_TOOL_DEFS: SubagentToolDef[] = [
  {
    name: 'copilot_build',
    agentId: 'build',
    description: `Build a workflow end-to-end in a single step. This is the fast mode equivalent for headless/MCP usage.

USE THIS WHEN:
- Building a new workflow from scratch
- Modifying an existing workflow
- You want to gather information and build in one pass without separate plan→edit steps

WORKFLOW ID (REQUIRED):
- For NEW workflows: First call create_workflow to get a workflowId, then pass it here
- For EXISTING workflows: Always pass the workflowId parameter

CAN DO:
- Gather information about blocks, credentials, patterns
- Search documentation and patterns for best practices
- Add, modify, or remove blocks
- Configure block settings and connections
- Set environment variables and workflow variables

CANNOT DO:
- Run or test workflows (use copilot_test separately after deploying)
- Deploy workflows (use copilot_deploy separately)

WORKFLOW:
1. Call create_workflow to get a workflowId (for new workflows)
2. Call copilot_build with the request and workflowId
3. Build agent gathers info and builds in one pass
4. Call copilot_deploy to deploy the workflow
5. Optionally call copilot_test to verify it works`,
    inputSchema: {
      type: 'object',
      properties: {
        request: {
          type: 'string',
          description: 'What you want to build or modify in the workflow.',
        },
        workflowId: {
          type: 'string',
          description:
            'REQUIRED. The workflow ID. For new workflows, call create_workflow first to get this.',
        },
        context: { type: 'object' },
      },
      required: ['request', 'workflowId'],
    },
  },
  {
    name: 'copilot_discovery',
    agentId: 'discovery',
    description: `Find workflows by their contents or functionality when the user doesn't know the exact name or ID.

USE THIS WHEN:
- User describes a workflow by what it does: "the one that sends emails", "my Slack notification workflow"
- User refers to workflow contents: "the workflow with the OpenAI block"
- User needs to search/match workflows by functionality or description

DO NOT USE (use direct tools instead):
- User knows the workflow name → use get_workflow
- User wants to list all workflows → use list_workflows
- User wants to list workspaces → use list_workspaces
- User wants to list folders → use list_folders`,
    inputSchema: {
      type: 'object',
      properties: {
        request: { type: 'string' },
        workspaceId: { type: 'string' },
        context: { type: 'object' },
      },
      required: ['request'],
    },
  },
  {
    name: 'copilot_plan',
    agentId: 'plan',
    description: `Plan workflow changes by gathering required information.

USE THIS WHEN:
- Building a new workflow
- Modifying an existing workflow
- You need to understand what blocks and integrations are available
- The workflow requires multiple blocks or connections

WORKFLOW ID (REQUIRED):
- For NEW workflows: First call create_workflow to get a workflowId, then pass it here
- For EXISTING workflows: Always pass the workflowId parameter

This tool gathers information about available blocks, credentials, and the current workflow state.

RETURNS: A plan object containing block configurations, connections, and technical details.
IMPORTANT: Pass the returned plan EXACTLY to copilot_edit - do not modify or summarize it.`,
    inputSchema: {
      type: 'object',
      properties: {
        request: {
          type: 'string',
          description: 'What you want to build or modify in the workflow.',
        },
        workflowId: {
          type: 'string',
          description:
            'REQUIRED. The workflow ID. For new workflows, call create_workflow first to get this.',
        },
        context: { type: 'object' },
      },
      required: ['request', 'workflowId'],
    },
  },
  {
    name: 'copilot_edit',
    agentId: 'edit',
    description: `Execute a workflow plan and apply edits.

USE THIS WHEN:
- You have a plan from copilot_plan that needs to be executed
- Building or modifying a workflow based on the plan
- Making changes to blocks, connections, or configurations

WORKFLOW ID (REQUIRED):
- You MUST provide the workflowId parameter
- For new workflows, get the workflowId from create_workflow first

PLAN (REQUIRED):
- Pass the EXACT plan object from copilot_plan in the context.plan field
- Do NOT modify, summarize, or interpret the plan - pass it verbatim
- The plan contains technical details the edit agent needs exactly as-is

IMPORTANT: After copilot_edit completes, you MUST call copilot_deploy before the workflow can be run or tested.`,
    inputSchema: {
      type: 'object',
      properties: {
        message: { type: 'string', description: 'Optional additional instructions for the edit.' },
        workflowId: {
          type: 'string',
          description:
            'REQUIRED. The workflow ID to edit. Get this from create_workflow for new workflows.',
        },
        plan: {
          type: 'object',
          description:
            'The plan object from copilot_plan. Pass it EXACTLY as returned, do not modify.',
        },
        context: {
          type: 'object',
          description:
            'Additional context. Put the plan in context.plan if not using the plan field directly.',
        },
      },
      required: ['workflowId'],
    },
  },
  {
    name: 'copilot_debug',
    agentId: 'debug',
    description: `Diagnose errors or unexpected workflow behavior.

WORKFLOW ID (REQUIRED): Always provide the workflowId of the workflow to debug.`,
    inputSchema: {
      type: 'object',
      properties: {
        error: { type: 'string', description: 'The error message or description of the issue.' },
        workflowId: { type: 'string', description: 'REQUIRED. The workflow ID to debug.' },
        context: { type: 'object' },
|
||||
},
|
||||
required: ['error', 'workflowId'],
|
||||
},
|
||||
},
|
||||
{
|
||||
name: 'copilot_deploy',
|
||||
agentId: 'deploy',
|
||||
description: `Deploy or manage workflow deployments.
|
||||
|
||||
CRITICAL: You MUST deploy a workflow after building before it can be run or tested.
|
||||
Workflows without an active deployment will fail with "no active deployment" error.
|
||||
|
||||
WORKFLOW ID (REQUIRED):
|
||||
- Always provide the workflowId parameter
|
||||
- This must match the workflow you built with copilot_edit
|
||||
|
||||
USE THIS:
|
||||
- After copilot_edit completes to activate the workflow
|
||||
- To update deployment settings
|
||||
- To redeploy after making changes
|
||||
|
||||
DEPLOYMENT TYPES:
|
||||
- "deploy as api" - REST API endpoint
|
||||
- "deploy as chat" - Chat interface
|
||||
- "deploy as mcp" - MCP server`,
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
request: {
|
||||
type: 'string',
|
||||
description: 'The deployment request, e.g. "deploy as api" or "deploy as chat"',
|
||||
},
|
||||
workflowId: {
|
||||
type: 'string',
|
||||
description: 'REQUIRED. The workflow ID to deploy.',
|
||||
},
|
||||
context: { type: 'object' },
|
||||
},
|
||||
required: ['request', 'workflowId'],
|
||||
},
|
||||
},
|
||||
{
|
||||
name: 'copilot_auth',
|
||||
agentId: 'auth',
|
||||
description: 'Handle OAuth connection flows.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
request: { type: 'string' },
|
||||
context: { type: 'object' },
|
||||
},
|
||||
required: ['request'],
|
||||
},
|
||||
},
|
||||
{
|
||||
name: 'copilot_knowledge',
|
||||
agentId: 'knowledge',
|
||||
description: 'Create and manage knowledge bases.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
request: { type: 'string' },
|
||||
context: { type: 'object' },
|
||||
},
|
||||
required: ['request'],
|
||||
},
|
||||
},
|
||||
{
|
||||
name: 'copilot_custom_tool',
|
||||
agentId: 'custom_tool',
|
||||
description: 'Create or manage custom tools.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
request: { type: 'string' },
|
||||
context: { type: 'object' },
|
||||
},
|
||||
required: ['request'],
|
||||
},
|
||||
},
|
||||
{
|
||||
name: 'copilot_info',
|
||||
agentId: 'info',
|
||||
description: 'Inspect blocks, outputs, and workflow metadata.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
request: { type: 'string' },
|
||||
workflowId: { type: 'string' },
|
||||
context: { type: 'object' },
|
||||
},
|
||||
required: ['request'],
|
||||
},
|
||||
},
|
||||
{
|
||||
name: 'copilot_workflow',
|
||||
agentId: 'workflow',
|
||||
description: 'Manage workflow environment and configuration.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
request: { type: 'string' },
|
||||
workflowId: { type: 'string' },
|
||||
context: { type: 'object' },
|
||||
},
|
||||
required: ['request'],
|
||||
},
|
||||
},
|
||||
{
|
||||
name: 'copilot_research',
|
||||
agentId: 'research',
|
||||
description: 'Research external APIs and documentation.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
request: { type: 'string' },
|
||||
context: { type: 'object' },
|
||||
},
|
||||
required: ['request'],
|
||||
},
|
||||
},
|
||||
{
|
||||
name: 'copilot_tour',
|
||||
agentId: 'tour',
|
||||
description: 'Explain platform features and usage.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
request: { type: 'string' },
|
||||
context: { type: 'object' },
|
||||
},
|
||||
required: ['request'],
|
||||
},
|
||||
},
|
||||
{
|
||||
name: 'copilot_test',
|
||||
agentId: 'test',
|
||||
description: `Run workflows and verify outputs.
|
||||
|
||||
PREREQUISITE: The workflow MUST be deployed first using copilot_deploy.
|
||||
Undeployed workflows will fail with "no active deployment" error.
|
||||
|
||||
WORKFLOW ID (REQUIRED):
|
||||
- Always provide the workflowId parameter
|
||||
|
||||
USE THIS:
|
||||
- After deploying to verify the workflow works correctly
|
||||
- To test with sample inputs
|
||||
- To validate workflow behavior before sharing with user`,
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
request: { type: 'string' },
|
||||
workflowId: {
|
||||
type: 'string',
|
||||
description: 'REQUIRED. The workflow ID to test.',
|
||||
},
|
||||
context: { type: 'object' },
|
||||
},
|
||||
required: ['request', 'workflowId'],
|
||||
},
|
||||
},
|
||||
{
|
||||
name: 'copilot_superagent',
|
||||
agentId: 'superagent',
|
||||
description: 'Execute direct external actions (email, Slack, etc.).',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
request: { type: 'string' },
|
||||
context: { type: 'object' },
|
||||
},
|
||||
required: ['request'],
|
||||
},
|
||||
},
|
||||
]
|
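The tool descriptions above encode a strict lifecycle: create_workflow → copilot_build (or copilot_plan → copilot_edit) → copilot_deploy → copilot_test. A minimal sketch of a headless client driving that sequence, assuming a hypothetical callTool dispatcher (not part of this diff) that invokes a named tool and returns its parsed result:

// Hypothetical dispatcher; stands in for whatever MCP client transport is in use.
declare function callTool(name: string, args: Record<string, unknown>): Promise<any>

async function buildDeployAndTest(request: string) {
  // New workflows need an ID before any subagent tool can run.
  const { workflowId } = await callTool('create_workflow', { name: 'Demo workflow' })
  // Fast mode: gather info and apply edits in one pass.
  await callTool('copilot_build', { request, workflowId })
  // Deployment is mandatory; undeployed workflows fail with "no active deployment".
  await callTool('copilot_deploy', { request: 'deploy as api', workflowId })
  // Optionally verify the deployed workflow end-to-end.
  return callTool('copilot_test', { request: 'run with sample input', workflowId })
}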
@@ -6,9 +6,13 @@ import { eq } from 'drizzle-orm'
import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool'
import { validateSelectorIds } from '@/lib/copilot/validation/selector-validator'
import type { PermissionGroupConfig } from '@/lib/permission-groups/types'
import { applyAutoLayout } from '@/lib/workflows/autolayout'
import { getBlockOutputs } from '@/lib/workflows/blocks/block-outputs'
import { extractAndPersistCustomTools } from '@/lib/workflows/persistence/custom-tools-persistence'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils'
import {
  loadWorkflowFromNormalizedTables,
  saveWorkflowToNormalizedTables,
} from '@/lib/workflows/persistence/utils'
import { isValidKey } from '@/lib/workflows/sanitization/key-validation'
import { validateWorkflowState } from '@/lib/workflows/sanitization/validation'
import { buildCanonicalIndex, isCanonicalPair } from '@/lib/workflows/subblocks/visibility'
@@ -1399,6 +1403,101 @@ function filterDisallowedTools(
  return allowedTools
}

/**
 * Normalizes block IDs in operations to ensure they are valid UUIDs.
 * The LLM may generate human-readable IDs like "web_search" or "research_agent"
 * which need to be converted to proper UUIDs for database compatibility.
 *
 * Returns the normalized operations and a mapping from old IDs to new UUIDs.
 */
function normalizeBlockIdsInOperations(operations: EditWorkflowOperation[]): {
  normalizedOperations: EditWorkflowOperation[]
  idMapping: Map<string, string>
} {
  const logger = createLogger('EditWorkflowServerTool')
  const idMapping = new Map<string, string>()

  // First pass: collect all non-UUID block_ids from add/insert operations
  for (const op of operations) {
    if (op.operation_type === 'add' || op.operation_type === 'insert_into_subflow') {
      if (op.block_id && !UUID_REGEX.test(op.block_id)) {
        const newId = crypto.randomUUID()
        idMapping.set(op.block_id, newId)
        logger.debug('Normalizing block ID', { oldId: op.block_id, newId })
      }
    }
  }

  if (idMapping.size === 0) {
    return { normalizedOperations: operations, idMapping }
  }

  logger.info('Normalizing block IDs in operations', {
    normalizedCount: idMapping.size,
    mappings: Object.fromEntries(idMapping),
  })

  // Helper to replace an ID if it's in the mapping
  const replaceId = (id: string | undefined): string | undefined => {
    if (!id) return id
    return idMapping.get(id) ?? id
  }

  // Second pass: update all references to use new UUIDs
  const normalizedOperations = operations.map((op) => {
    const normalized: EditWorkflowOperation = {
      ...op,
      block_id: replaceId(op.block_id) ?? op.block_id,
    }

    if (op.params) {
      normalized.params = { ...op.params }

      // Update subflowId references (for insert_into_subflow)
      if (normalized.params.subflowId) {
        normalized.params.subflowId = replaceId(normalized.params.subflowId)
      }

      // Update connection references
      if (normalized.params.connections) {
        const normalizedConnections: Record<string, any> = {}
        for (const [handle, targets] of Object.entries(normalized.params.connections)) {
          if (typeof targets === 'string') {
            normalizedConnections[handle] = replaceId(targets)
          } else if (Array.isArray(targets)) {
            normalizedConnections[handle] = targets.map((t) => {
              if (typeof t === 'string') return replaceId(t)
              if (t && typeof t === 'object' && t.block) {
                return { ...t, block: replaceId(t.block) }
              }
              return t
            })
          } else if (targets && typeof targets === 'object' && (targets as any).block) {
            normalizedConnections[handle] = { ...targets, block: replaceId((targets as any).block) }
          } else {
            normalizedConnections[handle] = targets
          }
        }
        normalized.params.connections = normalizedConnections
      }

      // Update nestedNodes block IDs
      if (normalized.params.nestedNodes) {
        const normalizedNestedNodes: Record<string, any> = {}
        for (const [childId, childBlock] of Object.entries(normalized.params.nestedNodes)) {
          const newChildId = replaceId(childId) ?? childId
          normalizedNestedNodes[newChildId] = childBlock
        }
        normalized.params.nestedNodes = normalizedNestedNodes
      }
    }

    return normalized
  })

  return { normalizedOperations, idMapping }
}
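A concrete before/after may help: when an add operation carries a human-readable slug, both the ID and every reference to it are rewritten to a freshly minted UUID. An illustrative call, assuming the EditWorkflowOperation shape used above (the cast is only to keep the sketch short):

// Illustrative input: the LLM emitted readable IDs and a connection between them.
const ops = [
  { operation_type: 'add', block_id: 'web_search', params: {} },
  { operation_type: 'add', block_id: 'starter', params: { connections: { success: 'web_search' } } },
] as unknown as EditWorkflowOperation[]

const { normalizedOperations, idMapping } = normalizeBlockIdsInOperations(ops)
// Both readable IDs are remapped to UUIDs, and the 'success' connection in the
// second operation now targets the UUID minted for 'web_search'.
console.log(idMapping.get('web_search'), normalizedOperations[1].params?.connections)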

/**
 * Apply operations directly to the workflow JSON state
 */
@@ -1418,6 +1517,11 @@ function applyOperationsToWorkflowState(

  // Log initial state
  const logger = createLogger('EditWorkflowServerTool')

  // Normalize block IDs to UUIDs before processing
  const { normalizedOperations } = normalizeBlockIdsInOperations(operations)
  operations = normalizedOperations

  logger.info('Applying operations to workflow:', {
    totalOperations: operations.length,
    operationTypes: operations.reduce((acc: any, op) => {
@@ -3162,10 +3266,59 @@ export const editWorkflowServerTool: BaseServerTool<EditWorkflowParams, any> = {
    const skippedMessages =
      skippedItems.length > 0 ? skippedItems.map((item) => item.reason) : undefined

    // Return the modified workflow state for the client to convert to YAML if needed
    // Persist the workflow state to the database
    const finalWorkflowState = validation.sanitizedState || modifiedWorkflowState

    // Apply autolayout to position blocks properly
    const layoutResult = applyAutoLayout(finalWorkflowState.blocks, finalWorkflowState.edges, {
      horizontalSpacing: 250,
      verticalSpacing: 100,
      padding: { x: 100, y: 100 },
    })

    const layoutedBlocks =
      layoutResult.success && layoutResult.blocks ? layoutResult.blocks : finalWorkflowState.blocks

    if (!layoutResult.success) {
      logger.warn('Autolayout failed, using default positions', {
        workflowId,
        error: layoutResult.error,
      })
    }

    const workflowStateForDb = {
      blocks: layoutedBlocks,
      edges: finalWorkflowState.edges,
      loops: generateLoopBlocks(layoutedBlocks as any),
      parallels: generateParallelBlocks(layoutedBlocks as any),
      lastSaved: Date.now(),
      isDeployed: false,
    }

    const saveResult = await saveWorkflowToNormalizedTables(workflowId, workflowStateForDb as any)
    if (!saveResult.success) {
      logger.error('Failed to persist workflow state to database', {
        workflowId,
        error: saveResult.error,
      })
      throw new Error(`Failed to save workflow: ${saveResult.error}`)
    }

    // Update workflow's lastSynced timestamp
    await db
      .update(workflowTable)
      .set({
        lastSynced: new Date(),
        updatedAt: new Date(),
      })
      .where(eq(workflowTable.id, workflowId))

    logger.info('Workflow state persisted to database', { workflowId })

    // Return the modified workflow state with autolayout applied
    return {
      success: true,
      workflowState: validation.sanitizedState || modifiedWorkflowState,
      workflowState: { ...finalWorkflowState, blocks: layoutedBlocks },
      // Include input validation errors so the LLM can see what was rejected
      ...(inputErrors && {
        inputValidationErrors: inputErrors,
38  apps/sim/lib/copilot/tools/shared/workflow-utils.ts  Normal file
@@ -0,0 +1,38 @@
import { sanitizeForCopilot } from '@/lib/workflows/sanitization/json-sanitizer'

type CopilotWorkflowState = {
  blocks?: Record<string, any>
  edges?: any[]
  loops?: Record<string, any>
  parallels?: Record<string, any>
}

export function formatWorkflowStateForCopilot(state: CopilotWorkflowState): string {
  const workflowState = {
    blocks: state.blocks || {},
    edges: state.edges || [],
    loops: state.loops || {},
    parallels: state.parallels || {},
  }
  const sanitized = sanitizeForCopilot(workflowState)
  return JSON.stringify(sanitized, null, 2)
}

export function formatNormalizedWorkflowForCopilot(
  normalized: CopilotWorkflowState | null | undefined
): string | null {
  if (!normalized) return null
  return formatWorkflowStateForCopilot(normalized)
}

export function normalizeWorkflowName(name?: string | null): string {
  return String(name || '')
    .trim()
    .toLowerCase()
}

export function extractWorkflowNames(workflows: Array<{ name?: string | null }>): string[] {
  return workflows
    .map((workflow) => (typeof workflow?.name === 'string' ? workflow.name : null))
    .filter((name): name is string => Boolean(name))
}
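One plausible consumer pattern for the name helpers is case-insensitive duplicate detection before creating a workflow; this short sketch is illustrative and not part of the diff:

// Check whether a proposed name collides with an existing workflow, ignoring case.
function hasNameCollision(proposed: string, existing: Array<{ name?: string | null }>): boolean {
  const target = normalizeWorkflowName(proposed)
  return extractWorkflowNames(existing).some((name) => normalizeWorkflowName(name) === target)
}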
113  apps/sim/lib/core/async-jobs/backends/database.ts  Normal file
@@ -0,0 +1,113 @@
import { asyncJobs, db } from '@sim/db'
import { createLogger } from '@sim/logger'
import { eq, sql } from 'drizzle-orm'
import {
  type EnqueueOptions,
  JOB_STATUS,
  type Job,
  type JobMetadata,
  type JobQueueBackend,
  type JobStatus,
  type JobType,
} from '@/lib/core/async-jobs/types'

const logger = createLogger('DatabaseJobQueue')

type AsyncJobRow = typeof asyncJobs.$inferSelect

function rowToJob(row: AsyncJobRow): Job {
  return {
    id: row.id,
    type: row.type as JobType,
    payload: row.payload,
    status: row.status as JobStatus,
    createdAt: row.createdAt,
    startedAt: row.startedAt ?? undefined,
    completedAt: row.completedAt ?? undefined,
    attempts: row.attempts,
    maxAttempts: row.maxAttempts,
    error: row.error ?? undefined,
    output: row.output as unknown,
    metadata: (row.metadata ?? {}) as JobMetadata,
  }
}

export class DatabaseJobQueue implements JobQueueBackend {
  async enqueue<TPayload>(
    type: JobType,
    payload: TPayload,
    options?: EnqueueOptions
  ): Promise<string> {
    const jobId = `run_${crypto.randomUUID().replace(/-/g, '').slice(0, 20)}`
    const now = new Date()

    await db.insert(asyncJobs).values({
      id: jobId,
      type,
      payload: payload as Record<string, unknown>,
      status: JOB_STATUS.PENDING,
      createdAt: now,
      attempts: 0,
      maxAttempts: options?.maxAttempts ?? 3,
      metadata: (options?.metadata ?? {}) as Record<string, unknown>,
      updatedAt: now,
    })

    logger.debug('Enqueued job', { jobId, type })
    return jobId
  }

  async getJob(jobId: string): Promise<Job | null> {
    const [row] = await db.select().from(asyncJobs).where(eq(asyncJobs.id, jobId)).limit(1)

    return row ? rowToJob(row) : null
  }

  async startJob(jobId: string): Promise<void> {
    const now = new Date()

    await db
      .update(asyncJobs)
      .set({
        status: JOB_STATUS.PROCESSING,
        startedAt: now,
        attempts: sql`${asyncJobs.attempts} + 1`,
        updatedAt: now,
      })
      .where(eq(asyncJobs.id, jobId))

    logger.debug('Started job', { jobId })
  }

  async completeJob(jobId: string, output: unknown): Promise<void> {
    const now = new Date()

    await db
      .update(asyncJobs)
      .set({
        status: JOB_STATUS.COMPLETED,
        completedAt: now,
        output: output as Record<string, unknown>,
        updatedAt: now,
      })
      .where(eq(asyncJobs.id, jobId))

    logger.debug('Completed job', { jobId })
  }

  async markJobFailed(jobId: string, error: string): Promise<void> {
    const now = new Date()

    await db
      .update(asyncJobs)
      .set({
        status: JOB_STATUS.FAILED,
        completedAt: now,
        error,
        updatedAt: now,
      })
      .where(eq(asyncJobs.id, jobId))

    logger.debug('Marked job as failed', { jobId })
  }
}
3  apps/sim/lib/core/async-jobs/backends/index.ts  Normal file
@@ -0,0 +1,3 @@
export { DatabaseJobQueue } from './database'
export { RedisJobQueue } from './redis'
export { TriggerDevJobQueue } from './trigger-dev'
176  apps/sim/lib/core/async-jobs/backends/redis.test.ts  Normal file
@@ -0,0 +1,176 @@
/**
 * @vitest-environment node
 */
import { createMockRedis, loggerMock, type MockRedis } from '@sim/testing'
import { beforeEach, describe, expect, it, vi } from 'vitest'

vi.mock('@sim/logger', () => loggerMock)

import {
  JOB_MAX_LIFETIME_SECONDS,
  JOB_RETENTION_SECONDS,
  JOB_STATUS,
} from '@/lib/core/async-jobs/types'
import { RedisJobQueue } from './redis'

describe('RedisJobQueue', () => {
  let mockRedis: MockRedis
  let queue: RedisJobQueue

  beforeEach(() => {
    vi.clearAllMocks()
    mockRedis = createMockRedis()
    queue = new RedisJobQueue(mockRedis as never)
  })

  describe('enqueue', () => {
    it.concurrent('should create a job with pending status', async () => {
      const localRedis = createMockRedis()
      const localQueue = new RedisJobQueue(localRedis as never)

      const jobId = await localQueue.enqueue('workflow-execution', { test: 'data' })

      expect(jobId).toMatch(/^run_/)
      expect(localRedis.hset).toHaveBeenCalledTimes(1)

      const [key, data] = localRedis.hset.mock.calls[0]
      expect(key).toBe(`async-jobs:job:${jobId}`)
      expect(data.status).toBe(JOB_STATUS.PENDING)
      expect(data.type).toBe('workflow-execution')
    })

    it.concurrent('should set max lifetime TTL on enqueue', async () => {
      const localRedis = createMockRedis()
      const localQueue = new RedisJobQueue(localRedis as never)

      const jobId = await localQueue.enqueue('workflow-execution', { test: 'data' })

      expect(localRedis.expire).toHaveBeenCalledWith(
        `async-jobs:job:${jobId}`,
        JOB_MAX_LIFETIME_SECONDS
      )
    })
  })

  describe('completeJob', () => {
    it.concurrent('should set status to completed and set TTL', async () => {
      const localRedis = createMockRedis()
      const localQueue = new RedisJobQueue(localRedis as never)
      const jobId = 'run_test123'

      await localQueue.completeJob(jobId, { result: 'success' })

      expect(localRedis.hset).toHaveBeenCalledWith(`async-jobs:job:${jobId}`, {
        status: JOB_STATUS.COMPLETED,
        completedAt: expect.any(String),
        output: JSON.stringify({ result: 'success' }),
        updatedAt: expect.any(String),
      })
      expect(localRedis.expire).toHaveBeenCalledWith(
        `async-jobs:job:${jobId}`,
        JOB_RETENTION_SECONDS
      )
    })

    it.concurrent('should set TTL to 24 hours (86400 seconds)', async () => {
      const localRedis = createMockRedis()
      const localQueue = new RedisJobQueue(localRedis as never)

      await localQueue.completeJob('run_test123', {})

      expect(localRedis.expire).toHaveBeenCalledWith(expect.any(String), 86400)
    })
  })

  describe('markJobFailed', () => {
    it.concurrent('should set status to failed and set TTL', async () => {
      const localRedis = createMockRedis()
      const localQueue = new RedisJobQueue(localRedis as never)
      const jobId = 'run_test456'
      const error = 'Something went wrong'

      await localQueue.markJobFailed(jobId, error)

      expect(localRedis.hset).toHaveBeenCalledWith(`async-jobs:job:${jobId}`, {
        status: JOB_STATUS.FAILED,
        completedAt: expect.any(String),
        error,
        updatedAt: expect.any(String),
      })
      expect(localRedis.expire).toHaveBeenCalledWith(
        `async-jobs:job:${jobId}`,
        JOB_RETENTION_SECONDS
      )
    })

    it.concurrent('should set TTL to 24 hours (86400 seconds)', async () => {
      const localRedis = createMockRedis()
      const localQueue = new RedisJobQueue(localRedis as never)

      await localQueue.markJobFailed('run_test456', 'error')

      expect(localRedis.expire).toHaveBeenCalledWith(expect.any(String), 86400)
    })
  })

  describe('startJob', () => {
    it.concurrent('should not set TTL when starting a job', async () => {
      const localRedis = createMockRedis()
      const localQueue = new RedisJobQueue(localRedis as never)

      await localQueue.startJob('run_test789')

      expect(localRedis.hset).toHaveBeenCalled()
      expect(localRedis.expire).not.toHaveBeenCalled()
    })
  })

  describe('getJob', () => {
    it.concurrent('should return null for non-existent job', async () => {
      const localRedis = createMockRedis()
      const localQueue = new RedisJobQueue(localRedis as never)
      localRedis.hgetall.mockResolvedValue({})

      const job = await localQueue.getJob('run_nonexistent')

      expect(job).toBeNull()
    })

    it.concurrent('should deserialize job data correctly', async () => {
      const localRedis = createMockRedis()
      const localQueue = new RedisJobQueue(localRedis as never)
      const now = new Date()
      localRedis.hgetall.mockResolvedValue({
        id: 'run_test',
        type: 'workflow-execution',
        payload: JSON.stringify({ foo: 'bar' }),
        status: JOB_STATUS.COMPLETED,
        createdAt: now.toISOString(),
        startedAt: now.toISOString(),
        completedAt: now.toISOString(),
        attempts: '1',
        maxAttempts: '3',
        error: '',
        output: JSON.stringify({ result: 'ok' }),
        metadata: JSON.stringify({ workflowId: 'wf_123' }),
      })

      const job = await localQueue.getJob('run_test')

      expect(job).not.toBeNull()
      expect(job?.id).toBe('run_test')
      expect(job?.type).toBe('workflow-execution')
      expect(job?.payload).toEqual({ foo: 'bar' })
      expect(job?.status).toBe(JOB_STATUS.COMPLETED)
      expect(job?.output).toEqual({ result: 'ok' })
      expect(job?.metadata.workflowId).toBe('wf_123')
    })
  })
})

describe('JOB_RETENTION_SECONDS', () => {
  it.concurrent('should be 24 hours in seconds', async () => {
    expect(JOB_RETENTION_SECONDS).toBe(24 * 60 * 60)
    expect(JOB_RETENTION_SECONDS).toBe(86400)
  })
})
146  apps/sim/lib/core/async-jobs/backends/redis.ts  Normal file
@@ -0,0 +1,146 @@
import { createLogger } from '@sim/logger'
import type Redis from 'ioredis'
import {
  type EnqueueOptions,
  JOB_MAX_LIFETIME_SECONDS,
  JOB_RETENTION_SECONDS,
  JOB_STATUS,
  type Job,
  type JobMetadata,
  type JobQueueBackend,
  type JobStatus,
  type JobType,
} from '@/lib/core/async-jobs/types'

const logger = createLogger('RedisJobQueue')

const KEYS = {
  job: (id: string) => `async-jobs:job:${id}`,
} as const

function serializeJob(job: Job): Record<string, string> {
  return {
    id: job.id,
    type: job.type,
    payload: JSON.stringify(job.payload),
    status: job.status,
    createdAt: job.createdAt.toISOString(),
    startedAt: job.startedAt?.toISOString() ?? '',
    completedAt: job.completedAt?.toISOString() ?? '',
    attempts: job.attempts.toString(),
    maxAttempts: job.maxAttempts.toString(),
    error: job.error ?? '',
    output: job.output !== undefined ? JSON.stringify(job.output) : '',
    metadata: JSON.stringify(job.metadata),
    updatedAt: new Date().toISOString(),
  }
}

function deserializeJob(data: Record<string, string>): Job | null {
  if (!data || !data.id) return null

  try {
    return {
      id: data.id,
      type: data.type as JobType,
      payload: JSON.parse(data.payload),
      status: data.status as JobStatus,
      createdAt: new Date(data.createdAt),
      startedAt: data.startedAt ? new Date(data.startedAt) : undefined,
      completedAt: data.completedAt ? new Date(data.completedAt) : undefined,
      attempts: Number.parseInt(data.attempts, 10),
      maxAttempts: Number.parseInt(data.maxAttempts, 10),
      error: data.error || undefined,
      output: data.output ? JSON.parse(data.output) : undefined,
      metadata: JSON.parse(data.metadata) as JobMetadata,
    }
  } catch (error) {
    logger.error('Failed to deserialize job', { error, data })
    return null
  }
}

export class RedisJobQueue implements JobQueueBackend {
  private redis: Redis

  constructor(redis: Redis) {
    this.redis = redis
  }

  async enqueue<TPayload>(
    type: JobType,
    payload: TPayload,
    options?: EnqueueOptions
  ): Promise<string> {
    const jobId = `run_${crypto.randomUUID().replace(/-/g, '').slice(0, 20)}`
    const now = new Date()

    const job: Job<TPayload> = {
      id: jobId,
      type,
      payload,
      status: JOB_STATUS.PENDING,
      createdAt: now,
      attempts: 0,
      maxAttempts: options?.maxAttempts ?? 3,
      metadata: options?.metadata ?? {},
    }

    const key = KEYS.job(jobId)
    const serialized = serializeJob(job as Job)
    await this.redis.hset(key, serialized)
    await this.redis.expire(key, JOB_MAX_LIFETIME_SECONDS)

    logger.debug('Enqueued job', { jobId, type })
    return jobId
  }

  async getJob(jobId: string): Promise<Job | null> {
    const data = await this.redis.hgetall(KEYS.job(jobId))
    return deserializeJob(data)
  }

  async startJob(jobId: string): Promise<void> {
    const now = new Date()
    const key = KEYS.job(jobId)

    await this.redis.hset(key, {
      status: JOB_STATUS.PROCESSING,
      startedAt: now.toISOString(),
      updatedAt: now.toISOString(),
    })
    await this.redis.hincrby(key, 'attempts', 1)

    logger.debug('Started job', { jobId })
  }

  async completeJob(jobId: string, output: unknown): Promise<void> {
    const now = new Date()
    const key = KEYS.job(jobId)

    await this.redis.hset(key, {
      status: JOB_STATUS.COMPLETED,
      completedAt: now.toISOString(),
      output: JSON.stringify(output),
      updatedAt: now.toISOString(),
    })
    await this.redis.expire(key, JOB_RETENTION_SECONDS)

    logger.debug('Completed job', { jobId })
  }

  async markJobFailed(jobId: string, error: string): Promise<void> {
    const now = new Date()
    const key = KEYS.job(jobId)

    await this.redis.hset(key, {
      status: JOB_STATUS.FAILED,
      completedAt: now.toISOString(),
      error,
      updatedAt: now.toISOString(),
    })
    await this.redis.expire(key, JOB_RETENTION_SECONDS)

    logger.debug('Marked job as failed', { jobId })
  }
}
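The TTL handling is the subtle part of this backend: enqueue stamps a 48-hour max lifetime so stuck jobs cannot leak, startJob deliberately leaves that TTL alone, and only the terminal transitions reset it to the 24-hour retention window. A sketch of the resulting lifecycle, assuming an ioredis client is available:

import type Redis from 'ioredis'

async function demonstrateJobLifecycle(redis: Redis) {
  const queue = new RedisJobQueue(redis)
  const jobId = await queue.enqueue('workflow-execution', { workflowId: 'wf_123' })
  // Key now expires after JOB_MAX_LIFETIME_SECONDS (48h) even if nothing picks it up.
  await queue.startJob(jobId) // attempts incremented via HINCRBY; TTL deliberately untouched
  await queue.completeJob(jobId, { ok: true })
  // Terminal transition shortens the TTL to JOB_RETENTION_SECONDS (24h).
  return queue.getJob(jobId)
}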
119  apps/sim/lib/core/async-jobs/backends/trigger-dev.ts  Normal file
@@ -0,0 +1,119 @@
import { createLogger } from '@sim/logger'
import { runs, tasks } from '@trigger.dev/sdk'
import {
  type EnqueueOptions,
  JOB_STATUS,
  type Job,
  type JobMetadata,
  type JobQueueBackend,
  type JobStatus,
  type JobType,
} from '@/lib/core/async-jobs/types'

const logger = createLogger('TriggerDevJobQueue')

/**
 * Maps trigger.dev task IDs to our JobType
 */
const JOB_TYPE_TO_TASK_ID: Record<JobType, string> = {
  'workflow-execution': 'workflow-execution',
  'schedule-execution': 'schedule-execution',
  'webhook-execution': 'webhook-execution',
}

/**
 * Maps trigger.dev run status to our JobStatus
 */
function mapTriggerDevStatus(status: string): JobStatus {
  switch (status) {
    case 'QUEUED':
    case 'WAITING_FOR_DEPLOY':
      return JOB_STATUS.PENDING
    case 'EXECUTING':
    case 'RESCHEDULED':
    case 'FROZEN':
      return JOB_STATUS.PROCESSING
    case 'COMPLETED':
      return JOB_STATUS.COMPLETED
    case 'CANCELED':
    case 'FAILED':
    case 'CRASHED':
    case 'INTERRUPTED':
    case 'SYSTEM_FAILURE':
    case 'EXPIRED':
      return JOB_STATUS.FAILED
    default:
      return JOB_STATUS.PENDING
  }
}

/**
 * Adapter that wraps the trigger.dev SDK to conform to JobQueueBackend interface.
 */
export class TriggerDevJobQueue implements JobQueueBackend {
  async enqueue<TPayload>(
    type: JobType,
    payload: TPayload,
    options?: EnqueueOptions
  ): Promise<string> {
    const taskId = JOB_TYPE_TO_TASK_ID[type]
    if (!taskId) {
      throw new Error(`Unknown job type: ${type}`)
    }

    const enrichedPayload =
      options?.metadata && typeof payload === 'object' && payload !== null
        ? { ...payload, ...options.metadata }
        : payload

    const handle = await tasks.trigger(taskId, enrichedPayload)

    logger.debug('Enqueued job via trigger.dev', { jobId: handle.id, type, taskId })
    return handle.id
  }

  async getJob(jobId: string): Promise<Job | null> {
    try {
      const run = await runs.retrieve(jobId)

      const payload = run.payload as Record<string, unknown>
      const metadata: JobMetadata = {
        workflowId: payload?.workflowId as string | undefined,
        userId: payload?.userId as string | undefined,
      }

      return {
        id: jobId,
        type: run.taskIdentifier as JobType,
        payload: run.payload,
        status: mapTriggerDevStatus(run.status),
        createdAt: run.createdAt ? new Date(run.createdAt) : new Date(),
        startedAt: run.startedAt ? new Date(run.startedAt) : undefined,
        completedAt: run.finishedAt ? new Date(run.finishedAt) : undefined,
        attempts: run.attemptCount ?? 1,
        maxAttempts: 3,
        error: run.error?.message,
        output: run.output as unknown,
        metadata,
      }
    } catch (error) {
      const isNotFound =
        (error instanceof Error && error.message.toLowerCase().includes('not found')) ||
        (error && typeof error === 'object' && 'status' in error && error.status === 404)

      if (isNotFound) {
        logger.debug('Job not found in trigger.dev', { jobId })
        return null
      }

      logger.error('Failed to get job from trigger.dev', { jobId, error })
      throw error
    }
  }

  async startJob(_jobId: string): Promise<void> {}

  async completeJob(_jobId: string, _output: unknown): Promise<void> {}

  async markJobFailed(_jobId: string, _error: string): Promise<void> {}
}
88  apps/sim/lib/core/async-jobs/config.ts  Normal file
@@ -0,0 +1,88 @@
import { createLogger } from '@sim/logger'
import type { AsyncBackendType, JobQueueBackend } from '@/lib/core/async-jobs/types'
import { isTriggerDevEnabled } from '@/lib/core/config/feature-flags'
import { getRedisClient } from '@/lib/core/config/redis'

const logger = createLogger('AsyncJobsConfig')

let cachedBackend: JobQueueBackend | null = null
let cachedBackendType: AsyncBackendType | null = null

/**
 * Determines which async backend to use based on environment configuration.
 * Follows the fallback chain: trigger.dev → redis → database
 */
export function getAsyncBackendType(): AsyncBackendType {
  if (isTriggerDevEnabled) {
    return 'trigger-dev'
  }

  const redis = getRedisClient()
  if (redis) {
    return 'redis'
  }

  return 'database'
}

/**
 * Gets the job queue backend singleton.
 * Creates the appropriate backend based on environment configuration.
 */
export async function getJobQueue(): Promise<JobQueueBackend> {
  if (cachedBackend) {
    return cachedBackend
  }

  const type = getAsyncBackendType()

  switch (type) {
    case 'trigger-dev': {
      const { TriggerDevJobQueue } = await import('@/lib/core/async-jobs/backends/trigger-dev')
      cachedBackend = new TriggerDevJobQueue()
      break
    }
    case 'redis': {
      const redis = getRedisClient()
      if (!redis) {
        throw new Error('Redis client not available but redis backend was selected')
      }
      const { RedisJobQueue } = await import('@/lib/core/async-jobs/backends/redis')
      cachedBackend = new RedisJobQueue(redis)
      break
    }
    case 'database': {
      const { DatabaseJobQueue } = await import('@/lib/core/async-jobs/backends/database')
      cachedBackend = new DatabaseJobQueue()
      break
    }
  }

  cachedBackendType = type
  logger.info(`Async job backend initialized: ${type}`)

  return cachedBackend
}

/**
 * Gets the current backend type (for logging/debugging)
 */
export function getCurrentBackendType(): AsyncBackendType | null {
  return cachedBackendType
}

/**
 * Checks if jobs should be executed inline (fire-and-forget).
 * For Redis/DB backends, we execute inline. Trigger.dev handles execution itself.
 */
export function shouldExecuteInline(): boolean {
  return getAsyncBackendType() !== 'trigger-dev'
}

/**
 * Resets the cached backend (useful for testing)
 */
export function resetJobQueueCache(): void {
  cachedBackend = null
  cachedBackendType = null
}
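Callers are expected to pair getJobQueue with shouldExecuteInline: trigger.dev runs the task itself, while the Redis and database backends only record state, so the caller must kick off the work. A sketch of that pattern, with runJob as a hypothetical executor that is not part of this diff:

import { getJobQueue, shouldExecuteInline } from '@/lib/core/async-jobs'

// Hypothetical executor that loads the job and runs it; not part of this diff.
declare function runJob(jobId: string): Promise<void>

async function enqueueWebhookExecution(workflowId: string, payload: Record<string, unknown>) {
  const queue = await getJobQueue()
  const jobId = await queue.enqueue('webhook-execution', payload, { metadata: { workflowId } })

  if (shouldExecuteInline()) {
    // Redis/DB backends are bookkeeping only; fire-and-forget the actual work.
    void runJob(jobId)
  }
  return jobId
}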
22  apps/sim/lib/core/async-jobs/index.ts  Normal file
@@ -0,0 +1,22 @@
export {
  getAsyncBackendType,
  getCurrentBackendType,
  getJobQueue,
  resetJobQueueCache,
  shouldExecuteInline,
} from './config'
export type {
  AsyncBackendType,
  EnqueueOptions,
  Job,
  JobMetadata,
  JobQueueBackend,
  JobStatus,
  JobType,
} from './types'
export {
  JOB_MAX_LIFETIME_SECONDS,
  JOB_RETENTION_HOURS,
  JOB_RETENTION_SECONDS,
  JOB_STATUS,
} from './types'
32  apps/sim/lib/core/async-jobs/types.test.ts  Normal file
@@ -0,0 +1,32 @@
/**
 * @vitest-environment node
 */
import { describe, expect, it } from 'vitest'
import { JOB_MAX_LIFETIME_SECONDS, JOB_RETENTION_HOURS, JOB_RETENTION_SECONDS } from './types'

describe('Job retention constants', () => {
  it.concurrent('JOB_RETENTION_HOURS should be 24', async () => {
    expect(JOB_RETENTION_HOURS).toBe(24)
  })

  it.concurrent('JOB_RETENTION_SECONDS should be derived from JOB_RETENTION_HOURS', async () => {
    expect(JOB_RETENTION_SECONDS).toBe(JOB_RETENTION_HOURS * 60 * 60)
  })

  it.concurrent('JOB_RETENTION_SECONDS should equal 86400 (24 hours)', async () => {
    expect(JOB_RETENTION_SECONDS).toBe(86400)
  })

  it.concurrent('constants should be consistent with each other', async () => {
    const hoursToSeconds = JOB_RETENTION_HOURS * 60 * 60
    expect(JOB_RETENTION_SECONDS).toBe(hoursToSeconds)
  })

  it.concurrent(
    'JOB_MAX_LIFETIME_SECONDS should be greater than JOB_RETENTION_SECONDS',
    async () => {
      expect(JOB_MAX_LIFETIME_SECONDS).toBeGreaterThan(JOB_RETENTION_SECONDS)
      expect(JOB_MAX_LIFETIME_SECONDS).toBe(48 * 60 * 60)
    }
  )
})
82  apps/sim/lib/core/async-jobs/types.ts  Normal file
@@ -0,0 +1,82 @@
/**
 * Types and constants for the async job queue system
 */

/** Retention period for completed/failed jobs (in hours) */
export const JOB_RETENTION_HOURS = 24

/** Retention period for completed/failed jobs (in seconds, for Redis TTL) */
export const JOB_RETENTION_SECONDS = JOB_RETENTION_HOURS * 60 * 60

/** Max lifetime for jobs in Redis (in seconds) - cleanup for stuck pending/processing jobs */
export const JOB_MAX_LIFETIME_SECONDS = 48 * 60 * 60

export const JOB_STATUS = {
  PENDING: 'pending',
  PROCESSING: 'processing',
  COMPLETED: 'completed',
  FAILED: 'failed',
} as const

export type JobStatus = (typeof JOB_STATUS)[keyof typeof JOB_STATUS]

export type JobType = 'workflow-execution' | 'schedule-execution' | 'webhook-execution'

export interface Job<TPayload = unknown, TOutput = unknown> {
  id: string
  type: JobType
  payload: TPayload
  status: JobStatus
  createdAt: Date
  startedAt?: Date
  completedAt?: Date
  attempts: number
  maxAttempts: number
  error?: string
  output?: TOutput
  metadata: JobMetadata
}

export interface JobMetadata {
  workflowId?: string
  userId?: string
  [key: string]: unknown
}

export interface EnqueueOptions {
  maxAttempts?: number
  metadata?: JobMetadata
}

/**
 * Backend interface for job queue implementations.
 * All backends must implement this interface.
 */
export interface JobQueueBackend {
  /**
   * Add a job to the queue
   */
  enqueue<TPayload>(type: JobType, payload: TPayload, options?: EnqueueOptions): Promise<string>

  /**
   * Get a job by ID
   */
  getJob(jobId: string): Promise<Job | null>

  /**
   * Mark a job as started/processing
   */
  startJob(jobId: string): Promise<void>

  /**
   * Mark a job as completed with output
   */
  completeJob(jobId: string, output: unknown): Promise<void>

  /**
   * Mark a job as failed with error message
   */
  markJobFailed(jobId: string, error: string): Promise<void>
}

export type AsyncBackendType = 'trigger-dev' | 'redis' | 'database'
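Because JobQueueBackend is a small surface, a test double is easy to hand-roll; this in-memory sketch satisfies the interface and could stand in for the real backends in unit tests (illustrative, not part of the diff):

import {
  JOB_STATUS,
  type EnqueueOptions,
  type Job,
  type JobQueueBackend,
  type JobType,
} from '@/lib/core/async-jobs/types'

class InMemoryJobQueue implements JobQueueBackend {
  private jobs = new Map<string, Job>()

  async enqueue<TPayload>(type: JobType, payload: TPayload, options?: EnqueueOptions): Promise<string> {
    const id = `run_${this.jobs.size + 1}`
    this.jobs.set(id, {
      id,
      type,
      payload,
      status: JOB_STATUS.PENDING,
      createdAt: new Date(),
      attempts: 0,
      maxAttempts: options?.maxAttempts ?? 3,
      metadata: options?.metadata ?? {},
    })
    return id
  }

  async getJob(jobId: string): Promise<Job | null> {
    return this.jobs.get(jobId) ?? null
  }

  async startJob(jobId: string): Promise<void> {
    const job = this.jobs.get(jobId)
    // Mirrors the real backends: bump attempts and move to processing.
    if (job) Object.assign(job, { status: JOB_STATUS.PROCESSING, startedAt: new Date(), attempts: job.attempts + 1 })
  }

  async completeJob(jobId: string, output: unknown): Promise<void> {
    const job = this.jobs.get(jobId)
    if (job) Object.assign(job, { status: JOB_STATUS.COMPLETED, completedAt: new Date(), output })
  }

  async markJobFailed(jobId: string, error: string): Promise<void> {
    const job = this.jobs.get(jobId)
    if (job) Object.assign(job, { status: JOB_STATUS.FAILED, completedAt: new Date(), error })
  }
}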
@@ -170,10 +170,15 @@ export const env = createEnv({
    RATE_LIMIT_ENTERPRISE_SYNC: z.string().optional().default('600'), // Enterprise tier sync API executions per minute
    RATE_LIMIT_ENTERPRISE_ASYNC: z.string().optional().default('5000'), // Enterprise tier async API executions per minute

    EXECUTION_TIMEOUT_FREE: z.string().optional().default('300'),
    EXECUTION_TIMEOUT_PRO: z.string().optional().default('3600'),
    EXECUTION_TIMEOUT_TEAM: z.string().optional().default('3600'),
    EXECUTION_TIMEOUT_ENTERPRISE: z.string().optional().default('3600'),
    // Timeout Configuration
    EXECUTION_TIMEOUT_FREE: z.string().optional().default('300'), // 5 minutes
    EXECUTION_TIMEOUT_PRO: z.string().optional().default('3000'), // 50 minutes
    EXECUTION_TIMEOUT_TEAM: z.string().optional().default('3000'), // 50 minutes
    EXECUTION_TIMEOUT_ENTERPRISE: z.string().optional().default('3000'), // 50 minutes
    EXECUTION_TIMEOUT_ASYNC_FREE: z.string().optional().default('5400'), // 90 minutes
    EXECUTION_TIMEOUT_ASYNC_PRO: z.string().optional().default('5400'), // 90 minutes
    EXECUTION_TIMEOUT_ASYNC_TEAM: z.string().optional().default('5400'), // 90 minutes
    EXECUTION_TIMEOUT_ASYNC_ENTERPRISE: z.string().optional().default('5400'), // 90 minutes

    // Knowledge Base Processing Configuration - Shared across all processing methods
    KB_CONFIG_MAX_DURATION: z.number().optional().default(600), // Max processing duration in seconds (10 minutes)
@@ -345,7 +350,6 @@ export const env = createEnv({
    NEXT_PUBLIC_BRAND_BACKGROUND_COLOR: z.string().regex(/^#[0-9A-Fa-f]{6}$/).optional(), // Brand background color (hex format)

    // Feature Flags
    NEXT_PUBLIC_TRIGGER_DEV_ENABLED: z.boolean().optional(), // Client-side gate for async executions UI
    NEXT_PUBLIC_SSO_ENABLED: z.boolean().optional(), // Enable SSO login UI components
    NEXT_PUBLIC_CREDENTIAL_SETS_ENABLED: z.boolean().optional(), // Enable credential sets (email polling) on self-hosted
    NEXT_PUBLIC_ACCESS_CONTROL_ENABLED: z.boolean().optional(), // Enable access control (permission groups) on self-hosted
@@ -377,7 +381,6 @@ export const env = createEnv({
    NEXT_PUBLIC_BRAND_ACCENT_COLOR: process.env.NEXT_PUBLIC_BRAND_ACCENT_COLOR,
    NEXT_PUBLIC_BRAND_ACCENT_HOVER_COLOR: process.env.NEXT_PUBLIC_BRAND_ACCENT_HOVER_COLOR,
    NEXT_PUBLIC_BRAND_BACKGROUND_COLOR: process.env.NEXT_PUBLIC_BRAND_BACKGROUND_COLOR,
    NEXT_PUBLIC_TRIGGER_DEV_ENABLED: process.env.NEXT_PUBLIC_TRIGGER_DEV_ENABLED,
    NEXT_PUBLIC_SSO_ENABLED: process.env.NEXT_PUBLIC_SSO_ENABLED,
    NEXT_PUBLIC_CREDENTIAL_SETS_ENABLED: process.env.NEXT_PUBLIC_CREDENTIAL_SETS_ENABLED,
    NEXT_PUBLIC_ACCESS_CONTROL_ENABLED: process.env.NEXT_PUBLIC_ACCESS_CONTROL_ENABLED,

@@ -1,4 +1,5 @@
import { env } from '@/lib/core/config/env'
import { isBillingEnabled } from '@/lib/core/config/feature-flags'
import type { SubscriptionPlan } from '@/lib/core/rate-limiter/types'

interface ExecutionTimeoutConfig {
@@ -8,13 +9,17 @@ interface ExecutionTimeoutConfig {

const DEFAULT_SYNC_TIMEOUTS_SECONDS = {
  free: 300,
  pro: 3600,
  team: 3600,
  enterprise: 3600,
  pro: 3000,
  team: 3000,
  enterprise: 3000,
} as const

const ASYNC_MULTIPLIER = 2
const MAX_ASYNC_TIMEOUT_SECONDS = 5400
const DEFAULT_ASYNC_TIMEOUTS_SECONDS = {
  free: 5400,
  pro: 5400,
  team: 5400,
  enterprise: 5400,
} as const

function getSyncTimeoutForPlan(plan: SubscriptionPlan): number {
  const envVarMap: Record<SubscriptionPlan, string | undefined> = {
@@ -27,10 +32,13 @@ function getSyncTimeoutForPlan(plan: SubscriptionPlan): number {
}

function getAsyncTimeoutForPlan(plan: SubscriptionPlan): number {
  const syncMs = getSyncTimeoutForPlan(plan)
  const asyncMs = syncMs * ASYNC_MULTIPLIER
  const maxAsyncMs = MAX_ASYNC_TIMEOUT_SECONDS * 1000
  return Math.min(asyncMs, maxAsyncMs)
  const envVarMap: Record<SubscriptionPlan, string | undefined> = {
    free: env.EXECUTION_TIMEOUT_ASYNC_FREE,
    pro: env.EXECUTION_TIMEOUT_ASYNC_PRO,
    team: env.EXECUTION_TIMEOUT_ASYNC_TEAM,
    enterprise: env.EXECUTION_TIMEOUT_ASYNC_ENTERPRISE,
  }
  return (Number.parseInt(envVarMap[plan] || '') || DEFAULT_ASYNC_TIMEOUTS_SECONDS[plan]) * 1000
}

const EXECUTION_TIMEOUTS: Record<SubscriptionPlan, ExecutionTimeoutConfig> = {
@@ -56,6 +64,9 @@ export function getExecutionTimeout(
  plan: SubscriptionPlan | undefined,
  type: 'sync' | 'async' = 'sync'
): number {
  if (!isBillingEnabled) {
    return EXECUTION_TIMEOUTS.free[type]
  }
  return EXECUTION_TIMEOUTS[plan || 'free'][type]
}

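Worked through for a Pro workspace with no overrides set: EXECUTION_TIMEOUT_PRO is unset, so parsing falls back to DEFAULT_SYNC_TIMEOUTS_SECONDS.pro = 3000, and the sync timeout resolves to 3000 × 1000 = 3,000,000 ms (50 minutes); the async path independently resolves to 5400 × 1000 = 5,400,000 ms (90 minutes). A quick sanity check of that parse-or-fallback pattern, illustrative only:

// Mirrors the resolution logic above; not part of the diff.
const syncSeconds = Number.parseInt(process.env.EXECUTION_TIMEOUT_PRO || '') || 3000
const asyncSeconds = Number.parseInt(process.env.EXECUTION_TIMEOUT_ASYNC_PRO || '') || 5400
console.log(syncSeconds * 1000) // 3000000 ms = 50 minutes when no override is set
console.log(asyncSeconds * 1000) // 5400000 ms = 90 minutes when no override is set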
@@ -5,6 +5,7 @@ import type { ConsumeResult, RateLimitStorageAdapter, TokenStatus } from './stor
|
||||
import { MANUAL_EXECUTION_LIMIT, RATE_LIMITS, RateLimitError } from './types'
|
||||
|
||||
vi.mock('@sim/logger', () => loggerMock)
|
||||
vi.mock('@/lib/core/config/feature-flags', () => ({ isBillingEnabled: true }))
|
||||
|
||||
interface MockAdapter {
|
||||
consumeTokens: Mock
|
||||
|
||||
@@ -1,13 +1,9 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { createStorageAdapter, type RateLimitStorageAdapter } from './storage'
|
||||
import {
|
||||
createStorageAdapter,
|
||||
type RateLimitStorageAdapter,
|
||||
type TokenBucketConfig,
|
||||
} from './storage'
|
||||
import {
|
||||
getRateLimit,
|
||||
MANUAL_EXECUTION_LIMIT,
|
||||
RATE_LIMIT_WINDOW_MS,
|
||||
RATE_LIMITS,
|
||||
type RateLimitCounterType,
|
||||
type SubscriptionPlan,
|
||||
type TriggerType,
|
||||
@@ -57,21 +53,6 @@ export class RateLimiter {
|
||||
return isAsync ? 'async' : 'sync'
|
||||
}
|
||||
|
||||
private getBucketConfig(
|
||||
plan: SubscriptionPlan,
|
||||
counterType: RateLimitCounterType
|
||||
): TokenBucketConfig {
|
||||
const config = RATE_LIMITS[plan]
|
||||
switch (counterType) {
|
||||
case 'api-endpoint':
|
||||
return config.apiEndpoint
|
||||
case 'async':
|
||||
return config.async
|
||||
case 'sync':
|
||||
return config.sync
|
||||
}
|
||||
}
|
||||
|
||||
private buildStorageKey(rateLimitKey: string, counterType: RateLimitCounterType): string {
|
||||
return `${rateLimitKey}:${counterType}`
|
||||
}
|
||||
@@ -84,15 +65,6 @@ export class RateLimiter {
|
||||
}
|
||||
}
|
||||
|
||||
private createUnlimitedStatus(config: TokenBucketConfig): RateLimitStatus {
|
||||
return {
|
||||
requestsPerMinute: MANUAL_EXECUTION_LIMIT,
|
||||
maxBurst: MANUAL_EXECUTION_LIMIT,
|
||||
remaining: MANUAL_EXECUTION_LIMIT,
|
||||
resetAt: new Date(Date.now() + config.refillIntervalMs),
|
||||
}
|
||||
}
|
||||
|
||||
async checkRateLimitWithSubscription(
|
||||
userId: string,
|
||||
subscription: SubscriptionInfo | null,
|
||||
@@ -107,7 +79,7 @@ export class RateLimiter {
|
||||
const plan = (subscription?.plan || 'free') as SubscriptionPlan
|
||||
const rateLimitKey = this.getRateLimitKey(userId, subscription)
|
||||
const counterType = this.getCounterType(triggerType, isAsync)
|
||||
const config = this.getBucketConfig(plan, counterType)
|
||||
const config = getRateLimit(plan, counterType)
|
||||
const storageKey = this.buildStorageKey(rateLimitKey, counterType)
|
||||
|
||||
const result = await this.storage.consumeTokens(storageKey, 1, config)
|
||||
@@ -152,10 +124,15 @@ export class RateLimiter {
|
||||
try {
|
||||
const plan = (subscription?.plan || 'free') as SubscriptionPlan
|
||||
const counterType = this.getCounterType(triggerType, isAsync)
|
||||
const config = this.getBucketConfig(plan, counterType)
|
||||
const config = getRateLimit(plan, counterType)
|
||||
|
||||
if (triggerType === 'manual') {
|
||||
return this.createUnlimitedStatus(config)
|
||||
return {
|
||||
requestsPerMinute: MANUAL_EXECUTION_LIMIT,
|
||||
maxBurst: MANUAL_EXECUTION_LIMIT,
|
||||
remaining: MANUAL_EXECUTION_LIMIT,
|
||||
resetAt: new Date(Date.now() + config.refillIntervalMs),
|
||||
}
|
||||
}
|
||||
|
||||
const rateLimitKey = this.getRateLimitKey(userId, subscription)
|
||||
@@ -178,7 +155,7 @@ export class RateLimiter {
|
||||
})
|
||||
const plan = (subscription?.plan || 'free') as SubscriptionPlan
|
||||
const counterType = this.getCounterType(triggerType, isAsync)
|
||||
const config = this.getBucketConfig(plan, counterType)
|
||||
const config = getRateLimit(plan, counterType)
|
||||
return {
|
||||
requestsPerMinute: config.refillRate,
|
||||
maxBurst: config.maxTokens,
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { env } from '@/lib/core/config/env'
|
||||
import { isBillingEnabled } from '@/lib/core/config/feature-flags'
|
||||
import type { CoreTriggerType } from '@/stores/logs/filters/types'
|
||||
import type { TokenBucketConfig } from './storage'

@@ -6,6 +7,8 @@ export type TriggerType = CoreTriggerType | 'form' | 'api-endpoint'

export type RateLimitCounterType = 'sync' | 'async' | 'api-endpoint'

type RateLimitConfigKey = 'sync' | 'async' | 'apiEndpoint'

export type SubscriptionPlan = 'free' | 'pro' | 'team' | 'enterprise'

export interface RateLimitConfig {
@@ -18,6 +21,17 @@ export const RATE_LIMIT_WINDOW_MS = Number.parseInt(env.RATE_LIMIT_WINDOW_MS) ||

export const MANUAL_EXECUTION_LIMIT = Number.parseInt(env.MANUAL_EXECUTION_LIMIT) || 999999

const DEFAULT_RATE_LIMITS = {
free: { sync: 50, async: 200, apiEndpoint: 30 },
pro: { sync: 150, async: 1000, apiEndpoint: 100 },
team: { sync: 300, async: 2500, apiEndpoint: 200 },
enterprise: { sync: 600, async: 5000, apiEndpoint: 500 },
} as const

function toConfigKey(type: RateLimitCounterType): RateLimitConfigKey {
return type === 'api-endpoint' ? 'apiEndpoint' : type
}

function createBucketConfig(ratePerMinute: number, burstMultiplier = 2): TokenBucketConfig {
return {
maxTokens: ratePerMinute * burstMultiplier,
@@ -26,29 +40,64 @@ function createBucketConfig(ratePerMinute: number, burstMultiplier = 2): TokenBu
}
}

function getRateLimitForPlan(plan: SubscriptionPlan, type: RateLimitConfigKey): TokenBucketConfig {
const envVarMap: Record<SubscriptionPlan, Record<RateLimitConfigKey, string | undefined>> = {
free: {
sync: env.RATE_LIMIT_FREE_SYNC,
async: env.RATE_LIMIT_FREE_ASYNC,
apiEndpoint: undefined,
},
pro: { sync: env.RATE_LIMIT_PRO_SYNC, async: env.RATE_LIMIT_PRO_ASYNC, apiEndpoint: undefined },
team: {
sync: env.RATE_LIMIT_TEAM_SYNC,
async: env.RATE_LIMIT_TEAM_ASYNC,
apiEndpoint: undefined,
},
enterprise: {
sync: env.RATE_LIMIT_ENTERPRISE_SYNC,
async: env.RATE_LIMIT_ENTERPRISE_ASYNC,
apiEndpoint: undefined,
},
}

const rate = Number.parseInt(envVarMap[plan][type] || '') || DEFAULT_RATE_LIMITS[plan][type]
return createBucketConfig(rate)
}

export const RATE_LIMITS: Record<SubscriptionPlan, RateLimitConfig> = {
free: {
sync: createBucketConfig(Number.parseInt(env.RATE_LIMIT_FREE_SYNC) || 50),
async: createBucketConfig(Number.parseInt(env.RATE_LIMIT_FREE_ASYNC) || 200),
apiEndpoint: createBucketConfig(30),
sync: getRateLimitForPlan('free', 'sync'),
async: getRateLimitForPlan('free', 'async'),
apiEndpoint: getRateLimitForPlan('free', 'apiEndpoint'),
},
pro: {
sync: createBucketConfig(Number.parseInt(env.RATE_LIMIT_PRO_SYNC) || 150),
async: createBucketConfig(Number.parseInt(env.RATE_LIMIT_PRO_ASYNC) || 1000),
apiEndpoint: createBucketConfig(100),
sync: getRateLimitForPlan('pro', 'sync'),
async: getRateLimitForPlan('pro', 'async'),
apiEndpoint: getRateLimitForPlan('pro', 'apiEndpoint'),
},
team: {
sync: createBucketConfig(Number.parseInt(env.RATE_LIMIT_TEAM_SYNC) || 300),
async: createBucketConfig(Number.parseInt(env.RATE_LIMIT_TEAM_ASYNC) || 2500),
apiEndpoint: createBucketConfig(200),
sync: getRateLimitForPlan('team', 'sync'),
async: getRateLimitForPlan('team', 'async'),
apiEndpoint: getRateLimitForPlan('team', 'apiEndpoint'),
},
enterprise: {
sync: createBucketConfig(Number.parseInt(env.RATE_LIMIT_ENTERPRISE_SYNC) || 600),
async: createBucketConfig(Number.parseInt(env.RATE_LIMIT_ENTERPRISE_ASYNC) || 5000),
apiEndpoint: createBucketConfig(500),
sync: getRateLimitForPlan('enterprise', 'sync'),
async: getRateLimitForPlan('enterprise', 'async'),
apiEndpoint: getRateLimitForPlan('enterprise', 'apiEndpoint'),
},
}

export function getRateLimit(
plan: SubscriptionPlan | undefined,
type: RateLimitCounterType
): TokenBucketConfig {
const key = toConfigKey(type)
if (!isBillingEnabled) {
return RATE_LIMITS.free[key]
}
return RATE_LIMITS[plan || 'free'][key]
}

export class RateLimitError extends Error {
statusCode: number
constructor(message: string, statusCode = 429) {
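For orientation, a minimal usage sketch of the exports above; the import path is hypothetical and the numbers assume the defaults in DEFAULT_RATE_LIMITS:

// A minimal sketch, assuming the exports above; the module path is hypothetical.
import { getRateLimit } from '@/lib/billing/rate-limits'

// 'pro' sync resolves to RATE_LIMIT_PRO_SYNC when set, else the 150/min default;
// the bucket allows a 2x burst, so maxTokens = 150 * 2 = 300 with defaults.
const bucket = getRateLimit('pro', 'sync')
console.log(bucket.maxTokens) // 300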
@@ -185,6 +185,10 @@ export function formatDuration(
const precision = options?.precision ?? 0

if (ms < 1) {
// Zero or near-zero: show "0ms" instead of "0.00ms"
if (ms === 0 || ms < 0.005) {
return '0ms'
}
// Sub-millisecond: show with 2 decimal places
return `${ms.toFixed(2)}ms`
}
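A few expected outputs of the sub-millisecond branch above, as a sketch derived directly from the hunk:

formatDuration(0) // '0ms' (exact zero)
formatDuration(0.004) // '0ms' (below the 0.005 near-zero cutoff)
formatDuration(0.25) // '0.25ms' (sub-millisecond, two decimals)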
@@ -156,7 +156,7 @@ export class McpClient
return result.tools.map((tool: Tool) => ({
name: tool.name,
description: tool.description,
inputSchema: tool.inputSchema,
inputSchema: tool.inputSchema as McpTool['inputSchema'],
serverId: this.config.id,
serverName: this.config.name,
}))
@@ -57,14 +57,29 @@ export interface McpSecurityPolicy {
auditLevel: 'none' | 'basic' | 'detailed'
}

/**
* JSON Schema property definition for tool parameters.
* Follows JSON Schema specification with description support.
*/
export interface McpToolSchemaProperty {
type: string
description?: string
items?: McpToolSchemaProperty
properties?: Record<string, McpToolSchemaProperty>
required?: string[]
enum?: Array<string | number | boolean>
default?: unknown
}

/**
* JSON Schema for tool input parameters.
* Aligns with MCP SDK's Tool.inputSchema structure.
*/
export interface McpToolSchema {
type: 'object'
properties?: Record<string, unknown>
properties?: Record<string, McpToolSchemaProperty>
required?: string[]
description?: string
}

/**
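A literal conforming to the tightened schema types above, as a sketch:

// Sketch: a tool schema that satisfies McpToolSchema/McpToolSchemaProperty.
const exampleSchema: McpToolSchema = {
  type: 'object',
  properties: {
    city: { type: 'string', description: 'City to look up' },
    days: { type: 'number', default: 3 },
    units: { type: 'string', enum: ['metric', 'imperial'] },
  },
  required: ['city'],
}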
@@ -1,12 +1,12 @@
import { db, webhook, workflow, workflowDeploymentVersion } from '@sim/db'
import { credentialSet, subscription } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { tasks } from '@trigger.dev/sdk'
import { and, eq, isNull, or } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { checkEnterprisePlan, checkTeamPlan } from '@/lib/billing/subscriptions/utils'
import { isProd, isTriggerDevEnabled } from '@/lib/core/config/feature-flags'
import { getJobQueue, shouldExecuteInline } from '@/lib/core/async-jobs'
import { isProd } from '@/lib/core/config/feature-flags'
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
import { preprocessExecution } from '@/lib/execution/preprocessing'
import { convertSquareBracketsToTwiML } from '@/lib/webhooks/utils'
@@ -1015,18 +1015,39 @@ export async function queueWebhookExecution(
...(credentialId ? { credentialId } : {}),
}

if (isTriggerDevEnabled) {
const handle = await tasks.trigger('webhook-execution', payload)
logger.info(
`[${options.requestId}] Queued webhook execution task ${handle.id} for ${foundWebhook.provider} webhook`
)
} else {
void executeWebhookJob(payload).catch((error) => {
logger.error(`[${options.requestId}] Direct webhook execution failed`, error)
})
logger.info(
`[${options.requestId}] Queued direct webhook execution for ${foundWebhook.provider} webhook (Trigger.dev disabled)`
)
const jobQueue = await getJobQueue()
const jobId = await jobQueue.enqueue('webhook-execution', payload, {
metadata: { workflowId: foundWorkflow.id, userId: foundWorkflow.userId },
})
logger.info(
`[${options.requestId}] Queued webhook execution task ${jobId} for ${foundWebhook.provider} webhook`
)

if (shouldExecuteInline()) {
void (async () => {
try {
await jobQueue.startJob(jobId)
const output = await executeWebhookJob(payload)
await jobQueue.completeJob(jobId, output)
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error)
logger.error(`[${options.requestId}] Webhook execution failed`, {
jobId,
error: errorMessage,
})
try {
await jobQueue.markJobFailed(jobId, errorMessage)
} catch (markFailedError) {
logger.error(`[${options.requestId}] Failed to mark job as failed`, {
jobId,
error:
markFailedError instanceof Error
? markFailedError.message
: String(markFailedError),
})
}
}
})()
}

if (foundWebhook.provider === 'microsoft-teams') {
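The queue-then-inline pattern above in miniature — a sketch that assumes only the job-queue methods visible in this hunk; payload and metadata are illustrative:

// Enqueue first so the job is durable, then optionally run it inline.
const jobQueue = await getJobQueue()
const jobId = await jobQueue.enqueue('webhook-execution', payload, { metadata })
if (shouldExecuteInline()) {
  // Fire-and-forget so the webhook HTTP response is not blocked on execution.
  void jobQueue
    .startJob(jobId)
    .then(() => executeWebhookJob(payload))
    .then((output) => jobQueue.completeJob(jobId, output))
    .catch((error) => jobQueue.markJobFailed(jobId, String(error)))
}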
@@ -1,7 +1,7 @@
import { db } from '@sim/db'
import { permissions, userStats, workflow as workflowTable } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
import { and, asc, eq, inArray, or } from 'drizzle-orm'
import { NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { getWorkspaceWithOwner, type PermissionType } from '@/lib/workspaces/permissions/utils'
@@ -15,6 +15,53 @@ export async function getWorkflowById(id: string) {
return rows[0]
}

export async function resolveWorkflowIdForUser(
userId: string,
workflowId?: string,
workflowName?: string
): Promise<{ workflowId: string; workflowName?: string } | null> {
if (workflowId) {
return { workflowId }
}

const workspaceIds = await db
.select({ entityId: permissions.entityId })
.from(permissions)
.where(and(eq(permissions.userId, userId), eq(permissions.entityType, 'workspace')))

const workspaceIdList = workspaceIds.map((row) => row.entityId)

const workflowConditions = [eq(workflowTable.userId, userId)]
if (workspaceIdList.length > 0) {
workflowConditions.push(inArray(workflowTable.workspaceId, workspaceIdList))
}

const workflows = await db
.select()
.from(workflowTable)
.where(or(...workflowConditions))
.orderBy(asc(workflowTable.sortOrder), asc(workflowTable.createdAt), asc(workflowTable.id))

if (workflows.length === 0) {
return null
}

if (workflowName) {
const match = workflows.find(
(w) =>
String(w.name || '')
.trim()
.toLowerCase() === workflowName.toLowerCase()
)
if (match) {
return { workflowId: match.id, workflowName: match.name || undefined }
}
return null
}

return { workflowId: workflows[0].id, workflowName: workflows[0].name || undefined }
}

type WorkflowRecord = ReturnType<typeof getWorkflowById> extends Promise<infer R>
? NonNullable<R>
: never
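The resolution order of the helper above, in miniature (a sketch; IDs and names are illustrative):

await resolveWorkflowIdForUser(userId, 'wf_123') // explicit ID wins: { workflowId: 'wf_123' }
await resolveWorkflowIdForUser(userId, undefined, 'Billing') // case-insensitive name match, or null
await resolveWorkflowIdForUser(userId) // first workflow by sortOrder/createdAt/id, or null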
@@ -6,7 +6,6 @@ import { MAX_TOOL_ITERATIONS } from '@/providers'
import {
checkForForcedToolUsage,
createReadableStreamFromAnthropicStream,
generateToolUseId,
} from '@/providers/anthropic/utils'
import {
getMaxOutputTokensForModel,
@@ -433,11 +432,32 @@ export const anthropicProvider: ProviderConfig = {

const executionResults = await Promise.allSettled(toolExecutionPromises)

// Collect all tool_use and tool_result blocks for batching
const toolUseBlocks: Array<{
type: 'tool_use'
id: string
name: string
input: Record<string, unknown>
}> = []
const toolResultBlocks: Array<{
type: 'tool_result'
tool_use_id: string
content: string
}> = []

for (const settledResult of executionResults) {
if (settledResult.status === 'rejected' || !settledResult.value) continue

const { toolName, toolArgs, toolParams, result, startTime, endTime, duration } =
settledResult.value
const {
toolUse,
toolName,
toolArgs,
toolParams,
result,
startTime,
endTime,
duration,
} = settledResult.value

timeSegments.push({
type: 'tool',
@@ -447,7 +467,7 @@ export const anthropicProvider: ProviderConfig = {
duration: duration,
})

let resultContent: any
let resultContent: unknown
if (result.success) {
toolResults.push(result.output)
resultContent = result.output
@@ -469,29 +489,34 @@ export const anthropicProvider: ProviderConfig = {
success: result.success,
})

const toolUseId = generateToolUseId(toolName)

currentMessages.push({
role: 'assistant',
content: [
{
type: 'tool_use',
id: toolUseId,
name: toolName,
input: toolArgs,
} as any,
],
// Add to batched arrays using the ORIGINAL ID from Claude's response
toolUseBlocks.push({
type: 'tool_use',
id: toolUse.id,
name: toolName,
input: toolArgs,
})

toolResultBlocks.push({
type: 'tool_result',
tool_use_id: toolUse.id,
content: JSON.stringify(resultContent),
})
}

// Add ONE assistant message with ALL tool_use blocks
if (toolUseBlocks.length > 0) {
currentMessages.push({
role: 'assistant',
content: toolUseBlocks as unknown as Anthropic.Messages.ContentBlock[],
})
}

// Add ONE user message with ALL tool_result blocks
if (toolResultBlocks.length > 0) {
currentMessages.push({
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: toolUseId,
content: JSON.stringify(resultContent),
} as any,
],
content: toolResultBlocks as unknown as Anthropic.Messages.ContentBlockParam[],
})
}

@@ -777,6 +802,8 @@ export const anthropicProvider: ProviderConfig = {
const toolCallStartTime = Date.now()
const toolName = toolUse.name
const toolArgs = toolUse.input as Record<string, any>
// Preserve the original tool_use ID from Claude's response
const toolUseId = toolUse.id

try {
const tool = request.tools?.find((t) => t.id === toolName)
@@ -787,6 +814,7 @@ export const anthropicProvider: ProviderConfig = {
const toolCallEndTime = Date.now()

return {
toolUseId,
toolName,
toolArgs,
toolParams,
@@ -800,6 +828,7 @@ export const anthropicProvider: ProviderConfig = {
logger.error('Error processing tool call:', { error, toolName })

return {
toolUseId,
toolName,
toolArgs,
toolParams: {},
@@ -817,11 +846,32 @@ export const anthropicProvider: ProviderConfig = {

const executionResults = await Promise.allSettled(toolExecutionPromises)

// Collect all tool_use and tool_result blocks for batching
const toolUseBlocks: Array<{
type: 'tool_use'
id: string
name: string
input: Record<string, unknown>
}> = []
const toolResultBlocks: Array<{
type: 'tool_result'
tool_use_id: string
content: string
}> = []

for (const settledResult of executionResults) {
if (settledResult.status === 'rejected' || !settledResult.value) continue

const { toolName, toolArgs, toolParams, result, startTime, endTime, duration } =
settledResult.value
const {
toolUseId,
toolName,
toolArgs,
toolParams,
result,
startTime,
endTime,
duration,
} = settledResult.value

timeSegments.push({
type: 'tool',
@@ -831,7 +881,7 @@ export const anthropicProvider: ProviderConfig = {
duration: duration,
})

let resultContent: any
let resultContent: unknown
if (result.success) {
toolResults.push(result.output)
resultContent = result.output
@@ -853,29 +903,34 @@ export const anthropicProvider: ProviderConfig = {
success: result.success,
})

const toolUseId = generateToolUseId(toolName)

currentMessages.push({
role: 'assistant',
content: [
{
type: 'tool_use',
id: toolUseId,
name: toolName,
input: toolArgs,
} as any,
],
// Add to batched arrays using the ORIGINAL ID from Claude's response
toolUseBlocks.push({
type: 'tool_use',
id: toolUseId,
name: toolName,
input: toolArgs,
})

toolResultBlocks.push({
type: 'tool_result',
tool_use_id: toolUseId,
content: JSON.stringify(resultContent),
})
}

// Add ONE assistant message with ALL tool_use blocks
if (toolUseBlocks.length > 0) {
currentMessages.push({
role: 'assistant',
content: toolUseBlocks as unknown as Anthropic.Messages.ContentBlock[],
})
}

// Add ONE user message with ALL tool_result blocks
if (toolResultBlocks.length > 0) {
currentMessages.push({
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: toolUseId,
content: JSON.stringify(resultContent),
} as any,
],
content: toolResultBlocks as unknown as Anthropic.Messages.ContentBlockParam[],
})
}

@@ -1061,7 +1116,7 @@ export const anthropicProvider: ProviderConfig = {
startTime: tc.startTime,
endTime: tc.endTime,
duration: tc.duration,
result: tc.result,
result: tc.result as Record<string, unknown> | undefined,
}))
: undefined,
toolResults: toolResults.length > 0 ? toolResults : undefined,
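The batched turn shape the loop above now builds, as a sketch (IDs illustrative): one assistant message carries every tool_use block, then one user message carries every tool_result block, echoing Claude's original IDs.

const assistantTurn = {
  role: 'assistant',
  content: [
    { type: 'tool_use', id: 'toolu_01A', name: 'search', input: { q: 'sim' } },
    { type: 'tool_use', id: 'toolu_01B', name: 'fetch_page', input: { url: 'https://example.com' } },
  ],
}
const userTurn = {
  role: 'user',
  content: [
    { type: 'tool_result', tool_use_id: 'toolu_01A', content: '{"hits":3}' },
    { type: 'tool_result', tool_use_id: 'toolu_01B', content: '{"ok":true}' },
  ],
}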
@@ -1,26 +1,9 @@
import { createLogger } from '@sim/logger'
import { AzureOpenAI } from 'openai'
import type { ChatCompletionCreateParamsStreaming } from 'openai/resources/chat/completions'
import { env } from '@/lib/core/config/env'
import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import {
checkForForcedToolUsage,
createReadableStreamFromAzureOpenAIStream,
} from '@/providers/azure-openai/utils'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
import type {
ProviderConfig,
ProviderRequest,
ProviderResponse,
TimeSegment,
} from '@/providers/types'
import {
calculateCost,
prepareToolExecution,
prepareToolsWithUsageControl,
} from '@/providers/utils'
import { executeTool } from '@/tools'
import { executeResponsesProviderRequest } from '@/providers/openai/core'
import type { ProviderConfig, ProviderRequest, ProviderResponse } from '@/providers/types'

const logger = createLogger('AzureOpenAIProvider')

@@ -38,16 +21,6 @@ export const azureOpenAIProvider: ProviderConfig = {
executeRequest: async (
request: ProviderRequest
): Promise<ProviderResponse | StreamingExecution> => {
logger.info('Preparing Azure OpenAI request', {
model: request.model,
hasSystemPrompt: !!request.systemPrompt,
hasMessages: !!request.messages?.length,
hasTools: !!request.tools?.length,
toolCount: request.tools?.length || 0,
hasResponseFormat: !!request.responseFormat,
stream: !!request.stream,
})

const azureEndpoint = request.azureEndpoint || env.AZURE_OPENAI_ENDPOINT
const azureApiVersion =
request.azureApiVersion || env.AZURE_OPENAI_API_VERSION || '2024-07-01-preview'
@@ -58,520 +31,24 @@ export const azureOpenAIProvider: ProviderConfig = {
)
}

const azureOpenAI = new AzureOpenAI({
apiKey: request.apiKey,
apiVersion: azureApiVersion,
endpoint: azureEndpoint,
})

const allMessages = []

if (request.systemPrompt) {
allMessages.push({
role: 'system',
content: request.systemPrompt,
})
if (!request.apiKey) {
throw new Error('API key is required for Azure OpenAI')
}

if (request.context) {
allMessages.push({
role: 'user',
content: request.context,
})
}

if (request.messages) {
allMessages.push(...request.messages)
}

const tools = request.tools?.length
? request.tools.map((tool) => ({
type: 'function',
function: {
name: tool.id,
description: tool.description,
parameters: tool.parameters,
},
}))
: undefined

const deploymentName = request.model.replace('azure/', '')
const payload: any = {
model: deploymentName,
messages: allMessages,
}
const apiUrl = `${azureEndpoint.replace(/\/$/, '')}/openai/v1/responses?api-version=${azureApiVersion}`

if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens

if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort
if (request.verbosity !== undefined) payload.verbosity = request.verbosity

if (request.responseFormat) {
payload.response_format = {
type: 'json_schema',
json_schema: {
name: request.responseFormat.name || 'response_schema',
schema: request.responseFormat.schema || request.responseFormat,
strict: request.responseFormat.strict !== false,
},
}

logger.info('Added JSON schema response format to Azure OpenAI request')
}

let preparedTools: ReturnType<typeof prepareToolsWithUsageControl> | null = null

if (tools?.length) {
preparedTools = prepareToolsWithUsageControl(tools, request.tools, logger, 'azure-openai')
const { tools: filteredTools, toolChoice } = preparedTools

if (filteredTools?.length && toolChoice) {
payload.tools = filteredTools
payload.tool_choice = toolChoice

logger.info('Azure OpenAI request configuration:', {
toolCount: filteredTools.length,
toolChoice:
typeof toolChoice === 'string'
? toolChoice
: toolChoice.type === 'function'
? `force:${toolChoice.function.name}`
: toolChoice.type === 'tool'
? `force:${toolChoice.name}`
: toolChoice.type === 'any'
? `force:${toolChoice.any?.name || 'unknown'}`
: 'unknown',
model: deploymentName,
})
}
}

const providerStartTime = Date.now()
const providerStartTimeISO = new Date(providerStartTime).toISOString()

try {
if (request.stream && (!tools || tools.length === 0)) {
logger.info('Using streaming response for Azure OpenAI request')

const streamingParams: ChatCompletionCreateParamsStreaming = {
...payload,
stream: true,
stream_options: { include_usage: true },
}
const streamResponse = await azureOpenAI.chat.completions.create(streamingParams)

const streamingResult = {
stream: createReadableStreamFromAzureOpenAIStream(streamResponse, (content, usage) => {
streamingResult.execution.output.content = content
streamingResult.execution.output.tokens = {
input: usage.prompt_tokens,
output: usage.completion_tokens,
total: usage.total_tokens,
}

const costResult = calculateCost(
request.model,
usage.prompt_tokens,
usage.completion_tokens
)
streamingResult.execution.output.cost = {
input: costResult.input,
output: costResult.output,
total: costResult.total,
}

const streamEndTime = Date.now()
const streamEndTimeISO = new Date(streamEndTime).toISOString()

if (streamingResult.execution.output.providerTiming) {
streamingResult.execution.output.providerTiming.endTime = streamEndTimeISO
streamingResult.execution.output.providerTiming.duration =
streamEndTime - providerStartTime

if (streamingResult.execution.output.providerTiming.timeSegments?.[0]) {
streamingResult.execution.output.providerTiming.timeSegments[0].endTime =
streamEndTime
streamingResult.execution.output.providerTiming.timeSegments[0].duration =
streamEndTime - providerStartTime
}
}
}),
execution: {
success: true,
output: {
content: '',
model: request.model,
tokens: { input: 0, output: 0, total: 0 },
toolCalls: undefined,
providerTiming: {
startTime: providerStartTimeISO,
endTime: new Date().toISOString(),
duration: Date.now() - providerStartTime,
timeSegments: [
{
type: 'model',
name: 'Streaming response',
startTime: providerStartTime,
endTime: Date.now(),
duration: Date.now() - providerStartTime,
},
],
},
cost: { input: 0, output: 0, total: 0 },
},
logs: [],
metadata: {
startTime: providerStartTimeISO,
endTime: new Date().toISOString(),
duration: Date.now() - providerStartTime,
},
},
} as StreamingExecution

return streamingResult as StreamingExecution
}

const initialCallTime = Date.now()
const originalToolChoice = payload.tool_choice
const forcedTools = preparedTools?.forcedTools || []
let usedForcedTools: string[] = []

let currentResponse = await azureOpenAI.chat.completions.create(payload)
const firstResponseTime = Date.now() - initialCallTime

let content = currentResponse.choices[0]?.message?.content || ''
const tokens = {
input: currentResponse.usage?.prompt_tokens || 0,
output: currentResponse.usage?.completion_tokens || 0,
total: currentResponse.usage?.total_tokens || 0,
}
const toolCalls = []
const toolResults = []
const currentMessages = [...allMessages]
let iterationCount = 0
let modelTime = firstResponseTime
let toolsTime = 0
let hasUsedForcedTool = false

const timeSegments: TimeSegment[] = [
{
type: 'model',
name: 'Initial response',
startTime: initialCallTime,
endTime: initialCallTime + firstResponseTime,
duration: firstResponseTime,
},
]

const firstCheckResult = checkForForcedToolUsage(
currentResponse,
originalToolChoice,
logger,
forcedTools,
usedForcedTools
)
hasUsedForcedTool = firstCheckResult.hasUsedForcedTool
usedForcedTools = firstCheckResult.usedForcedTools

while (iterationCount < MAX_TOOL_ITERATIONS) {
if (currentResponse.choices[0]?.message?.content) {
content = currentResponse.choices[0].message.content
}

const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
break
}

logger.info(
`Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_TOOL_ITERATIONS})`
)

const toolsStartTime = Date.now()

const toolExecutionPromises = toolCallsInResponse.map(async (toolCall) => {
const toolCallStartTime = Date.now()
const toolName = toolCall.function.name

try {
const toolArgs = JSON.parse(toolCall.function.arguments)
const tool = request.tools?.find((t) => t.id === toolName)

if (!tool) return null

const { toolParams, executionParams } = prepareToolExecution(tool, toolArgs, request)
const result = await executeTool(toolName, executionParams)
const toolCallEndTime = Date.now()

return {
toolCall,
toolName,
toolParams,
result,
startTime: toolCallStartTime,
endTime: toolCallEndTime,
duration: toolCallEndTime - toolCallStartTime,
}
} catch (error) {
const toolCallEndTime = Date.now()
logger.error('Error processing tool call:', { error, toolName })

return {
toolCall,
toolName,
toolParams: {},
result: {
success: false,
output: undefined,
error: error instanceof Error ? error.message : 'Tool execution failed',
},
startTime: toolCallStartTime,
endTime: toolCallEndTime,
duration: toolCallEndTime - toolCallStartTime,
}
}
})

const executionResults = await Promise.allSettled(toolExecutionPromises)

currentMessages.push({
role: 'assistant',
content: null,
tool_calls: toolCallsInResponse.map((tc) => ({
id: tc.id,
type: 'function',
function: {
name: tc.function.name,
arguments: tc.function.arguments,
},
})),
})

for (const settledResult of executionResults) {
if (settledResult.status === 'rejected' || !settledResult.value) continue

const { toolCall, toolName, toolParams, result, startTime, endTime, duration } =
settledResult.value

timeSegments.push({
type: 'tool',
name: toolName,
startTime: startTime,
endTime: endTime,
duration: duration,
})

let resultContent: any
if (result.success) {
toolResults.push(result.output)
resultContent = result.output
} else {
resultContent = {
error: true,
message: result.error || 'Tool execution failed',
tool: toolName,
}
}

toolCalls.push({
name: toolName,
arguments: toolParams,
startTime: new Date(startTime).toISOString(),
endTime: new Date(endTime).toISOString(),
duration: duration,
result: resultContent,
success: result.success,
})

currentMessages.push({
role: 'tool',
tool_call_id: toolCall.id,
content: JSON.stringify(resultContent),
})
}

const thisToolsTime = Date.now() - toolsStartTime
toolsTime += thisToolsTime

const nextPayload = {
...payload,
messages: currentMessages,
}

if (typeof originalToolChoice === 'object' && hasUsedForcedTool && forcedTools.length > 0) {
const remainingTools = forcedTools.filter((tool) => !usedForcedTools.includes(tool))

if (remainingTools.length > 0) {
nextPayload.tool_choice = {
type: 'function',
function: { name: remainingTools[0] },
}
logger.info(`Forcing next tool: ${remainingTools[0]}`)
} else {
nextPayload.tool_choice = 'auto'
logger.info('All forced tools have been used, switching to auto tool_choice')
}
}

const nextModelStartTime = Date.now()
currentResponse = await azureOpenAI.chat.completions.create(nextPayload)

const nextCheckResult = checkForForcedToolUsage(
currentResponse,
nextPayload.tool_choice,
logger,
forcedTools,
usedForcedTools
)
hasUsedForcedTool = nextCheckResult.hasUsedForcedTool
usedForcedTools = nextCheckResult.usedForcedTools

const nextModelEndTime = Date.now()
const thisModelTime = nextModelEndTime - nextModelStartTime

timeSegments.push({
type: 'model',
name: `Model response (iteration ${iterationCount + 1})`,
startTime: nextModelStartTime,
endTime: nextModelEndTime,
duration: thisModelTime,
})

modelTime += thisModelTime

if (currentResponse.choices[0]?.message?.content) {
content = currentResponse.choices[0].message.content
}

if (currentResponse.usage) {
tokens.input += currentResponse.usage.prompt_tokens || 0
tokens.output += currentResponse.usage.completion_tokens || 0
tokens.total += currentResponse.usage.total_tokens || 0
}

iterationCount++
}

if (request.stream) {
logger.info('Using streaming for final response after tool processing')

const accumulatedCost = calculateCost(request.model, tokens.input, tokens.output)

const streamingParams: ChatCompletionCreateParamsStreaming = {
...payload,
messages: currentMessages,
tool_choice: 'auto',
stream: true,
stream_options: { include_usage: true },
}
const streamResponse = await azureOpenAI.chat.completions.create(streamingParams)

const streamingResult = {
stream: createReadableStreamFromAzureOpenAIStream(streamResponse, (content, usage) => {
streamingResult.execution.output.content = content
streamingResult.execution.output.tokens = {
input: tokens.input + usage.prompt_tokens,
output: tokens.output + usage.completion_tokens,
total: tokens.total + usage.total_tokens,
}

const streamCost = calculateCost(
request.model,
usage.prompt_tokens,
usage.completion_tokens
)
streamingResult.execution.output.cost = {
input: accumulatedCost.input + streamCost.input,
output: accumulatedCost.output + streamCost.output,
total: accumulatedCost.total + streamCost.total,
}
}),
execution: {
success: true,
output: {
content: '',
model: request.model,
tokens: {
input: tokens.input,
output: tokens.output,
total: tokens.total,
},
toolCalls:
toolCalls.length > 0
? {
list: toolCalls,
count: toolCalls.length,
}
: undefined,
providerTiming: {
startTime: providerStartTimeISO,
endTime: new Date().toISOString(),
duration: Date.now() - providerStartTime,
modelTime: modelTime,
toolsTime: toolsTime,
firstResponseTime: firstResponseTime,
iterations: iterationCount + 1,
timeSegments: timeSegments,
},
cost: {
input: accumulatedCost.input,
output: accumulatedCost.output,
total: accumulatedCost.total,
},
},
logs: [],
metadata: {
startTime: providerStartTimeISO,
endTime: new Date().toISOString(),
duration: Date.now() - providerStartTime,
},
},
} as StreamingExecution

return streamingResult as StreamingExecution
}

const providerEndTime = Date.now()
const providerEndTimeISO = new Date(providerEndTime).toISOString()
const totalDuration = providerEndTime - providerStartTime

return {
content,
model: request.model,
tokens,
toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
toolResults: toolResults.length > 0 ? toolResults : undefined,
timing: {
startTime: providerStartTimeISO,
endTime: providerEndTimeISO,
duration: totalDuration,
modelTime: modelTime,
toolsTime: toolsTime,
firstResponseTime: firstResponseTime,
iterations: iterationCount + 1,
timeSegments: timeSegments,
},
}
} catch (error) {
const providerEndTime = Date.now()
const providerEndTimeISO = new Date(providerEndTime).toISOString()
const totalDuration = providerEndTime - providerStartTime

logger.error('Error in Azure OpenAI request:', {
error,
duration: totalDuration,
})

const enhancedError = new Error(error instanceof Error ? error.message : String(error))
// @ts-ignore
enhancedError.timing = {
startTime: providerStartTimeISO,
endTime: providerEndTimeISO,
duration: totalDuration,
}

throw enhancedError
}
return executeResponsesProviderRequest(request, {
providerId: 'azure-openai',
providerLabel: 'Azure OpenAI',
modelName: deploymentName,
endpoint: apiUrl,
headers: {
'Content-Type': 'application/json',
'OpenAI-Beta': 'responses=v1',
'api-key': request.apiKey,
},
logger,
})
},
}

@@ -1,37 +0,0 @@
import type { Logger } from '@sim/logger'
import type { ChatCompletionChunk } from 'openai/resources/chat/completions'
import type { CompletionUsage } from 'openai/resources/completions'
import type { Stream } from 'openai/streaming'
import { checkForForcedToolUsageOpenAI, createOpenAICompatibleStream } from '@/providers/utils'

/**
* Creates a ReadableStream from an Azure OpenAI streaming response.
* Uses the shared OpenAI-compatible streaming utility.
*/
export function createReadableStreamFromAzureOpenAIStream(
azureOpenAIStream: Stream<ChatCompletionChunk>,
onComplete?: (content: string, usage: CompletionUsage) => void
): ReadableStream {
return createOpenAICompatibleStream(azureOpenAIStream, 'Azure OpenAI', onComplete)
}

/**
* Checks if a forced tool was used in an Azure OpenAI response.
* Uses the shared OpenAI-compatible forced tool usage helper.
*/
export function checkForForcedToolUsage(
response: any,
toolChoice: string | { type: string; function?: { name: string }; name?: string; any?: any },
_logger: Logger,
forcedTools: string[],
usedForcedTools: string[]
): { hasUsedForcedTool: boolean; usedForcedTools: string[] } {
return checkForForcedToolUsageOpenAI(
response,
toolChoice,
'Azure OpenAI',
forcedTools,
usedForcedTools,
_logger
)
}
@@ -67,8 +67,17 @@ export function checkForForcedToolUsage(
return null
}

/**
* Generates a unique tool use ID for Bedrock.
* AWS Bedrock requires toolUseId to be 1-64 characters, pattern [a-zA-Z0-9_-]+
*/
export function generateToolUseId(toolName: string): string {
return `${toolName}-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`
const timestamp = Date.now().toString(36) // Base36 timestamp (9 chars)
const random = Math.random().toString(36).substring(2, 7) // 5 random chars
const suffix = `-${timestamp}-${random}` // ~15 chars
const maxNameLength = 64 - suffix.length
const truncatedName = toolName.substring(0, maxNameLength).replace(/[^a-zA-Z0-9_-]/g, '_')
return `${truncatedName}${suffix}`
}

/**
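Why the new implementation stays within Bedrock's 64-character limit, as a sketch of the length budget:

// timestamp: Date.now().toString(36) is ~8 chars at the current epoch
// random: Math.random().toString(36).substring(2, 7) is 5 chars
// suffix: `-${timestamp}-${random}` is ~15 chars including separators
// maxNameLength = 64 - suffix.length, so truncatedName + suffix <= 64,
// and the replace() leaves only [a-zA-Z0-9_-], matching Bedrock's pattern.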
@@ -76,7 +76,7 @@ export const deepseekProvider: ProviderConfig = {
: undefined

const payload: any = {
model: 'deepseek-chat',
model: request.model,
messages: allMessages,
}

@@ -20,7 +20,7 @@ import {
convertUsageMetadata,
createReadableStreamFromGeminiStream,
ensureStructResponse,
extractFunctionCallPart,
extractAllFunctionCallParts,
extractTextContent,
mapToThinkingLevel,
} from '@/providers/google/utils'
@@ -32,7 +32,7 @@ import {
prepareToolsWithUsageControl,
} from '@/providers/utils'
import { executeTool } from '@/tools'
import type { ExecutionState, GeminiProviderType, GeminiUsage, ParsedFunctionCall } from './types'
import type { ExecutionState, GeminiProviderType, GeminiUsage } from './types'

/**
* Creates initial execution state
@@ -79,101 +79,168 @@ function createInitialState(
}

/**
* Executes a tool call and updates state
* Executes multiple tool calls in parallel and updates state.
* Per Gemini docs, all function calls from a single response should be executed
* together, with one model message containing all function calls and one user
* message containing all function responses.
*/
async function executeToolCall(
functionCallPart: Part,
functionCall: ParsedFunctionCall,
async function executeToolCallsBatch(
functionCallParts: Part[],
request: ProviderRequest,
state: ExecutionState,
forcedTools: string[],
logger: ReturnType<typeof createLogger>
): Promise<{ success: boolean; state: ExecutionState }> {
const toolCallStartTime = Date.now()
const toolName = functionCall.name

const tool = request.tools?.find((t) => t.id === toolName)
if (!tool) {
logger.warn(`Tool ${toolName} not found in registry, skipping`)
if (functionCallParts.length === 0) {
return { success: false, state }
}

try {
const { toolParams, executionParams } = prepareToolExecution(tool, functionCall.args, request)
const result = await executeTool(toolName, executionParams)
const toolCallEndTime = Date.now()
const duration = toolCallEndTime - toolCallStartTime
const executionPromises = functionCallParts.map(async (part) => {
const toolCallStartTime = Date.now()
const functionCall = part.functionCall!
const toolName = functionCall.name ?? ''
const args = (functionCall.args ?? {}) as Record<string, unknown>

const resultContent: Record<string, unknown> = result.success
? ensureStructResponse(result.output)
: { error: true, message: result.error || 'Tool execution failed', tool: toolName }

const toolCall: FunctionCallResponse = {
name: toolName,
arguments: toolParams,
startTime: new Date(toolCallStartTime).toISOString(),
endTime: new Date(toolCallEndTime).toISOString(),
duration,
result: resultContent,
const tool = request.tools?.find((t) => t.id === toolName)
if (!tool) {
logger.warn(`Tool ${toolName} not found in registry, skipping`)
return {
success: false,
part,
toolName,
args,
resultContent: { error: true, message: `Tool ${toolName} not found`, tool: toolName },
toolParams: {},
startTime: toolCallStartTime,
endTime: Date.now(),
duration: Date.now() - toolCallStartTime,
}
}

const updatedContents: Content[] = [
...state.contents,
{
role: 'model',
parts: [functionCallPart],
},
{
role: 'user',
parts: [
{
functionResponse: {
name: functionCall.name,
response: resultContent,
},
},
],
},
]
try {
const { toolParams, executionParams } = prepareToolExecution(tool, args, request)
const result = await executeTool(toolName, executionParams)
const toolCallEndTime = Date.now()
const duration = toolCallEndTime - toolCallStartTime

const forcedToolCheck = checkForForcedToolUsage(
[{ name: functionCall.name, args: functionCall.args }],
state.currentToolConfig,
forcedTools,
state.usedForcedTools
)
const resultContent: Record<string, unknown> = result.success
? ensureStructResponse(result.output)
: { error: true, message: result.error || 'Tool execution failed', tool: toolName }

return {
success: true,
state: {
...state,
contents: updatedContents,
toolCalls: [...state.toolCalls, toolCall],
toolResults: result.success
? [...state.toolResults, result.output as Record<string, unknown>]
: state.toolResults,
toolsTime: state.toolsTime + duration,
timeSegments: [
...state.timeSegments,
{
type: 'tool',
name: toolName,
startTime: toolCallStartTime,
endTime: toolCallEndTime,
duration,
},
],
usedForcedTools: forcedToolCheck?.usedForcedTools ?? state.usedForcedTools,
currentToolConfig: forcedToolCheck?.nextToolConfig ?? state.currentToolConfig,
},
return {
success: result.success,
part,
toolName,
args,
resultContent,
toolParams,
result,
startTime: toolCallStartTime,
endTime: toolCallEndTime,
duration,
}
} catch (error) {
const toolCallEndTime = Date.now()
logger.error('Error processing function call:', {
error: error instanceof Error ? error.message : String(error),
functionName: toolName,
})
return {
success: false,
part,
toolName,
args,
resultContent: {
error: true,
message: error instanceof Error ? error.message : 'Tool execution failed',
tool: toolName,
},
toolParams: {},
startTime: toolCallStartTime,
endTime: toolCallEndTime,
duration: toolCallEndTime - toolCallStartTime,
}
}
} catch (error) {
logger.error('Error processing function call:', {
error: error instanceof Error ? error.message : String(error),
functionName: toolName,
})
})

const results = await Promise.all(executionPromises)

// Check if at least one tool was found (not all failed due to missing tools)
const hasValidResults = results.some((r) => r.result !== undefined)
if (!hasValidResults && results.every((r) => !r.success)) {
return { success: false, state }
}

// Build batched messages per Gemini spec:
// ONE model message with ALL function call parts
// ONE user message with ALL function responses
const modelParts: Part[] = results.map((r) => r.part)
const userParts: Part[] = results.map((r) => ({
functionResponse: {
name: r.toolName,
response: r.resultContent,
},
}))

const updatedContents: Content[] = [
...state.contents,
{ role: 'model', parts: modelParts },
{ role: 'user', parts: userParts },
]

// Collect all tool calls and results
const newToolCalls: FunctionCallResponse[] = []
const newToolResults: Record<string, unknown>[] = []
const newTimeSegments: ExecutionState['timeSegments'] = []
let totalToolsTime = 0

for (const r of results) {
newToolCalls.push({
name: r.toolName,
arguments: r.toolParams,
startTime: new Date(r.startTime).toISOString(),
endTime: new Date(r.endTime).toISOString(),
duration: r.duration,
result: r.resultContent,
})

if (r.success && r.result?.output) {
newToolResults.push(r.result.output as Record<string, unknown>)
}

newTimeSegments.push({
type: 'tool',
name: r.toolName,
startTime: r.startTime,
endTime: r.endTime,
duration: r.duration,
})

totalToolsTime += r.duration
}

// Check forced tool usage for all executed tools
const executedToolsInfo = results.map((r) => ({ name: r.toolName, args: r.args }))
const forcedToolCheck = checkForForcedToolUsage(
executedToolsInfo,
state.currentToolConfig,
forcedTools,
state.usedForcedTools
)

return {
success: true,
state: {
...state,
contents: updatedContents,
toolCalls: [...state.toolCalls, ...newToolCalls],
toolResults: [...state.toolResults, ...newToolResults],
toolsTime: state.toolsTime + totalToolsTime,
timeSegments: [...state.timeSegments, ...newTimeSegments],
usedForcedTools: forcedToolCheck?.usedForcedTools ?? state.usedForcedTools,
currentToolConfig: forcedToolCheck?.nextToolConfig ?? state.currentToolConfig,
},
}
}

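The batched Gemini history the function above appends, as a sketch (names and values illustrative): ONE model turn with all functionCall parts, then ONE user turn with all functionResponse parts.

const modelTurn = {
  role: 'model',
  parts: [
    { functionCall: { name: 'get_weather', args: { city: 'Oslo' } } },
    { functionCall: { name: 'get_time', args: { tz: 'CET' } } },
  ],
}
const userTurn = {
  role: 'user',
  parts: [
    { functionResponse: { name: 'get_weather', response: { tempC: 4 } } },
    { functionResponse: { name: 'get_time', response: { time: '12:00' } } },
  ],
}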
/**
@@ -506,27 +573,25 @@ export async function executeGeminiRequest(
// Tool execution loop
const functionCalls = response.functionCalls
if (functionCalls?.length) {
logger.info(`Received function call from Gemini: ${functionCalls[0].name}`)
const functionNames = functionCalls.map((fc) => fc.name).join(', ')
logger.info(`Received ${functionCalls.length} function call(s) from Gemini: ${functionNames}`)

while (state.iterationCount < MAX_TOOL_ITERATIONS) {
const functionCallPart = extractFunctionCallPart(currentResponse.candidates?.[0])
if (!functionCallPart?.functionCall) {
// Extract ALL function call parts from the response (Gemini can return multiple)
const functionCallParts = extractAllFunctionCallParts(currentResponse.candidates?.[0])
if (functionCallParts.length === 0) {
content = extractTextContent(currentResponse.candidates?.[0])
break
}

const functionCall: ParsedFunctionCall = {
name: functionCallPart.functionCall.name ?? '',
args: (functionCallPart.functionCall.args ?? {}) as Record<string, unknown>,
}

const callNames = functionCallParts.map((p) => p.functionCall?.name ?? 'unknown').join(', ')
logger.info(
`Processing function call: ${functionCall.name} (iteration ${state.iterationCount + 1})`
`Processing ${functionCallParts.length} function call(s): ${callNames} (iteration ${state.iterationCount + 1})`
)

const { success, state: updatedState } = await executeToolCall(
functionCallPart,
functionCall,
// Execute ALL function calls in this batch
const { success, state: updatedState } = await executeToolCallsBatch(
functionCallParts,
request,
state,
forcedTools,

@@ -109,6 +109,7 @@ export function extractFunctionCall(candidate: Candidate | undefined): ParsedFun

/**
* Extracts the full Part containing the function call (preserves thoughtSignature)
* @deprecated Use extractAllFunctionCallParts for proper multi-tool handling
*/
export function extractFunctionCallPart(candidate: Candidate | undefined): Part | null {
if (!candidate?.content?.parts) return null
@@ -122,6 +123,17 @@ export function extractFunctionCallPart(candidate: Candidate | undefined): Part
return null
}

/**
* Extracts ALL Parts containing function calls from a candidate.
* Gemini can return multiple function calls in a single response,
* and all should be executed before continuing the conversation.
*/
export function extractAllFunctionCallParts(candidate: Candidate | undefined): Part[] {
if (!candidate?.content?.parts) return []

return candidate.content.parts.filter((part) => part.functionCall)
}

/**
* Converts usage metadata from SDK response to our format.
* Per Gemini docs, total = promptTokenCount + candidatesTokenCount + toolUsePromptTokenCount + thoughtsTokenCount
@@ -320,6 +320,7 @@ export const groqProvider: ProviderConfig = {
currentMessages.push({
role: 'tool',
tool_call_id: toolCall.id,
name: toolName,
content: JSON.stringify(resultContent),
})
}

@@ -383,6 +383,7 @@ export const mistralProvider: ProviderConfig = {
currentMessages.push({
role: 'tool',
tool_call_id: toolCall.id,
name: toolName,
content: JSON.stringify(resultContent),
})
}

apps/sim/providers/openai/core.ts (new file, 807 lines)
@@ -0,0 +1,807 @@
import type { Logger } from '@sim/logger'
import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import type { Message, ProviderRequest, ProviderResponse, TimeSegment } from '@/providers/types'
import {
calculateCost,
prepareToolExecution,
prepareToolsWithUsageControl,
trackForcedToolUsage,
} from '@/providers/utils'
import { executeTool } from '@/tools'
import {
buildResponsesInputFromMessages,
convertResponseOutputToInputItems,
convertToolsToResponses,
createReadableStreamFromResponses,
extractResponseText,
extractResponseToolCalls,
parseResponsesUsage,
type ResponsesInputItem,
type ResponsesToolCall,
toResponsesToolChoice,
} from './utils'

type PreparedTools = ReturnType<typeof prepareToolsWithUsageControl>
type ToolChoice = PreparedTools['toolChoice']

/**
* Recursively enforces OpenAI strict mode requirements on a JSON schema.
* - Sets additionalProperties: false on all object types.
* - Ensures required includes ALL property keys.
*/
function enforceStrictSchema(schema: any): any {
if (!schema || typeof schema !== 'object') return schema

const result = { ...schema }

// If this is an object type, enforce strict requirements
if (result.type === 'object') {
result.additionalProperties = false

// Recursively process properties and ensure required includes all keys
if (result.properties && typeof result.properties === 'object') {
const propKeys = Object.keys(result.properties)
result.required = propKeys // Strict mode requires ALL properties
result.properties = Object.fromEntries(
Object.entries(result.properties).map(([key, value]) => [key, enforceStrictSchema(value)])
)
}
}

// Handle array items
if (result.type === 'array' && result.items) {
result.items = enforceStrictSchema(result.items)
}

// Handle anyOf, oneOf, allOf
for (const keyword of ['anyOf', 'oneOf', 'allOf']) {
if (Array.isArray(result[keyword])) {
result[keyword] = result[keyword].map(enforceStrictSchema)
}
}

// Handle $defs / definitions
for (const defKey of ['$defs', 'definitions']) {
if (result[defKey] && typeof result[defKey] === 'object') {
result[defKey] = Object.fromEntries(
Object.entries(result[defKey]).map(([key, value]) => [key, enforceStrictSchema(value)])
)
}
}

return result
}

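// Example of the transform above (illustrative input/output, not part of the file):
// in:  { type: 'object', properties: { a: { type: 'string' } } }
// out: { type: 'object', additionalProperties: false, required: ['a'],
//        properties: { a: { type: 'string' } } }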
export interface ResponsesProviderConfig {
providerId: string
providerLabel: string
modelName: string
endpoint: string
headers: Record<string, string>
logger: Logger
}

/**
* Executes a Responses API request with tool-loop handling and streaming support.
*/
export async function executeResponsesProviderRequest(
request: ProviderRequest,
config: ResponsesProviderConfig
): Promise<ProviderResponse | StreamingExecution> {
const { logger } = config

logger.info(`Preparing ${config.providerLabel} request`, {
model: request.model,
hasSystemPrompt: !!request.systemPrompt,
hasMessages: !!request.messages?.length,
hasTools: !!request.tools?.length,
toolCount: request.tools?.length || 0,
hasResponseFormat: !!request.responseFormat,
stream: !!request.stream,
})

const allMessages: Message[] = []

if (request.systemPrompt) {
allMessages.push({
role: 'system',
content: request.systemPrompt,
})
}

if (request.context) {
allMessages.push({
role: 'user',
content: request.context,
})
}

if (request.messages) {
allMessages.push(...request.messages)
}

const initialInput = buildResponsesInputFromMessages(allMessages)

const basePayload: Record<string, any> = {
model: config.modelName,
}

if (request.temperature !== undefined) basePayload.temperature = request.temperature
if (request.maxTokens != null) basePayload.max_output_tokens = request.maxTokens

if (request.reasoningEffort !== undefined) {
basePayload.reasoning = {
effort: request.reasoningEffort,
summary: 'auto',
}
}

if (request.verbosity !== undefined) {
basePayload.text = {
...(basePayload.text ?? {}),
verbosity: request.verbosity,
}
}

// Store response format config - for Azure with tools, we defer applying it until after tool calls complete
let deferredTextFormat: { type: string; name: string; schema: any; strict: boolean } | undefined
const hasTools = !!request.tools?.length
const isAzure = config.providerId === 'azure-openai'

if (request.responseFormat) {
const isStrict = request.responseFormat.strict !== false
const rawSchema = request.responseFormat.schema || request.responseFormat
// OpenAI strict mode requires additionalProperties: false on ALL nested objects
const cleanedSchema = isStrict ? enforceStrictSchema(rawSchema) : rawSchema

const textFormat = {
type: 'json_schema' as const,
name: request.responseFormat.name || 'response_schema',
schema: cleanedSchema,
strict: isStrict,
}

// Azure OpenAI has issues combining tools + response_format in the same request
// Defer the format until after tool calls complete for Azure
if (isAzure && hasTools) {
deferredTextFormat = textFormat
logger.info(
`Deferring JSON schema response format for ${config.providerLabel} (will apply after tool calls complete)`
)
} else {
basePayload.text = {
...(basePayload.text ?? {}),
format: textFormat,
}
logger.info(`Added JSON schema response format to ${config.providerLabel} request`)
}
}

const tools = request.tools?.length
? request.tools.map((tool) => ({
type: 'function',
function: {
name: tool.id,
description: tool.description,
parameters: tool.parameters,
},
}))
: undefined

let preparedTools: PreparedTools | null = null
let responsesToolChoice: ReturnType<typeof toResponsesToolChoice> | undefined
let trackingToolChoice: ToolChoice | undefined

if (tools?.length) {
preparedTools = prepareToolsWithUsageControl(tools, request.tools, logger, config.providerId)
const { tools: filteredTools, toolChoice } = preparedTools
trackingToolChoice = toolChoice

if (filteredTools?.length) {
const convertedTools = convertToolsToResponses(filteredTools)
if (!convertedTools.length) {
throw new Error('All tools have empty names')
}

basePayload.tools = convertedTools
basePayload.parallel_tool_calls = true
}

if (toolChoice) {
responsesToolChoice = toResponsesToolChoice(toolChoice)
if (responsesToolChoice) {
basePayload.tool_choice = responsesToolChoice
}

logger.info(`${config.providerLabel} request configuration:`, {
toolCount: filteredTools?.length || 0,
toolChoice:
typeof toolChoice === 'string'
? toolChoice
: toolChoice.type === 'function'
? `force:${toolChoice.function?.name}`
: toolChoice.type === 'tool'
? `force:${toolChoice.name}`
: toolChoice.type === 'any'
? `force:${toolChoice.any?.name || 'unknown'}`
: 'unknown',
model: config.modelName,
})
}
}

const createRequestBody = (input: ResponsesInputItem[], overrides: Record<string, any> = {}) => ({
...basePayload,
input,
...overrides,
})

const parseErrorResponse = async (response: Response): Promise<string> => {
const text = await response.text()
try {
const payload = JSON.parse(text)
return payload?.error?.message || text
} catch {
return text
}
}

const postResponses = async (body: Record<string, any>) => {
const response = await fetch(config.endpoint, {
method: 'POST',
headers: config.headers,
body: JSON.stringify(body),
})

if (!response.ok) {
const message = await parseErrorResponse(response)
throw new Error(`${config.providerLabel} API error (${response.status}): ${message}`)
}

return response.json()
}

const providerStartTime = Date.now()
const providerStartTimeISO = new Date(providerStartTime).toISOString()

try {
if (request.stream && (!tools || tools.length === 0)) {
logger.info(`Using streaming response for ${config.providerLabel} request`)

const streamResponse = await fetch(config.endpoint, {
method: 'POST',
headers: config.headers,
body: JSON.stringify(createRequestBody(initialInput, { stream: true })),
})

if (!streamResponse.ok) {
const message = await parseErrorResponse(streamResponse)
throw new Error(`${config.providerLabel} API error (${streamResponse.status}): ${message}`)
}

const streamingResult = {
stream: createReadableStreamFromResponses(streamResponse, (content, usage) => {
streamingResult.execution.output.content = content
streamingResult.execution.output.tokens = {
input: usage?.promptTokens || 0,
output: usage?.completionTokens || 0,
total: usage?.totalTokens || 0,
}

const costResult = calculateCost(
request.model,
usage?.promptTokens || 0,
usage?.completionTokens || 0
)
streamingResult.execution.output.cost = {
input: costResult.input,
output: costResult.output,
total: costResult.total,
}

const streamEndTime = Date.now()
const streamEndTimeISO = new Date(streamEndTime).toISOString()

if (streamingResult.execution.output.providerTiming) {
streamingResult.execution.output.providerTiming.endTime = streamEndTimeISO
streamingResult.execution.output.providerTiming.duration =
streamEndTime - providerStartTime

if (streamingResult.execution.output.providerTiming.timeSegments?.[0]) {
streamingResult.execution.output.providerTiming.timeSegments[0].endTime =
streamEndTime
streamingResult.execution.output.providerTiming.timeSegments[0].duration =
|
||||
streamEndTime - providerStartTime
|
||||
}
|
||||
}
|
||||
}),
|
||||
execution: {
|
||||
success: true,
|
||||
output: {
|
||||
content: '',
|
||||
model: request.model,
|
||||
tokens: { input: 0, output: 0, total: 0 },
|
||||
toolCalls: undefined,
|
||||
providerTiming: {
|
||||
startTime: providerStartTimeISO,
|
||||
endTime: new Date().toISOString(),
|
||||
duration: Date.now() - providerStartTime,
|
||||
timeSegments: [
|
||||
{
|
||||
type: 'model',
|
||||
name: 'Streaming response',
|
||||
startTime: providerStartTime,
|
||||
endTime: Date.now(),
|
||||
duration: Date.now() - providerStartTime,
|
||||
},
|
||||
],
|
||||
},
|
||||
cost: { input: 0, output: 0, total: 0 },
|
||||
},
|
||||
logs: [],
|
||||
metadata: {
|
||||
startTime: providerStartTimeISO,
|
||||
endTime: new Date().toISOString(),
|
||||
duration: Date.now() - providerStartTime,
|
||||
},
|
||||
},
|
||||
} as StreamingExecution
|
||||
|
||||
return streamingResult as StreamingExecution
|
||||
}
|
||||
|
||||
const initialCallTime = Date.now()
|
||||
const forcedTools = preparedTools?.forcedTools || []
|
||||
let usedForcedTools: string[] = []
|
||||
let hasUsedForcedTool = false
|
||||
let currentToolChoice = responsesToolChoice
|
||||
let currentTrackingToolChoice = trackingToolChoice
|
||||
|
||||
const checkForForcedToolUsage = (
|
||||
toolCallsInResponse: ResponsesToolCall[],
|
||||
toolChoice: ToolChoice | undefined
|
||||
) => {
|
||||
if (typeof toolChoice === 'object' && toolCallsInResponse.length > 0) {
|
||||
const result = trackForcedToolUsage(
|
||||
toolCallsInResponse,
|
||||
toolChoice,
|
||||
logger,
|
||||
config.providerId,
|
||||
forcedTools,
|
||||
usedForcedTools
|
||||
)
|
||||
hasUsedForcedTool = result.hasUsedForcedTool
|
||||
usedForcedTools = result.usedForcedTools
|
||||
}
|
||||
}
|
||||
|
||||
const currentInput: ResponsesInputItem[] = [...initialInput]
|
||||
let currentResponse = await postResponses(
|
||||
createRequestBody(currentInput, { tool_choice: currentToolChoice })
|
||||
)
|
||||
const firstResponseTime = Date.now() - initialCallTime
|
||||
|
||||
const initialUsage = parseResponsesUsage(currentResponse.usage)
|
||||
const tokens = {
|
||||
input: initialUsage?.promptTokens || 0,
|
||||
output: initialUsage?.completionTokens || 0,
|
||||
total: initialUsage?.totalTokens || 0,
|
||||
}
|
||||
|
||||
const toolCalls = []
|
||||
const toolResults = []
|
||||
let iterationCount = 0
|
||||
let modelTime = firstResponseTime
|
||||
let toolsTime = 0
|
||||
let content = extractResponseText(currentResponse.output) || ''
|
||||
|
||||
const timeSegments: TimeSegment[] = [
|
||||
{
|
||||
type: 'model',
|
||||
name: 'Initial response',
|
||||
startTime: initialCallTime,
|
||||
endTime: initialCallTime + firstResponseTime,
|
||||
duration: firstResponseTime,
|
||||
},
|
||||
]
|
||||
|
||||
checkForForcedToolUsage(
|
||||
extractResponseToolCalls(currentResponse.output),
|
||||
currentTrackingToolChoice
|
||||
)
|
||||
|
||||
while (iterationCount < MAX_TOOL_ITERATIONS) {
|
||||
const responseText = extractResponseText(currentResponse.output)
|
||||
if (responseText) {
|
||||
content = responseText
|
||||
}
|
||||
|
||||
const toolCallsInResponse = extractResponseToolCalls(currentResponse.output)
|
||||
if (!toolCallsInResponse.length) {
|
||||
break
|
||||
}
|
||||
|
||||
const outputInputItems = convertResponseOutputToInputItems(currentResponse.output)
|
||||
if (outputInputItems.length) {
|
||||
currentInput.push(...outputInputItems)
|
||||
}
|
||||
|
||||
logger.info(
|
||||
`Processing ${toolCallsInResponse.length} tool calls in parallel (iteration ${
|
||||
iterationCount + 1
|
||||
}/${MAX_TOOL_ITERATIONS})`
|
||||
)
|
||||
|
||||
const toolsStartTime = Date.now()
|
||||
|
||||
const toolExecutionPromises = toolCallsInResponse.map(async (toolCall) => {
|
||||
const toolCallStartTime = Date.now()
|
||||
const toolName = toolCall.name
|
||||
|
||||
try {
|
||||
const toolArgs = toolCall.arguments ? JSON.parse(toolCall.arguments) : {}
|
||||
const tool = request.tools?.find((t) => t.id === toolName)
|
||||
|
||||
if (!tool) {
|
||||
return null
|
||||
}
|
||||
|
||||
const { toolParams, executionParams } = prepareToolExecution(tool, toolArgs, request)
|
||||
const result = await executeTool(toolName, executionParams)
|
||||
const toolCallEndTime = Date.now()
|
||||
|
||||
return {
|
||||
toolCall,
|
||||
toolName,
|
||||
toolParams,
|
||||
result,
|
||||
startTime: toolCallStartTime,
|
||||
endTime: toolCallEndTime,
|
||||
duration: toolCallEndTime - toolCallStartTime,
|
||||
}
|
||||
} catch (error) {
|
||||
const toolCallEndTime = Date.now()
|
||||
logger.error('Error processing tool call:', { error, toolName })
|
||||
|
||||
return {
|
||||
toolCall,
|
||||
toolName,
|
||||
toolParams: {},
|
||||
result: {
|
||||
success: false,
|
||||
output: undefined,
|
||||
error: error instanceof Error ? error.message : 'Tool execution failed',
|
||||
},
|
||||
startTime: toolCallStartTime,
|
||||
endTime: toolCallEndTime,
|
||||
duration: toolCallEndTime - toolCallStartTime,
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
const executionResults = await Promise.allSettled(toolExecutionPromises)
|
||||
|
||||
for (const settledResult of executionResults) {
|
||||
if (settledResult.status === 'rejected' || !settledResult.value) continue
|
||||
|
||||
const { toolCall, toolName, toolParams, result, startTime, endTime, duration } =
|
||||
settledResult.value
|
||||
|
||||
timeSegments.push({
|
||||
type: 'tool',
|
||||
name: toolName,
|
||||
startTime: startTime,
|
||||
endTime: endTime,
|
||||
duration: duration,
|
||||
})
|
||||
|
||||
let resultContent: any
|
||||
if (result.success) {
|
||||
toolResults.push(result.output)
|
||||
resultContent = result.output
|
||||
} else {
|
||||
resultContent = {
|
||||
error: true,
|
||||
message: result.error || 'Tool execution failed',
|
||||
tool: toolName,
|
||||
}
|
||||
}
|
||||
|
||||
toolCalls.push({
|
||||
name: toolName,
|
||||
arguments: toolParams,
|
||||
startTime: new Date(startTime).toISOString(),
|
||||
endTime: new Date(endTime).toISOString(),
|
||||
duration: duration,
|
||||
result: resultContent,
|
||||
success: result.success,
|
||||
})
|
||||
|
||||
currentInput.push({
|
||||
type: 'function_call_output',
|
||||
call_id: toolCall.id,
|
||||
output: JSON.stringify(resultContent),
|
||||
})
|
||||
}
|
||||
|
||||
const thisToolsTime = Date.now() - toolsStartTime
|
||||
toolsTime += thisToolsTime
|
||||
|
||||
if (typeof currentToolChoice === 'object' && hasUsedForcedTool && forcedTools.length > 0) {
|
||||
const remainingTools = forcedTools.filter((tool) => !usedForcedTools.includes(tool))
|
||||
|
||||
if (remainingTools.length > 0) {
|
||||
currentToolChoice = {
|
||||
type: 'function',
|
||||
name: remainingTools[0],
|
||||
}
|
||||
currentTrackingToolChoice = {
|
||||
type: 'function',
|
||||
function: { name: remainingTools[0] },
|
||||
}
|
||||
logger.info(`Forcing next tool: ${remainingTools[0]}`)
|
||||
} else {
|
||||
currentToolChoice = 'auto'
|
||||
currentTrackingToolChoice = 'auto'
|
||||
logger.info('All forced tools have been used, switching to auto tool_choice')
|
||||
}
|
||||
}
|
||||
|
||||
const nextModelStartTime = Date.now()
|
||||
|
||||
currentResponse = await postResponses(
|
||||
createRequestBody(currentInput, { tool_choice: currentToolChoice })
|
||||
)
|
||||
|
||||
checkForForcedToolUsage(
|
||||
extractResponseToolCalls(currentResponse.output),
|
||||
currentTrackingToolChoice
|
||||
)
|
||||
|
||||
const latestText = extractResponseText(currentResponse.output)
|
||||
if (latestText) {
|
||||
content = latestText
|
||||
}
|
||||
|
||||
const nextModelEndTime = Date.now()
|
||||
const thisModelTime = nextModelEndTime - nextModelStartTime
|
||||
|
||||
timeSegments.push({
|
||||
type: 'model',
|
||||
name: `Model response (iteration ${iterationCount + 1})`,
|
||||
startTime: nextModelStartTime,
|
||||
endTime: nextModelEndTime,
|
||||
duration: thisModelTime,
|
||||
})
|
||||
|
||||
modelTime += thisModelTime
|
||||
|
||||
const usage = parseResponsesUsage(currentResponse.usage)
|
||||
if (usage) {
|
||||
tokens.input += usage.promptTokens
|
||||
tokens.output += usage.completionTokens
|
||||
tokens.total += usage.totalTokens
|
||||
}
|
||||
|
||||
iterationCount++
|
||||
}
|
||||
|
||||
// For Azure with deferred format: make a final call with the response format applied
|
||||
// This happens whenever we have a deferred format, even if no tools were called
|
||||
// (the initial call was made without the format, so we need to apply it now)
|
||||
let appliedDeferredFormat = false
|
||||
if (deferredTextFormat) {
|
||||
logger.info(
|
||||
`Applying deferred JSON schema response format for ${config.providerLabel} (iterationCount: ${iterationCount})`
|
||||
)
|
||||
|
||||
const finalFormatStartTime = Date.now()
|
||||
|
||||
// Determine what input to use for the formatted call
|
||||
let formattedInput: ResponsesInputItem[]
|
||||
|
||||
if (iterationCount > 0) {
|
||||
// Tools were called - include the conversation history with tool results
|
||||
const lastOutputItems = convertResponseOutputToInputItems(currentResponse.output)
|
||||
if (lastOutputItems.length) {
|
||||
currentInput.push(...lastOutputItems)
|
||||
}
|
||||
formattedInput = currentInput
|
||||
} else {
|
||||
// No tools were called - just retry the initial call with format applied
|
||||
// Don't include the model's previous unformatted response
|
||||
formattedInput = initialInput
|
||||
}
|
||||
|
||||
// Make final call with the response format - build payload without tools
|
||||
const finalPayload: Record<string, any> = {
|
||||
model: config.modelName,
|
||||
input: formattedInput,
|
||||
text: {
|
||||
...(basePayload.text ?? {}),
|
||||
format: deferredTextFormat,
|
||||
},
|
||||
}
|
||||
|
||||
// Copy over non-tool related settings
|
||||
if (request.temperature !== undefined) finalPayload.temperature = request.temperature
|
||||
if (request.maxTokens != null) finalPayload.max_output_tokens = request.maxTokens
|
||||
if (request.reasoningEffort !== undefined) {
|
||||
finalPayload.reasoning = {
|
||||
effort: request.reasoningEffort,
|
||||
summary: 'auto',
|
||||
}
|
||||
}
|
||||
if (request.verbosity !== undefined) {
|
||||
finalPayload.text = {
|
||||
...finalPayload.text,
|
||||
verbosity: request.verbosity,
|
||||
}
|
||||
}
|
||||
|
||||
currentResponse = await postResponses(finalPayload)
|
||||
|
||||
const finalFormatEndTime = Date.now()
|
||||
const finalFormatDuration = finalFormatEndTime - finalFormatStartTime
|
||||
|
||||
timeSegments.push({
|
||||
type: 'model',
|
||||
name: 'Final formatted response',
|
||||
startTime: finalFormatStartTime,
|
||||
endTime: finalFormatEndTime,
|
||||
duration: finalFormatDuration,
|
||||
})
|
||||
|
||||
modelTime += finalFormatDuration
|
||||
|
||||
const finalUsage = parseResponsesUsage(currentResponse.usage)
|
||||
if (finalUsage) {
|
||||
tokens.input += finalUsage.promptTokens
|
||||
tokens.output += finalUsage.completionTokens
|
||||
tokens.total += finalUsage.totalTokens
|
||||
}
|
||||
|
||||
// Update content with the formatted response
|
||||
const formattedText = extractResponseText(currentResponse.output)
|
||||
if (formattedText) {
|
||||
content = formattedText
|
||||
}
|
||||
|
||||
appliedDeferredFormat = true
|
||||
}
|
||||
|
||||
// Skip streaming if we already applied deferred format - we have the formatted content
|
||||
// Making another streaming call would lose the formatted response
|
||||
if (request.stream && !appliedDeferredFormat) {
|
||||
logger.info('Using streaming for final response after tool processing')
|
||||
|
||||
const accumulatedCost = calculateCost(request.model, tokens.input, tokens.output)
|
||||
|
||||
// For Azure with deferred format in streaming mode, include the format in the streaming call
|
||||
const streamOverrides: Record<string, any> = { stream: true, tool_choice: 'auto' }
|
||||
if (deferredTextFormat) {
|
||||
streamOverrides.text = {
|
||||
...(basePayload.text ?? {}),
|
||||
format: deferredTextFormat,
|
||||
}
|
||||
}
|
||||
|
||||
const streamResponse = await fetch(config.endpoint, {
|
||||
method: 'POST',
|
||||
headers: config.headers,
|
||||
body: JSON.stringify(createRequestBody(currentInput, streamOverrides)),
|
||||
})
|
||||
|
||||
if (!streamResponse.ok) {
|
||||
const message = await parseErrorResponse(streamResponse)
|
||||
throw new Error(`${config.providerLabel} API error (${streamResponse.status}): ${message}`)
|
||||
}
|
||||
|
||||
const streamingResult = {
|
||||
stream: createReadableStreamFromResponses(streamResponse, (content, usage) => {
|
||||
streamingResult.execution.output.content = content
|
||||
streamingResult.execution.output.tokens = {
|
||||
input: tokens.input + (usage?.promptTokens || 0),
|
||||
output: tokens.output + (usage?.completionTokens || 0),
|
||||
total: tokens.total + (usage?.totalTokens || 0),
|
||||
}
|
||||
|
||||
const streamCost = calculateCost(
|
||||
request.model,
|
||||
usage?.promptTokens || 0,
|
||||
usage?.completionTokens || 0
|
||||
)
|
||||
streamingResult.execution.output.cost = {
|
||||
input: accumulatedCost.input + streamCost.input,
|
||||
output: accumulatedCost.output + streamCost.output,
|
||||
total: accumulatedCost.total + streamCost.total,
|
||||
}
|
||||
}),
|
||||
execution: {
|
||||
success: true,
|
||||
output: {
|
||||
content: '',
|
||||
model: request.model,
|
||||
tokens: {
|
||||
input: tokens.input,
|
||||
output: tokens.output,
|
||||
total: tokens.total,
|
||||
},
|
||||
toolCalls:
|
||||
toolCalls.length > 0
|
||||
? {
|
||||
list: toolCalls,
|
||||
count: toolCalls.length,
|
||||
}
|
||||
: undefined,
|
||||
providerTiming: {
|
||||
startTime: providerStartTimeISO,
|
||||
endTime: new Date().toISOString(),
|
||||
duration: Date.now() - providerStartTime,
|
||||
modelTime: modelTime,
|
||||
toolsTime: toolsTime,
|
||||
firstResponseTime: firstResponseTime,
|
||||
iterations: iterationCount + 1,
|
||||
timeSegments: timeSegments,
|
||||
},
|
||||
cost: {
|
||||
input: accumulatedCost.input,
|
||||
output: accumulatedCost.output,
|
||||
total: accumulatedCost.total,
|
||||
},
|
||||
},
|
||||
logs: [],
|
||||
metadata: {
|
||||
startTime: providerStartTimeISO,
|
||||
endTime: new Date().toISOString(),
|
||||
duration: Date.now() - providerStartTime,
|
||||
},
|
||||
},
|
||||
} as StreamingExecution
|
||||
|
||||
return streamingResult as StreamingExecution
|
||||
}
|
||||
|
||||
const providerEndTime = Date.now()
|
||||
const providerEndTimeISO = new Date(providerEndTime).toISOString()
|
||||
const totalDuration = providerEndTime - providerStartTime
|
||||
|
||||
return {
|
||||
content,
|
||||
model: request.model,
|
||||
tokens,
|
||||
toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
|
||||
toolResults: toolResults.length > 0 ? toolResults : undefined,
|
||||
timing: {
|
||||
startTime: providerStartTimeISO,
|
||||
endTime: providerEndTimeISO,
|
||||
duration: totalDuration,
|
||||
modelTime: modelTime,
|
||||
toolsTime: toolsTime,
|
||||
firstResponseTime: firstResponseTime,
|
||||
iterations: iterationCount + 1,
|
||||
timeSegments: timeSegments,
|
||||
},
|
||||
}
|
||||
} catch (error) {
|
||||
const providerEndTime = Date.now()
|
||||
const providerEndTimeISO = new Date(providerEndTime).toISOString()
|
||||
const totalDuration = providerEndTime - providerStartTime
|
||||
|
||||
logger.error(`Error in ${config.providerLabel} request:`, {
|
||||
error,
|
||||
duration: totalDuration,
|
||||
})
|
||||
|
||||
const enhancedError = new Error(error instanceof Error ? error.message : String(error))
|
||||
// @ts-ignore - Adding timing property to the error
|
||||
enhancedError.timing = {
|
||||
startTime: providerStartTimeISO,
|
||||
endTime: providerEndTimeISO,
|
||||
duration: totalDuration,
|
||||
}
|
||||
|
||||
throw enhancedError
|
||||
}
|
||||
}
|
||||
@@ -1,25 +1,11 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import OpenAI from 'openai'
|
||||
import type { ChatCompletionCreateParamsStreaming } from 'openai/resources/chat/completions'
|
||||
import type { StreamingExecution } from '@/executor/types'
|
||||
import { MAX_TOOL_ITERATIONS } from '@/providers'
|
||||
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
|
||||
import { createReadableStreamFromOpenAIStream } from '@/providers/openai/utils'
|
||||
import type {
|
||||
ProviderConfig,
|
||||
ProviderRequest,
|
||||
ProviderResponse,
|
||||
TimeSegment,
|
||||
} from '@/providers/types'
|
||||
import {
|
||||
calculateCost,
|
||||
prepareToolExecution,
|
||||
prepareToolsWithUsageControl,
|
||||
trackForcedToolUsage,
|
||||
} from '@/providers/utils'
|
||||
import { executeTool } from '@/tools'
|
||||
import type { ProviderConfig, ProviderRequest, ProviderResponse } from '@/providers/types'
|
||||
import { executeResponsesProviderRequest } from './core'
|
||||
|
||||
const logger = createLogger('OpenAIProvider')
|
||||
const responsesEndpoint = 'https://api.openai.com/v1/responses'
|
||||
|
||||
export const openaiProvider: ProviderConfig = {
|
||||
id: 'openai',
|
||||
@@ -32,534 +18,21 @@ export const openaiProvider: ProviderConfig = {
|
||||
executeRequest: async (
|
||||
request: ProviderRequest
|
||||
): Promise<ProviderResponse | StreamingExecution> => {
|
||||
logger.info('Preparing OpenAI request', {
|
||||
model: request.model,
|
||||
hasSystemPrompt: !!request.systemPrompt,
|
||||
hasMessages: !!request.messages?.length,
|
||||
hasTools: !!request.tools?.length,
|
||||
toolCount: request.tools?.length || 0,
|
||||
hasResponseFormat: !!request.responseFormat,
|
||||
stream: !!request.stream,
|
||||
if (!request.apiKey) {
|
||||
throw new Error('API key is required for OpenAI')
|
||||
}
|
||||
|
||||
return executeResponsesProviderRequest(request, {
|
||||
providerId: 'openai',
|
||||
providerLabel: 'OpenAI',
|
||||
modelName: request.model,
|
||||
endpoint: responsesEndpoint,
|
||||
headers: {
|
||||
Authorization: `Bearer ${request.apiKey}`,
|
||||
'Content-Type': 'application/json',
|
||||
'OpenAI-Beta': 'responses=v1',
|
||||
},
|
||||
logger,
|
||||
})
|
||||
|
||||
const openai = new OpenAI({ apiKey: request.apiKey })
|
||||
|
||||
const allMessages = []
|
||||
|
||||
if (request.systemPrompt) {
|
||||
allMessages.push({
|
||||
role: 'system',
|
||||
content: request.systemPrompt,
|
||||
})
|
||||
}
|
||||
|
||||
if (request.context) {
|
||||
allMessages.push({
|
||||
role: 'user',
|
||||
content: request.context,
|
||||
})
|
||||
}
|
||||
|
||||
if (request.messages) {
|
||||
allMessages.push(...request.messages)
|
||||
}
|
||||
|
||||
const tools = request.tools?.length
|
||||
? request.tools.map((tool) => ({
|
||||
type: 'function',
|
||||
function: {
|
||||
name: tool.id,
|
||||
description: tool.description,
|
||||
parameters: tool.parameters,
|
||||
},
|
||||
}))
|
||||
: undefined
|
||||
|
||||
const payload: any = {
|
||||
model: request.model,
|
||||
messages: allMessages,
|
||||
}
|
||||
|
||||
if (request.temperature !== undefined) payload.temperature = request.temperature
|
||||
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
|
||||
|
||||
if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort
|
||||
if (request.verbosity !== undefined) payload.verbosity = request.verbosity
|
||||
|
||||
if (request.responseFormat) {
|
||||
payload.response_format = {
|
||||
type: 'json_schema',
|
||||
json_schema: {
|
||||
name: request.responseFormat.name || 'response_schema',
|
||||
schema: request.responseFormat.schema || request.responseFormat,
|
||||
strict: request.responseFormat.strict !== false,
|
||||
},
|
||||
}
|
||||
|
||||
logger.info('Added JSON schema response format to request')
|
||||
}
|
||||
|
||||
let preparedTools: ReturnType<typeof prepareToolsWithUsageControl> | null = null
|
||||
|
||||
if (tools?.length) {
|
||||
preparedTools = prepareToolsWithUsageControl(tools, request.tools, logger, 'openai')
|
||||
const { tools: filteredTools, toolChoice } = preparedTools
|
||||
|
||||
if (filteredTools?.length && toolChoice) {
|
||||
payload.tools = filteredTools
|
||||
payload.tool_choice = toolChoice
|
||||
|
||||
logger.info('OpenAI request configuration:', {
|
||||
toolCount: filteredTools.length,
|
||||
toolChoice:
|
||||
typeof toolChoice === 'string'
|
||||
? toolChoice
|
||||
: toolChoice.type === 'function'
|
||||
? `force:${toolChoice.function.name}`
|
||||
: toolChoice.type === 'tool'
|
||||
? `force:${toolChoice.name}`
|
||||
: toolChoice.type === 'any'
|
||||
? `force:${toolChoice.any?.name || 'unknown'}`
|
||||
: 'unknown',
|
||||
model: request.model,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const providerStartTime = Date.now()
|
||||
const providerStartTimeISO = new Date(providerStartTime).toISOString()
|
||||
|
||||
try {
|
||||
if (request.stream && (!tools || tools.length === 0)) {
|
||||
logger.info('Using streaming response for OpenAI request')
|
||||
|
||||
const streamingParams: ChatCompletionCreateParamsStreaming = {
|
||||
...payload,
|
||||
stream: true,
|
||||
stream_options: { include_usage: true },
|
||||
}
|
||||
const streamResponse = await openai.chat.completions.create(streamingParams)
|
||||
|
||||
const streamingResult = {
|
||||
stream: createReadableStreamFromOpenAIStream(streamResponse, (content, usage) => {
|
||||
streamingResult.execution.output.content = content
|
||||
streamingResult.execution.output.tokens = {
|
||||
input: usage.prompt_tokens,
|
||||
output: usage.completion_tokens,
|
||||
total: usage.total_tokens,
|
||||
}
|
||||
|
||||
const costResult = calculateCost(
|
||||
request.model,
|
||||
usage.prompt_tokens,
|
||||
usage.completion_tokens
|
||||
)
|
||||
streamingResult.execution.output.cost = {
|
||||
input: costResult.input,
|
||||
output: costResult.output,
|
||||
total: costResult.total,
|
||||
}
|
||||
|
||||
const streamEndTime = Date.now()
|
||||
const streamEndTimeISO = new Date(streamEndTime).toISOString()
|
||||
|
||||
if (streamingResult.execution.output.providerTiming) {
|
||||
streamingResult.execution.output.providerTiming.endTime = streamEndTimeISO
|
||||
streamingResult.execution.output.providerTiming.duration =
|
||||
streamEndTime - providerStartTime
|
||||
|
||||
if (streamingResult.execution.output.providerTiming.timeSegments?.[0]) {
|
||||
streamingResult.execution.output.providerTiming.timeSegments[0].endTime =
|
||||
streamEndTime
|
||||
streamingResult.execution.output.providerTiming.timeSegments[0].duration =
|
||||
streamEndTime - providerStartTime
|
||||
}
|
||||
}
|
||||
}),
|
||||
execution: {
|
||||
success: true,
|
||||
output: {
|
||||
content: '',
|
||||
model: request.model,
|
||||
tokens: { input: 0, output: 0, total: 0 },
|
||||
toolCalls: undefined,
|
||||
providerTiming: {
|
||||
startTime: providerStartTimeISO,
|
||||
endTime: new Date().toISOString(),
|
||||
duration: Date.now() - providerStartTime,
|
||||
timeSegments: [
|
||||
{
|
||||
type: 'model',
|
||||
name: 'Streaming response',
|
||||
startTime: providerStartTime,
|
||||
endTime: Date.now(),
|
||||
duration: Date.now() - providerStartTime,
|
||||
},
|
||||
],
|
||||
},
|
||||
cost: { input: 0, output: 0, total: 0 },
|
||||
},
|
||||
logs: [],
|
||||
metadata: {
|
||||
startTime: providerStartTimeISO,
|
||||
endTime: new Date().toISOString(),
|
||||
duration: Date.now() - providerStartTime,
|
||||
},
|
||||
},
|
||||
} as StreamingExecution
|
||||
|
||||
return streamingResult as StreamingExecution
|
||||
}
|
||||
|
||||
const initialCallTime = Date.now()
|
||||
|
||||
const originalToolChoice = payload.tool_choice
|
||||
|
||||
const forcedTools = preparedTools?.forcedTools || []
|
||||
let usedForcedTools: string[] = []
|
||||
|
||||
/**
|
||||
* Helper function to check for forced tool usage in responses
|
||||
*/
|
||||
const checkForForcedToolUsage = (
|
||||
response: any,
|
||||
toolChoice: string | { type: string; function?: { name: string }; name?: string; any?: any }
|
||||
) => {
|
||||
if (typeof toolChoice === 'object' && response.choices[0]?.message?.tool_calls) {
|
||||
const toolCallsResponse = response.choices[0].message.tool_calls
|
||||
const result = trackForcedToolUsage(
|
||||
toolCallsResponse,
|
||||
toolChoice,
|
||||
logger,
|
||||
'openai',
|
||||
forcedTools,
|
||||
usedForcedTools
|
||||
)
|
||||
hasUsedForcedTool = result.hasUsedForcedTool
|
||||
usedForcedTools = result.usedForcedTools
|
||||
}
|
||||
}
|
||||
|
||||
let currentResponse = await openai.chat.completions.create(payload)
|
||||
const firstResponseTime = Date.now() - initialCallTime
|
||||
|
||||
let content = currentResponse.choices[0]?.message?.content || ''
|
||||
const tokens = {
|
||||
input: currentResponse.usage?.prompt_tokens || 0,
|
||||
output: currentResponse.usage?.completion_tokens || 0,
|
||||
total: currentResponse.usage?.total_tokens || 0,
|
||||
}
|
||||
const toolCalls = []
|
||||
const toolResults = []
|
||||
const currentMessages = [...allMessages]
|
||||
let iterationCount = 0
|
||||
|
||||
let modelTime = firstResponseTime
|
||||
let toolsTime = 0
|
||||
|
||||
let hasUsedForcedTool = false
|
||||
|
||||
const timeSegments: TimeSegment[] = [
|
||||
{
|
||||
type: 'model',
|
||||
name: 'Initial response',
|
||||
startTime: initialCallTime,
|
||||
endTime: initialCallTime + firstResponseTime,
|
||||
duration: firstResponseTime,
|
||||
},
|
||||
]
|
||||
|
||||
checkForForcedToolUsage(currentResponse, originalToolChoice)
|
||||
|
||||
while (iterationCount < MAX_TOOL_ITERATIONS) {
|
||||
if (currentResponse.choices[0]?.message?.content) {
|
||||
content = currentResponse.choices[0].message.content
|
||||
}
|
||||
|
||||
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
|
||||
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
|
||||
break
|
||||
}
|
||||
|
||||
logger.info(
|
||||
`Processing ${toolCallsInResponse.length} tool calls in parallel (iteration ${iterationCount + 1}/${MAX_TOOL_ITERATIONS})`
|
||||
)
|
||||
|
||||
const toolsStartTime = Date.now()
|
||||
|
||||
const toolExecutionPromises = toolCallsInResponse.map(async (toolCall) => {
|
||||
const toolCallStartTime = Date.now()
|
||||
const toolName = toolCall.function.name
|
||||
|
||||
try {
|
||||
const toolArgs = JSON.parse(toolCall.function.arguments)
|
||||
const tool = request.tools?.find((t) => t.id === toolName)
|
||||
|
||||
if (!tool) {
|
||||
return null
|
||||
}
|
||||
|
||||
const { toolParams, executionParams } = prepareToolExecution(tool, toolArgs, request)
|
||||
const result = await executeTool(toolName, executionParams)
|
||||
const toolCallEndTime = Date.now()
|
||||
|
||||
return {
|
||||
toolCall,
|
||||
toolName,
|
||||
toolParams,
|
||||
result,
|
||||
startTime: toolCallStartTime,
|
||||
endTime: toolCallEndTime,
|
||||
duration: toolCallEndTime - toolCallStartTime,
|
||||
}
|
||||
} catch (error) {
|
||||
const toolCallEndTime = Date.now()
|
||||
logger.error('Error processing tool call:', { error, toolName })
|
||||
|
||||
return {
|
||||
toolCall,
|
||||
toolName,
|
||||
toolParams: {},
|
||||
result: {
|
||||
success: false,
|
||||
output: undefined,
|
||||
error: error instanceof Error ? error.message : 'Tool execution failed',
|
||||
},
|
||||
startTime: toolCallStartTime,
|
||||
endTime: toolCallEndTime,
|
||||
duration: toolCallEndTime - toolCallStartTime,
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
const executionResults = await Promise.allSettled(toolExecutionPromises)
|
||||
|
||||
currentMessages.push({
|
||||
role: 'assistant',
|
||||
content: null,
|
||||
tool_calls: toolCallsInResponse.map((tc) => ({
|
||||
id: tc.id,
|
||||
type: 'function',
|
||||
function: {
|
||||
name: tc.function.name,
|
||||
arguments: tc.function.arguments,
|
||||
},
|
||||
})),
|
||||
})
|
||||
|
||||
for (const settledResult of executionResults) {
|
||||
if (settledResult.status === 'rejected' || !settledResult.value) continue
|
||||
|
||||
const { toolCall, toolName, toolParams, result, startTime, endTime, duration } =
|
||||
settledResult.value
|
||||
|
||||
timeSegments.push({
|
||||
type: 'tool',
|
||||
name: toolName,
|
||||
startTime: startTime,
|
||||
endTime: endTime,
|
||||
duration: duration,
|
||||
})
|
||||
|
||||
let resultContent: any
|
||||
if (result.success) {
|
||||
toolResults.push(result.output)
|
||||
resultContent = result.output
|
||||
} else {
|
||||
resultContent = {
|
||||
error: true,
|
||||
message: result.error || 'Tool execution failed',
|
||||
tool: toolName,
|
||||
}
|
||||
}
|
||||
|
||||
toolCalls.push({
|
||||
name: toolName,
|
||||
arguments: toolParams,
|
||||
startTime: new Date(startTime).toISOString(),
|
||||
endTime: new Date(endTime).toISOString(),
|
||||
duration: duration,
|
||||
result: resultContent,
|
||||
success: result.success,
|
||||
})
|
||||
|
||||
currentMessages.push({
|
||||
role: 'tool',
|
||||
tool_call_id: toolCall.id,
|
||||
content: JSON.stringify(resultContent),
|
||||
})
|
||||
}
|
||||
|
||||
const thisToolsTime = Date.now() - toolsStartTime
|
||||
toolsTime += thisToolsTime
|
||||
|
||||
const nextPayload = {
|
||||
...payload,
|
||||
messages: currentMessages,
|
||||
}
|
||||
|
||||
if (typeof originalToolChoice === 'object' && hasUsedForcedTool && forcedTools.length > 0) {
|
||||
const remainingTools = forcedTools.filter((tool) => !usedForcedTools.includes(tool))
|
||||
|
||||
if (remainingTools.length > 0) {
|
||||
nextPayload.tool_choice = {
|
||||
type: 'function',
|
||||
function: { name: remainingTools[0] },
|
||||
}
|
||||
logger.info(`Forcing next tool: ${remainingTools[0]}`)
|
||||
} else {
|
||||
nextPayload.tool_choice = 'auto'
|
||||
logger.info('All forced tools have been used, switching to auto tool_choice')
|
||||
}
|
||||
}
|
||||
|
||||
const nextModelStartTime = Date.now()
|
||||
|
||||
currentResponse = await openai.chat.completions.create(nextPayload)
|
||||
|
||||
checkForForcedToolUsage(currentResponse, nextPayload.tool_choice)
|
||||
|
||||
const nextModelEndTime = Date.now()
|
||||
const thisModelTime = nextModelEndTime - nextModelStartTime
|
||||
|
||||
timeSegments.push({
|
||||
type: 'model',
|
||||
name: `Model response (iteration ${iterationCount + 1})`,
|
||||
startTime: nextModelStartTime,
|
||||
endTime: nextModelEndTime,
|
||||
duration: thisModelTime,
|
||||
})
|
||||
|
||||
modelTime += thisModelTime
|
||||
|
||||
if (currentResponse.usage) {
|
||||
tokens.input += currentResponse.usage.prompt_tokens || 0
|
||||
tokens.output += currentResponse.usage.completion_tokens || 0
|
||||
tokens.total += currentResponse.usage.total_tokens || 0
|
||||
}
|
||||
|
||||
iterationCount++
|
||||
}
|
||||
|
||||
if (request.stream) {
|
||||
logger.info('Using streaming for final response after tool processing')
|
||||
|
||||
const accumulatedCost = calculateCost(request.model, tokens.input, tokens.output)
|
||||
|
||||
const streamingParams: ChatCompletionCreateParamsStreaming = {
|
||||
...payload,
|
||||
messages: currentMessages,
|
||||
tool_choice: 'auto',
|
||||
stream: true,
|
||||
stream_options: { include_usage: true },
|
||||
}
|
||||
const streamResponse = await openai.chat.completions.create(streamingParams)
|
||||
|
||||
const streamingResult = {
|
||||
stream: createReadableStreamFromOpenAIStream(streamResponse, (content, usage) => {
|
||||
streamingResult.execution.output.content = content
|
||||
streamingResult.execution.output.tokens = {
|
||||
input: tokens.input + usage.prompt_tokens,
|
||||
output: tokens.output + usage.completion_tokens,
|
||||
total: tokens.total + usage.total_tokens,
|
||||
}
|
||||
|
||||
const streamCost = calculateCost(
|
||||
request.model,
|
||||
usage.prompt_tokens,
|
||||
usage.completion_tokens
|
||||
)
|
||||
streamingResult.execution.output.cost = {
|
||||
input: accumulatedCost.input + streamCost.input,
|
||||
output: accumulatedCost.output + streamCost.output,
|
||||
total: accumulatedCost.total + streamCost.total,
|
||||
}
|
||||
}),
|
||||
execution: {
|
||||
success: true,
|
||||
output: {
|
||||
content: '',
|
||||
model: request.model,
|
||||
tokens: {
|
||||
input: tokens.input,
|
||||
output: tokens.output,
|
||||
total: tokens.total,
|
||||
},
|
||||
toolCalls:
|
||||
toolCalls.length > 0
|
||||
? {
|
||||
list: toolCalls,
|
||||
count: toolCalls.length,
|
||||
}
|
||||
: undefined,
|
||||
providerTiming: {
|
||||
startTime: providerStartTimeISO,
|
||||
endTime: new Date().toISOString(),
|
||||
duration: Date.now() - providerStartTime,
|
||||
modelTime: modelTime,
|
||||
toolsTime: toolsTime,
|
||||
firstResponseTime: firstResponseTime,
|
||||
iterations: iterationCount + 1,
|
||||
timeSegments: timeSegments,
|
||||
},
|
||||
cost: {
|
||||
input: accumulatedCost.input,
|
||||
output: accumulatedCost.output,
|
||||
total: accumulatedCost.total,
|
||||
},
|
||||
},
|
||||
logs: [],
|
||||
metadata: {
|
||||
startTime: providerStartTimeISO,
|
||||
endTime: new Date().toISOString(),
|
||||
duration: Date.now() - providerStartTime,
|
||||
},
|
||||
},
|
||||
} as StreamingExecution
|
||||
|
||||
return streamingResult as StreamingExecution
|
||||
}
|
||||
|
||||
const providerEndTime = Date.now()
|
||||
const providerEndTimeISO = new Date(providerEndTime).toISOString()
|
||||
const totalDuration = providerEndTime - providerStartTime
|
||||
|
||||
return {
|
||||
content,
|
||||
model: request.model,
|
||||
tokens,
|
||||
toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
|
||||
toolResults: toolResults.length > 0 ? toolResults : undefined,
|
||||
timing: {
|
||||
startTime: providerStartTimeISO,
|
||||
endTime: providerEndTimeISO,
|
||||
duration: totalDuration,
|
||||
modelTime: modelTime,
|
||||
toolsTime: toolsTime,
|
||||
firstResponseTime: firstResponseTime,
|
||||
iterations: iterationCount + 1,
|
||||
timeSegments: timeSegments,
|
||||
},
|
||||
}
|
||||
} catch (error) {
|
||||
const providerEndTime = Date.now()
|
||||
const providerEndTimeISO = new Date(providerEndTime).toISOString()
|
||||
const totalDuration = providerEndTime - providerStartTime
|
||||
|
||||
logger.error('Error in OpenAI request:', {
|
||||
error,
|
||||
duration: totalDuration,
|
||||
})
|
||||
|
||||
const enhancedError = new Error(error instanceof Error ? error.message : String(error))
|
||||
// @ts-ignore - Adding timing property to the error
|
||||
enhancedError.timing = {
|
||||
startTime: providerStartTimeISO,
|
||||
endTime: providerEndTimeISO,
|
||||
duration: totalDuration,
|
||||
}
|
||||
|
||||
throw enhancedError
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,15 +1,465 @@
|
||||
import type { ChatCompletionChunk } from 'openai/resources/chat/completions'
|
||||
import type { CompletionUsage } from 'openai/resources/completions'
|
||||
import type { Stream } from 'openai/streaming'
|
||||
import { createOpenAICompatibleStream } from '@/providers/utils'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import type { Message } from '@/providers/types'
|
||||
|
||||
const logger = createLogger('ResponsesUtils')
|
||||
|
||||
export interface ResponsesUsageTokens {
|
||||
promptTokens: number
|
||||
completionTokens: number
|
||||
totalTokens: number
|
||||
cachedTokens: number
|
||||
reasoningTokens: number
|
||||
}
|
||||
|
||||
export interface ResponsesToolCall {
|
||||
id: string
|
||||
name: string
|
||||
arguments: string
|
||||
}
|
||||
|
||||
export type ResponsesInputItem =
|
||||
| {
|
||||
role: 'system' | 'user' | 'assistant'
|
||||
content: string
|
||||
}
|
||||
| {
|
||||
type: 'function_call'
|
||||
call_id: string
|
||||
name: string
|
||||
arguments: string
|
||||
}
|
||||
| {
|
||||
type: 'function_call_output'
|
||||
call_id: string
|
||||
output: string
|
||||
}
|
||||
|
||||
export interface ResponsesToolDefinition {
|
||||
type: 'function'
|
||||
name: string
|
||||
description?: string
|
||||
parameters?: Record<string, any>
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a ReadableStream from an OpenAI streaming response.
|
||||
* Uses the shared OpenAI-compatible streaming utility.
|
||||
* Converts chat-style messages into Responses API input items.
|
||||
*/
|
||||
export function createReadableStreamFromOpenAIStream(
|
||||
openaiStream: Stream<ChatCompletionChunk>,
|
||||
onComplete?: (content: string, usage: CompletionUsage) => void
|
||||
): ReadableStream<Uint8Array> {
|
||||
return createOpenAICompatibleStream(openaiStream, 'OpenAI', onComplete)
|
||||
export function buildResponsesInputFromMessages(messages: Message[]): ResponsesInputItem[] {
|
||||
const input: ResponsesInputItem[] = []
|
||||
|
||||
for (const message of messages) {
|
||||
if (message.role === 'tool' && message.tool_call_id) {
|
||||
input.push({
|
||||
type: 'function_call_output',
|
||||
call_id: message.tool_call_id,
|
||||
output: message.content ?? '',
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
if (
|
||||
message.content &&
|
||||
(message.role === 'system' || message.role === 'user' || message.role === 'assistant')
|
||||
) {
|
||||
input.push({
|
||||
role: message.role,
|
||||
content: message.content,
|
||||
})
|
||||
}
|
||||
|
||||
if (message.tool_calls?.length) {
|
||||
for (const toolCall of message.tool_calls) {
|
||||
input.push({
|
||||
type: 'function_call',
|
||||
call_id: toolCall.id,
|
||||
name: toolCall.function.name,
|
||||
arguments: toolCall.function.arguments,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return input
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts tool definitions to the Responses API format.
|
||||
*/
|
||||
export function convertToolsToResponses(tools: any[]): ResponsesToolDefinition[] {
|
||||
return tools
|
||||
.map((tool) => {
|
||||
const name = tool.function?.name ?? tool.name
|
||||
if (!name) {
|
||||
return null
|
||||
}
|
||||
|
||||
return {
|
||||
type: 'function' as const,
|
||||
name,
|
||||
description: tool.function?.description ?? tool.description,
|
||||
parameters: tool.function?.parameters ?? tool.parameters,
|
||||
}
|
||||
})
|
||||
.filter(Boolean) as ResponsesToolDefinition[]
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts tool_choice to the Responses API format.
|
||||
*/
|
||||
export function toResponsesToolChoice(
|
||||
toolChoice:
|
||||
| 'auto'
|
||||
| 'none'
|
||||
| { type: 'function'; function?: { name: string }; name?: string }
|
||||
| { type: 'tool'; name: string }
|
||||
| { type: 'any'; any: { model: string; name: string } }
|
||||
| undefined
|
||||
): 'auto' | 'none' | { type: 'function'; name: string } | undefined {
|
||||
if (!toolChoice) {
|
||||
return undefined
|
||||
}
|
||||
|
||||
if (typeof toolChoice === 'string') {
|
||||
return toolChoice
|
||||
}
|
||||
|
||||
if (toolChoice.type === 'function') {
|
||||
const name = toolChoice.name ?? toolChoice.function?.name
|
||||
return name ? { type: 'function', name } : undefined
|
||||
}
|
||||
|
||||
return 'auto'
|
||||
}
|
||||
|
||||
function extractTextFromMessageItem(item: any): string {
|
||||
if (!item) {
|
||||
return ''
|
||||
}
|
||||
|
||||
if (typeof item.content === 'string') {
|
||||
return item.content
|
||||
}
|
||||
|
||||
if (!Array.isArray(item.content)) {
|
||||
return ''
|
||||
}
|
||||
|
||||
const textParts: string[] = []
|
||||
for (const part of item.content) {
|
||||
if (!part || typeof part !== 'object') {
|
||||
continue
|
||||
}
|
||||
|
||||
if ((part.type === 'output_text' || part.type === 'text') && typeof part.text === 'string') {
|
||||
textParts.push(part.text)
|
||||
continue
|
||||
}
|
||||
|
||||
if (part.type === 'output_json') {
|
||||
if (typeof part.text === 'string') {
|
||||
textParts.push(part.text)
|
||||
} else if (part.json !== undefined) {
|
||||
textParts.push(JSON.stringify(part.json))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return textParts.join('')
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts plain text from Responses API output items.
|
||||
*/
|
||||
export function extractResponseText(output: unknown): string {
|
||||
if (!Array.isArray(output)) {
|
||||
return ''
|
||||
}
|
||||
|
||||
const textParts: string[] = []
|
||||
for (const item of output) {
|
||||
if (item?.type !== 'message') {
|
||||
continue
|
||||
}
|
||||
|
||||
const text = extractTextFromMessageItem(item)
|
||||
if (text) {
|
||||
textParts.push(text)
|
||||
}
|
||||
}
|
||||
|
||||
return textParts.join('')
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts Responses API output items into input items for subsequent calls.
|
||||
*/
|
||||
export function convertResponseOutputToInputItems(output: unknown): ResponsesInputItem[] {
|
||||
if (!Array.isArray(output)) {
|
||||
return []
|
||||
}
|
||||
|
||||
const items: ResponsesInputItem[] = []
|
||||
for (const item of output) {
|
||||
if (!item || typeof item !== 'object') {
|
||||
continue
|
||||
}
|
||||
|
||||
if (item.type === 'message') {
|
||||
const text = extractTextFromMessageItem(item)
|
||||
if (text) {
|
||||
items.push({
|
||||
role: 'assistant',
|
||||
content: text,
|
||||
})
|
||||
}
|
||||
|
||||
const toolCalls = Array.isArray(item.tool_calls) ? item.tool_calls : []
|
||||
for (const toolCall of toolCalls) {
|
||||
const callId = toolCall?.id
|
||||
const name = toolCall?.function?.name ?? toolCall?.name
|
||||
if (!callId || !name) {
|
||||
continue
|
||||
}
|
||||
|
||||
const argumentsValue =
|
||||
typeof toolCall?.function?.arguments === 'string'
|
||||
? toolCall.function.arguments
|
||||
: JSON.stringify(toolCall?.function?.arguments ?? {})
|
||||
|
||||
items.push({
|
||||
type: 'function_call',
|
||||
call_id: callId,
|
||||
name,
|
||||
arguments: argumentsValue,
|
||||
})
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if (item.type === 'function_call') {
|
||||
const callId = item.call_id ?? item.id
|
||||
const name = item.name ?? item.function?.name
|
||||
if (!callId || !name) {
|
||||
continue
|
||||
}
|
||||
|
||||
const argumentsValue =
|
||||
typeof item.arguments === 'string' ? item.arguments : JSON.stringify(item.arguments ?? {})
|
||||
|
||||
items.push({
|
||||
type: 'function_call',
|
||||
call_id: callId,
|
||||
name,
|
||||
arguments: argumentsValue,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return items
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts tool calls from Responses API output items.
|
||||
*/
|
||||
export function extractResponseToolCalls(output: unknown): ResponsesToolCall[] {
|
||||
if (!Array.isArray(output)) {
|
||||
return []
|
||||
}
|
||||
|
||||
const toolCalls: ResponsesToolCall[] = []
|
||||
|
||||
for (const item of output) {
|
||||
if (!item || typeof item !== 'object') {
|
||||
continue
|
||||
}
|
||||
|
||||
if (item.type === 'function_call') {
|
||||
const callId = item.call_id ?? item.id
|
||||
const name = item.name ?? item.function?.name
|
||||
if (!callId || !name) {
|
||||
continue
|
||||
}
|
||||
|
||||
const argumentsValue =
|
||||
typeof item.arguments === 'string' ? item.arguments : JSON.stringify(item.arguments ?? {})
|
||||
|
||||
toolCalls.push({
|
||||
id: callId,
|
||||
name,
|
||||
arguments: argumentsValue,
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
if (item.type === 'message' && Array.isArray(item.tool_calls)) {
|
||||
for (const toolCall of item.tool_calls) {
|
||||
const callId = toolCall?.id
|
||||
const name = toolCall?.function?.name ?? toolCall?.name
|
||||
if (!callId || !name) {
|
||||
continue
|
||||
}
|
||||
|
||||
const argumentsValue =
|
||||
typeof toolCall?.function?.arguments === 'string'
|
||||
? toolCall.function.arguments
|
||||
: JSON.stringify(toolCall?.function?.arguments ?? {})
|
||||
|
||||
toolCalls.push({
|
||||
id: callId,
|
||||
name,
|
||||
arguments: argumentsValue,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return toolCalls
|
||||
}
|
||||
|
||||
/**
|
||||
* Maps Responses API usage data to prompt/completion token counts.
|
||||
*
|
||||
* Note: output_tokens is expected to include reasoning tokens; fall back to reasoning_tokens
|
||||
* when output_tokens is missing or zero.
|
||||
*/
|
||||
export function parseResponsesUsage(usage: any): ResponsesUsageTokens | undefined {
|
||||
if (!usage || typeof usage !== 'object') {
|
||||
return undefined
|
||||
}
|
||||
|
||||
const inputTokens = Number(usage.input_tokens ?? 0)
|
||||
const outputTokens = Number(usage.output_tokens ?? 0)
|
||||
const cachedTokens = Number(usage.input_tokens_details?.cached_tokens ?? 0)
|
||||
const reasoningTokens = Number(usage.output_tokens_details?.reasoning_tokens ?? 0)
|
||||
const completionTokens = Math.max(outputTokens, reasoningTokens)
|
||||
const totalTokens = inputTokens + completionTokens
|
||||
|
||||
return {
|
||||
promptTokens: inputTokens,
|
||||
completionTokens,
|
||||
totalTokens,
|
||||
cachedTokens,
|
||||
reasoningTokens,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a ReadableStream from a Responses API SSE stream.
|
||||
*/
|
||||
export function createReadableStreamFromResponses(
|
||||
response: Response,
|
||||
onComplete?: (content: string, usage?: ResponsesUsageTokens) => void
|
||||
): ReadableStream<Uint8Array> {
|
||||
let fullContent = ''
|
||||
let finalUsage: ResponsesUsageTokens | undefined
|
||||
let activeEventType: string | undefined
|
||||
const encoder = new TextEncoder()
|
||||
|
||||
return new ReadableStream<Uint8Array>({
|
||||
async start(controller) {
|
||||
const reader = response.body?.getReader()
|
||||
if (!reader) {
|
||||
controller.close()
|
||||
return
|
||||
}
|
||||
|
||||
const decoder = new TextDecoder()
|
||||
let buffer = ''
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read()
|
||||
if (done) {
|
||||
break
|
||||
}
|
||||
|
||||
buffer += decoder.decode(value, { stream: true })
|
||||
const lines = buffer.split('\n')
|
||||
buffer = lines.pop() || ''
|
||||
|
||||
for (const line of lines) {
|
||||
const trimmed = line.trim()
|
||||
if (!trimmed) {
|
||||
continue
|
||||
}
|
||||
|
||||
if (trimmed.startsWith('event:')) {
|
||||
activeEventType = trimmed.slice(6).trim()
|
||||
continue
|
||||
}
|
||||
|
||||
if (!trimmed.startsWith('data:')) {
|
||||
continue
|
||||
}
|
||||
|
||||
const data = trimmed.slice(5).trim()
|
||||
if (data === '[DONE]') {
|
||||
continue
|
||||
}
|
||||
|
||||
let event: any
|
||||
try {
|
||||
event = JSON.parse(data)
|
||||
} catch (error) {
|
||||
logger.debug('Skipping non-JSON response stream chunk', {
|
||||
data: data.slice(0, 200),
|
||||
error,
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
const eventType = event?.type ?? activeEventType
|
||||
|
||||
if (
|
||||
eventType === 'response.error' ||
|
||||
eventType === 'error' ||
|
||||
eventType === 'response.failed'
|
||||
) {
|
||||
const message = event?.error?.message || 'Responses API stream error'
|
||||
controller.error(new Error(message))
|
||||
return
|
||||
}
|
||||
|
||||
if (
|
||||
eventType === 'response.output_text.delta' ||
|
||||
eventType === 'response.output_json.delta'
|
||||
) {
|
||||
let deltaText = ''
|
||||
if (typeof event.delta === 'string') {
|
||||
deltaText = event.delta
|
||||
} else if (event.delta && typeof event.delta.text === 'string') {
|
||||
deltaText = event.delta.text
|
||||
} else if (event.delta && event.delta.json !== undefined) {
|
||||
deltaText = JSON.stringify(event.delta.json)
|
||||
} else if (event.json !== undefined) {
|
||||
deltaText = JSON.stringify(event.json)
|
||||
} else if (typeof event.text === 'string') {
|
||||
deltaText = event.text
|
||||
}
|
||||
|
||||
if (deltaText.length > 0) {
|
||||
fullContent += deltaText
|
||||
controller.enqueue(encoder.encode(deltaText))
|
||||
}
|
||||
}
|
||||
|
||||
if (eventType === 'response.completed') {
|
||||
finalUsage = parseResponsesUsage(event?.response?.usage ?? event?.usage)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (onComplete) {
|
||||
onComplete(fullContent, finalUsage)
|
||||
}
|
||||
|
||||
controller.close()
|
||||
} catch (error) {
|
||||
controller.error(error)
|
||||
} finally {
|
||||
reader.releaseLock()
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -464,6 +464,108 @@ describe('Serializer', () => {
|
||||
}).not.toThrow()
|
||||
})
|
||||
|
||||
it.concurrent(
|
||||
'should validate required fields for blocks without tools (empty tools.access)',
|
||||
() => {
|
||||
const serializer = new Serializer()
|
||||
|
||||
const waitBlockMissingRequired: any = {
|
||||
id: 'wait-block',
|
||||
type: 'wait',
|
||||
name: 'Wait Block',
|
||||
position: { x: 0, y: 0 },
|
||||
subBlocks: {
|
||||
timeValue: { value: '' },
|
||||
timeUnit: { value: 'seconds' },
|
||||
},
|
||||
outputs: {},
|
||||
enabled: true,
|
||||
}
|
||||
|
||||
expect(() => {
|
||||
serializer.serializeWorkflow(
|
||||
{ 'wait-block': waitBlockMissingRequired },
|
||||
[],
|
||||
{},
|
||||
undefined,
|
||||
true
|
||||
)
|
||||
}).toThrow('Wait Block is missing required fields: Wait Amount')
|
||||
}
|
||||
)
|
||||
|
||||
it.concurrent(
|
||||
'should pass validation for blocks without tools when required fields are present',
|
||||
() => {
|
||||
const serializer = new Serializer()
|
||||
|
||||
const waitBlockWithFields: any = {
|
||||
id: 'wait-block',
|
||||
type: 'wait',
|
||||
name: 'Wait Block',
|
||||
position: { x: 0, y: 0 },
|
||||
subBlocks: {
|
||||
timeValue: { value: '10' },
|
||||
timeUnit: { value: 'seconds' },
|
||||
},
|
||||
outputs: {},
|
||||
enabled: true,
|
||||
}
|
||||
|
||||
expect(() => {
|
||||
serializer.serializeWorkflow(
|
||||
{ 'wait-block': waitBlockWithFields },
|
||||
[],
|
||||
{},
|
||||
undefined,
|
||||
true
|
||||
)
|
||||
}).not.toThrow()
|
||||
}
|
||||
)
|
||||
|
||||
it.concurrent('should report all missing required fields for blocks without tools', () => {
|
||||
const serializer = new Serializer()
|
||||
|
||||
const waitBlockAllMissing: any = {
|
||||
      id: 'wait-block',
      type: 'wait',
      name: 'Wait Block',
      position: { x: 0, y: 0 },
      subBlocks: {
        timeValue: { value: null },
        timeUnit: { value: '' },
      },
      outputs: {},
      enabled: true,
    }

    expect(() => {
      serializer.serializeWorkflow({ 'wait-block': waitBlockAllMissing }, [], {}, undefined, true)
    }).toThrow('Wait Block is missing required fields: Wait Amount, Unit')
  })

  it.concurrent('should skip validation for disabled blocks without tools', () => {
    const serializer = new Serializer()

    const disabledWaitBlock: any = {
      id: 'wait-block',
      type: 'wait',
      name: 'Wait Block',
      position: { x: 0, y: 0 },
      subBlocks: {
        timeValue: { value: null },
        timeUnit: { value: null },
      },
      outputs: {},
      enabled: false,
    }

    expect(() => {
      serializer.serializeWorkflow({ 'wait-block': disabledWaitBlock }, [], {}, undefined, true)
    }).not.toThrow()
  })

  it.concurrent('should handle empty string values as missing', () => {
    const serializer = new Serializer()

@@ -416,21 +416,6 @@ export class Serializer {
return
}

// Get the tool configuration to check parameter visibility
const toolAccess = blockConfig.tools?.access
if (!toolAccess || toolAccess.length === 0) {
return // No tools to validate against
}

// Determine the current tool ID using the same logic as the serializer
const currentToolId = this.selectToolId(blockConfig, params)

// Get the specific tool to validate against
const currentTool = getTool(currentToolId)
if (!currentTool) {
return // Tool not found, skip validation
}

const missingFields: string[] = []
const displayAdvancedOptions = block.advancedMode ?? false
const isTriggerContext = block.triggerMode ?? false
@@ -439,55 +424,105 @@ export class Serializer {
const canonicalModeOverrides = block.data?.canonicalModes
const allValues = buildSubBlockValues(block.subBlocks)

// Iterate through the tool's parameters, not the block's subBlocks
Object.entries(currentTool.params || {}).forEach(([paramId, paramConfig]) => {
if (paramConfig.required && paramConfig.visibility === 'user-only') {
const matchingConfigs = blockConfig.subBlocks?.filter((sb: any) => sb.id === paramId) || []
// Get the tool configuration to check parameter visibility
const toolAccess = blockConfig.tools?.access
const currentToolId = toolAccess?.length > 0 ? this.selectToolId(blockConfig, params) : null
const currentTool = currentToolId ? getTool(currentToolId) : null

let shouldValidateParam = true
// Validate tool parameters (for blocks with tools)
if (currentTool) {
Object.entries(currentTool.params || {}).forEach(([paramId, paramConfig]) => {
if (paramConfig.required && paramConfig.visibility === 'user-only') {
const matchingConfigs =
blockConfig.subBlocks?.filter((sb: any) => sb.id === paramId) || []

if (matchingConfigs.length > 0) {
shouldValidateParam = matchingConfigs.some((subBlockConfig: any) => {
const includedByMode = shouldSerializeSubBlock(
subBlockConfig,
allValues,
displayAdvancedOptions,
isTriggerContext,
isTriggerCategory,
canonicalIndex,
canonicalModeOverrides
let shouldValidateParam = true

if (matchingConfigs.length > 0) {
shouldValidateParam = matchingConfigs.some((subBlockConfig: any) => {
const includedByMode = shouldSerializeSubBlock(
subBlockConfig,
allValues,
displayAdvancedOptions,
isTriggerContext,
isTriggerCategory,
canonicalIndex,
canonicalModeOverrides
)

const isRequired = (() => {
if (!subBlockConfig.required) return false
if (typeof subBlockConfig.required === 'boolean') return subBlockConfig.required
return evaluateSubBlockCondition(subBlockConfig.required, params)
})()

return includedByMode && isRequired
})
}

if (!shouldValidateParam) {
return
}

const fieldValue = params[paramId]
if (fieldValue === undefined || fieldValue === null || fieldValue === '') {
const activeConfig = matchingConfigs.find((config: any) =>
shouldSerializeSubBlock(
config,
allValues,
displayAdvancedOptions,
isTriggerContext,
isTriggerCategory,
canonicalIndex,
canonicalModeOverrides
)
)

const isRequired = (() => {
if (!subBlockConfig.required) return false
if (typeof subBlockConfig.required === 'boolean') return subBlockConfig.required
return evaluateSubBlockCondition(subBlockConfig.required, params)
})()

return includedByMode && isRequired
})
const displayName = activeConfig?.title || paramId
missingFields.push(displayName)
}
}
})
}

if (!shouldValidateParam) {
return
}
// Validate required subBlocks not covered by tool params (e.g., blocks with empty tools.access)
const validatedByTool = new Set(currentTool ? Object.keys(currentTool.params || {}) : [])

const fieldValue = params[paramId]
if (fieldValue === undefined || fieldValue === null || fieldValue === '') {
const activeConfig = matchingConfigs.find((config: any) =>
shouldSerializeSubBlock(
config,
allValues,
displayAdvancedOptions,
isTriggerContext,
isTriggerCategory,
canonicalIndex,
canonicalModeOverrides
)
)
const displayName = activeConfig?.title || paramId
missingFields.push(displayName)
}
blockConfig.subBlocks?.forEach((subBlockConfig: SubBlockConfig) => {
// Skip if already validated via tool params
if (validatedByTool.has(subBlockConfig.id)) {
return
}

// Check if subBlock is visible
const isVisible = shouldSerializeSubBlock(
subBlockConfig,
allValues,
displayAdvancedOptions,
isTriggerContext,
isTriggerCategory,
canonicalIndex,
canonicalModeOverrides
)

if (!isVisible) {
return
}

// Check if subBlock is required
const isRequired = (() => {
if (!subBlockConfig.required) return false
if (typeof subBlockConfig.required === 'boolean') return subBlockConfig.required
return evaluateSubBlockCondition(subBlockConfig.required, params)
})()

if (!isRequired) {
return
}

// Check if value is missing
const fieldValue = params[subBlockConfig.id]
if (fieldValue === undefined || fieldValue === null || fieldValue === '') {
missingFields.push(subBlockConfig.title || subBlockConfig.id)
}
})
File diff suppressed because it is too large
@@ -33,6 +33,20 @@ export interface CopilotToolCall {
  subAgentStreaming?: boolean
}

export interface CopilotStreamInfo {
  streamId: string
  workflowId: string
  chatId?: string
  userMessageId: string
  assistantMessageId: string
  lastEventId: number
  resumeAttempts: number
  userMessageContent: string
  fileAttachments?: MessageFileAttachment[]
  contexts?: ChatContext[]
  startedAt: number
}

export interface MessageFileAttachment {
  id: string
  key: string
@@ -154,6 +168,9 @@ export interface CopilotState {
  // Auto-allowed integration tools (tools that can run without confirmation)
  autoAllowedTools: string[]

  // Active stream metadata for reconnect/replay
  activeStream: CopilotStreamInfo | null

  // Message queue for messages sent while another is in progress
  messageQueue: QueuedMessage[]

@@ -194,6 +211,7 @@ export interface CopilotActions {
    toolCallState: 'accepted' | 'rejected' | 'error',
    toolCallId?: string
  ) => void
  resumeActiveStream: () => Promise<boolean>
  setToolCallState: (toolCall: any, newState: ClientToolCallState, options?: any) => void
  updateToolCallParams: (toolCallId: string, params: Record<string, any>) => void
  sendDocsMessage: (query: string, options?: { stream?: boolean; topK?: number }) => Promise<void>
@@ -228,11 +246,10 @@ export interface CopilotActions {
    stream: ReadableStream,
    messageId: string,
    isContinuation?: boolean,
    triggerUserMessageId?: string
    triggerUserMessageId?: string,
    abortSignal?: AbortSignal
  ) => Promise<void>
  handleNewChatCreation: (newChatId: string) => Promise<void>
  executeIntegrationTool: (toolCallId: string) => Promise<void>
  skipIntegrationTool: (toolCallId: string) => void
  loadAutoAllowedTools: () => Promise<void>
  addAutoAllowedTool: (toolId: string) => Promise<void>
  removeAutoAllowedTool: (toolId: string) => Promise<void>
@@ -121,6 +121,13 @@ export const useWorkflowDiffStore = create<WorkflowDiffState & WorkflowDiffActio

    const candidateState = diffResult.diff.proposedState

    logger.info('[WorkflowDiff] Applying proposed state', {
      blockCount: Object.keys(candidateState.blocks || {}).length,
      edgeCount: candidateState.edges?.length ?? 0,
      hasLoops: !!candidateState.loops,
      hasParallels: !!candidateState.parallels,
    })

    // Validate proposed workflow using serializer round-trip
    const serializer = new Serializer()
    const serialized = serializer.serializeWorkflow(
@@ -134,6 +141,7 @@ export const useWorkflowDiffStore = create<WorkflowDiffState & WorkflowDiffActio

    // OPTIMISTIC: Apply state immediately to stores (this is what makes UI update)
    applyWorkflowStateToStores(activeWorkflowId, candidateState)
    logger.info('[WorkflowDiff] Applied state to stores')

    // OPTIMISTIC: Update diff state immediately so UI shows the diff
    const triggerMessageId =

@@ -37,10 +37,26 @@ export function applyWorkflowStateToStores(
  workflowState: WorkflowState,
  options?: { updateLastSaved?: boolean }
) {
  logger.info('[applyWorkflowStateToStores] Applying state', {
    workflowId,
    blockCount: Object.keys(workflowState.blocks || {}).length,
    edgeCount: workflowState.edges?.length ?? 0,
    edgePreview: workflowState.edges?.slice(0, 3).map((e) => `${e.source} -> ${e.target}`),
  })
  const workflowStore = useWorkflowStore.getState()
  workflowStore.replaceWorkflowState(cloneWorkflowState(workflowState), options)
  const cloned = cloneWorkflowState(workflowState)
  logger.info('[applyWorkflowStateToStores] Cloned state edges', {
    clonedEdgeCount: cloned.edges?.length ?? 0,
  })
  workflowStore.replaceWorkflowState(cloned, options)
  const subBlockValues = extractSubBlockValues(workflowState)
  useSubBlockStore.getState().setWorkflowValues(workflowId, subBlockValues)

  // Verify what's in the store after apply
  const afterState = workflowStore.getWorkflowState()
  logger.info('[applyWorkflowStateToStores] After apply', {
    afterEdgeCount: afterState.edges?.length ?? 0,
  })
}

export function captureBaselineSnapshot(workflowId: string): WorkflowState {
@@ -1,3 +1,3 @@
import { mistralParserTool, mistralParserV2Tool } from '@/tools/mistral/parser'
import { mistralParserTool, mistralParserV2Tool, mistralParserV3Tool } from '@/tools/mistral/parser'

export { mistralParserTool, mistralParserV2Tool }
export { mistralParserTool, mistralParserV2Tool, mistralParserV3Tool }
@@ -349,74 +349,14 @@ export const mistralParserTool: ToolConfig<MistralParserInput, MistralParserOutp
},
}

const mistralParserV2Params = {
file: {
type: 'file',
required: true,
visibility: 'hidden',
description: 'File data from a previous block',
},
resultType: mistralParserTool.params.resultType,
includeImageBase64: mistralParserTool.params.includeImageBase64,
pages: mistralParserTool.params.pages,
imageLimit: mistralParserTool.params.imageLimit,
imageMinSize: mistralParserTool.params.imageMinSize,
apiKey: mistralParserTool.params.apiKey,
} satisfies ToolConfig['params']

export const mistralParserV2Tool: ToolConfig<MistralParserV2Input, MistralParserV2Output> = {
export const mistralParserV2Tool: ToolConfig<MistralParserInput, MistralParserV2Output> = {
id: 'mistral_parser_v2',
name: 'Mistral PDF Parser',
description: 'Parse PDF documents using Mistral OCR API',
version: '2.0.0',

params: mistralParserV2Params,
request: {
url: '/api/tools/mistral/parse',
method: 'POST',
headers: (params) => {
return {
'Content-Type': 'application/json',
Accept: 'application/json',
Authorization: `Bearer ${params.apiKey}`,
}
},
body: (params) => {
if (!params || typeof params !== 'object') {
throw new Error('Invalid parameters: Parameters must be provided as an object')
}
if (!params.apiKey || typeof params.apiKey !== 'string' || params.apiKey.trim() === '') {
throw new Error('Missing or invalid API key: A valid Mistral API key is required')
}

const file = params.file
if (!file || typeof file !== 'object') {
throw new Error('File input is required')
}

const requestBody: Record<string, unknown> = {
apiKey: params.apiKey,
resultType: params.resultType || 'markdown',
}

requestBody.file = file

if (params.pages) {
requestBody.pages = params.pages
}
if (params.includeImageBase64 !== undefined) {
requestBody.includeImageBase64 = params.includeImageBase64
}
if (params.imageLimit !== undefined) {
requestBody.imageLimit = params.imageLimit
}
if (params.imageMinSize !== undefined) {
requestBody.imageMinSize = params.imageMinSize
}

return requestBody
},
},
params: mistralParserTool.params,
request: mistralParserTool.request,

transformResponse: async (response: Response) => {
let ocrResult
@@ -543,3 +483,73 @@ export const mistralParserV2Tool: ToolConfig<MistralParserV2Input, MistralParser
  },
  },
}

/**
 * V3 tool - Updated for new file handling pattern with UserFile normalization
 * Used by MistralParseV3Block which uses fileUpload (basic) and fileReference (advanced) subblocks
 */
export const mistralParserV3Tool: ToolConfig<MistralParserV2Input, MistralParserV2Output> = {
  ...mistralParserV2Tool,
  id: 'mistral_parser_v3',
  version: '3.0.0',
  params: {
    file: {
      type: 'file',
      required: true,
      visibility: 'hidden',
      description: 'Normalized UserFile from file upload or file reference',
    },
    resultType: mistralParserTool.params.resultType,
    includeImageBase64: mistralParserTool.params.includeImageBase64,
    pages: mistralParserTool.params.pages,
    imageLimit: mistralParserTool.params.imageLimit,
    imageMinSize: mistralParserTool.params.imageMinSize,
    apiKey: mistralParserTool.params.apiKey,
  },
  request: {
    url: '/api/tools/mistral/parse',
    method: 'POST',
    headers: (params) => {
      return {
        'Content-Type': 'application/json',
        Accept: 'application/json',
        Authorization: `Bearer ${params.apiKey}`,
      }
    },
    body: (params) => {
      if (!params || typeof params !== 'object') {
        throw new Error('Invalid parameters: Parameters must be provided as an object')
      }
      if (!params.apiKey || typeof params.apiKey !== 'string' || params.apiKey.trim() === '') {
        throw new Error('Missing or invalid API key: A valid Mistral API key is required')
      }

      // V3 expects normalized UserFile object via `file` param
      const file = params.file
      if (!file || typeof file !== 'object') {
        throw new Error('File input is required: provide a file upload or file reference')
      }

      const requestBody: Record<string, unknown> = {
        apiKey: params.apiKey,
        resultType: params.resultType || 'markdown',
        file: file,
      }

      if (params.pages) {
        requestBody.pages = params.pages
      }
      if (params.includeImageBase64 !== undefined) {
        requestBody.includeImageBase64 = params.includeImageBase64
      }
      if (params.imageLimit !== undefined) {
        requestBody.imageLimit = params.imageLimit
      }
      if (params.imageMinSize !== undefined) {
        requestBody.imageMinSize = params.imageMinSize
      }

      return requestBody
    },
  },
}
@@ -1093,7 +1093,7 @@ import {
  microsoftTeamsWriteChannelTool,
  microsoftTeamsWriteChatTool,
} from '@/tools/microsoft_teams'
import { mistralParserTool, mistralParserV2Tool } from '@/tools/mistral'
import { mistralParserTool, mistralParserV2Tool, mistralParserV3Tool } from '@/tools/mistral'
import {
  mongodbDeleteTool,
  mongodbExecuteTool,
@@ -2684,6 +2684,7 @@ export const tools: Record<string, ToolConfig> = {
  apollo_email_accounts: apolloEmailAccountsTool,
  mistral_parser: mistralParserTool,
  mistral_parser_v2: mistralParserV2Tool,
  mistral_parser_v3: mistralParserV3Tool,
  reducto_parser: reductoParserTool,
  reducto_parser_v2: reductoParserV2Tool,
  textract_parser: textractParserTool,
bun.lock (17 changed lines)
@@ -1,5 +1,6 @@
{
  "lockfileVersion": 1,
  "configVersion": 0,
  "workspaces": {
    "": {
      "name": "simstudio",
@@ -12,7 +13,7 @@
      "glob": "13.0.0",
      "husky": "9.1.7",
      "lint-staged": "16.0.0",
      "turbo": "2.8.0",
      "turbo": "2.8.3",
    },
  },
  "apps/docs": {
@@ -3429,19 +3430,19 @@

"tunnel-agent": ["tunnel-agent@0.6.0", "", { "dependencies": { "safe-buffer": "^5.0.1" } }, "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w=="],

"turbo": ["turbo@2.8.0", "", { "optionalDependencies": { "turbo-darwin-64": "2.8.0", "turbo-darwin-arm64": "2.8.0", "turbo-linux-64": "2.8.0", "turbo-linux-arm64": "2.8.0", "turbo-windows-64": "2.8.0", "turbo-windows-arm64": "2.8.0" }, "bin": { "turbo": "bin/turbo" } }, "sha512-hYbxnLEdvJF+DLALS+Ia+PbfNtn0sDP0hH2u9AFoskSUDmcVHSrtwHpzdX94MrRJKo9D9tYxY3MyP20gnlrWyA=="],
"turbo": ["turbo@2.8.3", "", { "optionalDependencies": { "turbo-darwin-64": "2.8.3", "turbo-darwin-arm64": "2.8.3", "turbo-linux-64": "2.8.3", "turbo-linux-arm64": "2.8.3", "turbo-windows-64": "2.8.3", "turbo-windows-arm64": "2.8.3" }, "bin": { "turbo": "bin/turbo" } }, "sha512-8Osxz5Tu/Dw2kb31EAY+nhq/YZ3wzmQSmYa1nIArqxgCAldxv9TPlrAiaBUDVnKA4aiPn0OFBD1ACcpc5VFOAQ=="],

"turbo-darwin-64": ["turbo-darwin-64@2.8.0", "", { "os": "darwin", "cpu": "x64" }, "sha512-N7f4PYqz25yk8c5kituk09bJ89tE4wPPqKXgYccT6nbEQnGnrdvlyCHLyqViNObTgjjrddqjb1hmDkv7VcxE0g=="],
"turbo-darwin-64": ["turbo-darwin-64@2.8.3", "", { "os": "darwin", "cpu": "x64" }, "sha512-4kXRLfcygLOeNcP6JquqRLmGB/ATjjfehiojL2dJkL7GFm3SPSXbq7oNj8UbD8XriYQ5hPaSuz59iF1ijPHkTw=="],

"turbo-darwin-arm64": ["turbo-darwin-arm64@2.8.0", "", { "os": "darwin", "cpu": "arm64" }, "sha512-eVzejaP5fn51gmJAPW68U6mSjFaAZ26rPiE36mMdk+tMC4XBGmJHT/fIgrhcrXMvINCl27RF8VmguRe+MBlSuQ=="],
"turbo-darwin-arm64": ["turbo-darwin-arm64@2.8.3", "", { "os": "darwin", "cpu": "arm64" }, "sha512-xF7uCeC0UY0Hrv/tqax0BMbFlVP1J/aRyeGQPZT4NjvIPj8gSPDgFhfkfz06DhUwDg5NgMo04uiSkAWE8WB/QQ=="],

"turbo-linux-64": ["turbo-linux-64@2.8.0", "", { "os": "linux", "cpu": "x64" }, "sha512-ILR45zviYae3icf4cmUISdj8X17ybNcMh3Ms4cRdJF5sS50qDDTv8qeWqO/lPeHsu6r43gVWDofbDZYVuXYL7Q=="],
"turbo-linux-64": ["turbo-linux-64@2.8.3", "", { "os": "linux", "cpu": "x64" }, "sha512-vxMDXwaOjweW/4etY7BxrXCSkvtwh0PbwVafyfT1Ww659SedUxd5rM3V2ZCmbwG8NiCfY7d6VtxyHx3Wh1GoZA=="],

"turbo-linux-arm64": ["turbo-linux-arm64@2.8.0", "", { "os": "linux", "cpu": "arm64" }, "sha512-z9pUa8ENFuHmadPfjEYMRWlXO82t1F/XBDx2XTg+cWWRZHf85FnEB6D4ForJn/GoKEEvwdPhFLzvvhOssom2ug=="],
"turbo-linux-arm64": ["turbo-linux-arm64@2.8.3", "", { "os": "linux", "cpu": "arm64" }, "sha512-mQX7uYBZFkuPLLlKaNe9IjR1JIef4YvY8f21xFocvttXvdPebnq3PK1Zjzl9A1zun2BEuWNUwQIL8lgvN9Pm3Q=="],

"turbo-windows-64": ["turbo-windows-64@2.8.0", "", { "os": "win32", "cpu": "x64" }, "sha512-J6juRSRjmSErEqJCv7nVIq2DgZ2NHXqyeV8NQTFSyIvrThKiWe7FDOO6oYpuR06+C1NW82aoN4qQt4/gYvz25w=="],
"turbo-windows-64": ["turbo-windows-64@2.8.3", "", { "os": "win32", "cpu": "x64" }, "sha512-YLGEfppGxZj3VWcNOVa08h6ISsVKiG85aCAWosOKNUjb6yErWEuydv6/qImRJUI+tDLvDvW7BxopAkujRnWCrw=="],

"turbo-windows-arm64": ["turbo-windows-arm64@2.8.0", "", { "os": "win32", "cpu": "arm64" }, "sha512-qarBZvCu6uka35739TS+y/3CBU3zScrVAfohAkKHG+So+93Wn+5tKArs8HrO2fuTaGou8fMIeTV7V5NgzCVkSQ=="],
"turbo-windows-arm64": ["turbo-windows-arm64@2.8.3", "", { "os": "win32", "cpu": "arm64" }, "sha512-afTUGKBRmOJU1smQSBnFGcbq0iabAPwh1uXu2BVk7BREg30/1gMnJh9DFEQTah+UD3n3ru8V55J83RQNFfqoyw=="],

"tweetnacl": ["tweetnacl@0.14.5", "", {}, "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA=="],
docs/COPILOT_SERVER_REFACTOR.md (new file, 927 lines)
@@ -0,0 +1,927 @@
# Copilot Server-Side Refactor Plan

> **Goal**: Move copilot orchestration logic from the browser (React/Zustand) to the Next.js server, enabling both headless API access and a simplified interactive client.

## Table of Contents

1. [Executive Summary](#executive-summary)
2. [Current Architecture](#current-architecture)
3. [Target Architecture](#target-architecture)
4. [Scope & Boundaries](#scope--boundaries)
5. [Module Design](#module-design)
6. [Implementation Plan](#implementation-plan)
7. [API Contracts](#api-contracts)
8. [Migration Strategy](#migration-strategy)
9. [Testing Strategy](#testing-strategy)
10. [Risks & Mitigations](#risks--mitigations)
11. [File Inventory](#file-inventory)

---

## Executive Summary

### Problem

The current copilot implementation in Sim has all orchestration logic in the browser:
- SSE stream parsing happens in the React client
- Tool execution is triggered from the browser
- OAuth tokens are sent to the client
- No headless/API access is possible
- The Zustand store is ~4,200 lines of complex async logic

### Solution

Move orchestration to the Next.js server:
- Server parses SSE from copilot backend
- Server executes tools directly (no HTTP round-trips)
- Server forwards events to client (if attached)
- Headless API returns JSON response
- Client store becomes a thin UI layer (~600 lines)

### Benefits

| Aspect | Before | After |
|--------|--------|-------|
| Security | OAuth tokens in browser | Tokens stay server-side |
| Headless access | Not possible | Full API support |
| Store complexity | ~4,200 lines | ~600 lines |
| Tool execution | Browser-initiated | Server-side |
| Testing | Complex async | Simple state |
| Bundle size | Large (tool classes) | Minimal |

---

## Current Architecture

```
┌─────────────────────────────────────────────────────────────────────────────┐
│ BROWSER (React) │
├─────────────────────────────────────────────────────────────────────────────┤
│ │
│ ┌─────────────────────────────────────────────────────────────────────────┐│
│ │ Copilot Store (4,200 lines) ││
│ │ ││
│ │ • SSE stream parsing (parseSSEStream) ││
│ │ • Event handlers (sseHandlers, subAgentSSEHandlers) ││
│ │ • Tool execution logic ││
│ │ • Client tool instantiation ││
│ │ • Content block processing ││
│ │ • State management ││
│ │ • UI state ││
│ └─────────────────────────────────────────────────────────────────────────┘│
│ │ │
│ │ HTTP calls for tool execution │
│ ▼ │
└─────────────────────────────────────────────────────────────────────────────┘
│
▼
┌─────────────────────────────────────────────────────────────────────────────┐
│ NEXT.JS SERVER │
├─────────────────────────────────────────────────────────────────────────────┤
│ │
│ /api/copilot/chat - Proxy to copilot backend (pass-through) │
│ /api/copilot/execute-tool - Execute integration tools │
│ /api/copilot/confirm - Update Redis with tool status │
│ /api/copilot/tools/mark-complete - Notify copilot backend │
│ /api/copilot/execute-copilot-server-tool - Execute server tools │
│ │
└─────────────────────────────────────────────────────────────────────────────┘
│
▼
┌─────────────────────────────────────────────────────────────────────────────┐
│ COPILOT BACKEND (Go) │
│ copilot.sim.ai │
├─────────────────────────────────────────────────────────────────────────────┤
│ │
│ • LLM orchestration │
│ • Subagent system (plan, edit, debug, etc.) │
│ • Tool definitions │
│ • Conversation management │
│ • SSE streaming │
│ │
└─────────────────────────────────────────────────────────────────────────────┘
```

### Current Flow (Interactive)

1. User sends message in UI
2. Store calls `/api/copilot/chat`
3. Chat route proxies to copilot backend, streams SSE back
4. **Store parses SSE in browser**
5. On `tool_call` event:
   - Store decides if tool needs confirmation
   - Store calls `/api/copilot/execute-tool` or `/api/copilot/execute-copilot-server-tool`
   - Store calls `/api/copilot/tools/mark-complete`
6. Store updates UI state

### Problems with Current Flow

1. **No headless access**: Must have browser client
2. **Security**: OAuth tokens sent to browser for tool execution
3. **Complexity**: All orchestration logic in Zustand store
4. **Performance**: Multiple HTTP round-trips from browser
5. **Reliability**: Browser can disconnect mid-operation
6. **Testing**: Hard to test async browser logic

---

## Target Architecture

```
┌─────────────────────────────────────────────────────────────────────────────┐
│ BROWSER (React) │
├─────────────────────────────────────────────────────────────────────────────┤
│ │
│ ┌─────────────────────────────────────────────────────────────────────────┐│
│ │ Copilot Store (~600 lines) ││
│ │ ││
│ │ • UI state (messages, toolCalls display) ││
│ │ • Event listener (receive server events) ││
│ │ • User actions (send message, confirm/reject) ││
│ │ • Simple API calls ││
│ └─────────────────────────────────────────────────────────────────────────┘│
│ │ │
│ │ SSE events from server │
│ │ │
└─────────────────────────────────────────────────────────────────────────────┘
▲
│ (Optional - headless mode has no client)
│
┌─────────────────────────────────────────────────────────────────────────────┐
│ NEXT.JS SERVER │
├─────────────────────────────────────────────────────────────────────────────┤
│ │
│ ┌─────────────────────────────────────────────────────────────────────────┐│
│ │ Orchestrator Module (NEW) ││
│ │ lib/copilot/orchestrator/ ││
│ │ ││
│ │ • SSE stream parsing ││
│ │ • Event handlers ││
│ │ • Tool execution (direct function calls) ││
│ │ • Response building ││
│ │ • Event forwarding (to client if attached) ││
│ └─────────────────────────────────────────────────────────────────────────┘│
│ │ │
│ ┌──────┴──────┐ │
│ │ │ │
│ ▼ ▼ │
│ /api/copilot/chat /api/v1/copilot/chat │
│ (Interactive) (Headless) │
│ - Session auth - API key auth │
│ - SSE to client - JSON response │
│ │
└─────────────────────────────────────────────────────────────────────────────┘
│
│ (Single external HTTP call)
▼
┌─────────────────────────────────────────────────────────────────────────────┐
│ COPILOT BACKEND (Go) │
│ (UNCHANGED - no modifications) │
└─────────────────────────────────────────────────────────────────────────────┘
```

### Target Flow (Headless)

1. External client calls `POST /api/v1/copilot/chat` with API key
2. Orchestrator calls copilot backend
3. **Server parses SSE stream**
4. **Server executes tools directly** (no HTTP)
5. Server notifies copilot backend (mark-complete)
6. Server returns JSON response

### Target Flow (Interactive)

1. User sends message in UI
2. Store calls `/api/copilot/chat`
3. **Server orchestrates everything**
4. Server forwards events to client via SSE
5. Client just updates UI from events
6. Server returns when complete

---

## Scope & Boundaries

### In Scope

| Item | Description |
|------|-------------|
| Orchestrator module | New module in `lib/copilot/orchestrator/` |
| Headless API route | New route `POST /api/v1/copilot/chat` |
| SSE parsing | Move from store to server |
| Tool execution | Direct function calls on server |
| Event forwarding | SSE to client (interactive mode) |
| Store simplification | Reduce to UI-only logic |

### Out of Scope

| Item | Reason |
|------|--------|
| Copilot backend (Go) | Separate repo, working correctly |
| Tool definitions | Already work, just called differently |
| LLM providers | Handled by copilot backend |
| Subagent system | Handled by copilot backend |

### Boundaries

```
┌─────────────────────────────────────┐
│ MODIFICATION ZONE │
│ │
┌────────────────┼─────────────────────────────────────┼────────────────┐
│ │ │ │
│ UNCHANGED │ apps/sim/ │ UNCHANGED │
│ │ ├── lib/copilot/orchestrator/ │ │
│ copilot/ │ │ └── (NEW) │ apps/sim/ │
│ (Go backend) │ ├── app/api/v1/copilot/ │ tools/ │
│ │ │ └── (NEW) │ (definitions)│
│ │ ├── app/api/copilot/chat/ │ │
│ │ │ └── (MODIFIED) │ │
│ │ └── stores/panel/copilot/ │ │
│ │ └── (SIMPLIFIED) │ │
│ │ │ │
└────────────────┼─────────────────────────────────────┼────────────────┘
│ │
└─────────────────────────────────────┘
```

---

## Module Design

### Directory Structure

```
apps/sim/lib/copilot/orchestrator/
├── index.ts            # Main orchestrator function
├── types.ts            # Type definitions
├── sse-parser.ts       # Parse SSE stream from copilot backend
├── sse-handlers.ts     # Handle each SSE event type
├── tool-executor.ts    # Execute tools directly (no HTTP)
├── persistence.ts      # Database and Redis operations
└── response-builder.ts # Build final response
```

### Module Responsibilities

#### `types.ts`

Defines all types used by the orchestrator:

```typescript
// SSE Events
interface SSEEvent { type, data, subagent?, toolCallId?, toolName? }
type SSEEventType = 'content' | 'tool_call' | 'tool_result' | 'done' | ...

// Tool State
interface ToolCallState { id, name, status, params?, result?, error? }
type ToolCallStatus = 'pending' | 'executing' | 'success' | 'error' | 'skipped'

// Streaming Context (internal state during orchestration)
interface StreamingContext {
  chatId?, conversationId?, messageId
  accumulatedContent, contentBlocks
  toolCalls: Map<string, ToolCallState>
  streamComplete, errors[]
}

// Orchestrator API
interface OrchestratorRequest { message, workflowId, userId, chatId?, mode?, ... }
interface OrchestratorOptions { autoExecuteTools?, onEvent?, timeout?, ... }
interface OrchestratorResult { success, content, toolCalls[], chatId?, error? }

// Execution Context (passed to tool executors)
interface ExecutionContext { userId, workflowId, workspaceId?, decryptedEnvVars? }
```

#### `sse-parser.ts`

Parses SSE stream into typed events:

```typescript
async function* parseSSEStream(
  reader: ReadableStreamDefaultReader,
  decoder: TextDecoder,
  abortSignal?: AbortSignal
): AsyncGenerator<SSEEvent>
```

- Handles buffering for partial lines
- Parses JSON from `data:` lines
- Yields typed `SSEEvent` objects
- Supports abort signal
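
As a concrete illustration, a minimal sketch of that parsing loop follows. It assumes the `SSEEvent` shape from `types.ts`; buffer and error handling are deliberately simplified.

```typescript
// Minimal sketch of the SSE parsing loop, assuming the SSEEvent type above.
async function* parseSSEStream(
  reader: ReadableStreamDefaultReader<Uint8Array>,
  decoder: TextDecoder,
  abortSignal?: AbortSignal
): AsyncGenerator<SSEEvent> {
  let buffer = ''
  while (true) {
    if (abortSignal?.aborted) break
    const { done, value } = await reader.read()
    if (done) break
    buffer += decoder.decode(value, { stream: true })
    const lines = buffer.split('\n')
    buffer = lines.pop() ?? '' // keep the trailing partial line buffered
    for (const line of lines) {
      if (!line.startsWith('data: ')) continue
      try {
        yield JSON.parse(line.slice(6)) as SSEEvent
      } catch {
        // Malformed JSON: skip the line rather than aborting the stream
      }
    }
  }
}
```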

#### `sse-handlers.ts`

Handles each SSE event type:

```typescript
const sseHandlers: Record<SSEEventType, SSEHandler> = {
  content: (event, context) => { /* append to accumulated content */ },
  tool_call: async (event, context, execContext, options) => {
    /* track tool, execute if autoExecuteTools */
  },
  tool_result: (event, context) => { /* update tool status */ },
  tool_generating: (event, context) => { /* create pending tool */ },
  reasoning: (event, context) => { /* handle thinking blocks */ },
  done: (event, context) => { /* mark stream complete */ },
  error: (event, context) => { /* record error */ },
  // ... etc
}

const subAgentHandlers: Record<SSEEventType, SSEHandler> = {
  // Handlers for events within subagent context
}
```

#### `tool-executor.ts`

Executes tools directly without HTTP:

```typescript
// Main entry point
async function executeToolServerSide(
  toolCall: ToolCallState,
  context: ExecutionContext
): Promise<ToolCallResult>

// Server tools (edit_workflow, search_documentation, etc.)
async function executeServerToolDirect(
  toolName: string,
  params: Record<string, any>,
  context: ExecutionContext
): Promise<ToolCallResult>

// Integration tools (slack_send, gmail_read, etc.)
async function executeIntegrationToolDirect(
  toolCallId: string,
  toolName: string,
  toolConfig: ToolConfig,
  params: Record<string, any>,
  context: ExecutionContext
): Promise<ToolCallResult>

// Notify copilot backend (external HTTP - required)
async function markToolComplete(
  toolCallId: string,
  toolName: string,
  status: number,
  message?: any,
  data?: any
): Promise<boolean>

// Prepare cached context for tool execution
async function prepareExecutionContext(
  userId: string,
  workflowId: string
): Promise<ExecutionContext>
```

**Key principle**: Internal tool execution uses direct function calls. Only `markToolComplete` makes an HTTP call (to the copilot backend, which is external).
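
A sketch of how the entry point might dispatch between the two paths. The `SERVER_TOOL_IDS` set, the registry lookup, and the status codes are assumptions for illustration, not a settled design.

```typescript
// Illustrative dispatch sketch; names below are assumptions, not final API.
import { tools } from '@/tools/registry'

const SERVER_TOOL_IDS = new Set(['edit_workflow', 'search_documentation'])

async function executeToolServerSide(
  toolCall: ToolCallState,
  context: ExecutionContext
): Promise<ToolCallResult> {
  try {
    const result = SERVER_TOOL_IDS.has(toolCall.name)
      ? await executeServerToolDirect(toolCall.name, toolCall.params ?? {}, context)
      : await executeIntegrationToolDirect(
          toolCall.id,
          toolCall.name,
          tools[toolCall.name],
          toolCall.params ?? {},
          context
        )
    // The single external HTTP call: tell the copilot backend the tool finished
    await markToolComplete(toolCall.id, toolCall.name, 200, undefined, result)
    return result
  } catch (error) {
    await markToolComplete(toolCall.id, toolCall.name, 500, String(error))
    throw error
  }
}
```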

#### `persistence.ts`

Database and Redis operations:

```typescript
// Chat persistence
async function createChat(params): Promise<{ id: string }>
async function loadChat(chatId, userId): Promise<Chat | null>
async function saveMessages(chatId, messages, options?): Promise<void>
async function updateChatConversationId(chatId, conversationId): Promise<void>

// Tool confirmation (Redis)
async function setToolConfirmation(toolCallId, status, message?): Promise<boolean>
async function getToolConfirmation(toolCallId): Promise<Confirmation | null>
```
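
A minimal sketch of the Redis-backed confirmation helpers. The client import path, key prefix, and 24-hour TTL are assumptions for illustration.

```typescript
// Sketch only: key prefix, TTL, and getRedisClient path are assumptions.
import { getRedisClient } from '@/lib/redis'

const confirmationKey = (toolCallId: string) => `copilot:tool-confirm:${toolCallId}`

async function setToolConfirmation(
  toolCallId: string,
  status: 'accepted' | 'rejected' | 'error',
  message?: string
): Promise<boolean> {
  const redis = getRedisClient()
  await redis.set(
    confirmationKey(toolCallId),
    JSON.stringify({ status, message, updatedAt: Date.now() }),
    'EX',
    86400 // expire after 24 hours
  )
  return true
}

async function getToolConfirmation(toolCallId: string) {
  const redis = getRedisClient()
  const raw = await redis.get(confirmationKey(toolCallId))
  return raw ? JSON.parse(raw) : null
}
```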

#### `index.ts`

Main orchestrator function:

```typescript
async function orchestrateCopilotRequest(
  request: OrchestratorRequest,
  options: OrchestratorOptions = {}
): Promise<OrchestratorResult> {

  // 1. Prepare execution context (cache env vars, etc.)
  const execContext = await prepareExecutionContext(userId, workflowId)

  // 2. Handle chat creation/loading
  let chatId = await resolveChat(request)

  // 3. Build request payload for copilot backend
  const payload = buildCopilotPayload(request)

  // 4. Call copilot backend
  const response = await fetch(COPILOT_URL, { method: 'POST', body: JSON.stringify(payload) })

  // 5. Create streaming context
  const context = createStreamingContext(chatId)

  // 6. Parse and handle SSE stream
  for await (const event of parseSSEStream(response.body)) {
    // Forward to client if attached
    options.onEvent?.(event)

    // Handle event
    const handler = getHandler(event)
    await handler(event, context, execContext, options)

    if (context.streamComplete) break
  }

  // 7. Persist to database
  await persistChat(chatId, context)

  // 8. Build and return result
  return buildResult(context)
}
```

---

## Implementation Plan

### Phase 1: Create Orchestrator Module (3-4 days)

**Goal**: Build the orchestrator module that can run independently.

#### Tasks

1. **Create `types.ts`** (~200 lines)
   - [ ] Define SSE event types
   - [ ] Define tool call state types
   - [ ] Define streaming context type
   - [ ] Define orchestrator request/response types
   - [ ] Define execution context type

2. **Create `sse-parser.ts`** (~80 lines)
   - [ ] Extract parsing logic from store.ts
   - [ ] Add abort signal support
   - [ ] Add error handling

3. **Create `persistence.ts`** (~120 lines)
   - [ ] Extract DB operations from chat route
   - [ ] Extract Redis operations from confirm route
   - [ ] Add chat creation/loading
   - [ ] Add message saving

4. **Create `tool-executor.ts`** (~300 lines)
   - [ ] Create `executeToolServerSide()` main entry
   - [ ] Create `executeServerToolDirect()` for server tools
   - [ ] Create `executeIntegrationToolDirect()` for integration tools
   - [ ] Create `markToolComplete()` for copilot backend notification
   - [ ] Create `prepareExecutionContext()` for caching
   - [ ] Handle OAuth token resolution
   - [ ] Handle env var resolution

5. **Create `sse-handlers.ts`** (~350 lines)
   - [ ] Extract handlers from store.ts
   - [ ] Adapt for server-side context
   - [ ] Add tool execution integration
   - [ ] Add subagent handlers

6. **Create `index.ts`** (~250 lines)
   - [ ] Create `orchestrateCopilotRequest()` main function
   - [ ] Wire together all modules
   - [ ] Add timeout handling
   - [ ] Add abort signal support
   - [ ] Add event forwarding

#### Deliverables

- Complete `lib/copilot/orchestrator/` module
- Unit tests for each component
- Integration test for full orchestration

### Phase 2: Create Headless API Route (1 day)

**Goal**: Create API endpoint for headless copilot access.

#### Tasks

1. **Create route** `app/api/v1/copilot/chat/route.ts` (~100 lines)
   - [ ] Add API key authentication
   - [ ] Parse and validate request
   - [ ] Call orchestrator
   - [ ] Return JSON response

2. **Add to API documentation**
   - [ ] Document request format
   - [ ] Document response format
   - [ ] Document error codes

#### Deliverables

- Working `POST /api/v1/copilot/chat` endpoint
- API documentation
- E2E test

### Phase 3: Wire Interactive Route (2 days)

**Goal**: Use orchestrator for existing interactive flow.

#### Tasks

1. **Modify `/api/copilot/chat/route.ts`**
   - [ ] Add feature flag for new vs old flow
   - [ ] Call orchestrator with `onEvent` callback
   - [ ] Forward events to client via SSE
   - [ ] Maintain backward compatibility

2. **Test both flows**
   - [ ] Verify interactive works with new orchestrator
   - [ ] Verify old flow still works (feature flag off)

#### Deliverables

- Interactive route using orchestrator
- Feature flag for gradual rollout
- No breaking changes

### Phase 4: Simplify Client Store (2-3 days)

**Goal**: Remove orchestration logic from client, keep UI-only.

#### Tasks

1. **Create simplified store** (new file or gradual refactor)
   - [ ] Keep: UI state, messages, tool display
   - [ ] Keep: Simple API calls
   - [ ] Keep: Event listener
   - [ ] Remove: SSE parsing
   - [ ] Remove: Tool execution logic
   - [ ] Remove: Client tool instantiators

2. **Update components**
   - [ ] Update components to use simplified store
   - [ ] Remove tool execution from UI components
   - [ ] Simplify tool display components

3. **Remove dead code**
   - [ ] Remove unused imports
   - [ ] Remove unused helper functions
   - [ ] Remove client tool classes (if no longer needed)

#### Deliverables

- Simplified store (~600 lines)
- Updated components
- Reduced bundle size

### Phase 5: Testing & Polish (2-3 days)

#### Tasks

1. **E2E testing**
   - [ ] Test headless API with various prompts
   - [ ] Test interactive with various prompts
   - [ ] Test tool execution scenarios
   - [ ] Test error handling
   - [ ] Test abort/timeout scenarios

2. **Performance testing**
   - [ ] Compare latency (old vs new)
   - [ ] Check memory usage
   - [ ] Check for connection issues

3. **Documentation**
   - [ ] Update developer docs
   - [ ] Add architecture diagram
   - [ ] Document new API

#### Deliverables

- Comprehensive test suite
- Performance benchmarks
- Complete documentation

---

## API Contracts

### Headless API

#### Request

```http
POST /api/v1/copilot/chat
Content-Type: application/json
X-API-Key: sim_xxx

{
  "message": "Create a Slack notification workflow",
  "workflowId": "wf_abc123",
  "chatId": "chat_xyz",        // Optional: continue existing chat
  "mode": "agent",             // Optional: "agent" | "ask" | "plan"
  "model": "claude-4-sonnet",  // Optional
  "autoExecuteTools": true,    // Optional: default true
  "timeout": 300000            // Optional: default 5 minutes
}
```

#### Response (Success)

```json
{
  "success": true,
  "content": "I've created a Slack notification workflow that...",
  "toolCalls": [
    {
      "id": "tc_001",
      "name": "search_patterns",
      "status": "success",
      "params": { "query": "slack notification" },
      "result": { "patterns": [...] },
      "durationMs": 234
    },
    {
      "id": "tc_002",
      "name": "edit_workflow",
      "status": "success",
      "params": { "operations": [...] },
      "result": { "blocksAdded": 3 },
      "durationMs": 1523
    }
  ],
  "chatId": "chat_xyz",
  "conversationId": "conv_123"
}
```

#### Response (Error)

```json
{
  "success": false,
  "error": "Workflow not found",
  "content": "",
  "toolCalls": []
}
```

#### Error Codes

| Status | Error | Description |
|--------|-------|-------------|
| 400 | Invalid request | Missing required fields |
| 401 | Unauthorized | Invalid or missing API key |
| 404 | Workflow not found | Workflow ID doesn't exist |
| 500 | Internal error | Server-side failure |
| 504 | Timeout | Request exceeded timeout |
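
For reference, a minimal client call against this contract might look like the following sketch (the base URL is a placeholder; endpoint, header, and field names are as specified above):

```typescript
// Minimal headless client sketch against the contract above.
// https://sim.example.com is a placeholder base URL.
async function runCopilot(message: string, workflowId: string) {
  const res = await fetch('https://sim.example.com/api/v1/copilot/chat', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'X-API-Key': process.env.SIM_API_KEY ?? '',
    },
    body: JSON.stringify({ message, workflowId, autoExecuteTools: true }),
  })
  if (!res.ok) throw new Error(`Copilot request failed: ${res.status}`)
  const result = await res.json()
  // result.toolCalls lists each executed tool with status and durationMs
  return result
}
```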

### Interactive API (Existing - Modified)

The existing `/api/copilot/chat` endpoint continues to work but now uses the orchestrator internally. SSE events forwarded to the client keep the same format.

---

## Migration Strategy

### Rollout Plan

```
Week 1: Phase 1 (Orchestrator)
├── Day 1-2: Types + SSE Parser
├── Day 3: Tool Executor
└── Day 4-5: Handlers + Main Orchestrator

Week 2: Phase 2-3 (Routes)
├── Day 1: Headless API route
├── Day 2-3: Wire interactive route
└── Day 4-5: Testing both modes

Week 3: Phase 4-5 (Cleanup)
├── Day 1-3: Simplify store
├── Day 4: Testing
└── Day 5: Documentation
```

### Feature Flags

```typescript
// lib/copilot/config.ts

export const COPILOT_FLAGS = {
  // Use new orchestrator for interactive mode
  USE_SERVER_ORCHESTRATOR: process.env.COPILOT_USE_SERVER_ORCHESTRATOR === 'true',

  // Enable headless API
  ENABLE_HEADLESS_API: process.env.COPILOT_ENABLE_HEADLESS_API === 'true',
}
```

### Rollback Plan

If issues arise:
1. Set `COPILOT_USE_SERVER_ORCHESTRATOR=false`
2. Interactive mode falls back to old client-side flow
3. Headless API returns 503 Service Unavailable
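
A sketch of how the headless route might guard on these flags to implement that rollback behavior (the route body shown is a stub, not the final handler):

```typescript
// Sketch of the flag guard at the top of the headless route handler.
import { NextResponse } from 'next/server'
import { COPILOT_FLAGS } from '@/lib/copilot/config'

export async function POST(req: Request) {
  if (!COPILOT_FLAGS.ENABLE_HEADLESS_API) {
    // Rollback path: headless API is switched off
    return NextResponse.json(
      { success: false, error: 'Headless copilot API is disabled' },
      { status: 503 }
    )
  }
  // ... authenticate the API key, validate the body, call the orchestrator
  return NextResponse.json({ success: true, content: '', toolCalls: [] })
}
```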

---

## Testing Strategy

### Unit Tests

```
lib/copilot/orchestrator/
├── __tests__/
│   ├── sse-parser.test.ts
│   ├── sse-handlers.test.ts
│   ├── tool-executor.test.ts
│   ├── persistence.test.ts
│   └── index.test.ts
```

#### SSE Parser Tests

```typescript
describe('parseSSEStream', () => {
  it('parses content events')
  it('parses tool_call events')
  it('handles partial lines')
  it('handles malformed JSON')
  it('respects abort signal')
})
```
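
One of these cases fleshed out, as a vitest sketch; the `ReadableStream` fixture and the exact event shape are illustrative:

```typescript
// Sketch of the 'parses content events' case with a chunk split mid-event.
import { expect, it } from 'vitest'
import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser'

it('parses content events', async () => {
  const body = new ReadableStream<Uint8Array>({
    start(controller) {
      // Deliberately split one SSE event across two chunks
      controller.enqueue(new TextEncoder().encode('data: {"type":"content","data":"Hel'))
      controller.enqueue(new TextEncoder().encode('lo"}\n'))
      controller.close()
    },
  })

  const events: unknown[] = []
  for await (const event of parseSSEStream(body.getReader(), new TextDecoder())) {
    events.push(event)
  }

  // The split chunk must still yield exactly one parsed event
  expect(events).toEqual([{ type: 'content', data: 'Hello' }])
})
```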

#### Tool Executor Tests

```typescript
describe('executeToolServerSide', () => {
  it('executes server tools directly')
  it('executes integration tools with OAuth')
  it('resolves env var references')
  it('handles tool not found')
  it('handles execution errors')
})
```

### Integration Tests

```typescript
describe('orchestrateCopilotRequest', () => {
  it('handles simple message without tools')
  it('handles message with single tool call')
  it('handles message with multiple tool calls')
  it('handles subagent tool calls')
  it('handles stream errors')
  it('respects timeout')
  it('forwards events to callback')
})
```

### E2E Tests

```typescript
describe('POST /api/v1/copilot/chat', () => {
  it('returns 401 without API key')
  it('returns 400 with invalid request')
  it('executes simple ask query')
  it('executes workflow modification')
  it('handles tool execution')
})
```

---

## Risks & Mitigations

### Risk 1: Breaking Interactive Mode

**Risk**: Refactoring could break the existing interactive copilot.

**Mitigation**:
- Feature flag for gradual rollout
- Keep old code path available
- Extensive E2E testing
- Staged deployment (internal → beta → production)

### Risk 2: Tool Execution Differences

**Risk**: Tool behavior could differ between client-side and server-side execution.

**Mitigation**:
- Reuse existing tool execution logic (same functions)
- Compare outputs in parallel testing
- Log discrepancies for investigation

### Risk 3: Performance Regression

**Risk**: Server-side orchestration could be slower.

**Mitigation**:
- Expected to actually be faster (no browser round-trips)
- Benchmark before/after
- Profile critical paths

### Risk 4: Memory Usage

**Risk**: Server accumulates state during long-running requests.

**Mitigation**:
- Set reasonable timeouts
- Clean up context after request
- Monitor memory in production

### Risk 5: Connection Issues

**Risk**: Long-running SSE connections could drop.

**Mitigation**:
- Implement reconnection logic
- Save checkpoints to resume
- Handle partial completions gracefully

---

## File Inventory

### New Files

| File | Lines | Description |
|------|-------|-------------|
| `lib/copilot/orchestrator/types.ts` | ~200 | Type definitions |
| `lib/copilot/orchestrator/sse-parser.ts` | ~80 | SSE stream parsing |
| `lib/copilot/orchestrator/sse-handlers.ts` | ~350 | Event handlers |
| `lib/copilot/orchestrator/tool-executor.ts` | ~300 | Tool execution |
| `lib/copilot/orchestrator/persistence.ts` | ~120 | DB/Redis operations |
| `lib/copilot/orchestrator/index.ts` | ~250 | Main orchestrator |
| `app/api/v1/copilot/chat/route.ts` | ~100 | Headless API |
| **Total New** | **~1,400** | |

### Modified Files

| File | Change |
|------|--------|
| `app/api/copilot/chat/route.ts` | Use orchestrator (optional) |
| `stores/panel/copilot/store.ts` | Simplify to ~600 lines |

### Deleted Code (from store.ts)

| Section | Lines Removed |
|---------|---------------|
| SSE parsing logic | ~150 |
| `sseHandlers` object | ~750 |
| `subAgentSSEHandlers` | ~280 |
| Tool execution logic | ~400 |
| Client tool instantiators | ~120 |
| Content block helpers | ~200 |
| Streaming context | ~100 |
| **Total Removed** | **~2,000** |

### Net Change

```
New code:      +1,400 lines (orchestrator module)
Removed code:  -2,000 lines (from store)
Modified code:   ~200 lines (route changes)
───────────────────────────────────────
Net change:      -400 lines (cleaner, more maintainable)
```

---

## Appendix: Code Extraction Map

### From `stores/panel/copilot/store.ts`

| Source Lines | Destination | Notes |
|--------------|-------------|-------|
| 900-1050 (parseSSEStream) | `sse-parser.ts` | Adapt for server |
| 1120-1867 (sseHandlers) | `sse-handlers.ts` | Remove Zustand deps |
| 1940-2217 (subAgentSSEHandlers) | `sse-handlers.ts` | Merge with above |
| 1365-1583 (tool execution) | `tool-executor.ts` | Direct calls |
| 330-380 (StreamingContext) | `types.ts` | Clean up |
| 3328-3648 (handleStreamingResponse) | `index.ts` | Main loop |

### From `app/api/copilot/execute-tool/route.ts`

| Source Lines | Destination | Notes |
|--------------|-------------|-------|
| 30-247 (POST handler) | `tool-executor.ts` | Extract core logic |

### From `app/api/copilot/confirm/route.ts`

| Source Lines | Destination | Notes |
|--------------|-------------|-------|
| 28-89 (updateToolCallStatus) | `persistence.ts` | Redis operations |

---

## Approval & Sign-off

- [ ] Technical review complete
- [ ] Security review complete
- [ ] Performance impact assessed
- [ ] Rollback plan approved
- [ ] Testing plan approved

---

*Document created: January 2026*
*Last updated: January 2026*
@@ -127,6 +127,18 @@ app:
RATE_LIMIT_WINDOW_MS: "60000" # Rate limit window duration (1 minute)
RATE_LIMIT_FREE_SYNC: "50" # Sync API executions per minute
RATE_LIMIT_FREE_ASYNC: "200" # Async API executions per minute

# Execution Timeout Configuration (in seconds)
# Sync timeouts apply to synchronous API calls
EXECUTION_TIMEOUT_FREE: "300" # Free tier sync timeout (5 minutes)
EXECUTION_TIMEOUT_PRO: "3000" # Pro tier sync timeout (50 minutes)
EXECUTION_TIMEOUT_TEAM: "3000" # Team tier sync timeout (50 minutes)
EXECUTION_TIMEOUT_ENTERPRISE: "3000" # Enterprise tier sync timeout (50 minutes)
# Async timeouts apply to async/background job executions
EXECUTION_TIMEOUT_ASYNC_FREE: "5400" # Free tier async timeout (90 minutes)
EXECUTION_TIMEOUT_ASYNC_PRO: "5400" # Pro tier async timeout (90 minutes)
EXECUTION_TIMEOUT_ASYNC_TEAM: "5400" # Team tier async timeout (90 minutes)
EXECUTION_TIMEOUT_ASYNC_ENTERPRISE: "5400" # Enterprise tier async timeout (90 minutes)

# UI Branding & Whitelabeling Configuration
NEXT_PUBLIC_BRAND_NAME: "Sim" # Custom brand name
@@ -42,7 +42,7 @@
  "glob": "13.0.0",
  "husky": "9.1.7",
  "lint-staged": "16.0.0",
  "turbo": "2.8.0"
  "turbo": "2.8.3"
},
"lint-staged": {
  "*.{js,jsx,ts,tsx,json,css,scss}": [
packages/db/migrations/0151_stale_screwball.sql (new file, 19 lines)
@@ -0,0 +1,19 @@
CREATE TABLE "async_jobs" (
  "id" text PRIMARY KEY NOT NULL,
  "type" text NOT NULL,
  "payload" jsonb NOT NULL,
  "status" text DEFAULT 'pending' NOT NULL,
  "created_at" timestamp DEFAULT now() NOT NULL,
  "started_at" timestamp,
  "completed_at" timestamp,
  "run_at" timestamp,
  "attempts" integer DEFAULT 0 NOT NULL,
  "max_attempts" integer DEFAULT 3 NOT NULL,
  "error" text,
  "output" jsonb,
  "metadata" jsonb DEFAULT '{}' NOT NULL,
  "updated_at" timestamp DEFAULT now() NOT NULL
);
--> statement-breakpoint
CREATE INDEX "async_jobs_status_started_at_idx" ON "async_jobs" USING btree ("status","started_at");--> statement-breakpoint
CREATE INDEX "async_jobs_status_completed_at_idx" ON "async_jobs" USING btree ("status","completed_at");
packages/db/migrations/meta/0151_snapshot.json (new file, 10500 lines)

File diff suppressed because it is too large
@@ -1051,6 +1051,13 @@
  "when": 1769897862156,
  "tag": "0150_flimsy_hemingway",
  "breakpoints": true
},
{
  "idx": 151,
  "version": "7",
  "when": 1770239332381,
  "tag": "0151_stale_screwball",
  "breakpoints": true
}
]
}
@@ -2124,3 +2124,34 @@ export const permissionGroupMember = pgTable(
    userIdUnique: uniqueIndex('permission_group_member_user_id_unique').on(table.userId),
  })
)

/**
 * Async Jobs - Queue for background job processing (Redis/DB backends)
 * Used when trigger.dev is not available for async workflow executions
 */
export const asyncJobs = pgTable(
  'async_jobs',
  {
    id: text('id').primaryKey(),
    type: text('type').notNull(),
    payload: jsonb('payload').notNull(),
    status: text('status').notNull().default('pending'),
    createdAt: timestamp('created_at').notNull().defaultNow(),
    startedAt: timestamp('started_at'),
    completedAt: timestamp('completed_at'),
    runAt: timestamp('run_at'),
    attempts: integer('attempts').notNull().default(0),
    maxAttempts: integer('max_attempts').notNull().default(3),
    error: text('error'),
    output: jsonb('output'),
    metadata: jsonb('metadata').notNull().default('{}'),
    updatedAt: timestamp('updated_at').notNull().defaultNow(),
  },
  (table) => ({
    statusStartedAtIdx: index('async_jobs_status_started_at_idx').on(table.status, table.startedAt),
    statusCompletedAtIdx: index('async_jobs_status_completed_at_idx').on(
      table.status,
      table.completedAt
    ),
  })
)
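
For context, enqueuing a job against this table with drizzle might look like the following sketch; the `db` import path and the `job_` ID scheme are assumptions for illustration, not code from this change.

```typescript
// Sketch: enqueue an async workflow execution into async_jobs.
// The db import path and ID scheme are illustrative assumptions.
import { nanoid } from 'nanoid'
import { db } from '@sim/db'
import { asyncJobs } from '@sim/db/schema'

async function enqueueAsyncJob(type: string, payload: Record<string, unknown>) {
  const id = `job_${nanoid()}`
  await db.insert(asyncJobs).values({
    id,
    type,
    payload,
    status: 'pending', // picked up later by the background worker
  })
  return id
}
```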

@@ -45,12 +45,14 @@ export * from './assertions'
export * from './builders'
export * from './factories'
export {
  clearRedisMocks,
  createEnvMock,
  createMockDb,
  createMockFetch,
  createMockFormDataRequest,
  createMockGetEnv,
  createMockLogger,
  createMockRedis,
  createMockRequest,
  createMockResponse,
  createMockSocket,
@@ -63,6 +65,7 @@ export {
  loggerMock,
  type MockAuthResult,
  type MockFetchResponse,
  type MockRedis,
  type MockUser,
  mockAuth,
  mockCommonSchemas,
@@ -259,6 +259,38 @@ export const mockBlockConfigs: Record<string, any> = {
    ],
    inputs: {},
  },
  wait: {
    name: 'Wait',
    description: 'Pause workflow execution for a specified time delay',
    category: 'blocks',
    bgColor: '#F59E0B',
    tools: {
      access: [],
    },
    subBlocks: [
      {
        id: 'timeValue',
        title: 'Wait Amount',
        type: 'short-input',
        placeholder: '10',
        required: true,
      },
      {
        id: 'timeUnit',
        title: 'Unit',
        type: 'dropdown',
        required: true,
      },
    ],
    inputs: {
      timeValue: { type: 'string' },
      timeUnit: { type: 'string' },
    },
    outputs: {
      waitDuration: { type: 'number' },
      status: { type: 'string' },
    },
  },
}

/**
@@ -63,6 +63,8 @@ export {
} from './fetch.mock'
// Logger mocks
export { clearLoggerMocks, createMockLogger, getLoggerCalls, loggerMock } from './logger.mock'
// Redis mocks
export { clearRedisMocks, createMockRedis, type MockRedis } from './redis.mock'
// Request mocks
export { createMockFormDataRequest, createMockRequest } from './request.mock'
// Socket mocks
packages/testing/src/mocks/redis.mock.ts (new file, 80 lines)
@@ -0,0 +1,80 @@
import { vi } from 'vitest'

/**
 * Creates a mock Redis client with common operations.
 *
 * @example
 * ```ts
 * const redis = createMockRedis()
 * const queue = new RedisJobQueue(redis as never)
 *
 * // After operations
 * expect(redis.hset).toHaveBeenCalled()
 * expect(redis.expire).toHaveBeenCalledWith('key', 86400)
 * ```
 */
export function createMockRedis() {
  return {
    // Hash operations
    hset: vi.fn().mockResolvedValue(1),
    hget: vi.fn().mockResolvedValue(null),
    hgetall: vi.fn().mockResolvedValue({}),
    hdel: vi.fn().mockResolvedValue(1),
    hmset: vi.fn().mockResolvedValue('OK'),
    hincrby: vi.fn().mockResolvedValue(1),

    // Key operations
    get: vi.fn().mockResolvedValue(null),
    set: vi.fn().mockResolvedValue('OK'),
    del: vi.fn().mockResolvedValue(1),
    exists: vi.fn().mockResolvedValue(0),
    expire: vi.fn().mockResolvedValue(1),
    ttl: vi.fn().mockResolvedValue(-1),

    // List operations
    lpush: vi.fn().mockResolvedValue(1),
    rpush: vi.fn().mockResolvedValue(1),
    lpop: vi.fn().mockResolvedValue(null),
    rpop: vi.fn().mockResolvedValue(null),
    lrange: vi.fn().mockResolvedValue([]),
    llen: vi.fn().mockResolvedValue(0),

    // Set operations
    sadd: vi.fn().mockResolvedValue(1),
    srem: vi.fn().mockResolvedValue(1),
    smembers: vi.fn().mockResolvedValue([]),
    sismember: vi.fn().mockResolvedValue(0),

    // Pub/Sub
    publish: vi.fn().mockResolvedValue(0),
    subscribe: vi.fn().mockResolvedValue(undefined),
    unsubscribe: vi.fn().mockResolvedValue(undefined),
    on: vi.fn(),

    // Transaction
    multi: vi.fn(() => ({
      exec: vi.fn().mockResolvedValue([]),
    })),

    // Connection
    ping: vi.fn().mockResolvedValue('PONG'),
    quit: vi.fn().mockResolvedValue('OK'),
    disconnect: vi.fn().mockResolvedValue(undefined),

    // Status
    status: 'ready',
  }
}

export type MockRedis = ReturnType<typeof createMockRedis>

/**
 * Clears all Redis mock calls.
 */
export function clearRedisMocks(redis: MockRedis) {
  Object.values(redis).forEach((value) => {
    if (typeof value === 'function' && 'mockClear' in value) {
      value.mockClear()
    }
  })
}