improvement(mothership): new agent loop (#3920)

* feat(transport): replace shared chat transport with mothership-stream module

* improvement(contracts): regenerate contracts from go

* feat(tools): add tool catalog codegen from go tool contracts

* feat(tools): add tool-executor dispatch framework for sim side tool routing

* feat(orchestrator): rewrite tool dispatch with catalog-driven executor and simplified resume loop

* feat(orchestrator): checkpoint resume flow

* refactor(copilot): consolidate orchestrator into request/ layer

* refactor(mothership): reorganize lib/copilot into structured subdirectories

* refactor(mothership): canonical transcript layer, dead code cleanup, type consolidation

* refactor(mothership): rebase onto latest staging

* refactor(mothership): rename request continue to lifecycle

* feat(trace): add initial version of request traces

* improvement(stream): batch stream from redis

* fix(resume): fix the resume checkpoint

* fix(resume): fix resume client tool

* fix(subagents): subagent resume should join on existing subagent text block

* improvement(reconnect): harden reconnect logic

* fix(superagent): fix superagent integration tools

* improvement(stream): improve stream perf

* Rebase with origin dev

* fix(tests): fix failing test

* fix(build): fix type errors

* fix(build): fix build errors

* fix(build): fix type errors

* feat(mothership): add cli execution

* fix(mothership): fix function execute tests
This commit is contained in:
Siddharth Ganesan
2026-04-03 17:27:51 -07:00
committed by GitHub
parent 3b9e663f25
commit 7fdab14266
200 changed files with 12246 additions and 9732 deletions

View File

@@ -4,7 +4,7 @@ import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { recordUsage } from '@/lib/billing/core/usage-log'
import { checkAndBillOverageThreshold } from '@/lib/billing/threshold-billing'
import { checkInternalApiKey } from '@/lib/copilot/utils'
import { checkInternalApiKey } from '@/lib/copilot/request/http'
import { isBillingEnabled } from '@/lib/core/config/feature-flags'
import { generateRequestId } from '@/lib/core/utils/request'

View File

@@ -2,7 +2,7 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { checkServerSideUsageLimits } from '@/lib/billing/calculations/usage-monitor'
import { checkInternalApiKey } from '@/lib/copilot/utils'
import { checkInternalApiKey } from '@/lib/copilot/request/http'
const logger = createLogger('CopilotApiKeysValidate')

View File

@@ -1,10 +1,12 @@
import { createLogger } from '@sim/logger'
import { NextResponse } from 'next/server'
import { getLatestRunForStream } from '@/lib/copilot/async-runs/repository'
import { abortActiveStream, waitForPendingChatStream } from '@/lib/copilot/chat-streaming'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http'
import { abortActiveStream } from '@/lib/copilot/request/session/abort'
import { env } from '@/lib/core/config/env'
const logger = createLogger('CopilotChatAbortAPI')
const GO_EXPLICIT_ABORT_TIMEOUT_MS = 3000
export async function POST(request: Request) {
@@ -15,7 +17,12 @@ export async function POST(request: Request) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const body = await request.json().catch(() => ({}))
const body = await request.json().catch((err) => {
logger.warn('Abort request body parse failed; continuing with empty object', {
error: err instanceof Error ? err.message : String(err),
})
return {}
})
const streamId = typeof body.streamId === 'string' ? body.streamId : ''
let chatId = typeof body.chatId === 'string' ? body.chatId : ''
@@ -24,7 +31,13 @@ export async function POST(request: Request) {
}
if (!chatId) {
const run = await getLatestRunForStream(streamId, authenticatedUserId).catch(() => null)
const run = await getLatestRunForStream(streamId, authenticatedUserId).catch((err) => {
logger.warn('getLatestRunForStream failed while resolving chatId for abort', {
streamId,
error: err instanceof Error ? err.message : String(err),
})
return null
})
if (run?.chatId) {
chatId = run.chatId
}
@@ -50,15 +63,13 @@ export async function POST(request: Request) {
if (!response.ok) {
throw new Error(`Explicit abort marker request failed: ${response.status}`)
}
} catch {
// best effort: local abort should still proceed even if Go marker fails
} catch (err) {
logger.warn('Explicit abort marker request failed; proceeding with local abort', {
streamId,
error: err instanceof Error ? err.message : String(err),
})
}
const aborted = await abortActiveStream(streamId)
if (chatId) {
await waitForPendingChatStream(chatId, GO_EXPLICIT_ABORT_TIMEOUT_MS + 1000, streamId).catch(
() => false
)
}
return NextResponse.json({ aborted })
}

View File

@@ -36,11 +36,11 @@ vi.mock('drizzle-orm', () => ({
eq: vi.fn((field: unknown, value: unknown) => ({ field, value, type: 'eq' })),
}))
vi.mock('@/lib/copilot/chat-lifecycle', () => ({
vi.mock('@/lib/copilot/chat/lifecycle', () => ({
getAccessibleCopilotChat: mockGetAccessibleCopilotChat,
}))
vi.mock('@/lib/copilot/task-events', () => ({
vi.mock('@/lib/copilot/tasks', () => ({
taskPubSub: { publishStatusChanged: vi.fn() },
}))

View File

@@ -5,8 +5,8 @@ import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat-lifecycle'
import { taskPubSub } from '@/lib/copilot/task-events'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import { taskPubSub } from '@/lib/copilot/tasks'
const logger = createLogger('DeleteChatAPI')

View File

@@ -0,0 +1,119 @@
import { db } from '@sim/db'
import { copilotChats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, desc, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request/http'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
import { assertActiveWorkspaceAccess } from '@/lib/workspaces/permissions/utils'
const logger = createLogger('CopilotChatAPI')
// Shape a copilot chat DB row into the wire format returned by this route.
// `activeStreamId` / `resources` are emitted only when the caller selected
// those columns (presence-checked via `in`), so list and detail responses
// can share this helper.
function transformChat(chat: {
  id: string
  title: string | null
  model: string | null
  messages: unknown
  planArtifact?: unknown
  config?: unknown
  conversationId?: string | null
  resources?: unknown
  createdAt: Date | null
  updatedAt: Date | null
}) {
  // Untrusted jsonb column: coerce anything non-array to an empty list.
  const messageList = Array.isArray(chat.messages) ? chat.messages : []

  // Fields that only appear when the corresponding column was selected.
  const optionalFields: Record<string, unknown> = {}
  if ('conversationId' in chat) {
    // `||` (not `??`) on purpose: an empty-string stream id maps to null.
    optionalFields.activeStreamId = chat.conversationId || null
  }
  if ('resources' in chat) {
    optionalFields.resources = Array.isArray(chat.resources) ? chat.resources : []
  }

  return {
    id: chat.id,
    title: chat.title,
    model: chat.model,
    messages: messageList,
    messageCount: messageList.length,
    planArtifact: chat.planArtifact || null,
    config: chat.config || null,
    ...optionalFields,
    createdAt: chat.createdAt,
    updatedAt: chat.updatedAt,
  }
}
/**
 * GET /api/copilot/chat
 * Fetch a single chat by `chatId`, or list chats scoped to a workflow or
 * workspace. Requires a session; workspace/workflow access is verified
 * before any rows are returned.
 */
export async function GET(req: NextRequest) {
  try {
    const params = new URL(req.url).searchParams
    const workflowId = params.get('workflowId')
    const workspaceId = params.get('workspaceId')
    const chatId = params.get('chatId')

    const auth = await authenticateCopilotRequestSessionOnly()
    if (!auth.isAuthenticated || !auth.userId) {
      return createUnauthorizedResponse()
    }
    const userId = auth.userId

    // Detail lookup wins over list mode when a chatId is supplied.
    if (chatId) {
      const chat = await getAccessibleCopilotChat(chatId, userId)
      if (!chat) {
        return NextResponse.json({ success: false, error: 'Chat not found' }, { status: 404 })
      }
      logger.info(`Retrieved chat ${chatId}`)
      return NextResponse.json({ success: true, chat: transformChat(chat) })
    }

    // List mode needs at least one scope.
    if (!workflowId && !workspaceId) {
      return createBadRequestResponse('workflowId, workspaceId, or chatId is required')
    }

    // Access checks: workspace membership and/or workflow read permission.
    if (workspaceId) {
      await assertActiveWorkspaceAccess(workspaceId, userId)
    }
    if (workflowId) {
      const { allowed } = await authorizeWorkflowByWorkspacePermission({
        workflowId,
        userId,
        action: 'read',
      })
      if (!allowed) {
        return createUnauthorizedResponse()
      }
    }

    // Workflow scope takes precedence when both are present.
    const whereScope = workflowId
      ? eq(copilotChats.workflowId, workflowId)
      : eq(copilotChats.workspaceId, workspaceId!)

    const rows = await db
      .select({
        id: copilotChats.id,
        title: copilotChats.title,
        model: copilotChats.model,
        messages: copilotChats.messages,
        planArtifact: copilotChats.planArtifact,
        config: copilotChats.config,
        createdAt: copilotChats.createdAt,
        updatedAt: copilotChats.updatedAt,
      })
      .from(copilotChats)
      .where(and(eq(copilotChats.userId, userId), whereScope))
      .orderBy(desc(copilotChats.updatedAt))

    const scope = workflowId ? `workflow ${workflowId}` : `workspace ${workspaceId}`
    logger.info(`Retrieved ${rows.length} chats for ${scope}`)
    return NextResponse.json({
      success: true,
      chats: rows.map(transformChat),
    })
  } catch (error) {
    logger.error('Error fetching copilot chats:', error)
    return createInternalServerErrorResponse('Failed to fetch chats')
  }
}

View File

@@ -0,0 +1,65 @@
import { db } from '@sim/db'
import { copilotChats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import { taskPubSub } from '@/lib/copilot/tasks'
const logger = createLogger('RenameChatAPI')
// Request body contract for PATCH: both fields required; title length 1-200.
const RenameChatSchema = z.object({
  chatId: z.string().min(1),
  title: z.string().min(1).max(200),
})
/**
 * PATCH /api/copilot/chat/rename
 * Rename a chat the session user can access. On success, also bumps
 * updatedAt/lastSeenAt and publishes a 'renamed' status event to the
 * chat's workspace (when one is set).
 */
export async function PATCH(request: NextRequest) {
  try {
    const session = await getSession()
    const userId = session?.user?.id
    if (!userId) {
      return NextResponse.json({ success: false, error: 'Unauthorized' }, { status: 401 })
    }

    // Zod throws on bad input; handled in the catch below as a 400.
    const { chatId, title } = RenameChatSchema.parse(await request.json())

    // Confirm visibility before touching the row.
    const chat = await getAccessibleCopilotChat(chatId, userId)
    if (!chat) {
      return NextResponse.json({ success: false, error: 'Chat not found' }, { status: 404 })
    }

    const timestamp = new Date()
    const rows = await db
      .update(copilotChats)
      .set({ title, updatedAt: timestamp, lastSeenAt: timestamp })
      .where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, userId)))
      .returning({ id: copilotChats.id, workspaceId: copilotChats.workspaceId })
    const updatedRow = rows[0]
    // The update is additionally filtered by userId, so a miss here means
    // the row wasn't owned by this user even if it was accessible.
    if (!updatedRow) {
      return NextResponse.json({ success: false, error: 'Chat not found' }, { status: 404 })
    }

    logger.info('Chat renamed', { chatId, title })
    if (updatedRow.workspaceId) {
      taskPubSub?.publishStatusChanged({
        workspaceId: updatedRow.workspaceId,
        chatId,
        type: 'renamed',
      })
    }
    return NextResponse.json({ success: true })
  } catch (error) {
    if (error instanceof z.ZodError) {
      return NextResponse.json(
        { success: false, error: 'Invalid request data', details: error.errors },
        { status: 400 }
      )
    }
    logger.error('Error renaming chat:', error)
    return NextResponse.json({ success: false, error: 'Failed to rename chat' }, { status: 500 })
  }
}

View File

@@ -10,8 +10,8 @@ import {
createInternalServerErrorResponse,
createNotFoundResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
import type { ChatResource, ResourceType } from '@/lib/copilot/resources'
} from '@/lib/copilot/request/http'
import type { ChatResource, ResourceType } from '@/lib/copilot/resources/persistence'
const logger = createLogger('CopilotChatResourcesAPI')

View File

@@ -1,45 +1,45 @@
import { db } from '@sim/db'
import { copilotChats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, desc, eq, sql } from 'drizzle-orm'
import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { createRunSegment } from '@/lib/copilot/async-runs/repository'
import { getAccessibleCopilotChat, resolveOrCreateChat } from '@/lib/copilot/chat-lifecycle'
import { buildCopilotRequestPayload } from '@/lib/copilot/chat-payload'
import { type ChatLoadResult, resolveOrCreateChat } from '@/lib/copilot/chat/lifecycle'
import { buildCopilotRequestPayload } from '@/lib/copilot/chat/payload'
import {
acquirePendingChatStream,
createSSEStream,
releasePendingChatStream,
requestChatTitle,
SSE_RESPONSE_HEADERS,
} from '@/lib/copilot/chat-streaming'
import { COPILOT_REQUEST_MODES } from '@/lib/copilot/models'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import { getStreamMeta, readStreamEvents } from '@/lib/copilot/orchestrator/stream/buffer'
import type { OrchestratorResult } from '@/lib/copilot/orchestrator/types'
import { resolveActiveResourceContext } from '@/lib/copilot/process-contents'
buildPersistedAssistantMessage,
buildPersistedUserMessage,
} from '@/lib/copilot/chat/persisted-message'
import {
processContextsServer,
resolveActiveResourceContext,
} from '@/lib/copilot/chat/process-contents'
import { COPILOT_REQUEST_MODES } from '@/lib/copilot/constants'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { createSSEStream, SSE_RESPONSE_HEADERS } from '@/lib/copilot/request/lifecycle/start'
import {
authorizeWorkflowByWorkspacePermission,
resolveWorkflowIdForUser,
} from '@/lib/workflows/utils'
import {
assertActiveWorkspaceAccess,
getUserEntityPermissions,
} from '@/lib/workspaces/permissions/utils'
acquirePendingChatStream,
getPendingChatStreamId,
releasePendingChatStream,
} from '@/lib/copilot/request/session'
import type { OrchestratorResult } from '@/lib/copilot/request/types'
import { getWorkflowById, resolveWorkflowIdForUser } from '@/lib/workflows/utils'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
import type { ChatContext } from '@/stores/panel'
export const maxDuration = 3600
const logger = createLogger('CopilotChatAPI')
// ---------------------------------------------------------------------------
// Schemas
// ---------------------------------------------------------------------------
const FileAttachmentSchema = z.object({
id: z.string(),
key: z.string(),
@@ -66,7 +66,6 @@ const ChatMessageSchema = z.object({
mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'),
prefetch: z.boolean().optional(),
createNewChat: z.boolean().optional().default(false),
stream: z.boolean().optional().default(true),
implicitFeedback: z.string().optional(),
fileAttachments: z.array(FileAttachmentSchema).optional(),
resourceAttachments: z.array(ResourceAttachmentSchema).optional(),
@@ -104,27 +103,25 @@ const ChatMessageSchema = z.object({
userTimezone: z.string().optional(),
})
/**
* POST /api/copilot/chat
* Send messages to sim agent and handle chat persistence
*/
// ---------------------------------------------------------------------------
// POST /api/copilot/chat
// ---------------------------------------------------------------------------
export async function POST(req: NextRequest) {
const tracker = createRequestTracker()
let actualChatId: string | undefined
let pendingChatStreamAcquired = false
let pendingChatStreamHandedOff = false
let pendingChatStreamID: string | undefined
let chatStreamLockAcquired = false
let userMessageIdToUse = ''
try {
// Get session to access user information including name
// 1. Auth
const session = await getSession()
if (!session?.user?.id) {
return createUnauthorizedResponse()
}
const authenticatedUserId = session.user.id
// 2. Parse & validate
const body = await req.json()
const {
message,
@@ -137,7 +134,6 @@ export async function POST(req: NextRequest) {
mode,
prefetch,
createNewChat,
stream,
implicitFeedback,
fileAttachments,
resourceAttachments,
@@ -151,17 +147,12 @@ export async function POST(req: NextRequest) {
? contexts.map((ctx) => {
if (ctx.kind !== 'blocks') return ctx
if (Array.isArray(ctx.blockIds) && ctx.blockIds.length > 0) return ctx
if (ctx.blockId) {
return {
...ctx,
blockIds: [ctx.blockId],
}
}
if (ctx.blockId) return { ...ctx, blockIds: [ctx.blockId] }
return ctx
})
: contexts
// Copilot route always requires a workflow scope
// 3. Resolve workflow & workspace
const resolved = await resolveWorkflowIdForUser(
authenticatedUserId,
providedWorkflowId,
@@ -173,48 +164,29 @@ export async function POST(req: NextRequest) {
'No workflows found. Create a workflow first or provide a valid workflowId.'
)
}
const workflowId = resolved.workflowId
const workflowResolvedName = resolved.workflowName
const { workflowId, workflowName: workflowResolvedName } = resolved
// Resolve workspace from workflow so it can be sent as implicit context to the copilot.
let resolvedWorkspaceId: string | undefined
try {
const { getWorkflowById } = await import('@/lib/workflows/utils')
const wf = await getWorkflowById(workflowId)
resolvedWorkspaceId = wf?.workspaceId ?? undefined
} catch {
logger
.withMetadata({ requestId: tracker.requestId, messageId: userMessageId })
.warn('Failed to resolve workspaceId from workflow')
logger.warn(`[${tracker.requestId}] Failed to resolve workspaceId from workflow`)
}
const userMessageIdToUse = userMessageId || crypto.randomUUID()
const reqLogger = logger.withMetadata({
requestId: tracker.requestId,
messageId: userMessageIdToUse,
})
try {
reqLogger.info('Received chat POST', {
workflowId,
hasContexts: Array.isArray(normalizedContexts),
contextsCount: Array.isArray(normalizedContexts) ? normalizedContexts.length : 0,
contextsPreview: Array.isArray(normalizedContexts)
? normalizedContexts.map((c: any) => ({
kind: c?.kind,
chatId: c?.chatId,
workflowId: c?.workflowId,
executionId: (c as any)?.executionId,
label: c?.label,
}))
: undefined,
})
} catch {}
let currentChat: any = null
let conversationHistory: any[] = []
actualChatId = chatId
userMessageIdToUse = userMessageId || crypto.randomUUID()
const selectedModel = model || 'claude-opus-4-6'
logger.info(`[${tracker.requestId}] Received chat POST`, {
workflowId,
contextsCount: Array.isArray(normalizedContexts) ? normalizedContexts.length : 0,
})
// 4. Resolve or create chat
let currentChat: ChatLoadResult['chat'] = null
let conversationHistory: unknown[] = []
actualChatId = chatId
if (chatId || createNewChat) {
const chatResult = await resolveOrCreateChat({
chatId,
@@ -233,37 +205,48 @@ export async function POST(req: NextRequest) {
}
}
if (actualChatId) {
chatStreamLockAcquired = await acquirePendingChatStream(actualChatId, userMessageIdToUse)
if (!chatStreamLockAcquired) {
const activeStreamId = await getPendingChatStreamId(actualChatId)
return NextResponse.json(
{
error: 'A response is already in progress for this chat.',
...(activeStreamId ? { activeStreamId } : {}),
},
{ status: 409 }
)
}
}
// 5. Process contexts
let agentContexts: Array<{ type: string; content: string }> = []
if (Array.isArray(normalizedContexts) && normalizedContexts.length > 0) {
try {
const { processContextsServer } = await import('@/lib/copilot/process-contents')
const processed = await processContextsServer(
normalizedContexts as any,
normalizedContexts as ChatContext[],
authenticatedUserId,
message,
resolvedWorkspaceId,
actualChatId
)
agentContexts = processed
reqLogger.info('Contexts processed for request', {
logger.info(`[${tracker.requestId}] Contexts processed`, {
processedCount: agentContexts.length,
kinds: agentContexts.map((c) => c.type),
lengthPreview: agentContexts.map((c) => c.content?.length ?? 0),
})
if (
Array.isArray(normalizedContexts) &&
normalizedContexts.length > 0 &&
agentContexts.length === 0
) {
reqLogger.warn(
'Contexts provided but none processed. Check executionId for logs contexts.'
if (agentContexts.length === 0) {
logger.warn(
`[${tracker.requestId}] Contexts provided but none processed. Check executionId for logs contexts.`
)
}
} catch (e) {
reqLogger.error('Failed to process contexts', e)
logger.error(`[${tracker.requestId}] Failed to process contexts`, e)
}
}
// 5b. Process resource attachments
if (
Array.isArray(resourceAttachments) &&
resourceAttachments.length > 0 &&
@@ -279,26 +262,30 @@ export async function POST(req: NextRequest) {
actualChatId
)
if (!ctx) return null
return {
...ctx,
tag: r.active ? '@active_tab' : '@open_tab',
}
return { ...ctx, tag: r.active ? '@active_tab' : '@open_tab' }
})
)
for (const result of results) {
if (result.status === 'fulfilled' && result.value) {
agentContexts.push(result.value)
} else if (result.status === 'rejected') {
reqLogger.error('Failed to resolve resource attachment', result.reason)
logger.error(
`[${tracker.requestId}] Failed to resolve resource attachment`,
result.reason
)
}
}
}
const effectiveMode = mode === 'agent' ? 'build' : mode
// 6. Build copilot request payload
const userPermission = resolvedWorkspaceId
? await getUserEntityPermissions(authenticatedUserId, 'workspace', resolvedWorkspaceId).catch(
() => null
(err) => {
logger.warn('Failed to load user permissions', {
error: err instanceof Error ? err.message : String(err),
})
return null
}
)
: null
@@ -322,55 +309,24 @@ export async function POST(req: NextRequest) {
userPermission: userPermission ?? undefined,
userTimezone,
},
{
selectedModel,
}
{ selectedModel }
)
try {
reqLogger.info('About to call Sim Agent', {
hasContext: agentContexts.length > 0,
contextCount: agentContexts.length,
hasFileAttachments: Array.isArray(requestPayload.fileAttachments),
messageLength: message.length,
mode: effectiveMode,
hasTools: Array.isArray(requestPayload.tools),
toolCount: Array.isArray(requestPayload.tools) ? requestPayload.tools.length : 0,
hasBaseTools: Array.isArray(requestPayload.baseTools),
baseToolCount: Array.isArray(requestPayload.baseTools)
? requestPayload.baseTools.length
: 0,
hasCredentials: !!requestPayload.credentials,
})
} catch {}
if (stream && actualChatId) {
const acquired = await acquirePendingChatStream(actualChatId, userMessageIdToUse)
if (!acquired) {
return NextResponse.json(
{
error:
'A response is already in progress for this chat. Wait for it to finish or use Stop.',
},
{ status: 409 }
)
}
pendingChatStreamAcquired = true
pendingChatStreamID = userMessageIdToUse
}
logger.info(`[${tracker.requestId}] About to call Sim Agent`, {
contextCount: agentContexts.length,
hasFileAttachments: Array.isArray(requestPayload.fileAttachments),
messageLength: message.length,
mode,
})
// 7. Persist user message
if (actualChatId) {
const userMsg = {
const userMsg = buildPersistedUserMessage({
id: userMessageIdToUse,
role: 'user' as const,
content: message,
timestamp: new Date().toISOString(),
...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }),
...(Array.isArray(normalizedContexts) &&
normalizedContexts.length > 0 && {
contexts: normalizedContexts,
}),
}
fileAttachments,
contexts: normalizedContexts,
})
const [updated] = await db
.update(copilotChats)
@@ -383,268 +339,66 @@ export async function POST(req: NextRequest) {
.returning({ messages: copilotChats.messages })
if (updated) {
const freshMessages: any[] = Array.isArray(updated.messages) ? updated.messages : []
conversationHistory = freshMessages.filter((m: any) => m.id !== userMessageIdToUse)
const freshMessages: Record<string, unknown>[] = Array.isArray(updated.messages)
? updated.messages
: []
conversationHistory = freshMessages.filter(
(m: Record<string, unknown>) => m.id !== userMessageIdToUse
)
}
}
if (stream) {
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
const sseStream = createSSEStream({
requestPayload,
userId: authenticatedUserId,
streamId: userMessageIdToUse,
executionId,
runId,
chatId: actualChatId,
currentChat,
isNewChat: conversationHistory.length === 0,
message,
titleModel: selectedModel,
titleProvider: provider,
requestId: tracker.requestId,
workspaceId: resolvedWorkspaceId,
pendingChatStreamAlreadyRegistered: Boolean(actualChatId && stream),
orchestrateOptions: {
userId: authenticatedUserId,
workflowId,
chatId: actualChatId,
executionId,
runId,
goRoute: '/api/copilot',
autoExecuteTools: true,
interactive: true,
onComplete: async (result: OrchestratorResult) => {
if (!actualChatId) return
if (!result.success) return
// 8. Create SSE stream with onComplete for assistant message persistence
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
const assistantMessage: Record<string, unknown> = {
id: crypto.randomUUID(),
role: 'assistant' as const,
content: result.content,
timestamp: new Date().toISOString(),
...(result.requestId ? { requestId: result.requestId } : {}),
}
if (result.toolCalls.length > 0) {
assistantMessage.toolCalls = result.toolCalls
}
if (result.contentBlocks.length > 0) {
assistantMessage.contentBlocks = result.contentBlocks.map((block) => {
const stored: Record<string, unknown> = { type: block.type }
if (block.content) stored.content = block.content
if (block.type === 'tool_call' && block.toolCall) {
const state =
block.toolCall.result?.success !== undefined
? block.toolCall.result.success
? 'success'
: 'error'
: block.toolCall.status
const isSubagentTool = !!block.calledBy
const isNonTerminal =
state === 'cancelled' || state === 'pending' || state === 'executing'
stored.toolCall = {
id: block.toolCall.id,
name: block.toolCall.name,
state,
...(isSubagentTool && isNonTerminal ? {} : { result: block.toolCall.result }),
...(isSubagentTool && isNonTerminal
? {}
: block.toolCall.params
? { params: block.toolCall.params }
: {}),
...(block.calledBy ? { calledBy: block.calledBy } : {}),
}
}
return stored
})
}
try {
const [row] = await db
.select({ messages: copilotChats.messages })
.from(copilotChats)
.where(eq(copilotChats.id, actualChatId))
.limit(1)
const msgs: any[] = Array.isArray(row?.messages) ? row.messages : []
const userIdx = msgs.findIndex((m: any) => m.id === userMessageIdToUse)
const alreadyHasResponse =
userIdx >= 0 &&
userIdx + 1 < msgs.length &&
(msgs[userIdx + 1] as any)?.role === 'assistant'
if (!alreadyHasResponse) {
await db
.update(copilotChats)
.set({
messages: sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb`,
conversationId: sql`CASE WHEN ${copilotChats.conversationId} = ${userMessageIdToUse} THEN NULL ELSE ${copilotChats.conversationId} END`,
updatedAt: new Date(),
})
.where(eq(copilotChats.id, actualChatId))
}
} catch (error) {
reqLogger.error('Failed to persist chat messages', {
chatId: actualChatId,
error: error instanceof Error ? error.message : 'Unknown error',
})
}
},
},
})
pendingChatStreamHandedOff = true
return new Response(sseStream, { headers: SSE_RESPONSE_HEADERS })
}
const nsExecutionId = crypto.randomUUID()
const nsRunId = crypto.randomUUID()
if (actualChatId) {
await createRunSegment({
id: nsRunId,
executionId: nsExecutionId,
chatId: actualChatId,
const sseStream = createSSEStream({
requestPayload,
userId: authenticatedUserId,
streamId: userMessageIdToUse,
executionId,
runId,
chatId: actualChatId,
currentChat,
isNewChat: conversationHistory.length === 0,
message,
titleModel: selectedModel,
titleProvider: provider,
requestId: tracker.requestId,
workspaceId: resolvedWorkspaceId,
orchestrateOptions: {
userId: authenticatedUserId,
workflowId,
streamId: userMessageIdToUse,
}).catch(() => {})
}
const nonStreamingResult = await orchestrateCopilotStream(requestPayload, {
userId: authenticatedUserId,
workflowId,
chatId: actualChatId,
executionId: nsExecutionId,
runId: nsRunId,
goRoute: '/api/copilot',
autoExecuteTools: true,
interactive: true,
})
const responseData = {
content: nonStreamingResult.content,
toolCalls: nonStreamingResult.toolCalls,
model: selectedModel,
provider: typeof requestPayload?.provider === 'string' ? requestPayload.provider : undefined,
}
reqLogger.info('Non-streaming response from orchestrator', {
hasContent: !!responseData.content,
contentLength: responseData.content?.length || 0,
model: responseData.model,
provider: responseData.provider,
toolCallsCount: responseData.toolCalls?.length || 0,
})
// Save messages if we have a chat
if (currentChat && responseData.content) {
const userMessage = {
id: userMessageIdToUse, // Consistent ID used for request and persistence
role: 'user',
content: message,
timestamp: new Date().toISOString(),
...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }),
...(Array.isArray(normalizedContexts) &&
normalizedContexts.length > 0 && {
contexts: normalizedContexts,
}),
...(Array.isArray(normalizedContexts) &&
normalizedContexts.length > 0 && {
contentBlocks: [
{ type: 'contexts', contexts: normalizedContexts as any, timestamp: Date.now() },
],
}),
}
const assistantMessage = {
id: crypto.randomUUID(),
role: 'assistant',
content: responseData.content,
timestamp: new Date().toISOString(),
}
const updatedMessages = [...conversationHistory, userMessage, assistantMessage]
// Start title generation in parallel if this is first message (non-streaming)
if (actualChatId && !currentChat.title && conversationHistory.length === 0) {
reqLogger.info('Starting title generation for non-streaming response')
requestChatTitle({ message, model: selectedModel, provider, messageId: userMessageIdToUse })
.then(async (title) => {
if (title) {
await db
.update(copilotChats)
.set({
title,
updatedAt: new Date(),
})
.where(eq(copilotChats.id, actualChatId!))
reqLogger.info(`Generated and saved title: ${title}`)
}
})
.catch((error) => {
reqLogger.error('Title generation failed', error)
})
}
// Update chat in database immediately (without blocking for title)
await db
.update(copilotChats)
.set({
messages: updatedMessages,
updatedAt: new Date(),
})
.where(eq(copilotChats.id, actualChatId!))
}
reqLogger.info('Returning non-streaming response', {
duration: tracker.getDuration(),
chatId: actualChatId,
responseLength: responseData.content?.length || 0,
})
return NextResponse.json({
success: true,
response: responseData,
chatId: actualChatId,
metadata: {
requestId: tracker.requestId,
message,
duration: tracker.getDuration(),
chatId: actualChatId,
executionId,
runId,
goRoute: '/api/copilot',
autoExecuteTools: true,
interactive: true,
onComplete: buildOnComplete(actualChatId, userMessageIdToUse, tracker.requestId),
},
})
return new Response(sseStream, { headers: SSE_RESPONSE_HEADERS })
} catch (error) {
if (
actualChatId &&
pendingChatStreamAcquired &&
!pendingChatStreamHandedOff &&
pendingChatStreamID
) {
await releasePendingChatStream(actualChatId, pendingChatStreamID).catch(() => {})
if (chatStreamLockAcquired && actualChatId && userMessageIdToUse) {
await releasePendingChatStream(actualChatId, userMessageIdToUse)
}
const duration = tracker.getDuration()
if (error instanceof z.ZodError) {
logger
.withMetadata({ requestId: tracker.requestId, messageId: pendingChatStreamID ?? undefined })
.error('Validation error', {
duration,
errors: error.errors,
})
logger.error(`[${tracker.requestId}] Validation error:`, { duration, errors: error.errors })
return NextResponse.json(
{ error: 'Invalid request data', details: error.errors },
{ status: 400 }
)
}
logger
.withMetadata({ requestId: tracker.requestId, messageId: pendingChatStreamID ?? undefined })
.error('Error handling copilot chat', {
duration,
error: error instanceof Error ? error.message : 'Unknown error',
stack: error instanceof Error ? error.stack : undefined,
})
logger.error(`[${tracker.requestId}] Error handling copilot chat:`, {
duration,
error: error instanceof Error ? error.message : 'Unknown error',
stack: error instanceof Error ? error.stack : undefined,
})
return NextResponse.json(
{ error: error instanceof Error ? error.message : 'Internal server error' },
@@ -653,132 +407,55 @@ export async function POST(req: NextRequest) {
}
}
export async function GET(req: NextRequest) {
try {
const { searchParams } = new URL(req.url)
const workflowId = searchParams.get('workflowId')
const workspaceId = searchParams.get('workspaceId')
const chatId = searchParams.get('chatId')
// ---------------------------------------------------------------------------
// onComplete: persist assistant message after streaming finishes
// ---------------------------------------------------------------------------
const { userId: authenticatedUserId, isAuthenticated } =
await authenticateCopilotRequestSessionOnly()
if (!isAuthenticated || !authenticatedUserId) {
return createUnauthorizedResponse()
}
function buildOnComplete(
chatId: string | undefined,
userMessageId: string,
requestId: string
): (result: OrchestratorResult) => Promise<void> {
return async (result) => {
if (!chatId || !result.success) return
if (chatId) {
const chat = await getAccessibleCopilotChat(chatId, authenticatedUserId)
const assistantMessage = buildPersistedAssistantMessage(result, result.requestId)
if (!chat) {
return NextResponse.json({ success: false, error: 'Chat not found' }, { status: 404 })
try {
const [row] = await db
.select({ messages: copilotChats.messages })
.from(copilotChats)
.where(eq(copilotChats.id, chatId))
.limit(1)
const msgs: Record<string, unknown>[] = Array.isArray(row?.messages) ? row.messages : []
const userIdx = msgs.findIndex((m: Record<string, unknown>) => m.id === userMessageId)
const alreadyHasResponse =
userIdx >= 0 &&
userIdx + 1 < msgs.length &&
(msgs[userIdx + 1] as Record<string, unknown>)?.role === 'assistant'
if (!alreadyHasResponse) {
await db
.update(copilotChats)
.set({
messages: sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb`,
conversationId: sql`CASE WHEN ${copilotChats.conversationId} = ${userMessageId} THEN NULL ELSE ${copilotChats.conversationId} END`,
updatedAt: new Date(),
})
.where(eq(copilotChats.id, chatId))
}
let streamSnapshot: {
events: Array<{ eventId: number; streamId: string; event: Record<string, unknown> }>
status: string
} | null = null
if (chat.conversationId) {
try {
const [meta, events] = await Promise.all([
getStreamMeta(chat.conversationId),
readStreamEvents(chat.conversationId, 0),
])
streamSnapshot = {
events: events || [],
status: meta?.status || 'unknown',
}
} catch (err) {
logger
.withMetadata({ messageId: chat.conversationId || undefined })
.warn('Failed to read stream snapshot for chat', {
chatId,
conversationId: chat.conversationId,
error: err instanceof Error ? err.message : String(err),
})
}
}
const transformedChat = {
id: chat.id,
title: chat.title,
model: chat.model,
messages: Array.isArray(chat.messages) ? chat.messages : [],
messageCount: Array.isArray(chat.messages) ? chat.messages.length : 0,
planArtifact: chat.planArtifact || null,
config: chat.config || null,
conversationId: chat.conversationId || null,
resources: Array.isArray(chat.resources) ? chat.resources : [],
createdAt: chat.createdAt,
updatedAt: chat.updatedAt,
...(streamSnapshot ? { streamSnapshot } : {}),
}
logger
.withMetadata({ messageId: chat.conversationId || undefined })
.info(`Retrieved chat ${chatId}`)
return NextResponse.json({ success: true, chat: transformedChat })
}
if (!workflowId && !workspaceId) {
return createBadRequestResponse('workflowId, workspaceId, or chatId is required')
}
if (workspaceId) {
await assertActiveWorkspaceAccess(workspaceId, authenticatedUserId)
}
if (workflowId) {
const authorization = await authorizeWorkflowByWorkspacePermission({
workflowId,
userId: authenticatedUserId,
action: 'read',
} catch (error) {
logger.error(`[${requestId}] Failed to persist chat messages`, {
chatId,
error: error instanceof Error ? error.message : 'Unknown error',
})
if (!authorization.allowed) {
return createUnauthorizedResponse()
}
}
const scopeFilter = workflowId
? eq(copilotChats.workflowId, workflowId)
: eq(copilotChats.workspaceId, workspaceId!)
const chats = await db
.select({
id: copilotChats.id,
title: copilotChats.title,
model: copilotChats.model,
messages: copilotChats.messages,
planArtifact: copilotChats.planArtifact,
config: copilotChats.config,
createdAt: copilotChats.createdAt,
updatedAt: copilotChats.updatedAt,
})
.from(copilotChats)
.where(and(eq(copilotChats.userId, authenticatedUserId), scopeFilter))
.orderBy(desc(copilotChats.updatedAt))
const transformedChats = chats.map((chat) => ({
id: chat.id,
title: chat.title,
model: chat.model,
messages: Array.isArray(chat.messages) ? chat.messages : [],
messageCount: Array.isArray(chat.messages) ? chat.messages.length : 0,
planArtifact: chat.planArtifact || null,
config: chat.config || null,
createdAt: chat.createdAt,
updatedAt: chat.updatedAt,
}))
const scope = workflowId ? `workflow ${workflowId}` : `workspace ${workspaceId}`
logger.info(`Retrieved ${transformedChats.length} chats for ${scope}`)
return NextResponse.json({
success: true,
chats: transformedChats,
})
} catch (error) {
logger.error('Error fetching copilot chats', error)
return createInternalServerErrorResponse('Failed to fetch chats')
}
}
// ---------------------------------------------------------------------------
// GET handler (read-only queries, extracted to queries.ts)
// ---------------------------------------------------------------------------
export { GET } from './queries'

View File

@@ -4,25 +4,67 @@
import { NextRequest } from 'next/server'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import {
MothershipStreamV1CompletionStatus,
MothershipStreamV1EventType,
} from '@/lib/copilot/generated/mothership-stream-v1'
const { getStreamMeta, readStreamEvents, authenticateCopilotRequestSessionOnly } = vi.hoisted(
() => ({
getStreamMeta: vi.fn(),
readStreamEvents: vi.fn(),
authenticateCopilotRequestSessionOnly: vi.fn(),
})
)
vi.mock('@/lib/copilot/orchestrator/stream/buffer', () => ({
getStreamMeta,
readStreamEvents,
const {
getLatestRunForStream,
readEvents,
checkForReplayGap,
authenticateCopilotRequestSessionOnly,
} = vi.hoisted(() => ({
getLatestRunForStream: vi.fn(),
readEvents: vi.fn(),
checkForReplayGap: vi.fn(),
authenticateCopilotRequestSessionOnly: vi.fn(),
}))
vi.mock('@/lib/copilot/request-helpers', () => ({
vi.mock('@/lib/copilot/async-runs/repository', () => ({
getLatestRunForStream,
}))
vi.mock('@/lib/copilot/request/session', () => ({
readEvents,
checkForReplayGap,
createEvent: (event: Record<string, unknown>) => ({
stream: {
streamId: event.streamId,
cursor: event.cursor,
},
seq: event.seq,
trace: { requestId: event.requestId ?? '' },
type: event.type,
payload: event.payload,
}),
encodeSSEEnvelope: (event: Record<string, unknown>) =>
new TextEncoder().encode(`data: ${JSON.stringify(event)}\n\n`),
SSE_RESPONSE_HEADERS: {
'Content-Type': 'text/event-stream',
},
}))
vi.mock('@/lib/copilot/request/http', () => ({
authenticateCopilotRequestSessionOnly,
}))
import { GET } from '@/app/api/copilot/chat/stream/route'
import { GET } from './route'
async function readAllChunks(response: Response): Promise<string[]> {
const reader = response.body?.getReader()
expect(reader).toBeTruthy()
const chunks: string[] = []
while (true) {
const { done, value } = await reader!.read()
if (done) {
break
}
chunks.push(new TextDecoder().decode(value))
}
return chunks
}
describe('copilot chat stream replay route', () => {
beforeEach(() => {
@@ -31,29 +73,54 @@ describe('copilot chat stream replay route', () => {
userId: 'user-1',
isAuthenticated: true,
})
readStreamEvents.mockResolvedValue([])
readEvents.mockResolvedValue([])
checkForReplayGap.mockResolvedValue(null)
})
it('stops replay polling when stream meta becomes cancelled', async () => {
getStreamMeta
it('stops replay polling when run becomes cancelled', async () => {
getLatestRunForStream
.mockResolvedValueOnce({
status: 'active',
userId: 'user-1',
executionId: 'exec-1',
id: 'run-1',
})
.mockResolvedValueOnce({
status: 'cancelled',
userId: 'user-1',
executionId: 'exec-1',
id: 'run-1',
})
const response = await GET(
new NextRequest('http://localhost:3000/api/copilot/chat/stream?streamId=stream-1')
new NextRequest('http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0')
)
const reader = response.body?.getReader()
expect(reader).toBeTruthy()
const chunks = await readAllChunks(response)
expect(chunks.join('')).toContain(
JSON.stringify({
status: MothershipStreamV1CompletionStatus.cancelled,
reason: 'terminal_status',
})
)
expect(getLatestRunForStream).toHaveBeenCalledTimes(2)
})
const first = await reader!.read()
expect(first.done).toBe(true)
expect(getStreamMeta).toHaveBeenCalledTimes(2)
it('emits structured terminal replay error when run metadata disappears', async () => {
getLatestRunForStream
.mockResolvedValueOnce({
status: 'active',
executionId: 'exec-1',
id: 'run-1',
})
.mockResolvedValueOnce(null)
const response = await GET(
new NextRequest('http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0')
)
const chunks = await readAllChunks(response)
const body = chunks.join('')
expect(body).toContain(`"type":"${MothershipStreamV1EventType.error}"`)
expect(body).toContain('"code":"resume_run_unavailable"')
expect(body).toContain(`"type":"${MothershipStreamV1EventType.complete}"`)
})
})

View File

@@ -1,12 +1,18 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { getLatestRunForStream } from '@/lib/copilot/async-runs/repository'
import {
getStreamMeta,
readStreamEvents,
type StreamMeta,
} from '@/lib/copilot/orchestrator/stream/buffer'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
MothershipStreamV1CompletionStatus,
MothershipStreamV1EventType,
} from '@/lib/copilot/generated/mothership-stream-v1'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http'
import {
checkForReplayGap,
createEvent,
encodeSSEEnvelope,
readEvents,
SSE_RESPONSE_HEADERS,
} from '@/lib/copilot/request/session'
export const maxDuration = 3600
@@ -14,8 +20,59 @@ const logger = createLogger('CopilotChatStreamAPI')
const POLL_INTERVAL_MS = 250
const MAX_STREAM_MS = 60 * 60 * 1000
function encodeEvent(event: Record<string, any>): Uint8Array {
return new TextEncoder().encode(`data: ${JSON.stringify(event)}\n\n`)
function isTerminalStatus(
status: string | null | undefined
): status is MothershipStreamV1CompletionStatus {
return (
status === MothershipStreamV1CompletionStatus.complete ||
status === MothershipStreamV1CompletionStatus.error ||
status === MothershipStreamV1CompletionStatus.cancelled
)
}
function buildResumeTerminalEnvelopes(options: {
streamId: string
afterCursor: string
status: MothershipStreamV1CompletionStatus
message?: string
code: string
reason?: string
}) {
const baseSeq = Number(options.afterCursor || '0')
const seq = Number.isFinite(baseSeq) ? baseSeq : 0
const envelopes: ReturnType<typeof createEvent>[] = []
if (options.status === MothershipStreamV1CompletionStatus.error) {
envelopes.push(
createEvent({
streamId: options.streamId,
cursor: String(seq + 1),
seq: seq + 1,
requestId: '',
type: MothershipStreamV1EventType.error,
payload: {
message: options.message || 'Stream recovery failed before completion.',
code: options.code,
},
})
)
}
envelopes.push(
createEvent({
streamId: options.streamId,
cursor: String(seq + envelopes.length + 1),
seq: seq + envelopes.length + 1,
requestId: '',
type: MothershipStreamV1EventType.complete,
payload: {
status: options.status,
...(options.reason ? { reason: options.reason } : {}),
},
})
)
return envelopes
}
export async function GET(request: NextRequest) {
@@ -28,68 +85,36 @@ export async function GET(request: NextRequest) {
const url = new URL(request.url)
const streamId = url.searchParams.get('streamId') || ''
const fromParam = url.searchParams.get('from') || '0'
const fromEventId = Number(fromParam || 0)
// If batch=true, return buffered events as JSON instead of SSE
const batchMode = url.searchParams.get('batch') === 'true'
const toParam = url.searchParams.get('to')
const toEventId = toParam ? Number(toParam) : undefined
const reqLogger = logger.withMetadata({ messageId: streamId || undefined })
reqLogger.info('[Resume] Received resume request', {
streamId: streamId || undefined,
fromEventId,
toEventId,
batchMode,
})
const afterCursor = url.searchParams.get('after') || ''
if (!streamId) {
return NextResponse.json({ error: 'streamId is required' }, { status: 400 })
}
const meta = (await getStreamMeta(streamId)) as StreamMeta | null
reqLogger.info('[Resume] Stream lookup', {
streamId,
fromEventId,
toEventId,
batchMode,
hasMeta: !!meta,
metaStatus: meta?.status,
})
if (!meta) {
return NextResponse.json({ error: 'Stream not found' }, { status: 404 })
}
if (meta.userId && meta.userId !== authenticatedUserId) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 403 })
}
// Batch mode: return all buffered events as JSON
if (batchMode) {
const events = await readStreamEvents(streamId, fromEventId)
const filteredEvents = toEventId ? events.filter((e) => e.eventId <= toEventId) : events
reqLogger.info('[Resume] Batch response', {
const run = await getLatestRunForStream(streamId, authenticatedUserId).catch((err) => {
logger.warn('Failed to fetch latest run for stream', {
streamId,
fromEventId,
toEventId,
eventCount: filteredEvents.length,
})
return NextResponse.json({
success: true,
events: filteredEvents,
status: meta.status,
executionId: meta.executionId,
runId: meta.runId,
error: err instanceof Error ? err.message : String(err),
})
return null
})
logger.info('[Resume] Stream lookup', {
streamId,
afterCursor,
hasRun: !!run,
runStatus: run?.status,
})
if (!run) {
return NextResponse.json({ error: 'Stream not found' }, { status: 404 })
}
const startTime = Date.now()
const stream = new ReadableStream({
async start(controller) {
let lastEventId = Number.isFinite(fromEventId) ? fromEventId : 0
let latestMeta = meta
let cursor = afterCursor || '0'
let controllerClosed = false
let sawTerminalEvent = false
const closeController = () => {
if (controllerClosed) return
@@ -97,14 +122,14 @@ export async function GET(request: NextRequest) {
try {
controller.close()
} catch {
// Controller already closed by runtime/client - treat as normal.
// Controller already closed by runtime/client
}
}
const enqueueEvent = (payload: Record<string, any>) => {
const enqueueEvent = (payload: unknown) => {
if (controllerClosed) return false
try {
controller.enqueue(encodeEvent(payload))
controller.enqueue(encodeSSEEnvelope(payload))
return true
} catch {
controllerClosed = true
@@ -118,47 +143,96 @@ export async function GET(request: NextRequest) {
request.signal.addEventListener('abort', abortListener, { once: true })
const flushEvents = async () => {
const events = await readStreamEvents(streamId, lastEventId)
const events = await readEvents(streamId, cursor)
if (events.length > 0) {
reqLogger.info('[Resume] Flushing events', {
logger.info('[Resume] Flushing events', {
streamId,
fromEventId: lastEventId,
afterCursor: cursor,
eventCount: events.length,
})
}
for (const entry of events) {
lastEventId = entry.eventId
const payload = {
...entry.event,
eventId: entry.eventId,
streamId: entry.streamId,
executionId: latestMeta?.executionId,
runId: latestMeta?.runId,
for (const envelope of events) {
cursor = envelope.stream.cursor ?? String(envelope.seq)
if (envelope.type === MothershipStreamV1EventType.complete) {
sawTerminalEvent = true
}
if (!enqueueEvent(payload)) {
if (!enqueueEvent(envelope)) {
break
}
}
}
const emitTerminalIfMissing = (
status: MothershipStreamV1CompletionStatus,
options?: { message?: string; code: string; reason?: string }
) => {
if (controllerClosed || sawTerminalEvent) {
return
}
for (const envelope of buildResumeTerminalEnvelopes({
streamId,
afterCursor: cursor,
status,
message: options?.message,
code: options?.code ?? 'resume_terminal',
reason: options?.reason,
})) {
cursor = envelope.stream.cursor ?? String(envelope.seq)
if (envelope.type === MothershipStreamV1EventType.complete) {
sawTerminalEvent = true
}
if (!enqueueEvent(envelope)) {
break
}
}
}
try {
const gap = await checkForReplayGap(streamId, afterCursor)
if (gap) {
for (const envelope of gap.envelopes) {
enqueueEvent(envelope)
}
return
}
await flushEvents()
while (!controllerClosed && Date.now() - startTime < MAX_STREAM_MS) {
const currentMeta = await getStreamMeta(streamId)
if (!currentMeta) break
latestMeta = currentMeta
const currentRun = await getLatestRunForStream(streamId, authenticatedUserId).catch(
(err) => {
logger.warn('Failed to poll latest run for stream', {
streamId,
error: err instanceof Error ? err.message : String(err),
})
return null
}
)
if (!currentRun) {
emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, {
message: 'The stream could not be recovered because its run metadata is unavailable.',
code: 'resume_run_unavailable',
reason: 'run_unavailable',
})
break
}
await flushEvents()
if (controllerClosed) {
break
}
if (
currentMeta.status === 'complete' ||
currentMeta.status === 'error' ||
currentMeta.status === 'cancelled'
) {
if (isTerminalStatus(currentRun.status)) {
emitTerminalIfMissing(currentRun.status, {
message:
currentRun.status === MothershipStreamV1CompletionStatus.error
? typeof currentRun.error === 'string'
? currentRun.error
: 'The recovered stream ended with an error.'
: undefined,
code: 'resume_terminal_status',
reason: 'terminal_status',
})
break
}
@@ -169,12 +243,24 @@ export async function GET(request: NextRequest) {
await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS))
}
if (!controllerClosed && Date.now() - startTime >= MAX_STREAM_MS) {
emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, {
message: 'The stream recovery timed out before completion.',
code: 'resume_timeout',
reason: 'timeout',
})
}
} catch (error) {
if (!controllerClosed && !request.signal.aborted) {
reqLogger.warn('Stream replay failed', {
logger.warn('Stream replay failed', {
streamId,
error: error instanceof Error ? error.message : String(error),
})
emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, {
message: 'The stream replay failed before completion.',
code: 'resume_internal',
reason: 'stream_replay_failed',
})
}
} finally {
request.signal.removeEventListener('abort', abortListener)
@@ -183,5 +269,5 @@ export async function GET(request: NextRequest) {
},
})
return new Response(stream, { headers: SSE_HEADERS })
return new Response(stream, { headers: SSE_RESPONSE_HEADERS })
}

View File

@@ -327,7 +327,35 @@ describe('Copilot Chat Update Messages API Route', () => {
})
expect(mockSet).toHaveBeenCalledWith({
messages,
messages: [
{
id: 'msg-1',
role: 'user',
content: 'Hello',
timestamp: '2024-01-01T10:00:00.000Z',
},
{
id: 'msg-2',
role: 'assistant',
content: 'Hi there!',
timestamp: '2024-01-01T10:01:00.000Z',
contentBlocks: [
{
type: 'text',
content: 'Here is the weather information',
},
{
type: 'tool',
phase: 'call',
toolCall: {
id: 'tool-1',
name: 'get_weather',
state: 'pending',
},
},
],
},
],
updatedAt: expect.any(Date),
})
})

View File

@@ -4,15 +4,16 @@ import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat-lifecycle'
import { COPILOT_MODES } from '@/lib/copilot/models'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import { normalizeMessage, type PersistedMessage } from '@/lib/copilot/chat/persisted-message'
import { COPILOT_MODES } from '@/lib/copilot/constants'
import {
authenticateCopilotRequestSessionOnly,
createInternalServerErrorResponse,
createNotFoundResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
const logger = createLogger('CopilotChatUpdateAPI')
@@ -78,12 +79,15 @@ export async function POST(req: NextRequest) {
}
const { chatId, messages, planArtifact, config } = UpdateMessagesSchema.parse(body)
const normalizedMessages: PersistedMessage[] = messages.map((message) =>
normalizeMessage(message as Record<string, unknown>)
)
// Debug: Log what we're about to save
const lastMsgParsed = messages[messages.length - 1]
const lastMsgParsed = normalizedMessages[normalizedMessages.length - 1]
if (lastMsgParsed?.role === 'assistant') {
logger.info(`[${tracker.requestId}] Parsed messages to save`, {
messageCount: messages.length,
messageCount: normalizedMessages.length,
lastMsgId: lastMsgParsed.id,
lastMsgContentLength: lastMsgParsed.content?.length || 0,
lastMsgContentBlockCount: lastMsgParsed.contentBlocks?.length || 0,
@@ -99,8 +103,8 @@ export async function POST(req: NextRequest) {
}
// Update chat with new messages, plan artifact, and config
const updateData: Record<string, any> = {
messages: messages,
const updateData: Record<string, unknown> = {
messages: normalizedMessages,
updatedAt: new Date(),
}
@@ -116,14 +120,14 @@ export async function POST(req: NextRequest) {
logger.info(`[${tracker.requestId}] Successfully updated chat`, {
chatId,
newMessageCount: messages.length,
newMessageCount: normalizedMessages.length,
hasPlanArtifact: !!planArtifact,
hasConfig: !!config,
})
return NextResponse.json({
success: true,
messageCount: messages.length,
messageCount: normalizedMessages.length,
})
} catch (error) {
logger.error(`[${tracker.requestId}] Error updating chat messages:`, error)

View File

@@ -66,7 +66,7 @@ vi.mock('drizzle-orm', () => ({
sql: vi.fn(),
}))
vi.mock('@/lib/copilot/request-helpers', () => ({
vi.mock('@/lib/copilot/request/http', () => ({
authenticateCopilotRequestSessionOnly: mockAuthenticate,
createUnauthorizedResponse: mockCreateUnauthorizedResponse,
createInternalServerErrorResponse: mockCreateInternalServerErrorResponse,

View File

@@ -4,14 +4,14 @@ import { createLogger } from '@sim/logger'
import { and, desc, eq, isNull, or, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { resolveOrCreateChat } from '@/lib/copilot/chat-lifecycle'
import { resolveOrCreateChat } from '@/lib/copilot/chat/lifecycle'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
import { taskPubSub } from '@/lib/copilot/task-events'
} from '@/lib/copilot/request/http'
import { taskPubSub } from '@/lib/copilot/tasks'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
import { assertActiveWorkspaceAccess } from '@/lib/workspaces/permissions/utils'
@@ -37,7 +37,7 @@ export async function GET(_request: NextRequest) {
title: copilotChats.title,
workflowId: copilotChats.workflowId,
workspaceId: copilotChats.workspaceId,
conversationId: copilotChats.conversationId,
activeStreamId: copilotChats.conversationId,
updatedAt: copilotChats.updatedAt,
})
.from(copilotChats)

View File

@@ -43,7 +43,7 @@ vi.mock('@/lib/workflows/utils', () => ({
authorizeWorkflowByWorkspacePermission: mockAuthorize,
}))
vi.mock('@/lib/copilot/chat-lifecycle', () => ({
vi.mock('@/lib/copilot/chat/lifecycle', () => ({
getAccessibleCopilotChat: mockGetAccessibleCopilotChat,
}))

View File

@@ -4,14 +4,14 @@ import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat-lifecycle'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import {
authenticateCopilotRequestSessionOnly,
createInternalServerErrorResponse,
createNotFoundResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { getInternalApiBaseUrl } from '@/lib/core/utils/urls'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
import { isUuidV4 } from '@/executor/constants'

View File

@@ -62,7 +62,7 @@ vi.mock('drizzle-orm', () => ({
desc: vi.fn((field: unknown) => ({ field, type: 'desc' })),
}))
vi.mock('@/lib/copilot/chat-lifecycle', () => ({
vi.mock('@/lib/copilot/chat/lifecycle', () => ({
getAccessibleCopilotChat: mockGetAccessibleCopilotChat,
}))

View File

@@ -4,14 +4,14 @@ import { createLogger } from '@sim/logger'
import { and, desc, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat-lifecycle'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
const logger = createLogger('WorkflowCheckpointsAPI')

View File

@@ -38,7 +38,7 @@ const {
publishToolConfirmation: vi.fn(),
}))
vi.mock('@/lib/copilot/request-helpers', () => ({
vi.mock('@/lib/copilot/request/http', () => ({
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
@@ -54,7 +54,7 @@ vi.mock('@/lib/copilot/async-runs/repository', () => ({
completeAsyncToolCall,
}))
vi.mock('@/lib/copilot/orchestrator/persistence', () => ({
vi.mock('@/lib/copilot/persistence/tool-confirm', () => ({
publishToolConfirmation,
}))

View File

@@ -1,13 +1,14 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { ASYNC_TOOL_STATUS } from '@/lib/copilot/async-runs/lifecycle'
import {
completeAsyncToolCall,
getAsyncToolCall,
getRunSegment,
upsertAsyncToolCall,
} from '@/lib/copilot/async-runs/repository'
import { publishToolConfirmation } from '@/lib/copilot/orchestrator/persistence'
import { publishToolConfirmation } from '@/lib/copilot/persistence/tool-confirm'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
@@ -16,7 +17,7 @@ import {
createRequestTracker,
createUnauthorizedResponse,
type NotificationStatus,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
const logger = createLogger('CopilotConfirmAPI')
@@ -42,17 +43,17 @@ async function updateToolCallStatus(
const toolCallId = existing.toolCallId
const durableStatus =
status === 'success'
? 'completed'
? ASYNC_TOOL_STATUS.completed
: status === 'cancelled'
? 'cancelled'
? ASYNC_TOOL_STATUS.cancelled
: status === 'error' || status === 'rejected'
? 'failed'
: 'pending'
? ASYNC_TOOL_STATUS.failed
: ASYNC_TOOL_STATUS.pending
try {
if (
durableStatus === 'completed' ||
durableStatus === 'failed' ||
durableStatus === 'cancelled'
durableStatus === ASYNC_TOOL_STATUS.completed ||
durableStatus === ASYNC_TOOL_STATUS.failed ||
durableStatus === ASYNC_TOOL_STATUS.cancelled
) {
await completeAsyncToolCall({
toolCallId,
@@ -107,13 +108,25 @@ export async function POST(req: NextRequest) {
const body = await req.json()
const { toolCallId, status, message, data } = ConfirmationSchema.parse(body)
const existing = await getAsyncToolCall(toolCallId).catch(() => null)
const existing = await getAsyncToolCall(toolCallId).catch((err) => {
logger.warn('Failed to fetch async tool call', {
toolCallId,
error: err instanceof Error ? err.message : String(err),
})
return null
})
if (!existing) {
return createNotFoundResponse('Tool call not found')
}
const run = await getRunSegment(existing.runId).catch(() => null)
const run = await getRunSegment(existing.runId).catch((err) => {
logger.warn('Failed to fetch run segment', {
runId: existing.runId,
error: err instanceof Error ? err.message : String(err),
})
return null
})
if (!run) {
return createNotFoundResponse('Tool call run not found')
}

View File

@@ -1,5 +1,5 @@
import { type NextRequest, NextResponse } from 'next/server'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http'
import { routeExecution } from '@/lib/copilot/tools/server/router'
/**

View File

@@ -57,7 +57,7 @@ vi.mock('drizzle-orm', () => ({
eq: vi.fn((field: unknown, value: unknown) => ({ field, value, type: 'eq' })),
}))
vi.mock('@/lib/copilot/request-helpers', () => ({
vi.mock('@/lib/copilot/request/http', () => ({
authenticateCopilotRequestSessionOnly: mockAuthenticate,
createUnauthorizedResponse: mockCreateUnauthorizedResponse,
createBadRequestResponse: mockCreateBadRequestResponse,

View File

@@ -10,7 +10,7 @@ import {
createInternalServerErrorResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
const logger = createLogger('CopilotFeedbackAPI')

View File

@@ -1,8 +1,14 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import type { AvailableModel } from '@/lib/copilot/types'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http'
interface AvailableModel {
id: string
friendlyName: string
provider: string
}
import { env } from '@/lib/core/config/env'
const logger = createLogger('CopilotModelsAPI')

View File

@@ -23,7 +23,7 @@ const {
mockFetch: vi.fn(),
}))
vi.mock('@/lib/copilot/request-helpers', () => ({
vi.mock('@/lib/copilot/request/http', () => ({
authenticateCopilotRequestSessionOnly: mockAuthenticateCopilotRequestSessionOnly,
createUnauthorizedResponse: mockCreateUnauthorizedResponse,
createBadRequestResponse: mockCreateBadRequestResponse,

View File

@@ -7,7 +7,7 @@ import {
createInternalServerErrorResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { env } from '@/lib/core/config/env'
const BodySchema = z.object({

View File

@@ -4,7 +4,7 @@ import { z } from 'zod'
import {
authenticateCopilotRequestSessionOnly,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { env } from '@/lib/core/config/env'
const logger = createLogger('CopilotTrainingExamplesAPI')

View File

@@ -4,7 +4,7 @@ import { z } from 'zod'
import {
authenticateCopilotRequestSessionOnly,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { env } from '@/lib/core/config/env'
const logger = createLogger('CopilotTrainingAPI')

View File

@@ -24,6 +24,27 @@ vi.mock('@/lib/auth/hybrid', () => ({
vi.mock('@/lib/execution/e2b', () => ({
executeInE2B: mockExecuteInE2B,
executeShellInE2B: vi.fn(),
}))
vi.mock('@/lib/copilot/request/tools/files', () => ({
FORMAT_TO_CONTENT_TYPE: {
json: 'application/json',
csv: 'text/csv',
txt: 'text/plain',
md: 'text/markdown',
html: 'text/html',
},
normalizeOutputWorkspaceFileName: vi.fn((p: string) => p.replace(/^files\//, '')),
resolveOutputFormat: vi.fn(() => 'json'),
}))
vi.mock('@/lib/uploads/contexts/workspace/workspace-file-manager', () => ({
uploadWorkspaceFile: vi.fn(),
}))
vi.mock('@/lib/workflows/utils', () => ({
getWorkflowById: vi.fn(),
}))
vi.mock('@/lib/core/config/feature-flags', () => ({
@@ -32,6 +53,7 @@ vi.mock('@/lib/core/config/feature-flags', () => ({
isProd: false,
isDev: false,
isTest: true,
isEmailVerificationEnabled: false,
}))
import { validateProxyUrl } from '@/lib/core/security/input-validation'

View File

@@ -1,11 +1,18 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { checkInternalAuth } from '@/lib/auth/hybrid'
import {
FORMAT_TO_CONTENT_TYPE,
normalizeOutputWorkspaceFileName,
resolveOutputFormat,
} from '@/lib/copilot/request/tools/files'
import { isE2bEnabled } from '@/lib/core/config/feature-flags'
import { generateRequestId } from '@/lib/core/utils/request'
import { executeInE2B } from '@/lib/execution/e2b'
import { executeInE2B, executeShellInE2B } from '@/lib/execution/e2b'
import { executeInIsolatedVM } from '@/lib/execution/isolated-vm'
import { CodeLanguage, DEFAULT_CODE_LANGUAGE, isValidCodeLanguage } from '@/lib/execution/languages'
import { uploadWorkspaceFile } from '@/lib/uploads/contexts/workspace/workspace-file-manager'
import { getWorkflowById } from '@/lib/workflows/utils'
import { escapeRegExp, normalizeName, REFERENCE } from '@/executor/constants'
import { type OutputSchema, resolveBlockReference } from '@/executor/utils/block-reference'
import { formatLiteralForCode } from '@/executor/utils/code-formatting'
@@ -580,6 +587,96 @@ function cleanStdout(stdout: string): string {
return stdout
}
/**
 * Optionally exports a file produced inside the E2B sandbox into the user's
 * workspace file storage.
 *
 * Returns `null` when no export was requested (no `outputSandboxPath`), in
 * which case the caller proceeds with its normal success response. Otherwise
 * returns a terminal `NextResponse` — either an error (bad request / read
 * failure) or the success payload describing the uploaded workspace file.
 *
 * NOTE(review): the sandbox file content is re-encoded via
 * `Buffer.from(..., 'utf-8')`, so this path assumes text output formats —
 * confirm binary formats are never routed here.
 */
async function maybeExportSandboxFileToWorkspace(args: {
  authUserId: string
  workflowId?: string
  workspaceId?: string
  outputPath?: string
  outputFormat?: string
  outputSandboxPath?: string
  exportedFileContent?: string
  stdout: string
  executionTime: number
}) {
  const {
    authUserId,
    workflowId,
    workspaceId,
    outputPath,
    outputFormat,
    outputSandboxPath,
    exportedFileContent,
    stdout,
    executionTime,
  } = args

  // No export requested — signal the caller to continue as usual.
  if (!outputSandboxPath) return null

  // All failure responses share the same shape: the captured stdout and
  // execution time are preserved so the client still sees what ran.
  const failure = (error: string, status: number) =>
    NextResponse.json(
      {
        success: false,
        error,
        output: { result: null, stdout: cleanStdout(stdout), executionTime },
      },
      { status }
    )

  if (!outputPath) {
    return failure(
      'outputSandboxPath requires outputPath. Set outputPath to the destination workspace file, e.g. "files/result.csv".',
      400
    )
  }

  // Prefer the explicit workspaceId; otherwise derive it from the workflow.
  let resolvedWorkspaceId = workspaceId
  if (!resolvedWorkspaceId && workflowId) {
    resolvedWorkspaceId = (await getWorkflowById(workflowId))?.workspaceId
  }
  if (!resolvedWorkspaceId) {
    return failure('Workspace context required to save sandbox file to workspace', 400)
  }

  // The sandbox executor reads the file for us; `undefined` means the read failed.
  if (exportedFileContent === undefined) {
    return failure(
      `Sandbox file "${outputSandboxPath}" was not found or could not be read`,
      500
    )
  }

  const fileName = normalizeOutputWorkspaceFileName(outputPath)
  const format = resolveOutputFormat(fileName, outputFormat)
  const uploaded = await uploadWorkspaceFile(
    resolvedWorkspaceId,
    authUserId,
    Buffer.from(exportedFileContent, 'utf-8'),
    fileName,
    FORMAT_TO_CONTENT_TYPE[format]
  )

  return NextResponse.json({
    success: true,
    output: {
      result: {
        message: `Sandbox file exported to files/${fileName}`,
        fileId: uploaded.id,
        fileName,
        downloadUrl: uploaded.url,
        sandboxPath: outputSandboxPath,
      },
      stdout: cleanStdout(stdout),
      executionTime,
    },
    resources: [{ type: 'file', id: uploaded.id, title: fileName }],
  })
}
export async function POST(req: NextRequest) {
const requestId = generateRequestId()
const startTime = Date.now()
@@ -603,12 +700,16 @@ export async function POST(req: NextRequest) {
params = {},
timeout = DEFAULT_EXECUTION_TIMEOUT_MS,
language = DEFAULT_CODE_LANGUAGE,
outputPath,
outputFormat,
outputSandboxPath,
envVars = {},
blockData = {},
blockNameMapping = {},
blockOutputSchemas = {},
workflowVariables = {},
workflowId,
workspaceId,
isCustomTool = false,
_sandboxFiles,
} = body
@@ -652,6 +753,82 @@ export async function POST(req: NextRequest) {
hasImports = jsImports.trim().length > 0 || hasRequireStatements
}
if (lang === CodeLanguage.Shell) {
if (!isE2bEnabled) {
throw new Error(
'Shell execution requires E2B to be enabled. Please contact your administrator to enable E2B.'
)
}
const shellEnvs: Record<string, string> = {}
for (const [k, v] of Object.entries(envVars)) {
shellEnvs[k] = String(v)
}
for (const [k, v] of Object.entries(contextVariables)) {
shellEnvs[k] = String(v)
}
logger.info(`[${requestId}] E2B shell execution`, {
enabled: isE2bEnabled,
hasApiKey: Boolean(process.env.E2B_API_KEY),
envVarCount: Object.keys(shellEnvs).length,
})
const execStart = Date.now()
const {
result: shellResult,
stdout: shellStdout,
sandboxId,
error: shellError,
exportedFileContent,
} = await executeShellInE2B({
code: resolvedCode,
envs: shellEnvs,
timeoutMs: timeout,
sandboxFiles: _sandboxFiles,
outputSandboxPath,
})
const executionTime = Date.now() - execStart
logger.info(`[${requestId}] E2B shell sandbox`, {
sandboxId,
stdoutPreview: shellStdout?.slice(0, 200),
error: shellError,
executionTime,
})
if (shellError) {
return NextResponse.json(
{
success: false,
error: shellError,
output: { result: null, stdout: cleanStdout(shellStdout), executionTime },
},
{ status: 500 }
)
}
if (outputSandboxPath) {
const fileExportResponse = await maybeExportSandboxFileToWorkspace({
authUserId: auth.userId,
workflowId,
workspaceId,
outputPath,
outputFormat,
outputSandboxPath,
exportedFileContent,
stdout: shellStdout,
executionTime,
})
if (fileExportResponse) return fileExportResponse
}
return NextResponse.json({
success: true,
output: { result: shellResult ?? null, stdout: cleanStdout(shellStdout), executionTime },
})
}
if (lang === CodeLanguage.Python && !isE2bEnabled) {
throw new Error(
'Python execution requires E2B to be enabled. Please contact your administrator to enable E2B, or use JavaScript instead.'
@@ -719,11 +896,13 @@ export async function POST(req: NextRequest) {
stdout: e2bStdout,
sandboxId,
error: e2bError,
exportedFileContent,
} = await executeInE2B({
code: codeForE2B,
language: CodeLanguage.JavaScript,
timeoutMs: timeout,
sandboxFiles: _sandboxFiles,
outputSandboxPath,
})
const executionTime = Date.now() - execStart
stdout += e2bStdout
@@ -752,6 +931,21 @@ export async function POST(req: NextRequest) {
)
}
if (outputSandboxPath) {
const fileExportResponse = await maybeExportSandboxFileToWorkspace({
authUserId: auth.userId,
workflowId,
workspaceId,
outputPath,
outputFormat,
outputSandboxPath,
exportedFileContent,
stdout,
executionTime,
})
if (fileExportResponse) return fileExportResponse
}
return NextResponse.json({
success: true,
output: { result: e2bResult ?? null, stdout: cleanStdout(stdout), executionTime },
@@ -783,11 +977,13 @@ export async function POST(req: NextRequest) {
stdout: e2bStdout,
sandboxId,
error: e2bError,
exportedFileContent,
} = await executeInE2B({
code: codeForE2B,
language: CodeLanguage.Python,
timeoutMs: timeout,
sandboxFiles: _sandboxFiles,
outputSandboxPath,
})
const executionTime = Date.now() - execStart
stdout += e2bStdout
@@ -816,6 +1012,21 @@ export async function POST(req: NextRequest) {
)
}
if (outputSandboxPath) {
const fileExportResponse = await maybeExportSandboxFileToWorkspace({
authUserId: auth.userId,
workflowId,
workspaceId,
outputPath,
outputFormat,
outputSandboxPath,
exportedFileContent,
stdout,
executionTime,
})
if (fileExportResponse) return fileExportResponse
}
return NextResponse.json({
success: true,
output: { result: e2bResult ?? null, stdout: cleanStdout(stdout), executionTime },

View File

@@ -18,14 +18,11 @@ import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { validateOAuthAccessToken } from '@/lib/auth/oauth-token'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { createRunSegment } from '@/lib/copilot/async-runs/repository'
import { ORCHESTRATION_TIMEOUT_MS, SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import { orchestrateSubagentStream } from '@/lib/copilot/orchestrator/subagent'
import {
executeToolServerSide,
prepareExecutionContext,
} from '@/lib/copilot/orchestrator/tool-executor'
import { runCopilotLifecycle } from '@/lib/copilot/request/lifecycle/run'
import { orchestrateSubagentStream } from '@/lib/copilot/request/subagent'
import { ensureHandlersRegistered, executeTool } from '@/lib/copilot/tool-executor'
import { prepareExecutionContext } from '@/lib/copilot/tools/handlers/context'
import { DIRECT_TOOL_DEFS, SUBAGENT_TOOL_DEFS } from '@/lib/copilot/tools/mcp/definitions'
import { env } from '@/lib/core/config/env'
import { RateLimiter } from '@/lib/core/rate-limiter'
@@ -645,7 +642,8 @@ async function handleDirectToolCall(
startTime: Date.now(),
}
const result = await executeToolServerSide(toolCall, execContext)
ensureHandlersRegistered()
const result = await executeTool(toolCall.name, toolCall.params || {}, execContext)
return {
content: [
@@ -728,25 +726,10 @@ async function handleBuildToolCall(
chatId,
}
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
const messageId = requestPayload.messageId as string
await createRunSegment({
id: runId,
executionId,
chatId,
userId,
workflowId: resolved.workflowId,
streamId: messageId,
}).catch(() => {})
const result = await orchestrateCopilotStream(requestPayload, {
const result = await runCopilotLifecycle(requestPayload, {
userId,
workflowId: resolved.workflowId,
chatId,
executionId,
runId,
goRoute: '/api/mcp',
autoExecuteTools: true,
timeout: ORCHESTRATION_TIMEOUT_MS,

View File

@@ -5,18 +5,26 @@ import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { resolveOrCreateChat } from '@/lib/copilot/chat-lifecycle'
import { buildCopilotRequestPayload } from '@/lib/copilot/chat-payload'
import { resolveOrCreateChat } from '@/lib/copilot/chat/lifecycle'
import { buildCopilotRequestPayload } from '@/lib/copilot/chat/payload'
import {
buildPersistedAssistantMessage,
buildPersistedUserMessage,
} from '@/lib/copilot/chat/persisted-message'
import {
processContextsServer,
resolveActiveResourceContext,
} from '@/lib/copilot/chat/process-contents'
import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context'
import { createRequestTracker, createUnauthorizedResponse } from '@/lib/copilot/request/http'
import { createSSEStream, SSE_RESPONSE_HEADERS } from '@/lib/copilot/request/lifecycle/start'
import {
acquirePendingChatStream,
createSSEStream,
SSE_RESPONSE_HEADERS,
} from '@/lib/copilot/chat-streaming'
import type { OrchestratorResult } from '@/lib/copilot/orchestrator/types'
import { processContextsServer, resolveActiveResourceContext } from '@/lib/copilot/process-contents'
import { createRequestTracker, createUnauthorizedResponse } from '@/lib/copilot/request-helpers'
import { taskPubSub } from '@/lib/copilot/task-events'
import { generateWorkspaceContext } from '@/lib/copilot/workspace-context'
getPendingChatStreamId,
releasePendingChatStream,
} from '@/lib/copilot/request/session'
import type { OrchestratorResult } from '@/lib/copilot/request/types'
import { taskPubSub } from '@/lib/copilot/tasks'
import {
assertActiveWorkspaceAccess,
getUserEntityPermissions,
@@ -37,7 +45,6 @@ const FileAttachmentSchema = z.object({
const ResourceAttachmentSchema = z.object({
type: z.enum(['workflow', 'table', 'file', 'knowledgebase']),
id: z.string().min(1),
title: z.string().optional(),
active: z.boolean().optional(),
})
@@ -87,7 +94,9 @@ const MothershipMessageSchema = z.object({
*/
export async function POST(req: NextRequest) {
const tracker = createRequestTracker()
let userMessageIdForLogs: string | undefined
let lockChatId: string | undefined
let lockStreamId = ''
let chatStreamLockAcquired = false
try {
const session = await getSession()
@@ -110,27 +119,23 @@ export async function POST(req: NextRequest) {
} = MothershipMessageSchema.parse(body)
const userMessageId = providedMessageId || crypto.randomUUID()
userMessageIdForLogs = userMessageId
const reqLogger = logger.withMetadata({
requestId: tracker.requestId,
messageId: userMessageId,
})
lockStreamId = userMessageId
reqLogger.info('Received mothership chat start request', {
workspaceId,
chatId,
createNewChat,
hasContexts: Array.isArray(contexts) && contexts.length > 0,
contextsCount: Array.isArray(contexts) ? contexts.length : 0,
hasResourceAttachments: Array.isArray(resourceAttachments) && resourceAttachments.length > 0,
resourceAttachmentCount: Array.isArray(resourceAttachments) ? resourceAttachments.length : 0,
hasFileAttachments: Array.isArray(fileAttachments) && fileAttachments.length > 0,
fileAttachmentCount: Array.isArray(fileAttachments) ? fileAttachments.length : 0,
})
// Phase 1: workspace access + chat resolution in parallel
const [accessResult, chatResult] = await Promise.allSettled([
assertActiveWorkspaceAccess(workspaceId, authenticatedUserId),
chatId || createNewChat
? resolveOrCreateChat({
chatId,
userId: authenticatedUserId,
workspaceId,
model: 'claude-opus-4-6',
type: 'mothership',
})
: null,
])
try {
await assertActiveWorkspaceAccess(workspaceId, authenticatedUserId)
} catch {
if (accessResult.status === 'rejected') {
return NextResponse.json({ error: 'Workspace not found or access denied' }, { status: 403 })
}
@@ -138,18 +143,12 @@ export async function POST(req: NextRequest) {
let conversationHistory: any[] = []
let actualChatId = chatId
if (chatId || createNewChat) {
const chatResult = await resolveOrCreateChat({
chatId,
userId: authenticatedUserId,
workspaceId,
model: 'claude-opus-4-6',
type: 'mothership',
})
currentChat = chatResult.chat
actualChatId = chatResult.chatId || chatId
conversationHistory = Array.isArray(chatResult.conversationHistory)
? chatResult.conversationHistory
if (chatResult.status === 'fulfilled' && chatResult.value) {
const resolved = chatResult.value
currentChat = resolved.chat
actualChatId = resolved.chatId || chatId
conversationHistory = Array.isArray(resolved.conversationHistory)
? resolved.conversationHistory
: []
if (chatId && !currentChat) {
@@ -157,76 +156,73 @@ export async function POST(req: NextRequest) {
}
}
let agentContexts: Array<{ type: string; content: string }> = []
if (Array.isArray(contexts) && contexts.length > 0) {
try {
agentContexts = await processContextsServer(
contexts as any,
authenticatedUserId,
message,
workspaceId,
actualChatId
if (actualChatId) {
chatStreamLockAcquired = await acquirePendingChatStream(actualChatId, userMessageId)
if (!chatStreamLockAcquired) {
const activeStreamId = await getPendingChatStreamId(actualChatId)
return NextResponse.json(
{
error: 'A response is already in progress for this chat.',
...(activeStreamId ? { activeStreamId } : {}),
},
{ status: 409 }
)
} catch (e) {
reqLogger.error('Failed to process contexts', e)
}
lockChatId = actualChatId
}
if (Array.isArray(resourceAttachments) && resourceAttachments.length > 0) {
const results = await Promise.allSettled(
resourceAttachments.map(async (r) => {
const ctx = await resolveActiveResourceContext(
r.type,
r.id,
workspaceId,
// Phase 2: contexts + workspace context + user message persistence in parallel
const contextPromise = (async () => {
let agentCtxs: Array<{ type: string; content: string }> = []
if (Array.isArray(contexts) && contexts.length > 0) {
try {
agentCtxs = await processContextsServer(
contexts as any,
authenticatedUserId,
message,
workspaceId,
actualChatId
)
if (!ctx) return null
return {
...ctx,
tag: r.active ? '@active_tab' : '@open_tab',
}
})
)
for (const result of results) {
if (result.status === 'fulfilled' && result.value) {
agentContexts.push(result.value)
} else if (result.status === 'rejected') {
reqLogger.error('Failed to resolve resource attachment', result.reason)
} catch (e) {
logger.error(`[${tracker.requestId}] Failed to process contexts`, e)
}
}
}
if (actualChatId) {
const userMsg = {
id: userMessageId,
role: 'user' as const,
content: message,
timestamp: new Date().toISOString(),
...(fileAttachments &&
fileAttachments.length > 0 && {
fileAttachments: fileAttachments.map((f) => ({
id: f.id,
key: f.key,
filename: f.filename,
media_type: f.media_type,
size: f.size,
})),
}),
...(contexts &&
contexts.length > 0 && {
contexts: contexts.map((c) => ({
kind: c.kind,
label: c.label,
...(c.workflowId && { workflowId: c.workflowId }),
...(c.knowledgeId && { knowledgeId: c.knowledgeId }),
...(c.tableId && { tableId: c.tableId }),
...(c.fileId && { fileId: c.fileId }),
})),
}),
if (Array.isArray(resourceAttachments) && resourceAttachments.length > 0) {
const results = await Promise.allSettled(
resourceAttachments.map(async (r) => {
const ctx = await resolveActiveResourceContext(
r.type,
r.id,
workspaceId,
authenticatedUserId,
actualChatId
)
if (!ctx) return null
return { ...ctx, tag: r.active ? '@active_tab' : '@open_tab' }
})
)
for (const result of results) {
if (result.status === 'fulfilled' && result.value) {
agentCtxs.push(result.value)
} else if (result.status === 'rejected') {
logger.error(
`[${tracker.requestId}] Failed to resolve resource attachment`,
result.reason
)
}
}
}
return agentCtxs
})()
const userMsgPromise = (async () => {
if (!actualChatId) return
const userMsg = buildPersistedUserMessage({
id: userMessageId,
content: message,
fileAttachments,
contexts,
})
const [updated] = await db
.update(copilotChats)
.set({
@@ -242,11 +238,15 @@ export async function POST(req: NextRequest) {
conversationHistory = freshMessages.filter((m: any) => m.id !== userMessageId)
taskPubSub?.publishStatusChanged({ workspaceId, chatId: actualChatId, type: 'started' })
}
}
})()
const [workspaceContext, userPermission] = await Promise.all([
generateWorkspaceContext(workspaceId, authenticatedUserId),
getUserEntityPermissions(authenticatedUserId, 'workspace', workspaceId).catch(() => null),
const [agentContexts, [workspaceContext, userPermission]] = await Promise.all([
contextPromise,
Promise.all([
generateWorkspaceContext(workspaceId, authenticatedUserId),
getUserEntityPermissions(authenticatedUserId, 'workspace', workspaceId).catch(() => null),
]),
userMsgPromise,
])
const requestPayload = await buildCopilotRequestPayload(
@@ -267,19 +267,6 @@ export async function POST(req: NextRequest) {
{ selectedModel: '' }
)
if (actualChatId) {
const acquired = await acquirePendingChatStream(actualChatId, userMessageId)
if (!acquired) {
return NextResponse.json(
{
error:
'A response is already in progress for this chat. Wait for it to finish or use Stop.',
},
{ status: 409 }
)
}
}
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
const stream = createSSEStream({
@@ -295,7 +282,6 @@ export async function POST(req: NextRequest) {
titleModel: 'claude-opus-4-6',
requestId: tracker.requestId,
workspaceId,
pendingChatStreamAlreadyRegistered: Boolean(actualChatId),
orchestrateOptions: {
userId: authenticatedUserId,
workspaceId,
@@ -309,46 +295,7 @@ export async function POST(req: NextRequest) {
if (!actualChatId) return
if (!result.success) return
const assistantMessage: Record<string, unknown> = {
id: crypto.randomUUID(),
role: 'assistant' as const,
content: result.content,
timestamp: new Date().toISOString(),
...(result.requestId ? { requestId: result.requestId } : {}),
}
if (result.toolCalls.length > 0) {
assistantMessage.toolCalls = result.toolCalls
}
if (result.contentBlocks.length > 0) {
assistantMessage.contentBlocks = result.contentBlocks.map((block) => {
const stored: Record<string, unknown> = { type: block.type }
if (block.content) stored.content = block.content
if (block.type === 'tool_call' && block.toolCall) {
const state =
block.toolCall.result?.success !== undefined
? block.toolCall.result.success
? 'success'
: 'error'
: block.toolCall.status
const isSubagentTool = !!block.calledBy
const isNonTerminal =
state === 'cancelled' || state === 'pending' || state === 'executing'
stored.toolCall = {
id: block.toolCall.id,
name: block.toolCall.name,
state,
...(isSubagentTool && isNonTerminal ? {} : { result: block.toolCall.result }),
...(isSubagentTool && isNonTerminal
? {}
: block.toolCall.params
? { params: block.toolCall.params }
: {}),
...(block.calledBy ? { calledBy: block.calledBy } : {}),
}
}
return stored
})
}
const assistantMessage = buildPersistedAssistantMessage(result, result.requestId)
try {
const [row] = await db
@@ -381,7 +328,7 @@ export async function POST(req: NextRequest) {
})
}
} catch (error) {
reqLogger.error('Failed to persist chat messages', {
logger.error(`[${tracker.requestId}] Failed to persist chat messages`, {
chatId: actualChatId,
error: error instanceof Error ? error.message : 'Unknown error',
})
@@ -392,6 +339,9 @@ export async function POST(req: NextRequest) {
return new Response(stream, { headers: SSE_RESPONSE_HEADERS })
} catch (error) {
if (chatStreamLockAcquired && lockChatId && lockStreamId) {
await releasePendingChatStream(lockChatId, lockStreamId)
}
if (error instanceof z.ZodError) {
return NextResponse.json(
{ error: 'Invalid request data', details: error.errors },
@@ -399,11 +349,9 @@ export async function POST(req: NextRequest) {
)
}
logger
.withMetadata({ requestId: tracker.requestId, messageId: userMessageIdForLogs })
.error('Error handling mothership chat', {
error: error instanceof Error ? error.message : 'Unknown error',
})
logger.error(`[${tracker.requestId}] Error handling mothership chat:`, {
error: error instanceof Error ? error.message : 'Unknown error',
})
return NextResponse.json(
{ error: error instanceof Error ? error.message : 'Internal server error' },

View File

@@ -5,8 +5,9 @@ import { and, eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { releasePendingChatStream } from '@/lib/copilot/chat-streaming'
import { taskPubSub } from '@/lib/copilot/task-events'
import { normalizeMessage, type PersistedMessage } from '@/lib/copilot/chat/persisted-message'
import { releasePendingChatStream } from '@/lib/copilot/request/session'
import { taskPubSub } from '@/lib/copilot/tasks'
const logger = createLogger('MothershipChatStopAPI')
@@ -26,15 +27,25 @@ const StoredToolCallSchema = z
display: z
.object({
text: z.string().optional(),
title: z.string().optional(),
phaseLabel: z.string().optional(),
})
.optional(),
calledBy: z.string().optional(),
durationMs: z.number().optional(),
error: z.string().optional(),
})
.nullable()
const ContentBlockSchema = z.object({
type: z.string(),
lane: z.enum(['main', 'subagent']).optional(),
content: z.string().optional(),
channel: z.enum(['assistant', 'thinking']).optional(),
phase: z.enum(['call', 'args_delta', 'result']).optional(),
kind: z.enum(['subagent', 'structured_result', 'subagent_result']).optional(),
lifecycle: z.enum(['start', 'end']).optional(),
status: z.enum(['complete', 'error', 'cancelled']).optional(),
toolCall: StoredToolCallSchema.optional(),
})
@@ -70,15 +81,14 @@ export async function POST(req: NextRequest) {
const hasBlocks = Array.isArray(contentBlocks) && contentBlocks.length > 0
if (hasContent || hasBlocks) {
const assistantMessage: Record<string, unknown> = {
const normalized = normalizeMessage({
id: crypto.randomUUID(),
role: 'assistant' as const,
role: 'assistant',
content,
timestamp: new Date().toISOString(),
}
if (hasBlocks) {
assistantMessage.contentBlocks = contentBlocks
}
...(hasBlocks ? { contentBlocks } : {}),
})
const assistantMessage: PersistedMessage = normalized
setClause.messages = sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb`
}

View File

@@ -4,15 +4,15 @@ import { createLogger } from '@sim/logger'
import { and, eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat-lifecycle'
import { getStreamMeta, readStreamEvents } from '@/lib/copilot/orchestrator/stream/buffer'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
import { taskPubSub } from '@/lib/copilot/task-events'
} from '@/lib/copilot/request/http'
import { readEvents } from '@/lib/copilot/request/session/buffer'
import { taskPubSub } from '@/lib/copilot/tasks'
const logger = createLogger('MothershipChatAPI')
@@ -46,29 +46,24 @@ export async function GET(
}
let streamSnapshot: {
events: Array<{ eventId: number; streamId: string; event: Record<string, unknown> }>
events: unknown[]
status: string
} | null = null
if (chat.conversationId) {
try {
const [meta, events] = await Promise.all([
getStreamMeta(chat.conversationId),
readStreamEvents(chat.conversationId, 0),
])
const events = await readEvents(chat.conversationId, '0')
streamSnapshot = {
events: events || [],
status: meta?.status || 'unknown',
status: events.length > 0 ? 'active' : 'unknown',
}
} catch (error) {
logger
.withMetadata({ messageId: chat.conversationId || undefined })
.warn('Failed to read stream snapshot for mothership chat', {
chatId,
conversationId: chat.conversationId,
error: error instanceof Error ? error.message : String(error),
})
logger.warn('Failed to read stream snapshot for mothership chat', {
chatId,
conversationId: chat.conversationId,
error: error instanceof Error ? error.message : String(error),
})
}
}

View File

@@ -0,0 +1,43 @@
import { db } from '@sim/db'
import { copilotChats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request/http'
const logger = createLogger('MarkTaskReadAPI')
const MarkReadSchema = z.object({
chatId: z.string().min(1),
})
/**
 * Marks a copilot chat as read by the authenticated session user.
 *
 * Bumps `lastSeenAt` to `GREATEST(updatedAt, NOW())` so the stored "seen"
 * timestamp is never earlier than the chat's latest update. The WHERE clause
 * scopes the write to chats owned by the caller, so a foreign chatId is a
 * silent no-op rather than an error.
 *
 * Responses: 200 `{ success: true }` · 400 when chatId is missing/invalid ·
 * 401 when unauthenticated · 500 on unexpected failure.
 */
export async function POST(request: NextRequest) {
  try {
    const auth = await authenticateCopilotRequestSessionOnly()
    if (!auth.isAuthenticated || !auth.userId) {
      return createUnauthorizedResponse()
    }

    const { chatId } = MarkReadSchema.parse(await request.json())

    await db
      .update(copilotChats)
      .set({ lastSeenAt: sql`GREATEST(${copilotChats.updatedAt}, NOW())` })
      .where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, auth.userId)))

    return NextResponse.json({ success: true })
  } catch (error) {
    // Schema failures become a 400 with a stable client-facing message.
    if (error instanceof z.ZodError) {
      return createBadRequestResponse('chatId is required')
    }
    logger.error('Error marking task as read:', error)
    return createInternalServerErrorResponse('Failed to mark task as read')
  }
}

View File

@@ -9,8 +9,8 @@ import {
createBadRequestResponse,
createInternalServerErrorResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
import { taskPubSub } from '@/lib/copilot/task-events'
} from '@/lib/copilot/request/http'
import { taskPubSub } from '@/lib/copilot/tasks'
import { assertActiveWorkspaceAccess } from '@/lib/workspaces/permissions/utils'
const logger = createLogger('MothershipChatsAPI')
@@ -38,7 +38,7 @@ export async function GET(request: NextRequest) {
id: copilotChats.id,
title: copilotChats.title,
updatedAt: copilotChats.updatedAt,
conversationId: copilotChats.conversationId,
activeStreamId: copilotChats.conversationId,
lastSeenAt: copilotChats.lastSeenAt,
})
.from(copilotChats)

View File

@@ -7,7 +7,7 @@
* Auth is handled via session cookies (EventSource sends cookies automatically).
*/
import { taskPubSub } from '@/lib/copilot/task-events'
import { taskPubSub } from '@/lib/copilot/tasks'
import { createWorkspaceSSE } from '@/lib/events/sse-endpoint'
export const dynamic = 'force-dynamic'

View File

@@ -2,10 +2,9 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { checkInternalAuth } from '@/lib/auth/hybrid'
import { createRunSegment } from '@/lib/copilot/async-runs/repository'
import { buildIntegrationToolSchemas } from '@/lib/copilot/chat-payload'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import { generateWorkspaceContext } from '@/lib/copilot/workspace-context'
import { buildIntegrationToolSchemas } from '@/lib/copilot/chat/payload'
import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context'
import { runCopilotLifecycle } from '@/lib/copilot/request/lifecycle/run'
import {
assertActiveWorkspaceAccess,
getUserEntityPermissions,
@@ -72,34 +71,25 @@ export async function POST(req: NextRequest) {
...(userPermission ? { userPermission } : {}),
}
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
await createRunSegment({
id: runId,
executionId,
chatId: effectiveChatId,
userId,
workspaceId,
streamId: messageId,
}).catch(() => {})
const result = await orchestrateCopilotStream(requestPayload, {
const result = await runCopilotLifecycle(requestPayload, {
userId,
workspaceId,
chatId: effectiveChatId,
executionId,
runId,
goRoute: '/api/mothership/execute',
autoExecuteTools: true,
interactive: false,
})
if (!result.success) {
reqLogger.error('Mothership execute failed', {
error: result.error,
errors: result.errors,
})
logger.error(
messageId
? `Mothership execute failed [messageId:${messageId}]`
: 'Mothership execute failed',
{
error: result.error,
errors: result.errors,
}
)
return NextResponse.json(
{
error: result.error || 'Mothership execution failed',
@@ -135,9 +125,12 @@ export async function POST(req: NextRequest) {
)
}
logger.withMetadata({ messageId }).error('Mothership execute error', {
error: error instanceof Error ? error.message : 'Unknown error',
})
logger.error(
messageId ? `Mothership execute error [messageId:${messageId}]` : 'Mothership execute error',
{
error: error instanceof Error ? error.message : 'Unknown error',
}
)
return NextResponse.json(
{ error: error instanceof Error ? error.message : 'Internal server error' },

View File

@@ -3,7 +3,7 @@ import { templates } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { checkInternalApiKey } from '@/lib/copilot/utils'
import { checkInternalApiKey } from '@/lib/copilot/request/http'
import { generateRequestId } from '@/lib/core/utils/request'
import { sanitizeForCopilot } from '@/lib/workflows/sanitization/json-sanitizer'

View File

@@ -1,9 +1,8 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { createRunSegment } from '@/lib/copilot/async-runs/repository'
import { COPILOT_REQUEST_MODES } from '@/lib/copilot/models'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import { COPILOT_REQUEST_MODES } from '@/lib/copilot/constants'
import { runCopilotLifecycle } from '@/lib/copilot/request/lifecycle/run'
import { getWorkflowById, resolveWorkflowIdForUser } from '@/lib/workflows/utils'
import { authenticateV1Request } from '@/app/api/v1/auth'
@@ -83,15 +82,19 @@ export async function POST(req: NextRequest) {
const chatId = parsed.chatId || crypto.randomUUID()
messageId = crypto.randomUUID()
const reqLogger = logger.withMetadata({ messageId })
reqLogger.info('Received headless copilot chat start request', {
workflowId: resolved.workflowId,
workflowName: parsed.workflowName,
chatId,
mode: transportMode,
autoExecuteTools: parsed.autoExecuteTools,
timeout: parsed.timeout,
})
logger.info(
messageId
? `Received headless copilot chat start request [messageId:${messageId}]`
: 'Received headless copilot chat start request',
{
workflowId: resolved.workflowId,
workflowName: parsed.workflowName,
chatId,
mode: transportMode,
autoExecuteTools: parsed.autoExecuteTools,
timeout: parsed.timeout,
}
)
const requestPayload = {
message: parsed.message,
workflowId: resolved.workflowId,
@@ -102,24 +105,10 @@ export async function POST(req: NextRequest) {
chatId,
}
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
await createRunSegment({
id: runId,
executionId,
chatId,
userId: auth.userId,
workflowId: resolved.workflowId,
streamId: messageId,
}).catch(() => {})
const result = await orchestrateCopilotStream(requestPayload, {
const result = await runCopilotLifecycle(requestPayload, {
userId: auth.userId,
workflowId: resolved.workflowId,
chatId,
executionId,
runId,
goRoute: '/api/mcp',
autoExecuteTools: parsed.autoExecuteTools,
timeout: parsed.timeout,
@@ -141,9 +130,14 @@ export async function POST(req: NextRequest) {
)
}
logger.withMetadata({ messageId }).error('Headless copilot request failed', {
error: error instanceof Error ? error.message : String(error),
})
logger.error(
messageId
? `Headless copilot request failed [messageId:${messageId}]`
: 'Headless copilot request failed',
{
error: error instanceof Error ? error.message : String(error),
}
)
return NextResponse.json({ success: false, error: 'Internal server error' }, { status: 500 })
}
}

View File

@@ -155,19 +155,14 @@ async function handleWebhookPost(
if (shouldSkipWebhookEvent(foundWebhook, body, requestId)) {
continue
}
try {
const response = await queueWebhookExecution(foundWebhook, foundWorkflow, body, request, {
requestId,
path,
actorUserId: preprocessResult.actorUserId,
executionId: preprocessResult.executionId,
correlation: preprocessResult.correlation,
})
responses.push(response)
} catch (error) {
throw error
}
const response = await queueWebhookExecution(foundWebhook, foundWorkflow, body, request, {
requestId,
path,
actorUserId: preprocessResult.actorUserId,
executionId: preprocessResult.executionId,
correlation: preprocessResult.correlation,
})
responses.push(response)
}
if (responses.length === 0) {

View File

@@ -1,21 +1,17 @@
'use client'
import type { AgentGroupItem } from '@/app/workspace/[workspaceId]/home/components/message-content/components'
import {
AgentGroup,
ChatContent,
CircleStop,
Options,
PendingTagIndicator,
} from '@/app/workspace/[workspaceId]/home/components/message-content/components'
import type {
ContentBlock,
MothershipToolName,
OptionItem,
SubagentName,
ToolCallData,
} from '@/app/workspace/[workspaceId]/home/types'
import { SUBAGENT_LABELS, TOOL_UI_METADATA } from '@/app/workspace/[workspaceId]/home/types'
FileWrite,
Read as ReadTool,
ToolSearchToolRegex,
WorkspaceFile,
} from '@/lib/copilot/generated/tool-catalog-v1'
import { resolveToolDisplay } from '@/lib/copilot/tools/client/store-utils'
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
import type { ContentBlock, OptionItem, ToolCallData } from '../../types'
import { SUBAGENT_LABELS, TOOL_UI_METADATA } from '../../types'
import type { AgentGroupItem } from './components'
import { AgentGroup, ChatContent, CircleStop, Options, PendingTagIndicator } from './components'
interface TextSegment {
type: 'text'
@@ -52,11 +48,19 @@ const SUBAGENT_KEYS = new Set(Object.keys(SUBAGENT_LABELS))
* group is absorbed so it doesn't render as a separate Mothership entry.
*/
const SUBAGENT_DISPATCH_TOOLS: Record<string, string> = {
file_write: 'workspace_file',
[FileWrite.id]: WorkspaceFile.id,
}
function formatToolName(name: string): string {
return name
.replace(/_v\d+$/, '')
.split('_')
.map((w) => w.charAt(0).toUpperCase() + w.slice(1))
.join(' ')
}
function resolveAgentLabel(key: string): string {
return SUBAGENT_LABELS[key as SubagentName] ?? key
return SUBAGENT_LABELS[key] ?? formatToolName(key)
}
function isToolDone(status: ToolCallData['status']): boolean {
@@ -67,12 +71,41 @@ function isDelegatingTool(tc: NonNullable<ContentBlock['toolCall']>): boolean {
return tc.status === 'executing'
}
function mapToolStatusToClientState(
status: ContentBlock['toolCall'] extends { status: infer T } ? T : string
) {
switch (status) {
case 'success':
return ClientToolCallState.success
case 'error':
return ClientToolCallState.error
case 'cancelled':
return ClientToolCallState.cancelled
default:
return ClientToolCallState.executing
}
}
function getOverrideDisplayTitle(tc: NonNullable<ContentBlock['toolCall']>): string | undefined {
if (tc.name === ReadTool.id || tc.name.endsWith('_respond')) {
return resolveToolDisplay(tc.name, mapToolStatusToClientState(tc.status), tc.id, tc.params)
?.text
}
return undefined
}
function toToolData(tc: NonNullable<ContentBlock['toolCall']>): ToolCallData {
const overrideDisplayTitle = getOverrideDisplayTitle(tc)
const displayTitle =
overrideDisplayTitle ||
tc.displayTitle ||
TOOL_UI_METADATA[tc.name as keyof typeof TOOL_UI_METADATA]?.title ||
formatToolName(tc.name)
return {
id: tc.id,
toolName: tc.name,
displayTitle:
tc.displayTitle ?? TOOL_UI_METADATA[tc.name as MothershipToolName]?.title ?? tc.name,
displayTitle,
status: tc.status,
params: tc.params,
result: tc.result,
@@ -172,7 +205,7 @@ function parseBlocks(blocks: ContentBlock[]): MessageSegment[] {
if (block.type === 'tool_call') {
if (!block.toolCall) continue
const tc = block.toolCall
if (tc.name === 'tool_search_tool_regex') continue
if (tc.name === ToolSearchToolRegex.id) continue
const isDispatch = SUBAGENT_KEYS.has(tc.name) && !tc.calledBy
if (isDispatch) {
@@ -312,7 +345,7 @@ export function MessageContent({
if (segments.length === 0) {
if (isStreaming) {
return (
<div className='space-y-2.5'>
<div className='space-y-[10px]'>
<PendingTagIndicator />
</div>
)
@@ -341,7 +374,7 @@ export function MessageContent({
)?.id
return (
<div className='space-y-2.5'>
<div className='space-y-[10px]'>
{segments.map((segment, i) => {
switch (segment.type) {
case 'text':
@@ -384,9 +417,11 @@ export function MessageContent({
)
case 'stopped':
return (
<div key={`stopped-${i}`} className='flex items-center gap-2'>
<div key={`stopped-${i}`} className='flex items-center gap-[8px]'>
<CircleStop className='h-[16px] w-[16px] flex-shrink-0 text-[var(--text-icon)]' />
<span className='font-base text-[var(--text-body)] text-sm'>Stopped by user</span>
<span className='font-base text-[14px] text-[var(--text-body)]'>
Stopped by user
</span>
</div>
)
}

View File

@@ -23,96 +23,33 @@ import {
} from '@/components/emcn'
import { Table as TableIcon } from '@/components/emcn/icons'
import { AgentIcon } from '@/components/icons'
import type { MothershipToolName, SubagentName } from '@/app/workspace/[workspaceId]/home/types'
export type IconComponent = ComponentType<SVGProps<SVGSVGElement>>
const TOOL_ICONS: Record<MothershipToolName | SubagentName | 'mothership', IconComponent> = {
const TOOL_ICONS: Record<string, IconComponent> = {
mothership: Blimp,
// Workspace
glob: FolderCode,
grep: Search,
read: File,
// Search
search_online: Search,
scrape_page: Search,
get_page_contents: Search,
search_library_docs: Library,
crawl_website: Search,
// Execution
function_execute: TerminalWindow,
superagent: Blimp,
run_workflow: PlayOutline,
run_block: PlayOutline,
run_from_block: PlayOutline,
run_workflow_until_block: PlayOutline,
complete_job: PlayOutline,
get_execution_summary: ClipboardList,
get_job_logs: ClipboardList,
get_workflow_logs: ClipboardList,
get_workflow_data: Layout,
get_block_outputs: ClipboardList,
get_block_upstream_references: ClipboardList,
get_deployed_workflow_state: Rocket,
check_deployment_status: Rocket,
// Workflows & folders
create_workflow: Layout,
delete_workflow: Layout,
edit_workflow: Pencil,
rename_workflow: Pencil,
move_workflow: Layout,
create_folder: FolderCode,
delete_folder: FolderCode,
move_folder: FolderCode,
list_folders: FolderCode,
list_user_workspaces: Layout,
revert_to_version: Rocket,
get_deployment_version: Rocket,
open_resource: Eye,
// Files
workspace_file: File,
download_to_workspace_file: File,
materialize_file: File,
generate_image: File,
generate_visualization: File,
// Tables & knowledge
user_table: TableIcon,
knowledge_base: Database,
// Jobs
create_job: Calendar,
manage_job: Calendar,
update_job_history: Calendar,
job_respond: Calendar,
// Management
manage_mcp_tool: Settings,
manage_skill: Asterisk,
manage_credential: Integration,
manage_custom_tool: Wrench,
update_workspace_mcp_server: Settings,
delete_workspace_mcp_server: Settings,
create_workspace_mcp_server: Settings,
list_workspace_mcp_servers: Settings,
oauth_get_auth_link: Integration,
oauth_request_access: Integration,
set_environment_variables: Settings,
set_global_workflow_variables: Settings,
get_platform_actions: Settings,
search_documentation: Library,
search_patterns: Search,
deploy_api: Rocket,
deploy_chat: Rocket,
deploy_mcp: Rocket,
redeploy: Rocket,
generate_api_key: Asterisk,
user_memory: Database,
context_write: Pencil,
context_compaction: Asterisk,
// Subagents
function_execute: TerminalWindow,
superagent: Blimp,
user_table: TableIcon,
workspace_file: File,
create_workflow: Layout,
edit_workflow: Pencil,
build: Hammer,
run: PlayOutline,
deploy: Rocket,
auth: Integration,
knowledge: Database,
knowledge_base: Database,
table: TableIcon,
job: Calendar,
agent: AgentIcon,
@@ -122,6 +59,8 @@ const TOOL_ICONS: Record<MothershipToolName | SubagentName | 'mothership', IconC
debug: Bug,
edit: Pencil,
fast_edit: Pencil,
context_compaction: Asterisk,
open_resource: Eye,
file_write: File,
}

View File

@@ -10,7 +10,7 @@ import {
cancelRunToolExecution,
markRunToolManuallyStopped,
reportManualRunToolStop,
} from '@/lib/copilot/client-sse/run-tool-execution'
} from '@/lib/copilot/tools/client/run-tool-execution'
import {
downloadWorkspaceFile,
getFileExtension,

View File

@@ -9,7 +9,7 @@ import {
} from 'react'
import { Button, Tooltip } from '@/components/emcn'
import { Columns3, Eye, PanelLeft, Pencil } from '@/components/emcn/icons'
import { isEphemeralResource } from '@/lib/copilot/resource-extraction'
import { isEphemeralResource } from '@/lib/copilot/resources/types'
import { cn } from '@/lib/core/utils/cn'
import type { PreviewMode } from '@/app/workspace/[workspaceId]/files/components/file-viewer'
import { AddResourceDropdown } from '@/app/workspace/[workspaceId]/home/components/mothership-view/components/add-resource-dropdown'

View File

@@ -369,7 +369,7 @@ export function Home({ chatId }: HomeProps = {}) {
onCollapse={collapseResource}
isCollapsed={isResourceCollapsed}
streamingFile={streamingFile}
genericResourceData={genericResourceData}
genericResourceData={genericResourceData ?? undefined}
className={skipResourceTransition ? '!transition-none' : undefined}
/>

File diff suppressed because it is too large Load Diff

View File

@@ -1,10 +1,39 @@
import type { MothershipResourceType } from '@/lib/copilot/resource-types'
import {
Agent,
Auth,
Build,
CreateWorkflow,
Debug,
Deploy,
EditWorkflow,
FunctionExecute,
GetPageContents,
Glob,
Grep,
Job,
Knowledge,
KnowledgeBase,
ManageMcpTool,
ManageSkill,
OpenResource,
Read as ReadTool,
Research,
Run,
ScrapePage,
SearchLibraryDocs,
SearchOnline,
Superagent,
Table,
UserMemory,
UserTable,
WorkspaceFile,
} from '@/lib/copilot/generated/tool-catalog-v1'
import type { ChatContext } from '@/stores/panel'
export type {
MothershipResource,
MothershipResourceType,
} from '@/lib/copilot/resource-types'
} from '@/lib/copilot/resources/types'
export interface FileAttachmentForApi {
id: string
@@ -21,169 +50,34 @@ export interface QueuedMessage {
contexts?: ChatContext[]
}
/**
* SSE event types emitted by the Go orchestrator backend.
*
* @example
* ```json
* { "type": "content", "data": "Hello world" }
* { "type": "tool_call", "state": "executing", "toolCallId": "toolu_...", "toolName": "glob", "ui": { "title": "..." } }
* { "type": "subagent_start", "subagent": "build" }
* ```
*/
export type SSEEventType =
| 'chat_id'
| 'request_id'
| 'title_updated'
| 'content'
| 'reasoning' // openai reasoning - render as thinking text
| 'tool_call' // tool call name
| 'tool_call_delta' // chunk of tool call
| 'tool_generating' // start a tool call
| 'tool_result' // tool call result
| 'tool_error' // tool call error
| 'resource_added' // add a resource to the chat
| 'resource_deleted' // delete a resource from the chat
| 'subagent_start' // start a subagent
| 'subagent_end' // end a subagent
| 'structured_result' // structured result from a tool call
| 'subagent_result' // result from a subagent
| 'done' // end of the chat
| 'context_compaction_start' // context compaction started
| 'context_compaction' // conversation context was compacted
| 'error' // error in the chat
| 'start' // start of the chat
/**
* All tool names observed in the mothership SSE stream, grouped by phase.
*
* @example
* ```json
* { "type": "tool_generating", "toolName": "glob" }
* { "type": "tool_call", "toolName": "function_execute", "ui": { "title": "Running code", "icon": "code" } }
* { "type": "tool", "phase": "call", "toolName": "glob" }
* { "type": "tool", "phase": "call", "toolName": "function_execute", "ui": { "title": "Running code", "icon": "code" } }
* ```
* Stream `type` is `MothershipStreamV1EventType.tool` (`mothership-stream-v1`) with `phase: 'call'`.
*/
export type MothershipToolName =
| 'glob'
| 'grep'
| 'read'
| 'search_online'
| 'scrape_page'
| 'get_page_contents'
| 'search_library_docs'
| 'manage_mcp_tool'
| 'manage_skill'
| 'manage_credential'
| 'manage_custom_tool'
| 'manage_job'
| 'user_memory'
| 'function_execute'
| 'superagent'
| 'user_table'
| 'workspace_file'
| 'create_workflow'
| 'delete_workflow'
| 'edit_workflow'
| 'rename_workflow'
| 'move_workflow'
| 'run_workflow'
| 'run_block'
| 'run_from_block'
| 'run_workflow_until_block'
| 'create_folder'
| 'delete_folder'
| 'move_folder'
| 'list_folders'
| 'list_user_workspaces'
| 'create_job'
| 'complete_job'
| 'update_job_history'
| 'job_respond'
| 'download_to_workspace_file'
| 'materialize_file'
| 'context_write'
| 'generate_image'
| 'generate_visualization'
| 'crawl_website'
| 'get_execution_summary'
| 'get_job_logs'
| 'get_deployment_version'
| 'revert_to_version'
| 'check_deployment_status'
| 'get_deployed_workflow_state'
| 'get_workflow_data'
| 'get_workflow_logs'
| 'get_block_outputs'
| 'get_block_upstream_references'
| 'set_global_workflow_variables'
| 'set_environment_variables'
| 'get_platform_actions'
| 'search_documentation'
| 'search_patterns'
| 'update_workspace_mcp_server'
| 'delete_workspace_mcp_server'
| 'create_workspace_mcp_server'
| 'list_workspace_mcp_servers'
| 'deploy_api'
| 'deploy_chat'
| 'deploy_mcp'
| 'redeploy'
| 'generate_api_key'
| 'oauth_get_auth_link'
| 'oauth_request_access'
| 'build'
| 'run'
| 'deploy'
| 'auth'
| 'knowledge'
| 'knowledge_base'
| 'table'
| 'job'
| 'agent'
| 'custom_tool'
| 'research'
| 'plan'
| 'debug'
| 'edit'
| 'fast_edit'
| 'open_resource'
| 'context_compaction'
/**
* Subagent identifiers dispatched via `subagent_start` SSE events.
*
* @example
* ```json
* { "type": "subagent_start", "subagent": "build" }
* ```
*/
export type SubagentName =
| 'build'
| 'deploy'
| 'auth'
| 'research'
| 'knowledge'
| 'table'
| 'custom_tool'
| 'superagent'
| 'plan'
| 'debug'
| 'edit'
| 'fast_edit'
| 'run'
| 'agent'
| 'job'
| 'file_write'
export const ToolPhase = {
workspace: 'workspace',
search: 'search',
management: 'management',
execution: 'execution',
resource: 'resource',
subagent: 'subagent',
} as const
export type ToolPhase = (typeof ToolPhase)[keyof typeof ToolPhase]
export type ToolPhase =
| 'workspace'
| 'search'
| 'management'
| 'execution'
| 'resource'
| 'subagent'
export type ToolCallStatus = 'executing' | 'success' | 'error' | 'cancelled'
export const ToolCallStatus = {
executing: 'executing',
success: 'success',
error: 'error',
cancelled: 'cancelled',
} as const
export type ToolCallStatus = (typeof ToolCallStatus)[keyof typeof ToolCallStatus]
export interface ToolCallResult {
success: boolean
@@ -191,7 +85,6 @@ export interface ToolCallResult {
error?: string
}
/** A single tool call result entry in the generic Results resource tab. */
export interface GenericResourceEntry {
toolCallId: string
toolName: string
@@ -202,7 +95,6 @@ export interface GenericResourceEntry {
result?: ToolCallResult
}
/** Accumulated feed of tool call results shown in the generic Results tab. */
export interface GenericResourceData {
entries: GenericResourceEntry[]
}
@@ -225,7 +117,7 @@ export interface ToolCallInfo {
phaseLabel?: string
params?: Record<string, unknown>
calledBy?: string
result?: { success: boolean; output?: unknown; error?: string }
result?: ToolCallResult
streamingArgs?: string
}
@@ -234,14 +126,16 @@ export interface OptionItem {
label: string
}
export type ContentBlockType =
| 'text'
| 'tool_call'
| 'subagent'
| 'subagent_end'
| 'subagent_text'
| 'options'
| 'stopped'
export const ContentBlockType = {
text: 'text',
tool_call: 'tool_call',
subagent: 'subagent',
subagent_end: 'subagent_end',
subagent_text: 'subagent_text',
options: 'options',
stopped: 'stopped',
} as const
export type ContentBlockType = (typeof ContentBlockType)[keyof typeof ContentBlockType]
export interface ContentBlock {
type: ContentBlockType
@@ -278,7 +172,7 @@ export interface ChatMessage {
requestId?: string
}
export const SUBAGENT_LABELS: Record<SubagentName, string> = {
export const SUBAGENT_LABELS: Record<string, string> = {
build: 'Build agent',
deploy: 'Deploy agent',
auth: 'Integration agent',
@@ -304,206 +198,130 @@ export interface ToolUIMetadata {
}
/**
* Primary UI metadata for tools observed in the SSE stream.
* Maps tool IDs to human-readable display names shown in the chat.
* This is the single source of truth — server-sent `ui.title` values are not used.
* Default UI metadata for tools observed in the SSE stream.
* The backend may send `ui` on some `MothershipStreamV1EventType.tool` payloads (`phase: 'call'`);
* this map provides fallback metadata when `ui` is absent.
*/
export const TOOL_UI_METADATA: Record<MothershipToolName, ToolUIMetadata> = {
// Workspace
glob: { title: 'Searching workspace', phaseLabel: 'Workspace', phase: 'workspace' },
grep: { title: 'Searching workspace', phaseLabel: 'Workspace', phase: 'workspace' },
read: { title: 'Reading file', phaseLabel: 'Workspace', phase: 'workspace' },
// Search
search_online: { title: 'Searching online', phaseLabel: 'Search', phase: 'search' },
scrape_page: { title: 'Reading webpage', phaseLabel: 'Search', phase: 'search' },
get_page_contents: { title: 'Reading page', phaseLabel: 'Search', phase: 'search' },
search_library_docs: { title: 'Searching docs', phaseLabel: 'Search', phase: 'search' },
crawl_website: { title: 'Browsing website', phaseLabel: 'Search', phase: 'search' },
// Execution
function_execute: { title: 'Running code', phaseLabel: 'Code', phase: 'execution' },
superagent: { title: 'Taking action', phaseLabel: 'Action', phase: 'execution' },
run_workflow: { title: 'Running workflow', phaseLabel: 'Execution', phase: 'execution' },
run_block: { title: 'Running block', phaseLabel: 'Execution', phase: 'execution' },
run_from_block: { title: 'Running from block', phaseLabel: 'Execution', phase: 'execution' },
run_workflow_until_block: {
title: 'Running partial workflow',
phaseLabel: 'Execution',
export const TOOL_UI_METADATA: Record<string, ToolUIMetadata> = {
[Glob.id]: {
title: 'Searching files',
phaseLabel: 'Workspace',
phase: 'workspace',
},
[Grep.id]: {
title: 'Searching code',
phaseLabel: 'Workspace',
phase: 'workspace',
},
[ReadTool.id]: { title: 'Reading file', phaseLabel: 'Workspace', phase: 'workspace' },
[SearchOnline.id]: {
title: 'Searching online',
phaseLabel: 'Search',
phase: 'search',
},
[ScrapePage.id]: {
title: 'Scraping page',
phaseLabel: 'Search',
phase: 'search',
},
[GetPageContents.id]: {
title: 'Getting page contents',
phaseLabel: 'Search',
phase: 'search',
},
[SearchLibraryDocs.id]: {
title: 'Searching library docs',
phaseLabel: 'Search',
phase: 'search',
},
[ManageMcpTool.id]: {
title: 'Managing MCP tool',
phaseLabel: 'Management',
phase: 'management',
},
[ManageSkill.id]: {
title: 'Managing skill',
phaseLabel: 'Management',
phase: 'management',
},
[UserMemory.id]: {
title: 'Accessing memory',
phaseLabel: 'Management',
phase: 'management',
},
[FunctionExecute.id]: {
title: 'Running code',
phaseLabel: 'Code',
phase: 'execution',
},
complete_job: { title: 'Completing job', phaseLabel: 'Execution', phase: 'execution' },
get_execution_summary: { title: 'Checking results', phaseLabel: 'Execution', phase: 'execution' },
get_job_logs: { title: 'Checking logs', phaseLabel: 'Execution', phase: 'execution' },
get_workflow_logs: { title: 'Checking logs', phaseLabel: 'Execution', phase: 'execution' },
get_workflow_data: { title: 'Loading workflow', phaseLabel: 'Execution', phase: 'execution' },
get_block_outputs: {
title: 'Checking block outputs',
phaseLabel: 'Execution',
[Superagent.id]: {
title: 'Executing action',
phaseLabel: 'Action',
phase: 'execution',
},
get_block_upstream_references: {
title: 'Checking references',
phaseLabel: 'Execution',
phase: 'execution',
},
get_deployed_workflow_state: {
title: 'Checking deployment',
phaseLabel: 'Execution',
phase: 'execution',
},
check_deployment_status: {
title: 'Checking deployment',
phaseLabel: 'Execution',
phase: 'execution',
},
// Workflows & folders
create_workflow: { title: 'Creating workflow', phaseLabel: 'Resource', phase: 'resource' },
delete_workflow: { title: 'Deleting workflow', phaseLabel: 'Resource', phase: 'resource' },
edit_workflow: { title: 'Editing workflow', phaseLabel: 'Resource', phase: 'resource' },
rename_workflow: { title: 'Renaming workflow', phaseLabel: 'Resource', phase: 'resource' },
move_workflow: { title: 'Moving workflow', phaseLabel: 'Resource', phase: 'resource' },
create_folder: { title: 'Creating folder', phaseLabel: 'Resource', phase: 'resource' },
delete_folder: { title: 'Deleting folder', phaseLabel: 'Resource', phase: 'resource' },
move_folder: { title: 'Moving folder', phaseLabel: 'Resource', phase: 'resource' },
list_folders: { title: 'Browsing folders', phaseLabel: 'Resource', phase: 'resource' },
list_user_workspaces: { title: 'Browsing workspaces', phaseLabel: 'Resource', phase: 'resource' },
revert_to_version: { title: 'Restoring version', phaseLabel: 'Resource', phase: 'resource' },
get_deployment_version: {
title: 'Checking deployment',
[UserTable.id]: {
title: 'Managing table',
phaseLabel: 'Resource',
phase: 'resource',
},
open_resource: { title: 'Opening resource', phaseLabel: 'Resource', phase: 'resource' },
// Files
workspace_file: { title: 'Working with files', phaseLabel: 'Resource', phase: 'resource' },
download_to_workspace_file: {
title: 'Downloading file',
[WorkspaceFile.id]: {
title: 'Managing file',
phaseLabel: 'Resource',
phase: 'resource',
},
materialize_file: { title: 'Saving file', phaseLabel: 'Resource', phase: 'resource' },
generate_image: { title: 'Generating image', phaseLabel: 'Resource', phase: 'resource' },
generate_visualization: {
title: 'Generating visualization',
[CreateWorkflow.id]: {
title: 'Creating workflow',
phaseLabel: 'Resource',
phase: 'resource',
},
// Tables & knowledge
user_table: { title: 'Editing table', phaseLabel: 'Resource', phase: 'resource' },
knowledge_base: { title: 'Updating knowledge base', phaseLabel: 'Resource', phase: 'resource' },
// Jobs
create_job: { title: 'Creating job', phaseLabel: 'Resource', phase: 'resource' },
manage_job: { title: 'Updating job', phaseLabel: 'Management', phase: 'management' },
update_job_history: { title: 'Updating job', phaseLabel: 'Management', phase: 'management' },
job_respond: { title: 'Explaining job scheduled', phaseLabel: 'Execution', phase: 'execution' },
// Management
manage_mcp_tool: { title: 'Updating integration', phaseLabel: 'Management', phase: 'management' },
manage_skill: { title: 'Updating skill', phaseLabel: 'Management', phase: 'management' },
manage_credential: { title: 'Connecting account', phaseLabel: 'Management', phase: 'management' },
manage_custom_tool: { title: 'Updating tool', phaseLabel: 'Management', phase: 'management' },
update_workspace_mcp_server: {
title: 'Updating MCP server',
phaseLabel: 'Management',
phase: 'management',
[EditWorkflow.id]: {
title: 'Editing workflow',
phaseLabel: 'Resource',
phase: 'resource',
},
delete_workspace_mcp_server: {
title: 'Removing MCP server',
phaseLabel: 'Management',
phase: 'management',
[Build.id]: { title: 'Building', phaseLabel: 'Build', phase: 'subagent' },
[Run.id]: { title: 'Running', phaseLabel: 'Run', phase: 'subagent' },
[Deploy.id]: { title: 'Deploying', phaseLabel: 'Deploy', phase: 'subagent' },
[Auth.id]: {
title: 'Connecting credentials',
phaseLabel: 'Auth',
phase: 'subagent',
},
create_workspace_mcp_server: {
title: 'Creating MCP server',
phaseLabel: 'Management',
phase: 'management',
[Knowledge.id]: {
title: 'Managing knowledge',
phaseLabel: 'Knowledge',
phase: 'subagent',
},
list_workspace_mcp_servers: {
title: 'Browsing MCP servers',
phaseLabel: 'Management',
phase: 'management',
[KnowledgeBase.id]: {
title: 'Managing knowledge base',
phaseLabel: 'Resource',
phase: 'resource',
},
oauth_get_auth_link: {
title: 'Connecting account',
phaseLabel: 'Management',
phase: 'management',
[Table.id]: { title: 'Managing tables', phaseLabel: 'Table', phase: 'subagent' },
[Job.id]: { title: 'Managing jobs', phaseLabel: 'Job', phase: 'subagent' },
[Agent.id]: { title: 'Agent action', phaseLabel: 'Agent', phase: 'subagent' },
custom_tool: {
title: 'Creating tool',
phaseLabel: 'Tool',
phase: 'subagent',
},
oauth_request_access: {
title: 'Connecting account',
phaseLabel: 'Management',
phase: 'management',
},
set_environment_variables: {
title: 'Updating environment',
phaseLabel: 'Management',
phase: 'management',
},
set_global_workflow_variables: {
title: 'Updating variables',
phaseLabel: 'Management',
phase: 'management',
},
get_platform_actions: { title: 'Loading actions', phaseLabel: 'Management', phase: 'management' },
search_documentation: { title: 'Searching docs', phaseLabel: 'Search', phase: 'search' },
search_patterns: { title: 'Searching patterns', phaseLabel: 'Search', phase: 'search' },
deploy_api: { title: 'Deploying API', phaseLabel: 'Deploy', phase: 'management' },
deploy_chat: { title: 'Deploying chat', phaseLabel: 'Deploy', phase: 'management' },
deploy_mcp: { title: 'Deploying MCP', phaseLabel: 'Deploy', phase: 'management' },
redeploy: { title: 'Redeploying', phaseLabel: 'Deploy', phase: 'management' },
generate_api_key: { title: 'Generating API key', phaseLabel: 'Deploy', phase: 'management' },
user_memory: { title: 'Updating memory', phaseLabel: 'Management', phase: 'management' },
context_write: { title: 'Writing notes', phaseLabel: 'Management', phase: 'management' },
context_compaction: {
title: 'Optimizing context',
phaseLabel: 'Management',
phase: 'management',
},
// Subagents
build: { title: 'Building', phaseLabel: 'Build', phase: 'subagent' },
run: { title: 'Running', phaseLabel: 'Run', phase: 'subagent' },
deploy: { title: 'Deploying', phaseLabel: 'Deploy', phase: 'subagent' },
auth: { title: 'Connecting integration', phaseLabel: 'Auth', phase: 'subagent' },
knowledge: { title: 'Working with knowledge', phaseLabel: 'Knowledge', phase: 'subagent' },
table: { title: 'Working with tables', phaseLabel: 'Table', phase: 'subagent' },
job: { title: 'Working with jobs', phaseLabel: 'Job', phase: 'subagent' },
agent: { title: 'Taking action', phaseLabel: 'Agent', phase: 'subagent' },
custom_tool: { title: 'Creating tool', phaseLabel: 'Tool', phase: 'subagent' },
research: { title: 'Researching', phaseLabel: 'Research', phase: 'subagent' },
[Research.id]: { title: 'Researching', phaseLabel: 'Research', phase: 'subagent' },
plan: { title: 'Planning', phaseLabel: 'Plan', phase: 'subagent' },
debug: { title: 'Debugging', phaseLabel: 'Debug', phase: 'subagent' },
[Debug.id]: { title: 'Debugging', phaseLabel: 'Debug', phase: 'subagent' },
edit: { title: 'Editing workflow', phaseLabel: 'Edit', phase: 'subagent' },
fast_edit: { title: 'Editing workflow', phaseLabel: 'Edit', phase: 'subagent' },
}
export interface SSEPayloadUI {
hidden?: boolean
title?: string
phaseLabel?: string
icon?: string
internal?: boolean
clientExecutable?: boolean
}
export interface SSEPayloadData {
name?: string
ui?: SSEPayloadUI
id?: string
agent?: string
partial?: boolean
arguments?: Record<string, unknown>
input?: Record<string, unknown>
result?: unknown
error?: string
}
export interface SSEPayload {
type: SSEEventType | (string & {})
chatId?: string
data?: string | SSEPayloadData
content?: string
toolCallId?: string
toolName?: string
ui?: SSEPayloadUI
success?: boolean
result?: unknown
error?: string
subagent?: string
resource?: { type: MothershipResourceType; id: string; title: string }
fast_edit: {
title: 'Editing workflow',
phaseLabel: 'Edit',
phase: 'subagent',
},
[OpenResource.id]: {
title: 'Opening resource',
phaseLabel: 'Resource',
phase: 'resource',
},
context_compaction: {
title: 'Compacted context',
phaseLabel: 'Context',
phase: 'management',
},
}

View File

@@ -218,7 +218,7 @@ export const Panel = memo(function Panel({ workspaceId: propWorkspaceId }: Panel
const [copilotChatId, setCopilotChatId] = useState<string | undefined>(undefined)
const [copilotChatTitle, setCopilotChatTitle] = useState<string | null>(null)
const [copilotChatList, setCopilotChatList] = useState<
{ id: string; title: string | null; updatedAt: string; conversationId: string | null }[]
{ id: string; title: string | null; updatedAt: string; activeStreamId: string | null }[]
>([])
const [isCopilotHistoryOpen, setIsCopilotHistoryOpen] = useState(false)
@@ -238,7 +238,7 @@ export const Panel = memo(function Panel({ workspaceId: propWorkspaceId }: Panel
id: string
title: string | null
updatedAt: string
conversationId: string | null
activeStreamId: string | null
}>
setCopilotChatList(filtered)
@@ -784,7 +784,7 @@ export const Panel = memo(function Panel({ workspaceId: propWorkspaceId }: Panel
>
<ConversationListItem
title={chat.title || 'New Chat'}
isActive={Boolean(chat.conversationId)}
isActive={Boolean(chat.activeStreamId)}
titleClassName='text-[13px]'
actions={
<div

View File

@@ -1,4 +1,6 @@
import { keepPreviousData, useMutation, useQuery, useQueryClient } from '@tanstack/react-query'
import type { PersistedMessage } from '@/lib/copilot/chat/persisted-message'
import { normalizeMessage } from '@/lib/copilot/chat/persisted-message'
import type { MothershipResource } from '@/app/workspace/[workspaceId]/home/types'
export interface TaskMetadata {
@@ -9,70 +11,13 @@ export interface TaskMetadata {
isUnread: boolean
}
export interface StreamSnapshot {
events: Array<{ eventId: number; streamId: string; event: Record<string, unknown> }>
status: string
}
export interface TaskChatHistory {
id: string
title: string | null
messages: TaskStoredMessage[]
messages: PersistedMessage[]
activeStreamId: string | null
resources: MothershipResource[]
streamSnapshot?: StreamSnapshot | null
}
export interface TaskStoredToolCall {
id: string
name: string
status: string
params?: Record<string, unknown>
result?: unknown
error?: string
durationMs?: number
}
export interface TaskStoredFileAttachment {
id: string
key: string
filename: string
media_type: string
size: number
}
export interface TaskStoredMessageContext {
kind: string
label: string
workflowId?: string
knowledgeId?: string
tableId?: string
fileId?: string
}
export interface TaskStoredMessage {
id: string
role: 'user' | 'assistant'
content: string
requestId?: string
toolCalls?: TaskStoredToolCall[]
contentBlocks?: TaskStoredContentBlock[]
fileAttachments?: TaskStoredFileAttachment[]
contexts?: TaskStoredMessageContext[]
}
export interface TaskStoredContentBlock {
type: string
content?: string
toolCall?: {
id?: string
name?: string
state?: string
params?: Record<string, unknown>
result?: { success: boolean; output?: unknown; error?: string }
display?: { text?: string }
calledBy?: string
} | null
streamSnapshot?: { events: unknown[]; status: string } | null
}
export const taskKeys = {
@@ -87,7 +32,7 @@ interface TaskResponse {
id: string
title: string | null
updatedAt: string
conversationId: string | null
activeStreamId: string | null
lastSeenAt: string | null
}
@@ -97,9 +42,9 @@ function mapTask(chat: TaskResponse): TaskMetadata {
id: chat.id,
name: chat.title ?? 'New task',
updatedAt,
isActive: chat.conversationId !== null,
isActive: chat.activeStreamId !== null,
isUnread:
chat.conversationId === null &&
chat.activeStreamId === null &&
(chat.lastSeenAt === null || updatedAt > new Date(chat.lastSeenAt)),
}
}
@@ -159,10 +104,11 @@ export async function fetchChatHistory(
return {
id: chat.id,
title: chat.title,
messages: Array.isArray(chat.messages) ? chat.messages : [],
activeStreamId: chat.conversationId || null,
messages: Array.isArray(chat.messages)
? chat.messages.map((m: Record<string, unknown>) => normalizeMessage(m))
: [],
activeStreamId: chat.activeStreamId || null,
resources: Array.isArray(chat.resources) ? chat.resources : [],
streamSnapshot: chat.streamSnapshot || null,
}
}

View File

@@ -1,13 +1,7 @@
import type { CopilotAsyncToolStatus } from '@sim/db/schema'
import { MothershipStreamV1AsyncToolRecordStatus } from '@/lib/copilot/generated/mothership-stream-v1'
export const ASYNC_TOOL_STATUS = {
pending: 'pending',
running: 'running',
completed: 'completed',
failed: 'failed',
cancelled: 'cancelled',
delivered: 'delivered',
} as const
export const ASYNC_TOOL_STATUS = MothershipStreamV1AsyncToolRecordStatus
export type AsyncLifecycleStatus =
| typeof ASYNC_TOOL_STATUS.pending

View File

@@ -1,53 +0,0 @@
import { createLogger } from '@sim/logger'
import { CopilotFiles } from '@/lib/uploads'
import { createFileContent } from '@/lib/uploads/utils/file-utils'
const logger = createLogger('CopilotChatContext')
export interface FileAttachmentInput {
id: string
key: string
name?: string
filename?: string
mimeType?: string
media_type?: string
size: number
}
export interface FileContent {
type: string
[key: string]: unknown
}
/**
* Process file attachments into content for the payload.
*/
export async function processFileAttachments(
fileAttachments: FileAttachmentInput[],
userId: string
): Promise<FileContent[]> {
if (!Array.isArray(fileAttachments) || fileAttachments.length === 0) return []
const processedFileContents: FileContent[] = []
const requestId = `copilot-${userId}-${Date.now()}`
const processedAttachments = await CopilotFiles.processCopilotAttachments(
fileAttachments as Parameters<typeof CopilotFiles.processCopilotAttachments>[0],
requestId
)
for (const { buffer, attachment } of processedAttachments) {
const fileContent = createFileContent(buffer, attachment.media_type)
if (fileContent) {
const enriched: FileContent = { ...fileContent, filename: attachment.filename }
processedFileContents.push(enriched)
}
}
logger.debug('Processed file attachments for payload', {
userId,
inputCount: fileAttachments.length,
outputCount: processedFileContents.length,
})
return processedFileContents
}

View File

@@ -1,140 +0,0 @@
/**
* @vitest-environment node
*/
import { beforeEach, describe, expect, it, vi } from 'vitest'
const {
orchestrateCopilotStream,
createRunSegment,
updateRunStatus,
resetStreamBuffer,
setStreamMeta,
createStreamEventWriter,
} = vi.hoisted(() => ({
orchestrateCopilotStream: vi.fn(),
createRunSegment: vi.fn(),
updateRunStatus: vi.fn(),
resetStreamBuffer: vi.fn(),
setStreamMeta: vi.fn(),
createStreamEventWriter: vi.fn(),
}))
vi.mock('@/lib/copilot/orchestrator', () => ({
orchestrateCopilotStream,
}))
vi.mock('@/lib/copilot/async-runs/repository', () => ({
createRunSegment,
updateRunStatus,
}))
vi.mock('@/lib/copilot/orchestrator/stream/buffer', () => ({
createStreamEventWriter,
resetStreamBuffer,
setStreamMeta,
}))
vi.mock('@sim/db', () => ({
db: {
update: vi.fn(() => ({
set: vi.fn(() => ({
where: vi.fn(),
})),
})),
},
}))
vi.mock('@/lib/copilot/task-events', () => ({
taskPubSub: null,
}))
import { createSSEStream } from '@/lib/copilot/chat-streaming'
async function drainStream(stream: ReadableStream) {
const reader = stream.getReader()
while (true) {
const { done } = await reader.read()
if (done) break
}
}
describe('createSSEStream terminal error handling', () => {
const write = vi.fn().mockResolvedValue({ eventId: 1, streamId: 'stream-1', event: {} })
const flush = vi.fn().mockResolvedValue(undefined)
const close = vi.fn().mockResolvedValue(undefined)
beforeEach(() => {
vi.clearAllMocks()
write.mockResolvedValue({ eventId: 1, streamId: 'stream-1', event: {} })
flush.mockResolvedValue(undefined)
close.mockResolvedValue(undefined)
createStreamEventWriter.mockReturnValue({ write, flush, close })
resetStreamBuffer.mockResolvedValue(undefined)
setStreamMeta.mockResolvedValue(undefined)
createRunSegment.mockResolvedValue(null)
updateRunStatus.mockResolvedValue(null)
})
it('writes a terminal error event before close when orchestration returns success=false', async () => {
orchestrateCopilotStream.mockResolvedValue({
success: false,
error: 'resume failed',
content: '',
contentBlocks: [],
toolCalls: [],
})
const stream = createSSEStream({
requestPayload: { message: 'hello' },
userId: 'user-1',
streamId: 'stream-1',
executionId: 'exec-1',
runId: 'run-1',
currentChat: null,
isNewChat: false,
message: 'hello',
titleModel: 'gpt-5.4',
requestId: 'req-1',
orchestrateOptions: {},
})
await drainStream(stream)
expect(write).toHaveBeenCalledWith(
expect.objectContaining({
type: 'error',
error: 'resume failed',
})
)
expect(write.mock.invocationCallOrder.at(-1)).toBeLessThan(close.mock.invocationCallOrder[0])
})
it('writes the thrown terminal error event before close for replay durability', async () => {
orchestrateCopilotStream.mockRejectedValue(new Error('kaboom'))
const stream = createSSEStream({
requestPayload: { message: 'hello' },
userId: 'user-1',
streamId: 'stream-1',
executionId: 'exec-1',
runId: 'run-1',
currentChat: null,
isNewChat: false,
message: 'hello',
titleModel: 'gpt-5.4',
requestId: 'req-1',
orchestrateOptions: {},
})
await drainStream(stream)
expect(write).toHaveBeenCalledWith(
expect.objectContaining({
type: 'error',
error: 'kaboom',
})
)
expect(write.mock.invocationCallOrder.at(-1)).toBeLessThan(close.mock.invocationCallOrder[0])
})
})

View File

@@ -1,579 +0,0 @@
import { db } from '@sim/db'
import { copilotChats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { createRunSegment, updateRunStatus } from '@/lib/copilot/async-runs/repository'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import type { OrchestrateStreamOptions } from '@/lib/copilot/orchestrator'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import {
createStreamEventWriter,
getStreamMeta,
resetStreamBuffer,
setStreamMeta,
} from '@/lib/copilot/orchestrator/stream/buffer'
import { taskPubSub } from '@/lib/copilot/task-events'
import { env } from '@/lib/core/config/env'
import { acquireLock, getRedisClient, releaseLock } from '@/lib/core/config/redis'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
const logger = createLogger('CopilotChatStreaming')
const CHAT_STREAM_LOCK_TTL_SECONDS = 2 * 60 * 60
const STREAM_ABORT_TTL_SECONDS = 10 * 60
const STREAM_ABORT_POLL_MS = 1000
interface ActiveStreamEntry {
abortController: AbortController
userStopController: AbortController
}
const activeStreams = new Map<string, ActiveStreamEntry>()
// Tracks in-flight streams by chatId so that a subsequent request for the
// same chat can force-abort the previous stream and wait for it to settle
// before forwarding to Go.
const pendingChatStreams = new Map<
string,
{ promise: Promise<void>; resolve: () => void; streamId: string }
>()
function registerPendingChatStream(chatId: string, streamId: string): void {
if (pendingChatStreams.has(chatId)) {
logger.warn(`registerPendingChatStream: overwriting existing entry for chatId ${chatId}`)
}
let resolve!: () => void
const promise = new Promise<void>((r) => {
resolve = r
})
pendingChatStreams.set(chatId, { promise, resolve, streamId })
}
function resolvePendingChatStream(chatId: string, streamId: string): void {
const entry = pendingChatStreams.get(chatId)
if (entry && entry.streamId === streamId) {
entry.resolve()
pendingChatStreams.delete(chatId)
}
}
function getChatStreamLockKey(chatId: string): string {
return `copilot:chat-stream-lock:${chatId}`
}
function getStreamAbortKey(streamId: string): string {
return `copilot:stream-abort:${streamId}`
}
/**
* Wait for any in-flight stream on `chatId` to settle without force-aborting it.
* Returns true when no stream is active (or it settles in time), false on timeout.
*/
export async function waitForPendingChatStream(
chatId: string,
timeoutMs = 5_000,
expectedStreamId?: string
): Promise<boolean> {
const redis = getRedisClient()
const deadline = Date.now() + timeoutMs
for (;;) {
const entry = pendingChatStreams.get(chatId)
const localPending = !!entry && (!expectedStreamId || entry.streamId === expectedStreamId)
if (redis) {
try {
const ownerStreamId = await redis.get(getChatStreamLockKey(chatId))
const lockReleased =
!ownerStreamId || (expectedStreamId !== undefined && ownerStreamId !== expectedStreamId)
if (!localPending && lockReleased) {
return true
}
} catch (error) {
logger.warn('Failed to check distributed chat stream lock while waiting', {
chatId,
expectedStreamId,
error: error instanceof Error ? error.message : String(error),
})
}
} else if (!localPending) {
return true
}
if (Date.now() >= deadline) return false
await new Promise((resolve) => setTimeout(resolve, 200))
}
}
export async function releasePendingChatStream(chatId: string, streamId: string): Promise<void> {
const redis = getRedisClient()
if (redis) {
await releaseLock(getChatStreamLockKey(chatId), streamId).catch(() => false)
}
resolvePendingChatStream(chatId, streamId)
}
export async function acquirePendingChatStream(
chatId: string,
streamId: string,
timeoutMs = 5_000
): Promise<boolean> {
const redis = getRedisClient()
if (redis) {
const deadline = Date.now() + timeoutMs
for (;;) {
try {
const acquired = await acquireLock(
getChatStreamLockKey(chatId),
streamId,
CHAT_STREAM_LOCK_TTL_SECONDS
)
if (acquired) {
registerPendingChatStream(chatId, streamId)
return true
}
if (!pendingChatStreams.has(chatId)) {
const ownerStreamId = await redis.get(getChatStreamLockKey(chatId))
if (ownerStreamId) {
const ownerMeta = await getStreamMeta(ownerStreamId)
const ownerTerminal =
ownerMeta?.status === 'complete' ||
ownerMeta?.status === 'error' ||
ownerMeta?.status === 'cancelled'
if (ownerTerminal) {
await releaseLock(getChatStreamLockKey(chatId), ownerStreamId).catch(() => false)
continue
}
}
}
} catch (error) {
logger.warn('Distributed chat stream lock failed; retrying distributed coordination', {
chatId,
streamId,
error: error instanceof Error ? error.message : String(error),
})
}
if (Date.now() >= deadline) return false
await new Promise((resolve) => setTimeout(resolve, 200))
}
}
for (;;) {
const existing = pendingChatStreams.get(chatId)
if (!existing) {
registerPendingChatStream(chatId, streamId)
return true
}
const settled = await Promise.race([
existing.promise.then(() => true),
new Promise<boolean>((r) => setTimeout(() => r(false), timeoutMs)),
])
if (!settled) return false
}
}
export async function abortActiveStream(streamId: string): Promise<boolean> {
const redis = getRedisClient()
let published = false
if (redis) {
try {
await redis.set(getStreamAbortKey(streamId), '1', 'EX', STREAM_ABORT_TTL_SECONDS)
published = true
} catch (error) {
logger.warn('Failed to publish distributed stream abort', {
streamId,
error: error instanceof Error ? error.message : String(error),
})
}
}
const entry = activeStreams.get(streamId)
if (!entry) return published
entry.userStopController.abort()
entry.abortController.abort()
activeStreams.delete(streamId)
return true
}
const FLUSH_EVENT_TYPES = new Set([
'tool_call',
'tool_result',
'tool_error',
'subagent_end',
'structured_result',
'subagent_result',
'done',
'error',
])
export async function requestChatTitle(params: {
message: string
model: string
provider?: string
messageId?: string
}): Promise<string | null> {
const { message, model, provider, messageId } = params
if (!message || !model) return null
const headers: Record<string, string> = { 'Content-Type': 'application/json' }
if (env.COPILOT_API_KEY) {
headers['x-api-key'] = env.COPILOT_API_KEY
}
try {
const response = await fetch(`${SIM_AGENT_API_URL}/api/generate-chat-title`, {
method: 'POST',
headers,
body: JSON.stringify({ message, model, ...(provider ? { provider } : {}) }),
})
const payload = await response.json().catch(() => ({}))
if (!response.ok) {
logger.withMetadata({ messageId }).warn('Failed to generate chat title via copilot backend', {
status: response.status,
error: payload,
})
return null
}
const title = typeof payload?.title === 'string' ? payload.title.trim() : ''
return title || null
} catch (error) {
logger.withMetadata({ messageId }).error('Error generating chat title', error)
return null
}
}
export interface StreamingOrchestrationParams {
requestPayload: Record<string, unknown>
userId: string
streamId: string
executionId: string
runId: string
chatId?: string
currentChat: any
isNewChat: boolean
message: string
titleModel: string
titleProvider?: string
requestId: string
workspaceId?: string
orchestrateOptions: Omit<OrchestrateStreamOptions, 'onEvent'>
pendingChatStreamAlreadyRegistered?: boolean
}
export function createSSEStream(params: StreamingOrchestrationParams): ReadableStream {
const {
requestPayload,
userId,
streamId,
executionId,
runId,
chatId,
currentChat,
isNewChat,
message,
titleModel,
titleProvider,
requestId,
workspaceId,
orchestrateOptions,
pendingChatStreamAlreadyRegistered = false,
} = params
const messageId =
typeof requestPayload.messageId === 'string' ? requestPayload.messageId : streamId
const reqLogger = logger.withMetadata({ requestId, messageId })
let eventWriter: ReturnType<typeof createStreamEventWriter> | null = null
let clientDisconnected = false
const abortController = new AbortController()
const userStopController = new AbortController()
const clientDisconnectedController = new AbortController()
activeStreams.set(streamId, { abortController, userStopController })
if (chatId && !pendingChatStreamAlreadyRegistered) {
registerPendingChatStream(chatId, streamId)
}
return new ReadableStream({
async start(controller) {
const encoder = new TextEncoder()
const markClientDisconnected = (reason: string) => {
if (clientDisconnected) return
clientDisconnected = true
if (!clientDisconnectedController.signal.aborted) {
clientDisconnectedController.abort()
}
reqLogger.info('Client disconnected from live SSE stream', {
streamId,
runId,
reason,
})
}
await resetStreamBuffer(streamId)
await setStreamMeta(streamId, { status: 'active', userId, executionId, runId })
if (chatId) {
await createRunSegment({
id: runId,
executionId,
chatId,
userId,
workflowId: (requestPayload.workflowId as string | undefined) || null,
workspaceId,
streamId,
model: (requestPayload.model as string | undefined) || null,
provider: (requestPayload.provider as string | undefined) || null,
requestContext: { requestId },
}).catch((error) => {
reqLogger.warn('Failed to create copilot run segment', {
error: error instanceof Error ? error.message : String(error),
})
})
}
eventWriter = createStreamEventWriter(streamId)
let localSeq = 0
let abortPoller: ReturnType<typeof setInterval> | null = null
const redis = getRedisClient()
if (redis) {
abortPoller = setInterval(() => {
void (async () => {
try {
const shouldAbort = await redis.get(getStreamAbortKey(streamId))
if (shouldAbort && !abortController.signal.aborted) {
userStopController.abort()
abortController.abort()
await redis.del(getStreamAbortKey(streamId))
}
} catch (error) {
reqLogger.warn('Failed to poll distributed stream abort', {
streamId,
error: error instanceof Error ? error.message : String(error),
})
}
})()
}, STREAM_ABORT_POLL_MS)
}
const pushEvent = async (event: Record<string, any>) => {
if (!eventWriter) return
const eventId = ++localSeq
try {
await eventWriter.write(event)
if (FLUSH_EVENT_TYPES.has(event.type)) {
await eventWriter.flush()
}
} catch (error) {
reqLogger.error('Failed to persist stream event', {
eventType: event.type,
eventId,
error: error instanceof Error ? error.message : String(error),
})
// Keep the live SSE stream going even if durable buffering hiccups.
}
try {
if (!clientDisconnected) {
controller.enqueue(
encoder.encode(`data: ${JSON.stringify({ ...event, eventId, streamId })}\n\n`)
)
}
} catch {
markClientDisconnected('enqueue_failed')
}
}
const pushEventBestEffort = async (event: Record<string, any>) => {
try {
await pushEvent(event)
} catch (error) {
reqLogger.error('Failed to push event', {
eventType: event.type,
error: error instanceof Error ? error.message : String(error),
})
}
}
if (chatId) {
await pushEvent({ type: 'chat_id', chatId })
}
if (chatId && !currentChat?.title && isNewChat) {
requestChatTitle({ message, model: titleModel, provider: titleProvider, messageId })
.then(async (title) => {
if (title) {
await db.update(copilotChats).set({ title }).where(eq(copilotChats.id, chatId!))
await pushEvent({ type: 'title_updated', title })
if (workspaceId) {
taskPubSub?.publishStatusChanged({ workspaceId, chatId: chatId!, type: 'renamed' })
}
}
})
.catch((error) => {
reqLogger.error('Title generation failed', error)
})
}
const keepaliveInterval = setInterval(() => {
if (clientDisconnected) return
try {
controller.enqueue(encoder.encode(': keepalive\n\n'))
} catch {
markClientDisconnected('keepalive_failed')
}
}, 15_000)
try {
const result = await orchestrateCopilotStream(requestPayload, {
...orchestrateOptions,
executionId,
runId,
abortSignal: abortController.signal,
userStopSignal: userStopController.signal,
clientDisconnectedSignal: clientDisconnectedController.signal,
onEvent: async (event) => {
await pushEvent(event)
},
})
if (abortController.signal.aborted) {
reqLogger.info('Stream aborted by explicit stop')
await eventWriter.close().catch(() => {})
await setStreamMeta(streamId, { status: 'cancelled', userId, executionId, runId })
await updateRunStatus(runId, 'cancelled', { completedAt: new Date() }).catch(() => {})
return
}
if (!result.success) {
const errorMessage =
result.error ||
result.errors?.[0] ||
'An unexpected error occurred while processing the response.'
if (clientDisconnected) {
reqLogger.info('Stream failed after client disconnect', {
error: errorMessage,
})
}
reqLogger.error('Orchestration returned failure', {
error: errorMessage,
})
await pushEventBestEffort({
type: 'error',
error: errorMessage,
data: {
displayMessage: errorMessage,
},
})
await eventWriter.close()
await setStreamMeta(streamId, {
status: 'error',
userId,
executionId,
runId,
error: errorMessage,
})
await updateRunStatus(runId, 'error', {
completedAt: new Date(),
error: errorMessage,
}).catch(() => {})
return
}
await eventWriter.close()
await setStreamMeta(streamId, { status: 'complete', userId, executionId, runId })
await updateRunStatus(runId, 'complete', { completedAt: new Date() }).catch(() => {})
if (clientDisconnected) {
reqLogger.info('Orchestration completed after client disconnect', {
streamId,
runId,
})
}
} catch (error) {
if (abortController.signal.aborted) {
reqLogger.info('Stream aborted by explicit stop')
await eventWriter.close().catch(() => {})
await setStreamMeta(streamId, { status: 'cancelled', userId, executionId, runId })
await updateRunStatus(runId, 'cancelled', { completedAt: new Date() }).catch(() => {})
return
}
if (clientDisconnected) {
reqLogger.info('Stream errored after client disconnect', {
error: error instanceof Error ? error.message : 'Stream error',
})
}
reqLogger.error('Orchestration error', error)
const errorMessage = error instanceof Error ? error.message : 'Stream error'
await pushEventBestEffort({
type: 'error',
error: errorMessage,
data: {
displayMessage: 'An unexpected error occurred while processing the response.',
},
})
await eventWriter.close()
await setStreamMeta(streamId, {
status: 'error',
userId,
executionId,
runId,
error: errorMessage,
})
await updateRunStatus(runId, 'error', {
completedAt: new Date(),
error: errorMessage,
}).catch(() => {})
} finally {
reqLogger.info('Closing live SSE stream', {
streamId,
runId,
clientDisconnected,
aborted: abortController.signal.aborted,
})
clearInterval(keepaliveInterval)
if (abortPoller) {
clearInterval(abortPoller)
}
activeStreams.delete(streamId)
if (chatId) {
if (redis) {
await releaseLock(getChatStreamLockKey(chatId), streamId).catch(() => false)
}
resolvePendingChatStream(chatId, streamId)
}
if (redis) {
await redis.del(getStreamAbortKey(streamId)).catch(() => {})
}
try {
controller.close()
} catch {
// Controller already closed from cancel() — safe to ignore
}
}
},
cancel() {
reqLogger.info('ReadableStream cancel received from client', {
streamId,
runId,
})
if (!clientDisconnected) {
clientDisconnected = true
if (!clientDisconnectedController.signal.aborted) {
clientDisconnectedController.abort()
}
}
if (eventWriter) {
eventWriter.flush().catch(() => {})
}
},
})
}
export const SSE_RESPONSE_HEADERS = {
...SSE_HEADERS,
'Content-Encoding': 'none',
} as const

View File

@@ -0,0 +1,63 @@
/**
* @vitest-environment node
*/
import { describe, expect, it } from 'vitest'
import { toDisplayMessage } from './display-message'
// Unit coverage for toDisplayMessage: verifies the projection of canonical
// persisted content blocks (tool / text / complete) into UI display blocks.
describe('display-message', () => {
  it('maps canonical tool, subagent text, and cancelled complete blocks to display blocks', () => {
    const display = toDisplayMessage({
      id: 'msg-1',
      role: 'assistant',
      content: 'done',
      timestamp: '2024-01-01T00:00:00.000Z',
      requestId: 'req-1',
      contentBlocks: [
        // Canonical tool block in a cancelled state — expected to surface the
        // "Stopped by user" display title.
        {
          type: 'tool',
          phase: 'call',
          toolCall: {
            id: 'tool-1',
            name: 'read',
            state: 'cancelled',
            display: { title: 'Stopped by user' },
          },
        },
        // Text emitted on the subagent lane maps to subagent_text.
        {
          type: 'text',
          lane: 'subagent',
          channel: 'assistant',
          content: 'subagent output',
        },
        // A cancelled completion maps to a 'stopped' display block.
        {
          type: 'complete',
          status: 'cancelled',
        },
      ],
    })
    expect(display.contentBlocks).toEqual([
      {
        type: 'tool_call',
        toolCall: {
          id: 'tool-1',
          name: 'read',
          status: 'cancelled',
          displayTitle: 'Stopped by user',
          phaseLabel: undefined,
          params: undefined,
          calledBy: undefined,
          result: undefined,
        },
      },
      {
        type: 'subagent_text',
        content: 'subagent output',
      },
      {
        type: 'stopped',
      },
    ])
  })
})

View File

@@ -0,0 +1,118 @@
import {
MothershipStreamV1CompletionStatus,
MothershipStreamV1EventType,
MothershipStreamV1SpanLifecycleEvent,
MothershipStreamV1ToolOutcome,
} from '@/lib/copilot/generated/mothership-stream-v1'
import {
type ChatMessage,
type ChatMessageAttachment,
type ChatMessageContext,
type ContentBlock,
ContentBlockType,
type ToolCallInfo,
ToolCallStatus,
} from '@/app/workspace/[workspaceId]/home/types'
import type { PersistedContentBlock, PersistedMessage } from './persisted-message'
// Maps a persisted tool state (canonical MothershipStreamV1ToolOutcome values
// plus non-terminal local states) onto the UI-facing ToolCallStatus enum.
const STATE_TO_STATUS: Record<string, ToolCallStatus> = {
  [MothershipStreamV1ToolOutcome.success]: ToolCallStatus.success,
  [MothershipStreamV1ToolOutcome.error]: ToolCallStatus.error,
  [MothershipStreamV1ToolOutcome.cancelled]: ToolCallStatus.cancelled,
  // No dedicated display statuses for these outcomes: rejected renders as an
  // error, skipped renders as a success.
  [MothershipStreamV1ToolOutcome.rejected]: ToolCallStatus.error,
  [MothershipStreamV1ToolOutcome.skipped]: ToolCallStatus.success,
  // In-flight local states render as still executing.
  pending: ToolCallStatus.executing,
  executing: ToolCallStatus.executing,
}
/**
 * Converts a persisted content block's tool call into the display ToolCallInfo.
 *
 * Returns undefined when the block carries no tool call. Unknown states fall
 * back to an error status; cancelled calls get a fixed "Stopped by user" title
 * regardless of any persisted display title.
 */
function toToolCallInfo(block: PersistedContentBlock): ToolCallInfo | undefined {
  const persisted = block.toolCall
  if (!persisted) return undefined
  const mapped = STATE_TO_STATUS[persisted.state]
  const status: ToolCallStatus = mapped ?? ToolCallStatus.error
  const wasCancelled = status === ToolCallStatus.cancelled
  return {
    id: persisted.id,
    name: persisted.name,
    status,
    displayTitle: wasCancelled ? 'Stopped by user' : persisted.display?.title,
    phaseLabel: persisted.display?.phaseLabel,
    params: persisted.params,
    calledBy: persisted.calledBy,
    result: persisted.result,
  }
}
/**
 * Projects one canonical persisted content block into its UI display block.
 *
 * - text on the subagent lane -> subagent_text, otherwise plain text
 * - tool -> tool_call (with mapped ToolCallInfo)
 * - span end -> subagent_end, other span lifecycles -> subagent
 * - cancelled complete -> stopped
 * - anything else falls back to a plain text block
 */
function toDisplayBlock(block: PersistedContentBlock): ContentBlock {
  if (block.type === MothershipStreamV1EventType.tool) {
    return { type: ContentBlockType.tool_call, toolCall: toToolCallInfo(block) }
  }
  if (block.type === MothershipStreamV1EventType.text) {
    const isSubagentLane = block.lane === 'subagent'
    return isSubagentLane
      ? { type: ContentBlockType.subagent_text, content: block.content }
      : { type: ContentBlockType.text, content: block.content }
  }
  if (block.type === MothershipStreamV1EventType.span) {
    return block.lifecycle === MothershipStreamV1SpanLifecycleEvent.end
      ? { type: ContentBlockType.subagent_end }
      : { type: ContentBlockType.subagent, content: block.content }
  }
  if (
    block.type === MothershipStreamV1EventType.complete &&
    block.status === MothershipStreamV1CompletionStatus.cancelled
  ) {
    return { type: ContentBlockType.stopped }
  }
  // Non-cancelled completions and unknown block types render as plain text.
  return { type: ContentBlockType.text, content: block.content }
}
/**
 * Maps persisted file attachments to display attachments.
 *
 * Image attachments get a previewUrl pointing at the file-serving endpoint
 * (key URL-encoded, mothership context); non-images get none. A missing or
 * empty input yields an empty array.
 */
function toDisplayAttachment(f: PersistedMessage['fileAttachments']): ChatMessageAttachment[] {
  const attachments = f ?? []
  return attachments.map((file) => {
    const isImage = file.media_type.startsWith('image/')
    return {
      id: file.id,
      filename: file.filename,
      media_type: file.media_type,
      size: file.size,
      previewUrl: isImage
        ? `/api/files/serve/${encodeURIComponent(file.key)}?context=mothership`
        : undefined,
    }
  })
}
/**
 * Maps persisted message contexts to display contexts.
 *
 * Returns undefined for a missing or empty list. Optional ids (workflowId,
 * knowledgeId, tableId, fileId) are only copied when truthy, so empty-string
 * ids are dropped from the display shape.
 */
function toDisplayContexts(
  contexts: PersistedMessage['contexts']
): ChatMessageContext[] | undefined {
  if (!contexts?.length) return undefined
  return contexts.map((ctx) => {
    const mapped: ChatMessageContext = { kind: ctx.kind, label: ctx.label }
    if (ctx.workflowId) mapped.workflowId = ctx.workflowId
    if (ctx.knowledgeId) mapped.knowledgeId = ctx.knowledgeId
    if (ctx.tableId) mapped.tableId = ctx.tableId
    if (ctx.fileId) mapped.fileId = ctx.fileId
    return mapped
  })
}
/**
 * Projects a canonical persisted message into the UI-facing ChatMessage shape.
 *
 * Optional display fields (requestId, contentBlocks, attachments, contexts)
 * are only set when they carry data, so the resulting object has no keys that
 * hold explicit `undefined` values.
 */
export function toDisplayMessage(msg: PersistedMessage): ChatMessage {
  const display: ChatMessage = {
    id: msg.id,
    role: msg.role,
    content: msg.content,
  }
  if (msg.requestId) {
    display.requestId = msg.requestId
  }
  if (msg.contentBlocks && msg.contentBlocks.length > 0) {
    display.contentBlocks = msg.contentBlocks.map(toDisplayBlock)
  }
  const attachments = toDisplayAttachment(msg.fileAttachments)
  if (attachments.length > 0) {
    display.attachments = attachments
  }
  // Match the other optional fields: omit the key entirely rather than
  // assigning an explicit `undefined` own-property when there are no contexts.
  const contexts = toDisplayContexts(msg.contexts)
  if (contexts) {
    display.contexts = contexts
  }
  return display
}

View File

@@ -17,10 +17,6 @@ vi.mock('@/lib/billing/core/subscription', () => ({
getUserSubscriptionState: vi.fn(),
}))
vi.mock('@/lib/copilot/chat-context', () => ({
processFileAttachments: vi.fn(),
}))
vi.mock('@/lib/core/config/feature-flags', () => ({
isHosted: false,
}))
@@ -45,6 +41,12 @@ vi.mock('@/tools/registry', () => ({
name: 'Brandfetch Search',
description: 'Search for brands by company name',
},
// Catalog marks run_workflow as client / clientExecutable; registry ToolConfig has no executor fields.
run_workflow: {
id: 'run_workflow',
name: 'Run Workflow',
description: 'Run a workflow from the client',
},
},
}))
@@ -58,7 +60,7 @@ vi.mock('@/tools/params', () => ({
}))
import { getUserSubscriptionState } from '@/lib/billing/core/subscription'
import { buildIntegrationToolSchemas } from '@/lib/copilot/chat-payload'
import { buildIntegrationToolSchemas } from './payload'
const mockedGetUserSubscriptionState = getUserSubscriptionState as unknown as {
mockResolvedValue: (value: unknown) => void
@@ -102,4 +104,15 @@ describe('buildIntegrationToolSchemas', () => {
expect(gmailTool?.description).toBe('Send emails using Gmail')
expect(brandfetchTool?.description).toBe('Search for brands by company name')
})
it('emits executeLocally for dynamic client tools only', async () => {
mockedGetUserSubscriptionState.mockResolvedValue({ isFree: false })
const toolSchemas = await buildIntegrationToolSchemas('user-client')
const gmailTool = toolSchemas.find((tool) => tool.name === 'gmail_send')
const runTool = toolSchemas.find((tool) => tool.name === 'run_workflow')
expect(gmailTool?.executeLocally).toBe(false)
expect(runTool?.executeLocally).toBe(true)
})
})

View File

@@ -1,6 +1,7 @@
import { createLogger } from '@sim/logger'
import { getUserSubscriptionState } from '@/lib/billing/core/subscription'
import { getCopilotToolDescription } from '@/lib/copilot/tool-descriptions'
import { getToolEntry } from '@/lib/copilot/tool-executor/router'
import { getCopilotToolDescription } from '@/lib/copilot/tools/descriptions'
import { isHosted } from '@/lib/core/config/feature-flags'
import { createMcpToolId } from '@/lib/mcp/utils'
import { trackChatUpload } from '@/lib/uploads/contexts/workspace/workspace-file-manager'
@@ -10,7 +11,7 @@ import { getLatestVersionTools, stripVersionSuffix } from '@/tools/utils'
const logger = createLogger('CopilotChatPayload')
export interface BuildPayloadParams {
interface BuildPayloadParams {
message: string
workflowId?: string
workflowName?: string
@@ -60,16 +61,22 @@ export async function buildIntegrationToolSchemas(
const subscriptionState = await getUserSubscriptionState(userId)
shouldAppendEmailTagline = subscriptionState.isFree
} catch (error) {
reqLogger.warn('Failed to load subscription state for copilot tool descriptions', {
userId,
error: error instanceof Error ? error.message : String(error),
})
logger.warn(
messageId
? `Failed to load subscription state for copilot tool descriptions [messageId:${messageId}]`
: 'Failed to load subscription state for copilot tool descriptions',
{
userId,
error: error instanceof Error ? error.message : String(error),
}
)
}
for (const [toolId, toolConfig] of Object.entries(latestTools)) {
try {
const userSchema = createUserToolSchema(toolConfig)
const strippedName = stripVersionSuffix(toolId)
const catalogEntry = getToolEntry(strippedName)
integrationTools.push({
name: strippedName,
description: getCopilotToolDescription(toolConfig, {
@@ -79,6 +86,8 @@ export async function buildIntegrationToolSchemas(
}),
input_schema: userSchema as unknown as Record<string, unknown>,
defer_loading: true,
executeLocally:
catalogEntry?.clientExecutable === true || catalogEntry?.executor === 'client',
...(toolConfig.oauth?.required && {
oauth: {
required: true,
@@ -87,16 +96,26 @@ export async function buildIntegrationToolSchemas(
}),
})
} catch (toolError) {
reqLogger.warn('Failed to build schema for tool, skipping', {
toolId,
error: toolError instanceof Error ? toolError.message : String(toolError),
})
logger.warn(
messageId
? `Failed to build schema for tool, skipping [messageId:${messageId}]`
: 'Failed to build schema for tool, skipping',
{
toolId,
error: toolError instanceof Error ? toolError.message : String(toolError),
}
)
}
}
} catch (error) {
reqLogger.warn('Failed to build tool schemas', {
error: error instanceof Error ? error.message : String(error),
})
logger.warn(
messageId
? `Failed to build tool schemas [messageId:${messageId}]`
: 'Failed to build tool schemas',
{
error: error instanceof Error ? error.message : String(error),
}
)
}
return integrationTools
}
@@ -192,16 +211,27 @@ export async function buildCopilotRequestPayload(
description:
mcpTool.description || `MCP tool: ${mcpTool.name} (${mcpTool.serverName})`,
input_schema: mcpTool.inputSchema as unknown as Record<string, unknown>,
executeLocally: false,
})
}
if (mcpTools.length > 0) {
payloadLogger.info('Added MCP tools to copilot payload', { count: mcpTools.length })
logger.error(
userMessageId
? `Added MCP tools to copilot payload [messageId:${userMessageId}]`
: 'Added MCP tools to copilot payload',
{ count: mcpTools.length }
)
}
}
} catch (error) {
payloadLogger.warn('Failed to discover MCP tools for copilot', {
error: error instanceof Error ? error.message : String(error),
})
logger.warn(
userMessageId
? `Failed to discover MCP tools for copilot [messageId:${userMessageId}]`
: 'Failed to discover MCP tools for copilot',
{
error: error instanceof Error ? error.message : String(error),
}
)
}
}
}

View File

@@ -0,0 +1,122 @@
/**
* @vitest-environment node
*/
import { describe, expect, it } from 'vitest'
import type { OrchestratorResult } from '@/lib/copilot/request/types'
import {
buildPersistedAssistantMessage,
buildPersistedUserMessage,
normalizeMessage,
} from './persisted-message'
// Unit coverage for the persisted-message layer: canonical round-trips through
// normalizeMessage, legacy-shape normalization, and user-message construction.
describe('persisted-message', () => {
  it('round-trips canonical tool blocks through normalizeMessage', () => {
    const result: OrchestratorResult = {
      success: true,
      content: 'done',
      requestId: 'req-1',
      contentBlocks: [
        {
          type: 'tool_call',
          timestamp: Date.now(),
          calledBy: 'build',
          toolCall: {
            id: 'tool-1',
            name: 'read',
            status: 'success',
            params: { path: 'foo.txt' },
            result: { success: true, output: { ok: true } },
          },
        },
      ],
      toolCalls: [],
    }
    const persisted = buildPersistedAssistantMessage(result)
    const normalized = normalizeMessage(persisted as unknown as Record<string, unknown>)
    // tool_call/status from the orchestrator result becomes the canonical
    // tool/state shape, with calledBy folded into the tool call itself.
    expect(normalized.contentBlocks).toEqual([
      {
        type: 'tool',
        phase: 'call',
        toolCall: {
          id: 'tool-1',
          name: 'read',
          state: 'success',
          params: { path: 'foo.txt' },
          result: { success: true, output: { ok: true } },
          calledBy: 'build',
        },
      },
      {
        type: 'text',
        channel: 'assistant',
        content: 'done',
      },
    ])
  })
  it('normalizes legacy tool_call and top-level toolCalls shapes', () => {
    const normalized = normalizeMessage({
      id: 'msg-1',
      role: 'assistant',
      content: 'hello',
      timestamp: '2024-01-01T00:00:00.000Z',
      contentBlocks: [
        {
          type: 'tool_call',
          toolCall: {
            id: 'tool-1',
            name: 'read',
            state: 'cancelled',
            // Legacy display shape used `text`; canonical shape uses `title`.
            display: { text: 'Stopped by user' },
          },
        },
      ],
      // Legacy top-level toolCalls are expected to be dropped from
      // contentBlocks during normalization.
      toolCalls: [
        {
          id: 'tool-2',
          name: 'glob',
          status: 'success',
          result: { matches: [] },
        },
      ],
    })
    expect(normalized.contentBlocks).toEqual([
      {
        type: 'tool',
        phase: 'call',
        toolCall: {
          id: 'tool-1',
          name: 'read',
          state: 'cancelled',
          display: { title: 'Stopped by user' },
        },
      },
      {
        type: 'text',
        channel: 'assistant',
        content: 'hello',
      },
    ])
  })
  it('builds normalized user messages with stripped optional empties', () => {
    const msg = buildPersistedUserMessage({
      id: 'user-1',
      content: 'hello',
      fileAttachments: [],
      contexts: [],
    })
    expect(msg).toMatchObject({
      id: 'user-1',
      role: 'user',
      content: 'hello',
    })
    // Empty optional arrays are stripped rather than persisted as [].
    expect(msg.fileAttachments).toBeUndefined()
    expect(msg.contexts).toBeUndefined()
  })
})

View File

@@ -0,0 +1,469 @@
import {
MothershipStreamV1CompletionStatus,
MothershipStreamV1EventType,
MothershipStreamV1SpanLifecycleEvent,
MothershipStreamV1SpanPayloadKind,
type MothershipStreamV1StreamScope,
MothershipStreamV1TextChannel,
MothershipStreamV1ToolOutcome,
MothershipStreamV1ToolPhase,
} from '@/lib/copilot/generated/mothership-stream-v1'
import type {
ContentBlock,
LocalToolCallStatus,
OrchestratorResult,
} from '@/lib/copilot/request/types'
/** Union of the live tool-call status and the terminal stream outcome. */
export type PersistedToolState = LocalToolCallStatus | MothershipStreamV1ToolOutcome
/** Tool invocation as stored inside a persisted message. */
export interface PersistedToolCall {
  id: string
  name: string
  state: PersistedToolState
  /** Arguments the tool was called with, when captured. */
  params?: Record<string, unknown>
  /** Canonical result envelope: success flag plus optional output/error. */
  result?: { success: boolean; output?: unknown; error?: string }
  error?: string
  /** Identifier of the (sub)agent that issued the call, if any. */
  calledBy?: string
  durationMs?: number
  /** Optional UI labels used when rendering the call. */
  display?: { title?: string; phaseLabel?: string }
}
/** One canonical content block inside a persisted message. */
export interface PersistedContentBlock {
  type: MothershipStreamV1EventType
  /** Stream lane that produced the block (main agent vs subagent). */
  lane?: MothershipStreamV1StreamScope['lane']
  /** Text channel for `text` blocks (assistant vs thinking). */
  channel?: MothershipStreamV1TextChannel
  /** Tool phase for `tool` blocks. */
  phase?: MothershipStreamV1ToolPhase
  /** Span payload kind for `span` blocks. */
  kind?: MothershipStreamV1SpanPayloadKind
  /** Span lifecycle marker for `span` blocks. */
  lifecycle?: MothershipStreamV1SpanLifecycleEvent
  /** Completion status for `complete` blocks. */
  status?: MothershipStreamV1CompletionStatus
  content?: string
  toolCall?: PersistedToolCall
}
/** File attached to a user message. Field names mirror the storage schema. */
export interface PersistedFileAttachment {
  id: string
  key: string
  filename: string
  media_type: string
  size: number
}
/** @-mention context attached to a user message. */
export interface PersistedMessageContext {
  kind: string
  label: string
  workflowId?: string
  knowledgeId?: string
  tableId?: string
  fileId?: string
}
/** Canonical persisted chat message (user or assistant). */
export interface PersistedMessage {
  id: string
  role: 'user' | 'assistant'
  content: string
  // ISO-8601 creation time.
  timestamp: string
  requestId?: string
  contentBlocks?: PersistedContentBlock[]
  fileAttachments?: PersistedFileAttachment[]
  contexts?: PersistedMessageContext[]
}
// ---------------------------------------------------------------------------
// Write: OrchestratorResult → PersistedMessage
// ---------------------------------------------------------------------------
/**
 * Derive the persisted tool state for a block: prefer the recorded result's
 * outcome when one exists, otherwise fall back to the call's live status.
 */
function resolveToolState(block: ContentBlock): PersistedToolState {
  const call = block.toolCall
  if (call == null) {
    // No tool call attached — treat as not started.
    return 'pending'
  }
  const success = call.result?.success
  if (success === undefined) {
    // No terminal result recorded; persist whatever status the call carries.
    return call.status as PersistedToolState
  }
  return success ? MothershipStreamV1ToolOutcome.success : MothershipStreamV1ToolOutcome.error
}
/**
 * Map an in-memory orchestrator content block to its persisted (canonical
 * stream-event) representation.
 *
 * Text/thinking map to `text` blocks on the appropriate channel, subagent
 * markers map to `span` blocks, and tool calls map to `tool` blocks. Unknown
 * block types degrade to plain text so nothing is lost on write.
 */
function mapContentBlock(block: ContentBlock): PersistedContentBlock {
  switch (block.type) {
    case 'text':
      return {
        type: MothershipStreamV1EventType.text,
        channel: MothershipStreamV1TextChannel.assistant,
        content: block.content,
      }
    case 'thinking':
      return {
        type: MothershipStreamV1EventType.text,
        channel: MothershipStreamV1TextChannel.thinking,
        content: block.content,
      }
    case 'subagent':
      return {
        type: MothershipStreamV1EventType.span,
        kind: MothershipStreamV1SpanPayloadKind.subagent,
        lifecycle: MothershipStreamV1SpanLifecycleEvent.start,
        content: block.content,
      }
    case 'subagent_text':
      return {
        type: MothershipStreamV1EventType.text,
        lane: 'subagent',
        channel: MothershipStreamV1TextChannel.assistant,
        content: block.content,
      }
    case 'tool_call': {
      if (!block.toolCall) {
        // Degenerate tool block with no call payload — persist as a bare tool event.
        return {
          type: MothershipStreamV1EventType.tool,
          phase: MothershipStreamV1ToolPhase.call,
          content: block.content,
        }
      }
      const state = resolveToolState(block)
      const isSubagentTool = !!block.calledBy
      const isNonTerminal =
        state === MothershipStreamV1ToolOutcome.cancelled ||
        state === 'pending' ||
        state === 'executing'
      // Subagent tool calls that never reached a terminal outcome are persisted
      // without params/result so a resumed stream can re-hydrate them cleanly.
      const omitPayload = isSubagentTool && isNonTerminal
      const toolCall: PersistedToolCall = {
        id: block.toolCall.id,
        name: block.toolCall.name,
        state,
        // Guard both spreads on presence: previously `result` was spread
        // unconditionally, writing a `result: undefined` property.
        ...(!omitPayload && block.toolCall.result ? { result: block.toolCall.result } : {}),
        ...(!omitPayload && block.toolCall.params ? { params: block.toolCall.params } : {}),
        ...(block.calledBy ? { calledBy: block.calledBy } : {}),
      }
      return {
        type: MothershipStreamV1EventType.tool,
        phase: MothershipStreamV1ToolPhase.call,
        toolCall,
      }
    }
    default:
      return { type: MothershipStreamV1EventType.text, content: block.content }
  }
}
/**
 * Build the assistant message row persisted after an orchestrator run.
 *
 * @param result - Completed run whose content/blocks should be stored.
 * @param requestId - Overrides `result.requestId` when supplied.
 * @returns A freshly-identified persisted message; `contentBlocks` is omitted
 *          entirely when the run produced none.
 */
export function buildPersistedAssistantMessage(
  result: OrchestratorResult,
  requestId?: string
): PersistedMessage {
  // Explicit requestId wins over the one recorded on the result.
  const resolvedRequestId = requestId || result.requestId
  const message: PersistedMessage = {
    id: crypto.randomUUID(),
    role: 'assistant',
    content: result.content,
    timestamp: new Date().toISOString(),
  }
  if (resolvedRequestId) {
    message.requestId = resolvedRequestId
  }
  if (result.contentBlocks.length > 0) {
    message.contentBlocks = result.contentBlocks.map((block) => mapContentBlock(block))
  }
  return message
}
/** Inputs for persisting a user-authored chat message. */
export interface UserMessageParams {
  id: string
  content: string
  /** Optional attachments; empty arrays are stripped on write. */
  fileAttachments?: PersistedFileAttachment[]
  /** Optional @-mention contexts; empty arrays are stripped on write. */
  contexts?: PersistedMessageContext[]
}
export function buildPersistedUserMessage(params: UserMessageParams): PersistedMessage {
const message: PersistedMessage = {
id: params.id,
role: 'user',
content: params.content,
timestamp: new Date().toISOString(),
}
if (params.fileAttachments && params.fileAttachments.length > 0) {
message.fileAttachments = params.fileAttachments
}
if (params.contexts && params.contexts.length > 0) {
message.contexts = params.contexts.map((c) => ({
kind: c.kind,
label: c.label,
...(c.workflowId ? { workflowId: c.workflowId } : {}),
...(c.knowledgeId ? { knowledgeId: c.knowledgeId } : {}),
...(c.tableId ? { tableId: c.tableId } : {}),
...(c.fileId ? { fileId: c.fileId } : {}),
}))
}
return message
}
// ---------------------------------------------------------------------------
// Read: raw JSONB → PersistedMessage
// Handles both canonical (type: 'tool', 'text', 'span', 'complete') and
// legacy (type: 'tool_call', 'thinking', 'subagent', 'stopped') blocks.
// ---------------------------------------------------------------------------
/** Block `type` values already in the canonical event shape. */
const CANONICAL_BLOCK_TYPES: Set<string> = new Set(Object.values(MothershipStreamV1EventType))
/** Loosely-typed view of a block as read from JSONB (canonical or legacy). */
interface RawBlock {
  type: string
  lane?: string
  content?: string
  channel?: string
  phase?: string
  kind?: string
  lifecycle?: string
  status?: string
  toolCall?: {
    id?: string
    name?: string
    state?: string
    params?: Record<string, unknown>
    result?: { success: boolean; output?: unknown; error?: string }
    // Legacy rows stored the label under `text`; canonical rows use `title`/`phaseLabel`.
    display?: { text?: string; title?: string; phaseLabel?: string }
    calledBy?: string
    durationMs?: number
    error?: string
  } | null
}
/** Legacy top-level `toolCalls[]` entry (pre-canonical persistence shape). */
interface LegacyToolCall {
  id: string
  name: string
  status: string
  params?: Record<string, unknown>
  // Raw output value; not yet wrapped in the canonical result envelope.
  result?: unknown
  error?: string
  durationMs?: number
}
/** Known state strings map to themselves; anything else falls back to error. */
const OUTCOME_NORMALIZATION: Record<string, PersistedToolState> = {
  [MothershipStreamV1ToolOutcome.success]: MothershipStreamV1ToolOutcome.success,
  [MothershipStreamV1ToolOutcome.error]: MothershipStreamV1ToolOutcome.error,
  [MothershipStreamV1ToolOutcome.cancelled]: MothershipStreamV1ToolOutcome.cancelled,
  [MothershipStreamV1ToolOutcome.skipped]: MothershipStreamV1ToolOutcome.skipped,
  [MothershipStreamV1ToolOutcome.rejected]: MothershipStreamV1ToolOutcome.rejected,
  pending: 'pending',
  executing: 'executing',
}
/** Coerce a stored state string to a known PersistedToolState (unknown → error, absent → pending). */
function normalizeToolState(state: string | undefined): PersistedToolState {
  if (state === undefined || state === '') {
    return 'pending'
  }
  const known = OUTCOME_NORMALIZATION[state]
  return known === undefined ? MothershipStreamV1ToolOutcome.error : known
}
/** True when the block's `type` is one of the canonical stream event types. */
function isCanonicalBlock(block: RawBlock): boolean {
  const { type } = block
  return CANONICAL_BLOCK_TYPES.has(type)
}
/**
 * Normalize a block already stored in the canonical event shape.
 * Copies only recognized fields, re-validates the tool state, and translates
 * a legacy `display.text` label into the canonical `display.title`.
 */
function normalizeCanonicalBlock(block: RawBlock): PersistedContentBlock {
  const result: PersistedContentBlock = {
    type: block.type as MothershipStreamV1EventType,
  }
  if (block.lane === 'main' || block.lane === 'subagent') {
    result.lane = block.lane
  }
  if (block.content !== undefined) result.content = block.content
  if (block.channel) result.channel = block.channel as MothershipStreamV1TextChannel
  if (block.phase) result.phase = block.phase as MothershipStreamV1ToolPhase
  if (block.kind) result.kind = block.kind as MothershipStreamV1SpanPayloadKind
  if (block.lifecycle) result.lifecycle = block.lifecycle as MothershipStreamV1SpanLifecycleEvent
  if (block.status) result.status = block.status as MothershipStreamV1CompletionStatus
  if (block.toolCall) {
    result.toolCall = {
      id: block.toolCall.id ?? '',
      name: block.toolCall.name ?? '',
      state: normalizeToolState(block.toolCall.state),
      ...(block.toolCall.params ? { params: block.toolCall.params } : {}),
      ...(block.toolCall.result ? { result: block.toolCall.result } : {}),
      ...(block.toolCall.calledBy ? { calledBy: block.toolCall.calledBy } : {}),
      ...(block.toolCall.error ? { error: block.toolCall.error } : {}),
      // `!= null` (not truthiness) so a legitimate 0ms duration is preserved.
      ...(block.toolCall.durationMs != null ? { durationMs: block.toolCall.durationMs } : {}),
      ...(block.toolCall.display
        ? {
            display: {
              // Prefer the canonical title; fall back to the legacy text label.
              title: block.toolCall.display.title ?? block.toolCall.display.text,
              phaseLabel: block.toolCall.display.phaseLabel,
            },
          }
        : {}),
    }
  }
  return result
}
/**
 * Translate a legacy-shaped block (`tool_call`, `thinking`, `subagent`,
 * `subagent_text`, `subagent_end`, `stopped`) into the canonical event shape.
 * Anything unrecognized — including a `tool_call` with no payload — degrades
 * to an assistant text block.
 */
function normalizeLegacyBlock(block: RawBlock): PersistedContentBlock {
  switch (block.type) {
    case 'tool_call':
      if (block.toolCall) {
        return {
          type: MothershipStreamV1EventType.tool,
          phase: MothershipStreamV1ToolPhase.call,
          toolCall: {
            id: block.toolCall.id ?? '',
            name: block.toolCall.name ?? '',
            state: normalizeToolState(block.toolCall.state),
            ...(block.toolCall.params ? { params: block.toolCall.params } : {}),
            ...(block.toolCall.result ? { result: block.toolCall.result } : {}),
            ...(block.toolCall.calledBy ? { calledBy: block.toolCall.calledBy } : {}),
            // Legacy display only carried `text`; surface it as the title.
            ...(block.toolCall.display ? { display: { title: block.toolCall.display.text } } : {}),
          },
        }
      }
      break
    case 'thinking':
      return {
        type: MothershipStreamV1EventType.text,
        channel: MothershipStreamV1TextChannel.thinking,
        content: block.content,
      }
    case 'subagent_text':
      return {
        type: MothershipStreamV1EventType.text,
        lane: 'subagent',
        channel: MothershipStreamV1TextChannel.assistant,
        content: block.content,
      }
    case 'subagent':
      return {
        type: MothershipStreamV1EventType.span,
        kind: MothershipStreamV1SpanPayloadKind.subagent,
        lifecycle: MothershipStreamV1SpanLifecycleEvent.start,
        content: block.content,
      }
    case 'subagent_end':
      return {
        type: MothershipStreamV1EventType.span,
        kind: MothershipStreamV1SpanPayloadKind.subagent,
        lifecycle: MothershipStreamV1SpanLifecycleEvent.end,
      }
    case 'stopped':
      return {
        type: MothershipStreamV1EventType.complete,
        status: MothershipStreamV1CompletionStatus.cancelled,
      }
    default:
      break
  }
  // Fallback: persist as plain assistant text.
  return {
    type: MothershipStreamV1EventType.text,
    channel: MothershipStreamV1TextChannel.assistant,
    content: block.content,
  }
}
/** Dispatch a raw block to the canonical or legacy normalizer. */
function normalizeBlock(block: RawBlock): PersistedContentBlock {
  if (isCanonicalBlock(block)) {
    return normalizeCanonicalBlock(block)
  }
  return normalizeLegacyBlock(block)
}
/**
 * Convert a legacy top-level `toolCalls[]` entry into a canonical tool block.
 * Success is derived from the legacy `status` string, and the raw result value
 * is wrapped in the canonical `{ success, output, error? }` envelope.
 */
function normalizeLegacyToolCall(tc: LegacyToolCall): PersistedContentBlock {
  const state = normalizeToolState(tc.status)
  return {
    type: MothershipStreamV1EventType.tool,
    phase: MothershipStreamV1ToolPhase.call,
    toolCall: {
      id: tc.id,
      name: tc.name,
      state,
      ...(tc.params ? { params: tc.params } : {}),
      ...(tc.result != null
        ? {
            result: {
              success: tc.status === MothershipStreamV1ToolOutcome.success,
              output: tc.result,
              ...(tc.error ? { error: tc.error } : {}),
            },
          }
        : {}),
      // `!= null` (not truthiness) so a legitimate 0ms duration survives.
      ...(tc.durationMs != null ? { durationMs: tc.durationMs } : {}),
    },
  }
}
/** True when any raw block is a tool block (canonical `tool` or legacy `tool_call`). */
function blocksContainTools(blocks: RawBlock[]): boolean {
  for (const block of blocks) {
    if (block.type === 'tool_call' || block.type === MothershipStreamV1EventType.tool) {
      return true
    }
  }
  return false
}
/**
 * Normalize every raw block, then append the message-level content as an
 * assistant text block when no non-thinking text block already carries text.
 */
function normalizeBlocks(rawBlocks: RawBlock[], messageContent: string): PersistedContentBlock[] {
  const normalized = rawBlocks.map((raw) => normalizeBlock(raw))
  const hasAssistantText = normalized.some((candidate) => {
    if (candidate.type !== MothershipStreamV1EventType.text) return false
    if (candidate.channel === MothershipStreamV1TextChannel.thinking) return false
    return Boolean(candidate.content?.trim())
  })
  if (!hasAssistantText && messageContent.trim()) {
    normalized.push({
      type: MothershipStreamV1EventType.text,
      channel: MothershipStreamV1TextChannel.assistant,
      content: messageContent,
    })
  }
  return normalized
}
/**
 * Read-side normalization: convert a raw JSONB message row into a
 * PersistedMessage, accepting both canonical and legacy block shapes.
 * Legacy top-level `toolCalls` are folded into `contentBlocks` only when the
 * blocks themselves do not already contain tool entries.
 */
export function normalizeMessage(raw: Record<string, unknown>): PersistedMessage {
  const msg: PersistedMessage = {
    id: (raw.id as string) ?? crypto.randomUUID(),
    role: (raw.role as 'user' | 'assistant') ?? 'assistant',
    content: (raw.content as string) ?? '',
    timestamp: (raw.timestamp as string) ?? new Date().toISOString(),
  }
  if (typeof raw.requestId === 'string' && raw.requestId) {
    msg.requestId = raw.requestId
  }
  const rawBlocks = Array.isArray(raw.contentBlocks) ? (raw.contentBlocks as RawBlock[]) : []
  const rawToolCalls = Array.isArray(raw.toolCalls) ? (raw.toolCalls as LegacyToolCall[]) : []
  if (rawBlocks.length > 0) {
    msg.contentBlocks = normalizeBlocks(rawBlocks, msg.content)
    // Only merge legacy toolCalls when the blocks carry no tool entries,
    // otherwise the same calls would be duplicated.
    if (rawToolCalls.length > 0 && !blocksContainTools(rawBlocks)) {
      msg.contentBlocks.push(...rawToolCalls.map(normalizeLegacyToolCall))
    }
  } else if (rawToolCalls.length > 0) {
    msg.contentBlocks = rawToolCalls.map(normalizeLegacyToolCall)
    if (msg.content.trim()) {
      msg.contentBlocks.push({
        type: MothershipStreamV1EventType.text,
        channel: MothershipStreamV1TextChannel.assistant,
        content: msg.content,
      })
    }
  }
  const attachments = raw.fileAttachments
  if (Array.isArray(attachments) && attachments.length > 0) {
    msg.fileAttachments = attachments as PersistedFileAttachment[]
  }
  const contexts = raw.contexts
  if (Array.isArray(contexts) && contexts.length > 0) {
    msg.contexts = (contexts as PersistedMessageContext[]).map((ctx) => {
      const entry: PersistedMessageContext = { kind: ctx.kind, label: ctx.label }
      if (ctx.workflowId) entry.workflowId = ctx.workflowId
      if (ctx.knowledgeId) entry.knowledgeId = ctx.knowledgeId
      if (ctx.tableId) entry.tableId = ctx.tableId
      if (ctx.fileId) entry.fileId = ctx.fileId
      return entry
    })
  }
  return msg
}

View File

@@ -19,9 +19,9 @@ import { checkKnowledgeBaseAccess } from '@/app/api/knowledge/utils'
import { isHiddenFromDisplay } from '@/blocks/types'
import { getUserPermissionConfig } from '@/ee/access-control/utils/permission-check'
import { escapeRegExp } from '@/executor/constants'
import type { ChatContext } from '@/stores/panel/copilot/types'
import type { ChatContext } from '@/stores/panel'
export type AgentContextType =
type AgentContextType =
| 'past_chat'
| 'workflow'
| 'current_workflow'
@@ -35,7 +35,7 @@ export type AgentContextType =
| 'docs'
| 'active_resource'
export interface AgentContext {
interface AgentContext {
type: AgentContextType
tag: string
content: string
@@ -43,62 +43,6 @@ export interface AgentContext {
const logger = createLogger('ProcessContents')
/**
 * Resolve chat @-mention contexts into concrete agent context payloads.
 *
 * Each context is resolved independently and in parallel; a failure on any
 * single context is logged and dropped rather than failing the whole batch.
 *
 * @param contexts - Raw contexts attached to the user message (may be absent).
 * @returns Resolved contexts, with unresolvable or unsupported kinds omitted.
 */
export async function processContexts(
  contexts: ChatContext[] | undefined
): Promise<AgentContext[]> {
  if (!Array.isArray(contexts) || contexts.length === 0) return []
  const tasks = contexts.map(async (ctx) => {
    try {
      if (ctx.kind === 'past_chat') {
        return await processPastChatViaApi(ctx.chatId, ctx.label ? `@${ctx.label}` : '@')
      }
      if ((ctx.kind === 'workflow' || ctx.kind === 'current_workflow') && ctx.workflowId) {
        return await processWorkflowFromDb(
          ctx.workflowId,
          undefined,
          ctx.label ? `@${ctx.label}` : '@',
          ctx.kind
        )
      }
      if (ctx.kind === 'knowledge' && ctx.knowledgeId) {
        return await processKnowledgeFromDb(
          ctx.knowledgeId,
          undefined,
          ctx.label ? `@${ctx.label}` : '@'
        )
      }
      // Explicit existence check: `ctx.blockIds?.length > 0` compares a
      // possibly-undefined value (a strictNullChecks error) and does not
      // narrow `ctx.blockIds` for the indexed access below.
      if (ctx.kind === 'blocks' && ctx.blockIds && ctx.blockIds.length > 0) {
        return await processBlockMetadata(ctx.blockIds[0], ctx.label ? `@${ctx.label}` : '@')
      }
      if (ctx.kind === 'templates' && ctx.templateId) {
        return await processTemplateFromDb(
          ctx.templateId,
          undefined,
          ctx.label ? `@${ctx.label}` : '@'
        )
      }
      if (ctx.kind === 'logs' && ctx.executionId) {
        return await processExecutionLogFromDb(
          ctx.executionId,
          undefined,
          ctx.label ? `@${ctx.label}` : '@'
        )
      }
      if (ctx.kind === 'workflow_block' && ctx.workflowId && ctx.blockId) {
        return await processWorkflowBlockFromDb(ctx.workflowId, undefined, ctx.blockId, ctx.label)
      }
      // Other kinds can be added here: workflow, blocks, logs, knowledge, templates, docs
      return null
    } catch (error) {
      logger.error('Failed processing context', { ctx, error })
      return null
    }
  })
  const results = await Promise.all(tasks)
  // The type predicate already narrows to AgentContext[]; no trailing cast needed.
  return results.filter((r): r is AgentContext => !!r)
}
// Server-side variant (recommended for use in API routes)
export async function processContextsServer(
contexts: ChatContext[] | undefined,
@@ -265,7 +209,7 @@ async function processPastChatFromDb(
currentWorkspaceId?: string
): Promise<AgentContext | null> {
try {
const { getAccessibleCopilotChat } = await import('@/lib/copilot/chat-lifecycle')
const { getAccessibleCopilotChat } = await import('./lifecycle')
const chat = await getAccessibleCopilotChat(chatId, userId)
if (!chat) {
return null

View File

@@ -1,48 +0,0 @@
import type {
  ChatContext,
  CopilotToolCall,
  SubAgentContentBlock,
} from '@/stores/panel/copilot/types'
/**
 * A content block used in copilot messages and during streaming.
 * Uses a literal type union for `type` to stay compatible with CopilotMessage.
 */
export type ContentBlockType = 'text' | 'thinking' | 'tool_call' | 'contexts'
/** One renderable block accumulated on the client while a message streams. */
export interface ClientContentBlock {
  type: ContentBlockType
  content?: string
  // Creation time in ms since epoch.
  timestamp: number
  toolCall?: CopilotToolCall | null
  startTime?: number
  duration?: number
  contexts?: ChatContext[]
}
/** Mutable accumulator for a single in-flight streamed assistant message. */
export interface StreamingContext {
  messageId: string
  requestId?: string
  // Full assistant text accumulated so far.
  accumulatedContent: string
  contentBlocks: ClientContentBlock[]
  // Open text block currently receiving deltas, if any.
  currentTextBlock: ClientContentBlock | null
  isInThinkingBlock: boolean
  currentThinkingBlock: ClientContentBlock | null
  isInDesignWorkflowBlock: boolean
  designWorkflowContent: string
  pendingContent: string
  newChatId?: string
  doneEventCount: number
  streamComplete?: boolean
  wasAborted?: boolean
  suppressContinueOption?: boolean
  subAgentParentToolCallId?: string
  subAgentParentStack: string[]
  // Subagent bookkeeping — presumably keyed by parent tool-call id; verify against writers.
  subAgentContent: Record<string, string>
  subAgentToolCalls: Record<string, CopilotToolCall[]>
  subAgentBlocks: Record<string, SubAgentContentBlock[]>
  suppressStreamingUpdates?: boolean
  activeCompactionId?: string
}
/** Alias kept for callers importing the client-prefixed name. */
export type ClientStreamingContext = StreamingContext

View File

@@ -10,13 +10,6 @@ export const SIM_AGENT_API_URL =
? rawAgentUrl
: SIM_AGENT_API_URL_DEFAULT
// ---------------------------------------------------------------------------
// Redis key prefixes
// ---------------------------------------------------------------------------
/** Redis key prefix for copilot SSE stream buffers. */
export const REDIS_COPILOT_STREAM_PREFIX = 'copilot_stream:'
// ---------------------------------------------------------------------------
// Timeouts
// ---------------------------------------------------------------------------
@@ -31,29 +24,9 @@ export const STREAM_TIMEOUT_MS = 3_600_000
// Stream resume
// ---------------------------------------------------------------------------
/** Maximum number of resume attempts before giving up. */
export const MAX_RESUME_ATTEMPTS = 3
/** SessionStorage key for persisting active stream metadata across page reloads. */
export const STREAM_STORAGE_KEY = 'copilot_active_stream'
// ---------------------------------------------------------------------------
// Client-side streaming batching
// ---------------------------------------------------------------------------
/** Delay (ms) before processing the next queued message after stream completion. */
export const QUEUE_PROCESS_DELAY_MS = 100
/** Delay (ms) before invalidating subscription queries after stream completion. */
export const SUBSCRIPTION_INVALIDATE_DELAY_MS = 1_000
// ---------------------------------------------------------------------------
// UI helpers
// ---------------------------------------------------------------------------
/** Maximum character length for an optimistic chat title derived from a user message. */
export const OPTIMISTIC_TITLE_MAX_LENGTH = 50
// ---------------------------------------------------------------------------
// Copilot API paths (client-side fetch targets)
// ---------------------------------------------------------------------------
@@ -64,39 +37,23 @@ export const COPILOT_CHAT_API_PATH = '/api/copilot/chat'
/** POST — send a workspace-scoped chat message (mothership). */
export const MOTHERSHIP_CHAT_API_PATH = '/api/mothership/chat'
/** GET — resume/replay a copilot SSE stream. */
export const COPILOT_CHAT_STREAM_API_PATH = '/api/copilot/chat/stream'
/** POST — persist chat messages / plan artifact / config. */
export const COPILOT_UPDATE_MESSAGES_API_PATH = '/api/copilot/chat/update-messages'
/** DELETE — delete a copilot chat. */
export const COPILOT_DELETE_CHAT_API_PATH = '/api/copilot/chat/delete'
/** POST — confirm or reject a tool call. */
export const COPILOT_CONFIRM_API_PATH = '/api/copilot/confirm'
/** POST — forward diff-accepted/rejected stats to the copilot backend. */
export const COPILOT_STATS_API_PATH = '/api/copilot/stats'
/** GET — load checkpoints for a chat. */
export const COPILOT_CHECKPOINTS_API_PATH = '/api/copilot/checkpoints'
/** POST — revert to a checkpoint. */
export const COPILOT_CHECKPOINTS_REVERT_API_PATH = '/api/copilot/checkpoints/revert'
/** GET/POST/DELETE — manage auto-allowed tools. */
export const COPILOT_AUTO_ALLOWED_TOOLS_API_PATH = '/api/copilot/auto-allowed-tools'
/** GET — fetch dynamically available copilot models. */
export const COPILOT_MODELS_API_PATH = '/api/copilot/models'
/** GET — fetch user credentials for masking. */
export const COPILOT_CREDENTIALS_API_PATH = '/api/copilot/credentials'
// ---------------------------------------------------------------------------
// Dedup limits
// ---------------------------------------------------------------------------
/** Maximum entries in the in-memory SSE tool-event dedup cache. */
export const STREAM_BUFFER_MAX_DEDUP_ENTRIES = 1_000
// ---------------------------------------------------------------------------
// Copilot modes
// ---------------------------------------------------------------------------
export const COPILOT_MODES = ['ask', 'build', 'plan'] as const
export const COPILOT_REQUEST_MODES = ['ask', 'build', 'plan', 'agent'] as const

View File

@@ -0,0 +1,295 @@
// AUTO-GENERATED FILE. DO NOT EDIT.
//
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1EventType".
*/
export type MothershipStreamV1EventType =
| 'session'
| 'text'
| 'tool'
| 'span'
| 'resource'
| 'run'
| 'error'
| 'complete'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1AsyncToolRecordStatus".
*/
export type MothershipStreamV1AsyncToolRecordStatus =
| 'pending'
| 'running'
| 'completed'
| 'failed'
| 'cancelled'
| 'delivered'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1CompletionStatus".
*/
export type MothershipStreamV1CompletionStatus = 'complete' | 'error' | 'cancelled'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ResourceOp".
*/
export type MothershipStreamV1ResourceOp = 'upsert' | 'remove'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1RunKind".
*/
export type MothershipStreamV1RunKind =
| 'checkpoint_pause'
| 'resumed'
| 'compaction_start'
| 'compaction_done'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1SessionKind".
*/
export type MothershipStreamV1SessionKind = 'trace' | 'chat' | 'title' | 'start'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1SpanKind".
*/
export type MothershipStreamV1SpanKind = 'subagent'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1SpanLifecycleEvent".
*/
export type MothershipStreamV1SpanLifecycleEvent = 'start' | 'end'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1SpanPayloadKind".
*/
export type MothershipStreamV1SpanPayloadKind = 'subagent' | 'structured_result' | 'subagent_result'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1TextChannel".
*/
export type MothershipStreamV1TextChannel = 'assistant' | 'thinking'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ToolExecutor".
*/
export type MothershipStreamV1ToolExecutor = 'go' | 'sim' | 'client'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ToolMode".
*/
export type MothershipStreamV1ToolMode = 'sync' | 'async'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ToolPhase".
*/
export type MothershipStreamV1ToolPhase = 'call' | 'args_delta' | 'result'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ToolOutcome".
*/
export type MothershipStreamV1ToolOutcome =
| 'success'
| 'error'
| 'cancelled'
| 'skipped'
| 'rejected'
/**
* Shared execution-oriented mothership stream contract from Go to Sim.
*/
export interface MothershipStreamV1EventEnvelope {
payload: MothershipStreamV1AdditionalPropertiesMap
scope?: MothershipStreamV1StreamScope
seq: number
stream: MothershipStreamV1StreamRef
trace?: MothershipStreamV1Trace
ts: string
type: MothershipStreamV1EventType
v: number
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1AdditionalPropertiesMap".
*/
export interface MothershipStreamV1AdditionalPropertiesMap {
[k: string]: unknown
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1StreamScope".
*/
export interface MothershipStreamV1StreamScope {
agentId?: string
lane: 'main' | 'subagent'
parentToolCallId?: string
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1StreamRef".
*/
export interface MothershipStreamV1StreamRef {
chatId?: string
cursor?: string
streamId: string
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1Trace".
*/
export interface MothershipStreamV1Trace {
requestId: string
spanId?: string
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1CheckpointPausePayload".
*/
export interface MothershipStreamV1CheckpointPausePayload {
checkpointId: string
executionId: string
pendingToolCallIds: string[]
runId: string
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ResumeRequest".
*/
export interface MothershipStreamV1ResumeRequest {
checkpointId: string
results: MothershipStreamV1ResumeToolResult[]
streamId: string
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ResumeToolResult".
*/
export interface MothershipStreamV1ResumeToolResult {
error?: string
output?: unknown
success: boolean
toolCallId: string
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1StreamCursor".
*/
export interface MothershipStreamV1StreamCursor {
cursor: string
seq: number
streamId: string
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ToolCallDescriptor".
*/
export interface MothershipStreamV1ToolCallDescriptor {
arguments?: MothershipStreamV1AdditionalPropertiesMap
argumentsDelta?: string
executor: MothershipStreamV1ToolExecutor
mode: MothershipStreamV1ToolMode
partial?: boolean
phase: MothershipStreamV1ToolPhase
requiresConfirmation?: boolean
toolCallId: string
toolName: string
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ToolResultPayload".
*/
export interface MothershipStreamV1ToolResultPayload {
error?: string
output?: unknown
success: boolean
}
export const MothershipStreamV1AsyncToolRecordStatus = {
pending: 'pending',
running: 'running',
completed: 'completed',
failed: 'failed',
cancelled: 'cancelled',
delivered: 'delivered',
} as const
export const MothershipStreamV1CompletionStatus = {
complete: 'complete',
error: 'error',
cancelled: 'cancelled',
} as const
export const MothershipStreamV1EventType = {
session: 'session',
text: 'text',
tool: 'tool',
span: 'span',
resource: 'resource',
run: 'run',
error: 'error',
complete: 'complete',
} as const
export const MothershipStreamV1ResourceOp = {
upsert: 'upsert',
remove: 'remove',
} as const
export const MothershipStreamV1RunKind = {
checkpoint_pause: 'checkpoint_pause',
resumed: 'resumed',
compaction_start: 'compaction_start',
compaction_done: 'compaction_done',
} as const
export const MothershipStreamV1SessionKind = {
trace: 'trace',
chat: 'chat',
title: 'title',
start: 'start',
} as const
export const MothershipStreamV1SpanKind = {
subagent: 'subagent',
} as const
export const MothershipStreamV1SpanLifecycleEvent = {
start: 'start',
end: 'end',
} as const
export const MothershipStreamV1SpanPayloadKind = {
subagent: 'subagent',
structured_result: 'structured_result',
subagent_result: 'subagent_result',
} as const
export const MothershipStreamV1TextChannel = {
assistant: 'assistant',
thinking: 'thinking',
} as const
export const MothershipStreamV1ToolExecutor = {
go: 'go',
sim: 'sim',
client: 'client',
} as const
export const MothershipStreamV1ToolMode = {
sync: 'sync',
async: 'async',
} as const
export const MothershipStreamV1ToolOutcome = {
success: 'success',
error: 'error',
cancelled: 'cancelled',
skipped: 'skipped',
rejected: 'rejected',
} as const
export const MothershipStreamV1ToolPhase = {
call: 'call',
args_delta: 'args_delta',
result: 'result',
} as const

View File

@@ -0,0 +1,136 @@
// AUTO-GENERATED FILE. DO NOT EDIT.
//
// NOTE(review): change the Go source contracts and re-run codegen instead of
// editing this file by hand.
/**
 * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
 * via the `definition` "RequestTraceV1Outcome".
 */
export type RequestTraceV1Outcome = 'success' | 'error' | 'cancelled'
/**
 * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
 * via the `definition` "RequestTraceV1SpanSource".
 */
export type RequestTraceV1SpanSource = 'sim' | 'go'
/**
 * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
 * via the `definition` "RequestTraceV1SpanStatus".
 */
export type RequestTraceV1SpanStatus = 'ok' | 'error' | 'cancelled'
/**
 * Trace report sent from Sim to Go after a request completes.
 */
export interface RequestTraceV1SimReport {
  chatId?: string
  cost?: RequestTraceV1CostSummary
  durationMs: number
  endMs: number
  executionId?: string
  goTraceId?: string
  outcome: RequestTraceV1Outcome
  runId?: string
  simRequestId: string
  spans: RequestTraceV1Span[]
  startMs: number
  streamId?: string
  usage?: RequestTraceV1UsageSummary
}
/**
 * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
 * via the `definition` "RequestTraceV1CostSummary".
 */
export interface RequestTraceV1CostSummary {
  billedTotalCost?: number
  rawTotalCost?: number
}
/**
 * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
 * via the `definition` "RequestTraceV1Span".
 */
export interface RequestTraceV1Span {
  attributes?: MothershipStreamV1AdditionalPropertiesMap
  durationMs?: number
  endMs?: number
  kind?: string
  name: string
  parentName?: string
  source?: RequestTraceV1SpanSource
  startMs: number
  status: RequestTraceV1SpanStatus
}
/**
 * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
 * via the `definition` "MothershipStreamV1AdditionalPropertiesMap".
 */
export interface MothershipStreamV1AdditionalPropertiesMap {
  [k: string]: unknown
}
/**
 * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
 * via the `definition` "RequestTraceV1UsageSummary".
 */
export interface RequestTraceV1UsageSummary {
  cacheReadTokens?: number
  cacheWriteTokens?: number
  inputTokens?: number
  outputTokens?: number
}
/**
 * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
 * via the `definition` "RequestTraceV1MergedTrace".
 */
export interface RequestTraceV1MergedTrace {
  chatId?: string
  cost?: RequestTraceV1CostSummary
  durationMs: number
  endMs: number
  executionId?: string
  goTraceId: string
  model?: string
  outcome: RequestTraceV1Outcome
  provider?: string
  runId?: string
  serviceCharges?: MothershipStreamV1AdditionalPropertiesMap
  simRequestId?: string
  spans: RequestTraceV1Span[]
  startMs: number
  streamId?: string
  usage?: RequestTraceV1UsageSummary
  userId?: string
}
/**
 * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
 * via the `definition` "RequestTraceV1SimReport".
 */
// NOTE(review): appears to duplicate RequestTraceV1SimReport above — likely a
// codegen artifact of the self-referenced schema definition; confirm with the
// generator before relying on this name.
export interface RequestTraceV1SimReport1 {
  chatId?: string
  cost?: RequestTraceV1CostSummary
  durationMs: number
  endMs: number
  executionId?: string
  goTraceId?: string
  outcome: RequestTraceV1Outcome
  runId?: string
  simRequestId: string
  spans: RequestTraceV1Span[]
  startMs: number
  streamId?: string
  usage?: RequestTraceV1UsageSummary
}
// Runtime value maps mirroring the unions declared above.
export const RequestTraceV1Outcome = {
  success: 'success',
  error: 'error',
  cancelled: 'cancelled',
} as const
export const RequestTraceV1SpanSource = {
  sim: 'sim',
  go: 'go',
} as const
export const RequestTraceV1SpanStatus = {
  ok: 'ok',
  error: 'error',
  cancelled: 'cancelled',
} as const

File diff suppressed because it is too large Load Diff

View File

@@ -1,25 +0,0 @@
/** Identifiers that can be attached to a copilot log line. */
export interface CopilotLogContext {
  requestId?: string
  messageId?: string
}
/**
 * Appends copilot request identifiers to a log message.
 *
 * Returns the message unchanged when no identifiers are present; otherwise
 * appends them as a bracketed suffix, e.g. `"msg [requestId:r1 messageId:m1]"`.
 */
export function appendCopilotLogContext(message: string, context: CopilotLogContext = {}): string {
  const parts = [
    context.requestId ? `requestId:${context.requestId}` : undefined,
    context.messageId ? `messageId:${context.messageId}` : undefined,
  ].filter((part): part is string => part !== undefined)
  return parts.length > 0 ? `${message} [${parts.join(' ')}]` : message
}

View File

@@ -1,10 +0,0 @@
// Opaque model identifier used by copilot requests.
export type CopilotModelId = string
// UI-facing copilot modes.
export const COPILOT_MODES = ['ask', 'build', 'plan'] as const
export type CopilotMode = (typeof COPILOT_MODES)[number]
// Transport-layer vocabulary — note 'agent' in place of 'build'.
export const COPILOT_TRANSPORT_MODES = ['ask', 'agent', 'plan'] as const
export type CopilotTransportMode = (typeof COPILOT_TRANSPORT_MODES)[number]
// Union of both vocabularies accepted on incoming requests.
export const COPILOT_REQUEST_MODES = ['ask', 'build', 'plan', 'agent'] as const
export type CopilotRequestMode = (typeof COPILOT_REQUEST_MODES)[number]

View File

@@ -1,570 +0,0 @@
import { createLogger } from '@sim/logger'
import {
ASYNC_TOOL_STATUS,
inferDeliveredAsyncSuccess,
isDeliveredAsyncStatus,
isTerminalAsyncStatus,
} from '@/lib/copilot/async-runs/lifecycle'
import {
claimCompletedAsyncToolCall,
getAsyncToolCall,
getAsyncToolCalls,
markAsyncToolDelivered,
releaseCompletedAsyncToolClaim,
updateRunStatus,
} from '@/lib/copilot/async-runs/repository'
import { SIM_AGENT_API_URL, SIM_AGENT_VERSION } from '@/lib/copilot/constants'
import {
isToolAvailableOnSimSide,
prepareExecutionContext,
} from '@/lib/copilot/orchestrator/tool-executor'
import {
type ExecutionContext,
isTerminalToolCallStatus,
type OrchestratorOptions,
type OrchestratorResult,
type SSEEvent,
type ToolCallState,
} from '@/lib/copilot/orchestrator/types'
import { env } from '@/lib/core/config/env'
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
import { buildToolCallSummaries, createStreamingContext, runStreamLoop } from './stream/core'
const logger = createLogger('CopilotOrchestrator')
/**
 * Decides whether an async tool ultimately succeeded, preferring the durable
 * row's status over the in-memory tool state.
 */
function didAsyncToolSucceed(input: {
  durableStatus?: string | null
  durableResult?: Record<string, unknown>
  durableError?: string | null
  toolStateStatus?: string | undefined
}) {
  const { durableStatus, durableResult, durableError, toolStateStatus } = input
  switch (durableStatus) {
    case ASYNC_TOOL_STATUS.completed:
      return true
    case ASYNC_TOOL_STATUS.failed:
    case ASYNC_TOOL_STATUS.cancelled:
      return false
    case ASYNC_TOOL_STATUS.delivered:
      // Delivered rows carry no explicit outcome; infer it from result/error.
      return inferDeliveredAsyncSuccess({
        result: durableResult,
        error: durableError,
      })
  }
  // No conclusive durable status: fall back to the in-memory tool state.
  // Anything other than an explicit 'success' counts as a failure.
  return toolStateStatus === 'success'
}
/** A pending tool call that has been evaluated for async-resume readiness. */
interface ReadyContinuationTool {
  toolCallId: string
  // In-memory state from the current stream, if the tool ran locally.
  toolState?: ToolCallState
  // Durable DB row for this tool call, if one exists.
  durableRow?: Awaited<ReturnType<typeof getAsyncToolCall>>
  // True when this worker must still claim the durable row before resuming.
  needsDurableClaim: boolean
  // True when the durable row is already claimed by this worker (e.g. a retry).
  alreadyClaimedByWorker: boolean
}
/** Options for orchestrateCopilotStream, extending the base orchestrator options. */
export interface OrchestrateStreamOptions extends OrchestratorOptions {
  userId: string
  workflowId?: string
  workspaceId?: string
  chatId?: string
  executionId?: string
  runId?: string
  /** Go-side route to proxy to. Defaults to '/api/copilot'. */
  goRoute?: string
}
/**
 * Drives a full copilot request against the Go backend: streams SSE, runs tool
 * calls, and — when the backend pauses for async tools — claims their durable
 * results and resumes via the checkpoint endpoint, looping until the stream
 * completes, the request is aborted, or an error occurs.
 *
 * Returns an OrchestratorResult summarizing content, tool calls, and errors;
 * on failure the error is reported via options.onError and a failed result is
 * returned rather than thrown.
 */
export async function orchestrateCopilotStream(
  requestPayload: Record<string, unknown>,
  options: OrchestrateStreamOptions
): Promise<OrchestratorResult> {
  const {
    userId,
    workflowId,
    workspaceId,
    chatId,
    executionId,
    runId,
    goRoute = '/api/copilot',
  } = options
  const userTimezone =
    typeof requestPayload?.userTimezone === 'string' ? requestPayload.userTimezone : undefined
  // Build the tool-execution context. Without a workflow we still need the
  // user's decrypted env vars so Sim-side tools can run.
  let execContext: ExecutionContext
  if (workflowId) {
    execContext = await prepareExecutionContext(userId, workflowId, chatId)
  } else {
    const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId)
    execContext = {
      userId,
      workflowId: '',
      workspaceId,
      chatId,
      decryptedEnvVars,
    }
  }
  if (userTimezone) {
    execContext.userTimezone = userTimezone
  }
  execContext.executionId = executionId
  execContext.runId = runId
  execContext.abortSignal = options.abortSignal
  execContext.userStopSignal = options.userStopSignal
  // Reuse the caller-provided messageId when present so logs/claims correlate.
  const payloadMsgId = requestPayload?.messageId
  const messageId = typeof payloadMsgId === 'string' ? payloadMsgId : crypto.randomUUID()
  execContext.messageId = messageId
  const context = createStreamingContext({
    chatId,
    executionId,
    runId,
    messageId,
  })
  // Unique worker identity used to claim durable async tool rows for resume.
  const continuationWorkerId = `sim-resume:${crypto.randomUUID()}`
  const reqLogger = logger.withMetadata({ requestId: context.requestId, messageId })
  // Claims held by this worker that have not yet been delivered to Go; released
  // in the catch block if delivery fails.
  let claimedToolCallIds: string[] = []
  let claimedByWorkerId: string | null = null
  reqLogger.info('Starting copilot orchestration', {
    goRoute,
    workflowId,
    workspaceId,
    chatId,
    executionId,
    runId,
    hasUserTimezone: Boolean(userTimezone),
  })
  try {
    let route = goRoute
    let payload = requestPayload
    const callerOnEvent = options.onEvent
    // Outer loop: one iteration per Go stream — the initial request, then one
    // per async-tool resume.
    for (;;) {
      context.streamComplete = false
      reqLogger.info('Starting orchestration loop iteration', {
        route,
        hasPendingAsyncContinuation: Boolean(context.awaitingAsyncContinuation),
        claimedToolCallCount: claimedToolCallIds.length,
      })
      const loopOptions = {
        ...options,
        // Wrap the caller's onEvent to detect the async-pause marker on the
        // terminal 'done' event and flag the run as paused.
        onEvent: async (event: SSEEvent) => {
          if (event.type === 'done') {
            const d = (event.data ?? {}) as Record<string, unknown>
            const response = (d.response ?? {}) as Record<string, unknown>
            if (response.async_pause) {
              reqLogger.info('Detected async pause from copilot backend', {
                route,
                checkpointId:
                  typeof (response.async_pause as Record<string, unknown>)?.checkpointId ===
                  'string'
                    ? (response.async_pause as Record<string, unknown>).checkpointId
                    : undefined,
              })
              if (runId) {
                await updateRunStatus(runId, 'paused_waiting_for_tool').catch(() => {})
              }
            }
          }
          await callerOnEvent?.(event)
        },
      }
      await runStreamLoop(
        `${SIM_AGENT_API_URL}${route}`,
        {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json',
            ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}),
            'X-Client-Version': SIM_AGENT_VERSION,
          },
          body: JSON.stringify(payload),
        },
        context,
        execContext,
        loopOptions
      )
      reqLogger.info('Completed orchestration loop iteration', {
        route,
        streamComplete: context.streamComplete,
        wasAborted: context.wasAborted,
        hasAsyncContinuation: Boolean(context.awaitingAsyncContinuation),
        errorCount: context.errors.length,
      })
      // The previous resume's claims were delivered to Go by that stream:
      // mark the durable rows delivered and drop our claim bookkeeping.
      if (claimedToolCallIds.length > 0) {
        reqLogger.info('Marking async tool calls as delivered', {
          toolCallIds: claimedToolCallIds,
        })
        await Promise.all(
          claimedToolCallIds.map((toolCallId) =>
            markAsyncToolDelivered(toolCallId).catch(() => null)
          )
        )
        claimedToolCallIds = []
        claimedByWorkerId = null
      }
      // Abort: cancel anything still in flight locally and stop looping.
      if (options.abortSignal?.aborted || context.wasAborted) {
        reqLogger.info('Stopping orchestration because request was aborted', {
          pendingToolCallCount: Array.from(context.toolCalls.values()).filter(
            (toolCall) => toolCall.status === 'pending' || toolCall.status === 'executing'
          ).length,
        })
        for (const [toolCallId, toolCall] of context.toolCalls) {
          if (toolCall.status === 'pending' || toolCall.status === 'executing') {
            toolCall.status = 'cancelled'
            toolCall.endTime = Date.now()
            toolCall.error = 'Stopped by user'
          }
        }
        context.awaitingAsyncContinuation = undefined
        break
      }
      const continuation = context.awaitingAsyncContinuation
      if (!continuation) {
        reqLogger.info('No async continuation pending; finishing orchestration')
        break
      }
      let resumeReady = false
      let resumeRetries = 0
      reqLogger.info('Processing async continuation', {
        checkpointId: continuation.checkpointId,
        runId: continuation.runId,
        pendingToolCallIds: continuation.pendingToolCallIds,
      })
      // Inner loop: retry (bounded) until every pending tool call is terminal
      // and claimed by this worker, then build the resume payload.
      for (;;) {
        claimedToolCallIds = []
        claimedByWorkerId = null
        const resumeWorkerId = continuationWorkerId
        const readyTools: ReadyContinuationTool[] = []
        const localPendingPromises: Promise<unknown>[] = []
        const missingToolCallIds: string[] = []
        // Classify each pending tool call: still running locally, terminal in
        // the durable store, Go-handled (no Sim row), or not yet ready.
        for (const toolCallId of continuation.pendingToolCallIds) {
          const durableRow = await getAsyncToolCall(toolCallId).catch(() => null)
          const localPendingPromise = context.pendingToolPromises.get(toolCallId)
          const toolState = context.toolCalls.get(toolCallId)
          if (localPendingPromise) {
            localPendingPromises.push(localPendingPromise)
            reqLogger.info('Waiting for local async tool completion before retrying resume claim', {
              toolCallId,
              runId: continuation.runId,
              workerId: resumeWorkerId,
            })
            continue
          }
          if (durableRow && isTerminalAsyncStatus(durableRow.status)) {
            // Another worker's claim means we must not resume with this row.
            if (durableRow.claimedBy && durableRow.claimedBy !== resumeWorkerId) {
              missingToolCallIds.push(toolCallId)
              reqLogger.warn(
                'Async tool continuation is waiting on a claim held by another worker',
                {
                  toolCallId,
                  runId: continuation.runId,
                  workerId: resumeWorkerId,
                  claimedBy: durableRow.claimedBy,
                }
              )
              continue
            }
            readyTools.push({
              toolCallId,
              toolState,
              durableRow,
              needsDurableClaim: durableRow.claimedBy !== resumeWorkerId,
              alreadyClaimedByWorker: durableRow.claimedBy === resumeWorkerId,
            })
            continue
          }
          if (
            !durableRow &&
            toolState &&
            isTerminalToolCallStatus(toolState.status) &&
            !isToolAvailableOnSimSide(toolState.name)
          ) {
            reqLogger.info('Including Go-handled tool in resume payload (no Sim-side row)', {
              toolCallId,
              toolName: toolState.name,
              status: toolState.status,
              runId: continuation.runId,
            })
            readyTools.push({
              toolCallId,
              toolState,
              needsDurableClaim: false,
              alreadyClaimedByWorker: false,
            })
            continue
          }
          reqLogger.warn('Skipping already-claimed or missing async tool resume', {
            toolCallId,
            runId: continuation.runId,
            durableStatus: durableRow?.status,
            toolStateStatus: toolState?.status,
          })
          missingToolCallIds.push(toolCallId)
        }
        // Local tools still running: wait them out, then re-classify.
        if (localPendingPromises.length > 0) {
          reqLogger.info('Waiting for local pending async tools before resuming continuation', {
            checkpointId: continuation.checkpointId,
            pendingPromiseCount: localPendingPromises.length,
          })
          await Promise.allSettled(localPendingPromises)
          continue
        }
        // Some tool calls were not ready: back off and retry up to 3 times.
        if (missingToolCallIds.length > 0) {
          if (resumeRetries < 3) {
            resumeRetries++
            reqLogger.info('Retrying async resume after some tool calls were not yet ready', {
              checkpointId: continuation.checkpointId,
              runId: continuation.runId,
              workerId: resumeWorkerId,
              retry: resumeRetries,
              missingToolCallIds,
            })
            await new Promise((resolve) => setTimeout(resolve, 250 * resumeRetries))
            continue
          }
          reqLogger.error(
            'Async continuation failed because pending tool calls never became ready',
            {
              checkpointId: continuation.checkpointId,
              runId: continuation.runId,
              missingToolCallIds,
            }
          )
          throw new Error(
            `Failed to resume async tool continuation: pending tool calls were not ready (${missingToolCallIds.join(', ')})`
          )
        }
        if (readyTools.length === 0) {
          if (resumeRetries < 3 && continuation.pendingToolCallIds.length > 0) {
            resumeRetries++
            reqLogger.info('Retrying async resume because no tool calls were ready yet', {
              checkpointId: continuation.checkpointId,
              runId: continuation.runId,
              workerId: resumeWorkerId,
              retry: resumeRetries,
            })
            await new Promise((resolve) => setTimeout(resolve, 250 * resumeRetries))
            continue
          }
          reqLogger.error('Async continuation failed because no tool calls were ready', {
            checkpointId: continuation.checkpointId,
            runId: continuation.runId,
            requestedToolCallIds: continuation.pendingToolCallIds,
          })
          throw new Error('Failed to resume async tool continuation: no tool calls were ready')
        }
        // Acquire durable claims; on contention, release what we just took so
        // another worker can win, then back off and retry.
        const claimCandidates = readyTools.filter((tool) => tool.needsDurableClaim)
        const newlyClaimedToolCallIds: string[] = []
        const claimFailures: string[] = []
        for (const tool of claimCandidates) {
          const claimed = await claimCompletedAsyncToolCall(tool.toolCallId, resumeWorkerId).catch(
            () => null
          )
          if (!claimed) {
            claimFailures.push(tool.toolCallId)
            continue
          }
          newlyClaimedToolCallIds.push(tool.toolCallId)
        }
        if (claimFailures.length > 0) {
          if (newlyClaimedToolCallIds.length > 0) {
            reqLogger.info('Releasing async tool claims after claim contention during resume', {
              checkpointId: continuation.checkpointId,
              runId: continuation.runId,
              workerId: resumeWorkerId,
              newlyClaimedToolCallIds,
              claimFailures,
            })
            await Promise.all(
              newlyClaimedToolCallIds.map((toolCallId) =>
                releaseCompletedAsyncToolClaim(toolCallId, resumeWorkerId).catch(() => null)
              )
            )
          }
          if (resumeRetries < 3) {
            resumeRetries++
            reqLogger.info('Retrying async resume after claim contention', {
              checkpointId: continuation.checkpointId,
              runId: continuation.runId,
              workerId: resumeWorkerId,
              retry: resumeRetries,
              claimFailures,
            })
            await new Promise((resolve) => setTimeout(resolve, 250 * resumeRetries))
            continue
          }
          reqLogger.error('Async continuation failed because tool claims could not be acquired', {
            checkpointId: continuation.checkpointId,
            runId: continuation.runId,
            claimFailures,
          })
          throw new Error(
            `Failed to resume async tool continuation: unable to claim tool calls (${claimFailures.join(', ')})`
          )
        }
        claimedToolCallIds = [
          ...readyTools
            .filter((tool) => tool.alreadyClaimedByWorker)
            .map((tool) => tool.toolCallId),
          ...newlyClaimedToolCallIds,
        ]
        claimedByWorkerId = claimedToolCallIds.length > 0 ? resumeWorkerId : null
        reqLogger.info('Resuming async tool continuation', {
          checkpointId: continuation.checkpointId,
          runId: continuation.runId,
          workerId: resumeWorkerId,
          toolCallIds: readyTools.map((tool) => tool.toolCallId),
        })
        // Re-fetch durable rows in bulk so results reflect the latest state.
        const durableRows = await getAsyncToolCalls(
          readyTools.map((tool) => tool.toolCallId)
        ).catch(() => [])
        const durableByToolCallId = new Map(durableRows.map((row) => [row.toolCallId, row]))
        // Build the per-tool result payloads for the resume request, preferring
        // durable result/error data over in-memory state.
        const results = await Promise.all(
          readyTools.map(async (tool) => {
            const durable = durableByToolCallId.get(tool.toolCallId) || tool.durableRow
            const durableStatus = durable?.status
            const durableResult =
              durable?.result && typeof durable.result === 'object'
                ? (durable.result as Record<string, unknown>)
                : undefined
            const success = didAsyncToolSucceed({
              durableStatus,
              durableResult,
              durableError: durable?.error,
              toolStateStatus: tool.toolState?.status,
            })
            const data =
              durableResult ||
              (tool.toolState?.result?.output as Record<string, unknown> | undefined) ||
              (success
                ? { message: 'Tool completed' }
                : {
                    error: durable?.error || tool.toolState?.error || 'Tool failed',
                  })
            if (
              durableStatus &&
              !isTerminalAsyncStatus(durableStatus) &&
              !isDeliveredAsyncStatus(durableStatus)
            ) {
              reqLogger.warn(
                'Async tool row was claimed for resume without terminal durable state',
                {
                  toolCallId: tool.toolCallId,
                  status: durableStatus,
                }
              )
            }
            return {
              callId: tool.toolCallId,
              name: durable?.toolName || tool.toolState?.name || '',
              data,
              success,
            }
          })
        )
        // Next outer-loop iteration posts the results to the resume endpoint.
        context.awaitingAsyncContinuation = undefined
        route = '/api/tools/resume'
        payload = {
          checkpointId: continuation.checkpointId,
          results,
        }
        reqLogger.info('Prepared async continuation payload for resume endpoint', {
          route,
          checkpointId: continuation.checkpointId,
          resultCount: results.length,
        })
        resumeReady = true
        break
      }
      if (!resumeReady) {
        reqLogger.warn('Async continuation loop exited without resume payload', {
          checkpointId: continuation.checkpointId,
          runId: continuation.runId,
        })
        break
      }
    }
    const result: OrchestratorResult = {
      success: context.errors.length === 0 && !context.wasAborted,
      content: context.accumulatedContent,
      contentBlocks: context.contentBlocks,
      toolCalls: buildToolCallSummaries(context),
      chatId: context.chatId,
      requestId: context.requestId,
      errors: context.errors.length ? context.errors : undefined,
      usage: context.usage,
      cost: context.cost,
    }
    reqLogger.info('Completing copilot orchestration', {
      success: result.success,
      chatId: result.chatId,
      hasRequestId: Boolean(result.requestId),
      errorCount: result.errors?.length || 0,
      toolCallCount: result.toolCalls.length,
    })
    await options.onComplete?.(result)
    return result
  } catch (error) {
    const err = error instanceof Error ? error : new Error('Copilot orchestration failed')
    // Release any claims we acquired but never delivered so another worker can
    // pick up the resume.
    if (claimedToolCallIds.length > 0 && claimedByWorkerId) {
      reqLogger.warn('Releasing async tool claims after delivery failure', {
        toolCallIds: claimedToolCallIds,
        workerId: claimedByWorkerId,
      })
      await Promise.all(
        claimedToolCallIds.map((toolCallId) =>
          releaseCompletedAsyncToolClaim(toolCallId, claimedByWorkerId!).catch(() => null)
        )
      )
    }
    reqLogger.error('Copilot orchestration failed', {
      error: err.message,
    })
    await options.onError?.(err)
    return {
      success: false,
      content: '',
      contentBlocks: [],
      toolCalls: [],
      chatId: context.chatId,
      error: err.message,
    }
  }
}

View File

@@ -1,293 +0,0 @@
/**
 * @vitest-environment node
 */
import { loggerMock } from '@sim/testing'
import { beforeEach, describe, expect, it, vi } from 'vitest'
vi.mock('@sim/logger', () => loggerMock)
// vi.hoisted so these spies exist when the vi.mock factories below are hoisted
// above the imports by vitest.
const { executeToolServerSide, markToolComplete, isToolAvailableOnSimSide } = vi.hoisted(() => ({
  executeToolServerSide: vi.fn(),
  markToolComplete: vi.fn(),
  isToolAvailableOnSimSide: vi.fn().mockReturnValue(true),
}))
const { upsertAsyncToolCall } = vi.hoisted(() => ({
  upsertAsyncToolCall: vi.fn(),
}))
// Route all tool-executor calls through the hoisted spies above.
vi.mock('@/lib/copilot/orchestrator/tool-executor', () => ({
  executeToolServerSide,
  markToolComplete,
  isToolAvailableOnSimSide,
}))
// Keep the real repository module but stub the upsert used by tool_call.
vi.mock('@/lib/copilot/async-runs/repository', async () => {
  const actual = await vi.importActual<typeof import('@/lib/copilot/async-runs/repository')>(
    '@/lib/copilot/async-runs/repository'
  )
  return {
    ...actual,
    upsertAsyncToolCall,
  }
})
import { sseHandlers } from '@/lib/copilot/orchestrator/sse/handlers'
import type { ExecutionContext, StreamingContext } from '@/lib/copilot/orchestrator/types'
// Lifecycle tests for the SSE tool_call / tool_result handlers: execution,
// dedupe, cancellation, and races between local execution and server results.
describe('sse-handlers tool lifecycle', () => {
  let context: StreamingContext
  let execContext: ExecutionContext
  beforeEach(() => {
    vi.clearAllMocks()
    upsertAsyncToolCall.mockResolvedValue(null)
    // Fresh minimal streaming context per test; no prior tool state.
    context = {
      chatId: undefined,
      messageId: 'msg-1',
      accumulatedContent: '',
      contentBlocks: [],
      toolCalls: new Map(),
      pendingToolPromises: new Map(),
      currentThinkingBlock: null,
      isInThinkingBlock: false,
      subAgentParentToolCallId: undefined,
      subAgentParentStack: [],
      subAgentContent: {},
      subAgentToolCalls: {},
      pendingContent: '',
      streamComplete: false,
      wasAborted: false,
      errors: [],
    }
    execContext = {
      userId: 'user-1',
      workflowId: 'workflow-1',
    }
  })
  // Happy path: tool executes server-side, result event emitted, Go notified.
  it('executes tool_call and emits tool_result + mark-complete', async () => {
    executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } })
    markToolComplete.mockResolvedValueOnce(true)
    const onEvent = vi.fn()
    await sseHandlers.tool_call(
      {
        type: 'tool_call',
        data: { id: 'tool-1', name: 'read', arguments: { workflowId: 'workflow-1' } },
      } as any,
      context,
      execContext,
      { onEvent, interactive: false, timeout: 1000 }
    )
    // tool_call fires execution without awaiting (fire-and-forget for parallel execution),
    // so we flush pending microtasks before asserting
    await new Promise((resolve) => setTimeout(resolve, 0))
    expect(executeToolServerSide).toHaveBeenCalledTimes(1)
    expect(markToolComplete).toHaveBeenCalledTimes(1)
    expect(onEvent).toHaveBeenCalledWith(
      expect.objectContaining({
        type: 'tool_result',
        toolCallId: 'tool-1',
        success: true,
      })
    )
    const updated = context.toolCalls.get('tool-1')
    expect(updated?.status).toBe('success')
    expect(updated?.result?.output).toEqual({ ok: true })
  })
  // A repeated tool_call for an already-finished id must not re-execute.
  it('skips duplicate tool_call after result', async () => {
    executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } })
    markToolComplete.mockResolvedValueOnce(true)
    const event = {
      type: 'tool_call',
      data: { id: 'tool-dup', name: 'read', arguments: { workflowId: 'workflow-1' } },
    }
    await sseHandlers.tool_call(event as any, context, execContext, { interactive: false })
    await new Promise((resolve) => setTimeout(resolve, 0))
    await sseHandlers.tool_call(event as any, context, execContext, { interactive: false })
    expect(executeToolServerSide).toHaveBeenCalledTimes(1)
    expect(markToolComplete).toHaveBeenCalledTimes(1)
  })
  // Abort while the tool is executing: expect a 499 mark-complete + cancelled state.
  it('marks an in-flight tool as cancelled when aborted mid-execution', async () => {
    const abortController = new AbortController()
    const userStopController = new AbortController()
    execContext.abortSignal = abortController.signal
    execContext.userStopSignal = userStopController.signal
    executeToolServerSide.mockImplementationOnce(
      () =>
        new Promise((resolve) => {
          setTimeout(() => resolve({ success: true, output: { ok: true } }), 0)
        })
    )
    markToolComplete.mockResolvedValue(true)
    await sseHandlers.tool_call(
      {
        type: 'tool_call',
        data: { id: 'tool-cancel', name: 'read', arguments: { workflowId: 'workflow-1' } },
      } as any,
      context,
      execContext,
      {
        interactive: false,
        timeout: 1000,
        abortSignal: abortController.signal,
        userStopSignal: userStopController.signal,
      }
    )
    userStopController.abort()
    abortController.abort()
    await new Promise((resolve) => setTimeout(resolve, 10))
    expect(markToolComplete).toHaveBeenCalledWith(
      'tool-cancel',
      'read',
      499,
      'Request aborted during tool execution',
      { cancelled: true },
      'msg-1'
    )
    const updated = context.toolCalls.get('tool-cancel')
    expect(updated?.status).toBe('cancelled')
  })
  // A duplicate tool_call while the first is still running must reuse the
  // original pending promise rather than starting a second execution.
  it('does not replace an in-flight pending promise on duplicate tool_call', async () => {
    let resolveTool: ((value: { success: boolean; output: { ok: boolean } }) => void) | undefined
    executeToolServerSide.mockImplementationOnce(
      () =>
        new Promise((resolve) => {
          resolveTool = resolve
        })
    )
    markToolComplete.mockResolvedValueOnce(true)
    const event = {
      type: 'tool_call',
      data: { id: 'tool-inflight', name: 'read', arguments: { workflowId: 'workflow-1' } },
    }
    await sseHandlers.tool_call(event as any, context, execContext, { interactive: false })
    await new Promise((resolve) => setTimeout(resolve, 0))
    const firstPromise = context.pendingToolPromises.get('tool-inflight')
    expect(firstPromise).toBeDefined()
    await sseHandlers.tool_call(event as any, context, execContext, { interactive: false })
    expect(executeToolServerSide).toHaveBeenCalledTimes(1)
    expect(context.pendingToolPromises.get('tool-inflight')).toBe(firstPromise)
    resolveTool?.({ success: true, output: { ok: true } })
    await new Promise((resolve) => setTimeout(resolve, 0))
    expect(context.pendingToolPromises.has('tool-inflight')).toBe(false)
    expect(markToolComplete).toHaveBeenCalledTimes(1)
  })
  // Durable-row bookkeeping failures must not block tool execution.
  it('still executes the tool when async row upsert fails', async () => {
    upsertAsyncToolCall.mockRejectedValueOnce(new Error('db down'))
    executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } })
    markToolComplete.mockResolvedValueOnce(true)
    await sseHandlers.tool_call(
      {
        type: 'tool_call',
        data: { id: 'tool-upsert-fail', name: 'read', arguments: { workflowId: 'workflow-1' } },
      } as any,
      context,
      execContext,
      { onEvent: vi.fn(), interactive: false, timeout: 1000 }
    )
    await new Promise((resolve) => setTimeout(resolve, 0))
    expect(executeToolServerSide).toHaveBeenCalledTimes(1)
    expect(markToolComplete).toHaveBeenCalledTimes(1)
    expect(context.toolCalls.get('tool-upsert-fail')?.status).toBe('success')
  })
  // Race: server's tool_result lands while the upsert (pre-execution) is still
  // pending — local execution must be skipped entirely.
  it('does not execute a tool if a terminal tool_result arrives before local execution starts', async () => {
    let resolveUpsert: ((value: null) => void) | undefined
    upsertAsyncToolCall.mockImplementationOnce(
      () =>
        new Promise((resolve) => {
          resolveUpsert = resolve
        })
    )
    const onEvent = vi.fn()
    await sseHandlers.tool_call(
      {
        type: 'tool_call',
        data: { id: 'tool-race', name: 'read', arguments: { workflowId: 'workflow-1' } },
      } as any,
      context,
      execContext,
      { onEvent, interactive: false, timeout: 1000 }
    )
    await sseHandlers.tool_result(
      {
        type: 'tool_result',
        toolCallId: 'tool-race',
        data: { id: 'tool-race', success: true, result: { ok: true } },
      } as any,
      context,
      execContext,
      { onEvent, interactive: false, timeout: 1000 }
    )
    resolveUpsert?.(null)
    await new Promise((resolve) => setTimeout(resolve, 0))
    expect(executeToolServerSide).not.toHaveBeenCalled()
    expect(markToolComplete).not.toHaveBeenCalled()
    expect(context.toolCalls.get('tool-race')?.status).toBe('success')
    expect(context.toolCalls.get('tool-race')?.result?.output).toEqual({ ok: true })
  })
  // Out-of-order delivery: a tool_result preceding its tool_call must win.
  it('does not execute a tool if a tool_result arrives before the tool_call event', async () => {
    const onEvent = vi.fn()
    await sseHandlers.tool_result(
      {
        type: 'tool_result',
        toolCallId: 'tool-early-result',
        toolName: 'read',
        data: { id: 'tool-early-result', name: 'read', success: true, result: { ok: true } },
      } as any,
      context,
      execContext,
      { onEvent, interactive: false, timeout: 1000 }
    )
    await sseHandlers.tool_call(
      {
        type: 'tool_call',
        data: { id: 'tool-early-result', name: 'read', arguments: { workflowId: 'workflow-1' } },
      } as any,
      context,
      execContext,
      { onEvent, interactive: false, timeout: 1000 }
    )
    await new Promise((resolve) => setTimeout(resolve, 0))
    expect(executeToolServerSide).not.toHaveBeenCalled()
    expect(markToolComplete).not.toHaveBeenCalled()
    expect(context.toolCalls.get('tool-early-result')?.status).toBe('success')
  })
})

View File

@@ -1,852 +0,0 @@
import { createLogger } from '@sim/logger'
import { upsertAsyncToolCall } from '@/lib/copilot/async-runs/repository'
import { STREAM_TIMEOUT_MS } from '@/lib/copilot/constants'
import {
asRecord,
getEventData,
markToolResultSeen,
wasToolResultSeen,
} from '@/lib/copilot/orchestrator/sse/utils'
import {
isToolAvailableOnSimSide,
markToolComplete,
} from '@/lib/copilot/orchestrator/tool-executor'
import type {
ContentBlock,
ExecutionContext,
OrchestratorOptions,
SSEEvent,
StreamingContext,
ToolCallState,
} from '@/lib/copilot/orchestrator/types'
import { isWorkflowToolName } from '@/lib/copilot/workflow-tools'
import { executeToolAndReport, waitForToolCompletion } from './tool-execution'
const logger = createLogger('CopilotSseHandlers')
/**
 * Builds an AbortSignal that fires when either the main abort signal OR
 * the client-disconnect signal fires. Used for client-executable tool waits
 * so the orchestrator doesn't block for the full timeout when the browser dies.
 *
 * Returns undefined only when neither signal is provided.
 */
function buildClientToolAbortSignal(options: OrchestratorOptions): AbortSignal | undefined {
  const { abortSignal, clientDisconnectedSignal } = options
  if (!clientDisconnectedSignal || clientDisconnectedSignal.aborted) {
    return clientDisconnectedSignal?.aborted ? AbortSignal.abort() : abortSignal
  }
  if (!abortSignal) return clientDisconnectedSignal
  // BUG FIX: 'abort' listeners registered on an already-aborted signal never
  // fire, so previously an already-aborted abortSignal combined with a live
  // clientDisconnectedSignal produced a signal that never aborted. Short-circuit
  // that case by returning the (already aborted) main signal directly.
  if (abortSignal.aborted) return abortSignal
  const combined = new AbortController()
  const fire = () => combined.abort()
  abortSignal.addEventListener('abort', fire, { once: true })
  clientDisconnectedSignal.addEventListener('abort', fire, { once: true })
  return combined.signal
}
/**
 * Tracks an in-flight async tool promise in the streaming context and removes
 * it on settlement — but only if it is still the registered promise for that
 * tool call (a newer registration must not be clobbered).
 */
function registerPendingToolPromise(
  context: StreamingContext,
  toolCallId: string,
  pendingPromise: Promise<{ status: string; message?: string; data?: Record<string, unknown> }>
) {
  context.pendingToolPromises.set(toolCallId, pendingPromise)
  const removeIfCurrent = () => {
    const registered = context.pendingToolPromises.get(toolCallId)
    if (registered === pendingPromise) {
      context.pendingToolPromises.delete(toolCallId)
    }
  }
  pendingPromise.finally(removeIfCurrent)
}
/**
 * When the Sim→Go stream is aborted, avoid starting server-side tool work and
 * unblock the Go async waiter with a terminal 499 completion.
 *
 * Returns true when the tool was cancelled (caller must not execute it).
 */
function abortPendingToolIfStreamDead(
  toolCall: ToolCallState,
  toolCallId: string,
  options: OrchestratorOptions,
  context: StreamingContext
): boolean {
  const streamDead = Boolean(options.abortSignal?.aborted) || context.wasAborted
  if (!streamDead) return false
  // Move to a terminal local state first so nothing else tries to run it.
  toolCall.status = 'cancelled'
  toolCall.endTime = Date.now()
  markToolResultSeen(toolCallId)
  // Fire-and-forget: report the 499 back to Go; failures are logged, not thrown.
  markToolComplete(
    toolCall.id,
    toolCall.name,
    499,
    'Request aborted before tool execution',
    { cancelled: true },
    context.messageId
  ).catch((error) => {
    logger
      .withMetadata({ messageId: context.messageId })
      .error('markToolComplete fire-and-forget failed (stream aborted)', {
        toolCallId: toolCall.id,
        error: error instanceof Error ? error.message : String(error),
      })
  })
  return true
}
/**
 * Extract the `ui` object from an SSE event. The server enriches
 * tool_call events with `ui: { requiresConfirmation, clientExecutable, ... }`.
 * Any absent or non-true flag reads as false.
 */
function getEventUI(event: SSEEvent): {
  requiresConfirmation: boolean
  clientExecutable: boolean
  internal: boolean
  hidden: boolean
} {
  const ui = asRecord((event as unknown as Record<string, unknown>).ui)
  // Strict `=== true` so truthy-but-wrong values (e.g. 'yes', 1) stay false.
  const flag = (key: string) => ui[key] === true
  return {
    requiresConfirmation: flag('requiresConfirmation'),
    clientExecutable: flag('clientExecutable'),
    internal: flag('internal'),
    hidden: flag('hidden'),
  }
}
/**
 * Handle the completion signal from a client-executable tool.
 * Shared by both the main and subagent tool_call handlers.
 *
 * Maps the client-reported status onto a terminal tool state, reports the
 * outcome to Go via a fire-and-forget markToolComplete, and marks the server's
 * eventual tool_result as already seen so it is not double-applied.
 */
function handleClientCompletion(
  toolCall: ToolCallState,
  toolCallId: string,
  completion: { status: string; message?: string; data?: Record<string, unknown> } | null,
  context: StreamingContext
): void {
  // Shared failure logger for the fire-and-forget completion call; `extra`
  // lets the generic path include the tool name in its log payload.
  const logCompleteFailure =
    (label: string, extra?: Record<string, unknown>) => (err: unknown) => {
      logger
        .withMetadata({ messageId: context.messageId })
        .error(`markToolComplete fire-and-forget failed (${label})`, {
          toolCallId: toolCall.id,
          ...extra,
          error: err instanceof Error ? err.message : String(err),
        })
    }
  // Single terminal path: set local state, notify Go, mark result seen.
  const finish = (
    status: ToolCallState['status'],
    code: number,
    message: string,
    data: Record<string, unknown> | undefined,
    label: string,
    extra?: Record<string, unknown>
  ): void => {
    toolCall.status = status
    toolCall.endTime = Date.now()
    markToolComplete(toolCall.id, toolCall.name, code, message, data, context.messageId).catch(
      logCompleteFailure(label, extra)
    )
    markToolResultSeen(toolCallId)
  }
  if (completion?.status === 'background') {
    finish(
      'skipped',
      202,
      completion.message || 'Tool execution moved to background',
      { background: true },
      'client background'
    )
    return
  }
  if (completion?.status === 'rejected') {
    finish(
      'rejected',
      400,
      completion.message || 'Tool execution rejected',
      undefined,
      'client rejected'
    )
    return
  }
  if (completion?.status === 'cancelled') {
    finish(
      'cancelled',
      499,
      completion.message || 'Workflow execution was stopped manually by the user.',
      completion.data,
      'client cancelled'
    )
    return
  }
  // Anything else (including a missing completion) is success or failure.
  const succeeded = completion?.status === 'success'
  finish(
    succeeded ? 'success' : 'error',
    succeeded ? 200 : 500,
    completion?.message || (succeeded ? 'Tool completed' : 'Tool failed or timed out'),
    completion?.data,
    'client completion',
    { toolName: toolCall.name }
  )
}
/**
 * Emit a synthetic tool_result SSE event to the client after a client-executable
 * tool completes. The server's actual tool_result is skipped (markToolResultSeen),
 * so the client would never learn the outcome without this.
 */
async function emitSyntheticToolResult(
  toolCallId: string,
  toolName: string,
  completion: { status: string; message?: string; data?: Record<string, unknown> } | null,
  options: OrchestratorOptions,
  context: StreamingContext
): Promise<void> {
  const success = completion?.status === 'success'
  // Cancellations carry explicit markers so the client can render them distinctly.
  const resultPayload =
    completion?.status === 'cancelled'
      ? { ...completion?.data, reason: 'user_cancelled', cancelledByUser: true }
      : completion?.data
  const syntheticEvent = {
    type: 'tool_result',
    toolCallId,
    toolName,
    success,
    result: resultPayload,
    error: success ? undefined : completion?.message,
  } as SSEEvent
  try {
    await options.onEvent?.(syntheticEvent)
  } catch (error) {
    logger
      .withMetadata({ messageId: context.messageId })
      .warn('Failed to emit synthetic tool_result', {
        toolCallId,
        toolName,
        error: error instanceof Error ? error.message : String(error),
      })
  }
}
// Normalization + dedupe helpers live in sse-utils to keep server/client in sync.
/**
 * Infer whether a tool_result payload represents success. An explicit
 * `success` flag (top-level or nested under `result`) wins; otherwise the
 * absence of an error is treated as success.
 */
function inferToolSuccess(data: Record<string, unknown> | undefined): {
  success: boolean
  hasResultData: boolean
  hasError: boolean
} {
  const nestedResult = asRecord(data?.result)
  const explicitValue = data?.success ?? nestedResult.success
  const isExplicit = data?.success !== undefined || nestedResult.success !== undefined
  const hasResultData = data?.result !== undefined || data?.data !== undefined
  const hasError = Boolean(data?.error) || Boolean(nestedResult.error)
  return {
    success: isExplicit ? Boolean(explicitValue) : !hasError,
    hasResultData,
    hasError,
  }
}
/**
 * Look up the tool-call state for `toolCallId`, creating and registering a
 * pending placeholder (plus its transcript content block) when none exists yet.
 */
function ensureTerminalToolCallState(
  context: StreamingContext,
  toolCallId: string,
  toolName: string
): ToolCallState {
  const known = context.toolCalls.get(toolCallId)
  if (known) {
    return known
  }
  const placeholder: ToolCallState = {
    id: toolCallId,
    name: toolName || 'unknown_tool',
    status: 'pending',
    startTime: Date.now(),
  }
  context.toolCalls.set(toolCallId, placeholder)
  addContentBlock(context, { type: 'tool_call', toolCall: placeholder })
  return placeholder
}
/**
 * Signature shared by every SSE event handler in this module. Handlers mutate
 * the streaming/execution contexts in place; tool-dispatching handlers are
 * async and are awaited by the dispatcher.
 */
export type SSEHandler = (
  event: SSEEvent,
  context: StreamingContext,
  execContext: ExecutionContext,
  options: OrchestratorOptions
) => void | Promise<void>
/** Append a content block to the transcript, stamping it with the current time. */
function addContentBlock(context: StreamingContext, block: Omit<ContentBlock, 'timestamp'>): void {
  const stamped = { ...block, timestamp: Date.now() }
  context.contentBlocks.push(stamped)
}
/**
 * SSE event handlers for the main agent stream, keyed by event type. Each
 * handler mutates the StreamingContext (and sometimes the ExecutionContext)
 * in place. Tool execution is fired without awaiting so parallel tool calls
 * from the same LLM turn run concurrently.
 */
export const sseHandlers: Record<string, SSEHandler> = {
  // Capture the chat id assigned by the server and mirror it onto both contexts.
  chat_id: (event, context, execContext) => {
    const chatId = asRecord(event.data).chatId as string | undefined
    context.chatId = chatId
    if (chatId) {
      execContext.chatId = chatId
    }
  },
  // Record the upstream request/trace id so later logs can be correlated.
  request_id: (event, context) => {
    const rid = typeof event.data === 'string' ? event.data : undefined
    if (rid) {
      context.requestId = rid
      logger
        .withMetadata({ messageId: context.messageId })
        .info('Mapped copilot message to Go trace ID', {
          goTraceId: rid,
          chatId: context.chatId,
          executionId: context.executionId,
          runId: context.runId,
        })
    }
  },
  // Chat titles are handled elsewhere; nothing to track on the orchestrator side.
  title_updated: () => {},
  // Terminal result for a tool call: set success/error state, record output, and
  // mark the result as seen so late duplicate tool_call events are ignored.
  tool_result: (event, context) => {
    const data = getEventData(event)
    const toolCallId = event.toolCallId || (data?.id as string | undefined)
    if (!toolCallId) return
    const toolName =
      event.toolName ||
      (data?.name as string | undefined) ||
      context.toolCalls.get(toolCallId)?.name ||
      ''
    const current = ensureTerminalToolCallState(context, toolCallId, toolName)
    const { success, hasResultData, hasError } = inferToolSuccess(data)
    current.status = success ? 'success' : 'error'
    current.endTime = Date.now()
    if (hasResultData) {
      current.result = {
        success,
        output: data?.result || data?.data,
      }
    }
    if (hasError) {
      const resultObj = asRecord(data?.result)
      current.error = (data?.error || resultObj.error) as string | undefined
    }
    markToolResultSeen(toolCallId)
  },
  // Explicit tool failure: mark the call errored and record the message.
  tool_error: (event, context) => {
    const data = getEventData(event)
    const toolCallId = event.toolCallId || (data?.id as string | undefined)
    if (!toolCallId) return
    const toolName =
      event.toolName ||
      (data?.name as string | undefined) ||
      context.toolCalls.get(toolCallId)?.name ||
      ''
    const current = ensureTerminalToolCallState(context, toolCallId, toolName)
    current.status = 'error'
    current.error = (data?.error as string | undefined) || 'Tool execution failed'
    current.endTime = Date.now()
    markToolResultSeen(toolCallId)
  },
  tool_call_delta: () => {
    // Argument streaming delta — no action needed on orchestrator side
  },
  // A tool call is being generated; register a pending placeholder so the
  // transcript shows it before arguments are complete.
  tool_generating: (event, context) => {
    const data = getEventData(event)
    const toolCallId =
      event.toolCallId ||
      (data?.toolCallId as string | undefined) ||
      (data?.id as string | undefined)
    const toolName =
      event.toolName || (data?.toolName as string | undefined) || (data?.name as string | undefined)
    if (!toolCallId || !toolName) return
    if (!context.toolCalls.has(toolCallId)) {
      const toolCall = {
        id: toolCallId,
        name: toolName,
        status: 'pending' as const,
        startTime: Date.now(),
      }
      context.toolCalls.set(toolCallId, toolCall)
      addContentBlock(context, { type: 'tool_call', toolCall })
    }
  },
  // Full tool call: register state, then dispatch execution either server-side
  // or to the client, depending on tool availability and interactivity mode.
  tool_call: async (event, context, execContext, options) => {
    const toolData = getEventData(event) || ({} as Record<string, unknown>)
    const toolCallId = (toolData.id as string | undefined) || event.toolCallId
    const toolName = (toolData.name as string | undefined) || event.toolName
    if (!toolCallId || !toolName) return
    const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as
      | Record<string, unknown>
      | undefined
    const isPartial = toolData.partial === true
    const existing = context.toolCalls.get(toolCallId)
    // Already terminal (or past pending/executing): only backfill missing metadata.
    if (
      existing?.endTime ||
      (existing && existing.status !== 'pending' && existing.status !== 'executing')
    ) {
      if (!existing.name && toolName) {
        existing.name = toolName
      }
      if (!existing.params && args) {
        existing.params = args
      }
      return
    }
    if (existing) {
      if (args && !existing.params) existing.params = args
      // Ensure the transcript has a content block even if tool_generating was missed.
      if (
        !context.contentBlocks.some((b) => b.type === 'tool_call' && b.toolCall?.id === toolCallId)
      ) {
        addContentBlock(context, { type: 'tool_call', toolCall: existing })
      }
    } else {
      const created = {
        id: toolCallId,
        name: toolName,
        status: 'pending' as const,
        params: args,
        startTime: Date.now(),
      }
      context.toolCalls.set(toolCallId, created)
      addContentBlock(context, { type: 'tool_call', toolCall: created })
    }
    // Dispatch only on the final (non-partial) event, once, and never after a result.
    if (isPartial) return
    if (wasToolResultSeen(toolCallId)) return
    if (context.pendingToolPromises.has(toolCallId) || existing?.status === 'executing') {
      return
    }
    const toolCall = context.toolCalls.get(toolCallId)
    if (!toolCall) return
    const { clientExecutable, internal } = getEventUI(event)
    // Internal tools are executed upstream; nothing to dispatch here.
    if (internal) {
      return
    }
    if (!isToolAvailableOnSimSide(toolName) && !clientExecutable) {
      return
    }
    /**
     * Fire tool execution without awaiting so parallel tool calls from the
     * same LLM turn execute concurrently. executeToolAndReport is self-contained:
     * it updates tool state, calls markToolComplete, and emits result events.
     */
    const fireToolExecution = () => {
      const pendingPromise = (async () => {
        try {
          // Persist the async row first so the run survives a disconnect/resume.
          await upsertAsyncToolCall({
            runId: context.runId || crypto.randomUUID(),
            toolCallId,
            toolName,
            args,
          })
        } catch (err) {
          logger
            .withMetadata({ messageId: context.messageId })
            .warn('Failed to persist async tool row before execution', {
              toolCallId,
              toolName,
              error: err instanceof Error ? err.message : String(err),
            })
        }
        return executeToolAndReport(toolCallId, context, execContext, options)
      })().catch((err) => {
        logger
          .withMetadata({ messageId: context.messageId })
          .error('Parallel tool execution failed', {
            toolCallId,
            toolName,
            error: err instanceof Error ? err.message : String(err),
          })
        return {
          status: 'error',
          message: err instanceof Error ? err.message : String(err),
          data: { error: err instanceof Error ? err.message : String(err) },
        }
      })
      registerPendingToolPromise(context, toolCallId, pendingPromise)
    }
    // Non-interactive (headless) callers: auto-execute server-side and return.
    if (options.interactive === false) {
      if (options.autoExecuteTools !== false) {
        if (!abortPendingToolIfStreamDead(toolCall, toolCallId, options, context)) {
          fireToolExecution()
        }
      }
      return
    }
    // Client-executable tool: execute server-side if available, otherwise
    // delegate to the client (React UI) and wait for completion.
    // Workflow run tools are implemented on Sim for MCP/server callers but must
    // still run in the browser when clientExecutable so the workflow terminal
    // receives SSE block logs (executeWorkflowWithFullLogging).
    if (clientExecutable) {
      const delegateWorkflowRunToClient = isWorkflowToolName(toolName)
      if (isToolAvailableOnSimSide(toolName) && !delegateWorkflowRunToClient) {
        if (!abortPendingToolIfStreamDead(toolCall, toolCallId, options, context)) {
          fireToolExecution()
        }
      } else {
        toolCall.status = 'executing'
        await upsertAsyncToolCall({
          runId: context.runId || crypto.randomUUID(),
          toolCallId,
          toolName,
          args,
          status: 'running',
        }).catch((err) => {
          logger
            .withMetadata({ messageId: context.messageId })
            .warn('Failed to persist async tool row for client-executable tool', {
              toolCallId,
              toolName,
              error: err instanceof Error ? err.message : String(err),
            })
        })
        // Block until the client reports completion (or the wait is aborted / times out).
        const clientWaitSignal = buildClientToolAbortSignal(options)
        const completion = await waitForToolCompletion(
          toolCallId,
          options.timeout || STREAM_TIMEOUT_MS,
          clientWaitSignal
        )
        handleClientCompletion(toolCall, toolCallId, completion, context)
        await emitSyntheticToolResult(toolCallId, toolCall.name, completion, options, context)
      }
      return
    }
    if (options.autoExecuteTools !== false) {
      if (!abortPendingToolIfStreamDead(toolCall, toolCallId, options, context)) {
        fireToolExecution()
      }
    }
  },
  // Thinking/reasoning stream: phase markers open/close a thinking block,
  // other events append chunks to the currently open block.
  reasoning: (event, context) => {
    const d = asRecord(event.data)
    const phase = d.phase || asRecord(d.data).phase
    if (phase === 'start') {
      context.isInThinkingBlock = true
      context.currentThinkingBlock = {
        type: 'thinking',
        content: '',
        timestamp: Date.now(),
      }
      return
    }
    if (phase === 'end') {
      if (context.currentThinkingBlock) {
        context.contentBlocks.push(context.currentThinkingBlock)
      }
      context.isInThinkingBlock = false
      context.currentThinkingBlock = null
      return
    }
    const chunk = (d.data || d.content || event.content) as string | undefined
    if (!chunk || !context.currentThinkingBlock) return
    context.currentThinkingBlock.content = `${context.currentThinkingBlock.content || ''}${chunk}`
  },
  content: (event, context) => {
    // Server sends content as a plain string in event.data, not wrapped in an object.
    let chunk: string | undefined
    if (typeof event.data === 'string') {
      chunk = event.data
    } else {
      const d = asRecord(event.data)
      chunk = (d.content || d.data || event.content) as string | undefined
    }
    if (!chunk) return
    context.accumulatedContent += chunk
    addContentBlock(context, { type: 'text', content: chunk })
  },
  // End of stream: record async-pause checkpoints, usage, and cost, then mark
  // the stream complete so the orchestrator loop can finish/resume.
  done: (event, context) => {
    const d = asRecord(event.data)
    const response = asRecord(d.response)
    const asyncPause = asRecord(response.async_pause)
    if (asyncPause.checkpointId) {
      context.awaitingAsyncContinuation = {
        checkpointId: String(asyncPause.checkpointId),
        executionId:
          typeof asyncPause.executionId === 'string' ? asyncPause.executionId : context.executionId,
        runId: typeof asyncPause.runId === 'string' ? asyncPause.runId : context.runId,
        pendingToolCallIds: Array.isArray(asyncPause.pendingToolCallIds)
          ? asyncPause.pendingToolCallIds.map((id) => String(id))
          : [],
      }
    }
    if (d.usage) {
      const u = asRecord(d.usage)
      context.usage = {
        prompt: (u.input_tokens as number) || 0,
        completion: (u.output_tokens as number) || 0,
      }
    }
    if (d.cost) {
      const c = asRecord(d.cost)
      context.cost = {
        input: (c.input as number) || 0,
        output: (c.output as number) || 0,
        total: (c.total as number) || 0,
      }
    }
    context.streamComplete = true
  },
  start: () => {},
  // Stream-level error: record the message and terminate the stream.
  error: (event, context) => {
    const d = asRecord(event.data)
    const message = (d.message || d.error || event.error) as string | undefined
    if (message) {
      context.errors.push(message)
    }
    context.streamComplete = true
  },
}
/**
 * SSE event handlers for events tagged with a subagent. All handlers require
 * context.subAgentParentToolCallId so output/tool state can be attached to the
 * parent tool call that spawned the subagent.
 */
export const subAgentHandlers: Record<string, SSEHandler> = {
  // Accumulate subagent text under the parent tool call and mirror it into the transcript.
  content: (event, context) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId || !event.data) return
    // Server sends content as a plain string in event.data
    let chunk: string | undefined
    if (typeof event.data === 'string') {
      chunk = event.data
    } else {
      const d = asRecord(event.data)
      chunk = (d.content || d.data || event.content) as string | undefined
    }
    if (!chunk) return
    context.subAgentContent[parentToolCallId] =
      (context.subAgentContent[parentToolCallId] || '') + chunk
    addContentBlock(context, { type: 'subagent_text', content: chunk })
  },
  // Subagent tool call: track state under both the parent's list and the main
  // map, then dispatch execution with the same rules as the main tool_call handler.
  tool_call: async (event, context, execContext, options) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) return
    const toolData = getEventData(event) || ({} as Record<string, unknown>)
    const toolCallId = (toolData.id as string | undefined) || event.toolCallId
    const toolName = (toolData.name as string | undefined) || event.toolName
    if (!toolCallId || !toolName) return
    const isPartial = toolData.partial === true
    const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as
      | Record<string, unknown>
      | undefined
    const existing = context.toolCalls.get(toolCallId)
    // Ignore late/duplicate tool_call events once we already have a result.
    if (wasToolResultSeen(toolCallId) || existing?.endTime) {
      if (existing && !existing.name && toolName) {
        existing.name = toolName
      }
      if (existing && !existing.params && args) {
        existing.params = args
      }
      return
    }
    const toolCall: ToolCallState = {
      id: toolCallId,
      name: toolName,
      status: 'pending',
      params: args,
      startTime: Date.now(),
    }
    // Store in both places - but do NOT overwrite existing tool call state for the same id.
    if (!context.subAgentToolCalls[parentToolCallId]) {
      context.subAgentToolCalls[parentToolCallId] = []
    }
    if (!context.subAgentToolCalls[parentToolCallId].some((tc) => tc.id === toolCallId)) {
      context.subAgentToolCalls[parentToolCallId].push(toolCall)
    }
    if (!context.toolCalls.has(toolCallId)) {
      context.toolCalls.set(toolCallId, toolCall)
      const parentToolCall = context.toolCalls.get(parentToolCallId)
      addContentBlock(context, {
        type: 'tool_call',
        toolCall,
        calledBy: parentToolCall?.name,
      })
    }
    // Dispatch only on the final (non-partial) event, and only once.
    if (isPartial) return
    if (context.pendingToolPromises.has(toolCallId) || existing?.status === 'executing') {
      return
    }
    const { clientExecutable, internal } = getEventUI(event)
    if (internal) {
      return
    }
    if (!isToolAvailableOnSimSide(toolName) && !clientExecutable) {
      return
    }
    // Fire without awaiting so parallel subagent tool calls run concurrently.
    const fireToolExecution = () => {
      const pendingPromise = (async () => {
        try {
          // Persist the async row first so the run survives a disconnect/resume.
          await upsertAsyncToolCall({
            runId: context.runId || crypto.randomUUID(),
            toolCallId,
            toolName,
            args,
          })
        } catch (err) {
          logger
            .withMetadata({ messageId: context.messageId })
            .warn('Failed to persist async subagent tool row before execution', {
              toolCallId,
              toolName,
              error: err instanceof Error ? err.message : String(err),
            })
        }
        return executeToolAndReport(toolCallId, context, execContext, options)
      })().catch((err) => {
        logger
          .withMetadata({ messageId: context.messageId })
          .error('Parallel subagent tool execution failed', {
            toolCallId,
            toolName,
            error: err instanceof Error ? err.message : String(err),
          })
        return {
          status: 'error',
          message: err instanceof Error ? err.message : String(err),
          data: { error: err instanceof Error ? err.message : String(err) },
        }
      })
      registerPendingToolPromise(context, toolCallId, pendingPromise)
    }
    // Non-interactive (headless) callers: auto-execute server-side and return.
    if (options.interactive === false) {
      if (options.autoExecuteTools !== false) {
        if (!abortPendingToolIfStreamDead(toolCall, toolCallId, options, context)) {
          fireToolExecution()
        }
      }
      return
    }
    // Client-executable: run server-side when available, except workflow run
    // tools, which must execute in the browser (mirrors the main tool_call handler).
    if (clientExecutable) {
      const delegateWorkflowRunToClient = isWorkflowToolName(toolName)
      if (isToolAvailableOnSimSide(toolName) && !delegateWorkflowRunToClient) {
        if (!abortPendingToolIfStreamDead(toolCall, toolCallId, options, context)) {
          fireToolExecution()
        }
      } else {
        toolCall.status = 'executing'
        await upsertAsyncToolCall({
          runId: context.runId || crypto.randomUUID(),
          toolCallId,
          toolName,
          args,
          status: 'running',
        }).catch((err) => {
          logger
            .withMetadata({ messageId: context.messageId })
            .warn('Failed to persist async tool row for client-executable subagent tool', {
              toolCallId,
              toolName,
              error: err instanceof Error ? err.message : String(err),
            })
        })
        // Block until the client reports completion (or the wait is aborted / times out).
        const clientWaitSignal = buildClientToolAbortSignal(options)
        const completion = await waitForToolCompletion(
          toolCallId,
          options.timeout || STREAM_TIMEOUT_MS,
          clientWaitSignal
        )
        handleClientCompletion(toolCall, toolCallId, completion, context)
        await emitSyntheticToolResult(toolCallId, toolCall.name, completion, options, context)
      }
      return
    }
    if (options.autoExecuteTools !== false) {
      if (!abortPendingToolIfStreamDead(toolCall, toolCallId, options, context)) {
        fireToolExecution()
      }
    }
  },
  // Subagent tool result: update both the parent-scoped copy and the main map entry.
  tool_result: (event, context) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) return
    const data = getEventData(event)
    const toolCallId = event.toolCallId || (data?.id as string | undefined)
    if (!toolCallId) return
    const toolName = event.toolName || (data?.name as string | undefined) || ''
    // Update in subAgentToolCalls.
    const toolCalls = context.subAgentToolCalls[parentToolCallId] || []
    const subAgentToolCall = toolCalls.find((tc) => tc.id === toolCallId)
    // Also update in main toolCalls (where we added it for execution).
    const mainToolCall = ensureTerminalToolCallState(context, toolCallId, toolName)
    const { success, hasResultData, hasError } = inferToolSuccess(data)
    const status = success ? 'success' : 'error'
    const endTime = Date.now()
    const result = hasResultData ? { success, output: data?.result || data?.data } : undefined
    if (subAgentToolCall) {
      subAgentToolCall.status = status
      subAgentToolCall.endTime = endTime
      if (result) subAgentToolCall.result = result
      if (hasError) {
        const resultObj = asRecord(data?.result)
        subAgentToolCall.error = (data?.error || resultObj.error) as string | undefined
      }
    }
    if (mainToolCall) {
      mainToolCall.status = status
      mainToolCall.endTime = endTime
      if (result) mainToolCall.result = result
      if (hasError) {
        const resultObj = asRecord(data?.result)
        mainToolCall.error = (data?.error || resultObj.error) as string | undefined
      }
    }
    if (subAgentToolCall || mainToolCall) {
      markToolResultSeen(toolCallId)
    }
  },
}
/**
 * Decide whether an event should be routed to the subagent handlers.
 * Returns true only when the event is subagent-tagged AND a parent tool call
 * is known; a tagged event with no parent is logged and handled normally.
 */
export function handleSubagentRouting(event: SSEEvent, context: StreamingContext): boolean {
  const subagent = event.subagent
  if (!subagent) return false
  const hasParent = Boolean(context.subAgentParentToolCallId)
  if (!hasParent) {
    logger
      .withMetadata({ messageId: context.messageId })
      .warn('Subagent event missing parent tool call', {
        type: event.type,
        subagent,
      })
  }
  return hasParent
}

View File

@@ -1,2 +0,0 @@
export type { SSEHandler } from './handlers'
export { handleSubagentRouting, sseHandlers, subAgentHandlers } from './handlers'

View File

@@ -1,936 +0,0 @@
import { db } from '@sim/db'
import { userTableRows } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { completeAsyncToolCall, markAsyncToolRunning } from '@/lib/copilot/async-runs/repository'
import { waitForToolConfirmation } from '@/lib/copilot/orchestrator/persistence'
import { asRecord, markToolResultSeen } from '@/lib/copilot/orchestrator/sse/utils'
import { executeToolServerSide, markToolComplete } from '@/lib/copilot/orchestrator/tool-executor'
import {
type ExecutionContext,
isTerminalToolCallStatus,
type OrchestratorOptions,
type SSEEvent,
type StreamingContext,
type ToolCallResult,
} from '@/lib/copilot/orchestrator/types'
import {
extractDeletedResourcesFromToolResult,
extractResourcesFromToolResult,
hasDeleteCapability,
isResourceToolName,
persistChatResources,
removeChatResources,
} from '@/lib/copilot/resources'
import { getTableById } from '@/lib/table/service'
import { uploadWorkspaceFile } from '@/lib/uploads/contexts/workspace/workspace-file-manager'
const logger = createLogger('CopilotSseToolExecution')
// Tools whose successful output may be redirected to a workspace file via `outputPath`.
const OUTPUT_PATH_TOOLS = new Set(['function_execute', 'user_table'])
/**
 * Try to pull a flat array of row-objects out of the various shapes that
 * `function_execute` and `user_table` can return.
 */
function extractTabularData(output: unknown): Record<string, unknown>[] | null {
  if (!output || typeof output !== 'object') return null
  // A non-empty array whose first element is an object is treated as rows.
  const looksLikeRows = (candidate: unknown[]): candidate is Record<string, unknown>[] =>
    candidate.length > 0 && typeof candidate[0] === 'object' && candidate[0] !== null
  if (Array.isArray(output)) {
    return looksLikeRows(output) ? output : null
  }
  const obj = output as Record<string, unknown>
  // function_execute shape: { result: [...], stdout: "..." }
  if (Array.isArray(obj.result) && looksLikeRows(obj.result)) {
    return obj.result
  }
  // user_table query_rows shape: { data: { rows: [{ data: {...} }], totalCount } }
  if (obj.data && typeof obj.data === 'object' && !Array.isArray(obj.data)) {
    const data = obj.data as Record<string, unknown>
    if (Array.isArray(data.rows) && data.rows.length > 0) {
      const rows = data.rows as Record<string, unknown>[]
      // user_table rows nest actual values inside .data
      if (typeof rows[0].data === 'object' && rows[0].data !== null) {
        return rows.map((r) => r.data as Record<string, unknown>)
      }
      return rows
    }
  }
  return null
}
/** Serialize a single cell for CSV output, quoting only when the value requires escaping. */
function escapeCsvValue(value: unknown): string {
  if (value === null || value === undefined) return ''
  const text = typeof value === 'object' ? JSON.stringify(value) : String(value)
  // RFC 4180: quote when the cell contains a delimiter, quote, or line break.
  const needsQuoting = /[,"\n\r]/.test(text)
  return needsQuoting ? `"${text.replace(/"/g, '""')}"` : text
}
/**
 * Render row-objects as CSV. The header is the union of all keys across rows
 * (in first-seen order), so sparse rows serialize with empty cells.
 */
function convertRowsToCsv(rows: Record<string, unknown>[]): string {
  if (rows.length === 0) return ''
  const headers = [...new Set(rows.flatMap((row) => Object.keys(row)))]
  const headerLine = headers.map(escapeCsvValue).join(',')
  const dataLines = rows.map((row) => headers.map((h) => escapeCsvValue(row[h])).join(','))
  return [headerLine, ...dataLines].join('\n')
}
/** Supported serialization formats for `outputPath` redirection. */
type OutputFormat = 'json' | 'csv' | 'txt' | 'md' | 'html'
/** Maps a lowercase file extension (with leading dot) to its output format. */
const EXT_TO_FORMAT: Record<string, OutputFormat> = {
  '.json': 'json',
  '.csv': 'csv',
  '.txt': 'txt',
  '.md': 'md',
  '.html': 'html',
}
/** Content-Type header value used when uploading each output format. */
const FORMAT_TO_CONTENT_TYPE: Record<OutputFormat, string> = {
  json: 'application/json',
  csv: 'text/csv',
  txt: 'text/plain',
  md: 'text/markdown',
  html: 'text/html',
}
/**
 * Normalize an `outputPath` tool argument (e.g. "files/result.json") to a flat
 * workspace file name.
 *
 * The input comes from the model, so it is treated as untrusted: nested paths,
 * backslash-separated paths, and traversal-style names ("." / "..") are rejected.
 *
 * @throws Error when the path has no usable file name or is not flat.
 */
function normalizeOutputWorkspaceFileName(outputPath: string): string {
  const trimmed = outputPath.trim().replace(/^\/+/, '')
  const withoutPrefix = trimmed.startsWith('files/') ? trimmed.slice('files/'.length) : trimmed
  if (!withoutPrefix) {
    throw new Error('outputPath must include a file name, e.g. "files/result.json"')
  }
  // Reject both separator styles so Windows-style nesting cannot slip through.
  if (withoutPrefix.includes('/') || withoutPrefix.includes('\\')) {
    throw new Error(
      'outputPath must target a flat workspace file, e.g. "files/result.json". Nested paths like "files/reports/result.json" are not supported.'
    )
  }
  // "." and ".." are directory references, not meaningful workspace file names.
  if (withoutPrefix === '.' || withoutPrefix === '..') {
    throw new Error('outputPath must include a file name, e.g. "files/result.json"')
  }
  return withoutPrefix
}
/**
 * Determine the output format for a workspace file.
 *
 * An explicit format wins (case-insensitively, since the value is model-supplied);
 * otherwise the file extension is consulted, and anything unrecognized —
 * including names without an extension — falls back to JSON.
 */
function resolveOutputFormat(fileName: string, explicit?: string): OutputFormat {
  const normalizedExplicit = explicit?.toLowerCase()
  if (normalizedExplicit && normalizedExplicit in FORMAT_TO_CONTENT_TYPE) {
    return normalizedExplicit as OutputFormat
  }
  const dotIndex = fileName.lastIndexOf('.')
  if (dotIndex === -1) return 'json'
  const ext = fileName.slice(dotIndex).toLowerCase()
  return EXT_TO_FORMAT[ext] ?? 'json'
}
/**
 * Convert a tool's output value into file content for the chosen format.
 * Strings pass through untouched; CSV attempts a tabular conversion first;
 * everything else (including CSV fallback) is pretty-printed JSON.
 */
function serializeOutputForFile(output: unknown, format: OutputFormat): string {
  if (typeof output === 'string') return output
  if (format === 'csv') {
    const tabular = extractTabularData(output)
    if (tabular && tabular.length > 0) {
      return convertRowsToCsv(tabular)
    }
  }
  return JSON.stringify(output, null, 2)
}
/**
 * If the tool params include `outputPath`, serialize the successful tool output
 * and upload it as a flat workspace file, replacing the tool result with a
 * compact summary of the written file. No-op for tools outside
 * OUTPUT_PATH_TOOLS or when workspace/user context is missing.
 *
 * Returns a failure result (never throws) when normalization, serialization,
 * or the upload fails.
 */
async function maybeWriteOutputToFile(
  toolName: string,
  params: Record<string, unknown> | undefined,
  result: ToolCallResult,
  context: ExecutionContext
): Promise<ToolCallResult> {
  if (!result.success || !result.output) return result
  if (!OUTPUT_PATH_TOOLS.has(toolName)) return result
  if (!context.workspaceId || !context.userId) return result
  // outputPath/outputFormat may appear at the top level or nested under `args`.
  const args = params?.args as Record<string, unknown> | undefined
  const outputPath =
    (params?.outputPath as string | undefined) ?? (args?.outputPath as string | undefined)
  if (!outputPath) return result
  const explicitFormat =
    (params?.outputFormat as string | undefined) ?? (args?.outputFormat as string | undefined)
  try {
    const fileName = normalizeOutputWorkspaceFileName(outputPath)
    const format = resolveOutputFormat(fileName, explicitFormat)
    // Bail out before doing any work if the request was aborted mid-stream.
    if (context.abortSignal?.aborted) {
      throw new Error('Request aborted before tool mutation could be applied')
    }
    const content = serializeOutputForFile(result.output, format)
    const contentType = FORMAT_TO_CONTENT_TYPE[format]
    const buffer = Buffer.from(content, 'utf-8')
    // Re-check right before the upload side effect.
    if (context.abortSignal?.aborted) {
      throw new Error('Request aborted before tool mutation could be applied')
    }
    const uploaded = await uploadWorkspaceFile(
      context.workspaceId,
      context.userId,
      buffer,
      fileName,
      contentType
    )
    logger.withMetadata({ messageId: context.messageId }).info('Tool output written to file', {
      toolName,
      fileName,
      size: buffer.length,
      fileId: uploaded.id,
    })
    // Replace the raw output with a summary pointing at the uploaded file.
    return {
      success: true,
      output: {
        message: `Output written to files/${fileName} (${buffer.length} bytes)`,
        fileId: uploaded.id,
        fileName,
        size: buffer.length,
        downloadUrl: uploaded.url,
      },
    }
  } catch (err) {
    const message = err instanceof Error ? err.message : String(err)
    logger
      .withMetadata({ messageId: context.messageId })
      .warn('Failed to write tool output to file', {
        toolName,
        outputPath,
        error: message,
      })
    return {
      success: false,
      error: `Failed to write output file: ${message}`,
    }
  }
}
// Hard cap on rows written into a user table from tool output, bounding DB load.
const MAX_OUTPUT_TABLE_ROWS = 10_000
// Rows are inserted in chunks of this size to keep individual statements small.
const BATCH_CHUNK_SIZE = 500
/** Terminal outcome reported for an asynchronously-executed tool call. */
export interface AsyncToolCompletion {
  status: string
  message?: string
  data?: Record<string, unknown>
}
/**
 * True when the user has requested a stop — via either stop signal, or when
 * the stream has already been marked aborted.
 */
function abortRequested(
  context: StreamingContext,
  execContext: ExecutionContext,
  options?: OrchestratorOptions
): boolean {
  const stopSignalled =
    Boolean(options?.userStopSignal?.aborted) || Boolean(execContext.userStopSignal?.aborted)
  return stopSignalled || Boolean(context.wasAborted)
}
/** Build the canonical completion payload for a cancelled tool call. */
function cancelledCompletion(message: string): AsyncToolCompletion {
  return {
    status: 'cancelled',
    message,
    data: { cancelled: true },
  }
}
/**
 * Map a terminal tool-call state onto an AsyncToolCompletion. Successful and
 * skipped calls surface a plain-object output as `data`; rejected and failed
 * calls carry the error message in both `message` and `data.error`.
 */
function terminalCompletionFromToolCall(toolCall: {
  status: string
  error?: string
  result?: { output?: unknown; error?: string }
}): AsyncToolCompletion {
  const output = toolCall.result?.output
  // Only plain (non-array) object outputs can be forwarded as completion data.
  const objectOutput =
    output && typeof output === 'object' && !Array.isArray(output)
      ? (output as Record<string, unknown>)
      : undefined
  switch (toolCall.status) {
    case 'cancelled':
      return cancelledCompletion(toolCall.error || 'Tool execution cancelled')
    case 'success':
      return { status: 'success', message: 'Tool completed', data: objectOutput }
    case 'skipped':
      return { status: 'success', message: 'Tool skipped', data: objectOutput }
    default: {
      const failure = toolCall.error || toolCall.result?.error || 'Tool failed'
      return {
        status: toolCall.status === 'rejected' ? 'rejected' : 'error',
        message: failure,
        data: { error: failure },
      }
    }
  }
}
/**
 * Report a cancelled tool call (status 499) without awaiting the result.
 * Report failures are logged rather than propagated.
 */
function reportCancelledTool(
  toolCall: { id: string; name: string },
  message: string,
  messageId?: string,
  data: Record<string, unknown> = { cancelled: true }
): void {
  const { id, name } = toolCall
  markToolComplete(id, name, 499, message, data, messageId).catch((err) => {
    logger.withMetadata({ messageId }).error('markToolComplete failed (cancelled)', {
      toolCallId: id,
      toolName: name,
      error: err instanceof Error ? err.message : String(err),
    })
  })
}
/**
 * If a `function_execute` call specified `outputTable`, replace the table's
 * contents with the rows the code returned and summarize the write in the
 * tool result. The target table is wiped and repopulated inside a single
 * transaction; rows are inserted in batches.
 *
 * Returns a failure result (never throws) on validation or DB errors.
 */
async function maybeWriteOutputToTable(
  toolName: string,
  params: Record<string, unknown> | undefined,
  result: ToolCallResult,
  context: ExecutionContext
): Promise<ToolCallResult> {
  if (toolName !== 'function_execute') return result
  if (!result.success || !result.output) return result
  if (!context.workspaceId || !context.userId) return result
  const outputTable = params?.outputTable as string | undefined
  if (!outputTable) return result
  try {
    const table = await getTableById(outputTable)
    if (!table) {
      return {
        success: false,
        error: `Table "${outputTable}" not found`,
      }
    }
    // Accept either the function_execute wrapper ({ result: [...] }) or a bare array.
    const rawOutput = result.output
    let rows: Array<Record<string, unknown>>
    if (rawOutput && typeof rawOutput === 'object' && 'result' in rawOutput) {
      const inner = (rawOutput as Record<string, unknown>).result
      if (Array.isArray(inner)) {
        rows = inner
      } else {
        return {
          success: false,
          error: 'outputTable requires the code to return an array of objects',
        }
      }
    } else if (Array.isArray(rawOutput)) {
      rows = rawOutput
    } else {
      return {
        success: false,
        error: 'outputTable requires the code to return an array of objects',
      }
    }
    if (rows.length > MAX_OUTPUT_TABLE_ROWS) {
      return {
        success: false,
        error: `outputTable row limit exceeded: got ${rows.length}, max is ${MAX_OUTPUT_TABLE_ROWS}`,
      }
    }
    // An empty array would silently wipe the table, so it is rejected up front.
    if (rows.length === 0) {
      return {
        success: false,
        error: 'outputTable requires at least one row — code returned an empty array',
      }
    }
    // Abort checks: before opening the transaction, after acquiring it, and
    // before each batch insert, so a user stop rolls everything back.
    if (context.abortSignal?.aborted) {
      throw new Error('Request aborted before tool mutation could be applied')
    }
    await db.transaction(async (tx) => {
      if (context.abortSignal?.aborted) {
        throw new Error('Request aborted before tool mutation could be applied')
      }
      // Replace semantics: clear existing rows before inserting the new ones.
      await tx.delete(userTableRows).where(eq(userTableRows.tableId, outputTable))
      const now = new Date()
      for (let i = 0; i < rows.length; i += BATCH_CHUNK_SIZE) {
        if (context.abortSignal?.aborted) {
          throw new Error('Request aborted before tool mutation could be applied')
        }
        const chunk = rows.slice(i, i + BATCH_CHUNK_SIZE)
        const values = chunk.map((rowData, j) => ({
          id: `row_${crypto.randomUUID().replace(/-/g, '')}`,
          tableId: outputTable,
          workspaceId: context.workspaceId!,
          data: rowData,
          position: i + j,
          createdAt: now,
          updatedAt: now,
          createdBy: context.userId,
        }))
        await tx.insert(userTableRows).values(values)
      }
    })
    logger.withMetadata({ messageId: context.messageId }).info('Tool output written to table', {
      toolName,
      tableId: outputTable,
      rowCount: rows.length,
    })
    // Replace the raw output with a summary of the table write.
    return {
      success: true,
      output: {
        message: `Wrote ${rows.length} rows to table ${outputTable}`,
        tableId: outputTable,
        rowCount: rows.length,
      },
    }
  } catch (err) {
    logger
      .withMetadata({ messageId: context.messageId })
      .warn('Failed to write tool output to table', {
        toolName,
        outputTable,
        error: err instanceof Error ? err.message : String(err),
      })
    return {
      success: false,
      error: `Failed to write to table: ${err instanceof Error ? err.message : String(err)}`,
    }
  }
}
/**
 * Post-processing hook for the `read` tool: when the call carries an
 * `outputTable` param, imports the file content returned by `read` into that
 * user table (replacing all existing rows) and rewrites the tool result into
 * an import summary.
 *
 * Returns the original `result` untouched unless: the tool is `read`, the
 * read succeeded with output, the context has workspace/user ids, and an
 * `outputTable` param is present. Files ending in `.json` must parse to an
 * array; anything else is parsed as CSV. Any failure yields
 * `{ success: false, error }`.
 */
async function maybeWriteReadCsvToTable(
  toolName: string,
  params: Record<string, unknown> | undefined,
  result: ToolCallResult,
  context: ExecutionContext
): Promise<ToolCallResult> {
  // Only applies to successful `read` calls that requested a table import.
  if (toolName !== 'read') return result
  if (!result.success || !result.output) return result
  if (!context.workspaceId || !context.userId) return result
  const outputTable = params?.outputTable as string | undefined
  if (!outputTable) return result
  try {
    const table = await getTableById(outputTable)
    if (!table) {
      return { success: false, error: `Table "${outputTable}" not found` }
    }
    const output = result.output as Record<string, unknown>
    const content = (output.content as string) || ''
    if (!content.trim()) {
      return { success: false, error: 'File has no content to import into table' }
    }
    const filePath = (params?.path as string) || ''
    // File extension selects the parser (json vs csv).
    const ext = filePath.split('.').pop()?.toLowerCase()
    let rows: Record<string, unknown>[]
    if (ext === 'json') {
      const parsed = JSON.parse(content)
      if (!Array.isArray(parsed)) {
        return {
          success: false,
          error: 'JSON file must contain an array of objects for table import',
        }
      }
      rows = parsed
    } else {
      // Lazily load the CSV parser; lenient options tolerate ragged rows and
      // quoting issues, skipping unparseable records instead of failing.
      const { parse } = await import('csv-parse/sync')
      rows = parse(content, {
        columns: true,
        skip_empty_lines: true,
        trim: true,
        relax_column_count: true,
        relax_quotes: true,
        skip_records_with_error: true,
        cast: false,
      }) as Record<string, unknown>[]
    }
    if (rows.length === 0) {
      return { success: false, error: 'File has no data rows to import' }
    }
    if (rows.length > MAX_OUTPUT_TABLE_ROWS) {
      return {
        success: false,
        error: `Row limit exceeded: got ${rows.length}, max is ${MAX_OUTPUT_TABLE_ROWS}`,
      }
    }
    // Abort is checked before the transaction, on entry to it, and per chunk,
    // so a cancelled request cannot clobber the table mid-import.
    if (context.abortSignal?.aborted) {
      throw new Error('Request aborted before tool mutation could be applied')
    }
    await db.transaction(async (tx) => {
      if (context.abortSignal?.aborted) {
        throw new Error('Request aborted before tool mutation could be applied')
      }
      // Replace-all semantics: clear existing rows, then insert in chunks.
      await tx.delete(userTableRows).where(eq(userTableRows.tableId, outputTable))
      const now = new Date()
      for (let i = 0; i < rows.length; i += BATCH_CHUNK_SIZE) {
        if (context.abortSignal?.aborted) {
          throw new Error('Request aborted before tool mutation could be applied')
        }
        const chunk = rows.slice(i, i + BATCH_CHUNK_SIZE)
        const values = chunk.map((rowData, j) => ({
          id: `row_${crypto.randomUUID().replace(/-/g, '')}`,
          tableId: outputTable,
          workspaceId: context.workspaceId!,
          data: rowData,
          position: i + j, // preserves source-file row order across chunks
          createdAt: now,
          updatedAt: now,
          createdBy: context.userId,
        }))
        await tx.insert(userTableRows).values(values)
      }
    })
    logger.withMetadata({ messageId: context.messageId }).info('Read output written to table', {
      toolName,
      tableId: outputTable,
      tableName: table.name,
      rowCount: rows.length,
      filePath,
    })
    // Replace the raw file content with a compact import summary.
    return {
      success: true,
      output: {
        message: `Imported ${rows.length} rows from "${filePath}" into table "${table.name}"`,
        tableId: outputTable,
        tableName: table.name,
        rowCount: rows.length,
      },
    }
  } catch (err) {
    logger
      .withMetadata({ messageId: context.messageId })
      .warn('Failed to write read output to table', {
        toolName,
        outputTable,
        error: err instanceof Error ? err.message : String(err),
      })
    return {
      success: false,
      error: `Failed to import into table: ${err instanceof Error ? err.message : String(err)}`,
    }
  }
}
/**
 * Execute a tool call server-side and report the outcome everywhere it is
 * consumed: the async tool-call store, the copilot backend (mark-complete),
 * the SSE event stream, and chat resource persistence.
 *
 * Abort handling: `abortRequested` is re-checked between every await point so
 * a cancelled request stops mutating state as early as possible. All full
 * cancellation paths funnel through the shared `cancelWith` helper (previously
 * six copy-pasted blocks that could drift apart).
 *
 * @param toolCallId - id of the tool call tracked in `context.toolCalls`
 * @param context - streaming context owning the tool call map
 * @param execContext - execution context (user/workspace/workflow scope)
 * @param options - orchestrator hooks (onEvent, abort signal, timeout)
 * @returns terminal completion summary for the tool call
 */
export async function executeToolAndReport(
  toolCallId: string,
  context: StreamingContext,
  execContext: ExecutionContext,
  options?: OrchestratorOptions
): Promise<AsyncToolCompletion> {
  const toolCall = context.toolCalls.get(toolCallId)
  if (!toolCall) return { status: 'error', message: 'Tool call not found' }
  if (toolCall.status === 'executing') {
    return { status: 'running', message: 'Tool already executing' }
  }
  if (toolCall.endTime || isTerminalToolCallStatus(toolCall.status)) {
    return terminalCompletionFromToolCall(toolCall)
  }
  // Shared cancellation path: marks the call cancelled, persists the
  // cancellation (best-effort), reports it, and builds the completion.
  const cancelWith = async (message: string): Promise<AsyncToolCompletion> => {
    toolCall.status = 'cancelled'
    toolCall.endTime = Date.now()
    markToolResultSeen(toolCall.id)
    await completeAsyncToolCall({
      toolCallId: toolCall.id,
      status: 'cancelled',
      result: { cancelled: true },
      error: message,
    }).catch(() => {})
    reportCancelledTool(toolCall, message, context.messageId)
    return cancelledCompletion(message)
  }
  if (abortRequested(context, execContext, options)) {
    return cancelWith('Request aborted before tool execution')
  }
  toolCall.status = 'executing'
  await markAsyncToolRunning(toolCall.id, 'sim-stream').catch(() => {})
  logger.withMetadata({ messageId: context.messageId }).info('Tool execution started', {
    toolCallId: toolCall.id,
    toolName: toolCall.name,
    params: toolCall.params,
  })
  try {
    let result = await executeToolServerSide(toolCall, execContext)
    if (toolCall.endTime || isTerminalToolCallStatus(toolCall.status)) {
      return terminalCompletionFromToolCall(toolCall)
    }
    if (abortRequested(context, execContext, options)) {
      return cancelWith('Request aborted during tool execution')
    }
    // Post-processing steps that may redirect tool output (file write, table
    // write, read→table import); abort is re-checked after each step.
    for (const postProcess of [
      maybeWriteOutputToFile,
      maybeWriteOutputToTable,
      maybeWriteReadCsvToTable,
    ]) {
      result = await postProcess(toolCall.name, toolCall.params, result, execContext)
      if (abortRequested(context, execContext, options)) {
        return cancelWith('Request aborted during tool post-processing')
      }
    }
    toolCall.status = result.success ? 'success' : 'error'
    toolCall.result = result
    toolCall.error = result.error
    toolCall.endTime = Date.now()
    if (result.success) {
      const raw = result.output
      // Truncated preview keeps log lines bounded.
      const preview =
        typeof raw === 'string'
          ? raw.slice(0, 200)
          : raw && typeof raw === 'object'
            ? JSON.stringify(raw).slice(0, 200)
            : undefined
      logger.withMetadata({ messageId: context.messageId }).info('Tool execution succeeded', {
        toolCallId: toolCall.id,
        toolName: toolCall.name,
        outputPreview: preview,
      })
    } else {
      logger.withMetadata({ messageId: context.messageId }).warn('Tool execution failed', {
        toolCallId: toolCall.id,
        toolName: toolCall.name,
        error: result.error,
        params: toolCall.params,
      })
    }
    // If create_workflow was successful, update the execution context with the new workflowId.
    // This ensures subsequent tools in the same stream have access to the workflowId.
    const output = asRecord(result.output)
    if (
      toolCall.name === 'create_workflow' &&
      result.success &&
      output.workflowId &&
      !execContext.workflowId
    ) {
      execContext.workflowId = output.workflowId as string
      if (output.workspaceId) {
        execContext.workspaceId = output.workspaceId as string
      }
    }
    markToolResultSeen(toolCall.id)
    await completeAsyncToolCall({
      toolCallId: toolCall.id,
      status: result.success ? 'completed' : 'failed',
      result: result.success ? asRecord(result.output) : { error: result.error || 'Tool failed' },
      error: result.success ? null : result.error || 'Tool failed',
    }).catch(() => {})
    if (abortRequested(context, execContext, options)) {
      // Result is already persisted above; only suppress further delivery
      // (note: deliberately does NOT re-run the full cancelWith path).
      toolCall.status = 'cancelled'
      reportCancelledTool(
        toolCall,
        'Request aborted before tool result delivery',
        context.messageId
      )
      return cancelledCompletion('Request aborted before tool result delivery')
    }
    // Fire-and-forget: notify the copilot backend that the tool completed.
    // IMPORTANT: We must NOT await this — the server may block on the
    // mark-complete handler until it can write back on the SSE stream, but
    // the SSE reader (our for-await loop) is paused while we're in this
    // handler. Awaiting here would deadlock: sim waits for the server's response,
    // the server waits for sim to drain the SSE stream.
    markToolComplete(
      toolCall.id,
      toolCall.name,
      result.success ? 200 : 500,
      result.error || (result.success ? 'Tool completed' : 'Tool failed'),
      result.output,
      context.messageId
    ).catch((err) => {
      logger
        .withMetadata({ messageId: context.messageId })
        .error('markToolComplete fire-and-forget failed', {
          toolCallId: toolCall.id,
          toolName: toolCall.name,
          error: err instanceof Error ? err.message : String(err),
        })
    })
    const resultEvent: SSEEvent = {
      type: 'tool_result',
      toolCallId: toolCall.id,
      toolName: toolCall.name,
      success: result.success,
      result: result.output,
      data: {
        id: toolCall.id,
        name: toolCall.name,
        success: result.success,
        result: result.output,
      },
    }
    await options?.onEvent?.(resultEvent)
    if (abortRequested(context, execContext, options)) {
      toolCall.status = 'cancelled'
      return cancelledCompletion('Request aborted before resource persistence')
    }
    // Persist/remove chat resources referenced by the tool result (best-effort).
    if (result.success && execContext.chatId && !abortRequested(context, execContext, options)) {
      let isDeleteOp = false
      if (hasDeleteCapability(toolCall.name)) {
        const deleted = extractDeletedResourcesFromToolResult(
          toolCall.name,
          toolCall.params,
          result.output
        )
        if (deleted.length > 0) {
          isDeleteOp = true
          removeChatResources(execContext.chatId, deleted).catch((err) => {
            logger
              .withMetadata({ messageId: context.messageId })
              .warn('Failed to remove chat resources after deletion', {
                chatId: execContext.chatId,
                error: err instanceof Error ? err.message : String(err),
              })
          })
          for (const resource of deleted) {
            if (abortRequested(context, execContext, options)) break
            await options?.onEvent?.({
              type: 'resource_deleted',
              resource: { type: resource.type, id: resource.id, title: resource.title },
            })
          }
        }
      }
      if (!isDeleteOp && !abortRequested(context, execContext, options)) {
        const resources =
          result.resources && result.resources.length > 0
            ? result.resources
            : isResourceToolName(toolCall.name)
              ? extractResourcesFromToolResult(toolCall.name, toolCall.params, result.output)
              : []
        if (resources.length > 0) {
          persistChatResources(execContext.chatId, resources).catch((err) => {
            logger
              .withMetadata({ messageId: context.messageId })
              .warn('Failed to persist chat resources', {
                chatId: execContext.chatId,
                error: err instanceof Error ? err.message : String(err),
              })
          })
          for (const resource of resources) {
            if (abortRequested(context, execContext, options)) break
            await options?.onEvent?.({
              type: 'resource_added',
              resource: { type: resource.type, id: resource.id, title: resource.title },
            })
          }
        }
      }
    }
    return {
      status: result.success ? 'success' : 'error',
      message: result.error || (result.success ? 'Tool completed' : 'Tool failed'),
      data: asRecord(result.output),
    }
  } catch (error) {
    if (abortRequested(context, execContext, options)) {
      return cancelWith('Request aborted during tool execution')
    }
    toolCall.status = 'error'
    toolCall.error = error instanceof Error ? error.message : String(error)
    toolCall.endTime = Date.now()
    logger.withMetadata({ messageId: context.messageId }).error('Tool execution threw', {
      toolCallId: toolCall.id,
      toolName: toolCall.name,
      error: toolCall.error,
      params: toolCall.params,
    })
    markToolResultSeen(toolCall.id)
    await completeAsyncToolCall({
      toolCallId: toolCall.id,
      status: 'failed',
      result: { error: toolCall.error },
      error: toolCall.error,
    }).catch(() => {})
    // Fire-and-forget (same reasoning as above).
    // Pass error as structured data so the Go side can surface it to the LLM.
    markToolComplete(
      toolCall.id,
      toolCall.name,
      500,
      toolCall.error,
      {
        error: toolCall.error,
      },
      context.messageId
    ).catch((err) => {
      logger
        .withMetadata({ messageId: context.messageId })
        .error('markToolComplete fire-and-forget failed', {
          toolCallId: toolCall.id,
          toolName: toolCall.name,
          error: err instanceof Error ? err.message : String(err),
        })
    })
    const errorEvent: SSEEvent = {
      type: 'tool_error',
      state: 'error',
      toolCallId: toolCall.id,
      data: {
        id: toolCall.id,
        name: toolCall.name,
        error: toolCall.error,
      },
    }
    await options?.onEvent?.(errorEvent)
    return {
      status: 'error',
      message: toolCall.error,
      data: { error: toolCall.error },
    }
  }
}
/**
 * Wait for a tool completion signal (success/error/rejected) from the client.
 * Ignores intermediate statuses like `accepted` and only returns terminal statuses:
 * - success: client finished executing successfully
 * - error: client execution failed
 * - rejected: user clicked Skip (subagent run tools where user hasn't auto-allowed)
 *
 * Used for client-executable run tools: the client executes the workflow
 * and posts success/error to /api/copilot/confirm when done. The server
 * waits here until that completion signal arrives.
 *
 * @returns the terminal decision, or null on timeout/abort/non-terminal result
 */
export async function waitForToolCompletion(
  toolCallId: string,
  timeoutMs: number,
  abortSignal?: AbortSignal
): Promise<{ status: string; message?: string; data?: Record<string, unknown> } | null> {
  // Single source of truth for terminal statuses; previously this list was
  // duplicated in the acceptStatus predicate and the post-wait check and
  // could silently drift apart.
  const terminalStatuses = new Set([
    'success',
    'error',
    'rejected',
    'background',
    'cancelled',
    'delivered',
  ])
  const decision = await waitForToolConfirmation(toolCallId, timeoutMs, abortSignal, {
    acceptStatus: (status) => terminalStatuses.has(status),
  })
  // waitForToolConfirmation may resolve with null (timeout/abort) or a
  // non-terminal decision; only surface terminal ones to the caller.
  return decision && terminalStatuses.has(decision.status) ? decision : null
}

View File

@@ -1,44 +0,0 @@
/**
* @vitest-environment node
*/
import { describe, expect, it } from 'vitest'
import {
markToolResultSeen,
normalizeSseEvent,
shouldSkipToolCallEvent,
shouldSkipToolResultEvent,
} from '@/lib/copilot/orchestrator/sse/utils'
describe('sse-utils', () => {
  it.concurrent('normalizes tool fields from string data', () => {
    // Payload arrives JSON-encoded on the wire; normalization should hoist
    // the tool metadata to the top level of the event.
    const payload = {
      id: 'tool_1',
      name: 'edit_workflow',
      success: true,
      result: { ok: true },
    }
    const wireEvent = { type: 'tool_result', data: JSON.stringify(payload) }
    const normalized = normalizeSseEvent(wireEvent as any)
    expect(normalized.toolCallId).toBe('tool_1')
    expect(normalized.toolName).toBe('edit_workflow')
    expect(normalized.success).toBe(true)
    expect(normalized.result).toEqual({ ok: true })
  })
  it.concurrent('dedupes tool_call events', () => {
    // First sighting passes through; the identical repeat is skipped.
    const callEvent = { type: 'tool_call', data: { id: 'tool_call_1', name: 'plan' } }
    expect(shouldSkipToolCallEvent(callEvent as any)).toBe(false)
    expect(shouldSkipToolCallEvent(callEvent as any)).toBe(true)
  })
  it.concurrent('dedupes tool_result events', () => {
    // Results are only skipped once explicitly marked seen.
    const resultEvent = { type: 'tool_result', data: { id: 'tool_result_1', name: 'plan' } }
    expect(shouldSkipToolResultEvent(resultEvent as any)).toBe(false)
    markToolResultSeen('tool_result_1')
    expect(shouldSkipToolResultEvent(resultEvent as any)).toBe(true)
  })
})

View File

@@ -1,129 +0,0 @@
import { STREAM_BUFFER_MAX_DEDUP_ENTRIES } from '@/lib/copilot/constants'
import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
type EventDataObject = Record<string, unknown> | undefined
/** Safely cast event.data to a record for property access; non-objects and arrays become {}. */
export const asRecord = (data: unknown): Record<string, unknown> => {
  if (data && typeof data === 'object' && !Array.isArray(data)) {
    return data as Record<string, unknown>
  }
  return {}
}
/**
 * In-memory tool event dedupe with bounded size.
 *
 * NOTE: Process-local only. In a multi-instance setup (e.g., ECS),
 * each task maintains its own dedupe cache.
 */
const seenToolCalls = new Set<string>()
const seenToolResults = new Set<string>()
/** Insert `id`, evicting the oldest entry (FIFO by insertion order) once the cap is hit. */
function addToSet(set: Set<string>, id: string): void {
  if (set.size >= STREAM_BUFFER_MAX_DEDUP_ENTRIES) {
    for (const oldest of set) {
      set.delete(oldest)
      break
    }
  }
  set.add(id)
}
/** Coerce unknown event data to a plain object, tolerating JSON-encoded strings. */
const parseEventData = (data: unknown): EventDataObject => {
  if (!data) return undefined
  if (typeof data === 'string') {
    try {
      const decoded = JSON.parse(data)
      return decoded !== null && typeof decoded === 'object' && !Array.isArray(decoded)
        ? (decoded as EventDataObject)
        : undefined
    } catch {
      return undefined
    }
  }
  // Non-string: accept only non-array objects.
  return typeof data === 'object' && !Array.isArray(data) ? (data as EventDataObject) : undefined
}
/** True when the object carries any recognizable tool metadata field. */
const hasToolFields = (data: EventDataObject): boolean => {
  if (!data) return false
  const toolKeys = ['id', 'toolCallId', 'name', 'success', 'result', 'arguments'] as const
  return toolKeys.some((key) => data[key] !== undefined)
}
/** Extract tool-shaped data from an event, preferring whichever level carries tool fields. */
export const getEventData = (event: SSEEvent): EventDataObject => {
  const outer = parseEventData(event.data)
  if (!outer) return undefined
  if (hasToolFields(outer)) return outer
  // Some producers nest the payload one level deeper under `data`.
  const inner = parseEventData(outer.data)
  return inner || outer
}
/** Resolve the tool call id from the event top level, falling back to the payload. */
function getToolCallIdFromEvent(event: SSEEvent): string | undefined {
  if (event.toolCallId) return event.toolCallId
  const payload = getEventData(event)
  return (payload?.id as string | undefined) || (payload?.toolCallId as string | undefined)
}
/** Normalizes SSE events so tool metadata is available at the top level. */
export function normalizeSseEvent(event: SSEEvent): SSEEvent {
  if (!event) return event
  const payload = getEventData(event)
  if (!payload) return event
  return {
    ...event,
    // Keep the parsed payload when the wire format was a JSON string.
    data: typeof event.data === 'string' ? payload : event.data,
    toolCallId:
      event.toolCallId ||
      (payload.id as string | undefined) ||
      (payload.toolCallId as string | undefined),
    toolName:
      event.toolName ||
      (payload.name as string | undefined) ||
      (payload.toolName as string | undefined),
    success: event.success ?? (payload.success as boolean | undefined),
    result: event.result ?? payload.result,
  }
}
/** Record a tool_call id in the bounded dedupe set. */
function markToolCallSeen(toolCallId: string): void {
  addToSet(seenToolCalls, toolCallId)
}
/** True when a tool_call with this id has already been processed. */
function wasToolCallSeen(toolCallId: string): boolean {
  return seenToolCalls.has(toolCallId)
}
/** Record a tool_result id so duplicate results (and late tool_calls) are skipped. */
export function markToolResultSeen(toolCallId: string): void {
  addToSet(seenToolResults, toolCallId)
}
/** True when a tool_result with this id has already been seen. */
export function wasToolResultSeen(toolCallId: string): boolean {
  return seenToolResults.has(toolCallId)
}
export function shouldSkipToolCallEvent(event: SSEEvent): boolean {
if (event.type !== 'tool_call') return false
const toolCallId = getToolCallIdFromEvent(event)
if (!toolCallId) return false
const eventData = getEventData(event)
if (eventData?.partial === true) return false
if (wasToolResultSeen(toolCallId) || wasToolCallSeen(toolCallId)) {
return true
}
markToolCallSeen(toolCallId)
return false
}
/** Dedupe tool_result events against previously-marked result ids. */
export function shouldSkipToolResultEvent(event: SSEEvent): boolean {
  if (event.type !== 'tool_result') return false
  const id = getToolCallIdFromEvent(event)
  return id ? wasToolResultSeen(id) : false
}

View File

@@ -1,119 +0,0 @@
/**
* @vitest-environment node
*/
import { loggerMock } from '@sim/testing'
import { beforeEach, describe, expect, it, vi } from 'vitest'
vi.mock('@sim/logger', () => loggerMock)
// Shape of an entry in the fake sorted set: ZADD score plus member string.
type StoredEntry = { score: number; value: string }
/**
 * Build an in-memory stand-in for the Redis client used by the stream buffer.
 * Implements only the commands the buffer touches: `eval` emulates the atomic
 * append Lua script, `incrby` backs id reservation, `zrangebyscore` reads
 * events back, and `pipeline` applies batched zadd writes.
 */
const createRedisStub = () => {
  // key -> sorted-set entries (events); key -> counter value (sequence ids)
  const events = new Map<string, StoredEntry[]>()
  const counters = new Map<string, number>()
  // Emulates ZRANGEBYSCORE: inclusive score window, returned in ascending order.
  const readEntries = (key: string, min: number, max: number) => {
    const list = events.get(key) || []
    return list
      .filter((entry) => entry.score >= min && entry.score <= max)
      .sort((a, b) => a.score - b.score)
      .map((entry) => entry.value)
  }
  return {
    del: vi.fn().mockResolvedValue(1),
    hset: vi.fn().mockResolvedValue(1),
    hgetall: vi.fn().mockResolvedValue({}),
    expire: vi.fn().mockResolvedValue(1),
    // Emulates the append script: bump the sequence, wrap the event with its
    // allocated id, and store it at that score (TTL/limit args are ignored).
    eval: vi
      .fn()
      .mockImplementation(
        (
          _lua: string,
          _keysCount: number,
          seqKey: string,
          eventsKey: string,
          _ttl: number,
          _limit: number,
          streamId: string,
          eventJson: string
        ) => {
          const current = counters.get(seqKey) || 0
          const next = current + 1
          counters.set(seqKey, next)
          const entry = JSON.stringify({ eventId: next, streamId, event: JSON.parse(eventJson) })
          const list = events.get(eventsKey) || []
          list.push({ score: next, value: entry })
          events.set(eventsKey, list)
          return next
        }
      ),
    incrby: vi.fn().mockImplementation((key: string, amount: number) => {
      const current = counters.get(key) || 0
      const next = current + amount
      counters.set(key, next)
      return next
    }),
    zrangebyscore: vi.fn().mockImplementation((key: string, min: string, max: string) => {
      const minVal = Number(min)
      const maxVal = max === '+inf' ? Number.POSITIVE_INFINITY : Number(max)
      return Promise.resolve(readEntries(key, minVal, maxVal))
    }),
    // Chainable pipeline whose zadd writes straight into `events`; expire and
    // zremrangebyrank are recorded but otherwise no-ops.
    pipeline: vi.fn().mockImplementation(() => {
      const api: Record<string, any> = {}
      api.zadd = vi.fn().mockImplementation((key: string, ...args: Array<string | number>) => {
        const list = events.get(key) || []
        for (let i = 0; i < args.length; i += 2) {
          list.push({ score: Number(args[i]), value: String(args[i + 1]) })
        }
        events.set(key, list)
        return api
      })
      api.expire = vi.fn().mockReturnValue(api)
      api.zremrangebyrank = vi.fn().mockReturnValue(api)
      api.exec = vi.fn().mockResolvedValue([])
      return api
    }),
  }
}
let mockRedis: ReturnType<typeof createRedisStub>
vi.mock('@/lib/core/config/redis', () => ({
getRedisClient: () => mockRedis,
}))
import {
appendStreamEvent,
createStreamEventWriter,
readStreamEvents,
} from '@/lib/copilot/orchestrator/stream/buffer'
describe('stream-buffer', () => {
  beforeEach(() => {
    // Fresh stub per test so sequence counters and stored events don't leak.
    mockRedis = createRedisStub()
    vi.clearAllMocks()
  })
  it.concurrent('replays events after a given event id', async () => {
    await appendStreamEvent('stream-1', { type: 'content', data: 'hello' })
    await appendStreamEvent('stream-1', { type: 'content', data: 'world' })
    const fromStart = await readStreamEvents('stream-1', 0)
    expect(fromStart.map((stored) => stored.event.data)).toEqual(['hello', 'world'])
    // Reading after id 1 should only return the second event.
    const afterFirst = await readStreamEvents('stream-1', 1)
    expect(afterFirst.map((stored) => stored.event.data)).toEqual(['world'])
  })
  it.concurrent('flushes buffered events for resume', async () => {
    const writer = createStreamEventWriter('stream-2')
    for (const value of ['a', 'b']) {
      await writer.write({ type: 'content', data: value })
    }
    await writer.flush()
    const stored = await readStreamEvents('stream-2', 0)
    expect(stored.map((entry) => entry.event.data)).toEqual(['a', 'b'])
  })
})

View File

@@ -1,322 +0,0 @@
import { createLogger } from '@sim/logger'
import { REDIS_COPILOT_STREAM_PREFIX } from '@/lib/copilot/constants'
import { env } from '@/lib/core/config/env'
import { getRedisClient } from '@/lib/core/config/redis'
const logger = createLogger('CopilotStreamBuffer')
/** Built-in fallbacks used when env overrides are absent or invalid. */
const STREAM_DEFAULTS = {
  ttlSeconds: 60 * 60,
  eventLimit: 5000,
  reserveBatch: 200,
  flushIntervalMs: 15,
  flushMaxBatch: 200,
}
export type StreamBufferConfig = {
  ttlSeconds: number
  eventLimit: number
  reserveBatch: number
  flushIntervalMs: number
  flushMaxBatch: number
}
/** Coerce a numeric or numeric-string value to a finite number, else return the fallback. */
const parseNumber = (value: number | string | undefined, fallback: number): number => {
  const candidate = typeof value === 'number' ? value : Number(value)
  return Number.isFinite(candidate) ? candidate : fallback
}
/** Resolve stream-buffer tuning from env vars, falling back to STREAM_DEFAULTS. */
export function getStreamBufferConfig(): StreamBufferConfig {
  return {
    ttlSeconds: parseNumber(env.COPILOT_STREAM_TTL_SECONDS, STREAM_DEFAULTS.ttlSeconds),
    eventLimit: parseNumber(env.COPILOT_STREAM_EVENT_LIMIT, STREAM_DEFAULTS.eventLimit),
    reserveBatch: parseNumber(env.COPILOT_STREAM_RESERVE_BATCH, STREAM_DEFAULTS.reserveBatch),
    flushIntervalMs: parseNumber(
      env.COPILOT_STREAM_FLUSH_INTERVAL_MS,
      STREAM_DEFAULTS.flushIntervalMs
    ),
    flushMaxBatch: parseNumber(env.COPILOT_STREAM_FLUSH_MAX_BATCH, STREAM_DEFAULTS.flushMaxBatch),
  }
}
// Atomic append script: INCR the per-stream sequence to allocate the next
// event id, ZADD the wrapped event at that score, refresh both keys' TTLs,
// and trim the sorted set so at most `limit` newest entries are retained.
const APPEND_STREAM_EVENT_LUA = `
local seqKey = KEYS[1]
local eventsKey = KEYS[2]
local ttl = tonumber(ARGV[1])
local limit = tonumber(ARGV[2])
local streamId = ARGV[3]
local eventJson = ARGV[4]
local id = redis.call('INCR', seqKey)
local entry = '{"eventId":' .. id .. ',"streamId":' .. cjson.encode(streamId) .. ',"event":' .. eventJson .. '}'
redis.call('ZADD', eventsKey, id, entry)
redis.call('EXPIRE', eventsKey, ttl)
redis.call('EXPIRE', seqKey, ttl)
if limit > 0 then
  redis.call('ZREMRANGEBYRANK', eventsKey, 0, -limit-1)
end
return id
`
// Base Redis key namespace for a stream.
function getStreamKeyPrefix(streamId: string) {
  return `${REDIS_COPILOT_STREAM_PREFIX}${streamId}`
}
// Sorted set of serialized events, scored by event id.
function getEventsKey(streamId: string) {
  return `${getStreamKeyPrefix(streamId)}:events`
}
// Counter used to allocate monotonically increasing event ids.
function getSeqKey(streamId: string) {
  return `${getStreamKeyPrefix(streamId)}:seq`
}
// Hash holding the stream's status metadata.
function getMetaKey(streamId: string) {
  return `${getStreamKeyPrefix(streamId)}:meta`
}
export type StreamStatus = 'active' | 'complete' | 'cancelled' | 'error'
export type StreamMeta = {
status: StreamStatus
userId?: string
executionId?: string
runId?: string
updatedAt?: string
error?: string
}
export type StreamEventEntry = {
eventId: number
streamId: string
event: Record<string, unknown>
}
export type StreamEventWriter = {
write: (event: Record<string, unknown>) => Promise<StreamEventEntry>
flush: () => Promise<void>
close: () => Promise<void>
}
/** Delete all Redis keys (events, sequence, meta) for a stream. Best-effort; logs on failure. */
export async function resetStreamBuffer(streamId: string): Promise<void> {
  const redis = getRedisClient()
  if (!redis) return
  const keys = [getEventsKey(streamId), getSeqKey(streamId), getMetaKey(streamId)]
  try {
    await redis.del(...keys)
  } catch (error) {
    logger.warn('Failed to reset stream buffer', {
      streamId,
      error: error instanceof Error ? error.message : String(error),
    })
  }
}
export async function setStreamMeta(streamId: string, meta: StreamMeta): Promise<void> {
const redis = getRedisClient()
if (!redis) return
try {
const config = getStreamBufferConfig()
const payload: Record<string, string> = {
status: meta.status,
updatedAt: meta.updatedAt || new Date().toISOString(),
}
if (meta.userId) payload.userId = meta.userId
if (meta.executionId) payload.executionId = meta.executionId
if (meta.runId) payload.runId = meta.runId
if (meta.error) payload.error = meta.error
await redis.hset(getMetaKey(streamId), payload)
await redis.expire(getMetaKey(streamId), config.ttlSeconds)
} catch (error) {
logger.warn('Failed to update stream meta', {
streamId,
error: error instanceof Error ? error.message : String(error),
})
}
}
/** Read the stream's metadata hash; null when missing, empty, or Redis unavailable. */
export async function getStreamMeta(streamId: string): Promise<StreamMeta | null> {
  const redis = getRedisClient()
  if (!redis) return null
  try {
    const stored = await redis.hgetall(getMetaKey(streamId))
    if (!stored) return null
    return Object.keys(stored).length > 0 ? (stored as StreamMeta) : null
  } catch (error) {
    logger.warn('Failed to read stream meta', {
      streamId,
      error: error instanceof Error ? error.message : String(error),
    })
    return null
  }
}
/**
 * Append a single event to the stream via the atomic Lua script.
 * Returns eventId 0 when Redis is unavailable or the append fails.
 */
export async function appendStreamEvent(
  streamId: string,
  event: Record<string, unknown>
): Promise<StreamEventEntry> {
  const redis = getRedisClient()
  const fallback: StreamEventEntry = { eventId: 0, streamId, event }
  if (!redis) return fallback
  try {
    const { ttlSeconds, eventLimit } = getStreamBufferConfig()
    const rawId = await redis.eval(
      APPEND_STREAM_EVENT_LUA,
      2,
      getSeqKey(streamId),
      getEventsKey(streamId),
      ttlSeconds,
      eventLimit,
      streamId,
      JSON.stringify(event)
    )
    return {
      eventId: typeof rawId === 'number' ? rawId : Number(rawId),
      streamId,
      event,
    }
  } catch (error) {
    logger.warn('Failed to append stream event', {
      streamId,
      error: error instanceof Error ? error.message : String(error),
    })
    return fallback
  }
}
/**
 * Create a batching writer for a stream's event buffer.
 *
 * Event ids are pre-reserved in blocks via INCRBY so writes assign ids
 * locally and flush in batches (pipelined ZADD) instead of one round trip
 * per event. A flush happens when the pending batch reaches flushMaxBatch,
 * on a short debounce timer, or explicitly via flush()/close().
 *
 * Failure mode is best-effort: without Redis the writer degrades to a no-op
 * (eventId 0); a failed flush re-queues its batch, bounded by eventLimit.
 */
export function createStreamEventWriter(streamId: string): StreamEventWriter {
  const redis = getRedisClient()
  if (!redis) {
    // No Redis: accept writes but buffer nothing (eventId 0 = not persisted).
    return {
      write: async (event) => ({ eventId: 0, streamId, event }),
      flush: async () => {},
      close: async () => {},
    }
  }
  const config = getStreamBufferConfig()
  let pending: StreamEventEntry[] = []
  // Next id to hand out and the highest id reserved so far (inclusive).
  let nextEventId = 0
  let maxReservedId = 0
  let flushTimer: ReturnType<typeof setTimeout> | null = null
  // Debounced flush: at most one timer outstanding at any time.
  const scheduleFlush = () => {
    if (flushTimer) return
    flushTimer = setTimeout(() => {
      flushTimer = null
      void flush()
    }, config.flushIntervalMs)
  }
  // Reserve a block of ids from the shared sequence counter in Redis.
  const reserveIds = async (minCount: number) => {
    const reserveCount = Math.max(config.reserveBatch, minCount)
    const newMax = await redis.incrby(getSeqKey(streamId), reserveCount)
    const startId = newMax - reserveCount + 1
    if (nextEventId === 0 || nextEventId > maxReservedId) {
      nextEventId = startId
      maxReservedId = newMax
    }
  }
  let flushPromise: Promise<void> | null = null
  let closed = false
  const doFlush = async () => {
    if (pending.length === 0) return
    // Take ownership of the current batch; new writes accumulate separately.
    const batch = pending
    pending = []
    try {
      const key = getEventsKey(streamId)
      const zaddArgs: (string | number)[] = []
      for (const entry of batch) {
        zaddArgs.push(entry.eventId, JSON.stringify(entry))
      }
      const pipeline = redis.pipeline()
      // NOTE(review): spread-cast works around the client's zadd typing;
      // presumably the client accepts variadic score/member pairs — confirm.
      pipeline.zadd(key, ...(zaddArgs as [number, string]))
      pipeline.expire(key, config.ttlSeconds)
      pipeline.expire(getSeqKey(streamId), config.ttlSeconds)
      pipeline.zremrangebyrank(key, 0, -config.eventLimit - 1)
      await pipeline.exec()
    } catch (error) {
      logger.warn('Failed to flush stream events', {
        streamId,
        error: error instanceof Error ? error.message : String(error),
      })
      // Re-queue the failed batch ahead of anything written meanwhile,
      // dropping the oldest entries if the backlog exceeds the event limit.
      pending = batch.concat(pending)
      if (pending.length > config.eventLimit) {
        const dropped = pending.length - config.eventLimit
        pending = pending.slice(-config.eventLimit)
        logger.warn('Dropped oldest pending stream events due to sustained Redis failure', {
          streamId,
          dropped,
          remaining: pending.length,
        })
      }
    }
  }
  // Serialize flushes: concurrent callers await the in-flight flush instead
  // of starting a second one; leftovers trigger a rescheduled flush.
  const flush = async () => {
    if (flushPromise) {
      await flushPromise
      return
    }
    flushPromise = doFlush()
    try {
      await flushPromise
    } finally {
      flushPromise = null
      if (pending.length > 0) scheduleFlush()
    }
  }
  const write = async (event: Record<string, unknown>) => {
    if (closed) return { eventId: 0, streamId, event }
    // Reserve a fresh id block when none remain.
    if (nextEventId === 0 || nextEventId > maxReservedId) {
      await reserveIds(1)
    }
    const eventId = nextEventId++
    const entry: StreamEventEntry = { eventId, streamId, event }
    pending.push(entry)
    if (pending.length >= config.flushMaxBatch) {
      await flush()
    } else {
      scheduleFlush()
    }
    return entry
  }
  const close = async () => {
    closed = true
    if (flushTimer) {
      clearTimeout(flushTimer)
      flushTimer = null
    }
    await flush()
  }
  return { write, flush, close }
}
/** Read events with eventId > afterEventId, skipping entries that fail to parse. */
export async function readStreamEvents(
  streamId: string,
  afterEventId: number
): Promise<StreamEventEntry[]> {
  const redis = getRedisClient()
  if (!redis) return []
  try {
    const rawEntries = await redis.zrangebyscore(getEventsKey(streamId), afterEventId + 1, '+inf')
    const parsed: StreamEventEntry[] = []
    for (const raw of rawEntries) {
      try {
        const entry = JSON.parse(raw) as StreamEventEntry | null
        if (entry) parsed.push(entry)
      } catch {
        // Skip corrupt entries rather than failing the whole read.
      }
    }
    return parsed
  } catch (error) {
    logger.warn('Failed to read stream events', {
      streamId,
      error: error instanceof Error ? error.message : String(error),
    })
    return []
  }
}

View File

@@ -1,264 +0,0 @@
import { createLogger } from '@sim/logger'
import { getHighestPrioritySubscription } from '@/lib/billing/core/plan'
import { isPaid } from '@/lib/billing/plan-helpers'
import { ORCHESTRATION_TIMEOUT_MS } from '@/lib/copilot/constants'
import {
handleSubagentRouting,
sseHandlers,
subAgentHandlers,
} from '@/lib/copilot/orchestrator/sse/handlers'
import { parseSSEStream } from '@/lib/copilot/orchestrator/sse/parser'
import {
normalizeSseEvent,
shouldSkipToolCallEvent,
shouldSkipToolResultEvent,
} from '@/lib/copilot/orchestrator/sse/utils'
import type {
ExecutionContext,
OrchestratorOptions,
SSEEvent,
StreamingContext,
ToolCallSummary,
} from '@/lib/copilot/orchestrator/types'
const logger = createLogger('CopilotStreamCore')
/**
* Options for the shared stream processing loop.
*/
export interface StreamLoopOptions extends OrchestratorOptions {
/**
* Called for each normalized event BEFORE standard handler dispatch.
* Return true to skip the default handler for this event.
*/
onBeforeDispatch?: (event: SSEEvent, context: StreamingContext) => boolean | undefined
}
/**
 * Create a fresh StreamingContext, with optional field overrides applied on top
 * of the defaults.
 */
export function createStreamingContext(overrides?: Partial<StreamingContext>): StreamingContext {
  const defaults: StreamingContext = {
    chatId: undefined,
    executionId: undefined,
    runId: undefined,
    messageId: crypto.randomUUID(),
    accumulatedContent: '',
    contentBlocks: [],
    toolCalls: new Map(),
    pendingToolPromises: new Map(),
    currentThinkingBlock: null,
    isInThinkingBlock: false,
    subAgentParentToolCallId: undefined,
    subAgentParentStack: [],
    subAgentContent: {},
    subAgentToolCalls: {},
    pendingContent: '',
    streamComplete: false,
    wasAborted: false,
    errors: [],
  }
  return { ...defaults, ...overrides }
}
/**
* Run the SSE stream processing loop.
*
* Handles: fetch -> parse -> normalize -> dedupe -> subagent routing -> handler dispatch.
* Callers provide the fetch URL/options and can intercept events via onBeforeDispatch.
*/
export async function runStreamLoop(
fetchUrl: string,
fetchOptions: RequestInit,
context: StreamingContext,
execContext: ExecutionContext,
options: StreamLoopOptions
): Promise<void> {
const { timeout = ORCHESTRATION_TIMEOUT_MS, abortSignal } = options
const response = await fetch(fetchUrl, {
...fetchOptions,
signal: abortSignal,
})
if (!response.ok) {
const errorText = await response.text().catch(() => '')
if (response.status === 402) {
let action = 'upgrade_plan'
let message = "You've reached your usage limit. Please upgrade your plan to continue."
try {
const sub = await getHighestPrioritySubscription(execContext.userId)
if (sub && isPaid(sub.plan)) {
action = 'increase_limit'
message =
"You've reached your usage limit for this billing period. Please increase your usage limit to continue."
}
} catch {
// Fall back to upgrade_plan if we can't determine the plan
}
const upgradePayload = JSON.stringify({
reason: 'usage_limit',
action,
message,
})
const syntheticContent = `<usage_upgrade>${upgradePayload}</usage_upgrade>`
const syntheticEvents: SSEEvent[] = [
{ type: 'content', data: syntheticContent as unknown as Record<string, unknown> },
{ type: 'done', data: {} },
]
for (const event of syntheticEvents) {
try {
await options.onEvent?.(event)
} catch {
// best-effort forwarding
}
const handler = sseHandlers[event.type]
if (handler) {
await handler(event, context, execContext, options)
}
if (context.streamComplete) break
}
return
}
throw new Error(
`Copilot backend error (${response.status}): ${errorText || response.statusText}`
)
}
if (!response.body) {
throw new Error('Copilot backend response missing body')
}
const reader = response.body.getReader()
const decoder = new TextDecoder()
const timeoutId = setTimeout(() => {
context.errors.push('Request timed out')
context.streamComplete = true
reader.cancel().catch(() => {})
}, timeout)
try {
for await (const event of parseSSEStream(reader, decoder, abortSignal)) {
if (abortSignal?.aborted) {
context.wasAborted = true
await reader.cancel().catch(() => {})
break
}
const normalizedEvent = normalizeSseEvent(event)
// Skip duplicate tool events — both forwarding AND handler dispatch.
const shouldSkipToolCall = shouldSkipToolCallEvent(normalizedEvent)
const shouldSkipToolResult = shouldSkipToolResultEvent(normalizedEvent)
if (shouldSkipToolCall || shouldSkipToolResult) {
continue
}
try {
await options.onEvent?.(normalizedEvent)
} catch (error) {
logger.withMetadata({ messageId: context.messageId }).warn('Failed to forward SSE event', {
type: normalizedEvent.type,
error: error instanceof Error ? error.message : String(error),
})
}
// Let the caller intercept before standard dispatch.
if (options.onBeforeDispatch?.(normalizedEvent, context)) {
if (context.streamComplete) break
continue
}
// Standard subagent start/end handling (stack-based for nested agents).
if (normalizedEvent.type === 'subagent_start') {
const eventData = normalizedEvent.data as Record<string, unknown> | undefined
const toolCallId = eventData?.tool_call_id as string | undefined
const subagentName = normalizedEvent.subagent || (eventData?.agent as string | undefined)
if (toolCallId) {
context.subAgentParentStack.push(toolCallId)
context.subAgentParentToolCallId = toolCallId
context.subAgentContent[toolCallId] = ''
context.subAgentToolCalls[toolCallId] = []
}
if (subagentName) {
context.contentBlocks.push({
type: 'subagent',
content: subagentName,
timestamp: Date.now(),
})
}
continue
}
if (normalizedEvent.type === 'subagent_end') {
if (context.subAgentParentStack.length > 0) {
context.subAgentParentStack.pop()
} else {
logger
.withMetadata({ messageId: context.messageId })
.warn('subagent_end without matching subagent_start')
}
context.subAgentParentToolCallId =
context.subAgentParentStack.length > 0
? context.subAgentParentStack[context.subAgentParentStack.length - 1]
: undefined
continue
}
// Subagent event routing.
if (handleSubagentRouting(normalizedEvent, context)) {
const handler = subAgentHandlers[normalizedEvent.type]
if (handler) {
await handler(normalizedEvent, context, execContext, options)
}
if (context.streamComplete) break
continue
}
// Main event handler dispatch.
const handler = sseHandlers[normalizedEvent.type]
if (handler) {
await handler(normalizedEvent, context, execContext, options)
}
if (context.streamComplete) break
}
} finally {
if (abortSignal?.aborted) {
context.wasAborted = true
await reader.cancel().catch(() => {})
}
clearTimeout(timeoutId)
}
}
/**
* Build a ToolCallSummary array from the streaming context.
*/
/**
 * Flatten the per-call state tracked in a StreamingContext into the
 * ToolCallSummary shape used for reporting.
 *
 * Status resolution order: an explicit result.success wins; otherwise a
 * call still marked pending/executing that carries an error is reported
 * as 'error'; otherwise the tracked status passes through unchanged.
 */
export function buildToolCallSummaries(context: StreamingContext): ToolCallSummary[] {
  const summaries: ToolCallSummary[] = []
  for (const call of context.toolCalls.values()) {
    let outcome = call.status
    if (call.result?.success !== undefined) {
      outcome = call.result.success ? 'success' : 'error'
    } else if (call.error && (outcome === 'pending' || outcome === 'executing')) {
      outcome = 'error'
    }
    // Duration is only meaningful when both timestamps were recorded.
    const durationMs =
      call.startTime && call.endTime ? call.endTime - call.startTime : undefined
    summaries.push({
      id: call.id,
      name: call.name,
      status: outcome,
      params: call.params,
      result: call.result?.output,
      error: call.error,
      durationMs,
    })
  }
  return summaries
}

View File

@@ -1,2 +0,0 @@
export * from './deploy'
export * from './manage'

File diff suppressed because it is too large Load Diff

View File

@@ -1,285 +0,0 @@
import { db } from '@sim/db'
import { account } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import type {
ExecutionContext,
ToolCallResult,
ToolCallState,
} from '@/lib/copilot/orchestrator/types'
import { isHosted } from '@/lib/core/config/feature-flags'
import { generateRequestId } from '@/lib/core/utils/request'
import { getCredentialActorContext } from '@/lib/credentials/access'
import { getAccessibleOAuthCredentials } from '@/lib/credentials/environment'
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
import { getServiceAccountProviderForProviderId } from '@/lib/oauth/utils'
import { getTableById, queryRows } from '@/lib/table/service'
import {
downloadWorkspaceFile,
findWorkspaceFileRecord,
getSandboxWorkspaceFilePath,
listWorkspaceFiles,
} from '@/lib/uploads/contexts/workspace/workspace-file-manager'
import { getWorkflowById } from '@/lib/workflows/utils'
import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils'
import { resolveEnvVarReferences } from '@/executor/utils/reference-validation'
import { executeTool } from '@/tools'
import type { ToolConfig } from '@/tools/types'
import { resolveToolId } from '@/tools/utils'
const logger = createLogger('CopilotIntegrationTools')
/**
 * Serialize a single cell value for CSV output.
 *
 * null/undefined become the empty string; numbers and booleans are
 * stringified verbatim; any other value is stringified and, when it
 * contains a comma, double quote, or CR/LF, wrapped in quotes with
 * embedded quotes doubled (RFC 4180 style).
 */
function csvEscapeValue(value: unknown): string {
  // == null matches both null and undefined.
  if (value == null) return ''
  const kind = typeof value
  if (kind === 'number' || kind === 'boolean') return String(value)
  const text = String(value)
  const needsQuoting = /[,"\n\r]/.test(text)
  return needsQuoting ? `"${text.replace(/"/g, '""')}"` : text
}
export async function executeIntegrationToolDirect(
toolCall: ToolCallState,
toolConfig: ToolConfig,
context: ExecutionContext
): Promise<ToolCallResult> {
const { userId, workflowId } = context
const toolName = resolveToolId(toolCall.name)
const toolArgs = toolCall.params || {}
let workspaceId = context.workspaceId
if (!workspaceId && workflowId) {
const wf = await getWorkflowById(workflowId)
workspaceId = wf?.workspaceId ?? undefined
}
const decryptedEnvVars =
context.decryptedEnvVars || (await getEffectiveDecryptedEnv(userId, workspaceId))
const executionParams = resolveEnvVarReferences(toolArgs, decryptedEnvVars, {
deep: true,
}) as Record<string, unknown>
// If the LLM passed a credential/oauthCredential ID directly, verify the user
// has active credential_member access before proceeding. This prevents
// unauthorized credential usage even if the agent hallucinated or received
// a credential ID the user doesn't have access to.
const suppliedCredentialId = (executionParams.credentialId ||
executionParams.oauthCredential ||
executionParams.credential) as string | undefined
if (suppliedCredentialId) {
const actorCtx = await getCredentialActorContext(suppliedCredentialId, userId)
if (!actorCtx.member) {
logger.warn('Blocked credential use: user lacks credential_member access', {
credentialId: suppliedCredentialId,
userId,
toolName,
})
return {
success: false,
error: `You do not have access to credential "${suppliedCredentialId}". Ask the credential admin to add you as a member, or connect your own account.`,
}
}
}
if (toolConfig.oauth?.required && toolConfig.oauth.provider) {
const provider = toolConfig.oauth.provider
// Determine which credential to use: supplied by the LLM or auto-resolved
let resolvedCredentialId = suppliedCredentialId
if (!resolvedCredentialId) {
if (!workspaceId) {
return {
success: false,
error: `Cannot resolve ${provider} credential without a workspace context.`,
}
}
const accessibleCreds = await getAccessibleOAuthCredentials(workspaceId, userId)
const saProviderId = getServiceAccountProviderForProviderId(provider)
const match =
accessibleCreds.find((c) => c.providerId === provider) ||
(saProviderId ? accessibleCreds.find((c) => c.providerId === saProviderId) : undefined)
if (!match) {
return {
success: false,
error: `No accessible ${provider} account found. You either don't have a ${provider} account connected in this workspace, or you don't have access to the existing one. Please connect your own account.`,
}
}
resolvedCredentialId = match.id
}
const matchCtx = await getCredentialActorContext(resolvedCredentialId, userId)
if (matchCtx.credential?.type === 'service_account') {
executionParams.oauthCredential = resolvedCredentialId
} else {
const accountId = matchCtx.credential?.accountId
if (!accountId) {
return {
success: false,
error: `OAuth account for ${provider} not found. Please reconnect your account.`,
}
}
const [acc] = await db.select().from(account).where(eq(account.id, accountId)).limit(1)
if (!acc) {
return {
success: false,
error: `OAuth account for ${provider} not found. Please reconnect your account.`,
}
}
const requestId = generateRequestId()
const { accessToken } = await refreshTokenIfNeeded(requestId, acc, acc.id)
if (!accessToken) {
return {
success: false,
error: `OAuth token not available for ${provider}. Please reconnect your account.`,
}
}
executionParams.accessToken = accessToken
}
}
const hasHostedKeySupport = isHosted && !!toolConfig.hosting
if (toolConfig.params?.apiKey?.required && !executionParams.apiKey && !hasHostedKeySupport) {
return {
success: false,
error: `API key not provided for ${toolName}. Use {{YOUR_API_KEY_ENV_VAR}} to reference your environment variable.`,
}
}
executionParams._context = {
workflowId,
workspaceId,
userId,
enforceCredentialAccess: true,
}
if (toolName === 'function_execute') {
executionParams.envVars = decryptedEnvVars
executionParams.workflowVariables = {}
executionParams.blockData = {}
executionParams.blockNameMapping = {}
executionParams.language = executionParams.language || 'javascript'
executionParams.timeout = executionParams.timeout || 30000
if (isHosted && workspaceId) {
const sandboxFiles: Array<{ path: string; content: string }> = []
const MAX_FILE_SIZE = 10 * 1024 * 1024
const MAX_TOTAL_SIZE = 50 * 1024 * 1024
const TEXT_EXTENSIONS = new Set([
'csv',
'json',
'txt',
'md',
'html',
'xml',
'tsv',
'yaml',
'yml',
])
let totalSize = 0
const inputFileIds = executionParams.inputFiles as string[] | undefined
if (inputFileIds?.length) {
const allFiles = await listWorkspaceFiles(workspaceId)
for (const fileRef of inputFileIds) {
const record = findWorkspaceFileRecord(allFiles, fileRef)
if (!record) {
logger.warn('Sandbox input file not found', { fileRef })
continue
}
const ext = record.name.split('.').pop()?.toLowerCase() ?? ''
if (!TEXT_EXTENSIONS.has(ext)) {
logger.warn('Skipping non-text sandbox input file', {
fileId: record.id,
fileName: record.name,
ext,
})
continue
}
if (record.size > MAX_FILE_SIZE) {
logger.warn('Sandbox input file exceeds size limit', {
fileId: record.id,
fileName: record.name,
size: record.size,
})
continue
}
if (totalSize + record.size > MAX_TOTAL_SIZE) {
logger.warn('Sandbox input total size limit reached, skipping remaining files')
break
}
const buffer = await downloadWorkspaceFile(record)
totalSize += buffer.length
const textContent = buffer.toString('utf-8')
sandboxFiles.push({
path: getSandboxWorkspaceFilePath(record),
content: textContent,
})
sandboxFiles.push({
path: `/home/user/${record.name}`,
content: textContent,
})
}
}
const inputTableIds = executionParams.inputTables as string[] | undefined
if (inputTableIds?.length) {
for (const tableId of inputTableIds) {
const table = await getTableById(tableId)
if (!table) {
logger.warn('Sandbox input table not found', { tableId })
continue
}
const { rows } = await queryRows(tableId, workspaceId, { limit: 10000 }, 'sandbox-input')
const schema = table.schema as { columns: Array<{ name: string; type?: string }> }
const cols = schema.columns.map((c) => c.name)
const typeComment = `# types: ${schema.columns.map((c) => `${c.name}=${c.type || 'string'}`).join(', ')}`
const csvLines = [typeComment, cols.join(',')]
for (const row of rows) {
csvLines.push(
cols.map((c) => csvEscapeValue((row.data as Record<string, unknown>)[c])).join(',')
)
}
const csvContent = csvLines.join('\n')
if (totalSize + csvContent.length > MAX_TOTAL_SIZE) {
logger.warn('Sandbox input total size limit reached, skipping remaining tables')
break
}
totalSize += csvContent.length
sandboxFiles.push({ path: `/home/user/tables/${tableId}.csv`, content: csvContent })
}
}
if (sandboxFiles.length > 0) {
executionParams._sandboxFiles = sandboxFiles
logger.info('Prepared sandbox input files', {
fileCount: sandboxFiles.length,
totalSize,
paths: sandboxFiles.map((f) => f.path),
})
}
executionParams.inputFiles = undefined
executionParams.inputTables = undefined
}
}
const result = await executeTool(toolName, executionParams)
return {
success: result.success,
output: result.output,
error: result.error,
}
}

View File

@@ -1,2 +0,0 @@
export * from './mutations'
export * from './queries'

View File

@@ -1,9 +1,13 @@
import { createLogger } from '@sim/logger'
import type { AsyncCompletionEnvelope } from '@/lib/copilot/async-runs/lifecycle'
import { ASYNC_TOOL_STATUS, type AsyncCompletionEnvelope } from '@/lib/copilot/async-runs/lifecycle'
import { getAsyncToolCalls } from '@/lib/copilot/async-runs/repository'
import { MothershipStreamV1ToolOutcome } from '@/lib/copilot/generated/mothership-stream-v1'
import { getRedisClient } from '@/lib/core/config/redis'
import { createPubSubChannel } from '@/lib/events/pubsub'
const logger = createLogger('CopilotOrchestratorPersistence')
const TOOL_CONFIRMATION_TTL_SECONDS = 60 * 10
const toolConfirmationKey = (toolCallId: string) => `copilot:tool-confirmation:${toolCallId}`
const toolConfirmationChannel = createPubSubChannel<AsyncCompletionEnvelope>({
channel: 'copilot:tool-confirmation',
@@ -19,16 +23,22 @@ export async function getToolConfirmation(toolCallId: string): Promise<{
timestamp?: string
data?: Record<string, unknown>
} | null> {
const [row] = await getAsyncToolCalls([toolCallId]).catch(() => [])
const [row] = await getAsyncToolCalls([toolCallId]).catch((err) => {
logger.warn('Failed to fetch async tool calls', {
toolCallId,
error: err instanceof Error ? err.message : String(err),
})
return []
})
if (!row) return null
return {
status:
row.status === 'completed'
? 'success'
: row.status === 'failed'
? 'error'
: row.status === 'cancelled'
? 'cancelled'
row.status === ASYNC_TOOL_STATUS.completed
? MothershipStreamV1ToolOutcome.success
: row.status === ASYNC_TOOL_STATUS.failed
? MothershipStreamV1ToolOutcome.error
: row.status === ASYNC_TOOL_STATUS.cancelled
? MothershipStreamV1ToolOutcome.cancelled
: row.status,
message: row.error || undefined,
data: (row.result as Record<string, unknown> | null) || undefined,
@@ -41,6 +51,34 @@ export function publishToolConfirmation(event: AsyncCompletionEnvelope): void {
toolCallId: event.toolCallId,
status: event.status,
})
const redis = getRedisClient()
if (redis) {
void redis
.set(
toolConfirmationKey(event.toolCallId),
JSON.stringify(event),
'EX',
TOOL_CONFIRMATION_TTL_SECONDS
)
.then(() => {
logger.info('Persisted tool confirmation in Redis', {
toolCallId: event.toolCallId,
status: event.status,
redisKey: toolConfirmationKey(event.toolCallId),
})
})
.catch((error) => {
logger.warn('Failed to persist tool confirmation in Redis', {
toolCallId: event.toolCallId,
error: error instanceof Error ? error.message : String(error),
})
})
} else {
logger.warn('Redis unavailable while publishing tool confirmation', {
toolCallId: event.toolCallId,
status: event.status,
})
}
toolConfirmationChannel.publish(event)
}

View File

@@ -33,7 +33,7 @@ import {
getToolConfirmation,
publishToolConfirmation,
waitForToolConfirmation,
} from './persistence'
} from '@/lib/copilot/persistence/tool-confirm'
describe('copilot orchestrator persistence', () => {
let row: {

View File

@@ -0,0 +1,30 @@
import { TraceCollector } from '@/lib/copilot/request/trace'
import type { StreamingContext } from '@/lib/copilot/request/types'
/**
* Create a fresh StreamingContext.
*/
/**
 * Build a brand-new StreamingContext with every field reset to its empty
 * default state, a freshly generated messageId, and a new TraceCollector
 * attached.
 *
 * @param overrides Optional subset of fields to replace in the defaults.
 * @returns A fully initialised StreamingContext ready for a stream run.
 */
export function createStreamingContext(overrides?: Partial<StreamingContext>): StreamingContext {
  const defaults: StreamingContext = {
    chatId: undefined,
    executionId: undefined,
    runId: undefined,
    messageId: crypto.randomUUID(),
    accumulatedContent: '',
    contentBlocks: [],
    toolCalls: new Map(),
    pendingToolPromises: new Map(),
    currentThinkingBlock: null,
    isInThinkingBlock: false,
    subAgentParentToolCallId: undefined,
    subAgentParentStack: [],
    subAgentContent: {},
    subAgentToolCalls: {},
    pendingContent: '',
    streamComplete: false,
    wasAborted: false,
    errors: [],
    trace: new TraceCollector(),
  }
  // Overrides win over defaults, matching the original spread order.
  return { ...defaults, ...overrides }
}

View File

@@ -2,8 +2,10 @@
* @vitest-environment node
*/
import { describe, expect, it } from 'vitest'
import { buildToolCallSummaries } from '@/lib/copilot/orchestrator/stream/core'
import type { StreamingContext } from '@/lib/copilot/orchestrator/types'
import { FunctionExecute } from '@/lib/copilot/generated/tool-catalog-v1'
import { buildToolCallSummaries } from '@/lib/copilot/request/context/result'
import { TraceCollector } from '@/lib/copilot/request/trace'
import type { StreamingContext } from '@/lib/copilot/request/types'
function makeContext(): StreamingContext {
return {
@@ -27,6 +29,7 @@ function makeContext(): StreamingContext {
streamComplete: false,
wasAborted: false,
errors: [],
trace: new TraceCollector(),
}
}
@@ -50,7 +53,7 @@ describe('buildToolCallSummaries', () => {
const context = makeContext()
context.toolCalls.set('tool-2', {
id: 'tool-2',
name: 'function_execute',
name: FunctionExecute.id,
status: 'executing',
startTime: 1,
})

View File

@@ -0,0 +1,29 @@
import { MothershipStreamV1ToolOutcome } from '@/lib/copilot/generated/mothership-stream-v1'
import type { StreamingContext, ToolCallSummary } from '@/lib/copilot/request/types'
/**
* Build a ToolCallSummary array from the streaming context.
*/
/**
 * Flatten the per-call state tracked in a StreamingContext into the
 * ToolCallSummary shape used for reporting.
 *
 * Status resolution order: an explicit result.success wins (mapped onto
 * the generated MothershipStreamV1ToolOutcome values); otherwise a call
 * still marked pending/executing that carries an error is reported as an
 * error; otherwise the tracked status passes through unchanged.
 */
export function buildToolCallSummaries(context: StreamingContext): ToolCallSummary[] {
  const summaries: ToolCallSummary[] = []
  for (const call of context.toolCalls.values()) {
    let outcome = call.status
    if (call.result?.success !== undefined) {
      outcome = call.result.success
        ? MothershipStreamV1ToolOutcome.success
        : MothershipStreamV1ToolOutcome.error
    } else if (call.error && (outcome === 'pending' || outcome === 'executing')) {
      outcome = MothershipStreamV1ToolOutcome.error
    }
    // Duration is only meaningful when both timestamps were recorded.
    const durationMs =
      call.startTime && call.endTime ? call.endTime - call.startTime : undefined
    summaries.push({
      id: call.id,
      name: call.name,
      status: outcome,
      params: call.params,
      result: call.result?.output,
      error: call.error,
      durationMs,
    })
  }
  return summaries
}

View File

@@ -1,16 +1,23 @@
import { createLogger } from '@sim/logger'
import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
const logger = createLogger('CopilotSseParser')
/**
* Parses SSE streams from the copilot backend into typed events.
* Processes an SSE stream by calling onEvent synchronously for each parsed event
* within a single reader.read() chunk. All events from one chunk are processed
* in the same microtask — no yield/next() boundaries between them.
*
* Replaces the async generator approach which incurred 2 microtask yields per
* event (one for yield, one for the consumer's next() resumption).
*
* @param onEvent Called synchronously per parsed event. Return true to stop processing.
*/
export async function* parseSSEStream(
export async function processSSEStream(
reader: ReadableStreamDefaultReader<Uint8Array>,
decoder: TextDecoder,
abortSignal?: AbortSignal
): AsyncGenerator<SSEEvent> {
abortSignal: AbortSignal | undefined,
onEvent: (event: unknown) => boolean | undefined
): Promise<void> {
let buffer = ''
try {
@@ -28,6 +35,7 @@ export async function* parseSSEStream(
const lines = buffer.split('\n')
buffer = lines.pop() || ''
let stopped = false
for (const line of lines) {
if (abortSignal?.aborted) {
logger.info('SSE stream aborted mid-chunk (between events)')
@@ -40,9 +48,9 @@ export async function* parseSSEStream(
if (jsonStr === '[DONE]') continue
try {
const event = JSON.parse(jsonStr) as SSEEvent
if (event?.type) {
yield event
if (onEvent(JSON.parse(jsonStr))) {
stopped = true
break
}
} catch (error) {
logger.warn('Failed to parse SSE event', {
@@ -51,6 +59,7 @@ export async function* parseSSEStream(
})
}
}
if (stopped) break
}
} catch (error) {
const aborted =
@@ -64,10 +73,7 @@ export async function* parseSSEStream(
if (buffer.trim() && buffer.startsWith('data: ')) {
try {
const event = JSON.parse(buffer.slice(6)) as SSEEvent
if (event?.type) {
yield event
}
onEvent(JSON.parse(buffer.slice(6)))
} catch (error) {
logger.warn('Failed to parse final SSE buffer', {
preview: buffer.slice(0, 200),

View File

@@ -0,0 +1,282 @@
import { createLogger } from '@sim/logger'
import { ORCHESTRATION_TIMEOUT_MS } from '@/lib/copilot/constants'
import {
MothershipStreamV1EventType,
MothershipStreamV1SpanLifecycleEvent,
MothershipStreamV1SpanPayloadKind,
} from '@/lib/copilot/generated/mothership-stream-v1'
import { processSSEStream } from '@/lib/copilot/request/go/parser'
import {
handleSubagentRouting,
sseHandlers,
subAgentHandlers,
} from '@/lib/copilot/request/handlers'
import { eventToStreamEvent, isEventRecord } from '@/lib/copilot/request/session'
import { shouldSkipToolCallEvent, shouldSkipToolResultEvent } from '@/lib/copilot/request/sse-utils'
import type {
ExecutionContext,
OrchestratorOptions,
StreamEvent,
StreamingContext,
} from '@/lib/copilot/request/types'
const logger = createLogger('CopilotGoStream')
/** Error raised when the Go copilot backend returns a non-OK HTTP response. */
export class CopilotBackendError extends Error {
  /** HTTP status code of the failed response, when one was received. */
  status?: number
  /** Response body (or statusText fallback) captured for diagnostics. */
  body?: string

  constructor(message: string, options: { status?: number; body?: string } = {}) {
    super(message)
    this.name = 'CopilotBackendError'
    const { status, body } = options
    this.status = status
    this.body = body
  }
}
/** Raised when the backend rejects a request with HTTP 402 (usage limit reached). */
export class BillingLimitError extends Error {
  /** The user whose usage limit was exceeded. */
  readonly userId: string

  constructor(userId: string) {
    super('Usage limit reached')
    this.name = 'BillingLimitError'
    this.userId = userId
  }
}
/**
 * Options for the shared stream processing loop.
 *
 * Extends OrchestratorOptions with a hook that lets callers intercept
 * events before the standard handler dispatch in runStreamLoop.
 */
export interface StreamLoopOptions extends OrchestratorOptions {
  /**
   * Called for each normalized event BEFORE standard handler dispatch.
   * Return true to skip the default handler for this event. The loop
   * still terminates afterwards if the interceptor set
   * context.streamComplete.
   */
  onBeforeDispatch?: (event: StreamEvent, context: StreamingContext) => boolean | undefined
}
// Pre-resolve text handlers at module level to avoid map lookups in the hot path.
const textHandler = sseHandlers[MothershipStreamV1EventType.text]
const subagentTextHandler = subAgentHandlers[MothershipStreamV1EventType.text]
/**
 * Run the SSE stream processing loop against the Go backend.
 *
 * Handles: fetch -> parse -> normalize -> dedupe -> subagent routing -> handler dispatch.
 * Callers provide the fetch URL/options and can intercept events via onBeforeDispatch.
 *
 * Optimised hot path: text events (the most frequent) bypass tool-call dedup
 * checks and are dispatched synchronously without any await, eliminating ~4
 * microtask yields per text event vs the previous async-generator + await chain.
 *
 * @param fetchUrl     Backend endpoint to stream from.
 * @param fetchOptions RequestInit forwarded to fetch (method/headers/body).
 * @param context      Mutable per-message streaming state; updated in place.
 * @param execContext  Execution identity (userId etc.) passed to handlers.
 * @param options      Loop options, including the onBeforeDispatch interceptor.
 * @throws BillingLimitError   when the backend responds with HTTP 402.
 * @throws CopilotBackendError on any other non-OK response or a missing body.
 */
export async function runStreamLoop(
  fetchUrl: string,
  fetchOptions: RequestInit,
  context: StreamingContext,
  execContext: ExecutionContext,
  options: StreamLoopOptions
): Promise<void> {
  const { timeout = ORCHESTRATION_TIMEOUT_MS, abortSignal } = options
  // Trace the initial HTTP round trip as its own span.
  const fetchSpan = context.trace.startSpan(
    `HTTP Request → ${new URL(fetchUrl).pathname}`,
    'sim.http.fetch',
    { url: fetchUrl }
  )
  const response = await fetch(fetchUrl, {
    ...fetchOptions,
    signal: abortSignal,
  })
  if (!response.ok) {
    context.trace.endSpan(fetchSpan, 'error')
    const errorText = await response.text().catch(() => '')
    // 402 is surfaced as a typed billing error so callers can react.
    if (response.status === 402) {
      throw new BillingLimitError(execContext.userId)
    }
    throw new CopilotBackendError(
      `Copilot backend error (${response.status}): ${errorText || response.statusText}`,
      { status: response.status, body: errorText || response.statusText }
    )
  }
  if (!response.body) {
    context.trace.endSpan(fetchSpan, 'error')
    throw new CopilotBackendError('Copilot backend response missing body')
  }
  context.trace.endSpan(fetchSpan)
  const reader = response.body.getReader()
  const decoder = new TextDecoder()
  // Hard deadline: record the timeout, mark the stream complete, and
  // cancel the reader so processSSEStream unwinds.
  const timeoutId = setTimeout(() => {
    context.errors.push('Request timed out')
    context.streamComplete = true
    reader.cancel().catch(() => {})
  }, timeout)
  try {
    await processSSEStream(reader, decoder, abortSignal, (raw) => {
      // --- Abort gate (sync check, no await) ---
      // Returning true tells processSSEStream to stop reading.
      if (abortSignal?.aborted) {
        context.wasAborted = true
        return true
      }
      if (!isEventRecord(raw)) {
        logger.warn('Received non-contract stream event on shared path; dropping event')
        return
      }
      const streamEvent = eventToStreamEvent(raw)
      // Propagate the Go-side request id into the trace, when present.
      if (raw.trace?.requestId) {
        context.requestId = raw.trace.requestId
        context.trace.setGoTraceId(raw.trace.requestId)
      }
      // ---------------------------------------------------------------
      // FAST PATH — text events
      //
      // Text is the most frequent event type. We skip two things that
      // can never match for text events:
      //   • shouldSkipToolCallEvent (early-exits for type !== 'tool')
      //   • shouldSkipToolResultEvent (early-exits for type !== 'tool')
      //
      // All calls in this path are synchronous: onEvent (publish) returns
      // void, and both textHandler / subagentTextHandler return void.
      // Eliminating the awaits saves 2 microtask yields per text event
      // (on top of the 2 saved by replacing the async generator).
      // ---------------------------------------------------------------
      if (streamEvent.type === MothershipStreamV1EventType.text) {
        try {
          options.onEvent?.(streamEvent)
        } catch (error) {
          // Forwarding is best-effort; handler dispatch still proceeds.
          logger.warn('Failed to forward stream event', {
            type: streamEvent.type,
            error: error instanceof Error ? error.message : String(error),
          })
        }
        if (options.onBeforeDispatch?.(streamEvent, context)) {
          return context.streamComplete || undefined
        }
        if (handleSubagentRouting(streamEvent, context)) {
          subagentTextHandler(streamEvent, context, execContext, options)
        } else {
          textHandler(streamEvent, context, execContext, options)
        }
        return context.streamComplete || undefined
      }
      // ---------------------------------------------------------------
      // STANDARD PATH — all other event types
      // ---------------------------------------------------------------
      if (shouldSkipToolCallEvent(streamEvent) || shouldSkipToolResultEvent(streamEvent)) {
        return
      }
      // onEvent (publish) is synchronous — no await needed.
      try {
        options.onEvent?.(streamEvent)
      } catch (error) {
        logger.warn('Failed to forward stream event', {
          type: streamEvent.type,
          error: error instanceof Error ? error.message : String(error),
        })
      }
      if (options.onBeforeDispatch?.(streamEvent, context)) {
        return context.streamComplete || undefined
      }
      // --- Subagent span lifecycle ---
      // Maintains a stack of parent tool-call ids so nested subagents
      // route their events to the correct parent.
      if (
        streamEvent.type === MothershipStreamV1EventType.span &&
        streamEvent.payload.kind === MothershipStreamV1SpanPayloadKind.subagent
      ) {
        const spanData =
          streamEvent.payload.data &&
          typeof streamEvent.payload.data === 'object' &&
          !Array.isArray(streamEvent.payload.data)
            ? (streamEvent.payload.data as Record<string, unknown>)
            : undefined
        const toolCallId =
          (streamEvent.payload.parentToolCallId as string | undefined) ||
          (spanData?.tool_call_id as string | undefined)
        const subagentName = streamEvent.payload.agent as string | undefined
        const spanEvent = streamEvent.payload.event as string | undefined
        const isPendingPause = spanData?.pending === true
        if (spanEvent === MothershipStreamV1SpanLifecycleEvent.start) {
          const lastParent = context.subAgentParentStack[context.subAgentParentStack.length - 1]
          const lastBlock = context.contentBlocks[context.contentBlocks.length - 1]
          if (toolCallId) {
            // Avoid double-pushing the same parent on resume/re-entry.
            if (lastParent !== toolCallId) {
              context.subAgentParentStack.push(toolCallId)
            }
            context.subAgentParentToolCallId = toolCallId
            context.subAgentContent[toolCallId] ??= ''
            context.subAgentToolCalls[toolCallId] ??= []
          }
          // Only emit a new subagent content block if this isn't a
          // duplicate of the block we already pushed for this parent.
          if (
            subagentName &&
            !(
              lastParent === toolCallId &&
              lastBlock?.type === 'subagent' &&
              lastBlock.content === subagentName
            )
          ) {
            context.contentBlocks.push({
              type: 'subagent',
              content: subagentName,
              timestamp: Date.now(),
            })
          }
          return
        }
        if (spanEvent === MothershipStreamV1SpanLifecycleEvent.end) {
          // A "pending" end is a pause, not a真 termination — keep the
          // stack intact. NOTE(review): pending semantics inferred from
          // the `pending === true` flag only — confirm against the Go
          // contract.
          if (isPendingPause) {
            return
          }
          if (context.subAgentParentStack.length > 0) {
            context.subAgentParentStack.pop()
          } else {
            logger.warn('subagent end without matching start')
          }
          context.subAgentParentToolCallId =
            context.subAgentParentStack.length > 0
              ? context.subAgentParentStack[context.subAgentParentStack.length - 1]
              : undefined
          return
        }
      }
      // --- Subagent-scoped event dispatch ---
      if (handleSubagentRouting(streamEvent, context)) {
        const handler = subAgentHandlers[streamEvent.type]
        if (handler) {
          // All current subagent handlers (text, tool, span) resolve
          // synchronously or fire-and-forget their async work internally.
          // Calling without await saves 1 microtask yield per event.
          handler(streamEvent, context, execContext, options)
        }
        return context.streamComplete || undefined
      }
      // --- Main handler dispatch ---
      const handler = sseHandlers[streamEvent.type]
      if (handler) {
        // session, complete, error, run, span handlers are synchronous.
        // tool handler is async but resolves immediately (fire-and-forget
        // internal dispatch). Calling without await saves 1 microtask yield.
        handler(streamEvent, context, execContext, options)
      }
      return context.streamComplete || undefined
    })
  } finally {
    if (abortSignal?.aborted) {
      context.wasAborted = true
      await reader.cancel().catch(() => {})
    }
    // Always clear the deadline timer, even on throw/abort.
    clearTimeout(timeoutId)
  }
}

View File

@@ -0,0 +1,29 @@
import { asRecord, getEventData } from '@/lib/copilot/request/sse-utils'
import type { StreamHandler } from './types'
/**
 * Terminal stream event: fold any reported usage/cost totals into the
 * context accumulators, then mark the stream as complete.
 */
export const handleCompleteEvent: StreamHandler = (event, context) => {
  const data = getEventData(event)
  if (data) {
    if (data.usage) {
      const usage = asRecord(data.usage)
      const priorPrompt = context.usage?.prompt || 0
      const priorCompletion = context.usage?.completion || 0
      context.usage = {
        prompt: priorPrompt + ((usage.input_tokens as number) || 0),
        completion: priorCompletion + ((usage.output_tokens as number) || 0),
      }
    }
    if (data.cost) {
      const cost = asRecord(data.cost)
      const prior = context.cost
      context.cost = {
        input: (prior?.input || 0) + ((cost.input as number) || 0),
        output: (prior?.output || 0) + ((cost.output as number) || 0),
        total: (prior?.total || 0) + ((cost.total as number) || 0),
      }
    }
  }
  // Completion is signalled regardless of whether a payload was present.
  context.streamComplete = true
}

View File

@@ -0,0 +1,11 @@
import { getEventData } from '@/lib/copilot/request/sse-utils'
import type { StreamHandler } from './types'
/**
 * Error stream event: record the reported message (the `message` field,
 * falling back to `error`) when one is present, then terminate the stream.
 */
export const handleErrorEvent: StreamHandler = (event, context) => {
  const payload = getEventData(event)
  const reported = (payload?.message || payload?.error) as string | undefined
  if (reported) {
    context.errors.push(reported)
  }
  context.streamComplete = true
}

View File

@@ -0,0 +1,509 @@
/**
* @vitest-environment node
*/
import { loggerMock } from '@sim/testing'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { TraceCollector } from '@/lib/copilot/request/trace'
// Silence real logging for the whole suite.
vi.mock('@sim/logger', () => loggerMock)
// vi.hoisted so the spies exist before the hoisted vi.mock factories below
// run; destructured names are shared with the test bodies for assertions.
const { isSimExecuted, executeTool, ensureHandlersRegistered } = vi.hoisted(() => ({
  isSimExecuted: vi.fn().mockReturnValue(true),
  executeTool: vi.fn().mockResolvedValue({ success: true, output: { ok: true } }),
  ensureHandlersRegistered: vi.fn(),
}))
const { upsertAsyncToolCall, markAsyncToolRunning, completeAsyncToolCall } = vi.hoisted(() => ({
  upsertAsyncToolCall: vi.fn(),
  markAsyncToolRunning: vi.fn(),
  completeAsyncToolCall: vi.fn(),
}))
// Replace the sim-side tool executor with the hoisted spies.
vi.mock('@/lib/copilot/tool-executor', () => ({
  isSimExecuted,
  executeTool,
  ensureHandlersRegistered,
}))
// Keep the real repository module but stub only the async-run persistence
// calls so individual tests can simulate DB failures / slow upserts.
vi.mock('@/lib/copilot/async-runs/repository', async () => {
  const actual = await vi.importActual<typeof import('@/lib/copilot/async-runs/repository')>(
    '@/lib/copilot/async-runs/repository'
  )
  return {
    ...actual,
    upsertAsyncToolCall,
    markAsyncToolRunning,
    completeAsyncToolCall,
  }
})
import {
MothershipStreamV1EventType,
MothershipStreamV1TextChannel,
MothershipStreamV1ToolExecutor,
MothershipStreamV1ToolMode,
MothershipStreamV1ToolOutcome,
MothershipStreamV1ToolPhase,
} from '@/lib/copilot/generated/mothership-stream-v1'
import { Read as ReadTool } from '@/lib/copilot/generated/tool-catalog-v1'
import { sseHandlers, subAgentHandlers } from '@/lib/copilot/request/handlers'
import type { ExecutionContext, StreamEvent, StreamingContext } from '@/lib/copilot/request/types'
// Exercises the tool lifecycle through the SSE handler dispatch tables:
// main-lane and subagent tool_call/tool_result phases, duplicate and
// out-of-order events, aborts, and async-run persistence failures.
describe('sse-handlers tool lifecycle', () => {
  let context: StreamingContext
  let execContext: ExecutionContext
  beforeEach(() => {
    vi.clearAllMocks()
    upsertAsyncToolCall.mockResolvedValue(null)
    markAsyncToolRunning.mockResolvedValue(null)
    completeAsyncToolCall.mockResolvedValue(null)
    // Fresh mutable streaming/execution state for every test.
    context = {
      chatId: undefined,
      messageId: 'msg-1',
      accumulatedContent: '',
      trace: new TraceCollector(),
      contentBlocks: [],
      toolCalls: new Map(),
      pendingToolPromises: new Map(),
      currentThinkingBlock: null,
      isInThinkingBlock: false,
      subAgentParentToolCallId: undefined,
      subAgentParentStack: [],
      subAgentContent: {},
      subAgentToolCalls: {},
      pendingContent: '',
      streamComplete: false,
      wasAborted: false,
      errors: [],
    }
    execContext = {
      userId: 'user-1',
      workflowId: 'workflow-1',
    }
  })
  it('executes tool_call and emits tool_result', async () => {
    executeTool.mockResolvedValueOnce({ success: true, output: { ok: true } })
    const onEvent = vi.fn()
    await sseHandlers.tool(
      {
        type: MothershipStreamV1EventType.tool,
        payload: {
          toolCallId: 'tool-1',
          toolName: ReadTool.id,
          arguments: { workflowId: 'workflow-1' },
          executor: MothershipStreamV1ToolExecutor.sim,
          mode: MothershipStreamV1ToolMode.async,
          phase: MothershipStreamV1ToolPhase.call,
        },
      } satisfies StreamEvent,
      context,
      execContext,
      { onEvent, interactive: false, timeout: 1000 }
    )
    // tool_call fires execution without awaiting (fire-and-forget for parallel execution),
    // so we flush pending microtasks before asserting
    await new Promise((resolve) => setTimeout(resolve, 0))
    expect(executeTool).toHaveBeenCalledTimes(1)
    expect(onEvent).toHaveBeenCalledWith(
      expect.objectContaining({
        type: MothershipStreamV1EventType.tool,
        payload: expect.objectContaining({
          toolCallId: 'tool-1',
          success: true,
          phase: MothershipStreamV1ToolPhase.result,
        }),
      })
    )
    const updated = context.toolCalls.get('tool-1')
    expect(updated?.status).toBe(MothershipStreamV1ToolOutcome.success)
    expect(updated?.result?.output).toEqual({ ok: true })
  })
  it('updates stored params when a subagent generating event is followed by the final tool call', async () => {
    executeTool.mockResolvedValueOnce({ success: true, output: { ok: true } })
    context.subAgentParentToolCallId = 'parent-1'
    context.subAgentParentStack = ['parent-1']
    context.toolCalls.set('parent-1', {
      id: 'parent-1',
      name: 'build',
      status: 'pending',
      startTime: Date.now(),
    })
    // First event: arguments still streaming ("generating"), so no args yet.
    await subAgentHandlers.tool(
      {
        type: MothershipStreamV1EventType.tool,
        scope: { lane: 'subagent', parentToolCallId: 'parent-1', agentId: 'build' },
        payload: {
          toolCallId: 'sub-tool-1',
          toolName: 'create_workflow',
          executor: MothershipStreamV1ToolExecutor.sim,
          mode: MothershipStreamV1ToolMode.async,
          phase: MothershipStreamV1ToolPhase.call,
          status: 'generating',
        },
      } satisfies StreamEvent,
      context,
      execContext,
      { interactive: false, timeout: 1000 }
    )
    // Second event for the same toolCallId carries the finalized arguments.
    await subAgentHandlers.tool(
      {
        type: MothershipStreamV1EventType.tool,
        scope: { lane: 'subagent', parentToolCallId: 'parent-1', agentId: 'build' },
        payload: {
          toolCallId: 'sub-tool-1',
          toolName: 'create_workflow',
          arguments: { name: 'Example Workflow' },
          executor: MothershipStreamV1ToolExecutor.sim,
          mode: MothershipStreamV1ToolMode.async,
          phase: MothershipStreamV1ToolPhase.call,
          status: 'executing',
        },
      } satisfies StreamEvent,
      context,
      execContext,
      { interactive: false, timeout: 1000 }
    )
    await new Promise((resolve) => setTimeout(resolve, 0))
    expect(executeTool).toHaveBeenCalledWith(
      'create_workflow',
      { name: 'Example Workflow' },
      expect.any(Object)
    )
    expect(context.toolCalls.get('sub-tool-1')?.params).toEqual({ name: 'Example Workflow' })
    expect(context.subAgentToolCalls['parent-1']?.[0]?.params).toEqual({
      name: 'Example Workflow',
    })
  })
  it('routes subagent text using the event scope parent tool call id', async () => {
    // Deliberately stale context parent: the event-local scope must win.
    context.subAgentParentToolCallId = 'wrong-parent'
    context.subAgentContent['parent-1'] = ''
    await subAgentHandlers.text(
      {
        type: MothershipStreamV1EventType.text,
        scope: { lane: 'subagent', parentToolCallId: 'parent-1', agentId: 'deploy' },
        payload: {
          channel: MothershipStreamV1TextChannel.assistant,
          text: 'hello from deploy',
        },
      } satisfies StreamEvent,
      context,
      execContext,
      { interactive: false, timeout: 1000 }
    )
    expect(context.subAgentContent['parent-1']).toBe('hello from deploy')
    expect(context.contentBlocks.at(-1)).toEqual(
      expect.objectContaining({
        type: 'subagent_text',
        content: 'hello from deploy',
      })
    )
  })
  it('routes subagent tool calls using the event scope parent tool call id', async () => {
    executeTool.mockResolvedValueOnce({ success: true, output: { ok: true } })
    context.subAgentParentToolCallId = 'wrong-parent'
    context.toolCalls.set('parent-1', {
      id: 'parent-1',
      name: 'deploy',
      status: 'pending',
      startTime: Date.now(),
    })
    await subAgentHandlers.tool(
      {
        type: MothershipStreamV1EventType.tool,
        scope: { lane: 'subagent', parentToolCallId: 'parent-1', agentId: 'deploy' },
        payload: {
          toolCallId: 'sub-tool-scope-1',
          toolName: 'read',
          arguments: { path: 'workflow.json' },
          executor: MothershipStreamV1ToolExecutor.sim,
          mode: MothershipStreamV1ToolMode.async,
          phase: MothershipStreamV1ToolPhase.call,
        },
      } satisfies StreamEvent,
      context,
      execContext,
      { interactive: false, timeout: 1000 }
    )
    await new Promise((resolve) => setTimeout(resolve, 0))
    expect(context.subAgentToolCalls['parent-1']?.[0]?.id).toBe('sub-tool-scope-1')
  })
  it('skips duplicate tool_call after result', async () => {
    executeTool.mockResolvedValueOnce({ success: true, output: { ok: true } })
    const event = {
      type: MothershipStreamV1EventType.tool,
      payload: {
        toolCallId: 'tool-dup',
        toolName: ReadTool.id,
        arguments: { workflowId: 'workflow-1' },
        executor: MothershipStreamV1ToolExecutor.sim,
        mode: MothershipStreamV1ToolMode.async,
        phase: MothershipStreamV1ToolPhase.call,
      },
    }
    await sseHandlers.tool(event as StreamEvent, context, execContext, { interactive: false })
    await new Promise((resolve) => setTimeout(resolve, 0))
    // Replaying the identical call after completion must be a no-op.
    await sseHandlers.tool(event as StreamEvent, context, execContext, { interactive: false })
    expect(executeTool).toHaveBeenCalledTimes(1)
  })
  it('marks an in-flight tool as cancelled when aborted mid-execution', async () => {
    const abortController = new AbortController()
    const userStopController = new AbortController()
    execContext.abortSignal = abortController.signal
    execContext.userStopSignal = userStopController.signal
    // Delay resolution so the abort lands while the tool is still running.
    executeTool.mockImplementationOnce(
      () =>
        new Promise((resolve) => {
          setTimeout(() => resolve({ success: true, output: { ok: true } }), 0)
        })
    )
    await sseHandlers.tool(
      {
        type: MothershipStreamV1EventType.tool,
        payload: {
          toolCallId: 'tool-cancel',
          toolName: ReadTool.id,
          arguments: { workflowId: 'workflow-1' },
          executor: MothershipStreamV1ToolExecutor.sim,
          mode: MothershipStreamV1ToolMode.async,
          phase: MothershipStreamV1ToolPhase.call,
        },
      } satisfies StreamEvent,
      context,
      execContext,
      {
        interactive: false,
        timeout: 1000,
        abortSignal: abortController.signal,
        userStopSignal: userStopController.signal,
      }
    )
    userStopController.abort()
    abortController.abort()
    await new Promise((resolve) => setTimeout(resolve, 10))
    const updated = context.toolCalls.get('tool-cancel')
    expect(updated?.status).toBe(MothershipStreamV1ToolOutcome.cancelled)
  })
  it('does not replace an in-flight pending promise on duplicate tool_call', async () => {
    let resolveTool: ((value: { success: boolean; output: { ok: boolean } }) => void) | undefined
    // Hold the tool open so the duplicate arrives while the first is pending.
    executeTool.mockImplementationOnce(
      () =>
        new Promise((resolve) => {
          resolveTool = resolve
        })
    )
    const event = {
      type: MothershipStreamV1EventType.tool,
      payload: {
        toolCallId: 'tool-inflight',
        toolName: ReadTool.id,
        arguments: { workflowId: 'workflow-1' },
        executor: MothershipStreamV1ToolExecutor.sim,
        mode: MothershipStreamV1ToolMode.async,
        phase: MothershipStreamV1ToolPhase.call,
      },
    }
    await sseHandlers.tool(event as StreamEvent, context, execContext, { interactive: false })
    await new Promise((resolve) => setTimeout(resolve, 0))
    const firstPromise = context.pendingToolPromises.get('tool-inflight')
    expect(firstPromise).toBeDefined()
    await sseHandlers.tool(event as StreamEvent, context, execContext, { interactive: false })
    expect(executeTool).toHaveBeenCalledTimes(1)
    expect(context.pendingToolPromises.get('tool-inflight')).toBe(firstPromise)
    resolveTool?.({ success: true, output: { ok: true } })
    await new Promise((resolve) => setTimeout(resolve, 0))
    // The pending entry is cleaned up once the promise settles.
    expect(context.pendingToolPromises.has('tool-inflight')).toBe(false)
  })
  it('still executes the tool when async row upsert fails', async () => {
    // Persistence is best-effort: a DB failure must not block execution.
    upsertAsyncToolCall.mockRejectedValueOnce(new Error('db down'))
    executeTool.mockResolvedValueOnce({ success: true, output: { ok: true } })
    await sseHandlers.tool(
      {
        type: MothershipStreamV1EventType.tool,
        payload: {
          toolCallId: 'tool-upsert-fail',
          toolName: ReadTool.id,
          arguments: { workflowId: 'workflow-1' },
          executor: MothershipStreamV1ToolExecutor.sim,
          mode: MothershipStreamV1ToolMode.async,
          phase: MothershipStreamV1ToolPhase.call,
        },
      } satisfies StreamEvent,
      context,
      execContext,
      { onEvent: vi.fn(), interactive: false, timeout: 1000 }
    )
    await new Promise((resolve) => setTimeout(resolve, 0))
    expect(executeTool).toHaveBeenCalledTimes(1)
    expect(context.toolCalls.get('tool-upsert-fail')?.status).toBe(
      MothershipStreamV1ToolOutcome.success
    )
  })
  it('does not execute a tool if a terminal tool_result arrives before local execution starts', async () => {
    let resolveUpsert: ((value: null) => void) | undefined
    // Stall the upsert so the result event wins the race against execution.
    upsertAsyncToolCall.mockImplementationOnce(
      () =>
        new Promise((resolve) => {
          resolveUpsert = resolve
        })
    )
    const onEvent = vi.fn()
    await sseHandlers.tool(
      {
        type: MothershipStreamV1EventType.tool,
        payload: {
          toolCallId: 'tool-race',
          toolName: ReadTool.id,
          arguments: { workflowId: 'workflow-1' },
          executor: MothershipStreamV1ToolExecutor.sim,
          mode: MothershipStreamV1ToolMode.async,
          phase: MothershipStreamV1ToolPhase.call,
        },
      } satisfies StreamEvent,
      context,
      execContext,
      { onEvent, interactive: false, timeout: 1000 }
    )
    await sseHandlers.tool(
      {
        type: MothershipStreamV1EventType.tool,
        payload: {
          toolCallId: 'tool-race',
          toolName: ReadTool.id,
          executor: MothershipStreamV1ToolExecutor.sim,
          mode: MothershipStreamV1ToolMode.async,
          phase: MothershipStreamV1ToolPhase.result,
          success: true,
          result: { ok: true },
        },
      } satisfies StreamEvent,
      context,
      execContext,
      { onEvent, interactive: false, timeout: 1000 }
    )
    resolveUpsert?.(null)
    await new Promise((resolve) => setTimeout(resolve, 0))
    expect(executeTool).not.toHaveBeenCalled()
    expect(context.toolCalls.get('tool-race')?.status).toBe(MothershipStreamV1ToolOutcome.success)
    expect(context.toolCalls.get('tool-race')?.result?.output).toEqual({ ok: true })
  })
  it('does not execute a tool if a tool_result arrives before the tool_call event', async () => {
    const onEvent = vi.fn()
    await sseHandlers.tool(
      {
        type: MothershipStreamV1EventType.tool,
        payload: {
          toolCallId: 'tool-early-result',
          toolName: ReadTool.id,
          executor: MothershipStreamV1ToolExecutor.sim,
          mode: MothershipStreamV1ToolMode.async,
          phase: MothershipStreamV1ToolPhase.result,
          success: true,
          result: { ok: true },
        },
      } satisfies StreamEvent,
      context,
      execContext,
      { onEvent, interactive: false, timeout: 1000 }
    )
    await sseHandlers.tool(
      {
        type: MothershipStreamV1EventType.tool,
        payload: {
          toolCallId: 'tool-early-result',
          toolName: ReadTool.id,
          arguments: { workflowId: 'workflow-1' },
          executor: MothershipStreamV1ToolExecutor.sim,
          mode: MothershipStreamV1ToolMode.async,
          phase: MothershipStreamV1ToolPhase.call,
        },
      } satisfies StreamEvent,
      context,
      execContext,
      { onEvent, interactive: false, timeout: 1000 }
    )
    await new Promise((resolve) => setTimeout(resolve, 0))
    expect(executeTool).not.toHaveBeenCalled()
    expect(context.toolCalls.get('tool-early-result')?.status).toBe(
      MothershipStreamV1ToolOutcome.success
    )
  })
  it('executes dynamic sim tools based on payload executor', async () => {
    // Not in the static catalog, but payload says sim-executed: dispatch anyway.
    isSimExecuted.mockReturnValueOnce(false)
    executeTool.mockResolvedValueOnce({ success: true, output: { emails: [] } })
    await sseHandlers.tool(
      {
        type: MothershipStreamV1EventType.tool,
        payload: {
          toolCallId: 'tool-dynamic-sim',
          toolName: 'gmail_read',
          arguments: { maxResults: 10 },
          executor: MothershipStreamV1ToolExecutor.sim,
          mode: MothershipStreamV1ToolMode.async,
          phase: MothershipStreamV1ToolPhase.call,
        },
      } satisfies StreamEvent,
      context,
      execContext,
      { interactive: false, timeout: 1000 }
    )
    await new Promise((resolve) => setTimeout(resolve, 0))
    expect(executeTool).toHaveBeenCalledWith('gmail_read', { maxResults: 10 }, expect.any(Object))
    expect(context.toolCalls.get('tool-dynamic-sim')?.status).toBe(
      MothershipStreamV1ToolOutcome.success
    )
  })
})

View File

@@ -0,0 +1,50 @@
import { createLogger } from '@sim/logger'
import { MothershipStreamV1EventType } from '@/lib/copilot/generated/mothership-stream-v1'
import type { StreamEvent, StreamingContext } from '@/lib/copilot/request/types'
import { handleCompleteEvent } from './complete'
import { handleErrorEvent } from './error'
import { handleRunEvent } from './run'
import { handleSessionEvent } from './session'
import { handleSpanEvent } from './span'
import { handleTextEvent } from './text'
import { handleToolEvent } from './tool'
import type { StreamHandler } from './types'
export type { StreamHandler, ToolScope } from './types'
const logger = createLogger('CopilotHandlerRouting')
/**
 * Main-lane dispatch table, keyed by stream event type. Tool and text
 * handlers are bound to the 'main' scope; unknown event types simply have
 * no entry and are ignored by the caller.
 */
export const sseHandlers: Record<string, StreamHandler> = {
  [MothershipStreamV1EventType.session]: handleSessionEvent,
  [MothershipStreamV1EventType.tool]: (e, c, ec, o) => handleToolEvent(e, c, ec, o, 'main'),
  [MothershipStreamV1EventType.text]: handleTextEvent('main'),
  [MothershipStreamV1EventType.run]: handleRunEvent,
  [MothershipStreamV1EventType.complete]: handleCompleteEvent,
  [MothershipStreamV1EventType.error]: handleErrorEvent,
  [MothershipStreamV1EventType.span]: handleSpanEvent,
}
/**
 * Subagent-lane dispatch table. Only text/tool/span events are routed here;
 * session, run, complete, and error events are main-lane concerns.
 */
export const subAgentHandlers: Record<string, StreamHandler> = {
  [MothershipStreamV1EventType.text]: handleTextEvent('subagent'),
  [MothershipStreamV1EventType.tool]: (e, c, ec, o) => handleToolEvent(e, c, ec, o, 'subagent'),
  [MothershipStreamV1EventType.span]: handleSpanEvent,
}
/**
 * Decides whether an event should be routed to the subagent handlers.
 *
 * Returns true only for subagent-lane events with a resolvable parent tool
 * call. As a side effect, refreshes `context.subAgentParentToolCallId` from
 * the event-local scope when present (kept for legacy callers; handlers
 * should still prefer the event's own scope for correctness). Events on a
 * subagent lane with no resolvable parent are logged and rejected.
 */
export function handleSubagentRouting(event: StreamEvent, context: StreamingContext): boolean {
  const scope = event.scope
  if (scope?.lane !== 'subagent') return false
  const scopedParent = scope?.parentToolCallId
  if (scopedParent) {
    context.subAgentParentToolCallId = scopedParent
  }
  if (context.subAgentParentToolCallId) {
    return true
  }
  logger.warn('Subagent event missing parent tool call', {
    type: event.type,
    subagent: scope?.agentId,
  })
  return false
}

View File

@@ -0,0 +1,70 @@
import { createLogger } from '@sim/logger'
import {
MothershipStreamV1RunKind,
MothershipStreamV1ToolOutcome,
} from '@/lib/copilot/generated/mothership-stream-v1'
import { getEventData } from '@/lib/copilot/request/sse-utils'
import type { StreamHandler } from './types'
import { addContentBlock } from './types'
const logger = createLogger('CopilotRunHandler')
/**
 * Handles run-level control events:
 * - checkpoint_pause: snapshots the pending async-tool continuation state
 *   onto the context and terminates the stream so the caller can resume later.
 * - compaction_start / compaction_done: surfaces context-compaction progress
 *   as synthetic tool_call content blocks.
 */
export const handleRunEvent: StreamHandler = (event, context) => {
  const data = getEventData(event)
  if (!data) return
  const kind = data?.kind as string | undefined
  switch (kind) {
    case MothershipStreamV1RunKind.checkpoint_pause: {
      const rawFrames = Array.isArray(data?.frames) ? data.frames : []
      const frames = rawFrames.map((frame: Record<string, unknown>) => ({
        parentToolCallId: String(frame.parentToolCallId),
        parentToolName: String(frame.parentToolName ?? ''),
        pendingToolIds: Array.isArray(frame.pendingToolIds)
          ? frame.pendingToolIds.map((id: unknown) => String(id))
          : [],
      }))
      context.awaitingAsyncContinuation = {
        checkpointId: String(data?.checkpointId),
        // Fall back to the context's current ids when the event omits them.
        executionId: typeof data?.executionId === 'string' ? data.executionId : context.executionId,
        runId: typeof data?.runId === 'string' && data.runId ? data.runId : context.runId,
        pendingToolCallIds: Array.isArray(data?.pendingToolCallIds)
          ? data.pendingToolCallIds.map((id) => String(id))
          : [],
        frames: frames.length > 0 ? frames : undefined,
      }
      logger.info('Received checkpoint pause', {
        checkpointId: context.awaitingAsyncContinuation.checkpointId,
        executionId: context.awaitingAsyncContinuation.executionId,
        runId: context.awaitingAsyncContinuation.runId,
        pendingToolCallIds: context.awaitingAsyncContinuation.pendingToolCallIds,
        frameCount: frames.length,
      })
      context.streamComplete = true
      return
    }
    case MothershipStreamV1RunKind.compaction_start:
      addContentBlock(context, {
        type: 'tool_call',
        toolCall: {
          id: `compaction-${Date.now()}`,
          name: 'context_compaction',
          status: 'executing',
        },
      })
      return
    case MothershipStreamV1RunKind.compaction_done:
      // NOTE(review): this emits a fresh `compaction-<ts>` id rather than
      // reusing the start block's id — presumably addContentBlock reconciles
      // by name; confirm the UI does not render two compaction entries.
      addContentBlock(context, {
        type: 'tool_call',
        toolCall: {
          id: `compaction-${Date.now()}`,
          name: 'context_compaction',
          status: MothershipStreamV1ToolOutcome.success,
        },
      })
      return
    default:
      return
  }
}

View File

@@ -0,0 +1,14 @@
import { MothershipStreamV1SessionKind } from '@/lib/copilot/generated/mothership-stream-v1'
import { getEventData } from '@/lib/copilot/request/sse-utils'
import type { StreamHandler } from './types'
/**
 * Session event: captures the chat id announced by the server. The streaming
 * context always mirrors the payload value (even undefined), while the
 * execution context is only updated when a concrete id is present.
 */
export const handleSessionEvent: StreamHandler = (event, context, execContext) => {
  const data = getEventData(event)
  if (data?.kind !== MothershipStreamV1SessionKind.chat) return
  const chatId = data.chatId as string | undefined
  context.chatId = chatId
  if (chatId) {
    execContext.chatId = chatId
  }
}

View File

@@ -0,0 +1,3 @@
import type { StreamHandler } from './types'
// Span lifecycle events are intentionally ignored here: nothing from them is
// surfaced to the client through this handler.
// NOTE(review): presumably span data is consumed by the trace layer elsewhere
// in the request pipeline — confirm before adding logic here.
export const handleSpanEvent: StreamHandler = () => {}

View File

@@ -0,0 +1,54 @@
import {
MothershipStreamV1SpanLifecycleEvent,
MothershipStreamV1TextChannel,
} from '@/lib/copilot/generated/mothership-stream-v1'
import { getEventData } from '@/lib/copilot/request/sse-utils'
import type { StreamHandler, ToolScope } from './types'
import { addContentBlock, getScopedParentToolCallId } from './types'
/**
 * Builds a text-event handler bound to a lane.
 *
 * Subagent lane: assistant-channel chunks are accumulated per parent tool
 * call id and mirrored as `subagent_text` content blocks; everything else is
 * dropped.
 *
 * Main lane: thinking-channel events drive a start/append/end state machine
 * for `context.currentThinkingBlock` (chunks arriving without an open block
 * are discarded); all other chunks extend `accumulatedContent` and produce
 * `text` content blocks.
 */
export function handleTextEvent(scope: ToolScope): StreamHandler {
  return (event, context) => {
    const data = getEventData(event)
    const chunk = data?.text as string | undefined

    if (scope === 'subagent') {
      const parentToolCallId = getScopedParentToolCallId(event, context)
      if (!parentToolCallId) return
      if (data?.channel !== MothershipStreamV1TextChannel.assistant) return
      if (!chunk) return
      const existing = context.subAgentContent[parentToolCallId] || ''
      context.subAgentContent[parentToolCallId] = existing + chunk
      addContentBlock(context, { type: 'subagent_text', content: chunk })
      return
    }

    if (data?.channel === MothershipStreamV1TextChannel.thinking) {
      const phase = data.phase as string | undefined
      switch (phase) {
        case MothershipStreamV1SpanLifecycleEvent.start:
          context.isInThinkingBlock = true
          context.currentThinkingBlock = {
            type: 'thinking',
            content: '',
            timestamp: Date.now(),
          }
          return
        case MothershipStreamV1SpanLifecycleEvent.end:
          // Flush the open block (if any) into the transcript, then reset.
          if (context.currentThinkingBlock) {
            context.contentBlocks.push(context.currentThinkingBlock)
          }
          context.isInThinkingBlock = false
          context.currentThinkingBlock = null
          return
        default:
          if (chunk && context.currentThinkingBlock) {
            context.currentThinkingBlock.content = `${context.currentThinkingBlock.content || ''}${chunk}`
          }
          return
      }
    }

    if (!chunk) return
    context.accumulatedContent += chunk
    addContentBlock(context, { type: 'text', content: chunk })
  }
}

Some files were not shown because too many files have changed in this diff Show More