Compare commits

...

11 Commits
v0.6.23 ... dev

Author SHA1 Message Date
Siddharth Ganesan
8848780f56 Fix stream reconnect 2026-04-04 17:26:36 -07:00
Siddharth Ganesan
fefeb010de Add client retry logic 2026-04-04 17:13:38 -07:00
Siddharth Ganesan
ee6c7f98ff File types 2026-04-04 17:04:14 -07:00
Siddharth Ganesan
64758af2b6 improvement(mothership): docs 2026-04-04 14:19:24 -07:00
Siddharth Ganesan
8c09e19293 Add deps 2026-04-04 13:06:45 -07:00
Siddharth Ganesan
feb1c88d2f feat(mothership): append 2026-04-04 12:31:23 -07:00
Siddharth Ganesan
78007c11a0 feat(mothership): add docx support 2026-04-04 11:41:27 -07:00
Siddharth Ganesan
bac1d5e588 Force redeploy 2026-04-03 18:43:50 -07:00
Siddharth Ganesan
7fdab14266 improvement(mothership): new agent loop (#3920)
* feat(transport): replace shared chat transport with mothership-stream module

* improvement(contracts): regenerate contracts from go

* feat(tools): add tool catalog codegen from go tool contracts

* feat(tools): add tool-executor dispatch framework for sim side tool routing

* feat(orchestrator): rewrite tool dispatch with catalog-driven executor and simplified resume loop

* feat(orchestrator): checkpoint resume flow

* refactor(copilot): consolidate orchestrator into request/ layer

* refactor(mothership): reorganize lib/copilot into structured subdirectories

* refactor(mothership): canonical transcript layer, dead code cleanup, type consolidation

* refactor(mothership): rebase onto latest staging

* refactor(mothership): rename request continue to lifecycle

* feat(trace): add initial version of request traces

* improvement(stream): batch stream from redis

* fix(resume): fix the resume checkpoint

* fix(resume): fix resume client tool

* fix(subagents): subagent resume should join on existing subagent text block

* improvement(reconnect): harden reconnect logic

* fix(superagent): fix superagent integration tools

* improvement(stream): improve stream perf

* Rebase with origin dev

* fix(tests): fix failing test

* fix(build): fix type errors

* fix(build): fix build errors

* fix(build): fix type errors

* feat(mothership): add cli execution

* fix(mothership): fix function execute tests
2026-04-03 17:27:51 -07:00
Vikhyath Mondreti
3b9e663f25 fix build error 2026-04-02 18:53:40 -07:00
Vikhyath Mondreti
381bc1d556 fix(concurrency): cleanup worker code 2026-04-02 18:48:00 -07:00
278 changed files with 13462 additions and 15585 deletions

View File

@@ -74,10 +74,6 @@ docker compose -f docker-compose.prod.yml up -d
Open [http://localhost:3000](http://localhost:3000)
#### Background worker note
The Docker Compose stack starts a dedicated worker container by default. If `REDIS_URL` is not configured, the worker will start, log that it is idle, and do no queue processing. This is expected. Queue-backed API, webhook, and schedule execution requires Redis; installs without Redis continue to use the inline execution path.
Sim also supports local models via [Ollama](https://ollama.ai) and [vLLM](https://docs.vllm.ai/) — see the [Docker self-hosting docs](https://docs.sim.ai/self-hosting/docker) for setup details.
### Self-hosted: Manual Setup
@@ -117,12 +113,10 @@ cd packages/db && bunx drizzle-kit migrate --config=./drizzle.config.ts
5. Start development servers:
```bash
bun run dev:full # Starts Next.js app, realtime socket server, and the BullMQ worker
bun run dev:full # Starts Next.js app and realtime socket server
```
If `REDIS_URL` is not configured, the worker will remain idle and execution continues inline.
Or run separately: `bun run dev` (Next.js), `cd apps/sim && bun run dev:sockets` (realtime), and `cd apps/sim && bun run worker` (BullMQ worker).
Or run separately: `bun run dev` (Next.js) and `cd apps/sim && bun run dev:sockets` (realtime).
## Copilot API Keys

View File

@@ -195,17 +195,6 @@ By default, your usage is capped at the credits included in your plan. To allow
Max (individual) shares the same rate limits as team plans. Team plans (Pro or Max for Teams) use the Max-tier rate limits.
### Concurrent Execution Limits
| Plan | Concurrent Executions |
|------|----------------------|
| **Free** | 5 |
| **Pro** | 50 |
| **Max / Team** | 200 |
| **Enterprise** | 200 (customizable) |
Concurrent execution limits control how many workflow executions can run simultaneously within a workspace. When the limit is reached, new executions are queued and admitted as running executions complete. Manual runs from the editor are not subject to these limits.
### File Storage
| Plan | Storage |

View File

@@ -4,7 +4,7 @@ import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { recordUsage } from '@/lib/billing/core/usage-log'
import { checkAndBillOverageThreshold } from '@/lib/billing/threshold-billing'
import { checkInternalApiKey } from '@/lib/copilot/utils'
import { checkInternalApiKey } from '@/lib/copilot/request/http'
import { isBillingEnabled } from '@/lib/core/config/feature-flags'
import { generateRequestId } from '@/lib/core/utils/request'

View File

@@ -2,7 +2,7 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { checkServerSideUsageLimits } from '@/lib/billing/calculations/usage-monitor'
import { checkInternalApiKey } from '@/lib/copilot/utils'
import { checkInternalApiKey } from '@/lib/copilot/request/http'
const logger = createLogger('CopilotApiKeysValidate')

View File

@@ -1,10 +1,12 @@
import { createLogger } from '@sim/logger'
import { NextResponse } from 'next/server'
import { getLatestRunForStream } from '@/lib/copilot/async-runs/repository'
import { abortActiveStream, waitForPendingChatStream } from '@/lib/copilot/chat-streaming'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http'
import { abortActiveStream } from '@/lib/copilot/request/session/abort'
import { env } from '@/lib/core/config/env'
const logger = createLogger('CopilotChatAbortAPI')
const GO_EXPLICIT_ABORT_TIMEOUT_MS = 3000
export async function POST(request: Request) {
@@ -15,7 +17,12 @@ export async function POST(request: Request) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const body = await request.json().catch(() => ({}))
const body = await request.json().catch((err) => {
logger.warn('Abort request body parse failed; continuing with empty object', {
error: err instanceof Error ? err.message : String(err),
})
return {}
})
const streamId = typeof body.streamId === 'string' ? body.streamId : ''
let chatId = typeof body.chatId === 'string' ? body.chatId : ''
@@ -24,7 +31,13 @@ export async function POST(request: Request) {
}
if (!chatId) {
const run = await getLatestRunForStream(streamId, authenticatedUserId).catch(() => null)
const run = await getLatestRunForStream(streamId, authenticatedUserId).catch((err) => {
logger.warn('getLatestRunForStream failed while resolving chatId for abort', {
streamId,
error: err instanceof Error ? err.message : String(err),
})
return null
})
if (run?.chatId) {
chatId = run.chatId
}
@@ -50,15 +63,13 @@ export async function POST(request: Request) {
if (!response.ok) {
throw new Error(`Explicit abort marker request failed: ${response.status}`)
}
} catch {
// best effort: local abort should still proceed even if Go marker fails
} catch (err) {
logger.warn('Explicit abort marker request failed; proceeding with local abort', {
streamId,
error: err instanceof Error ? err.message : String(err),
})
}
const aborted = await abortActiveStream(streamId)
if (chatId) {
await waitForPendingChatStream(chatId, GO_EXPLICIT_ABORT_TIMEOUT_MS + 1000, streamId).catch(
() => false
)
}
return NextResponse.json({ aborted })
}

View File

@@ -36,11 +36,11 @@ vi.mock('drizzle-orm', () => ({
eq: vi.fn((field: unknown, value: unknown) => ({ field, value, type: 'eq' })),
}))
vi.mock('@/lib/copilot/chat-lifecycle', () => ({
vi.mock('@/lib/copilot/chat/lifecycle', () => ({
getAccessibleCopilotChat: mockGetAccessibleCopilotChat,
}))
vi.mock('@/lib/copilot/task-events', () => ({
vi.mock('@/lib/copilot/tasks', () => ({
taskPubSub: { publishStatusChanged: vi.fn() },
}))

View File

@@ -5,8 +5,8 @@ import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat-lifecycle'
import { taskPubSub } from '@/lib/copilot/task-events'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import { taskPubSub } from '@/lib/copilot/tasks'
const logger = createLogger('DeleteChatAPI')

View File

@@ -0,0 +1,119 @@
import { db } from '@sim/db'
import { copilotChats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, desc, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request/http'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
import { assertActiveWorkspaceAccess } from '@/lib/workspaces/permissions/utils'
const logger = createLogger('CopilotChatAPI')
/**
 * Normalize a copilot chat database row into the shape returned by the API.
 *
 * - `messages` is stored as arbitrary JSON, so it is coerced to an array
 *   (malformed rows become `[]`) and counted.
 * - `conversationId` is surfaced to clients as `activeStreamId`, but only
 *   when the caller selected that column (presence-checked with `in`).
 * - `resources` is likewise included only when selected, coerced to an array.
 * - Falsy `planArtifact` / `config` values collapse to `null`.
 */
function transformChat(chat: {
  id: string
  title: string | null
  model: string | null
  messages: unknown
  planArtifact?: unknown
  config?: unknown
  conversationId?: string | null
  resources?: unknown
  createdAt: Date | null
  updatedAt: Date | null
}) {
  // Coerce once so the list and its count always agree.
  const messageList = Array.isArray(chat.messages) ? chat.messages : []
  // Optional columns: include the derived key only if the row carried it.
  const hasStreamColumn = 'conversationId' in chat
  const hasResourcesColumn = 'resources' in chat
  return {
    id: chat.id,
    title: chat.title,
    model: chat.model,
    messages: messageList,
    messageCount: messageList.length,
    planArtifact: chat.planArtifact || null,
    config: chat.config || null,
    ...(hasStreamColumn ? { activeStreamId: chat.conversationId || null } : {}),
    ...(hasResourcesColumn
      ? { resources: Array.isArray(chat.resources) ? chat.resources : [] }
      : {}),
    createdAt: chat.createdAt,
    updatedAt: chat.updatedAt,
  }
}
/**
 * GET /api/copilot/chat
 *
 * Fetches copilot chats in one of three modes, selected by query params:
 *  - `chatId`      → returns that single chat (404 if not accessible).
 *  - `workflowId`  → lists the caller's chats scoped to that workflow.
 *  - `workspaceId` → lists the caller's chats scoped to that workspace.
 * At least one of the three is required; otherwise responds 400.
 *
 * Returns 401 when unauthenticated or when workflow read access is denied,
 * and 500 (via createInternalServerErrorResponse) on any unexpected error.
 */
export async function GET(req: NextRequest) {
  try {
    const { searchParams } = new URL(req.url)
    const workflowId = searchParams.get('workflowId')
    const workspaceId = searchParams.get('workspaceId')
    const chatId = searchParams.get('chatId')
    // Session-only auth: no API-key path for this route.
    const { userId: authenticatedUserId, isAuthenticated } =
      await authenticateCopilotRequestSessionOnly()
    if (!isAuthenticated || !authenticatedUserId) {
      return createUnauthorizedResponse()
    }
    // Single-chat mode: access control is delegated to getAccessibleCopilotChat,
    // which returns null for both "missing" and "not accessible" (both → 404).
    if (chatId) {
      const chat = await getAccessibleCopilotChat(chatId, authenticatedUserId)
      if (!chat) {
        return NextResponse.json({ success: false, error: 'Chat not found' }, { status: 404 })
      }
      logger.info(`Retrieved chat ${chatId}`)
      return NextResponse.json({ success: true, chat: transformChat(chat) })
    }
    if (!workflowId && !workspaceId) {
      return createBadRequestResponse('workflowId, workspaceId, or chatId is required')
    }
    // NOTE(review): presumably assertActiveWorkspaceAccess throws on denial,
    // which the outer catch converts to a 500 rather than a 401/403 — confirm
    // that is the intended status for workspace-access failures.
    if (workspaceId) {
      await assertActiveWorkspaceAccess(workspaceId, authenticatedUserId)
    }
    // Workflow scope requires explicit read permission; denial → 401.
    if (workflowId) {
      const authorization = await authorizeWorkflowByWorkspacePermission({
        workflowId,
        userId: authenticatedUserId,
        action: 'read',
      })
      if (!authorization.allowed) {
        return createUnauthorizedResponse()
      }
    }
    // workflowId takes precedence when both params are supplied; the non-null
    // assertion on workspaceId is safe because the 400 guard above ensures at
    // least one of the two is present.
    const scopeFilter = workflowId
      ? eq(copilotChats.workflowId, workflowId)
      : eq(copilotChats.workspaceId, workspaceId!)
    // List mode intentionally omits conversationId/resources columns, so
    // transformChat will not emit activeStreamId/resources for these rows.
    const chats = await db
      .select({
        id: copilotChats.id,
        title: copilotChats.title,
        model: copilotChats.model,
        messages: copilotChats.messages,
        planArtifact: copilotChats.planArtifact,
        config: copilotChats.config,
        createdAt: copilotChats.createdAt,
        updatedAt: copilotChats.updatedAt,
      })
      .from(copilotChats)
      .where(and(eq(copilotChats.userId, authenticatedUserId), scopeFilter))
      .orderBy(desc(copilotChats.updatedAt))
    const scope = workflowId ? `workflow ${workflowId}` : `workspace ${workspaceId}`
    logger.info(`Retrieved ${chats.length} chats for ${scope}`)
    return NextResponse.json({
      success: true,
      chats: chats.map(transformChat),
    })
  } catch (error) {
    logger.error('Error fetching copilot chats:', error)
    return createInternalServerErrorResponse('Failed to fetch chats')
  }
}

View File

@@ -0,0 +1,65 @@
import { db } from '@sim/db'
import { copilotChats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import { taskPubSub } from '@/lib/copilot/tasks'
const logger = createLogger('RenameChatAPI')
const RenameChatSchema = z.object({
chatId: z.string().min(1),
title: z.string().min(1).max(200),
})
/**
 * PATCH — rename a copilot chat.
 *
 * Body: `{ chatId, title }` validated by RenameChatSchema (title 1–200 chars).
 * Flow: session auth → accessibility check → conditional update scoped to the
 * owning user → optional workspace pub/sub notification.
 *
 * Responses: 401 unauthenticated, 404 chat missing/inaccessible (or the
 * update matched no row), 400 on Zod validation failure, 500 otherwise.
 */
export async function PATCH(request: NextRequest) {
  try {
    const session = await getSession()
    if (!session?.user?.id) {
      return NextResponse.json({ success: false, error: 'Unauthorized' }, { status: 401 })
    }
    // ZodError from parse() is handled in the catch below (→ 400).
    const body = await request.json()
    const { chatId, title } = RenameChatSchema.parse(body)
    const chat = await getAccessibleCopilotChat(chatId, session.user.id)
    if (!chat) {
      return NextResponse.json({ success: false, error: 'Chat not found' }, { status: 404 })
    }
    // Renaming also bumps lastSeenAt with the same timestamp.
    const now = new Date()
    // The where clause re-checks ownership, so a row accessible via
    // getAccessibleCopilotChat but owned by someone else updates nothing
    // and falls through to the 404 below.
    const [updated] = await db
      .update(copilotChats)
      .set({ title, updatedAt: now, lastSeenAt: now })
      .where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, session.user.id)))
      .returning({ id: copilotChats.id, workspaceId: copilotChats.workspaceId })
    if (!updated) {
      return NextResponse.json({ success: false, error: 'Chat not found' }, { status: 404 })
    }
    logger.info('Chat renamed', { chatId, title })
    // Best-effort workspace notification; fire-and-forget.
    // NOTE(review): if publishStatusChanged returns a promise, a rejection
    // here is unhandled — confirm it is synchronous or internally caught.
    if (updated.workspaceId) {
      taskPubSub?.publishStatusChanged({
        workspaceId: updated.workspaceId,
        chatId,
        type: 'renamed',
      })
    }
    return NextResponse.json({ success: true })
  } catch (error) {
    if (error instanceof z.ZodError) {
      return NextResponse.json(
        { success: false, error: 'Invalid request data', details: error.errors },
        { status: 400 }
      )
    }
    logger.error('Error renaming chat:', error)
    return NextResponse.json({ success: false, error: 'Failed to rename chat' }, { status: 500 })
  }
}

View File

@@ -10,8 +10,8 @@ import {
createInternalServerErrorResponse,
createNotFoundResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
import type { ChatResource, ResourceType } from '@/lib/copilot/resources'
} from '@/lib/copilot/request/http'
import type { ChatResource, ResourceType } from '@/lib/copilot/resources/persistence'
const logger = createLogger('CopilotChatResourcesAPI')

View File

@@ -1,45 +1,45 @@
import { db } from '@sim/db'
import { copilotChats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, desc, eq, sql } from 'drizzle-orm'
import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { createRunSegment } from '@/lib/copilot/async-runs/repository'
import { getAccessibleCopilotChat, resolveOrCreateChat } from '@/lib/copilot/chat-lifecycle'
import { buildCopilotRequestPayload } from '@/lib/copilot/chat-payload'
import { type ChatLoadResult, resolveOrCreateChat } from '@/lib/copilot/chat/lifecycle'
import { buildCopilotRequestPayload } from '@/lib/copilot/chat/payload'
import {
acquirePendingChatStream,
createSSEStream,
releasePendingChatStream,
requestChatTitle,
SSE_RESPONSE_HEADERS,
} from '@/lib/copilot/chat-streaming'
import { COPILOT_REQUEST_MODES } from '@/lib/copilot/models'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import { getStreamMeta, readStreamEvents } from '@/lib/copilot/orchestrator/stream/buffer'
import type { OrchestratorResult } from '@/lib/copilot/orchestrator/types'
import { resolveActiveResourceContext } from '@/lib/copilot/process-contents'
buildPersistedAssistantMessage,
buildPersistedUserMessage,
} from '@/lib/copilot/chat/persisted-message'
import {
processContextsServer,
resolveActiveResourceContext,
} from '@/lib/copilot/chat/process-contents'
import { COPILOT_REQUEST_MODES } from '@/lib/copilot/constants'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { createSSEStream, SSE_RESPONSE_HEADERS } from '@/lib/copilot/request/lifecycle/start'
import {
authorizeWorkflowByWorkspacePermission,
resolveWorkflowIdForUser,
} from '@/lib/workflows/utils'
import {
assertActiveWorkspaceAccess,
getUserEntityPermissions,
} from '@/lib/workspaces/permissions/utils'
acquirePendingChatStream,
getPendingChatStreamId,
releasePendingChatStream,
} from '@/lib/copilot/request/session'
import type { OrchestratorResult } from '@/lib/copilot/request/types'
import { getWorkflowById, resolveWorkflowIdForUser } from '@/lib/workflows/utils'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
import type { ChatContext } from '@/stores/panel'
export const maxDuration = 3600
const logger = createLogger('CopilotChatAPI')
// ---------------------------------------------------------------------------
// Schemas
// ---------------------------------------------------------------------------
const FileAttachmentSchema = z.object({
id: z.string(),
key: z.string(),
@@ -66,7 +66,6 @@ const ChatMessageSchema = z.object({
mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'),
prefetch: z.boolean().optional(),
createNewChat: z.boolean().optional().default(false),
stream: z.boolean().optional().default(true),
implicitFeedback: z.string().optional(),
fileAttachments: z.array(FileAttachmentSchema).optional(),
resourceAttachments: z.array(ResourceAttachmentSchema).optional(),
@@ -104,27 +103,25 @@ const ChatMessageSchema = z.object({
userTimezone: z.string().optional(),
})
/**
* POST /api/copilot/chat
* Send messages to sim agent and handle chat persistence
*/
// ---------------------------------------------------------------------------
// POST /api/copilot/chat
// ---------------------------------------------------------------------------
export async function POST(req: NextRequest) {
const tracker = createRequestTracker()
let actualChatId: string | undefined
let pendingChatStreamAcquired = false
let pendingChatStreamHandedOff = false
let pendingChatStreamID: string | undefined
let chatStreamLockAcquired = false
let userMessageIdToUse = ''
try {
// Get session to access user information including name
// 1. Auth
const session = await getSession()
if (!session?.user?.id) {
return createUnauthorizedResponse()
}
const authenticatedUserId = session.user.id
// 2. Parse & validate
const body = await req.json()
const {
message,
@@ -137,7 +134,6 @@ export async function POST(req: NextRequest) {
mode,
prefetch,
createNewChat,
stream,
implicitFeedback,
fileAttachments,
resourceAttachments,
@@ -151,17 +147,12 @@ export async function POST(req: NextRequest) {
? contexts.map((ctx) => {
if (ctx.kind !== 'blocks') return ctx
if (Array.isArray(ctx.blockIds) && ctx.blockIds.length > 0) return ctx
if (ctx.blockId) {
return {
...ctx,
blockIds: [ctx.blockId],
}
}
if (ctx.blockId) return { ...ctx, blockIds: [ctx.blockId] }
return ctx
})
: contexts
// Copilot route always requires a workflow scope
// 3. Resolve workflow & workspace
const resolved = await resolveWorkflowIdForUser(
authenticatedUserId,
providedWorkflowId,
@@ -173,48 +164,29 @@ export async function POST(req: NextRequest) {
'No workflows found. Create a workflow first or provide a valid workflowId.'
)
}
const workflowId = resolved.workflowId
const workflowResolvedName = resolved.workflowName
const { workflowId, workflowName: workflowResolvedName } = resolved
// Resolve workspace from workflow so it can be sent as implicit context to the copilot.
let resolvedWorkspaceId: string | undefined
try {
const { getWorkflowById } = await import('@/lib/workflows/utils')
const wf = await getWorkflowById(workflowId)
resolvedWorkspaceId = wf?.workspaceId ?? undefined
} catch {
logger
.withMetadata({ requestId: tracker.requestId, messageId: userMessageId })
.warn('Failed to resolve workspaceId from workflow')
logger.warn(`[${tracker.requestId}] Failed to resolve workspaceId from workflow`)
}
const userMessageIdToUse = userMessageId || crypto.randomUUID()
const reqLogger = logger.withMetadata({
requestId: tracker.requestId,
messageId: userMessageIdToUse,
})
try {
reqLogger.info('Received chat POST', {
workflowId,
hasContexts: Array.isArray(normalizedContexts),
contextsCount: Array.isArray(normalizedContexts) ? normalizedContexts.length : 0,
contextsPreview: Array.isArray(normalizedContexts)
? normalizedContexts.map((c: any) => ({
kind: c?.kind,
chatId: c?.chatId,
workflowId: c?.workflowId,
executionId: (c as any)?.executionId,
label: c?.label,
}))
: undefined,
})
} catch {}
let currentChat: any = null
let conversationHistory: any[] = []
actualChatId = chatId
userMessageIdToUse = userMessageId || crypto.randomUUID()
const selectedModel = model || 'claude-opus-4-6'
logger.info(`[${tracker.requestId}] Received chat POST`, {
workflowId,
contextsCount: Array.isArray(normalizedContexts) ? normalizedContexts.length : 0,
})
// 4. Resolve or create chat
let currentChat: ChatLoadResult['chat'] = null
let conversationHistory: unknown[] = []
actualChatId = chatId
if (chatId || createNewChat) {
const chatResult = await resolveOrCreateChat({
chatId,
@@ -233,37 +205,48 @@ export async function POST(req: NextRequest) {
}
}
if (actualChatId) {
chatStreamLockAcquired = await acquirePendingChatStream(actualChatId, userMessageIdToUse)
if (!chatStreamLockAcquired) {
const activeStreamId = await getPendingChatStreamId(actualChatId)
return NextResponse.json(
{
error: 'A response is already in progress for this chat.',
...(activeStreamId ? { activeStreamId } : {}),
},
{ status: 409 }
)
}
}
// 5. Process contexts
let agentContexts: Array<{ type: string; content: string }> = []
if (Array.isArray(normalizedContexts) && normalizedContexts.length > 0) {
try {
const { processContextsServer } = await import('@/lib/copilot/process-contents')
const processed = await processContextsServer(
normalizedContexts as any,
normalizedContexts as ChatContext[],
authenticatedUserId,
message,
resolvedWorkspaceId,
actualChatId
)
agentContexts = processed
reqLogger.info('Contexts processed for request', {
logger.info(`[${tracker.requestId}] Contexts processed`, {
processedCount: agentContexts.length,
kinds: agentContexts.map((c) => c.type),
lengthPreview: agentContexts.map((c) => c.content?.length ?? 0),
})
if (
Array.isArray(normalizedContexts) &&
normalizedContexts.length > 0 &&
agentContexts.length === 0
) {
reqLogger.warn(
'Contexts provided but none processed. Check executionId for logs contexts.'
if (agentContexts.length === 0) {
logger.warn(
`[${tracker.requestId}] Contexts provided but none processed. Check executionId for logs contexts.`
)
}
} catch (e) {
reqLogger.error('Failed to process contexts', e)
logger.error(`[${tracker.requestId}] Failed to process contexts`, e)
}
}
// 5b. Process resource attachments
if (
Array.isArray(resourceAttachments) &&
resourceAttachments.length > 0 &&
@@ -279,26 +262,30 @@ export async function POST(req: NextRequest) {
actualChatId
)
if (!ctx) return null
return {
...ctx,
tag: r.active ? '@active_tab' : '@open_tab',
}
return { ...ctx, tag: r.active ? '@active_tab' : '@open_tab' }
})
)
for (const result of results) {
if (result.status === 'fulfilled' && result.value) {
agentContexts.push(result.value)
} else if (result.status === 'rejected') {
reqLogger.error('Failed to resolve resource attachment', result.reason)
logger.error(
`[${tracker.requestId}] Failed to resolve resource attachment`,
result.reason
)
}
}
}
const effectiveMode = mode === 'agent' ? 'build' : mode
// 6. Build copilot request payload
const userPermission = resolvedWorkspaceId
? await getUserEntityPermissions(authenticatedUserId, 'workspace', resolvedWorkspaceId).catch(
() => null
(err) => {
logger.warn('Failed to load user permissions', {
error: err instanceof Error ? err.message : String(err),
})
return null
}
)
: null
@@ -322,55 +309,24 @@ export async function POST(req: NextRequest) {
userPermission: userPermission ?? undefined,
userTimezone,
},
{
selectedModel,
}
{ selectedModel }
)
try {
reqLogger.info('About to call Sim Agent', {
hasContext: agentContexts.length > 0,
contextCount: agentContexts.length,
hasFileAttachments: Array.isArray(requestPayload.fileAttachments),
messageLength: message.length,
mode: effectiveMode,
hasTools: Array.isArray(requestPayload.tools),
toolCount: Array.isArray(requestPayload.tools) ? requestPayload.tools.length : 0,
hasBaseTools: Array.isArray(requestPayload.baseTools),
baseToolCount: Array.isArray(requestPayload.baseTools)
? requestPayload.baseTools.length
: 0,
hasCredentials: !!requestPayload.credentials,
})
} catch {}
if (stream && actualChatId) {
const acquired = await acquirePendingChatStream(actualChatId, userMessageIdToUse)
if (!acquired) {
return NextResponse.json(
{
error:
'A response is already in progress for this chat. Wait for it to finish or use Stop.',
},
{ status: 409 }
)
}
pendingChatStreamAcquired = true
pendingChatStreamID = userMessageIdToUse
}
logger.info(`[${tracker.requestId}] About to call Sim Agent`, {
contextCount: agentContexts.length,
hasFileAttachments: Array.isArray(requestPayload.fileAttachments),
messageLength: message.length,
mode,
})
// 7. Persist user message
if (actualChatId) {
const userMsg = {
const userMsg = buildPersistedUserMessage({
id: userMessageIdToUse,
role: 'user' as const,
content: message,
timestamp: new Date().toISOString(),
...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }),
...(Array.isArray(normalizedContexts) &&
normalizedContexts.length > 0 && {
contexts: normalizedContexts,
}),
}
fileAttachments,
contexts: normalizedContexts,
})
const [updated] = await db
.update(copilotChats)
@@ -383,268 +339,66 @@ export async function POST(req: NextRequest) {
.returning({ messages: copilotChats.messages })
if (updated) {
const freshMessages: any[] = Array.isArray(updated.messages) ? updated.messages : []
conversationHistory = freshMessages.filter((m: any) => m.id !== userMessageIdToUse)
const freshMessages: Record<string, unknown>[] = Array.isArray(updated.messages)
? updated.messages
: []
conversationHistory = freshMessages.filter(
(m: Record<string, unknown>) => m.id !== userMessageIdToUse
)
}
}
if (stream) {
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
const sseStream = createSSEStream({
requestPayload,
userId: authenticatedUserId,
streamId: userMessageIdToUse,
executionId,
runId,
chatId: actualChatId,
currentChat,
isNewChat: conversationHistory.length === 0,
message,
titleModel: selectedModel,
titleProvider: provider,
requestId: tracker.requestId,
workspaceId: resolvedWorkspaceId,
pendingChatStreamAlreadyRegistered: Boolean(actualChatId && stream),
orchestrateOptions: {
userId: authenticatedUserId,
workflowId,
chatId: actualChatId,
executionId,
runId,
goRoute: '/api/copilot',
autoExecuteTools: true,
interactive: true,
onComplete: async (result: OrchestratorResult) => {
if (!actualChatId) return
if (!result.success) return
// 8. Create SSE stream with onComplete for assistant message persistence
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
const assistantMessage: Record<string, unknown> = {
id: crypto.randomUUID(),
role: 'assistant' as const,
content: result.content,
timestamp: new Date().toISOString(),
...(result.requestId ? { requestId: result.requestId } : {}),
}
if (result.toolCalls.length > 0) {
assistantMessage.toolCalls = result.toolCalls
}
if (result.contentBlocks.length > 0) {
assistantMessage.contentBlocks = result.contentBlocks.map((block) => {
const stored: Record<string, unknown> = { type: block.type }
if (block.content) stored.content = block.content
if (block.type === 'tool_call' && block.toolCall) {
const state =
block.toolCall.result?.success !== undefined
? block.toolCall.result.success
? 'success'
: 'error'
: block.toolCall.status
const isSubagentTool = !!block.calledBy
const isNonTerminal =
state === 'cancelled' || state === 'pending' || state === 'executing'
stored.toolCall = {
id: block.toolCall.id,
name: block.toolCall.name,
state,
...(isSubagentTool && isNonTerminal ? {} : { result: block.toolCall.result }),
...(isSubagentTool && isNonTerminal
? {}
: block.toolCall.params
? { params: block.toolCall.params }
: {}),
...(block.calledBy ? { calledBy: block.calledBy } : {}),
}
}
return stored
})
}
try {
const [row] = await db
.select({ messages: copilotChats.messages })
.from(copilotChats)
.where(eq(copilotChats.id, actualChatId))
.limit(1)
const msgs: any[] = Array.isArray(row?.messages) ? row.messages : []
const userIdx = msgs.findIndex((m: any) => m.id === userMessageIdToUse)
const alreadyHasResponse =
userIdx >= 0 &&
userIdx + 1 < msgs.length &&
(msgs[userIdx + 1] as any)?.role === 'assistant'
if (!alreadyHasResponse) {
await db
.update(copilotChats)
.set({
messages: sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb`,
conversationId: sql`CASE WHEN ${copilotChats.conversationId} = ${userMessageIdToUse} THEN NULL ELSE ${copilotChats.conversationId} END`,
updatedAt: new Date(),
})
.where(eq(copilotChats.id, actualChatId))
}
} catch (error) {
reqLogger.error('Failed to persist chat messages', {
chatId: actualChatId,
error: error instanceof Error ? error.message : 'Unknown error',
})
}
},
},
})
pendingChatStreamHandedOff = true
return new Response(sseStream, { headers: SSE_RESPONSE_HEADERS })
}
const nsExecutionId = crypto.randomUUID()
const nsRunId = crypto.randomUUID()
if (actualChatId) {
await createRunSegment({
id: nsRunId,
executionId: nsExecutionId,
chatId: actualChatId,
const sseStream = createSSEStream({
requestPayload,
userId: authenticatedUserId,
streamId: userMessageIdToUse,
executionId,
runId,
chatId: actualChatId,
currentChat,
isNewChat: conversationHistory.length === 0,
message,
titleModel: selectedModel,
titleProvider: provider,
requestId: tracker.requestId,
workspaceId: resolvedWorkspaceId,
orchestrateOptions: {
userId: authenticatedUserId,
workflowId,
streamId: userMessageIdToUse,
}).catch(() => {})
}
const nonStreamingResult = await orchestrateCopilotStream(requestPayload, {
userId: authenticatedUserId,
workflowId,
chatId: actualChatId,
executionId: nsExecutionId,
runId: nsRunId,
goRoute: '/api/copilot',
autoExecuteTools: true,
interactive: true,
})
const responseData = {
content: nonStreamingResult.content,
toolCalls: nonStreamingResult.toolCalls,
model: selectedModel,
provider: typeof requestPayload?.provider === 'string' ? requestPayload.provider : undefined,
}
reqLogger.info('Non-streaming response from orchestrator', {
hasContent: !!responseData.content,
contentLength: responseData.content?.length || 0,
model: responseData.model,
provider: responseData.provider,
toolCallsCount: responseData.toolCalls?.length || 0,
})
// Save messages if we have a chat
if (currentChat && responseData.content) {
const userMessage = {
id: userMessageIdToUse, // Consistent ID used for request and persistence
role: 'user',
content: message,
timestamp: new Date().toISOString(),
...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }),
...(Array.isArray(normalizedContexts) &&
normalizedContexts.length > 0 && {
contexts: normalizedContexts,
}),
...(Array.isArray(normalizedContexts) &&
normalizedContexts.length > 0 && {
contentBlocks: [
{ type: 'contexts', contexts: normalizedContexts as any, timestamp: Date.now() },
],
}),
}
const assistantMessage = {
id: crypto.randomUUID(),
role: 'assistant',
content: responseData.content,
timestamp: new Date().toISOString(),
}
const updatedMessages = [...conversationHistory, userMessage, assistantMessage]
// Start title generation in parallel if this is first message (non-streaming)
if (actualChatId && !currentChat.title && conversationHistory.length === 0) {
reqLogger.info('Starting title generation for non-streaming response')
requestChatTitle({ message, model: selectedModel, provider, messageId: userMessageIdToUse })
.then(async (title) => {
if (title) {
await db
.update(copilotChats)
.set({
title,
updatedAt: new Date(),
})
.where(eq(copilotChats.id, actualChatId!))
reqLogger.info(`Generated and saved title: ${title}`)
}
})
.catch((error) => {
reqLogger.error('Title generation failed', error)
})
}
// Update chat in database immediately (without blocking for title)
await db
.update(copilotChats)
.set({
messages: updatedMessages,
updatedAt: new Date(),
})
.where(eq(copilotChats.id, actualChatId!))
}
reqLogger.info('Returning non-streaming response', {
duration: tracker.getDuration(),
chatId: actualChatId,
responseLength: responseData.content?.length || 0,
})
return NextResponse.json({
success: true,
response: responseData,
chatId: actualChatId,
metadata: {
requestId: tracker.requestId,
message,
duration: tracker.getDuration(),
chatId: actualChatId,
executionId,
runId,
goRoute: '/api/copilot',
autoExecuteTools: true,
interactive: true,
onComplete: buildOnComplete(actualChatId, userMessageIdToUse, tracker.requestId),
},
})
return new Response(sseStream, { headers: SSE_RESPONSE_HEADERS })
} catch (error) {
if (
actualChatId &&
pendingChatStreamAcquired &&
!pendingChatStreamHandedOff &&
pendingChatStreamID
) {
await releasePendingChatStream(actualChatId, pendingChatStreamID).catch(() => {})
if (chatStreamLockAcquired && actualChatId && userMessageIdToUse) {
await releasePendingChatStream(actualChatId, userMessageIdToUse)
}
const duration = tracker.getDuration()
if (error instanceof z.ZodError) {
logger
.withMetadata({ requestId: tracker.requestId, messageId: pendingChatStreamID ?? undefined })
.error('Validation error', {
duration,
errors: error.errors,
})
logger.error(`[${tracker.requestId}] Validation error:`, { duration, errors: error.errors })
return NextResponse.json(
{ error: 'Invalid request data', details: error.errors },
{ status: 400 }
)
}
logger
.withMetadata({ requestId: tracker.requestId, messageId: pendingChatStreamID ?? undefined })
.error('Error handling copilot chat', {
duration,
error: error instanceof Error ? error.message : 'Unknown error',
stack: error instanceof Error ? error.stack : undefined,
})
logger.error(`[${tracker.requestId}] Error handling copilot chat:`, {
duration,
error: error instanceof Error ? error.message : 'Unknown error',
stack: error instanceof Error ? error.stack : undefined,
})
return NextResponse.json(
{ error: error instanceof Error ? error.message : 'Internal server error' },
@@ -653,132 +407,55 @@ export async function POST(req: NextRequest) {
}
}
export async function GET(req: NextRequest) {
try {
const { searchParams } = new URL(req.url)
const workflowId = searchParams.get('workflowId')
const workspaceId = searchParams.get('workspaceId')
const chatId = searchParams.get('chatId')
// ---------------------------------------------------------------------------
// onComplete: persist assistant message after streaming finishes
// ---------------------------------------------------------------------------
const { userId: authenticatedUserId, isAuthenticated } =
await authenticateCopilotRequestSessionOnly()
if (!isAuthenticated || !authenticatedUserId) {
return createUnauthorizedResponse()
}
function buildOnComplete(
chatId: string | undefined,
userMessageId: string,
requestId: string
): (result: OrchestratorResult) => Promise<void> {
return async (result) => {
if (!chatId || !result.success) return
if (chatId) {
const chat = await getAccessibleCopilotChat(chatId, authenticatedUserId)
const assistantMessage = buildPersistedAssistantMessage(result, result.requestId)
if (!chat) {
return NextResponse.json({ success: false, error: 'Chat not found' }, { status: 404 })
try {
const [row] = await db
.select({ messages: copilotChats.messages })
.from(copilotChats)
.where(eq(copilotChats.id, chatId))
.limit(1)
const msgs: Record<string, unknown>[] = Array.isArray(row?.messages) ? row.messages : []
const userIdx = msgs.findIndex((m: Record<string, unknown>) => m.id === userMessageId)
const alreadyHasResponse =
userIdx >= 0 &&
userIdx + 1 < msgs.length &&
(msgs[userIdx + 1] as Record<string, unknown>)?.role === 'assistant'
if (!alreadyHasResponse) {
await db
.update(copilotChats)
.set({
messages: sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb`,
conversationId: sql`CASE WHEN ${copilotChats.conversationId} = ${userMessageId} THEN NULL ELSE ${copilotChats.conversationId} END`,
updatedAt: new Date(),
})
.where(eq(copilotChats.id, chatId))
}
let streamSnapshot: {
events: Array<{ eventId: number; streamId: string; event: Record<string, unknown> }>
status: string
} | null = null
if (chat.conversationId) {
try {
const [meta, events] = await Promise.all([
getStreamMeta(chat.conversationId),
readStreamEvents(chat.conversationId, 0),
])
streamSnapshot = {
events: events || [],
status: meta?.status || 'unknown',
}
} catch (err) {
logger
.withMetadata({ messageId: chat.conversationId || undefined })
.warn('Failed to read stream snapshot for chat', {
chatId,
conversationId: chat.conversationId,
error: err instanceof Error ? err.message : String(err),
})
}
}
const transformedChat = {
id: chat.id,
title: chat.title,
model: chat.model,
messages: Array.isArray(chat.messages) ? chat.messages : [],
messageCount: Array.isArray(chat.messages) ? chat.messages.length : 0,
planArtifact: chat.planArtifact || null,
config: chat.config || null,
conversationId: chat.conversationId || null,
resources: Array.isArray(chat.resources) ? chat.resources : [],
createdAt: chat.createdAt,
updatedAt: chat.updatedAt,
...(streamSnapshot ? { streamSnapshot } : {}),
}
logger
.withMetadata({ messageId: chat.conversationId || undefined })
.info(`Retrieved chat ${chatId}`)
return NextResponse.json({ success: true, chat: transformedChat })
}
if (!workflowId && !workspaceId) {
return createBadRequestResponse('workflowId, workspaceId, or chatId is required')
}
if (workspaceId) {
await assertActiveWorkspaceAccess(workspaceId, authenticatedUserId)
}
if (workflowId) {
const authorization = await authorizeWorkflowByWorkspacePermission({
workflowId,
userId: authenticatedUserId,
action: 'read',
} catch (error) {
logger.error(`[${requestId}] Failed to persist chat messages`, {
chatId,
error: error instanceof Error ? error.message : 'Unknown error',
})
if (!authorization.allowed) {
return createUnauthorizedResponse()
}
}
const scopeFilter = workflowId
? eq(copilotChats.workflowId, workflowId)
: eq(copilotChats.workspaceId, workspaceId!)
const chats = await db
.select({
id: copilotChats.id,
title: copilotChats.title,
model: copilotChats.model,
messages: copilotChats.messages,
planArtifact: copilotChats.planArtifact,
config: copilotChats.config,
createdAt: copilotChats.createdAt,
updatedAt: copilotChats.updatedAt,
})
.from(copilotChats)
.where(and(eq(copilotChats.userId, authenticatedUserId), scopeFilter))
.orderBy(desc(copilotChats.updatedAt))
const transformedChats = chats.map((chat) => ({
id: chat.id,
title: chat.title,
model: chat.model,
messages: Array.isArray(chat.messages) ? chat.messages : [],
messageCount: Array.isArray(chat.messages) ? chat.messages.length : 0,
planArtifact: chat.planArtifact || null,
config: chat.config || null,
createdAt: chat.createdAt,
updatedAt: chat.updatedAt,
}))
const scope = workflowId ? `workflow ${workflowId}` : `workspace ${workspaceId}`
logger.info(`Retrieved ${transformedChats.length} chats for ${scope}`)
return NextResponse.json({
success: true,
chats: transformedChats,
})
} catch (error) {
logger.error('Error fetching copilot chats', error)
return createInternalServerErrorResponse('Failed to fetch chats')
}
}
// ---------------------------------------------------------------------------
// GET handler (read-only queries, extracted to queries.ts)
// ---------------------------------------------------------------------------
export { GET } from './queries'

View File

@@ -4,25 +4,67 @@
import { NextRequest } from 'next/server'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import {
MothershipStreamV1CompletionStatus,
MothershipStreamV1EventType,
} from '@/lib/copilot/generated/mothership-stream-v1'
const { getStreamMeta, readStreamEvents, authenticateCopilotRequestSessionOnly } = vi.hoisted(
() => ({
getStreamMeta: vi.fn(),
readStreamEvents: vi.fn(),
authenticateCopilotRequestSessionOnly: vi.fn(),
})
)
vi.mock('@/lib/copilot/orchestrator/stream/buffer', () => ({
getStreamMeta,
readStreamEvents,
const {
getLatestRunForStream,
readEvents,
checkForReplayGap,
authenticateCopilotRequestSessionOnly,
} = vi.hoisted(() => ({
getLatestRunForStream: vi.fn(),
readEvents: vi.fn(),
checkForReplayGap: vi.fn(),
authenticateCopilotRequestSessionOnly: vi.fn(),
}))
vi.mock('@/lib/copilot/request-helpers', () => ({
vi.mock('@/lib/copilot/async-runs/repository', () => ({
getLatestRunForStream,
}))
vi.mock('@/lib/copilot/request/session', () => ({
readEvents,
checkForReplayGap,
createEvent: (event: Record<string, unknown>) => ({
stream: {
streamId: event.streamId,
cursor: event.cursor,
},
seq: event.seq,
trace: { requestId: event.requestId ?? '' },
type: event.type,
payload: event.payload,
}),
encodeSSEEnvelope: (event: Record<string, unknown>) =>
new TextEncoder().encode(`data: ${JSON.stringify(event)}\n\n`),
SSE_RESPONSE_HEADERS: {
'Content-Type': 'text/event-stream',
},
}))
vi.mock('@/lib/copilot/request/http', () => ({
authenticateCopilotRequestSessionOnly,
}))
import { GET } from '@/app/api/copilot/chat/stream/route'
import { GET } from './route'
async function readAllChunks(response: Response): Promise<string[]> {
const reader = response.body?.getReader()
expect(reader).toBeTruthy()
const chunks: string[] = []
while (true) {
const { done, value } = await reader!.read()
if (done) {
break
}
chunks.push(new TextDecoder().decode(value))
}
return chunks
}
describe('copilot chat stream replay route', () => {
beforeEach(() => {
@@ -31,29 +73,54 @@ describe('copilot chat stream replay route', () => {
userId: 'user-1',
isAuthenticated: true,
})
readStreamEvents.mockResolvedValue([])
readEvents.mockResolvedValue([])
checkForReplayGap.mockResolvedValue(null)
})
it('stops replay polling when stream meta becomes cancelled', async () => {
getStreamMeta
it('stops replay polling when run becomes cancelled', async () => {
getLatestRunForStream
.mockResolvedValueOnce({
status: 'active',
userId: 'user-1',
executionId: 'exec-1',
id: 'run-1',
})
.mockResolvedValueOnce({
status: 'cancelled',
userId: 'user-1',
executionId: 'exec-1',
id: 'run-1',
})
const response = await GET(
new NextRequest('http://localhost:3000/api/copilot/chat/stream?streamId=stream-1')
new NextRequest('http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0')
)
const reader = response.body?.getReader()
expect(reader).toBeTruthy()
const chunks = await readAllChunks(response)
expect(chunks.join('')).toContain(
JSON.stringify({
status: MothershipStreamV1CompletionStatus.cancelled,
reason: 'terminal_status',
})
)
expect(getLatestRunForStream).toHaveBeenCalledTimes(2)
})
const first = await reader!.read()
expect(first.done).toBe(true)
expect(getStreamMeta).toHaveBeenCalledTimes(2)
it('emits structured terminal replay error when run metadata disappears', async () => {
getLatestRunForStream
.mockResolvedValueOnce({
status: 'active',
executionId: 'exec-1',
id: 'run-1',
})
.mockResolvedValueOnce(null)
const response = await GET(
new NextRequest('http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0')
)
const chunks = await readAllChunks(response)
const body = chunks.join('')
expect(body).toContain(`"type":"${MothershipStreamV1EventType.error}"`)
expect(body).toContain('"code":"resume_run_unavailable"')
expect(body).toContain(`"type":"${MothershipStreamV1EventType.complete}"`)
})
})

View File

@@ -1,12 +1,18 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { getLatestRunForStream } from '@/lib/copilot/async-runs/repository'
import {
getStreamMeta,
readStreamEvents,
type StreamMeta,
} from '@/lib/copilot/orchestrator/stream/buffer'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
MothershipStreamV1CompletionStatus,
MothershipStreamV1EventType,
} from '@/lib/copilot/generated/mothership-stream-v1'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http'
import {
checkForReplayGap,
createEvent,
encodeSSEEnvelope,
readEvents,
SSE_RESPONSE_HEADERS,
} from '@/lib/copilot/request/session'
export const maxDuration = 3600
@@ -14,8 +20,59 @@ const logger = createLogger('CopilotChatStreamAPI')
const POLL_INTERVAL_MS = 250
const MAX_STREAM_MS = 60 * 60 * 1000
function encodeEvent(event: Record<string, any>): Uint8Array {
return new TextEncoder().encode(`data: ${JSON.stringify(event)}\n\n`)
function isTerminalStatus(
status: string | null | undefined
): status is MothershipStreamV1CompletionStatus {
return (
status === MothershipStreamV1CompletionStatus.complete ||
status === MothershipStreamV1CompletionStatus.error ||
status === MothershipStreamV1CompletionStatus.cancelled
)
}
function buildResumeTerminalEnvelopes(options: {
streamId: string
afterCursor: string
status: MothershipStreamV1CompletionStatus
message?: string
code: string
reason?: string
}) {
const baseSeq = Number(options.afterCursor || '0')
const seq = Number.isFinite(baseSeq) ? baseSeq : 0
const envelopes: ReturnType<typeof createEvent>[] = []
if (options.status === MothershipStreamV1CompletionStatus.error) {
envelopes.push(
createEvent({
streamId: options.streamId,
cursor: String(seq + 1),
seq: seq + 1,
requestId: '',
type: MothershipStreamV1EventType.error,
payload: {
message: options.message || 'Stream recovery failed before completion.',
code: options.code,
},
})
)
}
envelopes.push(
createEvent({
streamId: options.streamId,
cursor: String(seq + envelopes.length + 1),
seq: seq + envelopes.length + 1,
requestId: '',
type: MothershipStreamV1EventType.complete,
payload: {
status: options.status,
...(options.reason ? { reason: options.reason } : {}),
},
})
)
return envelopes
}
export async function GET(request: NextRequest) {
@@ -28,58 +85,49 @@ export async function GET(request: NextRequest) {
const url = new URL(request.url)
const streamId = url.searchParams.get('streamId') || ''
const fromParam = url.searchParams.get('from') || '0'
const fromEventId = Number(fromParam || 0)
// If batch=true, return buffered events as JSON instead of SSE
const afterCursor = url.searchParams.get('after') || ''
const batchMode = url.searchParams.get('batch') === 'true'
const toParam = url.searchParams.get('to')
const toEventId = toParam ? Number(toParam) : undefined
const reqLogger = logger.withMetadata({ messageId: streamId || undefined })
reqLogger.info('[Resume] Received resume request', {
streamId: streamId || undefined,
fromEventId,
toEventId,
batchMode,
})
if (!streamId) {
return NextResponse.json({ error: 'streamId is required' }, { status: 400 })
}
const meta = (await getStreamMeta(streamId)) as StreamMeta | null
reqLogger.info('[Resume] Stream lookup', {
streamId,
fromEventId,
toEventId,
batchMode,
hasMeta: !!meta,
metaStatus: meta?.status,
const run = await getLatestRunForStream(streamId, authenticatedUserId).catch((err) => {
logger.warn('Failed to fetch latest run for stream', {
streamId,
error: err instanceof Error ? err.message : String(err),
})
return null
})
if (!meta) {
logger.info('[Resume] Stream lookup', {
streamId,
afterCursor,
batchMode,
hasRun: !!run,
runStatus: run?.status,
})
if (!run) {
return NextResponse.json({ error: 'Stream not found' }, { status: 404 })
}
if (meta.userId && meta.userId !== authenticatedUserId) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 403 })
}
// Batch mode: return all buffered events as JSON
if (batchMode) {
const events = await readStreamEvents(streamId, fromEventId)
const filteredEvents = toEventId ? events.filter((e) => e.eventId <= toEventId) : events
reqLogger.info('[Resume] Batch response', {
const afterSeq = afterCursor || '0'
const events = await readEvents(streamId, afterSeq)
const batchEvents = events.map((envelope) => ({
eventId: envelope.seq,
streamId: envelope.stream.streamId,
event: envelope,
}))
logger.info('[Resume] Batch response', {
streamId,
fromEventId,
toEventId,
eventCount: filteredEvents.length,
afterCursor: afterSeq,
eventCount: batchEvents.length,
runStatus: run.status,
})
return NextResponse.json({
success: true,
events: filteredEvents,
status: meta.status,
executionId: meta.executionId,
runId: meta.runId,
events: batchEvents,
status: run.status,
})
}
@@ -87,9 +135,9 @@ export async function GET(request: NextRequest) {
const stream = new ReadableStream({
async start(controller) {
let lastEventId = Number.isFinite(fromEventId) ? fromEventId : 0
let latestMeta = meta
let cursor = afterCursor || '0'
let controllerClosed = false
let sawTerminalEvent = false
const closeController = () => {
if (controllerClosed) return
@@ -97,14 +145,14 @@ export async function GET(request: NextRequest) {
try {
controller.close()
} catch {
// Controller already closed by runtime/client - treat as normal.
// Controller already closed by runtime/client
}
}
const enqueueEvent = (payload: Record<string, any>) => {
const enqueueEvent = (payload: unknown) => {
if (controllerClosed) return false
try {
controller.enqueue(encodeEvent(payload))
controller.enqueue(encodeSSEEnvelope(payload))
return true
} catch {
controllerClosed = true
@@ -118,47 +166,96 @@ export async function GET(request: NextRequest) {
request.signal.addEventListener('abort', abortListener, { once: true })
const flushEvents = async () => {
const events = await readStreamEvents(streamId, lastEventId)
const events = await readEvents(streamId, cursor)
if (events.length > 0) {
reqLogger.info('[Resume] Flushing events', {
logger.info('[Resume] Flushing events', {
streamId,
fromEventId: lastEventId,
afterCursor: cursor,
eventCount: events.length,
})
}
for (const entry of events) {
lastEventId = entry.eventId
const payload = {
...entry.event,
eventId: entry.eventId,
streamId: entry.streamId,
executionId: latestMeta?.executionId,
runId: latestMeta?.runId,
for (const envelope of events) {
cursor = envelope.stream.cursor ?? String(envelope.seq)
if (envelope.type === MothershipStreamV1EventType.complete) {
sawTerminalEvent = true
}
if (!enqueueEvent(payload)) {
if (!enqueueEvent(envelope)) {
break
}
}
}
const emitTerminalIfMissing = (
status: MothershipStreamV1CompletionStatus,
options?: { message?: string; code: string; reason?: string }
) => {
if (controllerClosed || sawTerminalEvent) {
return
}
for (const envelope of buildResumeTerminalEnvelopes({
streamId,
afterCursor: cursor,
status,
message: options?.message,
code: options?.code ?? 'resume_terminal',
reason: options?.reason,
})) {
cursor = envelope.stream.cursor ?? String(envelope.seq)
if (envelope.type === MothershipStreamV1EventType.complete) {
sawTerminalEvent = true
}
if (!enqueueEvent(envelope)) {
break
}
}
}
try {
const gap = await checkForReplayGap(streamId, afterCursor)
if (gap) {
for (const envelope of gap.envelopes) {
enqueueEvent(envelope)
}
return
}
await flushEvents()
while (!controllerClosed && Date.now() - startTime < MAX_STREAM_MS) {
const currentMeta = await getStreamMeta(streamId)
if (!currentMeta) break
latestMeta = currentMeta
const currentRun = await getLatestRunForStream(streamId, authenticatedUserId).catch(
(err) => {
logger.warn('Failed to poll latest run for stream', {
streamId,
error: err instanceof Error ? err.message : String(err),
})
return null
}
)
if (!currentRun) {
emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, {
message: 'The stream could not be recovered because its run metadata is unavailable.',
code: 'resume_run_unavailable',
reason: 'run_unavailable',
})
break
}
await flushEvents()
if (controllerClosed) {
break
}
if (
currentMeta.status === 'complete' ||
currentMeta.status === 'error' ||
currentMeta.status === 'cancelled'
) {
if (isTerminalStatus(currentRun.status)) {
emitTerminalIfMissing(currentRun.status, {
message:
currentRun.status === MothershipStreamV1CompletionStatus.error
? typeof currentRun.error === 'string'
? currentRun.error
: 'The recovered stream ended with an error.'
: undefined,
code: 'resume_terminal_status',
reason: 'terminal_status',
})
break
}
@@ -169,12 +266,24 @@ export async function GET(request: NextRequest) {
await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS))
}
if (!controllerClosed && Date.now() - startTime >= MAX_STREAM_MS) {
emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, {
message: 'The stream recovery timed out before completion.',
code: 'resume_timeout',
reason: 'timeout',
})
}
} catch (error) {
if (!controllerClosed && !request.signal.aborted) {
reqLogger.warn('Stream replay failed', {
logger.warn('Stream replay failed', {
streamId,
error: error instanceof Error ? error.message : String(error),
})
emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, {
message: 'The stream replay failed before completion.',
code: 'resume_internal',
reason: 'stream_replay_failed',
})
}
} finally {
request.signal.removeEventListener('abort', abortListener)
@@ -183,5 +292,5 @@ export async function GET(request: NextRequest) {
},
})
return new Response(stream, { headers: SSE_HEADERS })
return new Response(stream, { headers: SSE_RESPONSE_HEADERS })
}

View File

@@ -327,7 +327,35 @@ describe('Copilot Chat Update Messages API Route', () => {
})
expect(mockSet).toHaveBeenCalledWith({
messages,
messages: [
{
id: 'msg-1',
role: 'user',
content: 'Hello',
timestamp: '2024-01-01T10:00:00.000Z',
},
{
id: 'msg-2',
role: 'assistant',
content: 'Hi there!',
timestamp: '2024-01-01T10:01:00.000Z',
contentBlocks: [
{
type: 'text',
content: 'Here is the weather information',
},
{
type: 'tool',
phase: 'call',
toolCall: {
id: 'tool-1',
name: 'get_weather',
state: 'pending',
},
},
],
},
],
updatedAt: expect.any(Date),
})
})

View File

@@ -4,15 +4,16 @@ import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat-lifecycle'
import { COPILOT_MODES } from '@/lib/copilot/models'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import { normalizeMessage, type PersistedMessage } from '@/lib/copilot/chat/persisted-message'
import { COPILOT_MODES } from '@/lib/copilot/constants'
import {
authenticateCopilotRequestSessionOnly,
createInternalServerErrorResponse,
createNotFoundResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
const logger = createLogger('CopilotChatUpdateAPI')
@@ -78,12 +79,15 @@ export async function POST(req: NextRequest) {
}
const { chatId, messages, planArtifact, config } = UpdateMessagesSchema.parse(body)
const normalizedMessages: PersistedMessage[] = messages.map((message) =>
normalizeMessage(message as Record<string, unknown>)
)
// Debug: Log what we're about to save
const lastMsgParsed = messages[messages.length - 1]
const lastMsgParsed = normalizedMessages[normalizedMessages.length - 1]
if (lastMsgParsed?.role === 'assistant') {
logger.info(`[${tracker.requestId}] Parsed messages to save`, {
messageCount: messages.length,
messageCount: normalizedMessages.length,
lastMsgId: lastMsgParsed.id,
lastMsgContentLength: lastMsgParsed.content?.length || 0,
lastMsgContentBlockCount: lastMsgParsed.contentBlocks?.length || 0,
@@ -99,8 +103,8 @@ export async function POST(req: NextRequest) {
}
// Update chat with new messages, plan artifact, and config
const updateData: Record<string, any> = {
messages: messages,
const updateData: Record<string, unknown> = {
messages: normalizedMessages,
updatedAt: new Date(),
}
@@ -116,14 +120,14 @@ export async function POST(req: NextRequest) {
logger.info(`[${tracker.requestId}] Successfully updated chat`, {
chatId,
newMessageCount: messages.length,
newMessageCount: normalizedMessages.length,
hasPlanArtifact: !!planArtifact,
hasConfig: !!config,
})
return NextResponse.json({
success: true,
messageCount: messages.length,
messageCount: normalizedMessages.length,
})
} catch (error) {
logger.error(`[${tracker.requestId}] Error updating chat messages:`, error)

View File

@@ -66,7 +66,7 @@ vi.mock('drizzle-orm', () => ({
sql: vi.fn(),
}))
vi.mock('@/lib/copilot/request-helpers', () => ({
vi.mock('@/lib/copilot/request/http', () => ({
authenticateCopilotRequestSessionOnly: mockAuthenticate,
createUnauthorizedResponse: mockCreateUnauthorizedResponse,
createInternalServerErrorResponse: mockCreateInternalServerErrorResponse,

View File

@@ -4,14 +4,14 @@ import { createLogger } from '@sim/logger'
import { and, desc, eq, isNull, or, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { resolveOrCreateChat } from '@/lib/copilot/chat-lifecycle'
import { resolveOrCreateChat } from '@/lib/copilot/chat/lifecycle'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
import { taskPubSub } from '@/lib/copilot/task-events'
} from '@/lib/copilot/request/http'
import { taskPubSub } from '@/lib/copilot/tasks'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
import { assertActiveWorkspaceAccess } from '@/lib/workspaces/permissions/utils'
@@ -37,7 +37,7 @@ export async function GET(_request: NextRequest) {
title: copilotChats.title,
workflowId: copilotChats.workflowId,
workspaceId: copilotChats.workspaceId,
conversationId: copilotChats.conversationId,
activeStreamId: copilotChats.conversationId,
updatedAt: copilotChats.updatedAt,
})
.from(copilotChats)

View File

@@ -43,7 +43,7 @@ vi.mock('@/lib/workflows/utils', () => ({
authorizeWorkflowByWorkspacePermission: mockAuthorize,
}))
vi.mock('@/lib/copilot/chat-lifecycle', () => ({
vi.mock('@/lib/copilot/chat/lifecycle', () => ({
getAccessibleCopilotChat: mockGetAccessibleCopilotChat,
}))

View File

@@ -4,14 +4,14 @@ import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat-lifecycle'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import {
authenticateCopilotRequestSessionOnly,
createInternalServerErrorResponse,
createNotFoundResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { getInternalApiBaseUrl } from '@/lib/core/utils/urls'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
import { isUuidV4 } from '@/executor/constants'

View File

@@ -62,7 +62,7 @@ vi.mock('drizzle-orm', () => ({
desc: vi.fn((field: unknown) => ({ field, type: 'desc' })),
}))
vi.mock('@/lib/copilot/chat-lifecycle', () => ({
vi.mock('@/lib/copilot/chat/lifecycle', () => ({
getAccessibleCopilotChat: mockGetAccessibleCopilotChat,
}))

View File

@@ -4,14 +4,14 @@ import { createLogger } from '@sim/logger'
import { and, desc, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat-lifecycle'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
const logger = createLogger('WorkflowCheckpointsAPI')

View File

@@ -38,7 +38,7 @@ const {
publishToolConfirmation: vi.fn(),
}))
vi.mock('@/lib/copilot/request-helpers', () => ({
vi.mock('@/lib/copilot/request/http', () => ({
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
@@ -54,7 +54,7 @@ vi.mock('@/lib/copilot/async-runs/repository', () => ({
completeAsyncToolCall,
}))
vi.mock('@/lib/copilot/orchestrator/persistence', () => ({
vi.mock('@/lib/copilot/persistence/tool-confirm', () => ({
publishToolConfirmation,
}))

View File

@@ -1,13 +1,14 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { ASYNC_TOOL_STATUS } from '@/lib/copilot/async-runs/lifecycle'
import {
completeAsyncToolCall,
getAsyncToolCall,
getRunSegment,
upsertAsyncToolCall,
} from '@/lib/copilot/async-runs/repository'
import { publishToolConfirmation } from '@/lib/copilot/orchestrator/persistence'
import { publishToolConfirmation } from '@/lib/copilot/persistence/tool-confirm'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
@@ -16,7 +17,7 @@ import {
createRequestTracker,
createUnauthorizedResponse,
type NotificationStatus,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
const logger = createLogger('CopilotConfirmAPI')
@@ -42,17 +43,17 @@ async function updateToolCallStatus(
const toolCallId = existing.toolCallId
const durableStatus =
status === 'success'
? 'completed'
? ASYNC_TOOL_STATUS.completed
: status === 'cancelled'
? 'cancelled'
? ASYNC_TOOL_STATUS.cancelled
: status === 'error' || status === 'rejected'
? 'failed'
: 'pending'
? ASYNC_TOOL_STATUS.failed
: ASYNC_TOOL_STATUS.pending
try {
if (
durableStatus === 'completed' ||
durableStatus === 'failed' ||
durableStatus === 'cancelled'
durableStatus === ASYNC_TOOL_STATUS.completed ||
durableStatus === ASYNC_TOOL_STATUS.failed ||
durableStatus === ASYNC_TOOL_STATUS.cancelled
) {
await completeAsyncToolCall({
toolCallId,
@@ -107,13 +108,25 @@ export async function POST(req: NextRequest) {
const body = await req.json()
const { toolCallId, status, message, data } = ConfirmationSchema.parse(body)
const existing = await getAsyncToolCall(toolCallId).catch(() => null)
const existing = await getAsyncToolCall(toolCallId).catch((err) => {
logger.warn('Failed to fetch async tool call', {
toolCallId,
error: err instanceof Error ? err.message : String(err),
})
return null
})
if (!existing) {
return createNotFoundResponse('Tool call not found')
}
const run = await getRunSegment(existing.runId).catch(() => null)
const run = await getRunSegment(existing.runId).catch((err) => {
logger.warn('Failed to fetch run segment', {
runId: existing.runId,
error: err instanceof Error ? err.message : String(err),
})
return null
})
if (!run) {
return createNotFoundResponse('Tool call run not found')
}

View File

@@ -1,5 +1,5 @@
import { type NextRequest, NextResponse } from 'next/server'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http'
import { routeExecution } from '@/lib/copilot/tools/server/router'
/**

View File

@@ -57,7 +57,7 @@ vi.mock('drizzle-orm', () => ({
eq: vi.fn((field: unknown, value: unknown) => ({ field, value, type: 'eq' })),
}))
vi.mock('@/lib/copilot/request-helpers', () => ({
vi.mock('@/lib/copilot/request/http', () => ({
authenticateCopilotRequestSessionOnly: mockAuthenticate,
createUnauthorizedResponse: mockCreateUnauthorizedResponse,
createBadRequestResponse: mockCreateBadRequestResponse,

View File

@@ -10,7 +10,7 @@ import {
createInternalServerErrorResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
const logger = createLogger('CopilotFeedbackAPI')

View File

@@ -1,8 +1,14 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import type { AvailableModel } from '@/lib/copilot/types'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http'
interface AvailableModel {
id: string
friendlyName: string
provider: string
}
import { env } from '@/lib/core/config/env'
const logger = createLogger('CopilotModelsAPI')

View File

@@ -23,7 +23,7 @@ const {
mockFetch: vi.fn(),
}))
vi.mock('@/lib/copilot/request-helpers', () => ({
vi.mock('@/lib/copilot/request/http', () => ({
authenticateCopilotRequestSessionOnly: mockAuthenticateCopilotRequestSessionOnly,
createUnauthorizedResponse: mockCreateUnauthorizedResponse,
createBadRequestResponse: mockCreateBadRequestResponse,

View File

@@ -7,7 +7,7 @@ import {
createInternalServerErrorResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { env } from '@/lib/core/config/env'
const BodySchema = z.object({

View File

@@ -4,7 +4,7 @@ import { z } from 'zod'
import {
authenticateCopilotRequestSessionOnly,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { env } from '@/lib/core/config/env'
const logger = createLogger('CopilotTrainingExamplesAPI')

View File

@@ -4,7 +4,7 @@ import { z } from 'zod'
import {
authenticateCopilotRequestSessionOnly,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { env } from '@/lib/core/config/env'
const logger = createLogger('CopilotTrainingAPI')

View File

@@ -75,6 +75,16 @@ vi.mock('@/lib/uploads/utils/file-utils', () => ({
vi.mock('@/lib/uploads/setup.server', () => ({}))
vi.mock('@/lib/execution/doc-vm', () => ({
generatePdfFromCode: vi.fn().mockResolvedValue(Buffer.from('%PDF-compiled')),
generateDocxFromCode: vi.fn().mockResolvedValue(Buffer.from('PK\x03\x04compiled')),
generatePptxFromCode: vi.fn().mockResolvedValue(Buffer.from('PK\x03\x04compiled')),
}))
vi.mock('@/lib/uploads/contexts/workspace/workspace-file-manager', () => ({
parseWorkspaceFileKey: vi.fn().mockReturnValue(undefined),
}))
vi.mock('@/app/api/files/utils', () => ({
FileNotFoundError,
createFileResponse: mockCreateFileResponse,

View File

@@ -4,7 +4,11 @@ import { createLogger } from '@sim/logger'
import type { NextRequest } from 'next/server'
import { NextResponse } from 'next/server'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid'
import { generatePptxFromCode } from '@/lib/execution/pptx-vm'
import {
generateDocxFromCode,
generatePdfFromCode,
generatePptxFromCode,
} from '@/lib/execution/doc-vm'
import { CopilotFiles, isUsingCloudStorage } from '@/lib/uploads'
import type { StorageContext } from '@/lib/uploads/config'
import { parseWorkspaceFileKey } from '@/lib/uploads/contexts/workspace/workspace-file-manager'
@@ -22,47 +26,73 @@ import {
const logger = createLogger('FilesServeAPI')
const ZIP_MAGIC = Buffer.from([0x50, 0x4b, 0x03, 0x04])
const PDF_MAGIC = Buffer.from([0x25, 0x50, 0x44, 0x46, 0x2d]) // %PDF-
const MAX_COMPILED_PPTX_CACHE = 10
const compiledPptxCache = new Map<string, Buffer>()
function compiledCacheSet(key: string, buffer: Buffer): void {
if (compiledPptxCache.size >= MAX_COMPILED_PPTX_CACHE) {
compiledPptxCache.delete(compiledPptxCache.keys().next().value as string)
}
compiledPptxCache.set(key, buffer)
interface CompilableFormat {
magic: Buffer
compile: (code: string, workspaceId: string) => Promise<Buffer>
contentType: string
}
async function compilePptxIfNeeded(
const COMPILABLE_FORMATS: Record<string, CompilableFormat> = {
'.pptx': {
magic: ZIP_MAGIC,
compile: generatePptxFromCode,
contentType: 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
},
'.docx': {
magic: ZIP_MAGIC,
compile: generateDocxFromCode,
contentType: 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
},
'.pdf': {
magic: PDF_MAGIC,
compile: generatePdfFromCode,
contentType: 'application/pdf',
},
}
const MAX_COMPILED_DOC_CACHE = 10
const compiledDocCache = new Map<string, Buffer>()
function compiledCacheSet(key: string, buffer: Buffer): void {
if (compiledDocCache.size >= MAX_COMPILED_DOC_CACHE) {
compiledDocCache.delete(compiledDocCache.keys().next().value as string)
}
compiledDocCache.set(key, buffer)
}
async function compileDocumentIfNeeded(
buffer: Buffer,
filename: string,
workspaceId?: string,
raw?: boolean
): Promise<{ buffer: Buffer; contentType: string }> {
const isPptx = filename.toLowerCase().endsWith('.pptx')
if (raw || !isPptx || buffer.subarray(0, 4).equals(ZIP_MAGIC)) {
if (raw) return { buffer, contentType: getContentType(filename) }
const ext = filename.slice(filename.lastIndexOf('.')).toLowerCase()
const format = COMPILABLE_FORMATS[ext]
if (!format) return { buffer, contentType: getContentType(filename) }
const magicLen = format.magic.length
if (buffer.length >= magicLen && buffer.subarray(0, magicLen).equals(format.magic)) {
return { buffer, contentType: getContentType(filename) }
}
const code = buffer.toString('utf-8')
const cacheKey = createHash('sha256')
.update(ext)
.update(code)
.update(workspaceId ?? '')
.digest('hex')
const cached = compiledPptxCache.get(cacheKey)
const cached = compiledDocCache.get(cacheKey)
if (cached) {
return {
buffer: cached,
contentType: 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
}
return { buffer: cached, contentType: format.contentType }
}
const compiled = await generatePptxFromCode(code, workspaceId || '')
const compiled = await format.compile(code, workspaceId || '')
compiledCacheSet(cacheKey, compiled)
return {
buffer: compiled,
contentType: 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
}
return { buffer: compiled, contentType: format.contentType }
}
const STORAGE_KEY_PREFIX_RE = /^\d{13}-[a-z0-9]{7}-/
@@ -169,7 +199,7 @@ async function handleLocalFile(
const segment = filename.split('/').pop() || filename
const displayName = stripStorageKeyPrefix(segment)
const workspaceId = getWorkspaceIdForCompile(filename)
const { buffer: fileBuffer, contentType } = await compilePptxIfNeeded(
const { buffer: fileBuffer, contentType } = await compileDocumentIfNeeded(
rawBuffer,
displayName,
workspaceId,
@@ -226,7 +256,7 @@ async function handleCloudProxy(
const segment = cloudKey.split('/').pop() || 'download'
const displayName = stripStorageKeyPrefix(segment)
const workspaceId = getWorkspaceIdForCompile(cloudKey)
const { buffer: fileBuffer, contentType } = await compilePptxIfNeeded(
const { buffer: fileBuffer, contentType } = await compileDocumentIfNeeded(
rawBuffer,
displayName,
workspaceId,

View File

@@ -24,6 +24,27 @@ vi.mock('@/lib/auth/hybrid', () => ({
vi.mock('@/lib/execution/e2b', () => ({
executeInE2B: mockExecuteInE2B,
executeShellInE2B: vi.fn(),
}))
vi.mock('@/lib/copilot/request/tools/files', () => ({
FORMAT_TO_CONTENT_TYPE: {
json: 'application/json',
csv: 'text/csv',
txt: 'text/plain',
md: 'text/markdown',
html: 'text/html',
},
normalizeOutputWorkspaceFileName: vi.fn((p: string) => p.replace(/^files\//, '')),
resolveOutputFormat: vi.fn(() => 'json'),
}))
vi.mock('@/lib/uploads/contexts/workspace/workspace-file-manager', () => ({
uploadWorkspaceFile: vi.fn(),
}))
vi.mock('@/lib/workflows/utils', () => ({
getWorkflowById: vi.fn(),
}))
vi.mock('@/lib/core/config/feature-flags', () => ({
@@ -32,6 +53,7 @@ vi.mock('@/lib/core/config/feature-flags', () => ({
isProd: false,
isDev: false,
isTest: true,
isEmailVerificationEnabled: false,
}))
import { validateProxyUrl } from '@/lib/core/security/input-validation'

View File

@@ -1,11 +1,18 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { checkInternalAuth } from '@/lib/auth/hybrid'
import {
FORMAT_TO_CONTENT_TYPE,
normalizeOutputWorkspaceFileName,
resolveOutputFormat,
} from '@/lib/copilot/request/tools/files'
import { isE2bEnabled } from '@/lib/core/config/feature-flags'
import { generateRequestId } from '@/lib/core/utils/request'
import { executeInE2B } from '@/lib/execution/e2b'
import { executeInE2B, executeShellInE2B } from '@/lib/execution/e2b'
import { executeInIsolatedVM } from '@/lib/execution/isolated-vm'
import { CodeLanguage, DEFAULT_CODE_LANGUAGE, isValidCodeLanguage } from '@/lib/execution/languages'
import { uploadWorkspaceFile } from '@/lib/uploads/contexts/workspace/workspace-file-manager'
import { getWorkflowById } from '@/lib/workflows/utils'
import { escapeRegExp, normalizeName, REFERENCE } from '@/executor/constants'
import { type OutputSchema, resolveBlockReference } from '@/executor/utils/block-reference'
import { formatLiteralForCode } from '@/executor/utils/code-formatting'
@@ -580,6 +587,107 @@ function cleanStdout(stdout: string): string {
return stdout
}
async function maybeExportSandboxFileToWorkspace(args: {
authUserId: string
workflowId?: string
workspaceId?: string
outputPath?: string
outputFormat?: string
outputMimeType?: string
outputSandboxPath?: string
exportedFileContent?: string
stdout: string
executionTime: number
}) {
const {
authUserId,
workflowId,
workspaceId,
outputPath,
outputFormat,
outputMimeType,
outputSandboxPath,
exportedFileContent,
stdout,
executionTime,
} = args
if (!outputSandboxPath) return null
if (!outputPath) {
return NextResponse.json(
{
success: false,
error:
'outputSandboxPath requires outputPath. Set outputPath to the destination workspace file, e.g. "files/result.csv".',
output: { result: null, stdout: cleanStdout(stdout), executionTime },
},
{ status: 400 }
)
}
const resolvedWorkspaceId =
workspaceId || (workflowId ? (await getWorkflowById(workflowId))?.workspaceId : undefined)
if (!resolvedWorkspaceId) {
return NextResponse.json(
{
success: false,
error: 'Workspace context required to save sandbox file to workspace',
output: { result: null, stdout: cleanStdout(stdout), executionTime },
},
{ status: 400 }
)
}
if (exportedFileContent === undefined) {
return NextResponse.json(
{
success: false,
error: `Sandbox file "${outputSandboxPath}" was not found or could not be read`,
output: { result: null, stdout: cleanStdout(stdout), executionTime },
},
{ status: 500 }
)
}
const fileName = normalizeOutputWorkspaceFileName(outputPath)
const TEXT_MIMES = new Set(Object.values(FORMAT_TO_CONTENT_TYPE))
const resolvedMimeType =
outputMimeType ||
FORMAT_TO_CONTENT_TYPE[resolveOutputFormat(fileName, outputFormat)] ||
'application/octet-stream'
const isBinary = !TEXT_MIMES.has(resolvedMimeType)
const fileBuffer = isBinary
? Buffer.from(exportedFileContent, 'base64')
: Buffer.from(exportedFileContent, 'utf-8')
const uploaded = await uploadWorkspaceFile(
resolvedWorkspaceId,
authUserId,
fileBuffer,
fileName,
resolvedMimeType
)
return NextResponse.json({
success: true,
output: {
result: {
message: `Sandbox file exported to files/${fileName}`,
fileId: uploaded.id,
fileName,
downloadUrl: uploaded.url,
sandboxPath: outputSandboxPath,
},
stdout: cleanStdout(stdout),
executionTime,
},
resources: [{ type: 'file', id: uploaded.id, title: fileName }],
})
}
export async function POST(req: NextRequest) {
const requestId = generateRequestId()
const startTime = Date.now()
@@ -603,12 +711,17 @@ export async function POST(req: NextRequest) {
params = {},
timeout = DEFAULT_EXECUTION_TIMEOUT_MS,
language = DEFAULT_CODE_LANGUAGE,
outputPath,
outputFormat,
outputMimeType,
outputSandboxPath,
envVars = {},
blockData = {},
blockNameMapping = {},
blockOutputSchemas = {},
workflowVariables = {},
workflowId,
workspaceId,
isCustomTool = false,
_sandboxFiles,
} = body
@@ -652,6 +765,83 @@ export async function POST(req: NextRequest) {
hasImports = jsImports.trim().length > 0 || hasRequireStatements
}
if (lang === CodeLanguage.Shell) {
if (!isE2bEnabled) {
throw new Error(
'Shell execution requires E2B to be enabled. Please contact your administrator to enable E2B.'
)
}
const shellEnvs: Record<string, string> = {}
for (const [k, v] of Object.entries(envVars)) {
shellEnvs[k] = String(v)
}
for (const [k, v] of Object.entries(contextVariables)) {
shellEnvs[k] = String(v)
}
logger.info(`[${requestId}] E2B shell execution`, {
enabled: isE2bEnabled,
hasApiKey: Boolean(process.env.E2B_API_KEY),
envVarCount: Object.keys(shellEnvs).length,
})
const execStart = Date.now()
const {
result: shellResult,
stdout: shellStdout,
sandboxId,
error: shellError,
exportedFileContent,
} = await executeShellInE2B({
code: resolvedCode,
envs: shellEnvs,
timeoutMs: timeout,
sandboxFiles: _sandboxFiles,
outputSandboxPath,
})
const executionTime = Date.now() - execStart
logger.info(`[${requestId}] E2B shell sandbox`, {
sandboxId,
stdoutPreview: shellStdout?.slice(0, 200),
error: shellError,
executionTime,
})
if (shellError) {
return NextResponse.json(
{
success: false,
error: shellError,
output: { result: null, stdout: cleanStdout(shellStdout), executionTime },
},
{ status: 500 }
)
}
if (outputSandboxPath) {
const fileExportResponse = await maybeExportSandboxFileToWorkspace({
authUserId: auth.userId,
workflowId,
workspaceId,
outputPath,
outputFormat,
outputMimeType,
outputSandboxPath,
exportedFileContent,
stdout: shellStdout,
executionTime,
})
if (fileExportResponse) return fileExportResponse
}
return NextResponse.json({
success: true,
output: { result: shellResult ?? null, stdout: cleanStdout(shellStdout), executionTime },
})
}
if (lang === CodeLanguage.Python && !isE2bEnabled) {
throw new Error(
'Python execution requires E2B to be enabled. Please contact your administrator to enable E2B, or use JavaScript instead.'
@@ -719,11 +909,13 @@ export async function POST(req: NextRequest) {
stdout: e2bStdout,
sandboxId,
error: e2bError,
exportedFileContent,
} = await executeInE2B({
code: codeForE2B,
language: CodeLanguage.JavaScript,
timeoutMs: timeout,
sandboxFiles: _sandboxFiles,
outputSandboxPath,
})
const executionTime = Date.now() - execStart
stdout += e2bStdout
@@ -752,6 +944,22 @@ export async function POST(req: NextRequest) {
)
}
if (outputSandboxPath) {
const fileExportResponse = await maybeExportSandboxFileToWorkspace({
authUserId: auth.userId,
workflowId,
workspaceId,
outputPath,
outputFormat,
outputMimeType,
outputSandboxPath,
exportedFileContent,
stdout,
executionTime,
})
if (fileExportResponse) return fileExportResponse
}
return NextResponse.json({
success: true,
output: { result: e2bResult ?? null, stdout: cleanStdout(stdout), executionTime },
@@ -783,11 +991,13 @@ export async function POST(req: NextRequest) {
stdout: e2bStdout,
sandboxId,
error: e2bError,
exportedFileContent,
} = await executeInE2B({
code: codeForE2B,
language: CodeLanguage.Python,
timeoutMs: timeout,
sandboxFiles: _sandboxFiles,
outputSandboxPath,
})
const executionTime = Date.now() - execStart
stdout += e2bStdout
@@ -816,6 +1026,22 @@ export async function POST(req: NextRequest) {
)
}
if (outputSandboxPath) {
const fileExportResponse = await maybeExportSandboxFileToWorkspace({
authUserId: auth.userId,
workflowId,
workspaceId,
outputPath,
outputFormat,
outputMimeType,
outputSandboxPath,
exportedFileContent,
stdout,
executionTime,
})
if (fileExportResponse) return fileExportResponse
}
return NextResponse.json({
success: true,
output: { result: e2bResult ?? null, stdout: cleanStdout(stdout), executionTime },

View File

@@ -4,19 +4,13 @@
import type { NextRequest } from 'next/server'
import { beforeEach, describe, expect, it, vi } from 'vitest'
const {
mockCheckHybridAuth,
mockGetDispatchJobRecord,
mockGetJobQueue,
mockVerifyWorkflowAccess,
mockGetWorkflowById,
} = vi.hoisted(() => ({
mockCheckHybridAuth: vi.fn(),
mockGetDispatchJobRecord: vi.fn(),
mockGetJobQueue: vi.fn(),
mockVerifyWorkflowAccess: vi.fn(),
mockGetWorkflowById: vi.fn(),
}))
const { mockCheckHybridAuth, mockGetJobQueue, mockVerifyWorkflowAccess, mockGetWorkflowById } =
vi.hoisted(() => ({
mockCheckHybridAuth: vi.fn(),
mockGetJobQueue: vi.fn(),
mockVerifyWorkflowAccess: vi.fn(),
mockGetWorkflowById: vi.fn(),
}))
vi.mock('@sim/logger', () => ({
createLogger: () => ({
@@ -32,19 +26,9 @@ vi.mock('@/lib/auth/hybrid', () => ({
}))
vi.mock('@/lib/core/async-jobs', () => ({
JOB_STATUS: {
PENDING: 'pending',
PROCESSING: 'processing',
COMPLETED: 'completed',
FAILED: 'failed',
},
getJobQueue: mockGetJobQueue,
}))
vi.mock('@/lib/core/workspace-dispatch/store', () => ({
getDispatchJobRecord: mockGetDispatchJobRecord,
}))
vi.mock('@/lib/core/utils/request', () => ({
generateRequestId: vi.fn().mockReturnValue('request-1'),
}))
@@ -89,72 +73,78 @@ describe('GET /api/jobs/[jobId]', () => {
})
})
it('returns dispatcher-aware waiting status with metadata', async () => {
mockGetDispatchJobRecord.mockResolvedValue({
id: 'dispatch-1',
workspaceId: 'workspace-1',
lane: 'runtime',
queueName: 'workflow-execution',
bullmqJobName: 'workflow-execution',
bullmqPayload: {},
metadata: {
workflowId: 'workflow-1',
},
priority: 10,
status: 'waiting',
createdAt: 1000,
admittedAt: 2000,
it('returns pending status for a queued job', async () => {
mockGetJobQueue.mockResolvedValue({
getJob: vi.fn().mockResolvedValue({
id: 'job-1',
type: 'workflow-execution',
payload: {},
status: 'pending',
createdAt: new Date('2025-01-01T00:00:00Z'),
attempts: 0,
maxAttempts: 1,
metadata: {
workflowId: 'workflow-1',
},
}),
})
const response = await GET(createMockRequest(), {
params: Promise.resolve({ jobId: 'dispatch-1' }),
params: Promise.resolve({ jobId: 'job-1' }),
})
const body = await response.json()
expect(response.status).toBe(200)
expect(body.status).toBe('waiting')
expect(body.metadata.queueName).toBe('workflow-execution')
expect(body.metadata.lane).toBe('runtime')
expect(body.metadata.workspaceId).toBe('workspace-1')
expect(body.status).toBe('pending')
})
it('returns completed output from dispatch state', async () => {
mockGetDispatchJobRecord.mockResolvedValue({
id: 'dispatch-2',
workspaceId: 'workspace-1',
lane: 'interactive',
queueName: 'workflow-execution',
bullmqJobName: 'direct-workflow-execution',
bullmqPayload: {},
metadata: {
workflowId: 'workflow-1',
},
priority: 1,
status: 'completed',
createdAt: 1000,
startedAt: 2000,
completedAt: 7000,
output: { success: true },
it('returns completed output from job', async () => {
mockGetJobQueue.mockResolvedValue({
getJob: vi.fn().mockResolvedValue({
id: 'job-2',
type: 'workflow-execution',
payload: {},
status: 'completed',
createdAt: new Date('2025-01-01T00:00:00Z'),
startedAt: new Date('2025-01-01T00:00:01Z'),
completedAt: new Date('2025-01-01T00:00:06Z'),
attempts: 1,
maxAttempts: 1,
output: { success: true },
metadata: {
workflowId: 'workflow-1',
},
}),
})
const response = await GET(createMockRequest(), {
params: Promise.resolve({ jobId: 'dispatch-2' }),
params: Promise.resolve({ jobId: 'job-2' }),
})
const body = await response.json()
expect(response.status).toBe(200)
expect(body.status).toBe('completed')
expect(body.output).toEqual({ success: true })
expect(body.metadata.duration).toBe(5000)
})
it('returns 404 when neither dispatch nor BullMQ job exists', async () => {
mockGetDispatchJobRecord.mockResolvedValue(null)
it('returns 404 when job does not exist', async () => {
const response = await GET(createMockRequest(), {
params: Promise.resolve({ jobId: 'missing-job' }),
})
expect(response.status).toBe(404)
})
it('returns 401 for unauthenticated requests', async () => {
mockCheckHybridAuth.mockResolvedValue({
success: false,
error: 'Not authenticated',
})
const response = await GET(createMockRequest(), {
params: Promise.resolve({ jobId: 'job-1' }),
})
expect(response.status).toBe(401)
})
})

View File

@@ -2,13 +2,27 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { getJobQueue } from '@/lib/core/async-jobs'
import type { Job } from '@/lib/core/async-jobs/types'
import { generateRequestId } from '@/lib/core/utils/request'
import { presentDispatchOrJobStatus } from '@/lib/core/workspace-dispatch/status'
import { getDispatchJobRecord } from '@/lib/core/workspace-dispatch/store'
import { createErrorResponse } from '@/app/api/workflows/utils'
const logger = createLogger('TaskStatusAPI')
function presentJobStatus(job: Job) {
return {
status: job.status,
metadata: {
createdAt: job.createdAt.toISOString(),
startedAt: job.startedAt?.toISOString(),
completedAt: job.completedAt?.toISOString(),
attempts: job.attempts,
maxAttempts: job.maxAttempts,
},
output: job.output,
error: job.error,
}
}
export async function GET(
request: NextRequest,
{ params }: { params: Promise<{ jobId: string }> }
@@ -25,15 +39,14 @@ export async function GET(
const authenticatedUserId = authResult.userId
const dispatchJob = await getDispatchJobRecord(taskId)
const jobQueue = await getJobQueue()
const job = dispatchJob ? null : await jobQueue.getJob(taskId)
const job = await jobQueue.getJob(taskId)
if (!job && !dispatchJob) {
if (!job) {
return createErrorResponse('Task not found', 404)
}
const metadataToCheck = dispatchJob?.metadata ?? job?.metadata
const metadataToCheck = job.metadata
if (metadataToCheck?.workflowId) {
const { verifyWorkflowAccess } = await import('@/socket/middleware/permissions')
@@ -61,7 +74,7 @@ export async function GET(
return createErrorResponse('Access denied', 403)
}
const presented = presentDispatchOrJobStatus(dispatchJob, job)
const presented = presentJobStatus(job)
const response: any = {
success: true,
taskId,
@@ -71,9 +84,6 @@ export async function GET(
if (presented.output !== undefined) response.output = presented.output
if (presented.error !== undefined) response.error = presented.error
if (presented.estimatedDuration !== undefined) {
response.estimatedDuration = presented.estimatedDuration
}
return NextResponse.json(response)
} catch (error: any) {

View File

@@ -18,14 +18,11 @@ import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { validateOAuthAccessToken } from '@/lib/auth/oauth-token'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { createRunSegment } from '@/lib/copilot/async-runs/repository'
import { ORCHESTRATION_TIMEOUT_MS, SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import { orchestrateSubagentStream } from '@/lib/copilot/orchestrator/subagent'
import {
executeToolServerSide,
prepareExecutionContext,
} from '@/lib/copilot/orchestrator/tool-executor'
import { runCopilotLifecycle } from '@/lib/copilot/request/lifecycle/run'
import { orchestrateSubagentStream } from '@/lib/copilot/request/subagent'
import { ensureHandlersRegistered, executeTool } from '@/lib/copilot/tool-executor'
import { prepareExecutionContext } from '@/lib/copilot/tools/handlers/context'
import { DIRECT_TOOL_DEFS, SUBAGENT_TOOL_DEFS } from '@/lib/copilot/tools/mcp/definitions'
import { env } from '@/lib/core/config/env'
import { RateLimiter } from '@/lib/core/rate-limiter'
@@ -645,7 +642,8 @@ async function handleDirectToolCall(
startTime: Date.now(),
}
const result = await executeToolServerSide(toolCall, execContext)
ensureHandlersRegistered()
const result = await executeTool(toolCall.name, toolCall.params || {}, execContext)
return {
content: [
@@ -728,25 +726,10 @@ async function handleBuildToolCall(
chatId,
}
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
const messageId = requestPayload.messageId as string
await createRunSegment({
id: runId,
executionId,
chatId,
userId,
workflowId: resolved.workflowId,
streamId: messageId,
}).catch(() => {})
const result = await orchestrateCopilotStream(requestPayload, {
const result = await runCopilotLifecycle(requestPayload, {
userId,
workflowId: resolved.workflowId,
chatId,
executionId,
runId,
goRoute: '/api/mcp',
autoExecuteTools: true,
timeout: ORCHESTRATION_TIMEOUT_MS,

View File

@@ -5,18 +5,26 @@ import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { resolveOrCreateChat } from '@/lib/copilot/chat-lifecycle'
import { buildCopilotRequestPayload } from '@/lib/copilot/chat-payload'
import { resolveOrCreateChat } from '@/lib/copilot/chat/lifecycle'
import { buildCopilotRequestPayload } from '@/lib/copilot/chat/payload'
import {
buildPersistedAssistantMessage,
buildPersistedUserMessage,
} from '@/lib/copilot/chat/persisted-message'
import {
processContextsServer,
resolveActiveResourceContext,
} from '@/lib/copilot/chat/process-contents'
import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context'
import { createRequestTracker, createUnauthorizedResponse } from '@/lib/copilot/request/http'
import { createSSEStream, SSE_RESPONSE_HEADERS } from '@/lib/copilot/request/lifecycle/start'
import {
acquirePendingChatStream,
createSSEStream,
SSE_RESPONSE_HEADERS,
} from '@/lib/copilot/chat-streaming'
import type { OrchestratorResult } from '@/lib/copilot/orchestrator/types'
import { processContextsServer, resolveActiveResourceContext } from '@/lib/copilot/process-contents'
import { createRequestTracker, createUnauthorizedResponse } from '@/lib/copilot/request-helpers'
import { taskPubSub } from '@/lib/copilot/task-events'
import { generateWorkspaceContext } from '@/lib/copilot/workspace-context'
getPendingChatStreamId,
releasePendingChatStream,
} from '@/lib/copilot/request/session'
import type { OrchestratorResult } from '@/lib/copilot/request/types'
import { taskPubSub } from '@/lib/copilot/tasks'
import {
assertActiveWorkspaceAccess,
getUserEntityPermissions,
@@ -37,7 +45,6 @@ const FileAttachmentSchema = z.object({
const ResourceAttachmentSchema = z.object({
type: z.enum(['workflow', 'table', 'file', 'knowledgebase']),
id: z.string().min(1),
title: z.string().optional(),
active: z.boolean().optional(),
})
@@ -87,7 +94,9 @@ const MothershipMessageSchema = z.object({
*/
export async function POST(req: NextRequest) {
const tracker = createRequestTracker()
let userMessageIdForLogs: string | undefined
let lockChatId: string | undefined
let lockStreamId = ''
let chatStreamLockAcquired = false
try {
const session = await getSession()
@@ -110,27 +119,23 @@ export async function POST(req: NextRequest) {
} = MothershipMessageSchema.parse(body)
const userMessageId = providedMessageId || crypto.randomUUID()
userMessageIdForLogs = userMessageId
const reqLogger = logger.withMetadata({
requestId: tracker.requestId,
messageId: userMessageId,
})
lockStreamId = userMessageId
reqLogger.info('Received mothership chat start request', {
workspaceId,
chatId,
createNewChat,
hasContexts: Array.isArray(contexts) && contexts.length > 0,
contextsCount: Array.isArray(contexts) ? contexts.length : 0,
hasResourceAttachments: Array.isArray(resourceAttachments) && resourceAttachments.length > 0,
resourceAttachmentCount: Array.isArray(resourceAttachments) ? resourceAttachments.length : 0,
hasFileAttachments: Array.isArray(fileAttachments) && fileAttachments.length > 0,
fileAttachmentCount: Array.isArray(fileAttachments) ? fileAttachments.length : 0,
})
// Phase 1: workspace access + chat resolution in parallel
const [accessResult, chatResult] = await Promise.allSettled([
assertActiveWorkspaceAccess(workspaceId, authenticatedUserId),
chatId || createNewChat
? resolveOrCreateChat({
chatId,
userId: authenticatedUserId,
workspaceId,
model: 'claude-opus-4-6',
type: 'mothership',
})
: null,
])
try {
await assertActiveWorkspaceAccess(workspaceId, authenticatedUserId)
} catch {
if (accessResult.status === 'rejected') {
return NextResponse.json({ error: 'Workspace not found or access denied' }, { status: 403 })
}
@@ -138,18 +143,12 @@ export async function POST(req: NextRequest) {
let conversationHistory: any[] = []
let actualChatId = chatId
if (chatId || createNewChat) {
const chatResult = await resolveOrCreateChat({
chatId,
userId: authenticatedUserId,
workspaceId,
model: 'claude-opus-4-6',
type: 'mothership',
})
currentChat = chatResult.chat
actualChatId = chatResult.chatId || chatId
conversationHistory = Array.isArray(chatResult.conversationHistory)
? chatResult.conversationHistory
if (chatResult.status === 'fulfilled' && chatResult.value) {
const resolved = chatResult.value
currentChat = resolved.chat
actualChatId = resolved.chatId || chatId
conversationHistory = Array.isArray(resolved.conversationHistory)
? resolved.conversationHistory
: []
if (chatId && !currentChat) {
@@ -157,76 +156,73 @@ export async function POST(req: NextRequest) {
}
}
let agentContexts: Array<{ type: string; content: string }> = []
if (Array.isArray(contexts) && contexts.length > 0) {
try {
agentContexts = await processContextsServer(
contexts as any,
authenticatedUserId,
message,
workspaceId,
actualChatId
if (actualChatId) {
chatStreamLockAcquired = await acquirePendingChatStream(actualChatId, userMessageId)
if (!chatStreamLockAcquired) {
const activeStreamId = await getPendingChatStreamId(actualChatId)
return NextResponse.json(
{
error: 'A response is already in progress for this chat.',
...(activeStreamId ? { activeStreamId } : {}),
},
{ status: 409 }
)
} catch (e) {
reqLogger.error('Failed to process contexts', e)
}
lockChatId = actualChatId
}
if (Array.isArray(resourceAttachments) && resourceAttachments.length > 0) {
const results = await Promise.allSettled(
resourceAttachments.map(async (r) => {
const ctx = await resolveActiveResourceContext(
r.type,
r.id,
workspaceId,
// Phase 2: contexts + workspace context + user message persistence in parallel
const contextPromise = (async () => {
let agentCtxs: Array<{ type: string; content: string }> = []
if (Array.isArray(contexts) && contexts.length > 0) {
try {
agentCtxs = await processContextsServer(
contexts as any,
authenticatedUserId,
message,
workspaceId,
actualChatId
)
if (!ctx) return null
return {
...ctx,
tag: r.active ? '@active_tab' : '@open_tab',
}
})
)
for (const result of results) {
if (result.status === 'fulfilled' && result.value) {
agentContexts.push(result.value)
} else if (result.status === 'rejected') {
reqLogger.error('Failed to resolve resource attachment', result.reason)
} catch (e) {
logger.error(`[${tracker.requestId}] Failed to process contexts`, e)
}
}
}
if (actualChatId) {
const userMsg = {
id: userMessageId,
role: 'user' as const,
content: message,
timestamp: new Date().toISOString(),
...(fileAttachments &&
fileAttachments.length > 0 && {
fileAttachments: fileAttachments.map((f) => ({
id: f.id,
key: f.key,
filename: f.filename,
media_type: f.media_type,
size: f.size,
})),
}),
...(contexts &&
contexts.length > 0 && {
contexts: contexts.map((c) => ({
kind: c.kind,
label: c.label,
...(c.workflowId && { workflowId: c.workflowId }),
...(c.knowledgeId && { knowledgeId: c.knowledgeId }),
...(c.tableId && { tableId: c.tableId }),
...(c.fileId && { fileId: c.fileId }),
})),
}),
if (Array.isArray(resourceAttachments) && resourceAttachments.length > 0) {
const results = await Promise.allSettled(
resourceAttachments.map(async (r) => {
const ctx = await resolveActiveResourceContext(
r.type,
r.id,
workspaceId,
authenticatedUserId,
actualChatId
)
if (!ctx) return null
return { ...ctx, tag: r.active ? '@active_tab' : '@open_tab' }
})
)
for (const result of results) {
if (result.status === 'fulfilled' && result.value) {
agentCtxs.push(result.value)
} else if (result.status === 'rejected') {
logger.error(
`[${tracker.requestId}] Failed to resolve resource attachment`,
result.reason
)
}
}
}
return agentCtxs
})()
const userMsgPromise = (async () => {
if (!actualChatId) return
const userMsg = buildPersistedUserMessage({
id: userMessageId,
content: message,
fileAttachments,
contexts,
})
const [updated] = await db
.update(copilotChats)
.set({
@@ -242,11 +238,15 @@ export async function POST(req: NextRequest) {
conversationHistory = freshMessages.filter((m: any) => m.id !== userMessageId)
taskPubSub?.publishStatusChanged({ workspaceId, chatId: actualChatId, type: 'started' })
}
}
})()
const [workspaceContext, userPermission] = await Promise.all([
generateWorkspaceContext(workspaceId, authenticatedUserId),
getUserEntityPermissions(authenticatedUserId, 'workspace', workspaceId).catch(() => null),
const [agentContexts, [workspaceContext, userPermission]] = await Promise.all([
contextPromise,
Promise.all([
generateWorkspaceContext(workspaceId, authenticatedUserId),
getUserEntityPermissions(authenticatedUserId, 'workspace', workspaceId).catch(() => null),
]),
userMsgPromise,
])
const requestPayload = await buildCopilotRequestPayload(
@@ -267,19 +267,6 @@ export async function POST(req: NextRequest) {
{ selectedModel: '' }
)
if (actualChatId) {
const acquired = await acquirePendingChatStream(actualChatId, userMessageId)
if (!acquired) {
return NextResponse.json(
{
error:
'A response is already in progress for this chat. Wait for it to finish or use Stop.',
},
{ status: 409 }
)
}
}
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
const stream = createSSEStream({
@@ -295,7 +282,6 @@ export async function POST(req: NextRequest) {
titleModel: 'claude-opus-4-6',
requestId: tracker.requestId,
workspaceId,
pendingChatStreamAlreadyRegistered: Boolean(actualChatId),
orchestrateOptions: {
userId: authenticatedUserId,
workspaceId,
@@ -309,46 +295,7 @@ export async function POST(req: NextRequest) {
if (!actualChatId) return
if (!result.success) return
const assistantMessage: Record<string, unknown> = {
id: crypto.randomUUID(),
role: 'assistant' as const,
content: result.content,
timestamp: new Date().toISOString(),
...(result.requestId ? { requestId: result.requestId } : {}),
}
if (result.toolCalls.length > 0) {
assistantMessage.toolCalls = result.toolCalls
}
if (result.contentBlocks.length > 0) {
assistantMessage.contentBlocks = result.contentBlocks.map((block) => {
const stored: Record<string, unknown> = { type: block.type }
if (block.content) stored.content = block.content
if (block.type === 'tool_call' && block.toolCall) {
const state =
block.toolCall.result?.success !== undefined
? block.toolCall.result.success
? 'success'
: 'error'
: block.toolCall.status
const isSubagentTool = !!block.calledBy
const isNonTerminal =
state === 'cancelled' || state === 'pending' || state === 'executing'
stored.toolCall = {
id: block.toolCall.id,
name: block.toolCall.name,
state,
...(isSubagentTool && isNonTerminal ? {} : { result: block.toolCall.result }),
...(isSubagentTool && isNonTerminal
? {}
: block.toolCall.params
? { params: block.toolCall.params }
: {}),
...(block.calledBy ? { calledBy: block.calledBy } : {}),
}
}
return stored
})
}
const assistantMessage = buildPersistedAssistantMessage(result, result.requestId)
try {
const [row] = await db
@@ -381,7 +328,7 @@ export async function POST(req: NextRequest) {
})
}
} catch (error) {
reqLogger.error('Failed to persist chat messages', {
logger.error(`[${tracker.requestId}] Failed to persist chat messages`, {
chatId: actualChatId,
error: error instanceof Error ? error.message : 'Unknown error',
})
@@ -392,6 +339,9 @@ export async function POST(req: NextRequest) {
return new Response(stream, { headers: SSE_RESPONSE_HEADERS })
} catch (error) {
if (chatStreamLockAcquired && lockChatId && lockStreamId) {
await releasePendingChatStream(lockChatId, lockStreamId)
}
if (error instanceof z.ZodError) {
return NextResponse.json(
{ error: 'Invalid request data', details: error.errors },
@@ -399,11 +349,9 @@ export async function POST(req: NextRequest) {
)
}
logger
.withMetadata({ requestId: tracker.requestId, messageId: userMessageIdForLogs })
.error('Error handling mothership chat', {
error: error instanceof Error ? error.message : 'Unknown error',
})
logger.error(`[${tracker.requestId}] Error handling mothership chat:`, {
error: error instanceof Error ? error.message : 'Unknown error',
})
return NextResponse.json(
{ error: error instanceof Error ? error.message : 'Internal server error' },

View File

@@ -5,8 +5,9 @@ import { and, eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { releasePendingChatStream } from '@/lib/copilot/chat-streaming'
import { taskPubSub } from '@/lib/copilot/task-events'
import { normalizeMessage, type PersistedMessage } from '@/lib/copilot/chat/persisted-message'
import { releasePendingChatStream } from '@/lib/copilot/request/session'
import { taskPubSub } from '@/lib/copilot/tasks'
const logger = createLogger('MothershipChatStopAPI')
@@ -26,15 +27,25 @@ const StoredToolCallSchema = z
display: z
.object({
text: z.string().optional(),
title: z.string().optional(),
phaseLabel: z.string().optional(),
})
.optional(),
calledBy: z.string().optional(),
durationMs: z.number().optional(),
error: z.string().optional(),
})
.nullable()
const ContentBlockSchema = z.object({
type: z.string(),
lane: z.enum(['main', 'subagent']).optional(),
content: z.string().optional(),
channel: z.enum(['assistant', 'thinking']).optional(),
phase: z.enum(['call', 'args_delta', 'result']).optional(),
kind: z.enum(['subagent', 'structured_result', 'subagent_result']).optional(),
lifecycle: z.enum(['start', 'end']).optional(),
status: z.enum(['complete', 'error', 'cancelled']).optional(),
toolCall: StoredToolCallSchema.optional(),
})
@@ -70,15 +81,14 @@ export async function POST(req: NextRequest) {
const hasBlocks = Array.isArray(contentBlocks) && contentBlocks.length > 0
if (hasContent || hasBlocks) {
const assistantMessage: Record<string, unknown> = {
const normalized = normalizeMessage({
id: crypto.randomUUID(),
role: 'assistant' as const,
role: 'assistant',
content,
timestamp: new Date().toISOString(),
}
if (hasBlocks) {
assistantMessage.contentBlocks = contentBlocks
}
...(hasBlocks ? { contentBlocks } : {}),
})
const assistantMessage: PersistedMessage = normalized
setClause.messages = sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb`
}

View File

@@ -4,15 +4,15 @@ import { createLogger } from '@sim/logger'
import { and, eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat-lifecycle'
import { getStreamMeta, readStreamEvents } from '@/lib/copilot/orchestrator/stream/buffer'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
import { taskPubSub } from '@/lib/copilot/task-events'
} from '@/lib/copilot/request/http'
import { readEvents } from '@/lib/copilot/request/session/buffer'
import { taskPubSub } from '@/lib/copilot/tasks'
const logger = createLogger('MothershipChatAPI')
@@ -46,29 +46,24 @@ export async function GET(
}
let streamSnapshot: {
events: Array<{ eventId: number; streamId: string; event: Record<string, unknown> }>
events: unknown[]
status: string
} | null = null
if (chat.conversationId) {
try {
const [meta, events] = await Promise.all([
getStreamMeta(chat.conversationId),
readStreamEvents(chat.conversationId, 0),
])
const events = await readEvents(chat.conversationId, '0')
streamSnapshot = {
events: events || [],
status: meta?.status || 'unknown',
status: events.length > 0 ? 'active' : 'unknown',
}
} catch (error) {
logger
.withMetadata({ messageId: chat.conversationId || undefined })
.warn('Failed to read stream snapshot for mothership chat', {
chatId,
conversationId: chat.conversationId,
error: error instanceof Error ? error.message : String(error),
})
logger.warn('Failed to read stream snapshot for mothership chat', {
chatId,
conversationId: chat.conversationId,
error: error instanceof Error ? error.message : String(error),
})
}
}

View File

@@ -0,0 +1,43 @@
import { db } from '@sim/db'
import { copilotChats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request/http'
const logger = createLogger('MarkTaskReadAPI')
const MarkReadSchema = z.object({
chatId: z.string().min(1),
})
export async function POST(request: NextRequest) {
try {
const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly()
if (!isAuthenticated || !userId) {
return createUnauthorizedResponse()
}
const body = await request.json()
const { chatId } = MarkReadSchema.parse(body)
await db
.update(copilotChats)
.set({ lastSeenAt: sql`GREATEST(${copilotChats.updatedAt}, NOW())` })
.where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, userId)))
return NextResponse.json({ success: true })
} catch (error) {
if (error instanceof z.ZodError) {
return createBadRequestResponse('chatId is required')
}
logger.error('Error marking task as read:', error)
return createInternalServerErrorResponse('Failed to mark task as read')
}
}

View File

@@ -9,8 +9,8 @@ import {
createBadRequestResponse,
createInternalServerErrorResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
import { taskPubSub } from '@/lib/copilot/task-events'
} from '@/lib/copilot/request/http'
import { taskPubSub } from '@/lib/copilot/tasks'
import { assertActiveWorkspaceAccess } from '@/lib/workspaces/permissions/utils'
const logger = createLogger('MothershipChatsAPI')
@@ -38,7 +38,7 @@ export async function GET(request: NextRequest) {
id: copilotChats.id,
title: copilotChats.title,
updatedAt: copilotChats.updatedAt,
conversationId: copilotChats.conversationId,
activeStreamId: copilotChats.conversationId,
lastSeenAt: copilotChats.lastSeenAt,
})
.from(copilotChats)

View File

@@ -7,7 +7,7 @@
* Auth is handled via session cookies (EventSource sends cookies automatically).
*/
import { taskPubSub } from '@/lib/copilot/task-events'
import { taskPubSub } from '@/lib/copilot/tasks'
import { createWorkspaceSSE } from '@/lib/events/sse-endpoint'
export const dynamic = 'force-dynamic'

View File

@@ -2,10 +2,9 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { checkInternalAuth } from '@/lib/auth/hybrid'
import { createRunSegment } from '@/lib/copilot/async-runs/repository'
import { buildIntegrationToolSchemas } from '@/lib/copilot/chat-payload'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import { generateWorkspaceContext } from '@/lib/copilot/workspace-context'
import { buildIntegrationToolSchemas } from '@/lib/copilot/chat/payload'
import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context'
import { runCopilotLifecycle } from '@/lib/copilot/request/lifecycle/run'
import {
assertActiveWorkspaceAccess,
getUserEntityPermissions,
@@ -72,34 +71,25 @@ export async function POST(req: NextRequest) {
...(userPermission ? { userPermission } : {}),
}
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
await createRunSegment({
id: runId,
executionId,
chatId: effectiveChatId,
userId,
workspaceId,
streamId: messageId,
}).catch(() => {})
const result = await orchestrateCopilotStream(requestPayload, {
const result = await runCopilotLifecycle(requestPayload, {
userId,
workspaceId,
chatId: effectiveChatId,
executionId,
runId,
goRoute: '/api/mothership/execute',
autoExecuteTools: true,
interactive: false,
})
if (!result.success) {
reqLogger.error('Mothership execute failed', {
error: result.error,
errors: result.errors,
})
logger.error(
messageId
? `Mothership execute failed [messageId:${messageId}]`
: 'Mothership execute failed',
{
error: result.error,
errors: result.errors,
}
)
return NextResponse.json(
{
error: result.error || 'Mothership execution failed',
@@ -135,9 +125,12 @@ export async function POST(req: NextRequest) {
)
}
logger.withMetadata({ messageId }).error('Mothership execute error', {
error: error instanceof Error ? error.message : 'Unknown error',
})
logger.error(
messageId ? `Mothership execute error [messageId:${messageId}]` : 'Mothership execute error',
{
error: error instanceof Error ? error.message : 'Unknown error',
}
)
return NextResponse.json(
{ error: error instanceof Error ? error.message : 'Internal server error' },

View File

@@ -14,7 +14,6 @@ const {
mockDbReturning,
mockDbUpdate,
mockEnqueue,
mockEnqueueWorkspaceDispatch,
mockStartJob,
mockCompleteJob,
mockMarkJobFailed,
@@ -24,7 +23,6 @@ const {
const mockDbSet = vi.fn().mockReturnValue({ where: mockDbWhere })
const mockDbUpdate = vi.fn().mockReturnValue({ set: mockDbSet })
const mockEnqueue = vi.fn().mockResolvedValue('job-id-1')
const mockEnqueueWorkspaceDispatch = vi.fn().mockResolvedValue('job-id-1')
const mockStartJob = vi.fn().mockResolvedValue(undefined)
const mockCompleteJob = vi.fn().mockResolvedValue(undefined)
const mockMarkJobFailed = vi.fn().mockResolvedValue(undefined)
@@ -42,7 +40,6 @@ const {
mockDbReturning,
mockDbUpdate,
mockEnqueue,
mockEnqueueWorkspaceDispatch,
mockStartJob,
mockCompleteJob,
mockMarkJobFailed,
@@ -75,15 +72,6 @@ vi.mock('@/lib/core/async-jobs', () => ({
shouldExecuteInline: vi.fn().mockReturnValue(false),
}))
vi.mock('@/lib/core/bullmq', () => ({
isBullMQEnabled: vi.fn().mockReturnValue(true),
createBullMQJobData: vi.fn((payload: unknown) => ({ payload })),
}))
vi.mock('@/lib/core/workspace-dispatch', () => ({
enqueueWorkspaceDispatch: mockEnqueueWorkspaceDispatch,
}))
vi.mock('@/lib/workflows/utils', () => ({
getWorkflowById: vi.fn().mockResolvedValue({
id: 'workflow-1',
@@ -246,29 +234,19 @@ describe('Scheduled Workflow Execution API Route', () => {
expect(data).toHaveProperty('executedCount', 2)
})
it('should queue mothership jobs to BullMQ when available', async () => {
it('should execute mothership jobs inline', async () => {
mockDbReturning.mockReturnValueOnce([]).mockReturnValueOnce(SINGLE_JOB)
const response = await GET(createMockRequest())
expect(response.status).toBe(200)
expect(mockEnqueueWorkspaceDispatch).toHaveBeenCalledWith(
expect(mockExecuteJobInline).toHaveBeenCalledWith(
expect.objectContaining({
workspaceId: 'workspace-1',
lane: 'runtime',
queueName: 'mothership-job-execution',
bullmqJobName: 'mothership-job-execution',
bullmqPayload: {
payload: {
scheduleId: 'job-1',
cronExpression: '0 * * * *',
failedCount: 0,
now: expect.any(String),
},
},
scheduleId: 'job-1',
cronExpression: '0 * * * *',
failedCount: 0,
})
)
expect(mockExecuteJobInline).not.toHaveBeenCalled()
})
it('should enqueue preassigned correlation metadata for schedules', async () => {
@@ -277,25 +255,23 @@ describe('Scheduled Workflow Execution API Route', () => {
const response = await GET(createMockRequest())
expect(response.status).toBe(200)
expect(mockEnqueueWorkspaceDispatch).toHaveBeenCalledWith(
expect(mockEnqueue).toHaveBeenCalledWith(
'schedule-execution',
expect.objectContaining({
id: 'schedule-execution-1',
workspaceId: 'workspace-1',
lane: 'runtime',
queueName: 'schedule-execution',
bullmqJobName: 'schedule-execution',
metadata: {
scheduleId: 'schedule-1',
workflowId: 'workflow-1',
executionId: 'schedule-execution-1',
}),
expect.objectContaining({
metadata: expect.objectContaining({
workflowId: 'workflow-1',
correlation: {
correlation: expect.objectContaining({
executionId: 'schedule-execution-1',
requestId: 'test-request-id',
source: 'schedule',
workflowId: 'workflow-1',
scheduleId: 'schedule-1',
triggerType: 'schedule',
scheduledFor: '2025-01-01T00:00:00.000Z',
},
},
}),
}),
})
)
})

View File

@@ -5,9 +5,7 @@ import { type NextRequest, NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { verifyCronAuth } from '@/lib/auth/internal'
import { getJobQueue, shouldExecuteInline } from '@/lib/core/async-jobs'
import { createBullMQJobData, isBullMQEnabled } from '@/lib/core/bullmq'
import { generateRequestId } from '@/lib/core/utils/request'
import { enqueueWorkspaceDispatch } from '@/lib/core/workspace-dispatch'
import {
executeJobInline,
executeScheduleJob,
@@ -121,38 +119,13 @@ export async function GET(request: NextRequest) {
: null
const resolvedWorkspaceId = resolvedWorkflow?.workspaceId
let jobId: string
if (isBullMQEnabled()) {
if (!resolvedWorkspaceId) {
throw new Error(
`Missing workspace for scheduled workflow ${schedule.workflowId}; refusing to bypass workspace admission`
)
}
jobId = await enqueueWorkspaceDispatch({
id: executionId,
workspaceId: resolvedWorkspaceId,
lane: 'runtime',
queueName: 'schedule-execution',
bullmqJobName: 'schedule-execution',
bullmqPayload: createBullMQJobData(payload, {
workflowId: schedule.workflowId ?? undefined,
correlation,
}),
metadata: {
workflowId: schedule.workflowId ?? undefined,
correlation,
},
})
} else {
jobId = await jobQueue.enqueue('schedule-execution', payload, {
metadata: {
workflowId: schedule.workflowId ?? undefined,
workspaceId: resolvedWorkspaceId ?? undefined,
correlation,
},
})
}
const jobId = await jobQueue.enqueue('schedule-execution', payload, {
metadata: {
workflowId: schedule.workflowId ?? undefined,
workspaceId: resolvedWorkspaceId ?? undefined,
correlation,
},
})
logger.info(
`[${requestId}] Queued schedule execution task ${jobId} for workflow ${schedule.workflowId}`
)
@@ -204,7 +177,7 @@ export async function GET(request: NextRequest) {
}
})
// Mothership jobs use BullMQ when available, otherwise direct inline execution.
// Mothership jobs execute inline directly.
const jobPromises = dueJobs.map(async (job) => {
const queueTime = job.lastQueuedAt ?? queuedAt
const payload = {
@@ -215,24 +188,7 @@ export async function GET(request: NextRequest) {
}
try {
if (isBullMQEnabled()) {
if (!job.sourceWorkspaceId || !job.sourceUserId) {
throw new Error(`Mothership job ${job.id} is missing workspace/user ownership`)
}
await enqueueWorkspaceDispatch({
workspaceId: job.sourceWorkspaceId!,
lane: 'runtime',
queueName: 'mothership-job-execution',
bullmqJobName: 'mothership-job-execution',
bullmqPayload: createBullMQJobData(payload),
metadata: {
userId: job.sourceUserId,
},
})
} else {
await executeJobInline(payload)
}
await executeJobInline(payload)
} catch (error) {
logger.error(`[${requestId}] Job execution failed for ${job.id}`, {
error: error instanceof Error ? error.message : String(error),

View File

@@ -3,7 +3,7 @@ import { templates } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { checkInternalApiKey } from '@/lib/copilot/utils'
import { checkInternalApiKey } from '@/lib/copilot/request/http'
import { generateRequestId } from '@/lib/core/utils/request'
import { sanitizeForCopilot } from '@/lib/workflows/sanitization/json-sanitizer'

View File

@@ -1,9 +1,8 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { createRunSegment } from '@/lib/copilot/async-runs/repository'
import { COPILOT_REQUEST_MODES } from '@/lib/copilot/models'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import { COPILOT_REQUEST_MODES } from '@/lib/copilot/constants'
import { runCopilotLifecycle } from '@/lib/copilot/request/lifecycle/run'
import { getWorkflowById, resolveWorkflowIdForUser } from '@/lib/workflows/utils'
import { authenticateV1Request } from '@/app/api/v1/auth'
@@ -83,15 +82,19 @@ export async function POST(req: NextRequest) {
const chatId = parsed.chatId || crypto.randomUUID()
messageId = crypto.randomUUID()
const reqLogger = logger.withMetadata({ messageId })
reqLogger.info('Received headless copilot chat start request', {
workflowId: resolved.workflowId,
workflowName: parsed.workflowName,
chatId,
mode: transportMode,
autoExecuteTools: parsed.autoExecuteTools,
timeout: parsed.timeout,
})
logger.info(
messageId
? `Received headless copilot chat start request [messageId:${messageId}]`
: 'Received headless copilot chat start request',
{
workflowId: resolved.workflowId,
workflowName: parsed.workflowName,
chatId,
mode: transportMode,
autoExecuteTools: parsed.autoExecuteTools,
timeout: parsed.timeout,
}
)
const requestPayload = {
message: parsed.message,
workflowId: resolved.workflowId,
@@ -102,24 +105,10 @@ export async function POST(req: NextRequest) {
chatId,
}
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
await createRunSegment({
id: runId,
executionId,
chatId,
userId: auth.userId,
workflowId: resolved.workflowId,
streamId: messageId,
}).catch(() => {})
const result = await orchestrateCopilotStream(requestPayload, {
const result = await runCopilotLifecycle(requestPayload, {
userId: auth.userId,
workflowId: resolved.workflowId,
chatId,
executionId,
runId,
goRoute: '/api/mcp',
autoExecuteTools: parsed.autoExecuteTools,
timeout: parsed.timeout,
@@ -141,9 +130,14 @@ export async function POST(req: NextRequest) {
)
}
logger.withMetadata({ messageId }).error('Headless copilot request failed', {
error: error instanceof Error ? error.message : String(error),
})
logger.error(
messageId
? `Headless copilot request failed [messageId:${messageId}]`
: 'Headless copilot request failed',
{
error: error instanceof Error ? error.message : String(error),
}
)
return NextResponse.json({ success: false, error: 'Internal server error' }, { status: 500 })
}
}

View File

@@ -2,7 +2,6 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { admissionRejectedResponse, tryAdmit } from '@/lib/core/admission/gate'
import { generateRequestId } from '@/lib/core/utils/request'
import { DispatchQueueFullError } from '@/lib/core/workspace-dispatch'
import {
checkWebhookPreprocessing,
findAllWebhooksForPath,
@@ -156,29 +155,14 @@ async function handleWebhookPost(
if (shouldSkipWebhookEvent(foundWebhook, body, requestId)) {
continue
}
try {
const response = await queueWebhookExecution(foundWebhook, foundWorkflow, body, request, {
requestId,
path,
actorUserId: preprocessResult.actorUserId,
executionId: preprocessResult.executionId,
correlation: preprocessResult.correlation,
})
responses.push(response)
} catch (error) {
if (error instanceof DispatchQueueFullError) {
return NextResponse.json(
{
error: 'Service temporarily at capacity',
message: error.message,
retryAfterSeconds: 10,
},
{ status: 503, headers: { 'Retry-After': '10' } }
)
}
throw error
}
const response = await queueWebhookExecution(foundWebhook, foundWorkflow, body, request, {
requestId,
path,
actorUserId: preprocessResult.actorUserId,
executionId: preprocessResult.executionId,
correlation: preprocessResult.correlation,
})
responses.push(response)
}
if (responses.length === 0) {

View File

@@ -10,13 +10,11 @@ const {
mockAuthorizeWorkflowByWorkspacePermission,
mockPreprocessExecution,
mockEnqueue,
mockEnqueueWorkspaceDispatch,
} = vi.hoisted(() => ({
mockCheckHybridAuth: vi.fn(),
mockAuthorizeWorkflowByWorkspacePermission: vi.fn(),
mockPreprocessExecution: vi.fn(),
mockEnqueue: vi.fn().mockResolvedValue('job-123'),
mockEnqueueWorkspaceDispatch: vi.fn().mockResolvedValue('job-123'),
}))
vi.mock('@/lib/auth/hybrid', () => ({
@@ -47,16 +45,6 @@ vi.mock('@/lib/core/async-jobs', () => ({
markJobFailed: vi.fn(),
}),
shouldExecuteInline: vi.fn().mockReturnValue(false),
shouldUseBullMQ: vi.fn().mockReturnValue(true),
}))
vi.mock('@/lib/core/bullmq', () => ({
createBullMQJobData: vi.fn((payload: unknown, metadata?: unknown) => ({ payload, metadata })),
}))
vi.mock('@/lib/core/workspace-dispatch', () => ({
enqueueWorkspaceDispatch: mockEnqueueWorkspaceDispatch,
waitForDispatchJob: vi.fn(),
}))
vi.mock('@/lib/core/utils/request', () => ({
@@ -147,24 +135,20 @@ describe('workflow execute async route', () => {
expect(response.status).toBe(202)
expect(body.executionId).toBe('execution-123')
expect(body.jobId).toBe('job-123')
expect(mockEnqueueWorkspaceDispatch).toHaveBeenCalledWith(
expect(mockEnqueue).toHaveBeenCalledWith(
'workflow-execution',
expect.objectContaining({
id: 'execution-123',
workflowId: 'workflow-1',
userId: 'actor-1',
workspaceId: 'workspace-1',
lane: 'runtime',
queueName: 'workflow-execution',
bullmqJobName: 'workflow-execution',
metadata: {
executionId: 'execution-123',
}),
expect.objectContaining({
metadata: expect.objectContaining({
workflowId: 'workflow-1',
userId: 'actor-1',
correlation: {
executionId: 'execution-123',
requestId: 'req-12345678',
source: 'workflow',
workflowId: 'workflow-1',
triggerType: 'manual',
},
},
workspaceId: 'workspace-1',
}),
})
)
})

View File

@@ -4,8 +4,7 @@ import { validate as uuidValidate, v4 as uuidv4 } from 'uuid'
import { z } from 'zod'
import { AuthType, checkHybridAuth, hasExternalApiCredentials } from '@/lib/auth/hybrid'
import { admissionRejectedResponse, tryAdmit } from '@/lib/core/admission/gate'
import { getJobQueue, shouldExecuteInline, shouldUseBullMQ } from '@/lib/core/async-jobs'
import { createBullMQJobData } from '@/lib/core/bullmq'
import { getJobQueue, shouldExecuteInline } from '@/lib/core/async-jobs'
import {
createTimeoutAbortController,
getTimeoutErrorMessage,
@@ -14,13 +13,6 @@ import {
import { generateRequestId } from '@/lib/core/utils/request'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
import { getBaseUrl } from '@/lib/core/utils/urls'
import {
DispatchQueueFullError,
enqueueWorkspaceDispatch,
type WorkspaceDispatchLane,
waitForDispatchJob,
} from '@/lib/core/workspace-dispatch'
import { createBufferedExecutionStream } from '@/lib/execution/buffered-stream'
import {
buildNextCallChain,
parseCallChain,
@@ -42,11 +34,6 @@ import {
import { executeWorkflowCore } from '@/lib/workflows/executor/execution-core'
import { type ExecutionEvent, encodeSSEEvent } from '@/lib/workflows/executor/execution-events'
import { PauseResumeManager } from '@/lib/workflows/executor/human-in-the-loop-manager'
import {
DIRECT_WORKFLOW_JOB_NAME,
type QueuedWorkflowExecutionPayload,
type QueuedWorkflowExecutionResult,
} from '@/lib/workflows/executor/queued-workflow-execution'
import {
loadDeployedWorkflowState,
loadWorkflowFromNormalizedTables,
@@ -118,8 +105,6 @@ const ExecuteWorkflowSchema = z.object({
export const runtime = 'nodejs'
export const dynamic = 'force-dynamic'
const INLINE_TRIGGER_TYPES = new Set<CoreTriggerType>(['manual', 'workflow'])
function resolveOutputIds(
selectedOutputs: string[] | undefined,
blocks: Record<string, any>
@@ -216,39 +201,19 @@ async function handleAsyncExecution(params: AsyncExecutionParams): Promise<NextR
}
try {
const useBullMQ = shouldUseBullMQ()
const jobQueue = useBullMQ ? null : await getJobQueue()
const jobId = useBullMQ
? await enqueueWorkspaceDispatch({
id: executionId,
workspaceId,
lane: 'runtime',
queueName: 'workflow-execution',
bullmqJobName: 'workflow-execution',
bullmqPayload: createBullMQJobData(payload, {
workflowId,
userId,
correlation,
}),
metadata: {
workflowId,
userId,
correlation,
},
})
: await jobQueue!.enqueue('workflow-execution', payload, {
metadata: { workflowId, workspaceId, userId, correlation },
})
const jobQueue = await getJobQueue()
const jobId = await jobQueue.enqueue('workflow-execution', payload, {
metadata: { workflowId, workspaceId, userId, correlation },
})
asyncLogger.info('Queued async workflow execution', { jobId })
if (shouldExecuteInline() && jobQueue) {
const inlineJobQueue = jobQueue
if (shouldExecuteInline()) {
void (async () => {
try {
await inlineJobQueue.startJob(jobId)
await jobQueue.startJob(jobId)
const output = await executeWorkflowJob(payload)
await inlineJobQueue.completeJob(jobId, output)
await jobQueue.completeJob(jobId, output)
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error)
asyncLogger.error('Async workflow execution failed', {
@@ -256,7 +221,7 @@ async function handleAsyncExecution(params: AsyncExecutionParams): Promise<NextR
error: errorMessage,
})
try {
await inlineJobQueue.markJobFailed(jobId, errorMessage)
await jobQueue.markJobFailed(jobId, errorMessage)
} catch (markFailedError) {
asyncLogger.error('Failed to mark job as failed', {
jobId,
@@ -282,17 +247,6 @@ async function handleAsyncExecution(params: AsyncExecutionParams): Promise<NextR
{ status: 202 }
)
} catch (error: any) {
if (error instanceof DispatchQueueFullError) {
return NextResponse.json(
{
error: 'Service temporarily at capacity',
message: error.message,
retryAfterSeconds: 10,
},
{ status: 503, headers: { 'Retry-After': '10' } }
)
}
asyncLogger.error('Failed to queue async execution', error)
return NextResponse.json(
{ error: `Failed to queue async execution: ${error.message}` },
@@ -301,31 +255,6 @@ async function handleAsyncExecution(params: AsyncExecutionParams): Promise<NextR
}
}
async function enqueueDirectWorkflowExecution(
payload: QueuedWorkflowExecutionPayload,
priority: number,
lane: WorkspaceDispatchLane
) {
return enqueueWorkspaceDispatch({
id: payload.metadata.executionId,
workspaceId: payload.metadata.workspaceId,
lane,
queueName: 'workflow-execution',
bullmqJobName: DIRECT_WORKFLOW_JOB_NAME,
bullmqPayload: createBullMQJobData(payload, {
workflowId: payload.metadata.workflowId,
userId: payload.metadata.userId,
correlation: payload.metadata.correlation,
}),
metadata: {
workflowId: payload.metadata.workflowId,
userId: payload.metadata.userId,
correlation: payload.metadata.correlation,
},
priority,
})
}
/**
* POST /api/workflows/[id]/execute
*
@@ -793,92 +722,6 @@ async function handleExecutePost(
const executionVariables = cachedWorkflowData?.variables ?? workflow.variables ?? {}
if (shouldUseBullMQ() && !INLINE_TRIGGER_TYPES.has(triggerType)) {
try {
const dispatchJobId = await enqueueDirectWorkflowExecution(
{
workflow,
metadata,
input: processedInput,
variables: executionVariables,
selectedOutputs,
includeFileBase64,
base64MaxBytes,
stopAfterBlockId,
timeoutMs: preprocessResult.executionTimeout?.sync,
runFromBlock: resolvedRunFromBlock,
},
5,
'interactive'
)
const resultRecord = await waitForDispatchJob(
dispatchJobId,
(preprocessResult.executionTimeout?.sync ?? 300000) + 30000
)
if (resultRecord.status === 'failed') {
return NextResponse.json(
{
success: false,
executionId,
error: resultRecord.error ?? 'Workflow execution failed',
},
{ status: 500 }
)
}
const result = resultRecord.output as QueuedWorkflowExecutionResult
const resultForResponseBlock = {
success: result.success,
logs: result.logs,
output: result.output,
}
if (
auth.authType !== AuthType.INTERNAL_JWT &&
workflowHasResponseBlock(resultForResponseBlock)
) {
return createHttpResponseFromBlock(resultForResponseBlock)
}
return NextResponse.json(
{
success: result.success,
executionId,
output: result.output,
error: result.error,
metadata: result.metadata,
},
{ status: result.statusCode ?? 200 }
)
} catch (error: unknown) {
if (error instanceof DispatchQueueFullError) {
return NextResponse.json(
{
error: 'Service temporarily at capacity',
message: error.message,
retryAfterSeconds: 10,
},
{ status: 503, headers: { 'Retry-After': '10' } }
)
}
const errorMessage = error instanceof Error ? error.message : 'Unknown error'
reqLogger.error(`Queued non-SSE execution failed: ${errorMessage}`)
return NextResponse.json(
{
success: false,
error: errorMessage,
},
{ status: 500 }
)
}
}
const timeoutController = createTimeoutAbortController(
preprocessResult.executionTimeout?.sync
)
@@ -993,53 +836,6 @@ async function handleExecutePost(
}
if (shouldUseDraftState) {
const shouldDispatchViaQueue = shouldUseBullMQ() && !INLINE_TRIGGER_TYPES.has(triggerType)
if (shouldDispatchViaQueue) {
const metadata: ExecutionMetadata = {
requestId,
executionId,
workflowId,
workspaceId,
userId: actorUserId,
sessionUserId: isClientSession ? userId : undefined,
workflowUserId: workflow.userId,
triggerType,
useDraftState: shouldUseDraftState,
startTime: new Date().toISOString(),
isClientSession,
enforceCredentialAccess: useAuthenticatedUserAsActor,
workflowStateOverride: effectiveWorkflowStateOverride,
callChain,
}
const executionVariables = cachedWorkflowData?.variables ?? workflow.variables ?? {}
await enqueueDirectWorkflowExecution(
{
workflow,
metadata,
input: processedInput,
variables: executionVariables,
selectedOutputs,
includeFileBase64,
base64MaxBytes,
stopAfterBlockId,
timeoutMs: preprocessResult.executionTimeout?.sync,
runFromBlock: resolvedRunFromBlock,
streamEvents: true,
},
1,
'interactive'
)
return new NextResponse(createBufferedExecutionStream(executionId), {
headers: {
...SSE_HEADERS,
'X-Execution-Id': executionId,
},
})
}
reqLogger.info('Using SSE console log streaming (manual execution)')
} else {
reqLogger.info('Using streaming API response')
@@ -1505,17 +1301,6 @@ async function handleExecutePost(
},
})
} catch (error: any) {
if (error instanceof DispatchQueueFullError) {
return NextResponse.json(
{
error: 'Service temporarily at capacity',
message: error.message,
retryAfterSeconds: 10,
},
{ status: 503, headers: { 'Retry-After': '10' } }
)
}
reqLogger.error('Failed to start workflow execution:', error)
return NextResponse.json(
{ error: error.message || 'Failed to start workflow execution' },

View File

@@ -1,7 +1,7 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { generatePptxFromCode } from '@/lib/execution/pptx-vm'
import { generatePptxFromCode } from '@/lib/execution/doc-vm'
import { verifyWorkspaceMembership } from '@/app/api/workflows/utils'
export const dynamic = 'force-dynamic'

View File

@@ -44,7 +44,7 @@ const TEXT_EDITABLE_EXTENSIONS = new Set([
'svg',
])
const IFRAME_PREVIEWABLE_MIME_TYPES = new Set(['application/pdf'])
const IFRAME_PREVIEWABLE_MIME_TYPES = new Set(['application/pdf', 'text/x-pdflibjs'])
const IFRAME_PREVIEWABLE_EXTENSIONS = new Set(['pdf'])
const IMAGE_PREVIEWABLE_MIME_TYPES = new Set(['image/png', 'image/jpeg', 'image/gif', 'image/webp'])
@@ -52,26 +52,36 @@ const IMAGE_PREVIEWABLE_EXTENSIONS = new Set(['png', 'jpg', 'jpeg', 'gif', 'webp
// MIME types routed to the PPTX slide preview. 'text/x-pptxgenjs' is the
// in-house marker for generated pptxgenjs source files (see resolveFileCategory
// and the SOURCE_MIME_MAP used for streaming files).
const PPTX_PREVIEWABLE_MIME_TYPES = new Set([
'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'text/x-pptxgenjs',
])
// Extension fallback when no MIME type is available for the file.
const PPTX_PREVIEWABLE_EXTENSIONS = new Set(['pptx'])
// MIME types routed to the DOCX preview. 'text/x-docxjs' marks generated
// docx source files — presumably produced by the doc generator; verify.
const DOCX_PREVIEWABLE_MIME_TYPES = new Set([
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'text/x-docxjs',
])
// Extension fallback when no MIME type is available for the file.
const DOCX_PREVIEWABLE_EXTENSIONS = new Set(['docx'])
type FileCategory =
| 'text-editable'
| 'iframe-previewable'
| 'image-previewable'
| 'pptx-previewable'
| 'docx-previewable'
| 'unsupported'
function resolveFileCategory(mimeType: string | null, filename: string): FileCategory {
if (mimeType && TEXT_EDITABLE_MIME_TYPES.has(mimeType)) return 'text-editable'
if (mimeType && IFRAME_PREVIEWABLE_MIME_TYPES.has(mimeType)) return 'iframe-previewable'
if (mimeType && IMAGE_PREVIEWABLE_MIME_TYPES.has(mimeType)) return 'image-previewable'
if (mimeType && DOCX_PREVIEWABLE_MIME_TYPES.has(mimeType)) return 'docx-previewable'
if (mimeType && PPTX_PREVIEWABLE_MIME_TYPES.has(mimeType)) return 'pptx-previewable'
const ext = getFileExtension(filename)
if (TEXT_EDITABLE_EXTENSIONS.has(ext)) return 'text-editable'
if (IFRAME_PREVIEWABLE_EXTENSIONS.has(ext)) return 'iframe-previewable'
if (IMAGE_PREVIEWABLE_EXTENSIONS.has(ext)) return 'image-previewable'
if (DOCX_PREVIEWABLE_EXTENSIONS.has(ext)) return 'docx-previewable'
if (PPTX_PREVIEWABLE_EXTENSIONS.has(ext)) return 'pptx-previewable'
return 'unsupported'
@@ -131,13 +141,17 @@ export function FileViewer({
}
if (category === 'iframe-previewable') {
return <IframePreview file={file} />
return <IframePreview file={file} workspaceId={workspaceId} />
}
if (category === 'image-previewable') {
return <ImagePreview file={file} />
}
if (category === 'docx-previewable') {
return <DocxPreview file={file} workspaceId={workspaceId} />
}
if (category === 'pptx-previewable') {
return <PptxPreview file={file} workspaceId={workspaceId} streamingContent={streamingContent} />
}
@@ -181,7 +195,14 @@ function TextEditor({
isLoading,
error,
dataUpdatedAt,
} = useWorkspaceFileContent(workspaceId, file.id, file.key, file.type === 'text/x-pptxgenjs')
} = useWorkspaceFileContent(
workspaceId,
file.id,
file.key,
file.type === 'text/x-pptxgenjs' ||
file.type === 'text/x-docxjs' ||
file.type === 'text/x-pdflibjs'
)
const updateContent = useUpdateWorkspaceFileContent()
const updateContentRef = useRef(updateContent)
@@ -416,13 +437,36 @@ function TextEditor({
)
}
const IframePreview = memo(function IframePreview({ file }: { file: WorkspaceFileRecord }) {
const serveUrl = `/api/files/serve/${encodeURIComponent(file.key)}?context=workspace`
const IframePreview = memo(function IframePreview({
file,
workspaceId,
}: {
file: WorkspaceFileRecord
workspaceId: string
}) {
const { data: fileData, isLoading } = useWorkspaceFileBinary(workspaceId, file.id, file.key)
const [blobUrl, setBlobUrl] = useState<string | null>(null)
useEffect(() => {
if (!fileData) return
const blob = new Blob([fileData], { type: 'application/pdf' })
const url = URL.createObjectURL(blob)
setBlobUrl(url)
return () => URL.revokeObjectURL(url)
}, [fileData])
if (isLoading || !blobUrl) {
return (
<div className='flex h-full items-center justify-center'>
<Skeleton className='h-[200px] w-[80%]' />
</div>
)
}
return (
<div className='flex flex-1 overflow-hidden'>
<iframe
src={serveUrl}
src={blobUrl}
className='h-full w-full border-0'
title={file.name}
onError={() => {
@@ -551,6 +595,71 @@ const ImagePreview = memo(function ImagePreview({ file }: { file: WorkspaceFileR
)
})
/**
 * Renders a DOCX workspace file by fetching its bytes and handing them to the
 * lazily imported `docx-preview` renderer. Shows a skeleton while the binary
 * loads and an error panel when either the fetch or the render fails.
 *
 * Fix over previous version: `renderError` is cleared whenever new file bytes
 * arrive. Previously a transient render failure was permanent — the error view
 * unmounts the container div, and with `renderError` never reset (and absent
 * from the render effect's deps) no retry could ever happen.
 */
const DocxPreview = memo(function DocxPreview({
  file,
  workspaceId,
}: {
  file: WorkspaceFileRecord
  workspaceId: string
}) {
  const containerRef = useRef<HTMLDivElement>(null)
  const {
    data: fileData,
    isLoading,
    error: fetchError,
  } = useWorkspaceFileBinary(workspaceId, file.id, file.key)
  const [renderError, setRenderError] = useState<string | null>(null)
  // Reset any stale render error when new bytes arrive so the preview can
  // recover instead of staying stuck on the error view.
  useEffect(() => {
    setRenderError(null)
  }, [fileData])
  useEffect(() => {
    // renderError is in the deps so that, once the reset effect above clears
    // it and the container div remounts, rendering is retried. When an error
    // is set we bail out (the container is unmounted in that state anyway),
    // which also prevents a set-error/re-run loop.
    if (renderError || !containerRef.current || !fileData) return
    let cancelled = false
    async function render() {
      try {
        // Lazy-load the heavy renderer only when a DOCX is actually previewed.
        const { renderAsync } = await import('docx-preview')
        if (cancelled || !containerRef.current) return
        containerRef.current.innerHTML = ''
        await renderAsync(fileData, containerRef.current, undefined, {
          inWrapper: true,
          ignoreWidth: false,
          ignoreHeight: false,
        })
      } catch (err) {
        if (!cancelled) {
          const msg = err instanceof Error ? err.message : 'Failed to render document'
          logger.error('DOCX render failed', { error: msg })
          setRenderError(msg)
        }
      }
    }
    render()
    return () => {
      // Mark the in-flight render stale if the file changes or we unmount.
      cancelled = true
    }
  }, [fileData, renderError])
  if (isLoading) {
    return (
      <div className='flex h-full items-center justify-center'>
        <Skeleton className='h-[200px] w-[80%]' />
      </div>
    )
  }
  if (fetchError || renderError) {
    return (
      <div className='flex h-full flex-col items-center justify-center gap-2 text-[var(--text-muted)]'>
        <p className='text-[13px]'>Failed to preview document</p>
        <p className='text-[11px]'>{renderError || 'Could not load file'}</p>
      </div>
    )
  }
  return <div ref={containerRef} className='h-full w-full overflow-auto bg-white' />
})
// Module-level cache of rendered PPTX slide payloads, keyed by
// pptxCacheKey(fileId, dataUpdatedAt, byteLength). Module scope means it
// outlives component remounts — presumably so re-opening the same deck
// skips a full re-render; confirm eviction strategy (none visible here).
const pptxSlideCache = new Map<string, string[]>()
function pptxCacheKey(fileId: string, dataUpdatedAt: number, byteLength: number): string {

View File

@@ -1,21 +1,17 @@
'use client'
import type { AgentGroupItem } from '@/app/workspace/[workspaceId]/home/components/message-content/components'
import {
AgentGroup,
ChatContent,
CircleStop,
Options,
PendingTagIndicator,
} from '@/app/workspace/[workspaceId]/home/components/message-content/components'
import type {
ContentBlock,
MothershipToolName,
OptionItem,
SubagentName,
ToolCallData,
} from '@/app/workspace/[workspaceId]/home/types'
import { SUBAGENT_LABELS, TOOL_UI_METADATA } from '@/app/workspace/[workspaceId]/home/types'
FileWrite,
Read as ReadTool,
ToolSearchToolRegex,
WorkspaceFile,
} from '@/lib/copilot/generated/tool-catalog-v1'
import { resolveToolDisplay } from '@/lib/copilot/tools/client/store-utils'
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
import type { ContentBlock, OptionItem, ToolCallData } from '../../types'
import { SUBAGENT_LABELS, TOOL_UI_METADATA } from '../../types'
import type { AgentGroupItem } from './components'
import { AgentGroup, ChatContent, CircleStop, Options, PendingTagIndicator } from './components'
interface TextSegment {
type: 'text'
@@ -52,11 +48,19 @@ const SUBAGENT_KEYS = new Set(Object.keys(SUBAGENT_LABELS))
* group is absorbed so it doesn't render as a separate Mothership entry.
*/
// Maps a subagent-dispatch tool id to the tool whose group absorbs it.
// Fix: the deleted-side diff line `file_write: 'workspace_file'` had been left
// in alongside the catalog-id entry; the literal-key duplicate is removed so
// the generated tool catalog remains the single source of truth for ids.
const SUBAGENT_DISPATCH_TOOLS: Record<string, string> = {
  [FileWrite.id]: WorkspaceFile.id,
}
/**
 * Derives a human-readable label from a snake_case tool id: strips a trailing
 * version suffix (e.g. "_v2") and capitalizes each underscore-separated word.
 */
function formatToolName(name: string): string {
  const base = name.replace(/_v\d+$/, '')
  const words: string[] = []
  for (const word of base.split('_')) {
    words.push(word.charAt(0).toUpperCase() + word.slice(1))
  }
  return words.join(' ')
}
/**
 * Resolves a display label for an agent/tool key: a curated SUBAGENT_LABELS
 * entry when one exists, otherwise a formatted version of the raw key.
 *
 * Fix: the diff had left both the old and new `return` statements in the
 * body, making the second unreachable; only the catalog-era behavior
 * (string-keyed lookup with formatToolName fallback) is kept.
 */
function resolveAgentLabel(key: string): string {
  return SUBAGENT_LABELS[key] ?? formatToolName(key)
}
function isToolDone(status: ToolCallData['status']): boolean {
@@ -67,12 +71,41 @@ function isDelegatingTool(tc: NonNullable<ContentBlock['toolCall']>): boolean {
return tc.status === 'executing'
}
/**
 * Translates a transcript tool-call status into the client tool display
 * state. The three terminal statuses map one-to-one; anything else is
 * treated as still executing.
 */
function mapToolStatusToClientState(
  status: ContentBlock['toolCall'] extends { status: infer T } ? T : string
) {
  if (status === 'success') return ClientToolCallState.success
  if (status === 'error') return ClientToolCallState.error
  if (status === 'cancelled') return ClientToolCallState.cancelled
  return ClientToolCallState.executing
}
/**
 * Returns a dynamic display title for tools whose label depends on their
 * params/state — the read tool and any "*_respond" tool — via the client
 * tool display registry. Returns undefined for all other tools so callers
 * fall back to static metadata.
 */
function getOverrideDisplayTitle(tc: NonNullable<ContentBlock['toolCall']>): string | undefined {
  const wantsDynamicTitle = tc.name === ReadTool.id || tc.name.endsWith('_respond')
  if (!wantsDynamicTitle) return undefined
  const display = resolveToolDisplay(tc.name, mapToolStatusToClientState(tc.status), tc.id, tc.params)
  return display?.text
}
function toToolData(tc: NonNullable<ContentBlock['toolCall']>): ToolCallData {
const overrideDisplayTitle = getOverrideDisplayTitle(tc)
const displayTitle =
overrideDisplayTitle ||
tc.displayTitle ||
TOOL_UI_METADATA[tc.name as keyof typeof TOOL_UI_METADATA]?.title ||
formatToolName(tc.name)
return {
id: tc.id,
toolName: tc.name,
displayTitle:
tc.displayTitle ?? TOOL_UI_METADATA[tc.name as MothershipToolName]?.title ?? tc.name,
displayTitle,
status: tc.status,
params: tc.params,
result: tc.result,
@@ -172,7 +205,7 @@ function parseBlocks(blocks: ContentBlock[]): MessageSegment[] {
if (block.type === 'tool_call') {
if (!block.toolCall) continue
const tc = block.toolCall
if (tc.name === 'tool_search_tool_regex') continue
if (tc.name === ToolSearchToolRegex.id) continue
const isDispatch = SUBAGENT_KEYS.has(tc.name) && !tc.calledBy
if (isDispatch) {
@@ -312,7 +345,7 @@ export function MessageContent({
if (segments.length === 0) {
if (isStreaming) {
return (
<div className='space-y-2.5'>
<div className='space-y-[10px]'>
<PendingTagIndicator />
</div>
)
@@ -341,7 +374,7 @@ export function MessageContent({
)?.id
return (
<div className='space-y-2.5'>
<div className='space-y-[10px]'>
{segments.map((segment, i) => {
switch (segment.type) {
case 'text':
@@ -384,9 +417,11 @@ export function MessageContent({
)
case 'stopped':
return (
<div key={`stopped-${i}`} className='flex items-center gap-2'>
<div key={`stopped-${i}`} className='flex items-center gap-[8px]'>
<CircleStop className='h-[16px] w-[16px] flex-shrink-0 text-[var(--text-icon)]' />
<span className='font-base text-[var(--text-body)] text-sm'>Stopped by user</span>
<span className='font-base text-[14px] text-[var(--text-body)]'>
Stopped by user
</span>
</div>
)
}

View File

@@ -23,96 +23,33 @@ import {
} from '@/components/emcn'
import { Table as TableIcon } from '@/components/emcn/icons'
import { AgentIcon } from '@/components/icons'
import type { MothershipToolName, SubagentName } from '@/app/workspace/[workspaceId]/home/types'
export type IconComponent = ComponentType<SVGProps<SVGSVGElement>>
const TOOL_ICONS: Record<MothershipToolName | SubagentName | 'mothership', IconComponent> = {
const TOOL_ICONS: Record<string, IconComponent> = {
mothership: Blimp,
// Workspace
glob: FolderCode,
grep: Search,
read: File,
// Search
search_online: Search,
scrape_page: Search,
get_page_contents: Search,
search_library_docs: Library,
crawl_website: Search,
// Execution
function_execute: TerminalWindow,
superagent: Blimp,
run_workflow: PlayOutline,
run_block: PlayOutline,
run_from_block: PlayOutline,
run_workflow_until_block: PlayOutline,
complete_job: PlayOutline,
get_execution_summary: ClipboardList,
get_job_logs: ClipboardList,
get_workflow_logs: ClipboardList,
get_workflow_data: Layout,
get_block_outputs: ClipboardList,
get_block_upstream_references: ClipboardList,
get_deployed_workflow_state: Rocket,
check_deployment_status: Rocket,
// Workflows & folders
create_workflow: Layout,
delete_workflow: Layout,
edit_workflow: Pencil,
rename_workflow: Pencil,
move_workflow: Layout,
create_folder: FolderCode,
delete_folder: FolderCode,
move_folder: FolderCode,
list_folders: FolderCode,
list_user_workspaces: Layout,
revert_to_version: Rocket,
get_deployment_version: Rocket,
open_resource: Eye,
// Files
workspace_file: File,
download_to_workspace_file: File,
materialize_file: File,
generate_image: File,
generate_visualization: File,
// Tables & knowledge
user_table: TableIcon,
knowledge_base: Database,
// Jobs
create_job: Calendar,
manage_job: Calendar,
update_job_history: Calendar,
job_respond: Calendar,
// Management
manage_mcp_tool: Settings,
manage_skill: Asterisk,
manage_credential: Integration,
manage_custom_tool: Wrench,
update_workspace_mcp_server: Settings,
delete_workspace_mcp_server: Settings,
create_workspace_mcp_server: Settings,
list_workspace_mcp_servers: Settings,
oauth_get_auth_link: Integration,
oauth_request_access: Integration,
set_environment_variables: Settings,
set_global_workflow_variables: Settings,
get_platform_actions: Settings,
search_documentation: Library,
search_patterns: Search,
deploy_api: Rocket,
deploy_chat: Rocket,
deploy_mcp: Rocket,
redeploy: Rocket,
generate_api_key: Asterisk,
user_memory: Database,
context_write: Pencil,
context_compaction: Asterisk,
// Subagents
function_execute: TerminalWindow,
superagent: Blimp,
user_table: TableIcon,
workspace_file: File,
create_workflow: Layout,
edit_workflow: Pencil,
build: Hammer,
run: PlayOutline,
deploy: Rocket,
auth: Integration,
knowledge: Database,
knowledge_base: Database,
table: TableIcon,
job: Calendar,
agent: AgentIcon,
@@ -122,6 +59,8 @@ const TOOL_ICONS: Record<MothershipToolName | SubagentName | 'mothership', IconC
debug: Bug,
edit: Pencil,
fast_edit: Pencil,
context_compaction: Asterisk,
open_resource: Eye,
file_write: File,
}

View File

@@ -10,7 +10,7 @@ import {
cancelRunToolExecution,
markRunToolManuallyStopped,
reportManualRunToolStop,
} from '@/lib/copilot/client-sse/run-tool-execution'
} from '@/lib/copilot/tools/client/run-tool-execution'
import {
downloadWorkspaceFile,
getFileExtension,
@@ -83,7 +83,12 @@ export const ResourceContent = memo(function ResourceContent({
}, [streamingFile])
const syntheticFile = useMemo(() => {
const ext = getFileExtension(streamFileName)
const type = ext === 'pptx' ? 'text/x-pptxgenjs' : getMimeTypeFromExtension(ext)
const SOURCE_MIME_MAP: Record<string, string> = {
pptx: 'text/x-pptxgenjs',
docx: 'text/x-docxjs',
pdf: 'text/x-pdflibjs',
}
const type = SOURCE_MIME_MAP[ext] ?? getMimeTypeFromExtension(ext)
return {
id: 'streaming-file',
workspaceId,

View File

@@ -9,7 +9,7 @@ import {
} from 'react'
import { Button, Tooltip } from '@/components/emcn'
import { Columns3, Eye, PanelLeft, Pencil } from '@/components/emcn/icons'
import { isEphemeralResource } from '@/lib/copilot/resource-extraction'
import { isEphemeralResource } from '@/lib/copilot/resources/types'
import { cn } from '@/lib/core/utils/cn'
import type { PreviewMode } from '@/app/workspace/[workspaceId]/files/components/file-viewer'
import { AddResourceDropdown } from '@/app/workspace/[workspaceId]/home/components/mothership-view/components/add-resource-dropdown'

View File

@@ -369,7 +369,7 @@ export function Home({ chatId }: HomeProps = {}) {
onCollapse={collapseResource}
isCollapsed={isResourceCollapsed}
streamingFile={streamingFile}
genericResourceData={genericResourceData}
genericResourceData={genericResourceData ?? undefined}
className={skipResourceTransition ? '!transition-none' : undefined}
/>

File diff suppressed because it is too large Load Diff

View File

@@ -1,10 +1,39 @@
import type { MothershipResourceType } from '@/lib/copilot/resource-types'
import {
Agent,
Auth,
Build,
CreateWorkflow,
Debug,
Deploy,
EditWorkflow,
FunctionExecute,
GetPageContents,
Glob,
Grep,
Job,
Knowledge,
KnowledgeBase,
ManageMcpTool,
ManageSkill,
OpenResource,
Read as ReadTool,
Research,
Run,
ScrapePage,
SearchLibraryDocs,
SearchOnline,
Superagent,
Table,
UserMemory,
UserTable,
WorkspaceFile,
} from '@/lib/copilot/generated/tool-catalog-v1'
import type { ChatContext } from '@/stores/panel'
export type {
MothershipResource,
MothershipResourceType,
} from '@/lib/copilot/resource-types'
} from '@/lib/copilot/resources/types'
export interface FileAttachmentForApi {
id: string
@@ -21,169 +50,34 @@ export interface QueuedMessage {
contexts?: ChatContext[]
}
/**
* SSE event types emitted by the Go orchestrator backend.
*
* @example
* ```json
* { "type": "content", "data": "Hello world" }
* { "type": "tool_call", "state": "executing", "toolCallId": "toolu_...", "toolName": "glob", "ui": { "title": "..." } }
* { "type": "subagent_start", "subagent": "build" }
* ```
*/
export type SSEEventType =
| 'chat_id'
| 'request_id'
| 'title_updated'
| 'content'
| 'reasoning' // openai reasoning - render as thinking text
| 'tool_call' // tool call name
| 'tool_call_delta' // chunk of tool call
| 'tool_generating' // start a tool call
| 'tool_result' // tool call result
| 'tool_error' // tool call error
| 'resource_added' // add a resource to the chat
| 'resource_deleted' // delete a resource from the chat
| 'subagent_start' // start a subagent
| 'subagent_end' // end a subagent
| 'structured_result' // structured result from a tool call
| 'subagent_result' // result from a subagent
| 'done' // end of the chat
| 'context_compaction_start' // context compaction started
| 'context_compaction' // conversation context was compacted
| 'error' // error in the chat
| 'start' // start of the chat
/**
* All tool names observed in the mothership SSE stream, grouped by phase.
*
* @example
* ```json
* { "type": "tool_generating", "toolName": "glob" }
* { "type": "tool_call", "toolName": "function_execute", "ui": { "title": "Running code", "icon": "code" } }
* { "type": "tool", "phase": "call", "toolName": "glob" }
* { "type": "tool", "phase": "call", "toolName": "function_execute", "ui": { "title": "Running code", "icon": "code" } }
* ```
* Stream `type` is `MothershipStreamV1EventType.tool` (`mothership-stream-v1`) with `phase: 'call'`.
*/
export type MothershipToolName =
| 'glob'
| 'grep'
| 'read'
| 'search_online'
| 'scrape_page'
| 'get_page_contents'
| 'search_library_docs'
| 'manage_mcp_tool'
| 'manage_skill'
| 'manage_credential'
| 'manage_custom_tool'
| 'manage_job'
| 'user_memory'
| 'function_execute'
| 'superagent'
| 'user_table'
| 'workspace_file'
| 'create_workflow'
| 'delete_workflow'
| 'edit_workflow'
| 'rename_workflow'
| 'move_workflow'
| 'run_workflow'
| 'run_block'
| 'run_from_block'
| 'run_workflow_until_block'
| 'create_folder'
| 'delete_folder'
| 'move_folder'
| 'list_folders'
| 'list_user_workspaces'
| 'create_job'
| 'complete_job'
| 'update_job_history'
| 'job_respond'
| 'download_to_workspace_file'
| 'materialize_file'
| 'context_write'
| 'generate_image'
| 'generate_visualization'
| 'crawl_website'
| 'get_execution_summary'
| 'get_job_logs'
| 'get_deployment_version'
| 'revert_to_version'
| 'check_deployment_status'
| 'get_deployed_workflow_state'
| 'get_workflow_data'
| 'get_workflow_logs'
| 'get_block_outputs'
| 'get_block_upstream_references'
| 'set_global_workflow_variables'
| 'set_environment_variables'
| 'get_platform_actions'
| 'search_documentation'
| 'search_patterns'
| 'update_workspace_mcp_server'
| 'delete_workspace_mcp_server'
| 'create_workspace_mcp_server'
| 'list_workspace_mcp_servers'
| 'deploy_api'
| 'deploy_chat'
| 'deploy_mcp'
| 'redeploy'
| 'generate_api_key'
| 'oauth_get_auth_link'
| 'oauth_request_access'
| 'build'
| 'run'
| 'deploy'
| 'auth'
| 'knowledge'
| 'knowledge_base'
| 'table'
| 'job'
| 'agent'
| 'custom_tool'
| 'research'
| 'plan'
| 'debug'
| 'edit'
| 'fast_edit'
| 'open_resource'
| 'context_compaction'
/**
* Subagent identifiers dispatched via `subagent_start` SSE events.
*
* @example
* ```json
* { "type": "subagent_start", "subagent": "build" }
* ```
*/
export type SubagentName =
| 'build'
| 'deploy'
| 'auth'
| 'research'
| 'knowledge'
| 'table'
| 'custom_tool'
| 'superagent'
| 'plan'
| 'debug'
| 'edit'
| 'fast_edit'
| 'run'
| 'agent'
| 'job'
| 'file_write'
export const ToolPhase = {
workspace: 'workspace',
search: 'search',
management: 'management',
execution: 'execution',
resource: 'resource',
subagent: 'subagent',
} as const
export type ToolPhase = (typeof ToolPhase)[keyof typeof ToolPhase]
export type ToolPhase =
| 'workspace'
| 'search'
| 'management'
| 'execution'
| 'resource'
| 'subagent'
export type ToolCallStatus = 'executing' | 'success' | 'error' | 'cancelled'
export const ToolCallStatus = {
executing: 'executing',
success: 'success',
error: 'error',
cancelled: 'cancelled',
} as const
export type ToolCallStatus = (typeof ToolCallStatus)[keyof typeof ToolCallStatus]
export interface ToolCallResult {
success: boolean
@@ -191,7 +85,6 @@ export interface ToolCallResult {
error?: string
}
/** A single tool call result entry in the generic Results resource tab. */
export interface GenericResourceEntry {
toolCallId: string
toolName: string
@@ -202,7 +95,6 @@ export interface GenericResourceEntry {
result?: ToolCallResult
}
/** Accumulated feed of tool call results shown in the generic Results tab. */
export interface GenericResourceData {
entries: GenericResourceEntry[]
}
@@ -225,7 +117,7 @@ export interface ToolCallInfo {
phaseLabel?: string
params?: Record<string, unknown>
calledBy?: string
result?: { success: boolean; output?: unknown; error?: string }
result?: ToolCallResult
streamingArgs?: string
}
@@ -234,14 +126,16 @@ export interface OptionItem {
label: string
}
export type ContentBlockType =
| 'text'
| 'tool_call'
| 'subagent'
| 'subagent_end'
| 'subagent_text'
| 'options'
| 'stopped'
export const ContentBlockType = {
text: 'text',
tool_call: 'tool_call',
subagent: 'subagent',
subagent_end: 'subagent_end',
subagent_text: 'subagent_text',
options: 'options',
stopped: 'stopped',
} as const
export type ContentBlockType = (typeof ContentBlockType)[keyof typeof ContentBlockType]
export interface ContentBlock {
type: ContentBlockType
@@ -278,7 +172,7 @@ export interface ChatMessage {
requestId?: string
}
export const SUBAGENT_LABELS: Record<SubagentName, string> = {
export const SUBAGENT_LABELS: Record<string, string> = {
build: 'Build agent',
deploy: 'Deploy agent',
auth: 'Integration agent',
@@ -304,206 +198,130 @@ export interface ToolUIMetadata {
}
/**
* Primary UI metadata for tools observed in the SSE stream.
* Maps tool IDs to human-readable display names shown in the chat.
* This is the single source of truth — server-sent `ui.title` values are not used.
* Default UI metadata for tools observed in the SSE stream.
* The backend may send `ui` on some `MothershipStreamV1EventType.tool` payloads (`phase: 'call'`);
* this map provides fallback metadata when `ui` is absent.
*/
export const TOOL_UI_METADATA: Record<MothershipToolName, ToolUIMetadata> = {
// Workspace
glob: { title: 'Searching workspace', phaseLabel: 'Workspace', phase: 'workspace' },
grep: { title: 'Searching workspace', phaseLabel: 'Workspace', phase: 'workspace' },
read: { title: 'Reading file', phaseLabel: 'Workspace', phase: 'workspace' },
// Search
search_online: { title: 'Searching online', phaseLabel: 'Search', phase: 'search' },
scrape_page: { title: 'Reading webpage', phaseLabel: 'Search', phase: 'search' },
get_page_contents: { title: 'Reading page', phaseLabel: 'Search', phase: 'search' },
search_library_docs: { title: 'Searching docs', phaseLabel: 'Search', phase: 'search' },
crawl_website: { title: 'Browsing website', phaseLabel: 'Search', phase: 'search' },
// Execution
function_execute: { title: 'Running code', phaseLabel: 'Code', phase: 'execution' },
superagent: { title: 'Taking action', phaseLabel: 'Action', phase: 'execution' },
run_workflow: { title: 'Running workflow', phaseLabel: 'Execution', phase: 'execution' },
run_block: { title: 'Running block', phaseLabel: 'Execution', phase: 'execution' },
run_from_block: { title: 'Running from block', phaseLabel: 'Execution', phase: 'execution' },
run_workflow_until_block: {
title: 'Running partial workflow',
phaseLabel: 'Execution',
export const TOOL_UI_METADATA: Record<string, ToolUIMetadata> = {
[Glob.id]: {
title: 'Searching files',
phaseLabel: 'Workspace',
phase: 'workspace',
},
[Grep.id]: {
title: 'Searching code',
phaseLabel: 'Workspace',
phase: 'workspace',
},
[ReadTool.id]: { title: 'Reading file', phaseLabel: 'Workspace', phase: 'workspace' },
[SearchOnline.id]: {
title: 'Searching online',
phaseLabel: 'Search',
phase: 'search',
},
[ScrapePage.id]: {
title: 'Scraping page',
phaseLabel: 'Search',
phase: 'search',
},
[GetPageContents.id]: {
title: 'Getting page contents',
phaseLabel: 'Search',
phase: 'search',
},
[SearchLibraryDocs.id]: {
title: 'Searching library docs',
phaseLabel: 'Search',
phase: 'search',
},
[ManageMcpTool.id]: {
title: 'Managing MCP tool',
phaseLabel: 'Management',
phase: 'management',
},
[ManageSkill.id]: {
title: 'Managing skill',
phaseLabel: 'Management',
phase: 'management',
},
[UserMemory.id]: {
title: 'Accessing memory',
phaseLabel: 'Management',
phase: 'management',
},
[FunctionExecute.id]: {
title: 'Running code',
phaseLabel: 'Code',
phase: 'execution',
},
complete_job: { title: 'Completing job', phaseLabel: 'Execution', phase: 'execution' },
get_execution_summary: { title: 'Checking results', phaseLabel: 'Execution', phase: 'execution' },
get_job_logs: { title: 'Checking logs', phaseLabel: 'Execution', phase: 'execution' },
get_workflow_logs: { title: 'Checking logs', phaseLabel: 'Execution', phase: 'execution' },
get_workflow_data: { title: 'Loading workflow', phaseLabel: 'Execution', phase: 'execution' },
get_block_outputs: {
title: 'Checking block outputs',
phaseLabel: 'Execution',
[Superagent.id]: {
title: 'Executing action',
phaseLabel: 'Action',
phase: 'execution',
},
get_block_upstream_references: {
title: 'Checking references',
phaseLabel: 'Execution',
phase: 'execution',
},
get_deployed_workflow_state: {
title: 'Checking deployment',
phaseLabel: 'Execution',
phase: 'execution',
},
check_deployment_status: {
title: 'Checking deployment',
phaseLabel: 'Execution',
phase: 'execution',
},
// Workflows & folders
create_workflow: { title: 'Creating workflow', phaseLabel: 'Resource', phase: 'resource' },
delete_workflow: { title: 'Deleting workflow', phaseLabel: 'Resource', phase: 'resource' },
edit_workflow: { title: 'Editing workflow', phaseLabel: 'Resource', phase: 'resource' },
rename_workflow: { title: 'Renaming workflow', phaseLabel: 'Resource', phase: 'resource' },
move_workflow: { title: 'Moving workflow', phaseLabel: 'Resource', phase: 'resource' },
create_folder: { title: 'Creating folder', phaseLabel: 'Resource', phase: 'resource' },
delete_folder: { title: 'Deleting folder', phaseLabel: 'Resource', phase: 'resource' },
move_folder: { title: 'Moving folder', phaseLabel: 'Resource', phase: 'resource' },
list_folders: { title: 'Browsing folders', phaseLabel: 'Resource', phase: 'resource' },
list_user_workspaces: { title: 'Browsing workspaces', phaseLabel: 'Resource', phase: 'resource' },
revert_to_version: { title: 'Restoring version', phaseLabel: 'Resource', phase: 'resource' },
get_deployment_version: {
title: 'Checking deployment',
[UserTable.id]: {
title: 'Managing table',
phaseLabel: 'Resource',
phase: 'resource',
},
open_resource: { title: 'Opening resource', phaseLabel: 'Resource', phase: 'resource' },
// Files
workspace_file: { title: 'Working with files', phaseLabel: 'Resource', phase: 'resource' },
download_to_workspace_file: {
title: 'Downloading file',
[WorkspaceFile.id]: {
title: 'Managing file',
phaseLabel: 'Resource',
phase: 'resource',
},
materialize_file: { title: 'Saving file', phaseLabel: 'Resource', phase: 'resource' },
generate_image: { title: 'Generating image', phaseLabel: 'Resource', phase: 'resource' },
generate_visualization: {
title: 'Generating visualization',
[CreateWorkflow.id]: {
title: 'Creating workflow',
phaseLabel: 'Resource',
phase: 'resource',
},
// Tables & knowledge
user_table: { title: 'Editing table', phaseLabel: 'Resource', phase: 'resource' },
knowledge_base: { title: 'Updating knowledge base', phaseLabel: 'Resource', phase: 'resource' },
// Jobs
create_job: { title: 'Creating job', phaseLabel: 'Resource', phase: 'resource' },
manage_job: { title: 'Updating job', phaseLabel: 'Management', phase: 'management' },
update_job_history: { title: 'Updating job', phaseLabel: 'Management', phase: 'management' },
job_respond: { title: 'Explaining job scheduled', phaseLabel: 'Execution', phase: 'execution' },
// Management
manage_mcp_tool: { title: 'Updating integration', phaseLabel: 'Management', phase: 'management' },
manage_skill: { title: 'Updating skill', phaseLabel: 'Management', phase: 'management' },
manage_credential: { title: 'Connecting account', phaseLabel: 'Management', phase: 'management' },
manage_custom_tool: { title: 'Updating tool', phaseLabel: 'Management', phase: 'management' },
update_workspace_mcp_server: {
title: 'Updating MCP server',
phaseLabel: 'Management',
phase: 'management',
[EditWorkflow.id]: {
title: 'Editing workflow',
phaseLabel: 'Resource',
phase: 'resource',
},
delete_workspace_mcp_server: {
title: 'Removing MCP server',
phaseLabel: 'Management',
phase: 'management',
[Build.id]: { title: 'Building', phaseLabel: 'Build', phase: 'subagent' },
[Run.id]: { title: 'Running', phaseLabel: 'Run', phase: 'subagent' },
[Deploy.id]: { title: 'Deploying', phaseLabel: 'Deploy', phase: 'subagent' },
[Auth.id]: {
title: 'Connecting credentials',
phaseLabel: 'Auth',
phase: 'subagent',
},
create_workspace_mcp_server: {
title: 'Creating MCP server',
phaseLabel: 'Management',
phase: 'management',
[Knowledge.id]: {
title: 'Managing knowledge',
phaseLabel: 'Knowledge',
phase: 'subagent',
},
list_workspace_mcp_servers: {
title: 'Browsing MCP servers',
phaseLabel: 'Management',
phase: 'management',
[KnowledgeBase.id]: {
title: 'Managing knowledge base',
phaseLabel: 'Resource',
phase: 'resource',
},
oauth_get_auth_link: {
title: 'Connecting account',
phaseLabel: 'Management',
phase: 'management',
[Table.id]: { title: 'Managing tables', phaseLabel: 'Table', phase: 'subagent' },
[Job.id]: { title: 'Managing jobs', phaseLabel: 'Job', phase: 'subagent' },
[Agent.id]: { title: 'Agent action', phaseLabel: 'Agent', phase: 'subagent' },
custom_tool: {
title: 'Creating tool',
phaseLabel: 'Tool',
phase: 'subagent',
},
oauth_request_access: {
title: 'Connecting account',
phaseLabel: 'Management',
phase: 'management',
},
set_environment_variables: {
title: 'Updating environment',
phaseLabel: 'Management',
phase: 'management',
},
set_global_workflow_variables: {
title: 'Updating variables',
phaseLabel: 'Management',
phase: 'management',
},
get_platform_actions: { title: 'Loading actions', phaseLabel: 'Management', phase: 'management' },
search_documentation: { title: 'Searching docs', phaseLabel: 'Search', phase: 'search' },
search_patterns: { title: 'Searching patterns', phaseLabel: 'Search', phase: 'search' },
deploy_api: { title: 'Deploying API', phaseLabel: 'Deploy', phase: 'management' },
deploy_chat: { title: 'Deploying chat', phaseLabel: 'Deploy', phase: 'management' },
deploy_mcp: { title: 'Deploying MCP', phaseLabel: 'Deploy', phase: 'management' },
redeploy: { title: 'Redeploying', phaseLabel: 'Deploy', phase: 'management' },
generate_api_key: { title: 'Generating API key', phaseLabel: 'Deploy', phase: 'management' },
user_memory: { title: 'Updating memory', phaseLabel: 'Management', phase: 'management' },
context_write: { title: 'Writing notes', phaseLabel: 'Management', phase: 'management' },
context_compaction: {
title: 'Optimizing context',
phaseLabel: 'Management',
phase: 'management',
},
// Subagents
build: { title: 'Building', phaseLabel: 'Build', phase: 'subagent' },
run: { title: 'Running', phaseLabel: 'Run', phase: 'subagent' },
deploy: { title: 'Deploying', phaseLabel: 'Deploy', phase: 'subagent' },
auth: { title: 'Connecting integration', phaseLabel: 'Auth', phase: 'subagent' },
knowledge: { title: 'Working with knowledge', phaseLabel: 'Knowledge', phase: 'subagent' },
table: { title: 'Working with tables', phaseLabel: 'Table', phase: 'subagent' },
job: { title: 'Working with jobs', phaseLabel: 'Job', phase: 'subagent' },
agent: { title: 'Taking action', phaseLabel: 'Agent', phase: 'subagent' },
custom_tool: { title: 'Creating tool', phaseLabel: 'Tool', phase: 'subagent' },
research: { title: 'Researching', phaseLabel: 'Research', phase: 'subagent' },
[Research.id]: { title: 'Researching', phaseLabel: 'Research', phase: 'subagent' },
plan: { title: 'Planning', phaseLabel: 'Plan', phase: 'subagent' },
debug: { title: 'Debugging', phaseLabel: 'Debug', phase: 'subagent' },
[Debug.id]: { title: 'Debugging', phaseLabel: 'Debug', phase: 'subagent' },
edit: { title: 'Editing workflow', phaseLabel: 'Edit', phase: 'subagent' },
fast_edit: { title: 'Editing workflow', phaseLabel: 'Edit', phase: 'subagent' },
}
/** UI presentation hints carried on SSE payloads (titles, phase labels, visibility). */
export interface SSEPayloadUI {
  // When true the event should not be rendered to the user.
  hidden?: boolean
  title?: string
  phaseLabel?: string
  icon?: string
  // NOTE(review): `internal` vs `hidden` distinction is not visible here — confirm semantics at emit site.
  internal?: boolean
  // Presumably marks tools the client can execute locally — confirm against dispatcher.
  clientExecutable?: boolean
}
/** Inner `data` object of an SSE payload; all fields optional since events vary by type. */
export interface SSEPayloadData {
  name?: string
  ui?: SSEPayloadUI
  id?: string
  agent?: string
  // True while a tool call's arguments are still streaming in.
  partial?: boolean
  arguments?: Record<string, unknown>
  input?: Record<string, unknown>
  result?: unknown
  error?: string
}
export interface SSEPayload {
type: SSEEventType | (string & {})
chatId?: string
data?: string | SSEPayloadData
content?: string
toolCallId?: string
toolName?: string
ui?: SSEPayloadUI
success?: boolean
result?: unknown
error?: string
subagent?: string
resource?: { type: MothershipResourceType; id: string; title: string }
fast_edit: {
title: 'Editing workflow',
phaseLabel: 'Edit',
phase: 'subagent',
},
[OpenResource.id]: {
title: 'Opening resource',
phaseLabel: 'Resource',
phase: 'resource',
},
context_compaction: {
title: 'Compacted context',
phaseLabel: 'Context',
phase: 'management',
},
}

View File

@@ -218,7 +218,7 @@ export const Panel = memo(function Panel({ workspaceId: propWorkspaceId }: Panel
const [copilotChatId, setCopilotChatId] = useState<string | undefined>(undefined)
const [copilotChatTitle, setCopilotChatTitle] = useState<string | null>(null)
const [copilotChatList, setCopilotChatList] = useState<
{ id: string; title: string | null; updatedAt: string; conversationId: string | null }[]
{ id: string; title: string | null; updatedAt: string; activeStreamId: string | null }[]
>([])
const [isCopilotHistoryOpen, setIsCopilotHistoryOpen] = useState(false)
@@ -238,7 +238,7 @@ export const Panel = memo(function Panel({ workspaceId: propWorkspaceId }: Panel
id: string
title: string | null
updatedAt: string
conversationId: string | null
activeStreamId: string | null
}>
setCopilotChatList(filtered)
@@ -784,7 +784,7 @@ export const Panel = memo(function Panel({ workspaceId: propWorkspaceId }: Panel
>
<ConversationListItem
title={chat.title || 'New Chat'}
isActive={Boolean(chat.conversationId)}
isActive={Boolean(chat.activeStreamId)}
titleClassName='text-[13px]'
actions={
<div

View File

@@ -1,5 +1,5 @@
import { createHmac } from 'crypto'
import { db, workflowExecutionLogs } from '@sim/db'
import { db } from '@sim/db'
import {
account,
workspaceNotificationDelivery,
@@ -17,14 +17,11 @@ import {
import { checkUsageStatus } from '@/lib/billing/calculations/usage-monitor'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { dollarsToCredits } from '@/lib/billing/credits/conversion'
import { createBullMQJobData, isBullMQEnabled } from '@/lib/core/bullmq'
import { acquireLock } from '@/lib/core/config/redis'
import { RateLimiter } from '@/lib/core/rate-limiter'
import { decryptSecret } from '@/lib/core/security/encryption'
import { secureFetchWithValidation } from '@/lib/core/security/input-validation.server'
import { formatDuration } from '@/lib/core/utils/formatting'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { enqueueWorkspaceDispatch } from '@/lib/core/workspace-dispatch'
import type { TraceSpan, WorkflowExecutionLog } from '@/lib/logs/types'
import { sendEmail } from '@/lib/messaging/email/mailer'
import type { AlertConfig } from '@/lib/notifications/alert-rules'
@@ -35,7 +32,6 @@ const logger = createLogger('WorkspaceNotificationDelivery')
const MAX_ATTEMPTS = 5
const RETRY_DELAYS = [5 * 1000, 15 * 1000, 60 * 1000, 3 * 60 * 1000, 10 * 60 * 1000]
const NOTIFICATION_DISPATCH_LOCK_TTL_SECONDS = 3
function getRetryDelayWithJitter(baseDelay: number): number {
const jitter = Math.random() * 0.1 * baseDelay
@@ -500,157 +496,6 @@ export type NotificationDeliveryResult =
| { status: 'success' | 'skipped' | 'failed' }
| { status: 'retry'; retryDelayMs: number }
/**
 * Fetch the stored execution log for a retried delivery, or synthesize a
 * minimal placeholder log when no matching row exists.
 * The lookup is by executionId, additionally scoped to workflowId when known.
 */
async function buildRetryLog(params: NotificationDeliveryParams): Promise<WorkflowExecutionLog> {
  const filters = [eq(workflowExecutionLogs.executionId, params.log.executionId)]
  const { workflowId } = params.log
  if (workflowId) {
    filters.push(eq(workflowExecutionLogs.workflowId, workflowId))
  }
  const rows = await db
    .select()
    .from(workflowExecutionLogs)
    .where(and(...filters))
    .limit(1)
  const stored = rows[0]
  if (stored) {
    return stored as unknown as WorkflowExecutionLog
  }
  // No persisted log found — return a zeroed placeholder so delivery can proceed.
  const timestamp = new Date().toISOString()
  return {
    id: `retry_log_${params.deliveryId}`,
    workflowId: params.log.workflowId,
    executionId: params.log.executionId,
    stateSnapshotId: '',
    level: 'info',
    trigger: 'system',
    startedAt: timestamp,
    endedAt: timestamp,
    totalDurationMs: 0,
    executionData: {},
    cost: { total: 0 },
    createdAt: timestamp,
  }
}
/**
 * Enqueue a notification delivery onto the lightweight workspace dispatch lane.
 * Returns false when BullMQ is disabled or when the per-delivery lock is held
 * (so concurrent sweepers cannot enqueue the same delivery twice).
 */
export async function enqueueNotificationDeliveryDispatch(
  params: NotificationDeliveryParams
): Promise<boolean> {
  if (!isBullMQEnabled()) return false

  // Short-TTL lock de-duplicates concurrent enqueue attempts for this delivery.
  const locked = await acquireLock(
    `workspace-notification-dispatch:${params.deliveryId}`,
    params.deliveryId,
    NOTIFICATION_DISPATCH_LOCK_TTL_SECONDS
  )
  if (!locked) return false

  await enqueueWorkspaceDispatch({
    workspaceId: params.workspaceId,
    lane: 'lightweight',
    queueName: 'workspace-notification-delivery',
    bullmqJobName: 'workspace-notification-delivery',
    bullmqPayload: createBullMQJobData(params),
    metadata: {
      workflowId: params.log.workflowId ?? undefined,
    },
  })
  return true
}
// Deliveries left `in_progress` longer than this are treated as orphaned and reset to `pending`.
const STUCK_IN_PROGRESS_THRESHOLD_MS = 5 * 60 * 1000
/**
 * Sweep due notification deliveries back onto the dispatch queue.
 *
 * Two phases: (1) reset deliveries stuck `in_progress` past the threshold
 * (worker likely died mid-attempt) back to `pending`; (2) select up to
 * `limit` pending deliveries whose nextAttemptAt is unset or due, rebuild
 * their log payload, and enqueue each.
 *
 * @param limit - max deliveries to consider per sweep (default 50)
 * @returns the number actually enqueued (lock contention may skip some)
 */
export async function sweepPendingNotificationDeliveries(limit = 50): Promise<number> {
  if (!isBullMQEnabled()) {
    return 0
  }
  const stuckThreshold = new Date(Date.now() - STUCK_IN_PROGRESS_THRESHOLD_MS)
  // Phase 1: recover orphaned in-progress deliveries.
  await db
    .update(workspaceNotificationDelivery)
    .set({
      status: 'pending',
      updatedAt: new Date(),
    })
    .where(
      and(
        eq(workspaceNotificationDelivery.status, 'in_progress'),
        lte(workspaceNotificationDelivery.lastAttemptAt, stuckThreshold)
      )
    )
  // Phase 2: find pending deliveries that are due (no nextAttemptAt, or in the past).
  const dueDeliveries = await db
    .select({
      deliveryId: workspaceNotificationDelivery.id,
      subscriptionId: workspaceNotificationDelivery.subscriptionId,
      workflowId: workspaceNotificationDelivery.workflowId,
      executionId: workspaceNotificationDelivery.executionId,
      workspaceId: workspaceNotificationSubscription.workspaceId,
      alertConfig: workspaceNotificationSubscription.alertConfig,
      notificationType: workspaceNotificationSubscription.notificationType,
    })
    .from(workspaceNotificationDelivery)
    .innerJoin(
      workspaceNotificationSubscription,
      eq(workspaceNotificationDelivery.subscriptionId, workspaceNotificationSubscription.id)
    )
    .where(
      and(
        eq(workspaceNotificationDelivery.status, 'pending'),
        or(
          isNull(workspaceNotificationDelivery.nextAttemptAt),
          lte(workspaceNotificationDelivery.nextAttemptAt, new Date())
        )
      )
    )
    .limit(limit)
  let enqueued = 0
  // Sequential on purpose: each iteration does a DB lookup (buildRetryLog) plus
  // a locked enqueue; NOTE(review): could be parallelized if sweep latency matters.
  for (const delivery of dueDeliveries) {
    const params: NotificationDeliveryParams = {
      deliveryId: delivery.deliveryId,
      subscriptionId: delivery.subscriptionId,
      workspaceId: delivery.workspaceId,
      notificationType: delivery.notificationType,
      // Rehydrate the stored log (or a placeholder) from the delivery's IDs.
      log: await buildRetryLog({
        deliveryId: delivery.deliveryId,
        subscriptionId: delivery.subscriptionId,
        workspaceId: delivery.workspaceId,
        notificationType: delivery.notificationType,
        log: {
          id: '',
          workflowId: delivery.workflowId,
          executionId: delivery.executionId,
          stateSnapshotId: '',
          level: 'info',
          trigger: 'system',
          startedAt: '',
          endedAt: '',
          totalDurationMs: 0,
          executionData: {},
          cost: { total: 0 },
          createdAt: '',
        },
        alertConfig: (delivery.alertConfig as AlertConfig | null) ?? undefined,
      }),
      alertConfig: (delivery.alertConfig as AlertConfig | null) ?? undefined,
    }
    if (await enqueueNotificationDeliveryDispatch(params)) {
      enqueued += 1
    }
  }
  return enqueued
}
export async function executeNotificationDelivery(
params: NotificationDeliveryParams
): Promise<NotificationDeliveryResult> {

View File

@@ -1,4 +1,6 @@
import { keepPreviousData, useMutation, useQuery, useQueryClient } from '@tanstack/react-query'
import type { PersistedMessage } from '@/lib/copilot/chat/persisted-message'
import { normalizeMessage } from '@/lib/copilot/chat/persisted-message'
import type { MothershipResource } from '@/app/workspace/[workspaceId]/home/types'
export interface TaskMetadata {
@@ -9,70 +11,13 @@ export interface TaskMetadata {
isUnread: boolean
}
/** Replayable snapshot of a stream: its buffered events plus the stream's status string. */
export interface StreamSnapshot {
  events: Array<{ eventId: number; streamId: string; event: Record<string, unknown> }>
  status: string
}
export interface TaskChatHistory {
id: string
title: string | null
messages: TaskStoredMessage[]
messages: PersistedMessage[]
activeStreamId: string | null
resources: MothershipResource[]
streamSnapshot?: StreamSnapshot | null
}
/** Stored tool-call record attached to a persisted task message. */
export interface TaskStoredToolCall {
  id: string
  name: string
  status: string
  params?: Record<string, unknown>
  result?: unknown
  error?: string
  durationMs?: number
}
/** File uploaded with a message; snake_case fields mirror the stored shape. */
export interface TaskStoredFileAttachment {
  id: string
  key: string
  filename: string
  media_type: string
  size: number
}
/** Context chip attached to a message; at most one of the *Id fields is expected per kind. */
export interface TaskStoredMessageContext {
  kind: string
  label: string
  workflowId?: string
  knowledgeId?: string
  tableId?: string
  fileId?: string
}
/** A persisted chat message with its tool calls, content blocks, attachments, and contexts. */
export interface TaskStoredMessage {
  id: string
  role: 'user' | 'assistant'
  content: string
  requestId?: string
  toolCalls?: TaskStoredToolCall[]
  contentBlocks?: TaskStoredContentBlock[]
  fileAttachments?: TaskStoredFileAttachment[]
  contexts?: TaskStoredMessageContext[]
}
/** One block of message content: plain text, a tool call, or a nested stream snapshot. */
export interface TaskStoredContentBlock {
  type: string
  content?: string
  toolCall?: {
    id?: string
    name?: string
    state?: string
    params?: Record<string, unknown>
    result?: { success: boolean; output?: unknown; error?: string }
    display?: { text?: string }
    calledBy?: string
  } | null
  streamSnapshot?: { events: unknown[]; status: string } | null
}
export const taskKeys = {
@@ -87,7 +32,7 @@ interface TaskResponse {
id: string
title: string | null
updatedAt: string
conversationId: string | null
activeStreamId: string | null
lastSeenAt: string | null
}
@@ -97,9 +42,9 @@ function mapTask(chat: TaskResponse): TaskMetadata {
id: chat.id,
name: chat.title ?? 'New task',
updatedAt,
isActive: chat.conversationId !== null,
isActive: chat.activeStreamId !== null,
isUnread:
chat.conversationId === null &&
chat.activeStreamId === null &&
(chat.lastSeenAt === null || updatedAt > new Date(chat.lastSeenAt)),
}
}
@@ -159,10 +104,11 @@ export async function fetchChatHistory(
return {
id: chat.id,
title: chat.title,
messages: Array.isArray(chat.messages) ? chat.messages : [],
activeStreamId: chat.conversationId || null,
messages: Array.isArray(chat.messages)
? chat.messages.map((m: Record<string, unknown>) => normalizeMessage(m))
: [],
activeStreamId: chat.activeStreamId || null,
resources: Array.isArray(chat.resources) ? chat.resources : [],
streamSnapshot: chat.streamSnapshot || null,
}
}

View File

@@ -17,20 +17,10 @@ export const enterpriseSubscriptionMetadataSchema = z.object({
monthlyPrice: z.coerce.number().positive(),
// Number of seats for invitation limits (not for billing)
seats: z.coerce.number().int().positive(),
// Optional custom workspace concurrency limit for enterprise workspaces
workspaceConcurrencyLimit: z.coerce.number().int().positive().optional(),
})
export type EnterpriseSubscriptionMetadata = z.infer<typeof enterpriseSubscriptionMetadataSchema>
const enterpriseWorkspaceConcurrencyMetadataSchema = z.object({
workspaceConcurrencyLimit: z.coerce.number().int().positive().optional(),
})
export type EnterpriseWorkspaceConcurrencyMetadata = z.infer<
typeof enterpriseWorkspaceConcurrencyMetadataSchema
>
export function parseEnterpriseSubscriptionMetadata(
value: unknown
): EnterpriseSubscriptionMetadata | null {
@@ -38,13 +28,6 @@ export function parseEnterpriseSubscriptionMetadata(
return result.success ? result.data : null
}
/**
 * Validate unknown subscription metadata against the workspace-concurrency
 * schema. Returns the parsed metadata, or null when validation fails.
 */
export function parseEnterpriseWorkspaceConcurrencyMetadata(
  value: unknown
): EnterpriseWorkspaceConcurrencyMetadata | null {
  const parsed = enterpriseWorkspaceConcurrencyMetadataSchema.safeParse(value)
  if (!parsed.success) {
    return null
  }
  return parsed.data
}
export interface UsageData {
currentUsage: number
limit: number

View File

@@ -1,146 +0,0 @@
/**
* @vitest-environment node
*/
import { beforeEach, describe, expect, it, vi } from 'vitest'
// vi.hoisted ensures these mock fns exist before the hoisted vi.mock factories run.
const {
  mockGetHighestPrioritySubscription,
  mockGetWorkspaceBilledAccountUserId,
  mockFeatureFlags,
  mockRedisGet,
  mockRedisSet,
  mockRedisDel,
  mockRedisKeys,
  mockGetRedisClient,
} = vi.hoisted(() => ({
  mockGetHighestPrioritySubscription: vi.fn(),
  mockGetWorkspaceBilledAccountUserId: vi.fn(),
  // Mutable flag object so individual tests can flip billing on/off.
  mockFeatureFlags: {
    isBillingEnabled: true,
  },
  mockRedisGet: vi.fn(),
  mockRedisSet: vi.fn(),
  mockRedisDel: vi.fn(),
  mockRedisKeys: vi.fn(),
  mockGetRedisClient: vi.fn(),
}))
// Silence logging and stub billing/workspace/redis collaborators.
vi.mock('@sim/logger', () => ({
  createLogger: () => ({
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
    debug: vi.fn(),
  }),
}))
vi.mock('@/lib/billing/core/plan', () => ({
  getHighestPrioritySubscription: mockGetHighestPrioritySubscription,
}))
vi.mock('@/lib/workspaces/utils', () => ({
  getWorkspaceBilledAccountUserId: mockGetWorkspaceBilledAccountUserId,
}))
vi.mock('@/lib/core/config/redis', () => ({
  getRedisClient: mockGetRedisClient,
}))
vi.mock('@/lib/core/config/feature-flags', () => mockFeatureFlags)
import {
getWorkspaceConcurrencyLimit,
resetWorkspaceConcurrencyLimitCache,
} from '@/lib/billing/workspace-concurrency'
describe('workspace concurrency billing', () => {
  beforeEach(() => {
    // Fresh mocks per test: billing on, empty Redis that answers all commands.
    vi.clearAllMocks()
    mockFeatureFlags.isBillingEnabled = true
    mockRedisGet.mockResolvedValue(null)
    mockRedisSet.mockResolvedValue('OK')
    mockRedisDel.mockResolvedValue(1)
    mockRedisKeys.mockResolvedValue([])
    mockGetRedisClient.mockReturnValue({
      get: mockRedisGet,
      set: mockRedisSet,
      del: mockRedisDel,
      keys: mockRedisKeys,
    })
  })
  it('returns free tier when no billed account exists', async () => {
    mockGetWorkspaceBilledAccountUserId.mockResolvedValue(null)
    await expect(getWorkspaceConcurrencyLimit('workspace-1')).resolves.toBe(5)
  })
  it('returns pro limit for pro billing accounts', async () => {
    mockGetWorkspaceBilledAccountUserId.mockResolvedValue('user-1')
    mockGetHighestPrioritySubscription.mockResolvedValue({
      plan: 'pro_6000',
      metadata: null,
    })
    await expect(getWorkspaceConcurrencyLimit('workspace-1')).resolves.toBe(50)
  })
  it('returns max limit for max plan tiers', async () => {
    mockGetWorkspaceBilledAccountUserId.mockResolvedValue('user-1')
    mockGetHighestPrioritySubscription.mockResolvedValue({
      plan: 'pro_25000',
      metadata: null,
    })
    await expect(getWorkspaceConcurrencyLimit('workspace-1')).resolves.toBe(200)
  })
  it('returns max limit for legacy team plans', async () => {
    mockGetWorkspaceBilledAccountUserId.mockResolvedValue('user-1')
    mockGetHighestPrioritySubscription.mockResolvedValue({
      plan: 'team',
      metadata: null,
    })
    await expect(getWorkspaceConcurrencyLimit('workspace-1')).resolves.toBe(200)
  })
  it('returns enterprise metadata override when present', async () => {
    mockGetWorkspaceBilledAccountUserId.mockResolvedValue('user-1')
    mockGetHighestPrioritySubscription.mockResolvedValue({
      plan: 'enterprise',
      // String value on purpose — the schema coerces to number.
      metadata: {
        workspaceConcurrencyLimit: '350',
      },
    })
    await expect(getWorkspaceConcurrencyLimit('workspace-1')).resolves.toBe(350)
  })
  it('uses free-tier limit when billing is disabled', async () => {
    mockFeatureFlags.isBillingEnabled = false
    mockGetWorkspaceBilledAccountUserId.mockResolvedValue('user-1')
    mockGetHighestPrioritySubscription.mockResolvedValue({
      plan: 'pro_25000',
      metadata: {
        workspaceConcurrencyLimit: 999,
      },
    })
    await expect(getWorkspaceConcurrencyLimit('workspace-1')).resolves.toBe(5)
  })
  it('uses redis cache when available', async () => {
    // A cache hit must short-circuit before any billing lookup.
    mockRedisGet.mockResolvedValueOnce('123')
    await expect(getWorkspaceConcurrencyLimit('workspace-1')).resolves.toBe(123)
    expect(mockGetWorkspaceBilledAccountUserId).not.toHaveBeenCalled()
  })
  it('can clear a specific workspace cache entry', async () => {
    await resetWorkspaceConcurrencyLimitCache('workspace-1')
    expect(mockRedisDel).toHaveBeenCalledWith('workspace-concurrency-limit:workspace-1')
  })
})

View File

@@ -1,170 +0,0 @@
import { createLogger } from '@sim/logger'
import { getHighestPrioritySubscription } from '@/lib/billing/core/plan'
import { getPlanTierCredits, isEnterprise, isPro, isTeam } from '@/lib/billing/plan-helpers'
import { parseEnterpriseWorkspaceConcurrencyMetadata } from '@/lib/billing/types'
import { env } from '@/lib/core/config/env'
import { isBillingEnabled } from '@/lib/core/config/feature-flags'
import { getRedisClient } from '@/lib/core/config/redis'
import { getWorkspaceBilledAccountUserId } from '@/lib/workspaces/utils'
const logger = createLogger('WorkspaceConcurrencyBilling')
// Resolved limits are cached for one minute (Redis when available, else in-process).
const CACHE_TTL_MS = 60_000
const CACHE_TTL_SECONDS = Math.floor(CACHE_TTL_MS / 1000)
// Entry shape for the in-process fallback cache (used only when Redis is absent).
interface CacheEntry {
  value: number
  expiresAt: number
}
const inMemoryConcurrencyCache = new Map<string, CacheEntry>()
/** Redis key under which a workspace's concurrency limit is cached. */
function cacheKey(workspaceId: string): string {
  return ['workspace-concurrency-limit', workspaceId].join(':')
}
/**
 * Coerce an unknown cached value into a positive integer limit.
 * Finite positive numbers are floored; strings are parsed base-10.
 * Anything else (zero, negatives, NaN, non-numeric) yields null.
 */
function parsePositiveLimit(value: unknown): number | null {
  switch (typeof value) {
    case 'number':
      return Number.isFinite(value) && value > 0 ? Math.floor(value) : null
    case 'string': {
      const parsed = Number.parseInt(value, 10)
      return Number.isFinite(parsed) && parsed > 0 ? parsed : null
    }
    default:
      return null
  }
}
// Per-tier limits, env-overridable; `|| default` also covers NaN from unset/invalid env vars.
function getFreeConcurrencyLimit(): number {
  return Number.parseInt(env.WORKSPACE_CONCURRENCY_FREE, 10) || 5
}
function getProConcurrencyLimit(): number {
  return Number.parseInt(env.WORKSPACE_CONCURRENCY_PRO, 10) || 50
}
function getTeamConcurrencyLimit(): number {
  return Number.parseInt(env.WORKSPACE_CONCURRENCY_TEAM, 10) || 200
}
function getEnterpriseDefaultConcurrencyLimit(): number {
  return Number.parseInt(env.WORKSPACE_CONCURRENCY_ENTERPRISE, 10) || 200
}
// Enterprise subscriptions may override the default via validated metadata.
function getEnterpriseConcurrencyLimit(metadata: unknown): number {
  const enterpriseMetadata = parseEnterpriseWorkspaceConcurrencyMetadata(metadata)
  return enterpriseMetadata?.workspaceConcurrencyLimit ?? getEnterpriseDefaultConcurrencyLimit()
}
function getPlanConcurrencyLimit(plan: string | null | undefined, metadata: unknown): number {
if (!isBillingEnabled) {
return getFreeConcurrencyLimit()
}
if (!plan) {
return getFreeConcurrencyLimit()
}
if (isEnterprise(plan)) {
return getEnterpriseConcurrencyLimit(metadata)
}
if (isTeam(plan)) {
return getTeamConcurrencyLimit()
}
const credits = getPlanTierCredits(plan)
if (credits >= 25_000) {
return getTeamConcurrencyLimit()
}
if (isPro(plan)) {
return getProConcurrencyLimit()
}
return getFreeConcurrencyLimit()
}
/**
 * Resolve the concurrency limit for a workspace.
 * Order: cache (Redis if available, else in-memory) -> billed account lookup
 * -> plan-based limit, cached for CACHE_TTL. Any resolution error falls back
 * to the free-tier limit without caching, so the next call retries.
 */
export async function getWorkspaceConcurrencyLimit(workspaceId: string): Promise<number> {
  const redis = getRedisClient()
  if (redis) {
    const cached = await redis.get(cacheKey(workspaceId))
    const cachedValue = parsePositiveLimit(cached)
    if (cachedValue !== null) {
      return cachedValue
    }
  } else {
    const cached = inMemoryConcurrencyCache.get(workspaceId)
    if (cached && cached.expiresAt > Date.now()) {
      return cached.value
    }
  }
  try {
    const billedAccountUserId = await getWorkspaceBilledAccountUserId(workspaceId)
    if (!billedAccountUserId) {
      // No billing account: cache the free-tier limit and return it.
      if (redis) {
        await redis.set(
          cacheKey(workspaceId),
          String(getFreeConcurrencyLimit()),
          'EX',
          CACHE_TTL_SECONDS
        )
      } else {
        inMemoryConcurrencyCache.set(workspaceId, {
          value: getFreeConcurrencyLimit(),
          expiresAt: Date.now() + CACHE_TTL_MS,
        })
      }
      return getFreeConcurrencyLimit()
    }
    const subscription = await getHighestPrioritySubscription(billedAccountUserId)
    const limit = getPlanConcurrencyLimit(subscription?.plan, subscription?.metadata)
    if (redis) {
      await redis.set(cacheKey(workspaceId), String(limit), 'EX', CACHE_TTL_SECONDS)
    } else {
      inMemoryConcurrencyCache.set(workspaceId, {
        value: limit,
        expiresAt: Date.now() + CACHE_TTL_MS,
      })
    }
    return limit
  } catch (error) {
    // Fail open to the free tier rather than blocking executions.
    logger.error('Failed to resolve workspace concurrency limit, using free tier', {
      workspaceId,
      error,
    })
    return getFreeConcurrencyLimit()
  }
}
/**
 * Invalidate cached concurrency limits — a single workspace when an id is
 * given, otherwise every entry — in both the in-memory map and Redis.
 */
export async function resetWorkspaceConcurrencyLimitCache(workspaceId?: string): Promise<void> {
  if (workspaceId) {
    inMemoryConcurrencyCache.delete(workspaceId)
  } else {
    inMemoryConcurrencyCache.clear()
  }
  const redis = getRedisClient()
  if (!redis) {
    return
  }
  if (workspaceId) {
    await redis.del(cacheKey(workspaceId))
    return
  }
  // NOTE(review): KEYS scans the whole keyspace — acceptable at small scale,
  // consider SCAN if this key family grows.
  const staleKeys = await redis.keys('workspace-concurrency-limit:*')
  if (staleKeys.length > 0) {
    await redis.del(...staleKeys)
  }
}

View File

@@ -1,13 +1,7 @@
import type { CopilotAsyncToolStatus } from '@sim/db/schema'
import { MothershipStreamV1AsyncToolRecordStatus } from '@/lib/copilot/generated/mothership-stream-v1'
export const ASYNC_TOOL_STATUS = {
pending: 'pending',
running: 'running',
completed: 'completed',
failed: 'failed',
cancelled: 'cancelled',
delivered: 'delivered',
} as const
export const ASYNC_TOOL_STATUS = MothershipStreamV1AsyncToolRecordStatus
export type AsyncLifecycleStatus =
| typeof ASYNC_TOOL_STATUS.pending

View File

@@ -1,53 +0,0 @@
import { createLogger } from '@sim/logger'
import { CopilotFiles } from '@/lib/uploads'
import { createFileContent } from '@/lib/uploads/utils/file-utils'
const logger = createLogger('CopilotChatContext')
/**
 * Incoming attachment descriptor. Name and media-type fields exist in both
 * camelCase and snake_case variants — presumably to accept payloads from
 * different callers; confirm which callers send which shape.
 */
export interface FileAttachmentInput {
  id: string
  key: string
  name?: string
  filename?: string
  mimeType?: string
  media_type?: string
  size: number
}
/** Payload file-content entry: a discriminating `type` plus format-specific fields. */
export interface FileContent {
  type: string
  [key: string]: unknown
}
/**
 * Process file attachments into content entries for the payload.
 * Attachments whose media type produces no content are silently dropped;
 * each produced entry is tagged with the attachment's filename.
 */
export async function processFileAttachments(
  fileAttachments: FileAttachmentInput[],
  userId: string
): Promise<FileContent[]> {
  if (!Array.isArray(fileAttachments) || fileAttachments.length === 0) return []
  const requestId = `copilot-${userId}-${Date.now()}`
  const processedAttachments = await CopilotFiles.processCopilotAttachments(
    fileAttachments as Parameters<typeof CopilotFiles.processCopilotAttachments>[0],
    requestId
  )
  const contents: FileContent[] = []
  for (const { buffer, attachment } of processedAttachments) {
    const fileContent = createFileContent(buffer, attachment.media_type)
    if (!fileContent) continue
    contents.push({ ...fileContent, filename: attachment.filename })
  }
  logger.debug('Processed file attachments for payload', {
    userId,
    inputCount: fileAttachments.length,
    outputCount: contents.length,
  })
  return contents
}

View File

@@ -1,140 +0,0 @@
/**
* @vitest-environment node
*/
import { beforeEach, describe, expect, it, vi } from 'vitest'
// vi.hoisted so the hoisted vi.mock factories below can close over these fns.
const {
  orchestrateCopilotStream,
  createRunSegment,
  updateRunStatus,
  resetStreamBuffer,
  setStreamMeta,
  createStreamEventWriter,
} = vi.hoisted(() => ({
  orchestrateCopilotStream: vi.fn(),
  createRunSegment: vi.fn(),
  updateRunStatus: vi.fn(),
  resetStreamBuffer: vi.fn(),
  setStreamMeta: vi.fn(),
  createStreamEventWriter: vi.fn(),
}))
// Stub the orchestrator, run repository, and stream buffer used by createSSEStream.
vi.mock('@/lib/copilot/orchestrator', () => ({
  orchestrateCopilotStream,
}))
vi.mock('@/lib/copilot/async-runs/repository', () => ({
  createRunSegment,
  updateRunStatus,
}))
vi.mock('@/lib/copilot/orchestrator/stream/buffer', () => ({
  createStreamEventWriter,
  resetStreamBuffer,
  setStreamMeta,
}))
// Minimal chainable db stub: update().set().where() resolves without touching a DB.
vi.mock('@sim/db', () => ({
  db: {
    update: vi.fn(() => ({
      set: vi.fn(() => ({
        where: vi.fn(),
      })),
    })),
  },
}))
vi.mock('@/lib/copilot/task-events', () => ({
  taskPubSub: null,
}))
import { createSSEStream } from '@/lib/copilot/chat-streaming'
/** Read a stream to completion, discarding every chunk. */
async function drainStream(stream: ReadableStream) {
  const reader = stream.getReader()
  for (;;) {
    const { done } = await reader.read()
    if (done) return
  }
}
describe('createSSEStream terminal error handling', () => {
  // Shared writer mocks; call order between write and close is what these tests pin.
  const write = vi.fn().mockResolvedValue({ eventId: 1, streamId: 'stream-1', event: {} })
  const flush = vi.fn().mockResolvedValue(undefined)
  const close = vi.fn().mockResolvedValue(undefined)
  beforeEach(() => {
    vi.clearAllMocks()
    // clearAllMocks wipes resolved values, so re-arm them each test.
    write.mockResolvedValue({ eventId: 1, streamId: 'stream-1', event: {} })
    flush.mockResolvedValue(undefined)
    close.mockResolvedValue(undefined)
    createStreamEventWriter.mockReturnValue({ write, flush, close })
    resetStreamBuffer.mockResolvedValue(undefined)
    setStreamMeta.mockResolvedValue(undefined)
    createRunSegment.mockResolvedValue(null)
    updateRunStatus.mockResolvedValue(null)
  })
  it('writes a terminal error event before close when orchestration returns success=false', async () => {
    orchestrateCopilotStream.mockResolvedValue({
      success: false,
      error: 'resume failed',
      content: '',
      contentBlocks: [],
      toolCalls: [],
    })
    const stream = createSSEStream({
      requestPayload: { message: 'hello' },
      userId: 'user-1',
      streamId: 'stream-1',
      executionId: 'exec-1',
      runId: 'run-1',
      currentChat: null,
      isNewChat: false,
      message: 'hello',
      titleModel: 'gpt-5.4',
      requestId: 'req-1',
      orchestrateOptions: {},
    })
    await drainStream(stream)
    expect(write).toHaveBeenCalledWith(
      expect.objectContaining({
        type: 'error',
        error: 'resume failed',
      })
    )
    // The error event must land in the buffer BEFORE the writer closes (replay durability).
    expect(write.mock.invocationCallOrder.at(-1)).toBeLessThan(close.mock.invocationCallOrder[0])
  })
  it('writes the thrown terminal error event before close for replay durability', async () => {
    orchestrateCopilotStream.mockRejectedValue(new Error('kaboom'))
    const stream = createSSEStream({
      requestPayload: { message: 'hello' },
      userId: 'user-1',
      streamId: 'stream-1',
      executionId: 'exec-1',
      runId: 'run-1',
      currentChat: null,
      isNewChat: false,
      message: 'hello',
      titleModel: 'gpt-5.4',
      requestId: 'req-1',
      orchestrateOptions: {},
    })
    await drainStream(stream)
    expect(write).toHaveBeenCalledWith(
      expect.objectContaining({
        type: 'error',
        error: 'kaboom',
      })
    )
    expect(write.mock.invocationCallOrder.at(-1)).toBeLessThan(close.mock.invocationCallOrder[0])
  })
})

View File

@@ -1,579 +0,0 @@
import { db } from '@sim/db'
import { copilotChats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { createRunSegment, updateRunStatus } from '@/lib/copilot/async-runs/repository'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import type { OrchestrateStreamOptions } from '@/lib/copilot/orchestrator'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import {
createStreamEventWriter,
getStreamMeta,
resetStreamBuffer,
setStreamMeta,
} from '@/lib/copilot/orchestrator/stream/buffer'
import { taskPubSub } from '@/lib/copilot/task-events'
import { env } from '@/lib/core/config/env'
import { acquireLock, getRedisClient, releaseLock } from '@/lib/core/config/redis'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
const logger = createLogger('CopilotChatStreaming')
// Per-chat stream lock lifetime; bounds how long a crashed holder can block the chat.
const CHAT_STREAM_LOCK_TTL_SECONDS = 2 * 60 * 60
// Lifetime of the distributed abort flag in Redis.
const STREAM_ABORT_TTL_SECONDS = 10 * 60
// Presumably the cadence for polling the abort flag — used further down; confirm.
const STREAM_ABORT_POLL_MS = 1000
// Cancellation handles for a stream running in THIS process.
interface ActiveStreamEntry {
  abortController: AbortController
  userStopController: AbortController
}
const activeStreams = new Map<string, ActiveStreamEntry>()
// Tracks in-flight streams by chatId so that a subsequent request for the
// same chat can force-abort the previous stream and wait for it to settle
// before forwarding to Go.
const pendingChatStreams = new Map<
  string,
  { promise: Promise<void>; resolve: () => void; streamId: string }
>()
/**
 * Record an in-flight stream for a chat so later requests can await it.
 * Overwriting an existing entry is logged but permitted.
 */
function registerPendingChatStream(chatId: string, streamId: string): void {
  if (pendingChatStreams.has(chatId)) {
    logger.warn(`registerPendingChatStream: overwriting existing entry for chatId ${chatId}`)
  }
  // The executor runs synchronously, so `settle` is always assigned before use.
  let settle: () => void = () => {}
  const promise = new Promise<void>((resolve) => {
    settle = resolve
  })
  pendingChatStreams.set(chatId, { promise, resolve: settle, streamId })
}
/** Settle and remove the pending entry — only if it still belongs to `streamId`. */
function resolvePendingChatStream(chatId: string, streamId: string): void {
  const entry = pendingChatStreams.get(chatId)
  if (!entry || entry.streamId !== streamId) return
  entry.resolve()
  pendingChatStreams.delete(chatId)
}
/** Redis lock key serializing streams for one chat. */
function getChatStreamLockKey(chatId: string): string {
  return ['copilot', 'chat-stream-lock', chatId].join(':')
}
/** Redis key carrying the distributed abort flag for a stream. */
function getStreamAbortKey(streamId: string): string {
  return ['copilot', 'stream-abort', streamId].join(':')
}
/**
 * Wait for any in-flight stream on `chatId` to settle without force-aborting it.
 * Returns true when no stream is active (or it settles in time), false on timeout.
 *
 * Polls every 200ms, checking both the local pendingChatStreams map and (when
 * Redis is available) the distributed chat stream lock. With
 * `expectedStreamId`, only that stream counts as pending/holding the lock.
 */
export async function waitForPendingChatStream(
  chatId: string,
  timeoutMs = 5_000,
  expectedStreamId?: string
): Promise<boolean> {
  const redis = getRedisClient()
  const deadline = Date.now() + timeoutMs
  for (;;) {
    const entry = pendingChatStreams.get(chatId)
    // Locally pending only if an entry exists and (when specified) matches the expected stream.
    const localPending = !!entry && (!expectedStreamId || entry.streamId === expectedStreamId)
    if (redis) {
      try {
        const ownerStreamId = await redis.get(getChatStreamLockKey(chatId))
        // Lock counts as released when absent, or held by a different stream
        // than the one we are waiting on.
        const lockReleased =
          !ownerStreamId || (expectedStreamId !== undefined && ownerStreamId !== expectedStreamId)
        if (!localPending && lockReleased) {
          return true
        }
      } catch (error) {
        // Redis check failed — keep polling until the deadline rather than guessing.
        logger.warn('Failed to check distributed chat stream lock while waiting', {
          chatId,
          expectedStreamId,
          error: error instanceof Error ? error.message : String(error),
        })
      }
    } else if (!localPending) {
      return true
    }
    if (Date.now() >= deadline) return false
    await new Promise((resolve) => setTimeout(resolve, 200))
  }
}
/** Release the distributed chat lock (best-effort) and settle the local pending entry. */
export async function releasePendingChatStream(chatId: string, streamId: string): Promise<void> {
  const redis = getRedisClient()
  if (redis) {
    // A failed release is swallowed — the lock's TTL will expire it eventually.
    await releaseLock(getChatStreamLockKey(chatId), streamId).catch(() => false)
  }
  resolvePendingChatStream(chatId, streamId)
}
/**
 * Acquire the right to stream on `chatId` as `streamId`.
 *
 * With Redis: poll the distributed lock until acquired or `timeoutMs` elapses;
 * a lock held by a stream whose meta is terminal (complete/error/cancelled) is
 * treated as stale, released, and retried immediately. Without Redis: wait for
 * any local pending entry to settle, then register.
 *
 * @returns true when acquired (and the local pending entry is registered), false on timeout.
 */
export async function acquirePendingChatStream(
  chatId: string,
  streamId: string,
  timeoutMs = 5_000
): Promise<boolean> {
  const redis = getRedisClient()
  if (redis) {
    const deadline = Date.now() + timeoutMs
    for (;;) {
      try {
        const acquired = await acquireLock(
          getChatStreamLockKey(chatId),
          streamId,
          CHAT_STREAM_LOCK_TTL_SECONDS
        )
        if (acquired) {
          registerPendingChatStream(chatId, streamId)
          return true
        }
        // Lock busy and nothing pending locally: the holder may be a dead
        // stream from another process — check its meta for a terminal status.
        if (!pendingChatStreams.has(chatId)) {
          const ownerStreamId = await redis.get(getChatStreamLockKey(chatId))
          if (ownerStreamId) {
            const ownerMeta = await getStreamMeta(ownerStreamId)
            const ownerTerminal =
              ownerMeta?.status === 'complete' ||
              ownerMeta?.status === 'error' ||
              ownerMeta?.status === 'cancelled'
            if (ownerTerminal) {
              // Stale lock: release on the owner's behalf and retry without sleeping.
              await releaseLock(getChatStreamLockKey(chatId), ownerStreamId).catch(() => false)
              continue
            }
          }
        }
      } catch (error) {
        logger.warn('Distributed chat stream lock failed; retrying distributed coordination', {
          chatId,
          streamId,
          error: error instanceof Error ? error.message : String(error),
        })
      }
      if (Date.now() >= deadline) return false
      await new Promise((resolve) => setTimeout(resolve, 200))
    }
  }
  // Local-only fallback: wait for the current pending stream (if any) to settle.
  for (;;) {
    const existing = pendingChatStreams.get(chatId)
    if (!existing) {
      registerPendingChatStream(chatId, streamId)
      return true
    }
    const settled = await Promise.race([
      existing.promise.then(() => true),
      new Promise<boolean>((r) => setTimeout(() => r(false), timeoutMs)),
    ])
    if (!settled) return false
  }
}
/**
 * Abort a stream: publish the distributed abort flag (when Redis is available)
 * and cancel any stream running in this process. Returns true when the flag
 * was published or a local stream was aborted.
 */
export async function abortActiveStream(streamId: string): Promise<boolean> {
  let published = false
  const redis = getRedisClient()
  if (redis) {
    try {
      await redis.set(getStreamAbortKey(streamId), '1', 'EX', STREAM_ABORT_TTL_SECONDS)
      published = true
    } catch (error) {
      logger.warn('Failed to publish distributed stream abort', {
        streamId,
        error: error instanceof Error ? error.message : String(error),
      })
    }
  }
  const local = activeStreams.get(streamId)
  if (!local) {
    return published
  }
  local.userStopController.abort()
  local.abortController.abort()
  activeStreams.delete(streamId)
  return true
}
// Event types that force an immediate flush of the durable event writer
// (tool lifecycle boundaries and terminal events) instead of waiting for the
// writer's normal batching.
const FLUSH_EVENT_TYPES = new Set([
  'tool_call',
  'tool_result',
  'tool_error',
  'subagent_end',
  'structured_result',
  'subagent_result',
  'done',
  'error',
])
/**
 * Asks the copilot backend to generate a short title for a chat based on the
 * user's message. Returns the trimmed title, or null on any failure: missing
 * inputs, a non-OK response, a network error, or an empty generated title.
 */
export async function requestChatTitle(params: {
  message: string
  model: string
  provider?: string
  messageId?: string
}): Promise<string | null> {
  const { message, model, provider, messageId } = params
  if (!message || !model) {
    return null
  }
  const headers: Record<string, string> = { 'Content-Type': 'application/json' }
  if (env.COPILOT_API_KEY) {
    headers['x-api-key'] = env.COPILOT_API_KEY
  }
  try {
    const requestBody: Record<string, unknown> = { message, model }
    if (provider) {
      requestBody.provider = provider
    }
    const response = await fetch(`${SIM_AGENT_API_URL}/api/generate-chat-title`, {
      method: 'POST',
      headers,
      body: JSON.stringify(requestBody),
    })
    // Tolerate empty / non-JSON bodies; payload falls back to {}.
    const payload = await response.json().catch(() => ({}))
    if (!response.ok) {
      logger.withMetadata({ messageId }).warn('Failed to generate chat title via copilot backend', {
        status: response.status,
        error: payload,
      })
      return null
    }
    const generatedTitle = typeof payload?.title === 'string' ? payload.title.trim() : ''
    return generatedTitle.length > 0 ? generatedTitle : null
  } catch (error) {
    logger.withMetadata({ messageId }).error('Error generating chat title', error)
    return null
  }
}
/**
 * Inputs for createSSEStream: everything needed to run one copilot
 * orchestration and stream its events to a client over SSE.
 */
export interface StreamingOrchestrationParams {
  // Raw request body forwarded to the orchestrator (messageId / workflowId /
  // model / provider are read from it when present).
  requestPayload: Record<string, unknown>
  userId: string
  streamId: string
  executionId: string
  runId: string
  chatId?: string
  // Current chat row; only `.title` is read here. NOTE(review): typed `any`
  // — the concrete chat shape is defined elsewhere; confirm before tightening.
  currentChat: any
  isNewChat: boolean
  // User message used for title generation on brand-new chats.
  message: string
  titleModel: string
  titleProvider?: string
  requestId: string
  workspaceId?: string
  // Orchestrator options; onEvent is supplied internally by createSSEStream.
  orchestrateOptions: Omit<OrchestrateStreamOptions, 'onEvent'>
  // Set when the caller already claimed the chat's pending-stream slot
  // (e.g. via acquirePendingChatStream) to avoid double registration.
  pendingChatStreamAlreadyRegistered?: boolean
}
/**
 * Creates the live SSE ReadableStream for one copilot chat request.
 *
 * Responsibilities, in order:
 *  - registers the stream in activeStreams (and the chat's pending-stream
 *    slot, unless the caller already registered it);
 *  - resets the durable stream buffer and marks stream meta 'active';
 *  - records a run segment row when the request belongs to a chat;
 *  - runs orchestrateCopilotStream, forwarding every event both to the
 *    durable event writer and to the connected SSE client;
 *  - polls Redis for a distributed abort flag and emits SSE keepalives;
 *  - on completion/abort/error, closes the writer, updates stream meta and
 *    run status, releases the chat stream lock, and closes the controller.
 *
 * A client disconnect does NOT cancel orchestration: events keep flowing to
 * the durable buffer so a reconnecting client can resume the stream.
 */
export function createSSEStream(params: StreamingOrchestrationParams): ReadableStream {
  const {
    requestPayload,
    userId,
    streamId,
    executionId,
    runId,
    chatId,
    currentChat,
    isNewChat,
    message,
    titleModel,
    titleProvider,
    requestId,
    workspaceId,
    orchestrateOptions,
    pendingChatStreamAlreadyRegistered = false,
  } = params
  const messageId =
    typeof requestPayload.messageId === 'string' ? requestPayload.messageId : streamId
  const reqLogger = logger.withMetadata({ requestId, messageId })
  let eventWriter: ReturnType<typeof createStreamEventWriter> | null = null
  let clientDisconnected = false
  // abortController: hard stop for orchestration; userStopController: signals
  // a user-initiated stop; clientDisconnectedController: SSE consumer is gone
  // (orchestration continues against the durable buffer).
  const abortController = new AbortController()
  const userStopController = new AbortController()
  const clientDisconnectedController = new AbortController()
  activeStreams.set(streamId, { abortController, userStopController })
  if (chatId && !pendingChatStreamAlreadyRegistered) {
    registerPendingChatStream(chatId, streamId)
  }
  return new ReadableStream({
    async start(controller) {
      const encoder = new TextEncoder()
      // Idempotently flips the stream into "client gone" mode; enqueue
      // attempts are skipped from then on.
      const markClientDisconnected = (reason: string) => {
        if (clientDisconnected) return
        clientDisconnected = true
        if (!clientDisconnectedController.signal.aborted) {
          clientDisconnectedController.abort()
        }
        reqLogger.info('Client disconnected from live SSE stream', {
          streamId,
          runId,
          reason,
        })
      }
      // Fresh durable buffer + active meta before any events are produced.
      await resetStreamBuffer(streamId)
      await setStreamMeta(streamId, { status: 'active', userId, executionId, runId })
      if (chatId) {
        // Best-effort run bookkeeping; a failed insert must not kill the stream.
        await createRunSegment({
          id: runId,
          executionId,
          chatId,
          userId,
          workflowId: (requestPayload.workflowId as string | undefined) || null,
          workspaceId,
          streamId,
          model: (requestPayload.model as string | undefined) || null,
          provider: (requestPayload.provider as string | undefined) || null,
          requestContext: { requestId },
        }).catch((error) => {
          reqLogger.warn('Failed to create copilot run segment', {
            error: error instanceof Error ? error.message : String(error),
          })
        })
      }
      eventWriter = createStreamEventWriter(streamId)
      let localSeq = 0
      let abortPoller: ReturnType<typeof setInterval> | null = null
      const redis = getRedisClient()
      if (redis) {
        // Poll for a distributed abort flag set by abortActiveStream on
        // another instance; consume (delete) the flag once honored.
        abortPoller = setInterval(() => {
          void (async () => {
            try {
              const shouldAbort = await redis.get(getStreamAbortKey(streamId))
              if (shouldAbort && !abortController.signal.aborted) {
                userStopController.abort()
                abortController.abort()
                await redis.del(getStreamAbortKey(streamId))
              }
            } catch (error) {
              reqLogger.warn('Failed to poll distributed stream abort', {
                streamId,
                error: error instanceof Error ? error.message : String(error),
              })
            }
          })()
        }, STREAM_ABORT_POLL_MS)
      }
      // Writes an event to the durable buffer, then (if the client is still
      // attached) forwards it over SSE with a monotonically increasing
      // eventId. Buffer failures are logged but never break the live stream.
      const pushEvent = async (event: Record<string, any>) => {
        if (!eventWriter) return
        const eventId = ++localSeq
        try {
          await eventWriter.write(event)
          if (FLUSH_EVENT_TYPES.has(event.type)) {
            await eventWriter.flush()
          }
        } catch (error) {
          reqLogger.error('Failed to persist stream event', {
            eventType: event.type,
            eventId,
            error: error instanceof Error ? error.message : String(error),
          })
          // Keep the live SSE stream going even if durable buffering hiccups.
        }
        try {
          if (!clientDisconnected) {
            controller.enqueue(
              encoder.encode(`data: ${JSON.stringify({ ...event, eventId, streamId })}\n\n`)
            )
          }
        } catch {
          // enqueue throws once the consumer cancels the stream.
          markClientDisconnected('enqueue_failed')
        }
      }
      // Swallows pushEvent failures — used on error paths where a second
      // failure must not mask the original one.
      const pushEventBestEffort = async (event: Record<string, any>) => {
        try {
          await pushEvent(event)
        } catch (error) {
          reqLogger.error('Failed to push event', {
            eventType: event.type,
            error: error instanceof Error ? error.message : String(error),
          })
        }
      }
      if (chatId) {
        // Tell the client which chat this stream belongs to, first thing.
        await pushEvent({ type: 'chat_id', chatId })
      }
      if (chatId && !currentChat?.title && isNewChat) {
        // Fire-and-forget title generation for brand-new untitled chats.
        requestChatTitle({ message, model: titleModel, provider: titleProvider, messageId })
          .then(async (title) => {
            if (title) {
              await db.update(copilotChats).set({ title }).where(eq(copilotChats.id, chatId!))
              await pushEvent({ type: 'title_updated', title })
              if (workspaceId) {
                taskPubSub?.publishStatusChanged({ workspaceId, chatId: chatId!, type: 'renamed' })
              }
            }
          })
          .catch((error) => {
            reqLogger.error('Title generation failed', error)
          })
      }
      // SSE comment-line keepalive every 15s; a failed enqueue doubles as
      // disconnect detection.
      const keepaliveInterval = setInterval(() => {
        if (clientDisconnected) return
        try {
          controller.enqueue(encoder.encode(': keepalive\n\n'))
        } catch {
          markClientDisconnected('keepalive_failed')
        }
      }, 15_000)
      try {
        const result = await orchestrateCopilotStream(requestPayload, {
          ...orchestrateOptions,
          executionId,
          runId,
          abortSignal: abortController.signal,
          userStopSignal: userStopController.signal,
          clientDisconnectedSignal: clientDisconnectedController.signal,
          onEvent: async (event) => {
            await pushEvent(event)
          },
        })
        if (abortController.signal.aborted) {
          // Explicit stop: mark everything cancelled and bail.
          reqLogger.info('Stream aborted by explicit stop')
          await eventWriter.close().catch(() => {})
          await setStreamMeta(streamId, { status: 'cancelled', userId, executionId, runId })
          await updateRunStatus(runId, 'cancelled', { completedAt: new Date() }).catch(() => {})
          return
        }
        if (!result.success) {
          const errorMessage =
            result.error ||
            result.errors?.[0] ||
            'An unexpected error occurred while processing the response.'
          if (clientDisconnected) {
            reqLogger.info('Stream failed after client disconnect', {
              error: errorMessage,
            })
          }
          reqLogger.error('Orchestration returned failure', {
            error: errorMessage,
          })
          await pushEventBestEffort({
            type: 'error',
            error: errorMessage,
            data: {
              displayMessage: errorMessage,
            },
          })
          await eventWriter.close()
          await setStreamMeta(streamId, {
            status: 'error',
            userId,
            executionId,
            runId,
            error: errorMessage,
          })
          await updateRunStatus(runId, 'error', {
            completedAt: new Date(),
            error: errorMessage,
          }).catch(() => {})
          return
        }
        // Success: finalize durable state before the stream closes.
        await eventWriter.close()
        await setStreamMeta(streamId, { status: 'complete', userId, executionId, runId })
        await updateRunStatus(runId, 'complete', { completedAt: new Date() }).catch(() => {})
        if (clientDisconnected) {
          reqLogger.info('Orchestration completed after client disconnect', {
            streamId,
            runId,
          })
        }
      } catch (error) {
        if (abortController.signal.aborted) {
          // An abort frequently surfaces as a thrown error; treat as cancel.
          reqLogger.info('Stream aborted by explicit stop')
          await eventWriter.close().catch(() => {})
          await setStreamMeta(streamId, { status: 'cancelled', userId, executionId, runId })
          await updateRunStatus(runId, 'cancelled', { completedAt: new Date() }).catch(() => {})
          return
        }
        if (clientDisconnected) {
          reqLogger.info('Stream errored after client disconnect', {
            error: error instanceof Error ? error.message : 'Stream error',
          })
        }
        reqLogger.error('Orchestration error', error)
        const errorMessage = error instanceof Error ? error.message : 'Stream error'
        await pushEventBestEffort({
          type: 'error',
          error: errorMessage,
          data: {
            displayMessage: 'An unexpected error occurred while processing the response.',
          },
        })
        await eventWriter.close()
        await setStreamMeta(streamId, {
          status: 'error',
          userId,
          executionId,
          runId,
          error: errorMessage,
        })
        await updateRunStatus(runId, 'error', {
          completedAt: new Date(),
          error: errorMessage,
        }).catch(() => {})
      } finally {
        // Teardown runs on every path: timers, registries, chat lock,
        // distributed abort flag, then the controller itself.
        reqLogger.info('Closing live SSE stream', {
          streamId,
          runId,
          clientDisconnected,
          aborted: abortController.signal.aborted,
        })
        clearInterval(keepaliveInterval)
        if (abortPoller) {
          clearInterval(abortPoller)
        }
        activeStreams.delete(streamId)
        if (chatId) {
          if (redis) {
            await releaseLock(getChatStreamLockKey(chatId), streamId).catch(() => false)
          }
          resolvePendingChatStream(chatId, streamId)
        }
        if (redis) {
          await redis.del(getStreamAbortKey(streamId)).catch(() => {})
        }
        try {
          controller.close()
        } catch {
          // Controller already closed from cancel() — safe to ignore
        }
      }
    },
    cancel() {
      // Consumer walked away. Do NOT abort orchestration — only mark the
      // client gone and flush what we have to the durable buffer.
      reqLogger.info('ReadableStream cancel received from client', {
        streamId,
        runId,
      })
      if (!clientDisconnected) {
        clientDisconnected = true
        if (!clientDisconnectedController.signal.aborted) {
          clientDisconnectedController.abort()
        }
      }
      if (eventWriter) {
        eventWriter.flush().catch(() => {})
      }
    },
  })
}
// Response headers for live SSE endpoints. 'Content-Encoding': 'none'
// opts the response out of compression — presumably so intermediaries do not
// buffer the event stream; confirm against the deployment's proxy config.
export const SSE_RESPONSE_HEADERS = {
  ...SSE_HEADERS,
  'Content-Encoding': 'none',
} as const

View File

@@ -0,0 +1,63 @@
/**
 * @vitest-environment node
 */
import { describe, expect, it } from 'vitest'
import { toDisplayMessage } from './display-message'
// Coverage for the persisted → display mapping layer.
describe('display-message', () => {
  it('maps canonical tool, subagent text, and cancelled complete blocks to display blocks', () => {
    // One persisted assistant message exercising three canonical block kinds:
    // a cancelled tool call, subagent-lane text, and a cancelled completion.
    const display = toDisplayMessage({
      id: 'msg-1',
      role: 'assistant',
      content: 'done',
      timestamp: '2024-01-01T00:00:00.000Z',
      requestId: 'req-1',
      contentBlocks: [
        {
          type: 'tool',
          phase: 'call',
          toolCall: {
            id: 'tool-1',
            name: 'read',
            state: 'cancelled',
            display: { title: 'Stopped by user' },
          },
        },
        {
          type: 'text',
          lane: 'subagent',
          channel: 'assistant',
          content: 'subagent output',
        },
        {
          type: 'complete',
          status: 'cancelled',
        },
      ],
    })
    // Cancelled tool → tool_call with forced 'Stopped by user' title;
    // subagent-lane text → subagent_text; cancelled complete → stopped.
    expect(display.contentBlocks).toEqual([
      {
        type: 'tool_call',
        toolCall: {
          id: 'tool-1',
          name: 'read',
          status: 'cancelled',
          displayTitle: 'Stopped by user',
          phaseLabel: undefined,
          params: undefined,
          calledBy: undefined,
          result: undefined,
        },
      },
      {
        type: 'subagent_text',
        content: 'subagent output',
      },
      {
        type: 'stopped',
      },
    ])
  })
})

View File

@@ -0,0 +1,118 @@
import {
MothershipStreamV1CompletionStatus,
MothershipStreamV1EventType,
MothershipStreamV1SpanLifecycleEvent,
MothershipStreamV1ToolOutcome,
} from '@/lib/copilot/generated/mothership-stream-v1'
import {
type ChatMessage,
type ChatMessageAttachment,
type ChatMessageContext,
type ContentBlock,
ContentBlockType,
type ToolCallInfo,
ToolCallStatus,
} from '@/app/workspace/[workspaceId]/home/types'
import type { PersistedContentBlock, PersistedMessage } from './persisted-message'
// Maps a persisted tool state (terminal canonical outcome or in-flight local
// status) to the UI-facing ToolCallStatus. rejected surfaces as error,
// skipped as success; pending and executing both render as executing.
const STATE_TO_STATUS: Record<string, ToolCallStatus> = {
  [MothershipStreamV1ToolOutcome.success]: ToolCallStatus.success,
  [MothershipStreamV1ToolOutcome.error]: ToolCallStatus.error,
  [MothershipStreamV1ToolOutcome.cancelled]: ToolCallStatus.cancelled,
  [MothershipStreamV1ToolOutcome.rejected]: ToolCallStatus.error,
  [MothershipStreamV1ToolOutcome.skipped]: ToolCallStatus.success,
  pending: ToolCallStatus.executing,
  executing: ToolCallStatus.executing,
}
/**
 * Projects a persisted tool block into the UI's ToolCallInfo shape.
 * Returns undefined when the block carries no tool call. Unknown states fall
 * back to error; cancelled calls are always titled 'Stopped by user'.
 */
function toToolCallInfo(block: PersistedContentBlock): ToolCallInfo | undefined {
  const persisted = block.toolCall
  if (!persisted) {
    return undefined
  }
  const mapped = STATE_TO_STATUS[persisted.state]
  const status: ToolCallStatus = mapped ?? ToolCallStatus.error
  const wasCancelled = status === ToolCallStatus.cancelled
  const info: ToolCallInfo = {
    id: persisted.id,
    name: persisted.name,
    status,
    displayTitle: wasCancelled ? 'Stopped by user' : persisted.display?.title,
    phaseLabel: persisted.display?.phaseLabel,
    params: persisted.params,
    calledBy: persisted.calledBy,
    result: persisted.result,
  }
  return info
}
/**
 * Converts one persisted content block into its display-layer ContentBlock.
 * Unrecognized block types, and non-cancelled completions, degrade to a plain
 * text block carrying the persisted content.
 */
function toDisplayBlock(block: PersistedContentBlock): ContentBlock {
  const blockType = block.type
  if (blockType === MothershipStreamV1EventType.text) {
    const isSubagentLane = block.lane === 'subagent'
    return isSubagentLane
      ? { type: ContentBlockType.subagent_text, content: block.content }
      : { type: ContentBlockType.text, content: block.content }
  }
  if (blockType === MothershipStreamV1EventType.tool) {
    return { type: ContentBlockType.tool_call, toolCall: toToolCallInfo(block) }
  }
  if (blockType === MothershipStreamV1EventType.span) {
    const ended = block.lifecycle === MothershipStreamV1SpanLifecycleEvent.end
    return ended
      ? { type: ContentBlockType.subagent_end }
      : { type: ContentBlockType.subagent, content: block.content }
  }
  if (
    blockType === MothershipStreamV1EventType.complete &&
    block.status === MothershipStreamV1CompletionStatus.cancelled
  ) {
    return { type: ContentBlockType.stopped }
  }
  return { type: ContentBlockType.text, content: block.content }
}
/**
 * Maps persisted file attachments to the chat UI attachment shape.
 * Image attachments get a preview URL served through the mothership files
 * endpoint; other media types have no preview.
 */
function toDisplayAttachment(f: PersistedMessage['fileAttachments']): ChatMessageAttachment[] {
  if (!f || f.length === 0) return []
  const attachments: ChatMessageAttachment[] = []
  for (const attachment of f) {
    const isImage = attachment.media_type.startsWith('image/')
    attachments.push({
      id: attachment.id,
      filename: attachment.filename,
      media_type: attachment.media_type,
      size: attachment.size,
      previewUrl: isImage
        ? `/api/files/serve/${encodeURIComponent(attachment.key)}?context=mothership`
        : undefined,
    })
  }
  return attachments
}
/**
 * Maps persisted message contexts to the chat UI context shape, dropping
 * optional ids that are absent. Returns undefined when there are no contexts
 * so callers can omit the field entirely.
 */
function toDisplayContexts(
  contexts: PersistedMessage['contexts']
): ChatMessageContext[] | undefined {
  if (!contexts || contexts.length === 0) return undefined
  return contexts.map((ctx) => {
    const mapped: ChatMessageContext = { kind: ctx.kind, label: ctx.label }
    if (ctx.workflowId) mapped.workflowId = ctx.workflowId
    if (ctx.knowledgeId) mapped.knowledgeId = ctx.knowledgeId
    if (ctx.tableId) mapped.tableId = ctx.tableId
    if (ctx.fileId) mapped.fileId = ctx.fileId
    return mapped
  })
}
/**
 * Converts a persisted chat message into the display-layer ChatMessage.
 * Optional fields (requestId, contentBlocks, attachments, contexts) are set
 * only when present/non-empty, so the display object never carries explicit
 * `undefined` properties.
 */
export function toDisplayMessage(msg: PersistedMessage): ChatMessage {
  const display: ChatMessage = {
    id: msg.id,
    role: msg.role,
    content: msg.content,
  }
  if (msg.requestId) {
    display.requestId = msg.requestId
  }
  if (msg.contentBlocks && msg.contentBlocks.length > 0) {
    display.contentBlocks = msg.contentBlocks.map(toDisplayBlock)
  }
  const attachments = toDisplayAttachment(msg.fileAttachments)
  if (attachments.length > 0) {
    display.attachments = attachments
  }
  // Fix: `contexts` was previously assigned unconditionally, creating an
  // explicit `contexts: undefined` own-property when absent — inconsistent
  // with the conditional handling of every other optional field above.
  const contexts = toDisplayContexts(msg.contexts)
  if (contexts) {
    display.contexts = contexts
  }
  return display
}

View File

@@ -17,10 +17,6 @@ vi.mock('@/lib/billing/core/subscription', () => ({
getUserSubscriptionState: vi.fn(),
}))
vi.mock('@/lib/copilot/chat-context', () => ({
processFileAttachments: vi.fn(),
}))
vi.mock('@/lib/core/config/feature-flags', () => ({
isHosted: false,
}))
@@ -45,6 +41,12 @@ vi.mock('@/tools/registry', () => ({
name: 'Brandfetch Search',
description: 'Search for brands by company name',
},
// Catalog marks run_workflow as client / clientExecutable; registry ToolConfig has no executor fields.
run_workflow: {
id: 'run_workflow',
name: 'Run Workflow',
description: 'Run a workflow from the client',
},
},
}))
@@ -58,7 +60,7 @@ vi.mock('@/tools/params', () => ({
}))
import { getUserSubscriptionState } from '@/lib/billing/core/subscription'
import { buildIntegrationToolSchemas } from '@/lib/copilot/chat-payload'
import { buildIntegrationToolSchemas } from './payload'
const mockedGetUserSubscriptionState = getUserSubscriptionState as unknown as {
mockResolvedValue: (value: unknown) => void
@@ -102,4 +104,15 @@ describe('buildIntegrationToolSchemas', () => {
expect(gmailTool?.description).toBe('Send emails using Gmail')
expect(brandfetchTool?.description).toBe('Search for brands by company name')
})
it('emits executeLocally for dynamic client tools only', async () => {
mockedGetUserSubscriptionState.mockResolvedValue({ isFree: false })
const toolSchemas = await buildIntegrationToolSchemas('user-client')
const gmailTool = toolSchemas.find((tool) => tool.name === 'gmail_send')
const runTool = toolSchemas.find((tool) => tool.name === 'run_workflow')
expect(gmailTool?.executeLocally).toBe(false)
expect(runTool?.executeLocally).toBe(true)
})
})

View File

@@ -1,6 +1,7 @@
import { createLogger } from '@sim/logger'
import { getUserSubscriptionState } from '@/lib/billing/core/subscription'
import { getCopilotToolDescription } from '@/lib/copilot/tool-descriptions'
import { getToolEntry } from '@/lib/copilot/tool-executor/router'
import { getCopilotToolDescription } from '@/lib/copilot/tools/descriptions'
import { isHosted } from '@/lib/core/config/feature-flags'
import { createMcpToolId } from '@/lib/mcp/utils'
import { trackChatUpload } from '@/lib/uploads/contexts/workspace/workspace-file-manager'
@@ -10,7 +11,7 @@ import { getLatestVersionTools, stripVersionSuffix } from '@/tools/utils'
const logger = createLogger('CopilotChatPayload')
export interface BuildPayloadParams {
interface BuildPayloadParams {
message: string
workflowId?: string
workflowName?: string
@@ -60,16 +61,22 @@ export async function buildIntegrationToolSchemas(
const subscriptionState = await getUserSubscriptionState(userId)
shouldAppendEmailTagline = subscriptionState.isFree
} catch (error) {
reqLogger.warn('Failed to load subscription state for copilot tool descriptions', {
userId,
error: error instanceof Error ? error.message : String(error),
})
logger.warn(
messageId
? `Failed to load subscription state for copilot tool descriptions [messageId:${messageId}]`
: 'Failed to load subscription state for copilot tool descriptions',
{
userId,
error: error instanceof Error ? error.message : String(error),
}
)
}
for (const [toolId, toolConfig] of Object.entries(latestTools)) {
try {
const userSchema = createUserToolSchema(toolConfig)
const strippedName = stripVersionSuffix(toolId)
const catalogEntry = getToolEntry(strippedName)
integrationTools.push({
name: strippedName,
description: getCopilotToolDescription(toolConfig, {
@@ -79,6 +86,8 @@ export async function buildIntegrationToolSchemas(
}),
input_schema: userSchema as unknown as Record<string, unknown>,
defer_loading: true,
executeLocally:
catalogEntry?.clientExecutable === true || catalogEntry?.executor === 'client',
...(toolConfig.oauth?.required && {
oauth: {
required: true,
@@ -87,16 +96,26 @@ export async function buildIntegrationToolSchemas(
}),
})
} catch (toolError) {
reqLogger.warn('Failed to build schema for tool, skipping', {
toolId,
error: toolError instanceof Error ? toolError.message : String(toolError),
})
logger.warn(
messageId
? `Failed to build schema for tool, skipping [messageId:${messageId}]`
: 'Failed to build schema for tool, skipping',
{
toolId,
error: toolError instanceof Error ? toolError.message : String(toolError),
}
)
}
}
} catch (error) {
reqLogger.warn('Failed to build tool schemas', {
error: error instanceof Error ? error.message : String(error),
})
logger.warn(
messageId
? `Failed to build tool schemas [messageId:${messageId}]`
: 'Failed to build tool schemas',
{
error: error instanceof Error ? error.message : String(error),
}
)
}
return integrationTools
}
@@ -192,16 +211,27 @@ export async function buildCopilotRequestPayload(
description:
mcpTool.description || `MCP tool: ${mcpTool.name} (${mcpTool.serverName})`,
input_schema: mcpTool.inputSchema as unknown as Record<string, unknown>,
executeLocally: false,
})
}
if (mcpTools.length > 0) {
payloadLogger.info('Added MCP tools to copilot payload', { count: mcpTools.length })
logger.error(
userMessageId
? `Added MCP tools to copilot payload [messageId:${userMessageId}]`
: 'Added MCP tools to copilot payload',
{ count: mcpTools.length }
)
}
}
} catch (error) {
payloadLogger.warn('Failed to discover MCP tools for copilot', {
error: error instanceof Error ? error.message : String(error),
})
logger.warn(
userMessageId
? `Failed to discover MCP tools for copilot [messageId:${userMessageId}]`
: 'Failed to discover MCP tools for copilot',
{
error: error instanceof Error ? error.message : String(error),
}
)
}
}
}

View File

@@ -0,0 +1,122 @@
/**
* @vitest-environment node
*/
import { describe, expect, it } from 'vitest'
import type { OrchestratorResult } from '@/lib/copilot/request/types'
import {
buildPersistedAssistantMessage,
buildPersistedUserMessage,
normalizeMessage,
} from './persisted-message'
describe('persisted-message', () => {
it('round-trips canonical tool blocks through normalizeMessage', () => {
const result: OrchestratorResult = {
success: true,
content: 'done',
requestId: 'req-1',
contentBlocks: [
{
type: 'tool_call',
timestamp: Date.now(),
calledBy: 'build',
toolCall: {
id: 'tool-1',
name: 'read',
status: 'success',
params: { path: 'foo.txt' },
result: { success: true, output: { ok: true } },
},
},
],
toolCalls: [],
}
const persisted = buildPersistedAssistantMessage(result)
const normalized = normalizeMessage(persisted as unknown as Record<string, unknown>)
expect(normalized.contentBlocks).toEqual([
{
type: 'tool',
phase: 'call',
toolCall: {
id: 'tool-1',
name: 'read',
state: 'success',
params: { path: 'foo.txt' },
result: { success: true, output: { ok: true } },
calledBy: 'build',
},
},
{
type: 'text',
channel: 'assistant',
content: 'done',
},
])
})
it('normalizes legacy tool_call and top-level toolCalls shapes', () => {
const normalized = normalizeMessage({
id: 'msg-1',
role: 'assistant',
content: 'hello',
timestamp: '2024-01-01T00:00:00.000Z',
contentBlocks: [
{
type: 'tool_call',
toolCall: {
id: 'tool-1',
name: 'read',
state: 'cancelled',
display: { text: 'Stopped by user' },
},
},
],
toolCalls: [
{
id: 'tool-2',
name: 'glob',
status: 'success',
result: { matches: [] },
},
],
})
expect(normalized.contentBlocks).toEqual([
{
type: 'tool',
phase: 'call',
toolCall: {
id: 'tool-1',
name: 'read',
state: 'cancelled',
display: { title: 'Stopped by user' },
},
},
{
type: 'text',
channel: 'assistant',
content: 'hello',
},
])
})
it('builds normalized user messages with stripped optional empties', () => {
const msg = buildPersistedUserMessage({
id: 'user-1',
content: 'hello',
fileAttachments: [],
contexts: [],
})
expect(msg).toMatchObject({
id: 'user-1',
role: 'user',
content: 'hello',
})
expect(msg.fileAttachments).toBeUndefined()
expect(msg.contexts).toBeUndefined()
})
})

View File

@@ -0,0 +1,469 @@
import {
MothershipStreamV1CompletionStatus,
MothershipStreamV1EventType,
MothershipStreamV1SpanLifecycleEvent,
MothershipStreamV1SpanPayloadKind,
type MothershipStreamV1StreamScope,
MothershipStreamV1TextChannel,
MothershipStreamV1ToolOutcome,
MothershipStreamV1ToolPhase,
} from '@/lib/copilot/generated/mothership-stream-v1'
import type {
ContentBlock,
LocalToolCallStatus,
OrchestratorResult,
} from '@/lib/copilot/request/types'
// Tool state as persisted: an in-flight local status or a terminal canonical
// outcome from the mothership stream contract.
export type PersistedToolState = LocalToolCallStatus | MothershipStreamV1ToolOutcome
// Canonical persisted snapshot of a single tool invocation.
export interface PersistedToolCall {
  id: string
  name: string
  state: PersistedToolState
  params?: Record<string, unknown>
  result?: { success: boolean; output?: unknown; error?: string }
  error?: string
  calledBy?: string
  durationMs?: number
  // UI presentation hints (title / phase label) captured at persist time.
  display?: { title?: string; phaseLabel?: string }
}
// One canonical content block inside a persisted message. `type` selects
// which of the optional fields are meaningful (e.g. toolCall for tool blocks,
// lifecycle/kind for span blocks, status for complete blocks).
export interface PersistedContentBlock {
  type: MothershipStreamV1EventType
  lane?: MothershipStreamV1StreamScope['lane']
  channel?: MothershipStreamV1TextChannel
  phase?: MothershipStreamV1ToolPhase
  kind?: MothershipStreamV1SpanPayloadKind
  lifecycle?: MothershipStreamV1SpanLifecycleEvent
  status?: MothershipStreamV1CompletionStatus
  content?: string
  toolCall?: PersistedToolCall
}
// File attachment metadata stored alongside a user message.
export interface PersistedFileAttachment {
  id: string
  key: string
  filename: string
  media_type: string
  size: number
}
// A context reference attached to a message; at most one of the optional ids
// is expected to be set depending on `kind`.
export interface PersistedMessageContext {
  kind: string
  label: string
  workflowId?: string
  knowledgeId?: string
  tableId?: string
  fileId?: string
}
// The canonical persisted chat message shape (stored as JSONB).
export interface PersistedMessage {
  id: string
  role: 'user' | 'assistant'
  content: string
  timestamp: string
  requestId?: string
  contentBlocks?: PersistedContentBlock[]
  fileAttachments?: PersistedFileAttachment[]
  contexts?: PersistedMessageContext[]
}
// ---------------------------------------------------------------------------
// Write: OrchestratorResult → PersistedMessage
// ---------------------------------------------------------------------------
/**
 * Derives the persisted tool state for a tool_call content block.
 * An explicit result.success flag takes precedence over the block's own
 * status; otherwise the local status is carried through unchanged.
 * NOTE(review): a cancelled call that still carries a result will persist as
 * success/error rather than cancelled — confirm this is intended.
 */
function resolveToolState(block: ContentBlock): PersistedToolState {
  const tc = block.toolCall
  if (!tc) return 'pending'
  if (tc.result?.success !== undefined) {
    return tc.result.success
      ? MothershipStreamV1ToolOutcome.success
      : MothershipStreamV1ToolOutcome.error
  }
  return tc.status as PersistedToolState
}
/**
 * Maps one orchestrator ContentBlock to its canonical persisted form:
 * text/thinking → text blocks on the matching channel, subagent markers →
 * span blocks, subagent text → subagent-lane text, tool_call → tool blocks.
 * Unknown types degrade to a plain text block.
 */
function mapContentBlock(block: ContentBlock): PersistedContentBlock {
  switch (block.type) {
    case 'text':
      return {
        type: MothershipStreamV1EventType.text,
        channel: MothershipStreamV1TextChannel.assistant,
        content: block.content,
      }
    case 'thinking':
      return {
        type: MothershipStreamV1EventType.text,
        channel: MothershipStreamV1TextChannel.thinking,
        content: block.content,
      }
    case 'subagent':
      return {
        type: MothershipStreamV1EventType.span,
        kind: MothershipStreamV1SpanPayloadKind.subagent,
        lifecycle: MothershipStreamV1SpanLifecycleEvent.start,
        content: block.content,
      }
    case 'subagent_text':
      return {
        type: MothershipStreamV1EventType.text,
        lane: 'subagent',
        channel: MothershipStreamV1TextChannel.assistant,
        content: block.content,
      }
    case 'tool_call': {
      // A tool_call block without a toolCall payload persists as a bare tool
      // block carrying only the content.
      if (!block.toolCall) {
        return {
          type: MothershipStreamV1EventType.tool,
          phase: MothershipStreamV1ToolPhase.call,
          content: block.content,
        }
      }
      const state = resolveToolState(block)
      const isSubagentTool = !!block.calledBy
      const isNonTerminal =
        state === MothershipStreamV1ToolOutcome.cancelled ||
        state === 'pending' ||
        state === 'executing'
      // Subagent tool calls that never reached a terminal state drop their
      // params and result — presumably to avoid persisting partial data from
      // an interrupted subagent; confirm against the resume flow.
      const toolCall: PersistedToolCall = {
        id: block.toolCall.id,
        name: block.toolCall.name,
        state,
        ...(isSubagentTool && isNonTerminal ? {} : { result: block.toolCall.result }),
        ...(isSubagentTool && isNonTerminal
          ? {}
          : block.toolCall.params
            ? { params: block.toolCall.params }
            : {}),
        ...(block.calledBy ? { calledBy: block.calledBy } : {}),
      }
      return {
        type: MothershipStreamV1EventType.tool,
        phase: MothershipStreamV1ToolPhase.call,
        toolCall,
      }
    }
    default:
      return { type: MothershipStreamV1EventType.text, content: block.content }
  }
}
/**
 * Builds the persisted assistant message for a finished orchestrator run.
 * requestId precedence: the explicit argument wins over the result's own
 * requestId. An empty content-block list is omitted rather than persisted.
 */
export function buildPersistedAssistantMessage(
  result: OrchestratorResult,
  requestId?: string
): PersistedMessage {
  const resolvedRequestId = requestId || result.requestId
  const message: PersistedMessage = {
    id: crypto.randomUUID(),
    role: 'assistant',
    content: result.content,
    timestamp: new Date().toISOString(),
  }
  if (resolvedRequestId) {
    message.requestId = resolvedRequestId
  }
  const blocks = result.contentBlocks
  if (blocks.length > 0) {
    message.contentBlocks = blocks.map(mapContentBlock)
  }
  return message
}
// Inputs for buildPersistedUserMessage. Empty attachment/context arrays are
// treated the same as absent ones.
export interface UserMessageParams {
  id: string
  content: string
  fileAttachments?: PersistedFileAttachment[]
  contexts?: PersistedMessageContext[]
}
export function buildPersistedUserMessage(params: UserMessageParams): PersistedMessage {
const message: PersistedMessage = {
id: params.id,
role: 'user',
content: params.content,
timestamp: new Date().toISOString(),
}
if (params.fileAttachments && params.fileAttachments.length > 0) {
message.fileAttachments = params.fileAttachments
}
if (params.contexts && params.contexts.length > 0) {
message.contexts = params.contexts.map((c) => ({
kind: c.kind,
label: c.label,
...(c.workflowId ? { workflowId: c.workflowId } : {}),
...(c.knowledgeId ? { knowledgeId: c.knowledgeId } : {}),
...(c.tableId ? { tableId: c.tableId } : {}),
...(c.fileId ? { fileId: c.fileId } : {}),
}))
}
return message
}
// ---------------------------------------------------------------------------
// Read: raw JSONB → PersistedMessage
// Handles both canonical (type: 'tool', 'text', 'span', 'complete') and
// legacy (type: 'tool_call', 'thinking', 'subagent', 'stopped') blocks.
// ---------------------------------------------------------------------------
// Block type strings that already use the canonical event-type vocabulary;
// everything else is treated as a legacy shape.
const CANONICAL_BLOCK_TYPES: Set<string> = new Set(Object.values(MothershipStreamV1EventType))
// Loose shape of a content block as read straight from JSONB, before any
// normalization — fields from both canonical and legacy formats.
interface RawBlock {
  type: string
  lane?: string
  content?: string
  channel?: string
  phase?: string
  kind?: string
  lifecycle?: string
  status?: string
  toolCall?: {
    id?: string
    name?: string
    state?: string
    params?: Record<string, unknown>
    result?: { success: boolean; output?: unknown; error?: string }
    // Legacy rows stored the display title under `text`; canonical rows use
    // `title`.
    display?: { text?: string; title?: string; phaseLabel?: string }
    calledBy?: string
    durationMs?: number
    error?: string
  } | null
}
// Shape of entries in the legacy top-level `toolCalls` array, where `result`
// held the raw tool output rather than the canonical result envelope.
interface LegacyToolCall {
  id: string
  name: string
  status: string
  params?: Record<string, unknown>
  result?: unknown
  error?: string
  durationMs?: number
}
// Identity map for known tool states; lookups of unknown states miss and are
// handled by normalizeToolState's error fallback.
const OUTCOME_NORMALIZATION: Record<string, PersistedToolState> = {
  [MothershipStreamV1ToolOutcome.success]: MothershipStreamV1ToolOutcome.success,
  [MothershipStreamV1ToolOutcome.error]: MothershipStreamV1ToolOutcome.error,
  [MothershipStreamV1ToolOutcome.cancelled]: MothershipStreamV1ToolOutcome.cancelled,
  [MothershipStreamV1ToolOutcome.skipped]: MothershipStreamV1ToolOutcome.skipped,
  [MothershipStreamV1ToolOutcome.rejected]: MothershipStreamV1ToolOutcome.rejected,
  pending: 'pending',
  executing: 'executing',
}
/**
 * Normalizes a raw persisted tool state. Missing/empty states default to
 * 'pending'; unrecognized states collapse to the error outcome.
 */
function normalizeToolState(state: string | undefined): PersistedToolState {
  const normalized = state ? OUTCOME_NORMALIZATION[state] : 'pending'
  return normalized ?? MothershipStreamV1ToolOutcome.error
}
// A canonical block is one whose type is a MothershipStreamV1EventType value;
// anything else is routed through the legacy normalizer.
function isCanonicalBlock(block: RawBlock): boolean {
  return CANONICAL_BLOCK_TYPES.has(block.type)
}
/**
 * Normalizes a block that is already in canonical form: copies recognized
 * fields, narrowing them to the generated enum types, and strips anything
 * falsy/unknown. Tool calls get defaulted ids/names and a normalized state;
 * a legacy display.text is accepted as a fallback for display.title.
 */
function normalizeCanonicalBlock(block: RawBlock): PersistedContentBlock {
  const result: PersistedContentBlock = {
    type: block.type as MothershipStreamV1EventType,
  }
  // Only the two known lanes survive; anything else is dropped.
  if (block.lane === 'main' || block.lane === 'subagent') {
    result.lane = block.lane
  }
  if (block.content !== undefined) result.content = block.content
  if (block.channel) result.channel = block.channel as MothershipStreamV1TextChannel
  if (block.phase) result.phase = block.phase as MothershipStreamV1ToolPhase
  if (block.kind) result.kind = block.kind as MothershipStreamV1SpanPayloadKind
  if (block.lifecycle) result.lifecycle = block.lifecycle as MothershipStreamV1SpanLifecycleEvent
  if (block.status) result.status = block.status as MothershipStreamV1CompletionStatus
  if (block.toolCall) {
    result.toolCall = {
      id: block.toolCall.id ?? '',
      name: block.toolCall.name ?? '',
      state: normalizeToolState(block.toolCall.state),
      ...(block.toolCall.params ? { params: block.toolCall.params } : {}),
      ...(block.toolCall.result ? { result: block.toolCall.result } : {}),
      ...(block.toolCall.calledBy ? { calledBy: block.toolCall.calledBy } : {}),
      ...(block.toolCall.error ? { error: block.toolCall.error } : {}),
      ...(block.toolCall.durationMs ? { durationMs: block.toolCall.durationMs } : {}),
      ...(block.toolCall.display
        ? {
            display: {
              // Prefer canonical `title`, fall back to legacy `text`.
              title: block.toolCall.display.title ?? block.toolCall.display.text,
              phaseLabel: block.toolCall.display.phaseLabel,
            },
          }
        : {}),
    }
  }
  return result
}
function normalizeLegacyBlock(block: RawBlock): PersistedContentBlock {
if (block.type === 'tool_call' && block.toolCall) {
return {
type: MothershipStreamV1EventType.tool,
phase: MothershipStreamV1ToolPhase.call,
toolCall: {
id: block.toolCall.id ?? '',
name: block.toolCall.name ?? '',
state: normalizeToolState(block.toolCall.state),
...(block.toolCall.params ? { params: block.toolCall.params } : {}),
...(block.toolCall.result ? { result: block.toolCall.result } : {}),
...(block.toolCall.calledBy ? { calledBy: block.toolCall.calledBy } : {}),
...(block.toolCall.display ? { display: { title: block.toolCall.display.text } } : {}),
},
}
}
if (block.type === 'thinking') {
return {
type: MothershipStreamV1EventType.text,
channel: MothershipStreamV1TextChannel.thinking,
content: block.content,
}
}
if (block.type === 'subagent' || block.type === 'subagent_text') {
if (block.type === 'subagent_text') {
return {
type: MothershipStreamV1EventType.text,
lane: 'subagent',
channel: MothershipStreamV1TextChannel.assistant,
content: block.content,
}
}
return {
type: MothershipStreamV1EventType.span,
kind: MothershipStreamV1SpanPayloadKind.subagent,
lifecycle: MothershipStreamV1SpanLifecycleEvent.start,
content: block.content,
}
}
if (block.type === 'subagent_end') {
return {
type: MothershipStreamV1EventType.span,
kind: MothershipStreamV1SpanPayloadKind.subagent,
lifecycle: MothershipStreamV1SpanLifecycleEvent.end,
}
}
if (block.type === 'stopped') {
return {
type: MothershipStreamV1EventType.complete,
status: MothershipStreamV1CompletionStatus.cancelled,
}
}
return {
type: MothershipStreamV1EventType.text,
channel: MothershipStreamV1TextChannel.assistant,
content: block.content,
}
}
// Routes a raw block to the canonical or legacy normalizer.
function normalizeBlock(block: RawBlock): PersistedContentBlock {
  if (isCanonicalBlock(block)) {
    return normalizeCanonicalBlock(block)
  }
  return normalizeLegacyBlock(block)
}
/**
 * Converts a legacy top-level tool call record into a canonical tool
 * content block. A result payload is attached only when the legacy call
 * actually produced one (`result != null`); its success flag is derived
 * from the legacy status.
 */
function normalizeLegacyToolCall(tc: LegacyToolCall): PersistedContentBlock {
  const succeeded = tc.status === MothershipStreamV1ToolOutcome.success
  // Pre-build the optional result fragment so the literal below stays flat.
  const resultFragment =
    tc.result != null
      ? {
          result: {
            success: succeeded,
            output: tc.result,
            ...(tc.error ? { error: tc.error } : {}),
          },
        }
      : {}
  return {
    type: MothershipStreamV1EventType.tool,
    phase: MothershipStreamV1ToolPhase.call,
    toolCall: {
      id: tc.id,
      name: tc.name,
      state: normalizeToolState(tc.status),
      ...(tc.params ? { params: tc.params } : {}),
      ...resultFragment,
      ...(tc.durationMs ? { durationMs: tc.durationMs } : {}),
    },
  }
}
/**
 * Returns true when the raw blocks already include any tool block, in
 * either the legacy ('tool_call') or canonical ('tool') form.
 */
function blocksContainTools(blocks: RawBlock[]): boolean {
  for (const block of blocks) {
    if (block.type === 'tool_call' || block.type === MothershipStreamV1EventType.tool) {
      return true
    }
  }
  return false
}
/**
 * Normalizes every raw block and, when none of the normalized blocks
 * carries visible (non-thinking, non-empty) assistant text, appends the
 * message's top-level content as a trailing assistant text block so the
 * text is not lost.
 */
function normalizeBlocks(rawBlocks: RawBlock[], messageContent: string): PersistedContentBlock[] {
  const normalized = rawBlocks.map(normalizeBlock)
  // Visible assistant text = a text block outside the thinking channel
  // whose content is non-empty after trimming.
  const isVisibleAssistantText = (b: PersistedContentBlock) =>
    b.type === MothershipStreamV1EventType.text &&
    b.channel !== MothershipStreamV1TextChannel.thinking &&
    Boolean(b.content?.trim())
  if (!normalized.some(isVisibleAssistantText) && messageContent.trim()) {
    normalized.push({
      type: MothershipStreamV1EventType.text,
      channel: MothershipStreamV1TextChannel.assistant,
      content: messageContent,
    })
  }
  return normalized
}
/**
 * Normalizes a raw persisted message record into a PersistedMessage.
 *
 * Missing identity fields are defaulted (fresh UUID, 'assistant' role,
 * empty content, current timestamp). Content blocks are preferred over
 * legacy top-level tool calls; the legacy calls are merged in only when
 * the blocks do not already contain tool blocks. Attachments and contexts
 * are copied through when present, with contexts reduced to their known
 * fields.
 */
export function normalizeMessage(raw: Record<string, unknown>): PersistedMessage {
  const msg: PersistedMessage = {
    id: (raw.id as string) ?? crypto.randomUUID(),
    role: (raw.role as 'user' | 'assistant') ?? 'assistant',
    content: (raw.content as string) ?? '',
    timestamp: (raw.timestamp as string) ?? new Date().toISOString(),
  }
  if (typeof raw.requestId === 'string' && raw.requestId) {
    msg.requestId = raw.requestId
  }
  // Narrow both legacy-shaped collections up front so no non-null
  // assertions are needed below.
  const blocks = Array.isArray(raw.contentBlocks) ? (raw.contentBlocks as RawBlock[]) : []
  const toolCalls = Array.isArray(raw.toolCalls) ? (raw.toolCalls as LegacyToolCall[]) : []
  if (blocks.length > 0) {
    msg.contentBlocks = normalizeBlocks(blocks, msg.content)
    // Only merge legacy tool calls when the blocks themselves did not
    // already carry tool blocks (avoids duplicating the same calls).
    if (toolCalls.length > 0 && !blocksContainTools(blocks)) {
      msg.contentBlocks.push(...toolCalls.map(normalizeLegacyToolCall))
    }
  } else if (toolCalls.length > 0) {
    msg.contentBlocks = toolCalls.map(normalizeLegacyToolCall)
    // Tool-call-only messages still need their text surfaced as a block.
    if (msg.content.trim()) {
      msg.contentBlocks.push({
        type: MothershipStreamV1EventType.text,
        channel: MothershipStreamV1TextChannel.assistant,
        content: msg.content,
      })
    }
  }
  const attachments = raw.fileAttachments as PersistedFileAttachment[] | undefined
  if (Array.isArray(attachments) && attachments.length > 0) {
    msg.fileAttachments = attachments
  }
  const contexts = raw.contexts as PersistedMessageContext[] | undefined
  if (Array.isArray(contexts) && contexts.length > 0) {
    // Copy only the recognized context fields, dropping optional ids
    // that are absent.
    msg.contexts = contexts.map((ctx) => ({
      kind: ctx.kind,
      label: ctx.label,
      ...(ctx.workflowId ? { workflowId: ctx.workflowId } : {}),
      ...(ctx.knowledgeId ? { knowledgeId: ctx.knowledgeId } : {}),
      ...(ctx.tableId ? { tableId: ctx.tableId } : {}),
      ...(ctx.fileId ? { fileId: ctx.fileId } : {}),
    }))
  }
  return msg
}

View File

@@ -19,9 +19,9 @@ import { checkKnowledgeBaseAccess } from '@/app/api/knowledge/utils'
import { isHiddenFromDisplay } from '@/blocks/types'
import { getUserPermissionConfig } from '@/ee/access-control/utils/permission-check'
import { escapeRegExp } from '@/executor/constants'
import type { ChatContext } from '@/stores/panel/copilot/types'
import type { ChatContext } from '@/stores/panel'
export type AgentContextType =
type AgentContextType =
| 'past_chat'
| 'workflow'
| 'current_workflow'
@@ -35,7 +35,7 @@ export type AgentContextType =
| 'docs'
| 'active_resource'
export interface AgentContext {
interface AgentContext {
type: AgentContextType
tag: string
content: string
@@ -43,62 +43,6 @@ export interface AgentContext {
const logger = createLogger('ProcessContents')
export async function processContexts(
contexts: ChatContext[] | undefined
): Promise<AgentContext[]> {
if (!Array.isArray(contexts) || contexts.length === 0) return []
const tasks = contexts.map(async (ctx) => {
try {
if (ctx.kind === 'past_chat') {
return await processPastChatViaApi(ctx.chatId, ctx.label ? `@${ctx.label}` : '@')
}
if ((ctx.kind === 'workflow' || ctx.kind === 'current_workflow') && ctx.workflowId) {
return await processWorkflowFromDb(
ctx.workflowId,
undefined,
ctx.label ? `@${ctx.label}` : '@',
ctx.kind
)
}
if (ctx.kind === 'knowledge' && ctx.knowledgeId) {
return await processKnowledgeFromDb(
ctx.knowledgeId,
undefined,
ctx.label ? `@${ctx.label}` : '@'
)
}
if (ctx.kind === 'blocks' && ctx.blockIds?.length > 0) {
return await processBlockMetadata(ctx.blockIds[0], ctx.label ? `@${ctx.label}` : '@')
}
if (ctx.kind === 'templates' && ctx.templateId) {
return await processTemplateFromDb(
ctx.templateId,
undefined,
ctx.label ? `@${ctx.label}` : '@'
)
}
if (ctx.kind === 'logs' && ctx.executionId) {
return await processExecutionLogFromDb(
ctx.executionId,
undefined,
ctx.label ? `@${ctx.label}` : '@'
)
}
if (ctx.kind === 'workflow_block' && ctx.workflowId && ctx.blockId) {
return await processWorkflowBlockFromDb(ctx.workflowId, undefined, ctx.blockId, ctx.label)
}
// Other kinds can be added here: workflow, blocks, logs, knowledge, templates, docs
return null
} catch (error) {
logger.error('Failed processing context', { ctx, error })
return null
}
})
const results = await Promise.all(tasks)
return results.filter((r): r is AgentContext => !!r) as AgentContext[]
}
// Server-side variant (recommended for use in API routes)
export async function processContextsServer(
contexts: ChatContext[] | undefined,
@@ -265,7 +209,7 @@ async function processPastChatFromDb(
currentWorkspaceId?: string
): Promise<AgentContext | null> {
try {
const { getAccessibleCopilotChat } = await import('@/lib/copilot/chat-lifecycle')
const { getAccessibleCopilotChat } = await import('./lifecycle')
const chat = await getAccessibleCopilotChat(chatId, userId)
if (!chat) {
return null

View File

@@ -1,48 +0,0 @@
import type {
  ChatContext,
  CopilotToolCall,
  SubAgentContentBlock,
} from '@/stores/panel/copilot/types'

/**
 * A content block used in copilot messages and during streaming.
 * Uses a literal type union for `type` to stay compatible with CopilotMessage.
 */
export type ContentBlockType = 'text' | 'thinking' | 'tool_call' | 'contexts'

export interface ClientContentBlock {
  type: ContentBlockType
  // Text payload — presumably used by 'text'/'thinking' blocks; confirm against stream handlers.
  content?: string
  timestamp: number
  // Tool call payload — presumably only set when type === 'tool_call'.
  toolCall?: CopilotToolCall | null
  startTime?: number
  duration?: number
  // Context attachments — presumably only set when type === 'contexts'.
  contexts?: ChatContext[]
}

/**
 * Mutable per-message state accumulated while a copilot response streams in.
 *
 * NOTE(review): field semantics below are inferred from names only —
 * confirm against the streaming code before relying on them.
 */
export interface StreamingContext {
  messageId: string
  requestId?: string
  accumulatedContent: string
  contentBlocks: ClientContentBlock[]
  currentTextBlock: ClientContentBlock | null
  isInThinkingBlock: boolean
  currentThinkingBlock: ClientContentBlock | null
  isInDesignWorkflowBlock: boolean
  designWorkflowContent: string
  pendingContent: string
  newChatId?: string
  doneEventCount: number
  streamComplete?: boolean
  wasAborted?: boolean
  suppressContinueOption?: boolean
  subAgentParentToolCallId?: string
  subAgentParentStack: string[]
  subAgentContent: Record<string, string>
  subAgentToolCalls: Record<string, CopilotToolCall[]>
  subAgentBlocks: Record<string, SubAgentContentBlock[]>
  suppressStreamingUpdates?: boolean
  activeCompactionId?: string
}

// Alias kept so callers can refer to the client-side name explicitly.
export type ClientStreamingContext = StreamingContext

View File

@@ -10,13 +10,6 @@ export const SIM_AGENT_API_URL =
? rawAgentUrl
: SIM_AGENT_API_URL_DEFAULT
// ---------------------------------------------------------------------------
// Redis key prefixes
// ---------------------------------------------------------------------------
/** Redis key prefix for copilot SSE stream buffers. */
export const REDIS_COPILOT_STREAM_PREFIX = 'copilot_stream:'
// ---------------------------------------------------------------------------
// Timeouts
// ---------------------------------------------------------------------------
@@ -31,29 +24,9 @@ export const STREAM_TIMEOUT_MS = 3_600_000
// Stream resume
// ---------------------------------------------------------------------------
/** Maximum number of resume attempts before giving up. */
export const MAX_RESUME_ATTEMPTS = 3
/** SessionStorage key for persisting active stream metadata across page reloads. */
export const STREAM_STORAGE_KEY = 'copilot_active_stream'
// ---------------------------------------------------------------------------
// Client-side streaming batching
// ---------------------------------------------------------------------------
/** Delay (ms) before processing the next queued message after stream completion. */
export const QUEUE_PROCESS_DELAY_MS = 100
/** Delay (ms) before invalidating subscription queries after stream completion. */
export const SUBSCRIPTION_INVALIDATE_DELAY_MS = 1_000
// ---------------------------------------------------------------------------
// UI helpers
// ---------------------------------------------------------------------------
/** Maximum character length for an optimistic chat title derived from a user message. */
export const OPTIMISTIC_TITLE_MAX_LENGTH = 50
// ---------------------------------------------------------------------------
// Copilot API paths (client-side fetch targets)
// ---------------------------------------------------------------------------
@@ -64,39 +37,23 @@ export const COPILOT_CHAT_API_PATH = '/api/copilot/chat'
/** POST — send a workspace-scoped chat message (mothership). */
export const MOTHERSHIP_CHAT_API_PATH = '/api/mothership/chat'
/** GET — resume/replay a copilot SSE stream. */
export const COPILOT_CHAT_STREAM_API_PATH = '/api/copilot/chat/stream'
/** POST — persist chat messages / plan artifact / config. */
export const COPILOT_UPDATE_MESSAGES_API_PATH = '/api/copilot/chat/update-messages'
/** DELETE — delete a copilot chat. */
export const COPILOT_DELETE_CHAT_API_PATH = '/api/copilot/chat/delete'
/** POST — confirm or reject a tool call. */
export const COPILOT_CONFIRM_API_PATH = '/api/copilot/confirm'
/** POST — forward diff-accepted/rejected stats to the copilot backend. */
export const COPILOT_STATS_API_PATH = '/api/copilot/stats'
/** GET — load checkpoints for a chat. */
export const COPILOT_CHECKPOINTS_API_PATH = '/api/copilot/checkpoints'
/** POST — revert to a checkpoint. */
export const COPILOT_CHECKPOINTS_REVERT_API_PATH = '/api/copilot/checkpoints/revert'
/** GET/POST/DELETE — manage auto-allowed tools. */
export const COPILOT_AUTO_ALLOWED_TOOLS_API_PATH = '/api/copilot/auto-allowed-tools'
/** GET — fetch dynamically available copilot models. */
export const COPILOT_MODELS_API_PATH = '/api/copilot/models'
/** GET — fetch user credentials for masking. */
export const COPILOT_CREDENTIALS_API_PATH = '/api/copilot/credentials'
// ---------------------------------------------------------------------------
// Dedup limits
// ---------------------------------------------------------------------------
/** Maximum entries in the in-memory SSE tool-event dedup cache. */
export const STREAM_BUFFER_MAX_DEDUP_ENTRIES = 1_000
// ---------------------------------------------------------------------------
// Copilot modes
// ---------------------------------------------------------------------------
export const COPILOT_MODES = ['ask', 'build', 'plan'] as const
export const COPILOT_REQUEST_MODES = ['ask', 'build', 'plan', 'agent'] as const

View File

@@ -0,0 +1,295 @@
// AUTO-GENERATED FILE. DO NOT EDIT.
//
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1EventType".
*/
export type MothershipStreamV1EventType =
| 'session'
| 'text'
| 'tool'
| 'span'
| 'resource'
| 'run'
| 'error'
| 'complete'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1AsyncToolRecordStatus".
*/
export type MothershipStreamV1AsyncToolRecordStatus =
| 'pending'
| 'running'
| 'completed'
| 'failed'
| 'cancelled'
| 'delivered'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1CompletionStatus".
*/
export type MothershipStreamV1CompletionStatus = 'complete' | 'error' | 'cancelled'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ResourceOp".
*/
export type MothershipStreamV1ResourceOp = 'upsert' | 'remove'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1RunKind".
*/
export type MothershipStreamV1RunKind =
| 'checkpoint_pause'
| 'resumed'
| 'compaction_start'
| 'compaction_done'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1SessionKind".
*/
export type MothershipStreamV1SessionKind = 'trace' | 'chat' | 'title' | 'start'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1SpanKind".
*/
export type MothershipStreamV1SpanKind = 'subagent'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1SpanLifecycleEvent".
*/
export type MothershipStreamV1SpanLifecycleEvent = 'start' | 'end'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1SpanPayloadKind".
*/
export type MothershipStreamV1SpanPayloadKind = 'subagent' | 'structured_result' | 'subagent_result'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1TextChannel".
*/
export type MothershipStreamV1TextChannel = 'assistant' | 'thinking'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ToolExecutor".
*/
export type MothershipStreamV1ToolExecutor = 'go' | 'sim' | 'client'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ToolMode".
*/
export type MothershipStreamV1ToolMode = 'sync' | 'async'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ToolPhase".
*/
export type MothershipStreamV1ToolPhase = 'call' | 'args_delta' | 'result'
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ToolOutcome".
*/
export type MothershipStreamV1ToolOutcome =
| 'success'
| 'error'
| 'cancelled'
| 'skipped'
| 'rejected'
/**
* Shared execution-oriented mothership stream contract from Go to Sim.
*/
export interface MothershipStreamV1EventEnvelope {
payload: MothershipStreamV1AdditionalPropertiesMap
scope?: MothershipStreamV1StreamScope
seq: number
stream: MothershipStreamV1StreamRef
trace?: MothershipStreamV1Trace
ts: string
type: MothershipStreamV1EventType
v: number
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1AdditionalPropertiesMap".
*/
export interface MothershipStreamV1AdditionalPropertiesMap {
[k: string]: unknown
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1StreamScope".
*/
export interface MothershipStreamV1StreamScope {
agentId?: string
lane: 'main' | 'subagent'
parentToolCallId?: string
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1StreamRef".
*/
export interface MothershipStreamV1StreamRef {
chatId?: string
cursor?: string
streamId: string
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1Trace".
*/
export interface MothershipStreamV1Trace {
requestId: string
spanId?: string
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1CheckpointPausePayload".
*/
export interface MothershipStreamV1CheckpointPausePayload {
checkpointId: string
executionId: string
pendingToolCallIds: string[]
runId: string
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ResumeRequest".
*/
export interface MothershipStreamV1ResumeRequest {
checkpointId: string
results: MothershipStreamV1ResumeToolResult[]
streamId: string
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ResumeToolResult".
*/
export interface MothershipStreamV1ResumeToolResult {
error?: string
output?: unknown
success: boolean
toolCallId: string
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1StreamCursor".
*/
export interface MothershipStreamV1StreamCursor {
cursor: string
seq: number
streamId: string
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ToolCallDescriptor".
*/
export interface MothershipStreamV1ToolCallDescriptor {
arguments?: MothershipStreamV1AdditionalPropertiesMap
argumentsDelta?: string
executor: MothershipStreamV1ToolExecutor
mode: MothershipStreamV1ToolMode
partial?: boolean
phase: MothershipStreamV1ToolPhase
requiresConfirmation?: boolean
toolCallId: string
toolName: string
}
/**
* This interface was referenced by `MothershipStreamV1EventEnvelope`'s JSON-Schema
* via the `definition` "MothershipStreamV1ToolResultPayload".
*/
export interface MothershipStreamV1ToolResultPayload {
error?: string
output?: unknown
success: boolean
}
export const MothershipStreamV1AsyncToolRecordStatus = {
pending: 'pending',
running: 'running',
completed: 'completed',
failed: 'failed',
cancelled: 'cancelled',
delivered: 'delivered',
} as const
export const MothershipStreamV1CompletionStatus = {
complete: 'complete',
error: 'error',
cancelled: 'cancelled',
} as const
export const MothershipStreamV1EventType = {
session: 'session',
text: 'text',
tool: 'tool',
span: 'span',
resource: 'resource',
run: 'run',
error: 'error',
complete: 'complete',
} as const
export const MothershipStreamV1ResourceOp = {
upsert: 'upsert',
remove: 'remove',
} as const
export const MothershipStreamV1RunKind = {
checkpoint_pause: 'checkpoint_pause',
resumed: 'resumed',
compaction_start: 'compaction_start',
compaction_done: 'compaction_done',
} as const
export const MothershipStreamV1SessionKind = {
trace: 'trace',
chat: 'chat',
title: 'title',
start: 'start',
} as const
export const MothershipStreamV1SpanKind = {
subagent: 'subagent',
} as const
export const MothershipStreamV1SpanLifecycleEvent = {
start: 'start',
end: 'end',
} as const
export const MothershipStreamV1SpanPayloadKind = {
subagent: 'subagent',
structured_result: 'structured_result',
subagent_result: 'subagent_result',
} as const
export const MothershipStreamV1TextChannel = {
assistant: 'assistant',
thinking: 'thinking',
} as const
export const MothershipStreamV1ToolExecutor = {
go: 'go',
sim: 'sim',
client: 'client',
} as const
export const MothershipStreamV1ToolMode = {
sync: 'sync',
async: 'async',
} as const
export const MothershipStreamV1ToolOutcome = {
success: 'success',
error: 'error',
cancelled: 'cancelled',
skipped: 'skipped',
rejected: 'rejected',
} as const
export const MothershipStreamV1ToolPhase = {
call: 'call',
args_delta: 'args_delta',
result: 'result',
} as const

View File

@@ -0,0 +1,136 @@
// AUTO-GENERATED FILE. DO NOT EDIT.
//
/**
* This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
* via the `definition` "RequestTraceV1Outcome".
*/
export type RequestTraceV1Outcome = 'success' | 'error' | 'cancelled'
/**
* This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
* via the `definition` "RequestTraceV1SpanSource".
*/
export type RequestTraceV1SpanSource = 'sim' | 'go'
/**
* This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
* via the `definition` "RequestTraceV1SpanStatus".
*/
export type RequestTraceV1SpanStatus = 'ok' | 'error' | 'cancelled'
/**
* Trace report sent from Sim to Go after a request completes.
*/
export interface RequestTraceV1SimReport {
chatId?: string
cost?: RequestTraceV1CostSummary
durationMs: number
endMs: number
executionId?: string
goTraceId?: string
outcome: RequestTraceV1Outcome
runId?: string
simRequestId: string
spans: RequestTraceV1Span[]
startMs: number
streamId?: string
usage?: RequestTraceV1UsageSummary
}
/**
* This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
* via the `definition` "RequestTraceV1CostSummary".
*/
export interface RequestTraceV1CostSummary {
billedTotalCost?: number
rawTotalCost?: number
}
/**
* This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
* via the `definition` "RequestTraceV1Span".
*/
export interface RequestTraceV1Span {
attributes?: MothershipStreamV1AdditionalPropertiesMap
durationMs?: number
endMs?: number
kind?: string
name: string
parentName?: string
source?: RequestTraceV1SpanSource
startMs: number
status: RequestTraceV1SpanStatus
}
/**
* This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
* via the `definition` "MothershipStreamV1AdditionalPropertiesMap".
*/
export interface MothershipStreamV1AdditionalPropertiesMap {
[k: string]: unknown
}
/**
* This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
* via the `definition` "RequestTraceV1UsageSummary".
*/
export interface RequestTraceV1UsageSummary {
cacheReadTokens?: number
cacheWriteTokens?: number
inputTokens?: number
outputTokens?: number
}
/**
* This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
* via the `definition` "RequestTraceV1MergedTrace".
*/
export interface RequestTraceV1MergedTrace {
chatId?: string
cost?: RequestTraceV1CostSummary
durationMs: number
endMs: number
executionId?: string
goTraceId: string
model?: string
outcome: RequestTraceV1Outcome
provider?: string
runId?: string
serviceCharges?: MothershipStreamV1AdditionalPropertiesMap
simRequestId?: string
spans: RequestTraceV1Span[]
startMs: number
streamId?: string
usage?: RequestTraceV1UsageSummary
userId?: string
}
/**
* This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
* via the `definition` "RequestTraceV1SimReport".
*/
export interface RequestTraceV1SimReport1 {
chatId?: string
cost?: RequestTraceV1CostSummary
durationMs: number
endMs: number
executionId?: string
goTraceId?: string
outcome: RequestTraceV1Outcome
runId?: string
simRequestId: string
spans: RequestTraceV1Span[]
startMs: number
streamId?: string
usage?: RequestTraceV1UsageSummary
}
export const RequestTraceV1Outcome = {
success: 'success',
error: 'error',
cancelled: 'cancelled',
} as const
export const RequestTraceV1SpanSource = {
sim: 'sim',
go: 'go',
} as const
export const RequestTraceV1SpanStatus = {
ok: 'ok',
error: 'error',
cancelled: 'cancelled',
} as const

File diff suppressed because it is too large Load Diff

View File

@@ -1,25 +0,0 @@
// Identifiers that tie a log line back to a specific copilot request/message.
export interface CopilotLogContext {
  requestId?: string
  messageId?: string
}

/**
 * Appends copilot request identifiers to a log message.
 *
 * Produces `"<message> [requestId:… messageId:…]"`, including only the
 * identifiers present (and non-empty) on `context`; returns the message
 * unchanged when neither identifier is set.
 */
export function appendCopilotLogContext(message: string, context: CopilotLogContext = {}): string {
  const parts = [
    context.requestId ? `requestId:${context.requestId}` : null,
    context.messageId ? `messageId:${context.messageId}` : null,
  ].filter((part): part is string => part !== null)
  return parts.length > 0 ? `${message} [${parts.join(' ')}]` : message
}

View File

@@ -1,10 +0,0 @@
// Identifier of a copilot model; a free-form string.
export type CopilotModelId = string
// Modes listed for the copilot UI.
export const COPILOT_MODES = ['ask', 'build', 'plan'] as const
export type CopilotMode = (typeof COPILOT_MODES)[number]
// Modes listed for the transport layer. NOTE(review): 'build' is absent and
// 'agent' present — presumably 'build' maps to 'agent' in transport; confirm.
export const COPILOT_TRANSPORT_MODES = ['ask', 'agent', 'plan'] as const
export type CopilotTransportMode = (typeof COPILOT_TRANSPORT_MODES)[number]
// Union of every mode value accepted in copilot requests.
export const COPILOT_REQUEST_MODES = ['ask', 'build', 'plan', 'agent'] as const
export type CopilotRequestMode = (typeof COPILOT_REQUEST_MODES)[number]

View File

@@ -1,570 +0,0 @@
import { createLogger } from '@sim/logger'
import {
ASYNC_TOOL_STATUS,
inferDeliveredAsyncSuccess,
isDeliveredAsyncStatus,
isTerminalAsyncStatus,
} from '@/lib/copilot/async-runs/lifecycle'
import {
claimCompletedAsyncToolCall,
getAsyncToolCall,
getAsyncToolCalls,
markAsyncToolDelivered,
releaseCompletedAsyncToolClaim,
updateRunStatus,
} from '@/lib/copilot/async-runs/repository'
import { SIM_AGENT_API_URL, SIM_AGENT_VERSION } from '@/lib/copilot/constants'
import {
isToolAvailableOnSimSide,
prepareExecutionContext,
} from '@/lib/copilot/orchestrator/tool-executor'
import {
type ExecutionContext,
isTerminalToolCallStatus,
type OrchestratorOptions,
type OrchestratorResult,
type SSEEvent,
type ToolCallState,
} from '@/lib/copilot/orchestrator/types'
import { env } from '@/lib/core/config/env'
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
import { buildToolCallSummaries, createStreamingContext, runStreamLoop } from './stream/core'
const logger = createLogger('CopilotOrchestrator')
/**
 * Decides whether an async tool call should be treated as successful.
 *
 * The durable (persisted) status takes precedence: completed is success,
 * failed/cancelled is not, and delivered defers to the inference helper
 * over the stored result/error. Only when the durable status is absent or
 * non-terminal does the in-memory tool state status decide — and then only
 * an explicit 'success' counts.
 */
function didAsyncToolSucceed(input: {
  durableStatus?: string | null
  durableResult?: Record<string, unknown>
  durableError?: string | null
  toolStateStatus?: string | undefined
}) {
  const { durableStatus, durableResult, durableError, toolStateStatus } = input
  switch (durableStatus) {
    case ASYNC_TOOL_STATUS.completed:
      return true
    case ASYNC_TOOL_STATUS.failed:
    case ASYNC_TOOL_STATUS.cancelled:
      return false
    case ASYNC_TOOL_STATUS.delivered:
      // Delivered rows no longer carry a definitive outcome; infer it from
      // the stored result/error pair.
      return inferDeliveredAsyncSuccess({
        result: durableResult,
        error: durableError,
      })
    default:
      break
  }
  // Any in-memory status other than 'success' ('error', 'cancelled',
  // undefined, …) is treated as failure.
  return toolStateStatus === 'success'
}
/**
 * A pending async tool call evaluated for inclusion in a resume payload.
 */
interface ReadyContinuationTool {
  // Tool call id taken from the continuation's pendingToolCallIds list.
  toolCallId: string
  // In-memory tool state from the streaming context, when tracked.
  toolState?: ToolCallState
  // Durable row for the async tool call, when one exists in the repository.
  durableRow?: Awaited<ReturnType<typeof getAsyncToolCall>>
  // True when the durable row is not yet claimed by this resume worker
  // (row's claimedBy differs from the worker id).
  needsDurableClaim: boolean
  // True when the durable row is already claimed by this resume worker.
  alreadyClaimedByWorker: boolean
}
/**
 * Options for orchestrating a copilot stream, extending the base
 * orchestrator options with request scoping identifiers.
 */
export interface OrchestrateStreamOptions extends OrchestratorOptions {
  // User the request runs as; used to prepare the execution context and
  // decrypt effective environment variables.
  userId: string
  // Workflow scope; when set, a full execution context is prepared for it,
  // otherwise a minimal context with an empty workflowId is built.
  workflowId?: string
  workspaceId?: string
  chatId?: string
  executionId?: string
  // Async run id; when set, run status is updated on async pauses.
  runId?: string
  /** Go-side route to proxy to. Defaults to '/api/copilot'. */
  goRoute?: string
}
export async function orchestrateCopilotStream(
requestPayload: Record<string, unknown>,
options: OrchestrateStreamOptions
): Promise<OrchestratorResult> {
const {
userId,
workflowId,
workspaceId,
chatId,
executionId,
runId,
goRoute = '/api/copilot',
} = options
const userTimezone =
typeof requestPayload?.userTimezone === 'string' ? requestPayload.userTimezone : undefined
let execContext: ExecutionContext
if (workflowId) {
execContext = await prepareExecutionContext(userId, workflowId, chatId)
} else {
const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId)
execContext = {
userId,
workflowId: '',
workspaceId,
chatId,
decryptedEnvVars,
}
}
if (userTimezone) {
execContext.userTimezone = userTimezone
}
execContext.executionId = executionId
execContext.runId = runId
execContext.abortSignal = options.abortSignal
execContext.userStopSignal = options.userStopSignal
const payloadMsgId = requestPayload?.messageId
const messageId = typeof payloadMsgId === 'string' ? payloadMsgId : crypto.randomUUID()
execContext.messageId = messageId
const context = createStreamingContext({
chatId,
executionId,
runId,
messageId,
})
const continuationWorkerId = `sim-resume:${crypto.randomUUID()}`
const reqLogger = logger.withMetadata({ requestId: context.requestId, messageId })
let claimedToolCallIds: string[] = []
let claimedByWorkerId: string | null = null
reqLogger.info('Starting copilot orchestration', {
goRoute,
workflowId,
workspaceId,
chatId,
executionId,
runId,
hasUserTimezone: Boolean(userTimezone),
})
try {
let route = goRoute
let payload = requestPayload
const callerOnEvent = options.onEvent
for (;;) {
context.streamComplete = false
reqLogger.info('Starting orchestration loop iteration', {
route,
hasPendingAsyncContinuation: Boolean(context.awaitingAsyncContinuation),
claimedToolCallCount: claimedToolCallIds.length,
})
const loopOptions = {
...options,
onEvent: async (event: SSEEvent) => {
if (event.type === 'done') {
const d = (event.data ?? {}) as Record<string, unknown>
const response = (d.response ?? {}) as Record<string, unknown>
if (response.async_pause) {
reqLogger.info('Detected async pause from copilot backend', {
route,
checkpointId:
typeof (response.async_pause as Record<string, unknown>)?.checkpointId ===
'string'
? (response.async_pause as Record<string, unknown>).checkpointId
: undefined,
})
if (runId) {
await updateRunStatus(runId, 'paused_waiting_for_tool').catch(() => {})
}
}
}
await callerOnEvent?.(event)
},
}
await runStreamLoop(
`${SIM_AGENT_API_URL}${route}`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}),
'X-Client-Version': SIM_AGENT_VERSION,
},
body: JSON.stringify(payload),
},
context,
execContext,
loopOptions
)
reqLogger.info('Completed orchestration loop iteration', {
route,
streamComplete: context.streamComplete,
wasAborted: context.wasAborted,
hasAsyncContinuation: Boolean(context.awaitingAsyncContinuation),
errorCount: context.errors.length,
})
if (claimedToolCallIds.length > 0) {
reqLogger.info('Marking async tool calls as delivered', {
toolCallIds: claimedToolCallIds,
})
await Promise.all(
claimedToolCallIds.map((toolCallId) =>
markAsyncToolDelivered(toolCallId).catch(() => null)
)
)
claimedToolCallIds = []
claimedByWorkerId = null
}
if (options.abortSignal?.aborted || context.wasAborted) {
reqLogger.info('Stopping orchestration because request was aborted', {
pendingToolCallCount: Array.from(context.toolCalls.values()).filter(
(toolCall) => toolCall.status === 'pending' || toolCall.status === 'executing'
).length,
})
for (const [toolCallId, toolCall] of context.toolCalls) {
if (toolCall.status === 'pending' || toolCall.status === 'executing') {
toolCall.status = 'cancelled'
toolCall.endTime = Date.now()
toolCall.error = 'Stopped by user'
}
}
context.awaitingAsyncContinuation = undefined
break
}
const continuation = context.awaitingAsyncContinuation
if (!continuation) {
reqLogger.info('No async continuation pending; finishing orchestration')
break
}
let resumeReady = false
let resumeRetries = 0
reqLogger.info('Processing async continuation', {
checkpointId: continuation.checkpointId,
runId: continuation.runId,
pendingToolCallIds: continuation.pendingToolCallIds,
})
for (;;) {
claimedToolCallIds = []
claimedByWorkerId = null
const resumeWorkerId = continuationWorkerId
const readyTools: ReadyContinuationTool[] = []
const localPendingPromises: Promise<unknown>[] = []
const missingToolCallIds: string[] = []
for (const toolCallId of continuation.pendingToolCallIds) {
const durableRow = await getAsyncToolCall(toolCallId).catch(() => null)
const localPendingPromise = context.pendingToolPromises.get(toolCallId)
const toolState = context.toolCalls.get(toolCallId)
if (localPendingPromise) {
localPendingPromises.push(localPendingPromise)
reqLogger.info('Waiting for local async tool completion before retrying resume claim', {
toolCallId,
runId: continuation.runId,
workerId: resumeWorkerId,
})
continue
}
if (durableRow && isTerminalAsyncStatus(durableRow.status)) {
if (durableRow.claimedBy && durableRow.claimedBy !== resumeWorkerId) {
missingToolCallIds.push(toolCallId)
reqLogger.warn(
'Async tool continuation is waiting on a claim held by another worker',
{
toolCallId,
runId: continuation.runId,
workerId: resumeWorkerId,
claimedBy: durableRow.claimedBy,
}
)
continue
}
readyTools.push({
toolCallId,
toolState,
durableRow,
needsDurableClaim: durableRow.claimedBy !== resumeWorkerId,
alreadyClaimedByWorker: durableRow.claimedBy === resumeWorkerId,
})
continue
}
if (
!durableRow &&
toolState &&
isTerminalToolCallStatus(toolState.status) &&
!isToolAvailableOnSimSide(toolState.name)
) {
reqLogger.info('Including Go-handled tool in resume payload (no Sim-side row)', {
toolCallId,
toolName: toolState.name,
status: toolState.status,
runId: continuation.runId,
})
readyTools.push({
toolCallId,
toolState,
needsDurableClaim: false,
alreadyClaimedByWorker: false,
})
continue
}
reqLogger.warn('Skipping already-claimed or missing async tool resume', {
toolCallId,
runId: continuation.runId,
durableStatus: durableRow?.status,
toolStateStatus: toolState?.status,
})
missingToolCallIds.push(toolCallId)
}
if (localPendingPromises.length > 0) {
reqLogger.info('Waiting for local pending async tools before resuming continuation', {
checkpointId: continuation.checkpointId,
pendingPromiseCount: localPendingPromises.length,
})
await Promise.allSettled(localPendingPromises)
continue
}
if (missingToolCallIds.length > 0) {
if (resumeRetries < 3) {
resumeRetries++
reqLogger.info('Retrying async resume after some tool calls were not yet ready', {
checkpointId: continuation.checkpointId,
runId: continuation.runId,
workerId: resumeWorkerId,
retry: resumeRetries,
missingToolCallIds,
})
await new Promise((resolve) => setTimeout(resolve, 250 * resumeRetries))
continue
}
reqLogger.error(
'Async continuation failed because pending tool calls never became ready',
{
checkpointId: continuation.checkpointId,
runId: continuation.runId,
missingToolCallIds,
}
)
throw new Error(
`Failed to resume async tool continuation: pending tool calls were not ready (${missingToolCallIds.join(', ')})`
)
}
if (readyTools.length === 0) {
if (resumeRetries < 3 && continuation.pendingToolCallIds.length > 0) {
resumeRetries++
reqLogger.info('Retrying async resume because no tool calls were ready yet', {
checkpointId: continuation.checkpointId,
runId: continuation.runId,
workerId: resumeWorkerId,
retry: resumeRetries,
})
await new Promise((resolve) => setTimeout(resolve, 250 * resumeRetries))
continue
}
reqLogger.error('Async continuation failed because no tool calls were ready', {
checkpointId: continuation.checkpointId,
runId: continuation.runId,
requestedToolCallIds: continuation.pendingToolCallIds,
})
throw new Error('Failed to resume async tool continuation: no tool calls were ready')
}
const claimCandidates = readyTools.filter((tool) => tool.needsDurableClaim)
const newlyClaimedToolCallIds: string[] = []
const claimFailures: string[] = []
for (const tool of claimCandidates) {
const claimed = await claimCompletedAsyncToolCall(tool.toolCallId, resumeWorkerId).catch(
() => null
)
if (!claimed) {
claimFailures.push(tool.toolCallId)
continue
}
newlyClaimedToolCallIds.push(tool.toolCallId)
}
if (claimFailures.length > 0) {
if (newlyClaimedToolCallIds.length > 0) {
reqLogger.info('Releasing async tool claims after claim contention during resume', {
checkpointId: continuation.checkpointId,
runId: continuation.runId,
workerId: resumeWorkerId,
newlyClaimedToolCallIds,
claimFailures,
})
await Promise.all(
newlyClaimedToolCallIds.map((toolCallId) =>
releaseCompletedAsyncToolClaim(toolCallId, resumeWorkerId).catch(() => null)
)
)
}
if (resumeRetries < 3) {
resumeRetries++
reqLogger.info('Retrying async resume after claim contention', {
checkpointId: continuation.checkpointId,
runId: continuation.runId,
workerId: resumeWorkerId,
retry: resumeRetries,
claimFailures,
})
await new Promise((resolve) => setTimeout(resolve, 250 * resumeRetries))
continue
}
reqLogger.error('Async continuation failed because tool claims could not be acquired', {
checkpointId: continuation.checkpointId,
runId: continuation.runId,
claimFailures,
})
throw new Error(
`Failed to resume async tool continuation: unable to claim tool calls (${claimFailures.join(', ')})`
)
}
claimedToolCallIds = [
...readyTools
.filter((tool) => tool.alreadyClaimedByWorker)
.map((tool) => tool.toolCallId),
...newlyClaimedToolCallIds,
]
claimedByWorkerId = claimedToolCallIds.length > 0 ? resumeWorkerId : null
reqLogger.info('Resuming async tool continuation', {
checkpointId: continuation.checkpointId,
runId: continuation.runId,
workerId: resumeWorkerId,
toolCallIds: readyTools.map((tool) => tool.toolCallId),
})
const durableRows = await getAsyncToolCalls(
readyTools.map((tool) => tool.toolCallId)
).catch(() => [])
const durableByToolCallId = new Map(durableRows.map((row) => [row.toolCallId, row]))
const results = await Promise.all(
readyTools.map(async (tool) => {
const durable = durableByToolCallId.get(tool.toolCallId) || tool.durableRow
const durableStatus = durable?.status
const durableResult =
durable?.result && typeof durable.result === 'object'
? (durable.result as Record<string, unknown>)
: undefined
const success = didAsyncToolSucceed({
durableStatus,
durableResult,
durableError: durable?.error,
toolStateStatus: tool.toolState?.status,
})
const data =
durableResult ||
(tool.toolState?.result?.output as Record<string, unknown> | undefined) ||
(success
? { message: 'Tool completed' }
: {
error: durable?.error || tool.toolState?.error || 'Tool failed',
})
if (
durableStatus &&
!isTerminalAsyncStatus(durableStatus) &&
!isDeliveredAsyncStatus(durableStatus)
) {
reqLogger.warn(
'Async tool row was claimed for resume without terminal durable state',
{
toolCallId: tool.toolCallId,
status: durableStatus,
}
)
}
return {
callId: tool.toolCallId,
name: durable?.toolName || tool.toolState?.name || '',
data,
success,
}
})
)
context.awaitingAsyncContinuation = undefined
route = '/api/tools/resume'
payload = {
checkpointId: continuation.checkpointId,
results,
}
reqLogger.info('Prepared async continuation payload for resume endpoint', {
route,
checkpointId: continuation.checkpointId,
resultCount: results.length,
})
resumeReady = true
break
}
if (!resumeReady) {
reqLogger.warn('Async continuation loop exited without resume payload', {
checkpointId: continuation.checkpointId,
runId: continuation.runId,
})
break
}
}
const result: OrchestratorResult = {
success: context.errors.length === 0 && !context.wasAborted,
content: context.accumulatedContent,
contentBlocks: context.contentBlocks,
toolCalls: buildToolCallSummaries(context),
chatId: context.chatId,
requestId: context.requestId,
errors: context.errors.length ? context.errors : undefined,
usage: context.usage,
cost: context.cost,
}
reqLogger.info('Completing copilot orchestration', {
success: result.success,
chatId: result.chatId,
hasRequestId: Boolean(result.requestId),
errorCount: result.errors?.length || 0,
toolCallCount: result.toolCalls.length,
})
await options.onComplete?.(result)
return result
} catch (error) {
const err = error instanceof Error ? error : new Error('Copilot orchestration failed')
if (claimedToolCallIds.length > 0 && claimedByWorkerId) {
reqLogger.warn('Releasing async tool claims after delivery failure', {
toolCallIds: claimedToolCallIds,
workerId: claimedByWorkerId,
})
await Promise.all(
claimedToolCallIds.map((toolCallId) =>
releaseCompletedAsyncToolClaim(toolCallId, claimedByWorkerId!).catch(() => null)
)
)
}
reqLogger.error('Copilot orchestration failed', {
error: err.message,
})
await options.onError?.(err)
return {
success: false,
content: '',
contentBlocks: [],
toolCalls: [],
chatId: context.chatId,
error: err.message,
}
}
}

View File

@@ -1,293 +0,0 @@
/**
* @vitest-environment node
*/
import { loggerMock } from '@sim/testing'
import { beforeEach, describe, expect, it, vi } from 'vitest'
vi.mock('@sim/logger', () => loggerMock)
const { executeToolServerSide, markToolComplete, isToolAvailableOnSimSide } = vi.hoisted(() => ({
executeToolServerSide: vi.fn(),
markToolComplete: vi.fn(),
isToolAvailableOnSimSide: vi.fn().mockReturnValue(true),
}))
const { upsertAsyncToolCall } = vi.hoisted(() => ({
upsertAsyncToolCall: vi.fn(),
}))
vi.mock('@/lib/copilot/orchestrator/tool-executor', () => ({
executeToolServerSide,
markToolComplete,
isToolAvailableOnSimSide,
}))
vi.mock('@/lib/copilot/async-runs/repository', async () => {
const actual = await vi.importActual<typeof import('@/lib/copilot/async-runs/repository')>(
'@/lib/copilot/async-runs/repository'
)
return {
...actual,
upsertAsyncToolCall,
}
})
import { sseHandlers } from '@/lib/copilot/orchestrator/sse/handlers'
import type { ExecutionContext, StreamingContext } from '@/lib/copilot/orchestrator/types'
describe('sse-handlers tool lifecycle', () => {
  // Shared mutable state rebuilt per test in beforeEach so tool-call maps and
  // flags never leak between cases.
  let context: StreamingContext
  let execContext: ExecutionContext
  beforeEach(() => {
    vi.clearAllMocks()
    // Default: persisting the async tool row succeeds (resolves to null).
    upsertAsyncToolCall.mockResolvedValue(null)
    // Minimal streaming context: no chat yet, empty transcript, no tool calls.
    context = {
      chatId: undefined,
      messageId: 'msg-1',
      accumulatedContent: '',
      contentBlocks: [],
      toolCalls: new Map(),
      pendingToolPromises: new Map(),
      currentThinkingBlock: null,
      isInThinkingBlock: false,
      subAgentParentToolCallId: undefined,
      subAgentParentStack: [],
      subAgentContent: {},
      subAgentToolCalls: {},
      pendingContent: '',
      streamComplete: false,
      wasAborted: false,
      errors: [],
    }
    execContext = {
      userId: 'user-1',
      workflowId: 'workflow-1',
    }
  })
  // Happy path: a non-interactive tool_call executes server-side, reports
  // completion, and surfaces a tool_result event through onEvent.
  it('executes tool_call and emits tool_result + mark-complete', async () => {
    executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } })
    markToolComplete.mockResolvedValueOnce(true)
    const onEvent = vi.fn()
    await sseHandlers.tool_call(
      {
        type: 'tool_call',
        data: { id: 'tool-1', name: 'read', arguments: { workflowId: 'workflow-1' } },
      } as any,
      context,
      execContext,
      { onEvent, interactive: false, timeout: 1000 }
    )
    // tool_call fires execution without awaiting (fire-and-forget for parallel execution),
    // so we flush pending microtasks before asserting
    await new Promise((resolve) => setTimeout(resolve, 0))
    expect(executeToolServerSide).toHaveBeenCalledTimes(1)
    expect(markToolComplete).toHaveBeenCalledTimes(1)
    expect(onEvent).toHaveBeenCalledWith(
      expect.objectContaining({
        type: 'tool_result',
        toolCallId: 'tool-1',
        success: true,
      })
    )
    const updated = context.toolCalls.get('tool-1')
    expect(updated?.status).toBe('success')
    expect(updated?.result?.output).toEqual({ ok: true })
  })
  // Re-delivering the same tool_call after its result is recorded must be a no-op:
  // no second execution, no second completion report.
  it('skips duplicate tool_call after result', async () => {
    executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } })
    markToolComplete.mockResolvedValueOnce(true)
    const event = {
      type: 'tool_call',
      data: { id: 'tool-dup', name: 'read', arguments: { workflowId: 'workflow-1' } },
    }
    await sseHandlers.tool_call(event as any, context, execContext, { interactive: false })
    await new Promise((resolve) => setTimeout(resolve, 0))
    await sseHandlers.tool_call(event as any, context, execContext, { interactive: false })
    expect(executeToolServerSide).toHaveBeenCalledTimes(1)
    expect(markToolComplete).toHaveBeenCalledTimes(1)
  })
  // Aborting while a tool is executing should flip its state to cancelled and
  // report a 499 completion so the Go-side waiter unblocks.
  it('marks an in-flight tool as cancelled when aborted mid-execution', async () => {
    const abortController = new AbortController()
    const userStopController = new AbortController()
    execContext.abortSignal = abortController.signal
    execContext.userStopSignal = userStopController.signal
    // Tool resolves on a macrotask so the abort below lands mid-flight.
    executeToolServerSide.mockImplementationOnce(
      () =>
        new Promise((resolve) => {
          setTimeout(() => resolve({ success: true, output: { ok: true } }), 0)
        })
    )
    markToolComplete.mockResolvedValue(true)
    await sseHandlers.tool_call(
      {
        type: 'tool_call',
        data: { id: 'tool-cancel', name: 'read', arguments: { workflowId: 'workflow-1' } },
      } as any,
      context,
      execContext,
      {
        interactive: false,
        timeout: 1000,
        abortSignal: abortController.signal,
        userStopSignal: userStopController.signal,
      }
    )
    userStopController.abort()
    abortController.abort()
    await new Promise((resolve) => setTimeout(resolve, 10))
    // 499 = client-closed-request; payload marks the run as cancelled.
    expect(markToolComplete).toHaveBeenCalledWith(
      'tool-cancel',
      'read',
      499,
      'Request aborted during tool execution',
      { cancelled: true },
      'msg-1'
    )
    const updated = context.toolCalls.get('tool-cancel')
    expect(updated?.status).toBe('cancelled')
  })
  // A duplicate tool_call while the first execution is still pending must not
  // spawn a second execution or swap out the tracked pending promise.
  it('does not replace an in-flight pending promise on duplicate tool_call', async () => {
    let resolveTool: ((value: { success: boolean; output: { ok: boolean } }) => void) | undefined
    // Hold the tool open until the test explicitly resolves it.
    executeToolServerSide.mockImplementationOnce(
      () =>
        new Promise((resolve) => {
          resolveTool = resolve
        })
    )
    markToolComplete.mockResolvedValueOnce(true)
    const event = {
      type: 'tool_call',
      data: { id: 'tool-inflight', name: 'read', arguments: { workflowId: 'workflow-1' } },
    }
    await sseHandlers.tool_call(event as any, context, execContext, { interactive: false })
    await new Promise((resolve) => setTimeout(resolve, 0))
    const firstPromise = context.pendingToolPromises.get('tool-inflight')
    expect(firstPromise).toBeDefined()
    await sseHandlers.tool_call(event as any, context, execContext, { interactive: false })
    expect(executeToolServerSide).toHaveBeenCalledTimes(1)
    expect(context.pendingToolPromises.get('tool-inflight')).toBe(firstPromise)
    resolveTool?.({ success: true, output: { ok: true } })
    await new Promise((resolve) => setTimeout(resolve, 0))
    // Settled promises must be removed from the pending map.
    expect(context.pendingToolPromises.has('tool-inflight')).toBe(false)
    expect(markToolComplete).toHaveBeenCalledTimes(1)
  })
  // Durable-row persistence is best-effort: a failed upsert must not block
  // the actual tool execution.
  it('still executes the tool when async row upsert fails', async () => {
    upsertAsyncToolCall.mockRejectedValueOnce(new Error('db down'))
    executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } })
    markToolComplete.mockResolvedValueOnce(true)
    await sseHandlers.tool_call(
      {
        type: 'tool_call',
        data: { id: 'tool-upsert-fail', name: 'read', arguments: { workflowId: 'workflow-1' } },
      } as any,
      context,
      execContext,
      { onEvent: vi.fn(), interactive: false, timeout: 1000 }
    )
    await new Promise((resolve) => setTimeout(resolve, 0))
    expect(executeToolServerSide).toHaveBeenCalledTimes(1)
    expect(markToolComplete).toHaveBeenCalledTimes(1)
    expect(context.toolCalls.get('tool-upsert-fail')?.status).toBe('success')
  })
  // Race: the server's tool_result lands while the local upsert is still
  // in flight — local execution must be skipped and the result honored.
  it('does not execute a tool if a terminal tool_result arrives before local execution starts', async () => {
    let resolveUpsert: ((value: null) => void) | undefined
    // Hold the upsert open so tool_result can win the race.
    upsertAsyncToolCall.mockImplementationOnce(
      () =>
        new Promise((resolve) => {
          resolveUpsert = resolve
        })
    )
    const onEvent = vi.fn()
    await sseHandlers.tool_call(
      {
        type: 'tool_call',
        data: { id: 'tool-race', name: 'read', arguments: { workflowId: 'workflow-1' } },
      } as any,
      context,
      execContext,
      { onEvent, interactive: false, timeout: 1000 }
    )
    await sseHandlers.tool_result(
      {
        type: 'tool_result',
        toolCallId: 'tool-race',
        data: { id: 'tool-race', success: true, result: { ok: true } },
      } as any,
      context,
      execContext,
      { onEvent, interactive: false, timeout: 1000 }
    )
    resolveUpsert?.(null)
    await new Promise((resolve) => setTimeout(resolve, 0))
    expect(executeToolServerSide).not.toHaveBeenCalled()
    expect(markToolComplete).not.toHaveBeenCalled()
    expect(context.toolCalls.get('tool-race')?.status).toBe('success')
    expect(context.toolCalls.get('tool-race')?.result?.output).toEqual({ ok: true })
  })
  // Out-of-order delivery: a tool_result that precedes its tool_call should
  // create terminal state, and the late tool_call must not re-execute.
  it('does not execute a tool if a tool_result arrives before the tool_call event', async () => {
    const onEvent = vi.fn()
    await sseHandlers.tool_result(
      {
        type: 'tool_result',
        toolCallId: 'tool-early-result',
        toolName: 'read',
        data: { id: 'tool-early-result', name: 'read', success: true, result: { ok: true } },
      } as any,
      context,
      execContext,
      { onEvent, interactive: false, timeout: 1000 }
    )
    await sseHandlers.tool_call(
      {
        type: 'tool_call',
        data: { id: 'tool-early-result', name: 'read', arguments: { workflowId: 'workflow-1' } },
      } as any,
      context,
      execContext,
      { onEvent, interactive: false, timeout: 1000 }
    )
    await new Promise((resolve) => setTimeout(resolve, 0))
    expect(executeToolServerSide).not.toHaveBeenCalled()
    expect(markToolComplete).not.toHaveBeenCalled()
    expect(context.toolCalls.get('tool-early-result')?.status).toBe('success')
  })
})

View File

@@ -1,852 +0,0 @@
import { createLogger } from '@sim/logger'
import { upsertAsyncToolCall } from '@/lib/copilot/async-runs/repository'
import { STREAM_TIMEOUT_MS } from '@/lib/copilot/constants'
import {
asRecord,
getEventData,
markToolResultSeen,
wasToolResultSeen,
} from '@/lib/copilot/orchestrator/sse/utils'
import {
isToolAvailableOnSimSide,
markToolComplete,
} from '@/lib/copilot/orchestrator/tool-executor'
import type {
ContentBlock,
ExecutionContext,
OrchestratorOptions,
SSEEvent,
StreamingContext,
ToolCallState,
} from '@/lib/copilot/orchestrator/types'
import { isWorkflowToolName } from '@/lib/copilot/workflow-tools'
import { executeToolAndReport, waitForToolCompletion } from './tool-execution'
// Module-scoped logger shared by every SSE handler in this file.
const logger = createLogger('CopilotSseHandlers')
/**
 * Builds an AbortSignal that fires when either the main abort signal OR
 * the client-disconnect signal fires. Used for client-executable tool waits
 * so the orchestrator doesn't block for the full timeout when the browser dies.
 *
 * @param options - Orchestrator options carrying the two optional signals.
 * @returns A combined signal when both are present, the lone available signal,
 *   a pre-aborted signal when the client is already gone, or undefined when
 *   neither signal exists.
 */
function buildClientToolAbortSignal(options: OrchestratorOptions): AbortSignal | undefined {
  const { abortSignal, clientDisconnectedSignal } = options
  // Fast paths: nothing to combine, or the client is already disconnected.
  if (!clientDisconnectedSignal || clientDisconnectedSignal.aborted) {
    return clientDisconnectedSignal?.aborted ? AbortSignal.abort() : abortSignal
  }
  if (!abortSignal) return clientDisconnectedSignal
  // AbortSignal.any aborts as soon as either source aborts, and — unlike the
  // previous hand-rolled AbortController wiring — does not leave a dangling
  // listener on the surviving signal after the first one fires.
  return AbortSignal.any([abortSignal, clientDisconnectedSignal])
}
/**
 * Tracks an in-flight tool-execution promise on the streaming context and
 * removes the entry once that exact promise settles. The identity check
 * guards against deleting a newer promise registered under the same id.
 */
function registerPendingToolPromise(
  context: StreamingContext,
  toolCallId: string,
  pendingPromise: Promise<{ status: string; message?: string; data?: Record<string, unknown> }>
) {
  const { pendingToolPromises } = context
  pendingToolPromises.set(toolCallId, pendingPromise)
  const cleanup = () => {
    // Only clear the slot if it still points at this promise.
    if (pendingToolPromises.get(toolCallId) === pendingPromise) {
      pendingToolPromises.delete(toolCallId)
    }
  }
  pendingPromise.finally(cleanup)
}
/**
 * When the Sim→Go stream is aborted, avoid starting server-side tool work and
 * unblock the Go async waiter with a terminal 499 completion.
 *
 * @returns true when the tool was short-circuited, false when execution may proceed.
 */
function abortPendingToolIfStreamDead(
  toolCall: ToolCallState,
  toolCallId: string,
  options: OrchestratorOptions,
  context: StreamingContext
): boolean {
  const streamDead = Boolean(options.abortSignal?.aborted) || context.wasAborted
  if (!streamDead) {
    return false
  }
  // Make the local state terminal and suppress any late tool_result event.
  toolCall.status = 'cancelled'
  toolCall.endTime = Date.now()
  markToolResultSeen(toolCallId)
  // Fire-and-forget: report the 499 so Go's async waiter unblocks; failures
  // are logged rather than thrown.
  markToolComplete(
    toolCall.id,
    toolCall.name,
    499,
    'Request aborted before tool execution',
    {
      cancelled: true,
    },
    context.messageId
  ).catch((err) => {
    const message = err instanceof Error ? err.message : String(err)
    logger
      .withMetadata({ messageId: context.messageId })
      .error('markToolComplete fire-and-forget failed (stream aborted)', {
        toolCallId: toolCall.id,
        error: message,
      })
  })
  return true
}
/**
 * Extract the `ui` object from an SSE event. The server enriches
 * tool_call events with `ui: { requiresConfirmation, clientExecutable, ... }`.
 */
function getEventUI(event: SSEEvent): {
  requiresConfirmation: boolean
  clientExecutable: boolean
  internal: boolean
  hidden: boolean
} {
  const ui = asRecord((event as unknown as Record<string, unknown>).ui)
  // Coerce each flag to a strict boolean; anything other than `true` is false.
  const flag = (key: string): boolean => ui[key] === true
  return {
    requiresConfirmation: flag('requiresConfirmation'),
    clientExecutable: flag('clientExecutable'),
    internal: flag('internal'),
    hidden: flag('hidden'),
  }
}
/**
 * Handle the completion signal from a client-executable tool.
 * Shared by both the main and subagent tool_call handlers.
 *
 * Maps the client-reported completion status onto local tool state, notifies
 * Go via markToolComplete (fire-and-forget), and marks the server's own
 * tool_result for this call as already seen so it is not double-applied.
 */
function handleClientCompletion(
  toolCall: ToolCallState,
  toolCallId: string,
  completion: { status: string; message?: string; data?: Record<string, unknown> } | null,
  context: StreamingContext
): void {
  // Fire-and-forget completion report to Go. Failures are logged with a
  // per-branch label, never thrown. Extracted to remove the four duplicated
  // markToolComplete(...).catch(...) blocks the original carried.
  const report = (
    code: number,
    message: string,
    data: Record<string, unknown> | undefined,
    logLabel: string,
    extraLog: Record<string, unknown> = {}
  ): void => {
    markToolComplete(toolCall.id, toolCall.name, code, message, data, context.messageId).catch(
      (err) => {
        logger
          .withMetadata({ messageId: context.messageId })
          .error(`markToolComplete fire-and-forget failed (${logLabel})`, {
            toolCallId: toolCall.id,
            ...extraLog,
            error: err instanceof Error ? err.message : String(err),
          })
      }
    )
  }
  if (completion?.status === 'background') {
    // Tool moved itself to background execution; 202 tells Go not to wait.
    toolCall.status = 'skipped'
    toolCall.endTime = Date.now()
    report(
      202,
      completion.message || 'Tool execution moved to background',
      { background: true },
      'client background'
    )
    markToolResultSeen(toolCallId)
    return
  }
  if (completion?.status === 'rejected') {
    // User declined the confirmation prompt.
    toolCall.status = 'rejected'
    toolCall.endTime = Date.now()
    report(400, completion.message || 'Tool execution rejected', undefined, 'client rejected')
    markToolResultSeen(toolCallId)
    return
  }
  if (completion?.status === 'cancelled') {
    // 499 = client closed request; propagate any partial data the client sent.
    toolCall.status = 'cancelled'
    toolCall.endTime = Date.now()
    report(
      499,
      completion.message || 'Workflow execution was stopped manually by the user.',
      completion.data,
      'client cancelled'
    )
    markToolResultSeen(toolCallId)
    return
  }
  // Default: success, explicit failure, or timeout (null completion).
  const success = completion?.status === 'success'
  toolCall.status = success ? 'success' : 'error'
  toolCall.endTime = Date.now()
  const msg = completion?.message || (success ? 'Tool completed' : 'Tool failed or timed out')
  report(success ? 200 : 500, msg, completion?.data, 'client completion', {
    toolName: toolCall.name,
  })
  markToolResultSeen(toolCallId)
}
/**
 * Emit a synthetic tool_result SSE event to the client after a client-executable
 * tool completes. The server's actual tool_result is skipped (markToolResultSeen),
 * so the client would never learn the outcome without this.
 */
async function emitSyntheticToolResult(
  toolCallId: string,
  toolName: string,
  completion: { status: string; message?: string; data?: Record<string, unknown> } | null,
  options: OrchestratorOptions,
  context: StreamingContext
): Promise<void> {
  const status = completion?.status
  const success = status === 'success'
  // Annotate user cancellations so the UI can distinguish them from failures.
  let resultPayload: Record<string, unknown> | undefined
  if (status === 'cancelled') {
    resultPayload = { ...completion?.data, reason: 'user_cancelled', cancelledByUser: true }
  } else {
    resultPayload = completion?.data
  }
  const syntheticEvent = {
    type: 'tool_result',
    toolCallId,
    toolName,
    success,
    result: resultPayload,
    error: success ? undefined : completion?.message,
  } as SSEEvent
  try {
    await options.onEvent?.(syntheticEvent)
  } catch (error) {
    // Delivery is best-effort: a broken event sink must not fail the tool flow.
    logger
      .withMetadata({ messageId: context.messageId })
      .warn('Failed to emit synthetic tool_result', {
        toolCallId,
        toolName,
        error: error instanceof Error ? error.message : String(error),
      })
  }
}
// Normalization + dedupe helpers live in sse-utils to keep server/client in sync.
/**
 * Infer whether a tool_result payload represents success.
 * Honors an explicit `success` flag (top-level or nested inside `result`);
 * otherwise falls back to "no error present means success".
 */
function inferToolSuccess(data: Record<string, unknown> | undefined): {
  success: boolean
  hasResultData: boolean
  hasError: boolean
} {
  const nested = asRecord(data?.result)
  const hasExplicitSuccess = data?.success !== undefined || nested.success !== undefined
  const explicitValue = data?.success ?? nested.success
  const hasError = Boolean(data?.error) || Boolean(nested.error)
  return {
    success: hasExplicitSuccess ? Boolean(explicitValue) : !hasError,
    hasResultData: data?.result !== undefined || data?.data !== undefined,
    hasError,
  }
}
/**
 * Look up the ToolCallState for a tool_result/tool_error event, creating a
 * placeholder entry (plus a matching content block) when the result arrived
 * before its tool_call event was ever seen.
 */
function ensureTerminalToolCallState(
  context: StreamingContext,
  toolCallId: string,
  toolName: string
): ToolCallState {
  const known = context.toolCalls.get(toolCallId)
  if (known) {
    return known
  }
  // Result arrived first: synthesize a pending entry so state stays consistent.
  const placeholder: ToolCallState = {
    id: toolCallId,
    name: toolName || 'unknown_tool',
    status: 'pending',
    startTime: Date.now(),
  }
  context.toolCalls.set(toolCallId, placeholder)
  addContentBlock(context, { type: 'tool_call', toolCall: placeholder })
  return placeholder
}
/**
 * Signature shared by every handler in the `sseHandlers` dispatch table.
 * Handlers may be synchronous or asynchronous; callers await the result.
 */
export type SSEHandler = (
  event: SSEEvent,
  context: StreamingContext,
  execContext: ExecutionContext,
  options: OrchestratorOptions
) => void | Promise<void>
/** Append a content block to the transcript, stamping it with the current time. */
function addContentBlock(context: StreamingContext, block: Omit<ContentBlock, 'timestamp'>): void {
  const stamped = { ...block, timestamp: Date.now() }
  context.contentBlocks.push(stamped)
}
export const sseHandlers: Record<string, SSEHandler> = {
chat_id: (event, context, execContext) => {
const chatId = asRecord(event.data).chatId as string | undefined
context.chatId = chatId
if (chatId) {
execContext.chatId = chatId
}
},
request_id: (event, context) => {
const rid = typeof event.data === 'string' ? event.data : undefined
if (rid) {
context.requestId = rid
logger
.withMetadata({ messageId: context.messageId })
.info('Mapped copilot message to Go trace ID', {
goTraceId: rid,
chatId: context.chatId,
executionId: context.executionId,
runId: context.runId,
})
}
},
title_updated: () => {},
tool_result: (event, context) => {
const data = getEventData(event)
const toolCallId = event.toolCallId || (data?.id as string | undefined)
if (!toolCallId) return
const toolName =
event.toolName ||
(data?.name as string | undefined) ||
context.toolCalls.get(toolCallId)?.name ||
''
const current = ensureTerminalToolCallState(context, toolCallId, toolName)
const { success, hasResultData, hasError } = inferToolSuccess(data)
current.status = success ? 'success' : 'error'
current.endTime = Date.now()
if (hasResultData) {
current.result = {
success,
output: data?.result || data?.data,
}
}
if (hasError) {
const resultObj = asRecord(data?.result)
current.error = (data?.error || resultObj.error) as string | undefined
}
markToolResultSeen(toolCallId)
},
tool_error: (event, context) => {
const data = getEventData(event)
const toolCallId = event.toolCallId || (data?.id as string | undefined)
if (!toolCallId) return
const toolName =
event.toolName ||
(data?.name as string | undefined) ||
context.toolCalls.get(toolCallId)?.name ||
''
const current = ensureTerminalToolCallState(context, toolCallId, toolName)
current.status = 'error'
current.error = (data?.error as string | undefined) || 'Tool execution failed'
current.endTime = Date.now()
markToolResultSeen(toolCallId)
},
tool_call_delta: () => {
// Argument streaming delta — no action needed on orchestrator side
},
tool_generating: (event, context) => {
const data = getEventData(event)
const toolCallId =
event.toolCallId ||
(data?.toolCallId as string | undefined) ||
(data?.id as string | undefined)
const toolName =
event.toolName || (data?.toolName as string | undefined) || (data?.name as string | undefined)
if (!toolCallId || !toolName) return
if (!context.toolCalls.has(toolCallId)) {
const toolCall = {
id: toolCallId,
name: toolName,
status: 'pending' as const,
startTime: Date.now(),
}
context.toolCalls.set(toolCallId, toolCall)
addContentBlock(context, { type: 'tool_call', toolCall })
}
},
tool_call: async (event, context, execContext, options) => {
const toolData = getEventData(event) || ({} as Record<string, unknown>)
const toolCallId = (toolData.id as string | undefined) || event.toolCallId
const toolName = (toolData.name as string | undefined) || event.toolName
if (!toolCallId || !toolName) return
const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as
| Record<string, unknown>
| undefined
const isPartial = toolData.partial === true
const existing = context.toolCalls.get(toolCallId)
if (
existing?.endTime ||
(existing && existing.status !== 'pending' && existing.status !== 'executing')
) {
if (!existing.name && toolName) {
existing.name = toolName
}
if (!existing.params && args) {
existing.params = args
}
return
}
if (existing) {
if (args && !existing.params) existing.params = args
if (
!context.contentBlocks.some((b) => b.type === 'tool_call' && b.toolCall?.id === toolCallId)
) {
addContentBlock(context, { type: 'tool_call', toolCall: existing })
}
} else {
const created = {
id: toolCallId,
name: toolName,
status: 'pending' as const,
params: args,
startTime: Date.now(),
}
context.toolCalls.set(toolCallId, created)
addContentBlock(context, { type: 'tool_call', toolCall: created })
}
if (isPartial) return
if (wasToolResultSeen(toolCallId)) return
if (context.pendingToolPromises.has(toolCallId) || existing?.status === 'executing') {
return
}
const toolCall = context.toolCalls.get(toolCallId)
if (!toolCall) return
const { clientExecutable, internal } = getEventUI(event)
if (internal) {
return
}
if (!isToolAvailableOnSimSide(toolName) && !clientExecutable) {
return
}
/**
* Fire tool execution without awaiting so parallel tool calls from the
* same LLM turn execute concurrently. executeToolAndReport is self-contained:
* it updates tool state, calls markToolComplete, and emits result events.
*/
const fireToolExecution = () => {
const pendingPromise = (async () => {
try {
await upsertAsyncToolCall({
runId: context.runId || crypto.randomUUID(),
toolCallId,
toolName,
args,
})
} catch (err) {
logger
.withMetadata({ messageId: context.messageId })
.warn('Failed to persist async tool row before execution', {
toolCallId,
toolName,
error: err instanceof Error ? err.message : String(err),
})
}
return executeToolAndReport(toolCallId, context, execContext, options)
})().catch((err) => {
logger
.withMetadata({ messageId: context.messageId })
.error('Parallel tool execution failed', {
toolCallId,
toolName,
error: err instanceof Error ? err.message : String(err),
})
return {
status: 'error',
message: err instanceof Error ? err.message : String(err),
data: { error: err instanceof Error ? err.message : String(err) },
}
})
registerPendingToolPromise(context, toolCallId, pendingPromise)
}
if (options.interactive === false) {
if (options.autoExecuteTools !== false) {
if (!abortPendingToolIfStreamDead(toolCall, toolCallId, options, context)) {
fireToolExecution()
}
}
return
}
// Client-executable tool: execute server-side if available, otherwise
// delegate to the client (React UI) and wait for completion.
// Workflow run tools are implemented on Sim for MCP/server callers but must
// still run in the browser when clientExecutable so the workflow terminal
// receives SSE block logs (executeWorkflowWithFullLogging).
if (clientExecutable) {
const delegateWorkflowRunToClient = isWorkflowToolName(toolName)
if (isToolAvailableOnSimSide(toolName) && !delegateWorkflowRunToClient) {
if (!abortPendingToolIfStreamDead(toolCall, toolCallId, options, context)) {
fireToolExecution()
}
} else {
toolCall.status = 'executing'
await upsertAsyncToolCall({
runId: context.runId || crypto.randomUUID(),
toolCallId,
toolName,
args,
status: 'running',
}).catch((err) => {
logger
.withMetadata({ messageId: context.messageId })
.warn('Failed to persist async tool row for client-executable tool', {
toolCallId,
toolName,
error: err instanceof Error ? err.message : String(err),
})
})
const clientWaitSignal = buildClientToolAbortSignal(options)
const completion = await waitForToolCompletion(
toolCallId,
options.timeout || STREAM_TIMEOUT_MS,
clientWaitSignal
)
handleClientCompletion(toolCall, toolCallId, completion, context)
await emitSyntheticToolResult(toolCallId, toolCall.name, completion, options, context)
}
return
}
if (options.autoExecuteTools !== false) {
if (!abortPendingToolIfStreamDead(toolCall, toolCallId, options, context)) {
fireToolExecution()
}
}
},
reasoning: (event, context) => {
  // Thinking-phase stream handler: 'start' opens a new thinking block,
  // 'end' commits the current block into contentBlocks, and any other
  // reasoning event carries a text delta to append.
  const payload = asRecord(event.data)
  const phase = payload.phase || asRecord(payload.data).phase
  if (phase === 'start') {
    context.isInThinkingBlock = true
    context.currentThinkingBlock = { type: 'thinking', content: '', timestamp: Date.now() }
    return
  }
  if (phase === 'end') {
    const finished = context.currentThinkingBlock
    if (finished) context.contentBlocks.push(finished)
    context.isInThinkingBlock = false
    context.currentThinkingBlock = null
    return
  }
  // Delta chunk: ignore when empty or when no thinking block is open.
  const delta = (payload.data || payload.content || event.content) as string | undefined
  const block = context.currentThinkingBlock
  if (!delta || !block) return
  block.content = (block.content || '') + delta
},
content: (event, context) => {
  // Text deltas arrive either as a bare string in event.data or wrapped
  // in an object under content/data (server sends the plain-string form).
  const raw = event.data
  const chunk =
    typeof raw === 'string'
      ? raw
      : ((asRecord(raw).content || asRecord(raw).data || event.content) as string | undefined)
  if (!chunk) return
  context.accumulatedContent += chunk
  addContentBlock(context, { type: 'text', content: chunk })
},
done: (event, context) => {
  // Terminal event: capture an async-pause checkpoint (if present) plus
  // usage/cost accounting, then flag the stream as complete.
  const payload = asRecord(event.data)
  const pause = asRecord(asRecord(payload.response).async_pause)
  if (pause.checkpointId) {
    const pendingIds = Array.isArray(pause.pendingToolCallIds)
      ? pause.pendingToolCallIds.map((id) => String(id))
      : []
    context.awaitingAsyncContinuation = {
      checkpointId: String(pause.checkpointId),
      // Fall back to the context's ids when the pause omits/mistypes them.
      executionId: typeof pause.executionId === 'string' ? pause.executionId : context.executionId,
      runId: typeof pause.runId === 'string' ? pause.runId : context.runId,
      pendingToolCallIds: pendingIds,
    }
  }
  if (payload.usage) {
    const usage = asRecord(payload.usage)
    context.usage = {
      prompt: (usage.input_tokens as number) || 0,
      completion: (usage.output_tokens as number) || 0,
    }
  }
  if (payload.cost) {
    const cost = asRecord(payload.cost)
    context.cost = {
      input: (cost.input as number) || 0,
      output: (cost.output as number) || 0,
      total: (cost.total as number) || 0,
    }
  }
  context.streamComplete = true
},
start: () => {},
error: (event, context) => {
  // Record the error message (when one is present) and end the stream.
  const payload = asRecord(event.data)
  const reason = (payload.message || payload.error || event.error) as string | undefined
  if (reason) context.errors.push(reason)
  context.streamComplete = true
},
}
// SSE handlers for events emitted by a subagent stream. These mirror the
// top-level handlers but additionally record state under the parent tool
// call that spawned the subagent (subAgentContent / subAgentToolCalls).
export const subAgentHandlers: Record<string, SSEHandler> = {
  content: (event, context) => {
    const parentToolCallId = context.subAgentParentToolCallId
    // Subagent events are only meaningful when attached to a parent tool call.
    if (!parentToolCallId || !event.data) return
    // Server sends content as a plain string in event.data
    let chunk: string | undefined
    if (typeof event.data === 'string') {
      chunk = event.data
    } else {
      const d = asRecord(event.data)
      chunk = (d.content || d.data || event.content) as string | undefined
    }
    if (!chunk) return
    // Accumulate per-parent transcript text and surface it as a subagent block.
    context.subAgentContent[parentToolCallId] =
      (context.subAgentContent[parentToolCallId] || '') + chunk
    addContentBlock(context, { type: 'subagent_text', content: chunk })
  },
  tool_call: async (event, context, execContext, options) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) return
    const toolData = getEventData(event) || ({} as Record<string, unknown>)
    const toolCallId = (toolData.id as string | undefined) || event.toolCallId
    const toolName = (toolData.name as string | undefined) || event.toolName
    if (!toolCallId || !toolName) return
    // Partial events stream argument deltas; they are recorded but never executed.
    const isPartial = toolData.partial === true
    const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as
      | Record<string, unknown>
      | undefined
    const existing = context.toolCalls.get(toolCallId)
    // Ignore late/duplicate tool_call events once we already have a result.
    if (wasToolResultSeen(toolCallId) || existing?.endTime) {
      // Still backfill name/params that may have been missing on the first event.
      if (existing && !existing.name && toolName) {
        existing.name = toolName
      }
      if (existing && !existing.params && args) {
        existing.params = args
      }
      return
    }
    const toolCall: ToolCallState = {
      id: toolCallId,
      name: toolName,
      status: 'pending',
      params: args,
      startTime: Date.now(),
    }
    // Store in both places - but do NOT overwrite existing tool call state for the same id.
    if (!context.subAgentToolCalls[parentToolCallId]) {
      context.subAgentToolCalls[parentToolCallId] = []
    }
    if (!context.subAgentToolCalls[parentToolCallId].some((tc) => tc.id === toolCallId)) {
      context.subAgentToolCalls[parentToolCallId].push(toolCall)
    }
    if (!context.toolCalls.has(toolCallId)) {
      context.toolCalls.set(toolCallId, toolCall)
      const parentToolCall = context.toolCalls.get(parentToolCallId)
      addContentBlock(context, {
        type: 'tool_call',
        toolCall,
        calledBy: parentToolCall?.name,
      })
    }
    if (isPartial) return
    // Avoid double-firing when an execution is already in flight for this id.
    if (context.pendingToolPromises.has(toolCallId) || existing?.status === 'executing') {
      return
    }
    const { clientExecutable, internal } = getEventUI(event)
    // Internal tools are handled entirely by the backend.
    if (internal) {
      return
    }
    // Skip tools we can neither run on the Sim side nor delegate to the client.
    if (!isToolAvailableOnSimSide(toolName) && !clientExecutable) {
      return
    }
    // Kick off server-side execution without awaiting it; the promise is
    // registered so the stream can later join on outstanding tool work.
    const fireToolExecution = () => {
      const pendingPromise = (async () => {
        try {
          // Best-effort persistence of the async tool row before execution.
          await upsertAsyncToolCall({
            runId: context.runId || crypto.randomUUID(),
            toolCallId,
            toolName,
            args,
          })
        } catch (err) {
          logger
            .withMetadata({ messageId: context.messageId })
            .warn('Failed to persist async subagent tool row before execution', {
              toolCallId,
              toolName,
              error: err instanceof Error ? err.message : String(err),
            })
        }
        return executeToolAndReport(toolCallId, context, execContext, options)
      })().catch((err) => {
        logger
          .withMetadata({ messageId: context.messageId })
          .error('Parallel subagent tool execution failed', {
            toolCallId,
            toolName,
            error: err instanceof Error ? err.message : String(err),
          })
        // Surface the failure as a structured completion instead of rethrowing.
        return {
          status: 'error',
          message: err instanceof Error ? err.message : String(err),
          data: { error: err instanceof Error ? err.message : String(err) },
        }
      })
      registerPendingToolPromise(context, toolCallId, pendingPromise)
    }
    // Non-interactive callers always auto-execute when allowed.
    if (options.interactive === false) {
      if (options.autoExecuteTools !== false) {
        if (!abortPendingToolIfStreamDead(toolCall, toolCallId, options, context)) {
          fireToolExecution()
        }
      }
      return
    }
    // Client-executable tool: run server-side when possible, otherwise
    // delegate to the browser and wait for its completion signal.
    if (clientExecutable) {
      const delegateWorkflowRunToClient = isWorkflowToolName(toolName)
      if (isToolAvailableOnSimSide(toolName) && !delegateWorkflowRunToClient) {
        if (!abortPendingToolIfStreamDead(toolCall, toolCallId, options, context)) {
          fireToolExecution()
        }
      } else {
        toolCall.status = 'executing'
        // Persist a 'running' row so reconnects can observe the pending client tool.
        await upsertAsyncToolCall({
          runId: context.runId || crypto.randomUUID(),
          toolCallId,
          toolName,
          args,
          status: 'running',
        }).catch((err) => {
          logger
            .withMetadata({ messageId: context.messageId })
            .warn('Failed to persist async tool row for client-executable subagent tool', {
              toolCallId,
              toolName,
              error: err instanceof Error ? err.message : String(err),
            })
        })
        const clientWaitSignal = buildClientToolAbortSignal(options)
        // Block until the client reports completion (or timeout/abort).
        const completion = await waitForToolCompletion(
          toolCallId,
          options.timeout || STREAM_TIMEOUT_MS,
          clientWaitSignal
        )
        handleClientCompletion(toolCall, toolCallId, completion, context)
        await emitSyntheticToolResult(toolCallId, toolCall.name, completion, options, context)
      }
      return
    }
    if (options.autoExecuteTools !== false) {
      if (!abortPendingToolIfStreamDead(toolCall, toolCallId, options, context)) {
        fireToolExecution()
      }
    }
  },
  tool_result: (event, context) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) return
    const data = getEventData(event)
    const toolCallId = event.toolCallId || (data?.id as string | undefined)
    if (!toolCallId) return
    const toolName = event.toolName || (data?.name as string | undefined) || ''
    // Update in subAgentToolCalls.
    const toolCalls = context.subAgentToolCalls[parentToolCallId] || []
    const subAgentToolCall = toolCalls.find((tc) => tc.id === toolCallId)
    // Also update in main toolCalls (where we added it for execution).
    const mainToolCall = ensureTerminalToolCallState(context, toolCallId, toolName)
    const { success, hasResultData, hasError } = inferToolSuccess(data)
    const status = success ? 'success' : 'error'
    const endTime = Date.now()
    const result = hasResultData ? { success, output: data?.result || data?.data } : undefined
    if (subAgentToolCall) {
      subAgentToolCall.status = status
      subAgentToolCall.endTime = endTime
      if (result) subAgentToolCall.result = result
      if (hasError) {
        const resultObj = asRecord(data?.result)
        subAgentToolCall.error = (data?.error || resultObj.error) as string | undefined
      }
    }
    if (mainToolCall) {
      mainToolCall.status = status
      mainToolCall.endTime = endTime
      if (result) mainToolCall.result = result
      if (hasError) {
        const resultObj = asRecord(data?.result)
        mainToolCall.error = (data?.error || resultObj.error) as string | undefined
      }
    }
    // Mark seen so late duplicate tool_call events for this id are ignored.
    if (subAgentToolCall || mainToolCall) {
      markToolResultSeen(toolCallId)
    }
  },
}
/**
 * Decides whether an SSE event should be routed to the subagent handlers.
 * Only events tagged as subagent output qualify, and only when a parent
 * tool call has been registered on the context; otherwise the event is
 * logged and falls back to normal routing.
 */
export function handleSubagentRouting(event: SSEEvent, context: StreamingContext): boolean {
  if (!event.subagent) return false
  if (context.subAgentParentToolCallId) return true
  logger
    .withMetadata({ messageId: context.messageId })
    .warn('Subagent event missing parent tool call', {
      type: event.type,
      subagent: event.subagent,
    })
  return false
}

View File

@@ -1,2 +0,0 @@
export type { SSEHandler } from './handlers'
export { handleSubagentRouting, sseHandlers, subAgentHandlers } from './handlers'

View File

@@ -1,936 +0,0 @@
import { db } from '@sim/db'
import { userTableRows } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { completeAsyncToolCall, markAsyncToolRunning } from '@/lib/copilot/async-runs/repository'
import { waitForToolConfirmation } from '@/lib/copilot/orchestrator/persistence'
import { asRecord, markToolResultSeen } from '@/lib/copilot/orchestrator/sse/utils'
import { executeToolServerSide, markToolComplete } from '@/lib/copilot/orchestrator/tool-executor'
import {
type ExecutionContext,
isTerminalToolCallStatus,
type OrchestratorOptions,
type SSEEvent,
type StreamingContext,
type ToolCallResult,
} from '@/lib/copilot/orchestrator/types'
import {
extractDeletedResourcesFromToolResult,
extractResourcesFromToolResult,
hasDeleteCapability,
isResourceToolName,
persistChatResources,
removeChatResources,
} from '@/lib/copilot/resources'
import { getTableById } from '@/lib/table/service'
import { uploadWorkspaceFile } from '@/lib/uploads/contexts/workspace/workspace-file-manager'
const logger = createLogger('CopilotSseToolExecution')
// Tools whose output may be redirected to a workspace file via an
// `outputPath` argument (see maybeWriteOutputToFile below).
const OUTPUT_PATH_TOOLS = new Set(['function_execute', 'user_table'])
/**
 * Best-effort extraction of a flat array of row objects from the assorted
 * result shapes produced by `function_execute` and `user_table`.
 * Returns null when no tabular data can be recognized.
 */
function extractTabularData(output: unknown): Record<string, unknown>[] | null {
  if (!output || typeof output !== 'object') return null
  // An array counts as tabular only when its first element is an object.
  const looksLikeRows = (candidate: unknown[]): boolean =>
    candidate.length > 0 && typeof candidate[0] === 'object' && candidate[0] !== null
  if (Array.isArray(output)) {
    return looksLikeRows(output) ? (output as Record<string, unknown>[]) : null
  }
  const obj = output as Record<string, unknown>
  // function_execute shape: { result: [...], stdout: "..." }
  if (Array.isArray(obj.result) && looksLikeRows(obj.result)) {
    return obj.result as Record<string, unknown>[]
  }
  // user_table query_rows shape: { data: { rows: [{ data: {...} }], totalCount } }
  if (obj.data && typeof obj.data === 'object' && !Array.isArray(obj.data)) {
    const data = obj.data as Record<string, unknown>
    if (Array.isArray(data.rows) && data.rows.length > 0) {
      const rows = data.rows as Record<string, unknown>[]
      // user_table rows nest the actual cell values inside a .data property.
      if (typeof rows[0].data === 'object' && rows[0].data !== null) {
        return rows.map((r) => r.data as Record<string, unknown>)
      }
      return rows
    }
  }
  return null
}
/**
 * Renders a single value as a CSV cell: null/undefined become empty,
 * objects are JSON-encoded, and cells containing delimiters, quotes, or
 * line breaks are quoted with embedded quotes doubled (RFC 4180 style).
 */
function escapeCsvValue(value: unknown): string {
  if (value === null || value === undefined) return ''
  const text = typeof value === 'object' ? JSON.stringify(value) : String(value)
  const needsQuoting = /[",\n\r]/.test(text)
  return needsQuoting ? `"${text.replace(/"/g, '""')}"` : text
}
/**
 * Serializes row objects to CSV text. The header row is the union of all
 * keys seen across every row (in first-seen order); rows missing a key
 * render an empty cell for it.
 */
function convertRowsToCsv(rows: Record<string, unknown>[]): string {
  if (rows.length === 0) return ''
  const headerSet = new Set<string>()
  for (const row of rows) {
    Object.keys(row).forEach((key) => headerSet.add(key))
  }
  const headers = Array.from(headerSet)
  const lines: string[] = [headers.map(escapeCsvValue).join(',')]
  for (const row of rows) {
    lines.push(headers.map((header) => escapeCsvValue(row[header])).join(','))
  }
  return lines.join('\n')
}
// Supported serialization formats for tool output written to workspace files.
type OutputFormat = 'json' | 'csv' | 'txt' | 'md' | 'html'
// Maps a file extension (with leading dot) to its output format.
const EXT_TO_FORMAT: Record<string, OutputFormat> = {
  '.json': 'json',
  '.csv': 'csv',
  '.txt': 'txt',
  '.md': 'md',
  '.html': 'html',
}
// MIME content type used when uploading a file of each format.
const FORMAT_TO_CONTENT_TYPE: Record<OutputFormat, string> = {
  json: 'application/json',
  csv: 'text/csv',
  txt: 'text/plain',
  md: 'text/markdown',
  html: 'text/html',
}
/**
 * Normalizes a user-supplied output path like "files/result.json" into a
 * bare workspace file name ("result.json"). Leading slashes and a single
 * "files/" prefix are stripped.
 * @throws when no file name remains or the path targets a nested directory.
 */
function normalizeOutputWorkspaceFileName(outputPath: string): string {
  let name = outputPath.trim().replace(/^\/+/, '')
  if (name.startsWith('files/')) {
    name = name.slice('files/'.length)
  }
  if (!name) {
    throw new Error('outputPath must include a file name, e.g. "files/result.json"')
  }
  if (name.includes('/')) {
    throw new Error(
      'outputPath must target a flat workspace file, e.g. "files/result.json". Nested paths like "files/reports/result.json" are not supported.'
    )
  }
  return name
}
/**
 * Picks the output format for a workspace file: an explicitly requested
 * (and recognized) format wins; otherwise the format is inferred from the
 * file extension, defaulting to json.
 */
function resolveOutputFormat(fileName: string, explicit?: string): OutputFormat {
  if (explicit && explicit in FORMAT_TO_CONTENT_TYPE) {
    return explicit as OutputFormat
  }
  const dotIndex = fileName.lastIndexOf('.')
  const ext = fileName.slice(dotIndex).toLowerCase()
  return EXT_TO_FORMAT[ext] ?? 'json'
}
/**
 * Converts a tool result into file text: strings pass through untouched,
 * csv targets get tabular conversion when row data is recognizable, and
 * everything else is pretty-printed JSON.
 */
function serializeOutputForFile(output: unknown, format: OutputFormat): string {
  if (typeof output === 'string') return output
  if (format === 'csv') {
    const rows = extractTabularData(output)
    if (rows?.length) return convertRowsToCsv(rows)
  }
  return JSON.stringify(output, null, 2)
}
/**
 * When a tool call carried an `outputPath` argument, serialize its result
 * and upload it as a flat workspace file, replacing the (potentially large)
 * tool output with a small summary envelope. No-op unless the tool
 * succeeded, is in OUTPUT_PATH_TOOLS, and workspace/user context is present.
 */
async function maybeWriteOutputToFile(
  toolName: string,
  params: Record<string, unknown> | undefined,
  result: ToolCallResult,
  context: ExecutionContext
): Promise<ToolCallResult> {
  if (!result.success || !result.output) return result
  if (!OUTPUT_PATH_TOOLS.has(toolName)) return result
  if (!context.workspaceId || !context.userId) return result
  // outputPath/outputFormat may appear at the top level or nested under args.
  const args = params?.args as Record<string, unknown> | undefined
  const outputPath =
    (params?.outputPath as string | undefined) ?? (args?.outputPath as string | undefined)
  if (!outputPath) return result
  const explicitFormat =
    (params?.outputFormat as string | undefined) ?? (args?.outputFormat as string | undefined)
  try {
    const fileName = normalizeOutputWorkspaceFileName(outputPath)
    const format = resolveOutputFormat(fileName, explicitFormat)
    // Abort checks bracket serialization and upload so a cancelled request
    // does not mutate workspace state; the throw is caught below.
    if (context.abortSignal?.aborted) {
      throw new Error('Request aborted before tool mutation could be applied')
    }
    const content = serializeOutputForFile(result.output, format)
    const contentType = FORMAT_TO_CONTENT_TYPE[format]
    const buffer = Buffer.from(content, 'utf-8')
    if (context.abortSignal?.aborted) {
      throw new Error('Request aborted before tool mutation could be applied')
    }
    const uploaded = await uploadWorkspaceFile(
      context.workspaceId,
      context.userId,
      buffer,
      fileName,
      contentType
    )
    logger.withMetadata({ messageId: context.messageId }).info('Tool output written to file', {
      toolName,
      fileName,
      size: buffer.length,
      fileId: uploaded.id,
    })
    // Replace the raw output with a compact summary of the written file.
    return {
      success: true,
      output: {
        message: `Output written to files/${fileName} (${buffer.length} bytes)`,
        fileId: uploaded.id,
        fileName,
        size: buffer.length,
        downloadUrl: uploaded.url,
      },
    }
  } catch (err) {
    const message = err instanceof Error ? err.message : String(err)
    logger
      .withMetadata({ messageId: context.messageId })
      .warn('Failed to write tool output to file', {
        toolName,
        outputPath,
        error: message,
      })
    // Surface the failure as a failed tool result rather than throwing.
    return {
      success: false,
      error: `Failed to write output file: ${message}`,
    }
  }
}
// Hard cap on rows written into a user table from tool output.
const MAX_OUTPUT_TABLE_ROWS = 10_000
// Insert batch size used inside the table-write transactions below.
const BATCH_CHUNK_SIZE = 500
// Completion envelope reported back for an async tool call.
export interface AsyncToolCompletion {
  status: string
  message?: string
  data?: Record<string, unknown>
}
/**
 * True when the user has requested a stop — via the options-level or
 * execution-context abort signal — or the stream was already marked aborted.
 */
function abortRequested(
  context: StreamingContext,
  execContext: ExecutionContext,
  options?: OrchestratorOptions
): boolean {
  return Boolean(
    options?.userStopSignal?.aborted ||
      execContext.userStopSignal?.aborted ||
      context.wasAborted
  )
}
/** Builds the standard cancelled-completion envelope with the given message. */
function cancelledCompletion(message: string): AsyncToolCompletion {
  return { status: 'cancelled', message, data: { cancelled: true } }
}
/**
 * Maps a tool call that already reached a terminal status onto the
 * AsyncToolCompletion envelope reported to callers:
 * - cancelled -> cancelled
 * - success   -> success ("Tool completed")
 * - skipped   -> success ("Tool skipped")
 * - rejected  -> rejected; anything else -> error
 *
 * The previously duplicated success/skipped branches (identical except for
 * the message) are merged into one.
 */
function terminalCompletionFromToolCall(toolCall: {
  status: string
  error?: string
  result?: { output?: unknown; error?: string }
}): AsyncToolCompletion {
  if (toolCall.status === 'cancelled') {
    return cancelledCompletion(toolCall.error || 'Tool execution cancelled')
  }
  if (toolCall.status === 'success' || toolCall.status === 'skipped') {
    const output = toolCall.result?.output
    return {
      status: 'success',
      message: toolCall.status === 'success' ? 'Tool completed' : 'Tool skipped',
      // Only plain objects can be carried in `data`; arrays/primitives drop.
      data:
        output && typeof output === 'object' && !Array.isArray(output)
          ? (output as Record<string, unknown>)
          : undefined,
    }
  }
  const failure = toolCall.error || toolCall.result?.error || 'Tool failed'
  return {
    status: toolCall.status === 'rejected' ? 'rejected' : 'error',
    message: failure,
    data: { error: failure },
  }
}
/**
 * Fire-and-forget notification to the backend that a tool was cancelled
 * (status code 499). Delivery failures are logged, never propagated.
 */
function reportCancelledTool(
  toolCall: { id: string; name: string },
  message: string,
  messageId?: string,
  data: Record<string, unknown> = { cancelled: true }
): void {
  const onFailure = (err: unknown) => {
    logger.withMetadata({ messageId }).error('markToolComplete failed (cancelled)', {
      toolCallId: toolCall.id,
      toolName: toolCall.name,
      error: err instanceof Error ? err.message : String(err),
    })
  }
  markToolComplete(toolCall.id, toolCall.name, 499, message, data, messageId).catch(onFailure)
}
/**
 * When `function_execute` was invoked with an `outputTable` argument, replace
 * the target user table's rows with the array the code returned. The entire
 * replacement (delete + batched inserts) runs inside one transaction so a
 * failure or abort leaves the table unchanged.
 */
async function maybeWriteOutputToTable(
  toolName: string,
  params: Record<string, unknown> | undefined,
  result: ToolCallResult,
  context: ExecutionContext
): Promise<ToolCallResult> {
  if (toolName !== 'function_execute') return result
  if (!result.success || !result.output) return result
  if (!context.workspaceId || !context.userId) return result
  const outputTable = params?.outputTable as string | undefined
  if (!outputTable) return result
  try {
    const table = await getTableById(outputTable)
    if (!table) {
      return {
        success: false,
        error: `Table "${outputTable}" not found`,
      }
    }
    // Accept either the { result: [...] } function_execute envelope or a bare array.
    const rawOutput = result.output
    let rows: Array<Record<string, unknown>>
    if (rawOutput && typeof rawOutput === 'object' && 'result' in rawOutput) {
      const inner = (rawOutput as Record<string, unknown>).result
      if (Array.isArray(inner)) {
        rows = inner
      } else {
        return {
          success: false,
          error: 'outputTable requires the code to return an array of objects',
        }
      }
    } else if (Array.isArray(rawOutput)) {
      rows = rawOutput
    } else {
      return {
        success: false,
        error: 'outputTable requires the code to return an array of objects',
      }
    }
    if (rows.length > MAX_OUTPUT_TABLE_ROWS) {
      return {
        success: false,
        error: `outputTable row limit exceeded: got ${rows.length}, max is ${MAX_OUTPUT_TABLE_ROWS}`,
      }
    }
    if (rows.length === 0) {
      return {
        success: false,
        error: 'outputTable requires at least one row — code returned an empty array',
      }
    }
    if (context.abortSignal?.aborted) {
      throw new Error('Request aborted before tool mutation could be applied')
    }
    await db.transaction(async (tx) => {
      // Abort is re-checked at each step; throwing rolls back the transaction.
      if (context.abortSignal?.aborted) {
        throw new Error('Request aborted before tool mutation could be applied')
      }
      // Full replace: clear existing rows, then insert in chunks.
      await tx.delete(userTableRows).where(eq(userTableRows.tableId, outputTable))
      const now = new Date()
      for (let i = 0; i < rows.length; i += BATCH_CHUNK_SIZE) {
        if (context.abortSignal?.aborted) {
          throw new Error('Request aborted before tool mutation could be applied')
        }
        const chunk = rows.slice(i, i + BATCH_CHUNK_SIZE)
        const values = chunk.map((rowData, j) => ({
          id: `row_${crypto.randomUUID().replace(/-/g, '')}`,
          tableId: outputTable,
          workspaceId: context.workspaceId!,
          data: rowData,
          position: i + j,
          createdAt: now,
          updatedAt: now,
          createdBy: context.userId,
        }))
        await tx.insert(userTableRows).values(values)
      }
    })
    logger.withMetadata({ messageId: context.messageId }).info('Tool output written to table', {
      toolName,
      tableId: outputTable,
      rowCount: rows.length,
    })
    // Replace the raw output with a compact summary of the write.
    return {
      success: true,
      output: {
        message: `Wrote ${rows.length} rows to table ${outputTable}`,
        tableId: outputTable,
        rowCount: rows.length,
      },
    }
  } catch (err) {
    logger
      .withMetadata({ messageId: context.messageId })
      .warn('Failed to write tool output to table', {
        toolName,
        outputTable,
        error: err instanceof Error ? err.message : String(err),
      })
    return {
      success: false,
      error: `Failed to write to table: ${err instanceof Error ? err.message : String(err)}`,
    }
  }
}
/**
 * When the `read` tool was invoked with an `outputTable` argument, parse the
 * file content (JSON array or CSV, chosen by file extension) and replace the
 * target user table's rows with it inside a single transaction.
 */
async function maybeWriteReadCsvToTable(
  toolName: string,
  params: Record<string, unknown> | undefined,
  result: ToolCallResult,
  context: ExecutionContext
): Promise<ToolCallResult> {
  if (toolName !== 'read') return result
  if (!result.success || !result.output) return result
  if (!context.workspaceId || !context.userId) return result
  const outputTable = params?.outputTable as string | undefined
  if (!outputTable) return result
  try {
    const table = await getTableById(outputTable)
    if (!table) {
      return { success: false, error: `Table "${outputTable}" not found` }
    }
    const output = result.output as Record<string, unknown>
    const content = (output.content as string) || ''
    if (!content.trim()) {
      return { success: false, error: 'File has no content to import into table' }
    }
    // File extension decides the parser: .json expects an array of objects,
    // anything else is parsed as CSV with a header row.
    const filePath = (params?.path as string) || ''
    const ext = filePath.split('.').pop()?.toLowerCase()
    let rows: Record<string, unknown>[]
    if (ext === 'json') {
      const parsed = JSON.parse(content)
      if (!Array.isArray(parsed)) {
        return {
          success: false,
          error: 'JSON file must contain an array of objects for table import',
        }
      }
      rows = parsed
    } else {
      // Lazy import keeps csv-parse off the hot path for non-import reads.
      const { parse } = await import('csv-parse/sync')
      rows = parse(content, {
        columns: true,
        skip_empty_lines: true,
        trim: true,
        relax_column_count: true,
        relax_quotes: true,
        skip_records_with_error: true,
        cast: false,
      }) as Record<string, unknown>[]
    }
    if (rows.length === 0) {
      return { success: false, error: 'File has no data rows to import' }
    }
    if (rows.length > MAX_OUTPUT_TABLE_ROWS) {
      return {
        success: false,
        error: `Row limit exceeded: got ${rows.length}, max is ${MAX_OUTPUT_TABLE_ROWS}`,
      }
    }
    if (context.abortSignal?.aborted) {
      throw new Error('Request aborted before tool mutation could be applied')
    }
    await db.transaction(async (tx) => {
      // Abort re-checks inside the transaction roll everything back on cancel.
      if (context.abortSignal?.aborted) {
        throw new Error('Request aborted before tool mutation could be applied')
      }
      // Full replace: clear existing rows, then insert in batches.
      await tx.delete(userTableRows).where(eq(userTableRows.tableId, outputTable))
      const now = new Date()
      for (let i = 0; i < rows.length; i += BATCH_CHUNK_SIZE) {
        if (context.abortSignal?.aborted) {
          throw new Error('Request aborted before tool mutation could be applied')
        }
        const chunk = rows.slice(i, i + BATCH_CHUNK_SIZE)
        const values = chunk.map((rowData, j) => ({
          id: `row_${crypto.randomUUID().replace(/-/g, '')}`,
          tableId: outputTable,
          workspaceId: context.workspaceId!,
          data: rowData,
          position: i + j,
          createdAt: now,
          updatedAt: now,
          createdBy: context.userId,
        }))
        await tx.insert(userTableRows).values(values)
      }
    })
    logger.withMetadata({ messageId: context.messageId }).info('Read output written to table', {
      toolName,
      tableId: outputTable,
      tableName: table.name,
      rowCount: rows.length,
      filePath,
    })
    return {
      success: true,
      output: {
        message: `Imported ${rows.length} rows from "${filePath}" into table "${table.name}"`,
        tableId: outputTable,
        tableName: table.name,
        rowCount: rows.length,
      },
    }
  } catch (err) {
    logger
      .withMetadata({ messageId: context.messageId })
      .warn('Failed to write read output to table', {
        toolName,
        outputTable,
        error: err instanceof Error ? err.message : String(err),
      })
    return {
      success: false,
      error: `Failed to import into table: ${err instanceof Error ? err.message : String(err)}`,
    }
  }
}
export async function executeToolAndReport(
toolCallId: string,
context: StreamingContext,
execContext: ExecutionContext,
options?: OrchestratorOptions
): Promise<AsyncToolCompletion> {
const toolCall = context.toolCalls.get(toolCallId)
if (!toolCall) return { status: 'error', message: 'Tool call not found' }
if (toolCall.status === 'executing') {
return { status: 'running', message: 'Tool already executing' }
}
if (toolCall.endTime || isTerminalToolCallStatus(toolCall.status)) {
return terminalCompletionFromToolCall(toolCall)
}
if (abortRequested(context, execContext, options)) {
toolCall.status = 'cancelled'
toolCall.endTime = Date.now()
markToolResultSeen(toolCall.id)
await completeAsyncToolCall({
toolCallId: toolCall.id,
status: 'cancelled',
result: { cancelled: true },
error: 'Request aborted before tool execution',
}).catch(() => {})
reportCancelledTool(toolCall, 'Request aborted before tool execution', context.messageId)
return cancelledCompletion('Request aborted before tool execution')
}
toolCall.status = 'executing'
await markAsyncToolRunning(toolCall.id, 'sim-stream').catch(() => {})
logger.withMetadata({ messageId: context.messageId }).info('Tool execution started', {
toolCallId: toolCall.id,
toolName: toolCall.name,
params: toolCall.params,
})
try {
let result = await executeToolServerSide(toolCall, execContext)
if (toolCall.endTime || isTerminalToolCallStatus(toolCall.status)) {
return terminalCompletionFromToolCall(toolCall)
}
if (abortRequested(context, execContext, options)) {
toolCall.status = 'cancelled'
toolCall.endTime = Date.now()
markToolResultSeen(toolCall.id)
await completeAsyncToolCall({
toolCallId: toolCall.id,
status: 'cancelled',
result: { cancelled: true },
error: 'Request aborted during tool execution',
}).catch(() => {})
reportCancelledTool(toolCall, 'Request aborted during tool execution', context.messageId)
return cancelledCompletion('Request aborted during tool execution')
}
result = await maybeWriteOutputToFile(toolCall.name, toolCall.params, result, execContext)
if (abortRequested(context, execContext, options)) {
toolCall.status = 'cancelled'
toolCall.endTime = Date.now()
markToolResultSeen(toolCall.id)
await completeAsyncToolCall({
toolCallId: toolCall.id,
status: 'cancelled',
result: { cancelled: true },
error: 'Request aborted during tool post-processing',
}).catch(() => {})
reportCancelledTool(
toolCall,
'Request aborted during tool post-processing',
context.messageId
)
return cancelledCompletion('Request aborted during tool post-processing')
}
result = await maybeWriteOutputToTable(toolCall.name, toolCall.params, result, execContext)
if (abortRequested(context, execContext, options)) {
toolCall.status = 'cancelled'
toolCall.endTime = Date.now()
markToolResultSeen(toolCall.id)
await completeAsyncToolCall({
toolCallId: toolCall.id,
status: 'cancelled',
result: { cancelled: true },
error: 'Request aborted during tool post-processing',
}).catch(() => {})
reportCancelledTool(
toolCall,
'Request aborted during tool post-processing',
context.messageId
)
return cancelledCompletion('Request aborted during tool post-processing')
}
result = await maybeWriteReadCsvToTable(toolCall.name, toolCall.params, result, execContext)
if (abortRequested(context, execContext, options)) {
toolCall.status = 'cancelled'
toolCall.endTime = Date.now()
markToolResultSeen(toolCall.id)
await completeAsyncToolCall({
toolCallId: toolCall.id,
status: 'cancelled',
result: { cancelled: true },
error: 'Request aborted during tool post-processing',
}).catch(() => {})
reportCancelledTool(
toolCall,
'Request aborted during tool post-processing',
context.messageId
)
return cancelledCompletion('Request aborted during tool post-processing')
}
toolCall.status = result.success ? 'success' : 'error'
toolCall.result = result
toolCall.error = result.error
toolCall.endTime = Date.now()
if (result.success) {
const raw = result.output
const preview =
typeof raw === 'string'
? raw.slice(0, 200)
: raw && typeof raw === 'object'
? JSON.stringify(raw).slice(0, 200)
: undefined
logger.withMetadata({ messageId: context.messageId }).info('Tool execution succeeded', {
toolCallId: toolCall.id,
toolName: toolCall.name,
outputPreview: preview,
})
} else {
logger.withMetadata({ messageId: context.messageId }).warn('Tool execution failed', {
toolCallId: toolCall.id,
toolName: toolCall.name,
error: result.error,
params: toolCall.params,
})
}
// If create_workflow was successful, update the execution context with the new workflowId.
// This ensures subsequent tools in the same stream have access to the workflowId.
const output = asRecord(result.output)
if (
toolCall.name === 'create_workflow' &&
result.success &&
output.workflowId &&
!execContext.workflowId
) {
execContext.workflowId = output.workflowId as string
if (output.workspaceId) {
execContext.workspaceId = output.workspaceId as string
}
}
markToolResultSeen(toolCall.id)
await completeAsyncToolCall({
toolCallId: toolCall.id,
status: result.success ? 'completed' : 'failed',
result: result.success ? asRecord(result.output) : { error: result.error || 'Tool failed' },
error: result.success ? null : result.error || 'Tool failed',
}).catch(() => {})
if (abortRequested(context, execContext, options)) {
toolCall.status = 'cancelled'
reportCancelledTool(
toolCall,
'Request aborted before tool result delivery',
context.messageId
)
return cancelledCompletion('Request aborted before tool result delivery')
}
// Fire-and-forget: notify the copilot backend that the tool completed.
// IMPORTANT: We must NOT await this — the server may block on the
// mark-complete handler until it can write back on the SSE stream, but
// the SSE reader (our for-await loop) is paused while we're in this
// handler. Awaiting here would deadlock: sim waits for the server's response,
// the server waits for sim to drain the SSE stream.
markToolComplete(
toolCall.id,
toolCall.name,
result.success ? 200 : 500,
result.error || (result.success ? 'Tool completed' : 'Tool failed'),
result.output,
context.messageId
).catch((err) => {
logger
.withMetadata({ messageId: context.messageId })
.error('markToolComplete fire-and-forget failed', {
toolCallId: toolCall.id,
toolName: toolCall.name,
error: err instanceof Error ? err.message : String(err),
})
})
const resultEvent: SSEEvent = {
type: 'tool_result',
toolCallId: toolCall.id,
toolName: toolCall.name,
success: result.success,
result: result.output,
data: {
id: toolCall.id,
name: toolCall.name,
success: result.success,
result: result.output,
},
}
await options?.onEvent?.(resultEvent)
if (abortRequested(context, execContext, options)) {
toolCall.status = 'cancelled'
return cancelledCompletion('Request aborted before resource persistence')
}
if (result.success && execContext.chatId && !abortRequested(context, execContext, options)) {
let isDeleteOp = false
if (hasDeleteCapability(toolCall.name)) {
const deleted = extractDeletedResourcesFromToolResult(
toolCall.name,
toolCall.params,
result.output
)
if (deleted.length > 0) {
isDeleteOp = true
removeChatResources(execContext.chatId, deleted).catch((err) => {
logger
.withMetadata({ messageId: context.messageId })
.warn('Failed to remove chat resources after deletion', {
chatId: execContext.chatId,
error: err instanceof Error ? err.message : String(err),
})
})
for (const resource of deleted) {
if (abortRequested(context, execContext, options)) break
await options?.onEvent?.({
type: 'resource_deleted',
resource: { type: resource.type, id: resource.id, title: resource.title },
})
}
}
}
if (!isDeleteOp && !abortRequested(context, execContext, options)) {
const resources =
result.resources && result.resources.length > 0
? result.resources
: isResourceToolName(toolCall.name)
? extractResourcesFromToolResult(toolCall.name, toolCall.params, result.output)
: []
if (resources.length > 0) {
persistChatResources(execContext.chatId, resources).catch((err) => {
logger
.withMetadata({ messageId: context.messageId })
.warn('Failed to persist chat resources', {
chatId: execContext.chatId,
error: err instanceof Error ? err.message : String(err),
})
})
for (const resource of resources) {
if (abortRequested(context, execContext, options)) break
await options?.onEvent?.({
type: 'resource_added',
resource: { type: resource.type, id: resource.id, title: resource.title },
})
}
}
}
}
return {
status: result.success ? 'success' : 'error',
message: result.error || (result.success ? 'Tool completed' : 'Tool failed'),
data: asRecord(result.output),
}
} catch (error) {
if (abortRequested(context, execContext, options)) {
toolCall.status = 'cancelled'
toolCall.endTime = Date.now()
markToolResultSeen(toolCall.id)
await completeAsyncToolCall({
toolCallId: toolCall.id,
status: 'cancelled',
result: { cancelled: true },
error: 'Request aborted during tool execution',
}).catch(() => {})
reportCancelledTool(toolCall, 'Request aborted during tool execution', context.messageId)
return cancelledCompletion('Request aborted during tool execution')
}
toolCall.status = 'error'
toolCall.error = error instanceof Error ? error.message : String(error)
toolCall.endTime = Date.now()
logger.withMetadata({ messageId: context.messageId }).error('Tool execution threw', {
toolCallId: toolCall.id,
toolName: toolCall.name,
error: toolCall.error,
params: toolCall.params,
})
markToolResultSeen(toolCall.id)
await completeAsyncToolCall({
toolCallId: toolCall.id,
status: 'failed',
result: { error: toolCall.error },
error: toolCall.error,
}).catch(() => {})
// Fire-and-forget (same reasoning as above).
// Pass error as structured data so the Go side can surface it to the LLM.
markToolComplete(
toolCall.id,
toolCall.name,
500,
toolCall.error,
{
error: toolCall.error,
},
context.messageId
).catch((err) => {
logger
.withMetadata({ messageId: context.messageId })
.error('markToolComplete fire-and-forget failed', {
toolCallId: toolCall.id,
toolName: toolCall.name,
error: err instanceof Error ? err.message : String(err),
})
})
const errorEvent: SSEEvent = {
type: 'tool_error',
state: 'error',
toolCallId: toolCall.id,
data: {
id: toolCall.id,
name: toolCall.name,
error: toolCall.error,
},
}
await options?.onEvent?.(errorEvent)
return {
status: 'error',
message: toolCall.error,
data: { error: toolCall.error },
}
}
}
/**
 * Wait for a tool completion signal (success/error/rejected) from the client.
 * Ignores intermediate statuses like `accepted` and only returns terminal statuses:
 * - success: client finished executing successfully
 * - error: client execution failed
 * - rejected: user clicked Skip (subagent run tools where user hasn't auto-allowed)
 *
 * Used for client-executable run tools: the client executes the workflow
 * and posts success/error to /api/copilot/confirm when done. The server
 * waits here until that completion signal arrives.
 *
 * @param toolCallId - Tool call to wait on.
 * @param timeoutMs - Maximum time to wait for a terminal status.
 * @param abortSignal - Optional signal that cancels the wait early.
 * @returns The terminal decision, or null on timeout/abort/non-terminal outcome.
 */
export async function waitForToolCompletion(
  toolCallId: string,
  timeoutMs: number,
  abortSignal?: AbortSignal
): Promise<{ status: string; message?: string; data?: Record<string, unknown> } | null> {
  // Single source of truth for terminal statuses so the accept predicate and
  // the post-wait validation can never drift apart.
  const TERMINAL_STATUSES = new Set([
    'success',
    'error',
    'rejected',
    'background',
    'cancelled',
    'delivered',
  ])
  const decision = await waitForToolConfirmation(toolCallId, timeoutMs, abortSignal, {
    acceptStatus: (status) => TERMINAL_STATUSES.has(status),
  })
  return decision && TERMINAL_STATUSES.has(decision.status) ? decision : null
}

View File

@@ -1,44 +0,0 @@
/**
* @vitest-environment node
*/
import { describe, expect, it } from 'vitest'
import {
markToolResultSeen,
normalizeSseEvent,
shouldSkipToolCallEvent,
shouldSkipToolResultEvent,
} from '@/lib/copilot/orchestrator/sse/utils'
// Unit tests for SSE event normalization and the process-local tool-event
// dedupe cache. Note: the dedupe tests rely on module-level state, so the
// order of assertions within each test matters.
describe('sse-utils', () => {
  it.concurrent('normalizes tool fields from string data', () => {
    // `data` arrives as a JSON string; normalization should parse it and
    // hoist id/name/success/result to the top level of the event.
    const event = {
      type: 'tool_result',
      data: JSON.stringify({
        id: 'tool_1',
        name: 'edit_workflow',
        success: true,
        result: { ok: true },
      }),
    }
    const normalized = normalizeSseEvent(event as any)
    expect(normalized.toolCallId).toBe('tool_1')
    expect(normalized.toolName).toBe('edit_workflow')
    expect(normalized.success).toBe(true)
    expect(normalized.result).toEqual({ ok: true })
  })
  it.concurrent('dedupes tool_call events', () => {
    // First sighting records the id; the identical second event is skipped.
    const event = { type: 'tool_call', data: { id: 'tool_call_1', name: 'plan' } }
    expect(shouldSkipToolCallEvent(event as any)).toBe(false)
    expect(shouldSkipToolCallEvent(event as any)).toBe(true)
  })
  it.concurrent('dedupes tool_result events', () => {
    // tool_result events are only skipped once the id is explicitly marked seen.
    const event = { type: 'tool_result', data: { id: 'tool_result_1', name: 'plan' } }
    expect(shouldSkipToolResultEvent(event as any)).toBe(false)
    markToolResultSeen('tool_result_1')
    expect(shouldSkipToolResultEvent(event as any)).toBe(true)
  })
})

View File

@@ -1,129 +0,0 @@
import { STREAM_BUFFER_MAX_DEDUP_ENTRIES } from '@/lib/copilot/constants'
import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
type EventDataObject = Record<string, unknown> | undefined
/** Safely cast event.data to a record for property access. */
export const asRecord = (data: unknown): Record<string, unknown> => {
  // Only plain (non-array, non-null) objects pass through; everything else
  // collapses to an empty record so callers can index without guards.
  const isPlainObject = Boolean(data) && typeof data === 'object' && !Array.isArray(data)
  return (isPlainObject ? data : {}) as Record<string, unknown>
}
/**
* In-memory tool event dedupe with bounded size.
*
* NOTE: Process-local only. In a multi-instance setup (e.g., ECS),
* each task maintains its own dedupe cache.
*/
const seenToolCalls = new Set<string>()
const seenToolResults = new Set<string>()
/**
 * Insert `id` into a bounded dedupe set. When the cap is reached, the oldest
 * entry is evicted first (Sets iterate in insertion order).
 */
function addToSet(set: Set<string>, id: string): void {
  if (set.size >= STREAM_BUFFER_MAX_DEDUP_ENTRIES) {
    for (const oldest of set) {
      if (oldest) set.delete(oldest)
      break
    }
  }
  set.add(id)
}
/**
 * Coerce raw event data into a plain object, parsing JSON strings when needed.
 * Arrays, primitives, and unparseable strings yield undefined.
 */
const parseEventData = (data: unknown): EventDataObject => {
  if (!data) return undefined
  if (typeof data === 'string') {
    try {
      const parsed = JSON.parse(data)
      return parsed && typeof parsed === 'object' && !Array.isArray(parsed)
        ? (parsed as EventDataObject)
        : undefined
    } catch {
      return undefined
    }
  }
  return typeof data === 'object' && !Array.isArray(data) ? (data as EventDataObject) : undefined
}
/** True when the payload exposes at least one recognizable tool metadata field. */
const hasToolFields = (data: EventDataObject): boolean => {
  if (!data) return false
  return ['id', 'toolCallId', 'name', 'success', 'result', 'arguments'].some(
    (field) => data[field] !== undefined
  )
}
/**
 * Extract the event payload, preferring whichever level (top-level `data` or
 * a nested `data` inside it) actually carries tool metadata fields.
 */
export const getEventData = (event: SSEEvent): EventDataObject => {
  const outer = parseEventData(event.data)
  if (!outer) return undefined
  if (hasToolFields(outer)) return outer
  // Some producers nest the payload one level deeper under `data`.
  const inner = parseEventData(outer.data)
  return inner || outer
}
/** Resolve the tool call id from the event top level or its parsed payload. */
function getToolCallIdFromEvent(event: SSEEvent): string | undefined {
  if (event.toolCallId) return event.toolCallId
  const payload = getEventData(event)
  const fromId = payload?.id as string | undefined
  return fromId || (payload?.toolCallId as string | undefined)
}
/** Normalizes SSE events so tool metadata is available at the top level. */
export function normalizeSseEvent(event: SSEEvent): SSEEvent {
  if (!event) return event
  const payload = getEventData(event)
  if (!payload) return event
  return {
    ...event,
    // Keep structured data as-is; replace string payloads with the parsed object.
    data: typeof event.data === 'string' ? payload : event.data,
    toolCallId:
      event.toolCallId ||
      (payload.id as string | undefined) ||
      (payload.toolCallId as string | undefined),
    toolName:
      event.toolName ||
      (payload.name as string | undefined) ||
      (payload.toolName as string | undefined),
    success: event.success ?? (payload.success as boolean | undefined),
    result: event.result ?? payload.result,
  }
}
/** Record a tool_call id in the bounded process-local dedupe cache. */
function markToolCallSeen(toolCallId: string): void {
  addToSet(seenToolCalls, toolCallId)
}
/** Whether a tool_call with this id was already observed by this process. */
function wasToolCallSeen(toolCallId: string): boolean {
  return seenToolCalls.has(toolCallId)
}
/** Record a tool_result id so later duplicate results (and stale tool_calls) are skipped. */
export function markToolResultSeen(toolCallId: string): void {
  addToSet(seenToolResults, toolCallId)
}
/** Whether a tool_result with this id was already observed by this process. */
export function wasToolResultSeen(toolCallId: string): boolean {
  return seenToolResults.has(toolCallId)
}
export function shouldSkipToolCallEvent(event: SSEEvent): boolean {
if (event.type !== 'tool_call') return false
const toolCallId = getToolCallIdFromEvent(event)
if (!toolCallId) return false
const eventData = getEventData(event)
if (eventData?.partial === true) return false
if (wasToolResultSeen(toolCallId) || wasToolCallSeen(toolCallId)) {
return true
}
markToolCallSeen(toolCallId)
return false
}
/** Dedupe tool_result events by previously-seen result id. */
export function shouldSkipToolResultEvent(event: SSEEvent): boolean {
  if (event.type !== 'tool_result') return false
  const id = getToolCallIdFromEvent(event)
  return id ? wasToolResultSeen(id) : false
}

View File

@@ -1,119 +0,0 @@
/**
* @vitest-environment node
*/
import { loggerMock } from '@sim/testing'
import { beforeEach, describe, expect, it, vi } from 'vitest'
vi.mock('@sim/logger', () => loggerMock)
// One sorted-set member as stored by the stub: `score` is the numeric event
// id, `value` the serialized entry JSON.
type StoredEntry = { score: number; value: string }
// In-memory Redis stand-in implementing only the commands the stream buffer
// uses: eval (append Lua script), incrby (id reservation), zrangebyscore
// (replay reads), and a chainable pipeline for batched writes.
const createRedisStub = () => {
  const events = new Map<string, StoredEntry[]>()
  const counters = new Map<string, number>()
  // Mimics ZRANGEBYSCORE: inclusive score window, ascending score order.
  const readEntries = (key: string, min: number, max: number) => {
    const list = events.get(key) || []
    return list
      .filter((entry) => entry.score >= min && entry.score <= max)
      .sort((a, b) => a.score - b.score)
      .map((entry) => entry.value)
  }
  return {
    del: vi.fn().mockResolvedValue(1),
    hset: vi.fn().mockResolvedValue(1),
    hgetall: vi.fn().mockResolvedValue({}),
    expire: vi.fn().mockResolvedValue(1),
    // Emulates the append script: bump the per-stream sequence counter, wrap
    // the event with its allocated id, and store it in the sorted set.
    eval: vi
      .fn()
      .mockImplementation(
        (
          _lua: string,
          _keysCount: number,
          seqKey: string,
          eventsKey: string,
          _ttl: number,
          _limit: number,
          streamId: string,
          eventJson: string
        ) => {
          const current = counters.get(seqKey) || 0
          const next = current + 1
          counters.set(seqKey, next)
          const entry = JSON.stringify({ eventId: next, streamId, event: JSON.parse(eventJson) })
          const list = events.get(eventsKey) || []
          list.push({ score: next, value: entry })
          events.set(eventsKey, list)
          return next
        }
      ),
    // Batch id reservation used by the stream event writer.
    incrby: vi.fn().mockImplementation((key: string, amount: number) => {
      const current = counters.get(key) || 0
      const next = current + amount
      counters.set(key, next)
      return next
    }),
    zrangebyscore: vi.fn().mockImplementation((key: string, min: string, max: string) => {
      const minVal = Number(min)
      const maxVal = max === '+inf' ? Number.POSITIVE_INFINITY : Number(max)
      return Promise.resolve(readEntries(key, minVal, maxVal))
    }),
    // Chainable pipeline stub; only zadd mutates state, the rest are no-ops.
    pipeline: vi.fn().mockImplementation(() => {
      const api: Record<string, any> = {}
      // zadd receives alternating (score, member) pairs, matching ioredis.
      api.zadd = vi.fn().mockImplementation((key: string, ...args: Array<string | number>) => {
        const list = events.get(key) || []
        for (let i = 0; i < args.length; i += 2) {
          list.push({ score: Number(args[i]), value: String(args[i + 1]) })
        }
        events.set(key, list)
        return api
      })
      api.expire = vi.fn().mockReturnValue(api)
      api.zremrangebyrank = vi.fn().mockReturnValue(api)
      api.exec = vi.fn().mockResolvedValue([])
      return api
    }),
  }
}
let mockRedis: ReturnType<typeof createRedisStub>
vi.mock('@/lib/core/config/redis', () => ({
getRedisClient: () => mockRedis,
}))
import {
appendStreamEvent,
createStreamEventWriter,
readStreamEvents,
} from '@/lib/copilot/orchestrator/stream/buffer'
// Exercises the Redis-backed stream buffer against the in-memory stub.
describe('stream-buffer', () => {
  beforeEach(() => {
    // Fresh stub per test so sequence counters and stored events don't leak
    // between cases.
    mockRedis = createRedisStub()
    vi.clearAllMocks()
  })
  it.concurrent('replays events after a given event id', async () => {
    await appendStreamEvent('stream-1', { type: 'content', data: 'hello' })
    await appendStreamEvent('stream-1', { type: 'content', data: 'world' })
    const allEvents = await readStreamEvents('stream-1', 0)
    expect(allEvents.map((entry) => entry.event.data)).toEqual(['hello', 'world'])
    // Reading after event id 1 must skip the first event.
    const replayed = await readStreamEvents('stream-1', 1)
    expect(replayed.map((entry) => entry.event.data)).toEqual(['world'])
  })
  it.concurrent('flushes buffered events for resume', async () => {
    const writer = createStreamEventWriter('stream-2')
    await writer.write({ type: 'content', data: 'a' })
    await writer.write({ type: 'content', data: 'b' })
    // Writes are batched in memory; flush must persist them for replay.
    await writer.flush()
    const events = await readStreamEvents('stream-2', 0)
    expect(events.map((entry) => entry.event.data)).toEqual(['a', 'b'])
  })
})

View File

@@ -1,322 +0,0 @@
import { createLogger } from '@sim/logger'
import { REDIS_COPILOT_STREAM_PREFIX } from '@/lib/copilot/constants'
import { env } from '@/lib/core/config/env'
import { getRedisClient } from '@/lib/core/config/redis'
const logger = createLogger('CopilotStreamBuffer')
const STREAM_DEFAULTS = {
ttlSeconds: 60 * 60,
eventLimit: 5000,
reserveBatch: 200,
flushIntervalMs: 15,
flushMaxBatch: 200,
}
export type StreamBufferConfig = {
ttlSeconds: number
eventLimit: number
reserveBatch: number
flushIntervalMs: number
flushMaxBatch: number
}
/**
 * Parse a numeric config value with a fallback.
 *
 * Accepts finite numbers as-is and numeric strings; anything else (undefined,
 * NaN/Infinity, non-numeric strings) yields the fallback. Blank strings are
 * treated as unset: previously `Number('')` evaluated to 0, so an empty env
 * var silently produced 0 (e.g. a zero-second TTL) instead of the default.
 */
const parseNumber = (value: number | string | undefined, fallback: number): number => {
  if (typeof value === 'number' && Number.isFinite(value)) return value
  if (typeof value !== 'string' || value.trim() === '') return fallback
  const parsed = Number(value)
  return Number.isFinite(parsed) ? parsed : fallback
}
/**
 * Resolve stream buffer tuning from environment variables, falling back to
 * STREAM_DEFAULTS for anything unset or non-numeric.
 */
export function getStreamBufferConfig(): StreamBufferConfig {
  const defaults = STREAM_DEFAULTS
  return {
    ttlSeconds: parseNumber(env.COPILOT_STREAM_TTL_SECONDS, defaults.ttlSeconds),
    eventLimit: parseNumber(env.COPILOT_STREAM_EVENT_LIMIT, defaults.eventLimit),
    reserveBatch: parseNumber(env.COPILOT_STREAM_RESERVE_BATCH, defaults.reserveBatch),
    flushIntervalMs: parseNumber(env.COPILOT_STREAM_FLUSH_INTERVAL_MS, defaults.flushIntervalMs),
    flushMaxBatch: parseNumber(env.COPILOT_STREAM_FLUSH_MAX_BATCH, defaults.flushMaxBatch),
  }
}
const APPEND_STREAM_EVENT_LUA = `
local seqKey = KEYS[1]
local eventsKey = KEYS[2]
local ttl = tonumber(ARGV[1])
local limit = tonumber(ARGV[2])
local streamId = ARGV[3]
local eventJson = ARGV[4]
local id = redis.call('INCR', seqKey)
local entry = '{"eventId":' .. id .. ',"streamId":' .. cjson.encode(streamId) .. ',"event":' .. eventJson .. '}'
redis.call('ZADD', eventsKey, id, entry)
redis.call('EXPIRE', eventsKey, ttl)
redis.call('EXPIRE', seqKey, ttl)
if limit > 0 then
redis.call('ZREMRANGEBYRANK', eventsKey, 0, -limit-1)
end
return id
`
/** Base Redis key namespace for one stream's buffered state. */
function getStreamKeyPrefix(streamId: string) {
  return `${REDIS_COPILOT_STREAM_PREFIX}${streamId}`
}
/** Sorted set of buffered events, scored by the monotonically increasing event id. */
function getEventsKey(streamId: string) {
  return `${getStreamKeyPrefix(streamId)}:events`
}
/** Counter used to allocate event ids for the stream. */
function getSeqKey(streamId: string) {
  return `${getStreamKeyPrefix(streamId)}:seq`
}
/** Hash holding stream status metadata (see StreamMeta). */
function getMetaKey(streamId: string) {
  return `${getStreamKeyPrefix(streamId)}:meta`
}
export type StreamStatus = 'active' | 'complete' | 'cancelled' | 'error'
export type StreamMeta = {
status: StreamStatus
userId?: string
executionId?: string
runId?: string
updatedAt?: string
error?: string
}
export type StreamEventEntry = {
eventId: number
streamId: string
event: Record<string, unknown>
}
export type StreamEventWriter = {
write: (event: Record<string, unknown>) => Promise<StreamEventEntry>
flush: () => Promise<void>
close: () => Promise<void>
}
/**
 * Delete all Redis keys (events, sequence, meta) for a stream.
 * Best-effort: failures are logged and swallowed; no-op when Redis is absent.
 */
export async function resetStreamBuffer(streamId: string): Promise<void> {
  const redis = getRedisClient()
  if (!redis) return
  const keys = [getEventsKey(streamId), getSeqKey(streamId), getMetaKey(streamId)]
  try {
    await redis.del(...keys)
  } catch (error) {
    logger.warn('Failed to reset stream buffer', {
      streamId,
      error: error instanceof Error ? error.message : String(error),
    })
  }
}
export async function setStreamMeta(streamId: string, meta: StreamMeta): Promise<void> {
const redis = getRedisClient()
if (!redis) return
try {
const config = getStreamBufferConfig()
const payload: Record<string, string> = {
status: meta.status,
updatedAt: meta.updatedAt || new Date().toISOString(),
}
if (meta.userId) payload.userId = meta.userId
if (meta.executionId) payload.executionId = meta.executionId
if (meta.runId) payload.runId = meta.runId
if (meta.error) payload.error = meta.error
await redis.hset(getMetaKey(streamId), payload)
await redis.expire(getMetaKey(streamId), config.ttlSeconds)
} catch (error) {
logger.warn('Failed to update stream meta', {
streamId,
error: error instanceof Error ? error.message : String(error),
})
}
}
/**
 * Read the stream's metadata hash.
 * Returns null when the hash is absent/empty, Redis is unavailable, or the
 * read fails (failures are logged).
 */
export async function getStreamMeta(streamId: string): Promise<StreamMeta | null> {
  const redis = getRedisClient()
  if (!redis) return null
  try {
    const fields = await redis.hgetall(getMetaKey(streamId))
    if (!fields || Object.keys(fields).length === 0) return null
    return fields as StreamMeta
  } catch (error) {
    logger.warn('Failed to read stream meta', {
      streamId,
      error: error instanceof Error ? error.message : String(error),
    })
    return null
  }
}
/**
 * Atomically append one event via the Lua script: allocates the next id,
 * stores the entry, refreshes TTLs, and trims the set to the event limit.
 * Returns an entry with eventId 0 when Redis is unavailable or the append
 * fails (failures are logged).
 */
export async function appendStreamEvent(
  streamId: string,
  event: Record<string, unknown>
): Promise<StreamEventEntry> {
  const redis = getRedisClient()
  if (!redis) {
    return { eventId: 0, streamId, event }
  }
  try {
    const config = getStreamBufferConfig()
    const rawId = await redis.eval(
      APPEND_STREAM_EVENT_LUA,
      2,
      getSeqKey(streamId),
      getEventsKey(streamId),
      config.ttlSeconds,
      config.eventLimit,
      streamId,
      JSON.stringify(event)
    )
    // ioredis may hand back the Lua integer as a number or string.
    const eventId = typeof rawId === 'number' ? rawId : Number(rawId)
    return { eventId, streamId, event }
  } catch (error) {
    logger.warn('Failed to append stream event', {
      streamId,
      error: error instanceof Error ? error.message : String(error),
    })
    return { eventId: 0, streamId, event }
  }
}
/**
 * Create a batching writer for one stream.
 *
 * Events are assigned ids from ranges pre-reserved via INCRBY (reserveBatch
 * at a time), buffered in memory, and flushed to the sorted set either on a
 * short timer (flushIntervalMs) or when the buffer reaches flushMaxBatch.
 * On flush failure the batch is re-queued ahead of newer events, bounded by
 * eventLimit. Without Redis, a no-op writer is returned (eventId 0).
 */
export function createStreamEventWriter(streamId: string): StreamEventWriter {
  const redis = getRedisClient()
  if (!redis) {
    return {
      write: async (event) => ({ eventId: 0, streamId, event }),
      flush: async () => {},
      close: async () => {},
    }
  }
  const config = getStreamBufferConfig()
  // Events written but not yet persisted to Redis.
  let pending: StreamEventEntry[] = []
  // Next id to hand out, and the top of the currently reserved id range.
  // 0 means "no range reserved yet".
  let nextEventId = 0
  let maxReservedId = 0
  let flushTimer: ReturnType<typeof setTimeout> | null = null
  // Arm the flush timer if it isn't already running.
  const scheduleFlush = () => {
    if (flushTimer) return
    flushTimer = setTimeout(() => {
      flushTimer = null
      void flush()
    }, config.flushIntervalMs)
  }
  // Reserve a block of ids by bumping the shared sequence counter once.
  const reserveIds = async (minCount: number) => {
    const reserveCount = Math.max(config.reserveBatch, minCount)
    const newMax = await redis.incrby(getSeqKey(streamId), reserveCount)
    const startId = newMax - reserveCount + 1
    if (nextEventId === 0 || nextEventId > maxReservedId) {
      nextEventId = startId
      maxReservedId = newMax
    }
  }
  // Single in-flight flush; concurrent callers await the same promise.
  let flushPromise: Promise<void> | null = null
  let closed = false
  const doFlush = async () => {
    if (pending.length === 0) return
    // Take ownership of the current batch; new writes go into a fresh array.
    const batch = pending
    pending = []
    try {
      const key = getEventsKey(streamId)
      // Alternating (score, member) args for a single ZADD call.
      const zaddArgs: (string | number)[] = []
      for (const entry of batch) {
        zaddArgs.push(entry.eventId, JSON.stringify(entry))
      }
      const pipeline = redis.pipeline()
      pipeline.zadd(key, ...(zaddArgs as [number, string]))
      pipeline.expire(key, config.ttlSeconds)
      pipeline.expire(getSeqKey(streamId), config.ttlSeconds)
      pipeline.zremrangebyrank(key, 0, -config.eventLimit - 1)
      await pipeline.exec()
    } catch (error) {
      logger.warn('Failed to flush stream events', {
        streamId,
        error: error instanceof Error ? error.message : String(error),
      })
      // Re-queue the failed batch ahead of anything written meanwhile,
      // keeping event-id order intact for the next attempt.
      pending = batch.concat(pending)
      if (pending.length > config.eventLimit) {
        const dropped = pending.length - config.eventLimit
        pending = pending.slice(-config.eventLimit)
        logger.warn('Dropped oldest pending stream events due to sustained Redis failure', {
          streamId,
          dropped,
          remaining: pending.length,
        })
      }
    }
  }
  const flush = async () => {
    // Coalesce: if a flush is already running, just wait for it.
    if (flushPromise) {
      await flushPromise
      return
    }
    flushPromise = doFlush()
    try {
      await flushPromise
    } finally {
      flushPromise = null
      // Anything written (or re-queued) during the flush gets a new timer.
      if (pending.length > 0) scheduleFlush()
    }
  }
  const write = async (event: Record<string, unknown>) => {
    if (closed) return { eventId: 0, streamId, event }
    // Reserve a fresh id range when none is active or the current one is spent.
    if (nextEventId === 0 || nextEventId > maxReservedId) {
      await reserveIds(1)
    }
    const eventId = nextEventId++
    const entry: StreamEventEntry = { eventId, streamId, event }
    pending.push(entry)
    if (pending.length >= config.flushMaxBatch) {
      await flush()
    } else {
      scheduleFlush()
    }
    return entry
  }
  const close = async () => {
    closed = true
    if (flushTimer) {
      clearTimeout(flushTimer)
      flushTimer = null
    }
    // Final drain so no buffered events are lost on shutdown.
    await flush()
  }
  return { write, flush, close }
}
/**
 * Read buffered events with eventId > afterEventId, in ascending id order.
 * Malformed/falsy entries are dropped; returns [] without Redis or on failure.
 */
export async function readStreamEvents(
  streamId: string,
  afterEventId: number
): Promise<StreamEventEntry[]> {
  const redis = getRedisClient()
  if (!redis) return []
  try {
    const raw = await redis.zrangebyscore(getEventsKey(streamId), afterEventId + 1, '+inf')
    const entries: StreamEventEntry[] = []
    for (const item of raw) {
      try {
        const parsed = JSON.parse(item)
        if (parsed) entries.push(parsed as StreamEventEntry)
      } catch {
        // Skip corrupt JSON entries rather than failing the whole read.
      }
    }
    return entries
  } catch (error) {
    logger.warn('Failed to read stream events', {
      streamId,
      error: error instanceof Error ? error.message : String(error),
    })
    return []
  }
}

View File

@@ -1,264 +0,0 @@
import { createLogger } from '@sim/logger'
import { getHighestPrioritySubscription } from '@/lib/billing/core/plan'
import { isPaid } from '@/lib/billing/plan-helpers'
import { ORCHESTRATION_TIMEOUT_MS } from '@/lib/copilot/constants'
import {
handleSubagentRouting,
sseHandlers,
subAgentHandlers,
} from '@/lib/copilot/orchestrator/sse/handlers'
import { parseSSEStream } from '@/lib/copilot/orchestrator/sse/parser'
import {
normalizeSseEvent,
shouldSkipToolCallEvent,
shouldSkipToolResultEvent,
} from '@/lib/copilot/orchestrator/sse/utils'
import type {
ExecutionContext,
OrchestratorOptions,
SSEEvent,
StreamingContext,
ToolCallSummary,
} from '@/lib/copilot/orchestrator/types'
const logger = createLogger('CopilotStreamCore')
/**
* Options for the shared stream processing loop.
*/
export interface StreamLoopOptions extends OrchestratorOptions {
/**
* Called for each normalized event BEFORE standard handler dispatch.
* Return true to skip the default handler for this event.
*/
onBeforeDispatch?: (event: SSEEvent, context: StreamingContext) => boolean | undefined
}
/**
 * Create a fresh StreamingContext.
 *
 * All accumulators start empty; a new messageId is generated unless one is
 * supplied through `overrides`, which win over every default.
 */
export function createStreamingContext(overrides?: Partial<StreamingContext>): StreamingContext {
  const base: StreamingContext = {
    chatId: undefined,
    executionId: undefined,
    runId: undefined,
    messageId: crypto.randomUUID(),
    accumulatedContent: '',
    contentBlocks: [],
    toolCalls: new Map(),
    pendingToolPromises: new Map(),
    currentThinkingBlock: null,
    isInThinkingBlock: false,
    subAgentParentToolCallId: undefined,
    subAgentParentStack: [],
    subAgentContent: {},
    subAgentToolCalls: {},
    pendingContent: '',
    streamComplete: false,
    wasAborted: false,
    errors: [],
  }
  return { ...base, ...overrides }
}
/**
 * Run the SSE stream processing loop.
 *
 * Handles: fetch -> parse -> normalize -> dedupe -> subagent routing -> handler dispatch.
 * Callers provide the fetch URL/options and can intercept events via onBeforeDispatch.
 *
 * @param fetchUrl - Copilot backend endpoint to stream from.
 * @param fetchOptions - Request init forwarded to fetch (abort signal is attached here).
 * @param context - Mutable streaming state updated by the handlers.
 * @param execContext - Execution identity (user/chat/workflow) passed to handlers.
 * @param options - Loop options; `timeout` defaults to ORCHESTRATION_TIMEOUT_MS.
 * @throws Error when the backend responds non-OK (other than 402) or with no body.
 */
export async function runStreamLoop(
  fetchUrl: string,
  fetchOptions: RequestInit,
  context: StreamingContext,
  execContext: ExecutionContext,
  options: StreamLoopOptions
): Promise<void> {
  const { timeout = ORCHESTRATION_TIMEOUT_MS, abortSignal } = options
  const response = await fetch(fetchUrl, {
    ...fetchOptions,
    signal: abortSignal,
  })
  if (!response.ok) {
    const errorText = await response.text().catch(() => '')
    // 402 (usage limit) is converted into a synthetic upgrade prompt instead
    // of throwing, so the client sees actionable content plus a clean `done`.
    if (response.status === 402) {
      let action = 'upgrade_plan'
      let message = "You've reached your usage limit. Please upgrade your plan to continue."
      try {
        // Paid plans get "increase limit" wording instead of "upgrade plan".
        const sub = await getHighestPrioritySubscription(execContext.userId)
        if (sub && isPaid(sub.plan)) {
          action = 'increase_limit'
          message =
            "You've reached your usage limit for this billing period. Please increase your usage limit to continue."
        }
      } catch {
        // Fall back to upgrade_plan if we can't determine the plan
      }
      const upgradePayload = JSON.stringify({
        reason: 'usage_limit',
        action,
        message,
      })
      const syntheticContent = `<usage_upgrade>${upgradePayload}</usage_upgrade>`
      const syntheticEvents: SSEEvent[] = [
        { type: 'content', data: syntheticContent as unknown as Record<string, unknown> },
        { type: 'done', data: {} },
      ]
      // Run the synthetic events through the same forward + dispatch path
      // as real stream events.
      for (const event of syntheticEvents) {
        try {
          await options.onEvent?.(event)
        } catch {
          // best-effort forwarding
        }
        const handler = sseHandlers[event.type]
        if (handler) {
          await handler(event, context, execContext, options)
        }
        if (context.streamComplete) break
      }
      return
    }
    throw new Error(
      `Copilot backend error (${response.status}): ${errorText || response.statusText}`
    )
  }
  if (!response.body) {
    throw new Error('Copilot backend response missing body')
  }
  const reader = response.body.getReader()
  const decoder = new TextDecoder()
  // Hard timeout: mark the stream complete and cancel the reader so the
  // for-await loop below terminates.
  const timeoutId = setTimeout(() => {
    context.errors.push('Request timed out')
    context.streamComplete = true
    reader.cancel().catch(() => {})
  }, timeout)
  try {
    for await (const event of parseSSEStream(reader, decoder, abortSignal)) {
      if (abortSignal?.aborted) {
        context.wasAborted = true
        await reader.cancel().catch(() => {})
        break
      }
      const normalizedEvent = normalizeSseEvent(event)
      // Skip duplicate tool events — both forwarding AND handler dispatch.
      const shouldSkipToolCall = shouldSkipToolCallEvent(normalizedEvent)
      const shouldSkipToolResult = shouldSkipToolResultEvent(normalizedEvent)
      if (shouldSkipToolCall || shouldSkipToolResult) {
        continue
      }
      // Forward to the caller first; forwarding failures never stop the loop.
      try {
        await options.onEvent?.(normalizedEvent)
      } catch (error) {
        logger.withMetadata({ messageId: context.messageId }).warn('Failed to forward SSE event', {
          type: normalizedEvent.type,
          error: error instanceof Error ? error.message : String(error),
        })
      }
      // Let the caller intercept before standard dispatch.
      if (options.onBeforeDispatch?.(normalizedEvent, context)) {
        if (context.streamComplete) break
        continue
      }
      // Standard subagent start/end handling (stack-based for nested agents).
      if (normalizedEvent.type === 'subagent_start') {
        const eventData = normalizedEvent.data as Record<string, unknown> | undefined
        const toolCallId = eventData?.tool_call_id as string | undefined
        const subagentName = normalizedEvent.subagent || (eventData?.agent as string | undefined)
        if (toolCallId) {
          // Push onto the parent stack and reset this subagent's accumulators.
          context.subAgentParentStack.push(toolCallId)
          context.subAgentParentToolCallId = toolCallId
          context.subAgentContent[toolCallId] = ''
          context.subAgentToolCalls[toolCallId] = []
        }
        if (subagentName) {
          context.contentBlocks.push({
            type: 'subagent',
            content: subagentName,
            timestamp: Date.now(),
          })
        }
        continue
      }
      if (normalizedEvent.type === 'subagent_end') {
        if (context.subAgentParentStack.length > 0) {
          context.subAgentParentStack.pop()
        } else {
          logger
            .withMetadata({ messageId: context.messageId })
            .warn('subagent_end without matching subagent_start')
        }
        // Current parent becomes whatever remains on top of the stack.
        context.subAgentParentToolCallId =
          context.subAgentParentStack.length > 0
            ? context.subAgentParentStack[context.subAgentParentStack.length - 1]
            : undefined
        continue
      }
      // Subagent event routing.
      if (handleSubagentRouting(normalizedEvent, context)) {
        const handler = subAgentHandlers[normalizedEvent.type]
        if (handler) {
          await handler(normalizedEvent, context, execContext, options)
        }
        if (context.streamComplete) break
        continue
      }
      // Main event handler dispatch.
      const handler = sseHandlers[normalizedEvent.type]
      if (handler) {
        await handler(normalizedEvent, context, execContext, options)
      }
      if (context.streamComplete) break
    }
  } finally {
    if (abortSignal?.aborted) {
      context.wasAborted = true
      await reader.cancel().catch(() => {})
    }
    clearTimeout(timeoutId)
  }
}
/**
 * Build a ToolCallSummary array from the streaming context.
 *
 * Status derives from the recorded result when present (its success flag
 * wins); a pending/executing call that captured an error is reported as
 * 'error'. Duration is only computed when both timestamps exist.
 */
export function buildToolCallSummaries(context: StreamingContext): ToolCallSummary[] {
  const summaries: ToolCallSummary[] = []
  for (const toolCall of context.toolCalls.values()) {
    let status = toolCall.status
    if (toolCall.result && toolCall.result.success !== undefined) {
      status = toolCall.result.success ? 'success' : 'error'
    } else if (toolCall.error && (status === 'pending' || status === 'executing')) {
      status = 'error'
    }
    const { startTime, endTime } = toolCall
    summaries.push({
      id: toolCall.id,
      name: toolCall.name,
      status,
      params: toolCall.params,
      result: toolCall.result?.output,
      error: toolCall.error,
      durationMs: endTime && startTime ? endTime - startTime : undefined,
    })
  }
  return summaries
}

View File

@@ -1,2 +0,0 @@
export * from './deploy'
export * from './manage'

File diff suppressed because it is too large Load Diff

View File

@@ -1,285 +0,0 @@
import { db } from '@sim/db'
import { account } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import type {
ExecutionContext,
ToolCallResult,
ToolCallState,
} from '@/lib/copilot/orchestrator/types'
import { isHosted } from '@/lib/core/config/feature-flags'
import { generateRequestId } from '@/lib/core/utils/request'
import { getCredentialActorContext } from '@/lib/credentials/access'
import { getAccessibleOAuthCredentials } from '@/lib/credentials/environment'
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
import { getServiceAccountProviderForProviderId } from '@/lib/oauth/utils'
import { getTableById, queryRows } from '@/lib/table/service'
import {
downloadWorkspaceFile,
findWorkspaceFileRecord,
getSandboxWorkspaceFilePath,
listWorkspaceFiles,
} from '@/lib/uploads/contexts/workspace/workspace-file-manager'
import { getWorkflowById } from '@/lib/workflows/utils'
import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils'
import { resolveEnvVarReferences } from '@/executor/utils/reference-validation'
import { executeTool } from '@/tools'
import type { ToolConfig } from '@/tools/types'
import { resolveToolId } from '@/tools/utils'
const logger = createLogger('CopilotIntegrationTools')
/**
 * Serialize a single value for a CSV cell.
 * null/undefined become the empty string; values containing a comma, quote,
 * or line break are wrapped in quotes with embedded quotes doubled.
 */
function csvEscapeValue(value: unknown): string {
  if (value == null) return ''
  if (typeof value === 'number' || typeof value === 'boolean') return String(value)
  const text = String(value)
  const needsQuoting = /[",\r\n]/.test(text)
  return needsQuoting ? `"${text.replace(/"/g, '""')}"` : text
}
export async function executeIntegrationToolDirect(
toolCall: ToolCallState,
toolConfig: ToolConfig,
context: ExecutionContext
): Promise<ToolCallResult> {
const { userId, workflowId } = context
const toolName = resolveToolId(toolCall.name)
const toolArgs = toolCall.params || {}
let workspaceId = context.workspaceId
if (!workspaceId && workflowId) {
const wf = await getWorkflowById(workflowId)
workspaceId = wf?.workspaceId ?? undefined
}
const decryptedEnvVars =
// NOTE(review): this excerpt is the tail of a larger async tool-execution
// function; its signature and the `const decryptedEnvVars =` declaration that
// this first line completes are defined above this view. The line below prefers
// env vars already decrypted on the request context, falling back to decrypting
// the user/workspace effective env on demand.
context.decryptedEnvVars || (await getEffectiveDecryptedEnv(userId, workspaceId))
  // Substitute env-var references inside the tool arguments (deep traversal)
  // with their decrypted values before execution.
  const executionParams = resolveEnvVarReferences(toolArgs, decryptedEnvVars, {
    deep: true,
  }) as Record<string, unknown>
  // If the LLM passed a credential/oauthCredential ID directly, verify the user
  // has active credential_member access before proceeding. This prevents
  // unauthorized credential usage even if the agent hallucinated or received
  // a credential ID the user doesn't have access to.
  const suppliedCredentialId = (executionParams.credentialId ||
    executionParams.oauthCredential ||
    executionParams.credential) as string | undefined
  if (suppliedCredentialId) {
    const actorCtx = await getCredentialActorContext(suppliedCredentialId, userId)
    if (!actorCtx.member) {
      // Deny and surface an actionable message; we log the block for auditing.
      logger.warn('Blocked credential use: user lacks credential_member access', {
        credentialId: suppliedCredentialId,
        userId,
        toolName,
      })
      return {
        success: false,
        error: `You do not have access to credential "${suppliedCredentialId}". Ask the credential admin to add you as a member, or connect your own account.`,
      }
    }
  }
  // OAuth-backed tools: resolve a usable credential, then attach either a
  // service-account credential ID or a freshly-refreshed access token.
  if (toolConfig.oauth?.required && toolConfig.oauth.provider) {
    const provider = toolConfig.oauth.provider
    // Determine which credential to use: supplied by the LLM or auto-resolved
    let resolvedCredentialId = suppliedCredentialId
    if (!resolvedCredentialId) {
      // Auto-resolution enumerates credentials accessible in the workspace, so
      // a workspace context is mandatory on this path.
      if (!workspaceId) {
        return {
          success: false,
          error: `Cannot resolve ${provider} credential without a workspace context.`,
        }
      }
      const accessibleCreds = await getAccessibleOAuthCredentials(workspaceId, userId)
      // A provider can also be satisfied by its service-account variant; prefer
      // an exact provider match, then fall back to the service-account provider.
      const saProviderId = getServiceAccountProviderForProviderId(provider)
      const match =
        accessibleCreds.find((c) => c.providerId === provider) ||
        (saProviderId ? accessibleCreds.find((c) => c.providerId === saProviderId) : undefined)
      if (!match) {
        return {
          success: false,
          error: `No accessible ${provider} account found. You either don't have a ${provider} account connected in this workspace, or you don't have access to the existing one. Please connect your own account.`,
        }
      }
      resolvedCredentialId = match.id
    }
    const matchCtx = await getCredentialActorContext(resolvedCredentialId, userId)
    if (matchCtx.credential?.type === 'service_account') {
      // Service accounts are passed by credential ID; the downstream executor
      // resolves them itself (no token is minted here).
      executionParams.oauthCredential = resolvedCredentialId
    } else {
      // Personal OAuth credential: load the linked account row and ensure a
      // usable access token is attached to the execution params.
      const accountId = matchCtx.credential?.accountId
      if (!accountId) {
        return {
          success: false,
          error: `OAuth account for ${provider} not found. Please reconnect your account.`,
        }
      }
      const [acc] = await db.select().from(account).where(eq(account.id, accountId)).limit(1)
      if (!acc) {
        return {
          success: false,
          error: `OAuth account for ${provider} not found. Please reconnect your account.`,
        }
      }
      const requestId = generateRequestId()
      // Refresh the token when needed; a missing token after refresh means the
      // grant is no longer usable and the user must reconnect.
      const { accessToken } = await refreshTokenIfNeeded(requestId, acc, acc.id)
      if (!accessToken) {
        return {
          success: false,
          error: `OAuth token not available for ${provider}. Please reconnect your account.`,
        }
      }
      executionParams.accessToken = accessToken
    }
  }
  // Hosted deployments may supply platform-managed keys for tools that declare
  // hosting support, so the explicit apiKey requirement is waived there.
  const hasHostedKeySupport = isHosted && !!toolConfig.hosting
  if (toolConfig.params?.apiKey?.required && !executionParams.apiKey && !hasHostedKeySupport) {
    return {
      success: false,
      error: `API key not provided for ${toolName}. Use {{YOUR_API_KEY_ENV_VAR}} to reference your environment variable.`,
    }
  }
  // Execution context consumed downstream; enforceCredentialAccess makes the
  // executor re-check credential membership rather than trusting the caller.
  executionParams._context = {
    workflowId,
    workspaceId,
    userId,
    enforceCredentialAccess: true,
  }
  // Special case: the code-execution tool gets decrypted env vars, defaulted
  // runtime settings, and (on hosted) workspace files/tables materialized into
  // the sandbox filesystem.
  if (toolName === 'function_execute') {
    executionParams.envVars = decryptedEnvVars
    executionParams.workflowVariables = {}
    executionParams.blockData = {}
    executionParams.blockNameMapping = {}
    executionParams.language = executionParams.language || 'javascript'
    executionParams.timeout = executionParams.timeout || 30000
    if (isHosted && workspaceId) {
      const sandboxFiles: Array<{ path: string; content: string }> = []
      const MAX_FILE_SIZE = 10 * 1024 * 1024 // 10 MB per individual file
      const MAX_TOTAL_SIZE = 50 * 1024 * 1024 // 50 MB across all sandbox inputs
      // Only text formats are copied into the sandbox; anything else is skipped.
      const TEXT_EXTENSIONS = new Set([
        'csv',
        'json',
        'txt',
        'md',
        'html',
        'xml',
        'tsv',
        'yaml',
        'yml',
      ])
      let totalSize = 0
      // Workspace file references requested by the caller (resolved against the
      // workspace file listing below).
      const inputFileIds = executionParams.inputFiles as string[] | undefined
      if (inputFileIds?.length) {
        const allFiles = await listWorkspaceFiles(workspaceId)
        for (const fileRef of inputFileIds) {
          const record = findWorkspaceFileRecord(allFiles, fileRef)
          if (!record) {
            // Best-effort: unknown refs are logged and skipped, not fatal.
            logger.warn('Sandbox input file not found', { fileRef })
            continue
          }
          const ext = record.name.split('.').pop()?.toLowerCase() ?? ''
          if (!TEXT_EXTENSIONS.has(ext)) {
            logger.warn('Skipping non-text sandbox input file', {
              fileId: record.id,
              fileName: record.name,
              ext,
            })
            continue
          }
          if (record.size > MAX_FILE_SIZE) {
            logger.warn('Sandbox input file exceeds size limit', {
              fileId: record.id,
              fileName: record.name,
              size: record.size,
            })
            continue
          }
          // NOTE(review): the cap is checked against the stored record.size but
          // accumulated from the downloaded buffer length — confirm these agree
          // for all storage backends.
          if (totalSize + record.size > MAX_TOTAL_SIZE) {
            logger.warn('Sandbox input total size limit reached, skipping remaining files')
            break
          }
          const buffer = await downloadWorkspaceFile(record)
          totalSize += buffer.length
          const textContent = buffer.toString('utf-8')
          // Expose each file both at its workspace-derived path and at a flat
          // /home/user/<name> path so user code can use either.
          sandboxFiles.push({
            path: getSandboxWorkspaceFilePath(record),
            content: textContent,
          })
          sandboxFiles.push({
            path: `/home/user/${record.name}`,
            content: textContent,
          })
        }
      }
      // Table references are materialized as CSV files (first line is a
      // `# types:` column-type comment, second line the header row).
      const inputTableIds = executionParams.inputTables as string[] | undefined
      if (inputTableIds?.length) {
        for (const tableId of inputTableIds) {
          const table = await getTableById(tableId)
          if (!table) {
            logger.warn('Sandbox input table not found', { tableId })
            continue
          }
          // Row export is capped at 10000 rows per table.
          const { rows } = await queryRows(tableId, workspaceId, { limit: 10000 }, 'sandbox-input')
          const schema = table.schema as { columns: Array<{ name: string; type?: string }> }
          const cols = schema.columns.map((c) => c.name)
          const typeComment = `# types: ${schema.columns.map((c) => `${c.name}=${c.type || 'string'}`).join(', ')}`
          const csvLines = [typeComment, cols.join(',')]
          for (const row of rows) {
            csvLines.push(
              cols.map((c) => csvEscapeValue((row.data as Record<string, unknown>)[c])).join(',')
            )
          }
          const csvContent = csvLines.join('\n')
          // NOTE(review): csvContent.length counts UTF-16 code units, so the
          // total-size cap is approximate in bytes for non-ASCII data.
          if (totalSize + csvContent.length > MAX_TOTAL_SIZE) {
            logger.warn('Sandbox input total size limit reached, skipping remaining tables')
            break
          }
          totalSize += csvContent.length
          sandboxFiles.push({ path: `/home/user/tables/${tableId}.csv`, content: csvContent })
        }
      }
      if (sandboxFiles.length > 0) {
        executionParams._sandboxFiles = sandboxFiles
        logger.info('Prepared sandbox input files', {
          fileCount: sandboxFiles.length,
          totalSize,
          paths: sandboxFiles.map((f) => f.path),
        })
      }
      // Strip the raw references so the executor only sees the materialized
      // _sandboxFiles, not the original file/table IDs.
      executionParams.inputFiles = undefined
      executionParams.inputTables = undefined
    }
  }
  // Dispatch to the shared tool executor and normalize its result envelope.
  const result = await executeTool(toolName, executionParams)
  return {
    success: result.success,
    output: result.output,
    error: result.error,
  }
}

Some files were not shown because too many files have changed in this diff Show More