Compare commits

..

1 Commit

Author SHA1 Message Date
waleed
031866e07c fix(copilot): persist thinking blocks on page refresh via sendBeacon
- Use navigator.sendBeacon in beforeunload handler to reliably persist
  in-progress messages (including thinking blocks) during page teardown
- Flush batched streaming updates before beacon persistence
- Fall back to sendBeacon in abortMessage when page is unloading
- Fix double-digit ordered list clipping in thinking block (pl-6 → pl-8)
2026-02-10 22:46:39 -08:00
87 changed files with 2415 additions and 8154 deletions

View File

@@ -41,6 +41,9 @@ Diese Tastenkombinationen wechseln zwischen den Panel-Tabs auf der rechten Seite
| Tastenkombination | Aktion | | Tastenkombination | Aktion |
|----------|--------| |----------|--------|
| `C` | Copilot-Tab fokussieren |
| `T` | Toolbar-Tab fokussieren |
| `E` | Editor-Tab fokussieren |
| `Mod` + `F` | Toolbar-Suche fokussieren | | `Mod` + `F` | Toolbar-Suche fokussieren |
## Globale Navigation ## Globale Navigation

View File

@@ -43,6 +43,9 @@ These shortcuts switch between panel tabs on the right side of the canvas.
| Shortcut | Action | | Shortcut | Action |
|----------|--------| |----------|--------|
| `C` | Focus Copilot tab |
| `T` | Focus Toolbar tab |
| `E` | Focus Editor tab |
| `Mod` + `F` | Focus Toolbar search | | `Mod` + `F` | Focus Toolbar search |
## Global Navigation ## Global Navigation

View File

@@ -399,28 +399,6 @@ Create a new custom property (metadata) on a Confluence page.
| ↳ `authorId` | string | Account ID of the version author | | ↳ `authorId` | string | Account ID of the version author |
| ↳ `createdAt` | string | ISO 8601 timestamp of version creation | | ↳ `createdAt` | string | ISO 8601 timestamp of version creation |
### `confluence_delete_page_property`
Delete a content property from a Confluence page by its property ID.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) |
| `pageId` | string | Yes | The ID of the page containing the property |
| `propertyId` | string | Yes | The ID of the property to delete |
| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `ts` | string | ISO 8601 timestamp of the operation |
| `pageId` | string | ID of the page |
| `propertyId` | string | ID of the deleted property |
| `deleted` | boolean | Deletion status |
### `confluence_search` ### `confluence_search`
Search for content across Confluence pages, blog posts, and other content. Search for content across Confluence pages, blog posts, and other content.
@@ -894,90 +872,6 @@ Add a label to a Confluence page for organization and categorization.
| `labelName` | string | Name of the added label | | `labelName` | string | Name of the added label |
| `labelId` | string | ID of the added label | | `labelId` | string | ID of the added label |
### `confluence_delete_label`
Remove a label from a Confluence page.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) |
| `pageId` | string | Yes | Confluence page ID to remove the label from |
| `labelName` | string | Yes | Name of the label to remove |
| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `ts` | string | ISO 8601 timestamp of the operation |
| `pageId` | string | Page ID the label was removed from |
| `labelName` | string | Name of the removed label |
| `deleted` | boolean | Deletion status |
### `confluence_get_pages_by_label`
Retrieve all pages that have a specific label applied.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) |
| `labelId` | string | Yes | The ID of the label to get pages for |
| `limit` | number | No | Maximum number of pages to return \(default: 50, max: 250\) |
| `cursor` | string | No | Pagination cursor from previous response |
| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `ts` | string | ISO 8601 timestamp of the operation |
| `labelId` | string | ID of the label |
| `pages` | array | Array of pages with this label |
| ↳ `id` | string | Unique page identifier |
| ↳ `title` | string | Page title |
| ↳ `status` | string | Page status \(e.g., current, archived, trashed, draft\) |
| ↳ `spaceId` | string | ID of the space containing the page |
| ↳ `parentId` | string | ID of the parent page \(null if top-level\) |
| ↳ `authorId` | string | Account ID of the page author |
| ↳ `createdAt` | string | ISO 8601 timestamp when the page was created |
| ↳ `version` | object | Page version information |
| ↳ `number` | number | Version number |
| ↳ `message` | string | Version message |
| ↳ `minorEdit` | boolean | Whether this is a minor edit |
| ↳ `authorId` | string | Account ID of the version author |
| ↳ `createdAt` | string | ISO 8601 timestamp of version creation |
| `nextCursor` | string | Cursor for fetching the next page of results |
### `confluence_list_space_labels`
List all labels associated with a Confluence space.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) |
| `spaceId` | string | Yes | The ID of the Confluence space to list labels from |
| `limit` | number | No | Maximum number of labels to return \(default: 25, max: 250\) |
| `cursor` | string | No | Pagination cursor from previous response |
| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `ts` | string | ISO 8601 timestamp of the operation |
| `spaceId` | string | ID of the space |
| `labels` | array | Array of labels on the space |
| ↳ `id` | string | Unique label identifier |
| ↳ `name` | string | Label name |
| ↳ `prefix` | string | Label prefix/type \(e.g., global, my, team\) |
| `nextCursor` | string | Cursor for fetching the next page of results |
### `confluence_get_space` ### `confluence_get_space`
Get details about a specific Confluence space. Get details about a specific Confluence space.

View File

@@ -42,6 +42,9 @@ Estos atajos cambian entre las pestañas del panel en el lado derecho del lienzo
| Atajo | Acción | | Atajo | Acción |
|----------|--------| |----------|--------|
| `C` | Enfocar pestaña Copilot |
| `T` | Enfocar pestaña Barra de herramientas |
| `E` | Enfocar pestaña Editor |
| `Mod` + `F` | Enfocar búsqueda de Barra de herramientas | | `Mod` + `F` | Enfocar búsqueda de Barra de herramientas |
## Navegación global ## Navegación global

View File

@@ -42,6 +42,9 @@ Ces raccourcis permettent de basculer entre les onglets du panneau sur le côté
| Raccourci | Action | | Raccourci | Action |
|----------|--------| |----------|--------|
| `C` | Activer l'onglet Copilot |
| `T` | Activer l'onglet Barre d'outils |
| `E` | Activer l'onglet Éditeur |
| `Mod` + `F` | Activer la recherche dans la barre d'outils | | `Mod` + `F` | Activer la recherche dans la barre d'outils |
## Navigation globale ## Navigation globale

View File

@@ -41,6 +41,9 @@ import { Callout } from 'fumadocs-ui/components/callout'
| ショートカット | 操作 | | ショートカット | 操作 |
|----------|--------| |----------|--------|
| `C` | Copilotタブにフォーカス |
| `T` | Toolbarタブにフォーカス |
| `E` | Editorタブにフォーカス |
| `Mod` + `F` | Toolbar検索にフォーカス | | `Mod` + `F` | Toolbar検索にフォーカス |
## グローバルナビゲーション ## グローバルナビゲーション

View File

@@ -41,6 +41,9 @@ import { Callout } from 'fumadocs-ui/components/callout'
| 快捷键 | 操作 | | 快捷键 | 操作 |
|----------|--------| |----------|--------|
| `C` | 聚焦 Copilot 标签页 |
| `T` | 聚焦 Toolbar 标签页 |
| `E` | 聚焦 Editor 标签页 |
| `Mod` + `F` | 聚焦 Toolbar 搜索 | | `Mod` + `F` | 聚焦 Toolbar 搜索 |
## 全局导航 ## 全局导航

View File

@@ -1,81 +1,145 @@
import { db } from '@sim/db'
import { settings } from '@sim/db/schema'
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' import { getSession } from '@/lib/auth'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import { env } from '@/lib/core/config/env'
const logger = createLogger('CopilotAutoAllowedToolsAPI') const logger = createLogger('CopilotAutoAllowedToolsAPI')
function copilotHeaders(): HeadersInit { /**
const headers: Record<string, string> = { * GET - Fetch user's auto-allowed integration tools
'Content-Type': 'application/json', */
} export async function GET() {
if (env.COPILOT_API_KEY) { try {
headers['x-api-key'] = env.COPILOT_API_KEY const session = await getSession()
}
return headers
}
export async function DELETE(request: NextRequest) { if (!session?.user?.id) {
const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly()
if (!isAuthenticated || !userId) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
} }
const toolIdFromQuery = new URL(request.url).searchParams.get('toolId') || undefined const userId = session.user.id
const toolIdFromBody = await request
.json() const [userSettings] = await db
.then((body) => (typeof body?.toolId === 'string' ? body.toolId : undefined)) .select()
.catch(() => undefined) .from(settings)
const toolId = toolIdFromBody || toolIdFromQuery .where(eq(settings.userId, userId))
if (!toolId) { .limit(1)
return NextResponse.json({ error: 'toolId is required' }, { status: 400 })
if (userSettings) {
const autoAllowedTools = (userSettings.copilotAutoAllowedTools as string[]) || []
return NextResponse.json({ autoAllowedTools })
} }
try { await db.insert(settings).values({
const res = await fetch(`${SIM_AGENT_API_URL}/api/tool-preferences/auto-allowed`, { id: userId,
method: 'DELETE',
headers: copilotHeaders(),
body: JSON.stringify({
userId, userId,
toolId, copilotAutoAllowedTools: [],
}),
}) })
const payload = await res.json().catch(() => ({})) return NextResponse.json({ autoAllowedTools: [] })
if (!res.ok) {
logger.warn('Failed to remove auto-allowed tool via copilot backend', {
status: res.status,
userId,
toolId,
})
return NextResponse.json(
{
success: false,
error: payload?.error || 'Failed to remove auto-allowed tool',
autoAllowedTools: [],
},
{ status: res.status }
)
}
return NextResponse.json({
success: true,
autoAllowedTools: Array.isArray(payload?.autoAllowedTools) ? payload.autoAllowedTools : [],
})
} catch (error) { } catch (error) {
logger.error('Error removing auto-allowed tool', { logger.error('Failed to fetch auto-allowed tools', { error })
userId, return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
toolId, }
error: error instanceof Error ? error.message : String(error), }
/**
* POST - Add a tool to the auto-allowed list
*/
export async function POST(request: NextRequest) {
try {
const session = await getSession()
if (!session?.user?.id) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const userId = session.user.id
const body = await request.json()
if (!body.toolId || typeof body.toolId !== 'string') {
return NextResponse.json({ error: 'toolId must be a string' }, { status: 400 })
}
const toolId = body.toolId
const [existing] = await db.select().from(settings).where(eq(settings.userId, userId)).limit(1)
if (existing) {
const currentTools = (existing.copilotAutoAllowedTools as string[]) || []
if (!currentTools.includes(toolId)) {
const updatedTools = [...currentTools, toolId]
await db
.update(settings)
.set({
copilotAutoAllowedTools: updatedTools,
updatedAt: new Date(),
}) })
return NextResponse.json( .where(eq(settings.userId, userId))
{
success: false, logger.info('Added tool to auto-allowed list', { userId, toolId })
error: 'Failed to remove auto-allowed tool', return NextResponse.json({ success: true, autoAllowedTools: updatedTools })
autoAllowedTools: [], }
},
{ status: 500 } return NextResponse.json({ success: true, autoAllowedTools: currentTools })
) }
await db.insert(settings).values({
id: userId,
userId,
copilotAutoAllowedTools: [toolId],
})
logger.info('Created settings and added tool to auto-allowed list', { userId, toolId })
return NextResponse.json({ success: true, autoAllowedTools: [toolId] })
} catch (error) {
logger.error('Failed to add auto-allowed tool', { error })
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
}
}
/**
* DELETE - Remove a tool from the auto-allowed list
*/
export async function DELETE(request: NextRequest) {
try {
const session = await getSession()
if (!session?.user?.id) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const userId = session.user.id
const { searchParams } = new URL(request.url)
const toolId = searchParams.get('toolId')
if (!toolId) {
return NextResponse.json({ error: 'toolId query parameter is required' }, { status: 400 })
}
const [existing] = await db.select().from(settings).where(eq(settings.userId, userId)).limit(1)
if (existing) {
const currentTools = (existing.copilotAutoAllowedTools as string[]) || []
const updatedTools = currentTools.filter((t) => t !== toolId)
await db
.update(settings)
.set({
copilotAutoAllowedTools: updatedTools,
updatedAt: new Date(),
})
.where(eq(settings.userId, userId))
logger.info('Removed tool from auto-allowed list', { userId, toolId })
return NextResponse.json({ success: true, autoAllowedTools: updatedTools })
}
return NextResponse.json({ success: true, autoAllowedTools: [] })
} catch (error) {
logger.error('Failed to remove auto-allowed tool', { error })
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
} }
} }

View File

@@ -28,24 +28,13 @@ import { resolveWorkflowIdForUser } from '@/lib/workflows/utils'
const logger = createLogger('CopilotChatAPI') const logger = createLogger('CopilotChatAPI')
function truncateForLog(value: string, maxLength = 120): string {
if (!value || maxLength <= 0) return ''
return value.length <= maxLength ? value : `${value.slice(0, maxLength)}...`
}
async function requestChatTitleFromCopilot(params: { async function requestChatTitleFromCopilot(params: {
message: string message: string
model: string model: string
provider?: string provider?: string
}): Promise<string | null> { }): Promise<string | null> {
const { message, model, provider } = params const { message, model, provider } = params
if (!message || !model) { if (!message || !model) return null
logger.warn('Skipping chat title request because message/model is missing', {
hasMessage: !!message,
hasModel: !!model,
})
return null
}
const headers: Record<string, string> = { const headers: Record<string, string> = {
'Content-Type': 'application/json', 'Content-Type': 'application/json',
@@ -55,13 +44,6 @@ async function requestChatTitleFromCopilot(params: {
} }
try { try {
logger.info('Requesting chat title from copilot backend', {
model,
provider: provider || null,
messageLength: message.length,
messagePreview: truncateForLog(message),
})
const response = await fetch(`${SIM_AGENT_API_URL}/api/generate-chat-title`, { const response = await fetch(`${SIM_AGENT_API_URL}/api/generate-chat-title`, {
method: 'POST', method: 'POST',
headers, headers,
@@ -81,32 +63,10 @@ async function requestChatTitleFromCopilot(params: {
return null return null
} }
const rawTitle = typeof payload?.title === 'string' ? payload.title : '' const title = typeof payload?.title === 'string' ? payload.title.trim() : ''
const title = rawTitle.trim()
logger.info('Received chat title response from copilot backend', {
status: response.status,
hasRawTitle: !!rawTitle,
rawTitle,
normalizedTitle: title,
messagePreview: truncateForLog(message),
})
if (!title) {
logger.warn('Copilot backend returned empty chat title', {
payload,
model,
provider: provider || null,
})
}
return title || null return title || null
} catch (error) { } catch (error) {
logger.error('Error generating chat title:', { logger.error('Error generating chat title:', error)
error,
model,
provider: provider || null,
messagePreview: truncateForLog(message),
})
return null return null
} }
} }
@@ -153,7 +113,6 @@ const ChatMessageSchema = z.object({
workflowId: z.string().optional(), workflowId: z.string().optional(),
knowledgeId: z.string().optional(), knowledgeId: z.string().optional(),
blockId: z.string().optional(), blockId: z.string().optional(),
blockIds: z.array(z.string()).optional(),
templateId: z.string().optional(), templateId: z.string().optional(),
executionId: z.string().optional(), executionId: z.string().optional(),
// For workflow_block, provide both workflowId and blockId // For workflow_block, provide both workflowId and blockId
@@ -200,20 +159,6 @@ export async function POST(req: NextRequest) {
commands, commands,
} = ChatMessageSchema.parse(body) } = ChatMessageSchema.parse(body)
const normalizedContexts = Array.isArray(contexts)
? contexts.map((ctx) => {
if (ctx.kind !== 'blocks') return ctx
if (Array.isArray(ctx.blockIds) && ctx.blockIds.length > 0) return ctx
if (ctx.blockId) {
return {
...ctx,
blockIds: [ctx.blockId],
}
}
return ctx
})
: contexts
// Resolve workflowId - if not provided, use first workflow or find by name // Resolve workflowId - if not provided, use first workflow or find by name
const resolved = await resolveWorkflowIdForUser( const resolved = await resolveWorkflowIdForUser(
authenticatedUserId, authenticatedUserId,
@@ -231,10 +176,10 @@ export async function POST(req: NextRequest) {
const userMessageIdToUse = userMessageId || crypto.randomUUID() const userMessageIdToUse = userMessageId || crypto.randomUUID()
try { try {
logger.info(`[${tracker.requestId}] Received chat POST`, { logger.info(`[${tracker.requestId}] Received chat POST`, {
hasContexts: Array.isArray(normalizedContexts), hasContexts: Array.isArray(contexts),
contextsCount: Array.isArray(normalizedContexts) ? normalizedContexts.length : 0, contextsCount: Array.isArray(contexts) ? contexts.length : 0,
contextsPreview: Array.isArray(normalizedContexts) contextsPreview: Array.isArray(contexts)
? normalizedContexts.map((c: any) => ({ ? contexts.map((c: any) => ({
kind: c?.kind, kind: c?.kind,
chatId: c?.chatId, chatId: c?.chatId,
workflowId: c?.workflowId, workflowId: c?.workflowId,
@@ -246,25 +191,17 @@ export async function POST(req: NextRequest) {
} catch {} } catch {}
// Preprocess contexts server-side // Preprocess contexts server-side
let agentContexts: Array<{ type: string; content: string }> = [] let agentContexts: Array<{ type: string; content: string }> = []
if (Array.isArray(normalizedContexts) && normalizedContexts.length > 0) { if (Array.isArray(contexts) && contexts.length > 0) {
try { try {
const { processContextsServer } = await import('@/lib/copilot/process-contents') const { processContextsServer } = await import('@/lib/copilot/process-contents')
const processed = await processContextsServer( const processed = await processContextsServer(contexts as any, authenticatedUserId, message)
normalizedContexts as any,
authenticatedUserId,
message
)
agentContexts = processed agentContexts = processed
logger.info(`[${tracker.requestId}] Contexts processed for request`, { logger.info(`[${tracker.requestId}] Contexts processed for request`, {
processedCount: agentContexts.length, processedCount: agentContexts.length,
kinds: agentContexts.map((c) => c.type), kinds: agentContexts.map((c) => c.type),
lengthPreview: agentContexts.map((c) => c.content?.length ?? 0), lengthPreview: agentContexts.map((c) => c.content?.length ?? 0),
}) })
if ( if (Array.isArray(contexts) && contexts.length > 0 && agentContexts.length === 0) {
Array.isArray(normalizedContexts) &&
normalizedContexts.length > 0 &&
agentContexts.length === 0
) {
logger.warn( logger.warn(
`[${tracker.requestId}] Contexts provided but none processed. Check executionId for logs contexts.` `[${tracker.requestId}] Contexts provided but none processed. Check executionId for logs contexts.`
) )
@@ -278,7 +215,6 @@ export async function POST(req: NextRequest) {
let currentChat: any = null let currentChat: any = null
let conversationHistory: any[] = [] let conversationHistory: any[] = []
let actualChatId = chatId let actualChatId = chatId
let chatWasCreatedForRequest = false
const selectedModel = model || 'claude-opus-4-6' const selectedModel = model || 'claude-opus-4-6'
if (chatId || createNewChat) { if (chatId || createNewChat) {
@@ -290,7 +226,6 @@ export async function POST(req: NextRequest) {
}) })
currentChat = chatResult.chat currentChat = chatResult.chat
actualChatId = chatResult.chatId || chatId actualChatId = chatResult.chatId || chatId
chatWasCreatedForRequest = chatResult.isNew
const history = buildConversationHistory( const history = buildConversationHistory(
chatResult.conversationHistory, chatResult.conversationHistory,
(chatResult.chat?.conversationId as string | undefined) || conversationId (chatResult.chat?.conversationId as string | undefined) || conversationId
@@ -298,18 +233,6 @@ export async function POST(req: NextRequest) {
conversationHistory = history.history conversationHistory = history.history
} }
const shouldGenerateTitleForRequest =
!!actualChatId &&
chatWasCreatedForRequest &&
!currentChat?.title &&
conversationHistory.length === 0
const titleGenerationParams = {
message,
model: selectedModel,
provider,
}
const effectiveMode = mode === 'agent' ? 'build' : mode const effectiveMode = mode === 'agent' ? 'build' : mode
const effectiveConversationId = const effectiveConversationId =
(currentChat?.conversationId as string | undefined) || conversationId (currentChat?.conversationId as string | undefined) || conversationId
@@ -323,13 +246,11 @@ export async function POST(req: NextRequest) {
mode, mode,
model: selectedModel, model: selectedModel,
provider, provider,
conversationId: effectiveConversationId,
conversationHistory, conversationHistory,
contexts: agentContexts, contexts: agentContexts,
fileAttachments, fileAttachments,
commands, commands,
chatId: actualChatId, chatId: actualChatId,
prefetch,
implicitFeedback, implicitFeedback,
}, },
{ {
@@ -402,22 +323,10 @@ export async function POST(req: NextRequest) {
await pushEvent({ type: 'chat_id', chatId: actualChatId }) await pushEvent({ type: 'chat_id', chatId: actualChatId })
} }
if (shouldGenerateTitleForRequest) { if (actualChatId && !currentChat?.title && conversationHistory.length === 0) {
logger.info(`[${tracker.requestId}] Starting title generation for streaming response`, { requestChatTitleFromCopilot({ message, model: selectedModel, provider })
chatId: actualChatId,
model: titleGenerationParams.model,
provider: provider || null,
messageLength: message.length,
messagePreview: truncateForLog(message),
chatWasCreatedForRequest,
})
requestChatTitleFromCopilot(titleGenerationParams)
.then(async (title) => { .then(async (title) => {
if (title) { if (title) {
logger.info(`[${tracker.requestId}] Generated title for streaming response`, {
chatId: actualChatId,
title,
})
await db await db
.update(copilotChats) .update(copilotChats)
.set({ .set({
@@ -425,30 +334,12 @@ export async function POST(req: NextRequest) {
updatedAt: new Date(), updatedAt: new Date(),
}) })
.where(eq(copilotChats.id, actualChatId!)) .where(eq(copilotChats.id, actualChatId!))
await pushEvent({ type: 'title_updated', title, chatId: actualChatId }) await pushEvent({ type: 'title_updated', title })
logger.info(`[${tracker.requestId}] Emitted title_updated SSE event`, {
chatId: actualChatId,
title,
})
} else {
logger.warn(`[${tracker.requestId}] No title returned for streaming response`, {
chatId: actualChatId,
model: selectedModel,
})
} }
}) })
.catch((error) => { .catch((error) => {
logger.error(`[${tracker.requestId}] Title generation failed:`, error) logger.error(`[${tracker.requestId}] Title generation failed:`, error)
}) })
} else if (actualChatId && !chatWasCreatedForRequest) {
logger.info(
`[${tracker.requestId}] Skipping title generation because chat already exists`,
{
chatId: actualChatId,
model: titleGenerationParams.model,
provider: provider || null,
}
)
} }
try { try {
@@ -541,15 +432,10 @@ export async function POST(req: NextRequest) {
content: message, content: message,
timestamp: new Date().toISOString(), timestamp: new Date().toISOString(),
...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }), ...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }),
...(Array.isArray(normalizedContexts) && ...(Array.isArray(contexts) && contexts.length > 0 && { contexts }),
normalizedContexts.length > 0 && { ...(Array.isArray(contexts) &&
contexts: normalizedContexts, contexts.length > 0 && {
}), contentBlocks: [{ type: 'contexts', contexts: contexts as any, timestamp: Date.now() }],
...(Array.isArray(normalizedContexts) &&
normalizedContexts.length > 0 && {
contentBlocks: [
{ type: 'contexts', contexts: normalizedContexts as any, timestamp: Date.now() },
],
}), }),
} }
@@ -563,9 +449,9 @@ export async function POST(req: NextRequest) {
const updatedMessages = [...conversationHistory, userMessage, assistantMessage] const updatedMessages = [...conversationHistory, userMessage, assistantMessage]
// Start title generation in parallel if this is first message (non-streaming) // Start title generation in parallel if this is first message (non-streaming)
if (shouldGenerateTitleForRequest) { if (actualChatId && !currentChat.title && conversationHistory.length === 0) {
logger.info(`[${tracker.requestId}] Starting title generation for non-streaming response`) logger.info(`[${tracker.requestId}] Starting title generation for non-streaming response`)
requestChatTitleFromCopilot(titleGenerationParams) requestChatTitleFromCopilot({ message, model: selectedModel, provider })
.then(async (title) => { .then(async (title) => {
if (title) { if (title) {
await db await db
@@ -576,22 +462,11 @@ export async function POST(req: NextRequest) {
}) })
.where(eq(copilotChats.id, actualChatId!)) .where(eq(copilotChats.id, actualChatId!))
logger.info(`[${tracker.requestId}] Generated and saved title: ${title}`) logger.info(`[${tracker.requestId}] Generated and saved title: ${title}`)
} else {
logger.warn(`[${tracker.requestId}] No title returned for non-streaming response`, {
chatId: actualChatId,
model: selectedModel,
})
} }
}) })
.catch((error) => { .catch((error) => {
logger.error(`[${tracker.requestId}] Title generation failed:`, error) logger.error(`[${tracker.requestId}] Title generation failed:`, error)
}) })
} else if (actualChatId && !chatWasCreatedForRequest) {
logger.info(`[${tracker.requestId}] Skipping title generation because chat already exists`, {
chatId: actualChatId,
model: titleGenerationParams.model,
provider: provider || null,
})
} }
// Update chat in database immediately (without blocking for title) // Update chat in database immediately (without blocking for title)

View File

@@ -1,11 +1,7 @@
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod' import { z } from 'zod'
import { import { REDIS_TOOL_CALL_PREFIX, REDIS_TOOL_CALL_TTL_SECONDS } from '@/lib/copilot/constants'
REDIS_TOOL_CALL_PREFIX,
REDIS_TOOL_CALL_TTL_SECONDS,
SIM_AGENT_API_URL,
} from '@/lib/copilot/constants'
import { import {
authenticateCopilotRequestSessionOnly, authenticateCopilotRequestSessionOnly,
createBadRequestResponse, createBadRequestResponse,
@@ -14,7 +10,6 @@ import {
createUnauthorizedResponse, createUnauthorizedResponse,
type NotificationStatus, type NotificationStatus,
} from '@/lib/copilot/request-helpers' } from '@/lib/copilot/request-helpers'
import { env } from '@/lib/core/config/env'
import { getRedisClient } from '@/lib/core/config/redis' import { getRedisClient } from '@/lib/core/config/redis'
const logger = createLogger('CopilotConfirmAPI') const logger = createLogger('CopilotConfirmAPI')
@@ -26,8 +21,6 @@ const ConfirmationSchema = z.object({
errorMap: () => ({ message: 'Invalid notification status' }), errorMap: () => ({ message: 'Invalid notification status' }),
}), }),
message: z.string().optional(), // Optional message for background moves or additional context message: z.string().optional(), // Optional message for background moves or additional context
toolName: z.string().optional(),
remember: z.boolean().optional(),
}) })
/** /**
@@ -64,44 +57,6 @@ async function updateToolCallStatus(
} }
} }
async function saveAutoAllowedToolPreference(userId: string, toolName: string): Promise<boolean> {
const headers: Record<string, string> = {
'Content-Type': 'application/json',
}
if (env.COPILOT_API_KEY) {
headers['x-api-key'] = env.COPILOT_API_KEY
}
try {
const response = await fetch(`${SIM_AGENT_API_URL}/api/tool-preferences/auto-allowed`, {
method: 'POST',
headers,
body: JSON.stringify({
userId,
toolId: toolName,
}),
})
if (!response.ok) {
logger.warn('Failed to persist auto-allowed tool preference', {
userId,
toolName,
status: response.status,
})
return false
}
return true
} catch (error) {
logger.error('Error persisting auto-allowed tool preference', {
userId,
toolName,
error: error instanceof Error ? error.message : String(error),
})
return false
}
}
/** /**
* POST /api/copilot/confirm * POST /api/copilot/confirm
* Update tool call status (Accept/Reject) * Update tool call status (Accept/Reject)
@@ -119,7 +74,7 @@ export async function POST(req: NextRequest) {
} }
const body = await req.json() const body = await req.json()
const { toolCallId, status, message, toolName, remember } = ConfirmationSchema.parse(body) const { toolCallId, status, message } = ConfirmationSchema.parse(body)
// Update the tool call status in Redis // Update the tool call status in Redis
const updated = await updateToolCallStatus(toolCallId, status, message) const updated = await updateToolCallStatus(toolCallId, status, message)
@@ -135,22 +90,14 @@ export async function POST(req: NextRequest) {
return createBadRequestResponse('Failed to update tool call status or tool call not found') return createBadRequestResponse('Failed to update tool call status or tool call not found')
} }
let rememberSaved = false const duration = tracker.getDuration()
if (status === 'accepted' && remember === true && toolName && authenticatedUserId) {
rememberSaved = await saveAutoAllowedToolPreference(authenticatedUserId, toolName)
}
const response: Record<string, unknown> = { return NextResponse.json({
success: true, success: true,
message: message || `Tool call ${toolCallId} has been ${status.toLowerCase()}`, message: message || `Tool call ${toolCallId} has been ${status.toLowerCase()}`,
toolCallId, toolCallId,
status, status,
} })
if (remember === true) {
response.rememberSaved = rememberSaved
}
return NextResponse.json(response)
} catch (error) { } catch (error) {
const duration = tracker.getDuration() const duration = tracker.getDuration()

View File

@@ -1,89 +0,0 @@
/**
* @vitest-environment node
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
describe('mcp copilot route manifest contract', () => {
const previousInternalSecret = process.env.INTERNAL_API_SECRET
const previousAgentUrl = process.env.SIM_AGENT_API_URL
const previousFetch = global.fetch
beforeEach(() => {
vi.resetModules()
process.env.INTERNAL_API_SECRET = 'x'.repeat(32)
process.env.SIM_AGENT_API_URL = 'https://copilot.sim.ai'
})
afterEach(() => {
vi.restoreAllMocks()
global.fetch = previousFetch
if (previousInternalSecret === undefined) {
delete process.env.INTERNAL_API_SECRET
} else {
process.env.INTERNAL_API_SECRET = previousInternalSecret
}
if (previousAgentUrl === undefined) {
delete process.env.SIM_AGENT_API_URL
} else {
process.env.SIM_AGENT_API_URL = previousAgentUrl
}
})
it('loads and caches tool manifest from copilot backend', async () => {
const payload = {
directTools: [
{
name: 'list_workspaces',
description: 'List workspaces',
inputSchema: { type: 'object', properties: {} },
toolId: 'list_user_workspaces',
},
],
subagentTools: [
{
name: 'sim_build',
description: 'Build workflows',
inputSchema: { type: 'object', properties: {} },
agentId: 'build',
},
],
generatedAt: '2026-02-12T00:00:00Z',
}
const fetchSpy = vi.spyOn(global, 'fetch').mockResolvedValue(
new Response(JSON.stringify(payload), {
status: 200,
headers: { 'Content-Type': 'application/json' },
})
)
const mod = await import('./route')
mod.clearMcpToolManifestCacheForTests()
const first = await mod.getMcpToolManifest()
const second = await mod.getMcpToolManifest()
expect(first).toEqual(payload)
expect(second).toEqual(payload)
expect(fetchSpy).toHaveBeenCalledTimes(1)
expect(fetchSpy.mock.calls[0]?.[0]).toBe('https://copilot.sim.ai/api/mcp/tools/manifest')
})
it('rejects invalid manifest payloads from copilot backend', async () => {
const fetchSpy = vi.spyOn(global, 'fetch').mockResolvedValue(
new Response(JSON.stringify({ tools: [] }), {
status: 200,
headers: { 'Content-Type': 'application/json' },
})
)
const mod = await import('./route')
mod.clearMcpToolManifestCacheForTests()
await expect(mod.fetchMcpToolManifestFromCopilot()).rejects.toThrow(
'invalid manifest payload from copilot'
)
expect(fetchSpy).toHaveBeenCalledTimes(1)
})
})

View File

@@ -28,6 +28,7 @@ import {
executeToolServerSide, executeToolServerSide,
prepareExecutionContext, prepareExecutionContext,
} from '@/lib/copilot/orchestrator/tool-executor' } from '@/lib/copilot/orchestrator/tool-executor'
import { DIRECT_TOOL_DEFS, SUBAGENT_TOOL_DEFS } from '@/lib/copilot/tools/mcp/definitions'
import { env } from '@/lib/core/config/env' import { env } from '@/lib/core/config/env'
import { RateLimiter } from '@/lib/core/rate-limiter' import { RateLimiter } from '@/lib/core/rate-limiter'
import { import {
@@ -38,32 +39,6 @@ import {
const logger = createLogger('CopilotMcpAPI') const logger = createLogger('CopilotMcpAPI')
const mcpRateLimiter = new RateLimiter() const mcpRateLimiter = new RateLimiter()
const DEFAULT_COPILOT_MODEL = 'claude-opus-4-6' const DEFAULT_COPILOT_MODEL = 'claude-opus-4-6'
const MCP_TOOL_MANIFEST_CACHE_TTL_MS = 60_000
type McpDirectToolDef = {
name: string
description: string
inputSchema: { type: 'object'; properties?: Record<string, unknown>; required?: string[] }
toolId: string
}
type McpSubagentToolDef = {
name: string
description: string
inputSchema: { type: 'object'; properties?: Record<string, unknown>; required?: string[] }
agentId: string
}
type McpToolManifest = {
directTools: McpDirectToolDef[]
subagentTools: McpSubagentToolDef[]
generatedAt?: string
}
let cachedMcpToolManifest: {
value: McpToolManifest
expiresAt: number
} | null = null
export const dynamic = 'force-dynamic' export const dynamic = 'force-dynamic'
export const runtime = 'nodejs' export const runtime = 'nodejs'
@@ -137,58 +112,6 @@ async function authenticateCopilotApiKey(apiKey: string): Promise<CopilotKeyAuth
} }
} }
export function isMcpToolManifest(value: unknown): value is McpToolManifest {
if (!value || typeof value !== 'object') return false
const payload = value as Record<string, unknown>
return Array.isArray(payload.directTools) && Array.isArray(payload.subagentTools)
}
export async function fetchMcpToolManifestFromCopilot(): Promise<McpToolManifest> {
const internalSecret = env.INTERNAL_API_SECRET
if (!internalSecret) {
throw new Error('INTERNAL_API_SECRET not configured')
}
const res = await fetch(`${SIM_AGENT_API_URL}/api/mcp/tools/manifest`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
'x-api-key': internalSecret,
},
signal: AbortSignal.timeout(10_000),
})
if (!res.ok) {
const bodyText = await res.text().catch(() => '')
throw new Error(`manifest fetch failed (${res.status}): ${bodyText || res.statusText}`)
}
const payload: unknown = await res.json()
if (!isMcpToolManifest(payload)) {
throw new Error('invalid manifest payload from copilot')
}
return payload
}
export async function getMcpToolManifest(): Promise<McpToolManifest> {
const now = Date.now()
if (cachedMcpToolManifest && cachedMcpToolManifest.expiresAt > now) {
return cachedMcpToolManifest.value
}
const manifest = await fetchMcpToolManifestFromCopilot()
cachedMcpToolManifest = {
value: manifest,
expiresAt: now + MCP_TOOL_MANIFEST_CACHE_TTL_MS,
}
return manifest
}
export function clearMcpToolManifestCacheForTests(): void {
cachedMcpToolManifest = null
}
/** /**
* MCP Server instructions that guide LLMs on how to use the Sim copilot tools. * MCP Server instructions that guide LLMs on how to use the Sim copilot tools.
* This is included in the initialize response to help external LLMs understand * This is included in the initialize response to help external LLMs understand
@@ -457,15 +380,13 @@ function buildMcpServer(abortSignal?: AbortSignal): Server {
) )
server.setRequestHandler(ListToolsRequestSchema, async () => { server.setRequestHandler(ListToolsRequestSchema, async () => {
const manifest = await getMcpToolManifest() const directTools = DIRECT_TOOL_DEFS.map((tool) => ({
const directTools = manifest.directTools.map((tool) => ({
name: tool.name, name: tool.name,
description: tool.description, description: tool.description,
inputSchema: tool.inputSchema, inputSchema: tool.inputSchema,
})) }))
const subagentTools = manifest.subagentTools.map((tool) => ({ const subagentTools = SUBAGENT_TOOL_DEFS.map((tool) => ({
name: tool.name, name: tool.name,
description: tool.description, description: tool.description,
inputSchema: tool.inputSchema, inputSchema: tool.inputSchema,
@@ -534,15 +455,12 @@ function buildMcpServer(abortSignal?: AbortSignal): Server {
throw new McpError(ErrorCode.InvalidParams, 'Tool name required') throw new McpError(ErrorCode.InvalidParams, 'Tool name required')
} }
const manifest = await getMcpToolManifest()
const result = await handleToolsCall( const result = await handleToolsCall(
{ {
name: params.name, name: params.name,
arguments: params.arguments, arguments: params.arguments,
}, },
authResult.userId, authResult.userId,
manifest,
abortSignal abortSignal
) )
@@ -638,17 +556,16 @@ function trackMcpCopilotCall(userId: string): void {
async function handleToolsCall( async function handleToolsCall(
params: { name: string; arguments?: Record<string, unknown> }, params: { name: string; arguments?: Record<string, unknown> },
userId: string, userId: string,
manifest: McpToolManifest,
abortSignal?: AbortSignal abortSignal?: AbortSignal
): Promise<CallToolResult> { ): Promise<CallToolResult> {
const args = params.arguments || {} const args = params.arguments || {}
const directTool = manifest.directTools.find((tool) => tool.name === params.name) const directTool = DIRECT_TOOL_DEFS.find((tool) => tool.name === params.name)
if (directTool) { if (directTool) {
return handleDirectToolCall(directTool, args, userId) return handleDirectToolCall(directTool, args, userId)
} }
const subagentTool = manifest.subagentTools.find((tool) => tool.name === params.name) const subagentTool = SUBAGENT_TOOL_DEFS.find((tool) => tool.name === params.name)
if (subagentTool) { if (subagentTool) {
return handleSubagentToolCall(subagentTool, args, userId, abortSignal) return handleSubagentToolCall(subagentTool, args, userId, abortSignal)
} }
@@ -657,7 +574,7 @@ async function handleToolsCall(
} }
async function handleDirectToolCall( async function handleDirectToolCall(
toolDef: McpDirectToolDef, toolDef: (typeof DIRECT_TOOL_DEFS)[number],
args: Record<string, unknown>, args: Record<string, unknown>,
userId: string userId: string
): Promise<CallToolResult> { ): Promise<CallToolResult> {
@@ -794,7 +711,7 @@ async function handleBuildToolCall(
} }
async function handleSubagentToolCall( async function handleSubagentToolCall(
toolDef: McpSubagentToolDef, toolDef: (typeof SUBAGENT_TOOL_DEFS)[number],
args: Record<string, unknown>, args: Record<string, unknown>,
userId: string, userId: string,
abortSignal?: AbortSignal abortSignal?: AbortSignal

View File

@@ -191,84 +191,3 @@ export async function GET(request: NextRequest) {
) )
} }
} }
// Delete a label from a page
export async function DELETE(request: NextRequest) {
try {
const auth = await checkSessionOrInternalAuth(request)
if (!auth.success || !auth.userId) {
return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
}
const {
domain,
accessToken,
cloudId: providedCloudId,
pageId,
labelName,
} = await request.json()
if (!domain) {
return NextResponse.json({ error: 'Domain is required' }, { status: 400 })
}
if (!accessToken) {
return NextResponse.json({ error: 'Access token is required' }, { status: 400 })
}
if (!pageId) {
return NextResponse.json({ error: 'Page ID is required' }, { status: 400 })
}
if (!labelName) {
return NextResponse.json({ error: 'Label name is required' }, { status: 400 })
}
const pageIdValidation = validateAlphanumericId(pageId, 'pageId', 255)
if (!pageIdValidation.isValid) {
return NextResponse.json({ error: pageIdValidation.error }, { status: 400 })
}
const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken))
const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
if (!cloudIdValidation.isValid) {
return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
}
const encodedLabel = encodeURIComponent(labelName.trim())
const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/rest/api/content/${pageId}/label?name=${encodedLabel}`
const response = await fetch(url, {
method: 'DELETE',
headers: {
Accept: 'application/json',
Authorization: `Bearer ${accessToken}`,
},
})
if (!response.ok) {
const errorData = await response.json().catch(() => null)
logger.error('Confluence API error response:', {
status: response.status,
statusText: response.statusText,
error: JSON.stringify(errorData, null, 2),
})
const errorMessage =
errorData?.message || `Failed to delete Confluence label (${response.status})`
return NextResponse.json({ error: errorMessage }, { status: response.status })
}
return NextResponse.json({
pageId,
labelName,
deleted: true,
})
} catch (error) {
logger.error('Error deleting Confluence label:', error)
return NextResponse.json(
{ error: (error as Error).message || 'Internal server error' },
{ status: 500 }
)
}
}

View File

@@ -1,103 +0,0 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid'
import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation'
import { getConfluenceCloudId } from '@/tools/confluence/utils'
const logger = createLogger('ConfluencePagesByLabelAPI')
export const dynamic = 'force-dynamic'
export async function GET(request: NextRequest) {
try {
const auth = await checkSessionOrInternalAuth(request)
if (!auth.success || !auth.userId) {
return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
}
const { searchParams } = new URL(request.url)
const domain = searchParams.get('domain')
const accessToken = searchParams.get('accessToken')
const labelId = searchParams.get('labelId')
const providedCloudId = searchParams.get('cloudId')
const limit = searchParams.get('limit') || '50'
const cursor = searchParams.get('cursor')
if (!domain) {
return NextResponse.json({ error: 'Domain is required' }, { status: 400 })
}
if (!accessToken) {
return NextResponse.json({ error: 'Access token is required' }, { status: 400 })
}
if (!labelId) {
return NextResponse.json({ error: 'Label ID is required' }, { status: 400 })
}
const labelIdValidation = validateAlphanumericId(labelId, 'labelId', 255)
if (!labelIdValidation.isValid) {
return NextResponse.json({ error: labelIdValidation.error }, { status: 400 })
}
const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken))
const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
if (!cloudIdValidation.isValid) {
return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
}
const queryParams = new URLSearchParams()
queryParams.append('limit', String(Math.min(Number(limit), 250)))
if (cursor) {
queryParams.append('cursor', cursor)
}
const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/labels/${labelId}/pages?${queryParams.toString()}`
const response = await fetch(url, {
method: 'GET',
headers: {
Accept: 'application/json',
Authorization: `Bearer ${accessToken}`,
},
})
if (!response.ok) {
const errorData = await response.json().catch(() => null)
logger.error('Confluence API error response:', {
status: response.status,
statusText: response.statusText,
error: JSON.stringify(errorData, null, 2),
})
const errorMessage = errorData?.message || `Failed to get pages by label (${response.status})`
return NextResponse.json({ error: errorMessage }, { status: response.status })
}
const data = await response.json()
const pages = (data.results || []).map((page: any) => ({
id: page.id,
title: page.title,
status: page.status ?? null,
spaceId: page.spaceId ?? null,
parentId: page.parentId ?? null,
authorId: page.authorId ?? null,
createdAt: page.createdAt ?? null,
version: page.version ?? null,
}))
return NextResponse.json({
pages,
labelId,
nextCursor: data._links?.next
? new URL(data._links.next, 'https://placeholder').searchParams.get('cursor')
: null,
})
} catch (error) {
logger.error('Error getting pages by label:', error)
return NextResponse.json(
{ error: (error as Error).message || 'Internal server error' },
{ status: 500 }
)
}
}

View File

@@ -1,98 +0,0 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid'
import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation'
import { getConfluenceCloudId } from '@/tools/confluence/utils'
const logger = createLogger('ConfluenceSpaceLabelsAPI')
export const dynamic = 'force-dynamic'
export async function GET(request: NextRequest) {
try {
const auth = await checkSessionOrInternalAuth(request)
if (!auth.success || !auth.userId) {
return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
}
const { searchParams } = new URL(request.url)
const domain = searchParams.get('domain')
const accessToken = searchParams.get('accessToken')
const spaceId = searchParams.get('spaceId')
const providedCloudId = searchParams.get('cloudId')
const limit = searchParams.get('limit') || '25'
const cursor = searchParams.get('cursor')
if (!domain) {
return NextResponse.json({ error: 'Domain is required' }, { status: 400 })
}
if (!accessToken) {
return NextResponse.json({ error: 'Access token is required' }, { status: 400 })
}
if (!spaceId) {
return NextResponse.json({ error: 'Space ID is required' }, { status: 400 })
}
const spaceIdValidation = validateAlphanumericId(spaceId, 'spaceId', 255)
if (!spaceIdValidation.isValid) {
return NextResponse.json({ error: spaceIdValidation.error }, { status: 400 })
}
const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken))
const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
if (!cloudIdValidation.isValid) {
return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
}
const queryParams = new URLSearchParams()
queryParams.append('limit', String(Math.min(Number(limit), 250)))
if (cursor) {
queryParams.append('cursor', cursor)
}
const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/spaces/${spaceId}/labels?${queryParams.toString()}`
const response = await fetch(url, {
method: 'GET',
headers: {
Accept: 'application/json',
Authorization: `Bearer ${accessToken}`,
},
})
if (!response.ok) {
const errorData = await response.json().catch(() => null)
logger.error('Confluence API error response:', {
status: response.status,
statusText: response.statusText,
error: JSON.stringify(errorData, null, 2),
})
const errorMessage = errorData?.message || `Failed to list space labels (${response.status})`
return NextResponse.json({ error: errorMessage }, { status: response.status })
}
const data = await response.json()
const labels = (data.results || []).map((label: any) => ({
id: label.id,
name: label.name,
prefix: label.prefix || 'global',
}))
return NextResponse.json({
labels,
spaceId,
nextCursor: data._links?.next
? new URL(data._links.next, 'https://placeholder').searchParams.get('cursor')
: null,
})
} catch (error) {
logger.error('Error listing space labels:', error)
return NextResponse.json(
{ error: (error as Error).message || 'Internal server error' },
{ status: 500 }
)
}
}

View File

@@ -38,7 +38,6 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
} }
const isInternalCall = auth.authType === 'internal_jwt'
const userId = auth.userId || null const userId = auth.userId || null
let workflowData = await getWorkflowById(workflowId) let workflowData = await getWorkflowById(workflowId)
@@ -48,14 +47,12 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
return NextResponse.json({ error: 'Workflow not found' }, { status: 404 }) return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
} }
if (isInternalCall && !userId) { // Check if user has access to this workflow
// Internal system calls (e.g. workflow-in-workflow executor) may not carry a userId. if (!userId) {
// These are already authenticated via internal JWT; allow read access.
logger.info(`[${requestId}] Internal API call for workflow ${workflowId}`)
} else if (!userId) {
logger.warn(`[${requestId}] Unauthorized access attempt for workflow ${workflowId}`) logger.warn(`[${requestId}] Unauthorized access attempt for workflow ${workflowId}`)
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
} else { }
const authorization = await authorizeWorkflowByWorkspacePermission({ const authorization = await authorizeWorkflowByWorkspacePermission({
workflowId, workflowId,
userId, userId,
@@ -74,7 +71,6 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
{ status: authorization.status } { status: authorization.status }
) )
} }
}
logger.debug(`[${requestId}] Attempting to load workflow ${workflowId} from normalized tables`) logger.debug(`[${requestId}] Attempting to load workflow ${workflowId} from normalized tables`)
const normalizedData = await loadWorkflowFromNormalizedTables(workflowId) const normalizedData = await loadWorkflowFromNormalizedTables(workflowId)

View File

@@ -13,6 +13,9 @@ export type CommandId =
| 'goto-logs' | 'goto-logs'
| 'open-search' | 'open-search'
| 'run-workflow' | 'run-workflow'
| 'focus-copilot-tab'
| 'focus-toolbar-tab'
| 'focus-editor-tab'
| 'clear-terminal-console' | 'clear-terminal-console'
| 'focus-toolbar-search' | 'focus-toolbar-search'
| 'clear-notifications' | 'clear-notifications'
@@ -72,6 +75,21 @@ export const COMMAND_DEFINITIONS: Record<CommandId, CommandDefinition> = {
shortcut: 'Mod+Enter', shortcut: 'Mod+Enter',
allowInEditable: false, allowInEditable: false,
}, },
'focus-copilot-tab': {
id: 'focus-copilot-tab',
shortcut: 'C',
allowInEditable: false,
},
'focus-toolbar-tab': {
id: 'focus-toolbar-tab',
shortcut: 'T',
allowInEditable: false,
},
'focus-editor-tab': {
id: 'focus-editor-tab',
shortcut: 'E',
allowInEditable: false,
},
'clear-terminal-console': { 'clear-terminal-console': {
id: 'clear-terminal-console', id: 'clear-terminal-console',
shortcut: 'Mod+D', shortcut: 'Mod+D',

View File

@@ -14,14 +14,6 @@ const logger = createLogger('DiffControls')
const NOTIFICATION_WIDTH = 240 const NOTIFICATION_WIDTH = 240
const NOTIFICATION_GAP = 16 const NOTIFICATION_GAP = 16
function isWorkflowEditToolCall(name?: string, params?: Record<string, unknown>): boolean {
if (name !== 'workflow_change') return false
const mode = typeof params?.mode === 'string' ? params.mode.toLowerCase() : ''
if (mode === 'apply') return true
return typeof params?.proposalId === 'string' && params.proposalId.length > 0
}
export const DiffControls = memo(function DiffControls() { export const DiffControls = memo(function DiffControls() {
const isTerminalResizing = useTerminalStore((state) => state.isResizing) const isTerminalResizing = useTerminalStore((state) => state.isResizing)
const isPanelResizing = usePanelStore((state) => state.isResizing) const isPanelResizing = usePanelStore((state) => state.isResizing)
@@ -72,7 +64,7 @@ export const DiffControls = memo(function DiffControls() {
const b = blocks[bi] const b = blocks[bi]
if (b?.type === 'tool_call') { if (b?.type === 'tool_call') {
const tn = b.toolCall?.name const tn = b.toolCall?.name
if (isWorkflowEditToolCall(tn, b.toolCall?.params)) { if (tn === 'edit_workflow') {
id = b.toolCall?.id id = b.toolCall?.id
break outer break outer
} }
@@ -80,9 +72,7 @@ export const DiffControls = memo(function DiffControls() {
} }
} }
if (!id) { if (!id) {
const candidates = Object.values(toolCallsById).filter((t) => const candidates = Object.values(toolCallsById).filter((t) => t.name === 'edit_workflow')
isWorkflowEditToolCall(t.name, t.params)
)
id = candidates.length ? candidates[candidates.length - 1].id : undefined id = candidates.length ? candidates[candidates.length - 1].id : undefined
} }
if (id) updatePreviewToolCallState('accepted', id) if (id) updatePreviewToolCallState('accepted', id)
@@ -112,7 +102,7 @@ export const DiffControls = memo(function DiffControls() {
const b = blocks[bi] const b = blocks[bi]
if (b?.type === 'tool_call') { if (b?.type === 'tool_call') {
const tn = b.toolCall?.name const tn = b.toolCall?.name
if (isWorkflowEditToolCall(tn, b.toolCall?.params)) { if (tn === 'edit_workflow') {
id = b.toolCall?.id id = b.toolCall?.id
break outer break outer
} }
@@ -120,9 +110,7 @@ export const DiffControls = memo(function DiffControls() {
} }
} }
if (!id) { if (!id) {
const candidates = Object.values(toolCallsById).filter((t) => const candidates = Object.values(toolCallsById).filter((t) => t.name === 'edit_workflow')
isWorkflowEditToolCall(t.name, t.params)
)
id = candidates.length ? candidates[candidates.length - 1].id : undefined id = candidates.length ? candidates[candidates.length - 1].id : undefined
} }
if (id) updatePreviewToolCallState('rejected', id) if (id) updatePreviewToolCallState('rejected', id)

View File

@@ -108,7 +108,7 @@ const SmoothThinkingText = memo(
return ( return (
<div <div
ref={textRef} ref={textRef}
className='[&_*]:!text-[var(--text-muted)] [&_*]:!text-[12px] [&_*]:!leading-[1.4] [&_p]:!m-0 [&_p]:!mb-1 [&_h1]:!text-[12px] [&_h1]:!font-semibold [&_h1]:!m-0 [&_h1]:!mb-1 [&_h2]:!text-[12px] [&_h2]:!font-semibold [&_h2]:!m-0 [&_h2]:!mb-1 [&_h3]:!text-[12px] [&_h3]:!font-semibold [&_h3]:!m-0 [&_h3]:!mb-1 [&_code]:!text-[11px] [&_ul]:!pl-5 [&_ul]:!my-1 [&_ol]:!pl-6 [&_ol]:!my-1 [&_li]:!my-0.5 [&_li]:!py-0 font-season text-[12px] text-[var(--text-muted)]' className='[&_*]:!text-[var(--text-muted)] [&_*]:!text-[12px] [&_*]:!leading-[1.4] [&_p]:!m-0 [&_p]:!mb-1 [&_h1]:!text-[12px] [&_h1]:!font-semibold [&_h1]:!m-0 [&_h1]:!mb-1 [&_h2]:!text-[12px] [&_h2]:!font-semibold [&_h2]:!m-0 [&_h2]:!mb-1 [&_h3]:!text-[12px] [&_h3]:!font-semibold [&_h3]:!m-0 [&_h3]:!mb-1 [&_code]:!text-[11px] [&_ul]:!pl-5 [&_ul]:!my-1 [&_ol]:!pl-8 [&_ol]:!my-1 [&_li]:!my-0.5 [&_li]:!py-0 font-season text-[12px] text-[var(--text-muted)]'
> >
<CopilotMarkdownRenderer content={displayedContent} /> <CopilotMarkdownRenderer content={displayedContent} />
</div> </div>
@@ -355,7 +355,7 @@ export function ThinkingBlock({
isExpanded ? 'mt-1.5 max-h-[150px] opacity-100' : 'max-h-0 opacity-0' isExpanded ? 'mt-1.5 max-h-[150px] opacity-100' : 'max-h-0 opacity-0'
)} )}
> >
<div className='[&_*]:!text-[var(--text-muted)] [&_*]:!text-[12px] [&_*]:!leading-[1.4] [&_p]:!m-0 [&_p]:!mb-1 [&_h1]:!text-[12px] [&_h1]:!font-semibold [&_h1]:!m-0 [&_h1]:!mb-1 [&_h2]:!text-[12px] [&_h2]:!font-semibold [&_h2]:!m-0 [&_h2]:!mb-1 [&_h3]:!text-[12px] [&_h3]:!font-semibold [&_h3]:!m-0 [&_h3]:!mb-1 [&_code]:!text-[11px] [&_ul]:!pl-5 [&_ul]:!my-1 [&_ol]:!pl-6 [&_ol]:!my-1 [&_li]:!my-0.5 [&_li]:!py-0 font-season text-[12px] text-[var(--text-muted)]'> <div className='[&_*]:!text-[var(--text-muted)] [&_*]:!text-[12px] [&_*]:!leading-[1.4] [&_p]:!m-0 [&_p]:!mb-1 [&_h1]:!text-[12px] [&_h1]:!font-semibold [&_h1]:!m-0 [&_h1]:!mb-1 [&_h2]:!text-[12px] [&_h2]:!font-semibold [&_h2]:!m-0 [&_h2]:!mb-1 [&_h3]:!text-[12px] [&_h3]:!font-semibold [&_h3]:!m-0 [&_h3]:!mb-1 [&_code]:!text-[11px] [&_ul]:!pl-5 [&_ul]:!my-1 [&_ol]:!pl-8 [&_ol]:!my-1 [&_li]:!my-0.5 [&_li]:!py-0 font-season text-[12px] text-[var(--text-muted)]'>
<CopilotMarkdownRenderer content={cleanContent} /> <CopilotMarkdownRenderer content={cleanContent} />
</div> </div>
</div> </div>

View File

@@ -47,27 +47,6 @@ interface ParsedTags {
cleanContent: string cleanContent: string
} }
function getToolCallParams(toolCall?: CopilotToolCall): Record<string, unknown> {
const candidate = ((toolCall as any)?.parameters ||
(toolCall as any)?.input ||
(toolCall as any)?.params ||
{}) as Record<string, unknown>
return candidate && typeof candidate === 'object' ? candidate : {}
}
function isWorkflowChangeApplyMode(toolCall?: CopilotToolCall): boolean {
if (!toolCall || toolCall.name !== 'workflow_change') return false
const params = getToolCallParams(toolCall)
const mode = typeof params.mode === 'string' ? params.mode.toLowerCase() : ''
if (mode === 'apply') return true
return typeof params.proposalId === 'string' && params.proposalId.length > 0
}
function isWorkflowEditSummaryTool(toolCall?: CopilotToolCall): boolean {
if (!toolCall) return false
return isWorkflowChangeApplyMode(toolCall)
}
/** /**
* Extracts plan steps from plan_respond tool calls in subagent blocks. * Extracts plan steps from plan_respond tool calls in subagent blocks.
* @param blocks - The subagent content blocks to search * @param blocks - The subagent content blocks to search
@@ -892,10 +871,7 @@ const SubagentContentRenderer = memo(function SubagentContentRenderer({
) )
} }
if (segment.type === 'tool' && segment.block.toolCall) { if (segment.type === 'tool' && segment.block.toolCall) {
if ( if (toolCall.name === 'edit' && segment.block.toolCall.name === 'edit_workflow') {
(toolCall.name === 'edit' || toolCall.name === 'build') &&
isWorkflowEditSummaryTool(segment.block.toolCall)
) {
return ( return (
<div key={`tool-${segment.block.toolCall.id || index}`}> <div key={`tool-${segment.block.toolCall.id || index}`}>
<WorkflowEditSummary toolCall={segment.block.toolCall} /> <WorkflowEditSummary toolCall={segment.block.toolCall} />
@@ -992,11 +968,12 @@ const WorkflowEditSummary = memo(function WorkflowEditSummary({
} }
}, [blocks]) }, [blocks])
if (!isWorkflowEditSummaryTool(toolCall)) { if (toolCall.name !== 'edit_workflow') {
return null return null
} }
const params = getToolCallParams(toolCall) const params =
(toolCall as any).parameters || (toolCall as any).input || (toolCall as any).params || {}
let operations = Array.isArray(params.operations) ? params.operations : [] let operations = Array.isArray(params.operations) ? params.operations : []
if (operations.length === 0 && Array.isArray((toolCall as any).operations)) { if (operations.length === 0 && Array.isArray((toolCall as any).operations)) {
@@ -1242,6 +1219,11 @@ const WorkflowEditSummary = memo(function WorkflowEditSummary({
) )
}) })
/** Checks if a tool is server-side executed (not a client tool) */
function isIntegrationTool(toolName: string): boolean {
return !TOOL_DISPLAY_REGISTRY[toolName]
}
function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean { function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean {
if (!toolCall.name || toolCall.name === 'unknown_tool') { if (!toolCall.name || toolCall.name === 'unknown_tool') {
return false return false
@@ -1251,96 +1233,59 @@ function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean {
return false return false
} }
if (toolCall.ui?.showInterrupt !== true) { // Never show buttons for tools the user has marked as always-allowed
if (useCopilotStore.getState().isToolAutoAllowed(toolCall.name)) {
return false return false
} }
const hasInterrupt = !!TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.interrupt
if (hasInterrupt) {
return true return true
} }
// Integration tools (user-installed) always require approval
if (isIntegrationTool(toolCall.name)) {
return true
}
return false
}
const toolCallLogger = createLogger('CopilotToolCall') const toolCallLogger = createLogger('CopilotToolCall')
async function sendToolDecision( async function sendToolDecision(
toolCallId: string, toolCallId: string,
status: 'accepted' | 'rejected' | 'background', status: 'accepted' | 'rejected' | 'background'
options?: {
toolName?: string
remember?: boolean
}
) { ) {
try { try {
await fetch('/api/copilot/confirm', { await fetch('/api/copilot/confirm', {
method: 'POST', method: 'POST',
headers: { 'Content-Type': 'application/json' }, headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ body: JSON.stringify({ toolCallId, status }),
toolCallId,
status,
...(options?.toolName ? { toolName: options.toolName } : {}),
...(options?.remember ? { remember: true } : {}),
}),
}) })
} catch (error) { } catch (error) {
toolCallLogger.warn('Failed to send tool decision', { toolCallLogger.warn('Failed to send tool decision', {
toolCallId, toolCallId,
status, status,
remember: options?.remember === true,
toolName: options?.toolName,
error: error instanceof Error ? error.message : String(error), error: error instanceof Error ? error.message : String(error),
}) })
} }
} }
async function removeAutoAllowedToolPreference(toolName: string): Promise<boolean> {
try {
const response = await fetch(`/api/copilot/auto-allowed-tools?toolId=${encodeURIComponent(toolName)}`, {
method: 'DELETE',
})
return response.ok
} catch (error) {
toolCallLogger.warn('Failed to remove auto-allowed tool preference', {
toolName,
error: error instanceof Error ? error.message : String(error),
})
return false
}
}
type ToolUiAction = NonNullable<NonNullable<CopilotToolCall['ui']>['actions']>[number]
function actionDecision(action: ToolUiAction): 'accepted' | 'rejected' | 'background' {
const id = action.id.toLowerCase()
if (id.includes('background')) return 'background'
if (action.kind === 'reject') return 'rejected'
return 'accepted'
}
function isClientRunCapability(toolCall: CopilotToolCall): boolean {
if (toolCall.execution?.target === 'sim_client_capability') {
return toolCall.execution.capabilityId === 'workflow.run' || !toolCall.execution.capabilityId
}
return CLIENT_EXECUTABLE_RUN_TOOLS.has(toolCall.name)
}
async function handleRun( async function handleRun(
toolCall: CopilotToolCall, toolCall: CopilotToolCall,
setToolCallState: any, setToolCallState: any,
onStateChange?: any, onStateChange?: any,
editedParams?: any, editedParams?: any
options?: {
remember?: boolean
}
) { ) {
setToolCallState(toolCall, 'executing', editedParams ? { params: editedParams } : undefined) setToolCallState(toolCall, 'executing', editedParams ? { params: editedParams } : undefined)
onStateChange?.('executing') onStateChange?.('executing')
await sendToolDecision(toolCall.id, 'accepted', { await sendToolDecision(toolCall.id, 'accepted')
toolName: toolCall.name,
remember: options?.remember === true,
})
// Client-executable run tools: execute on the client for real-time feedback // Client-executable run tools: execute on the client for real-time feedback
// (block pulsing, console logs, stop button). The server defers execution // (block pulsing, console logs, stop button). The server defers execution
// for these tools; the client reports back via mark-complete. // for these tools; the client reports back via mark-complete.
if (isClientRunCapability(toolCall)) { if (CLIENT_EXECUTABLE_RUN_TOOLS.has(toolCall.name)) {
const params = editedParams || toolCall.params || {} const params = editedParams || toolCall.params || {}
executeRunToolOnClient(toolCall.id, toolCall.name, params) executeRunToolOnClient(toolCall.id, toolCall.name, params)
} }
@@ -1353,9 +1298,6 @@ async function handleSkip(toolCall: CopilotToolCall, setToolCallState: any, onSt
} }
function getDisplayName(toolCall: CopilotToolCall): string { function getDisplayName(toolCall: CopilotToolCall): string {
if (toolCall.ui?.phaseLabel) return toolCall.ui.phaseLabel
if (toolCall.ui?.title) return `${getStateVerb(toolCall.state)} ${toolCall.ui.title}`
const fromStore = (toolCall as any).display?.text const fromStore = (toolCall as any).display?.text
if (fromStore) return fromStore if (fromStore) return fromStore
const registryEntry = TOOL_DISPLAY_REGISTRY[toolCall.name] const registryEntry = TOOL_DISPLAY_REGISTRY[toolCall.name]
@@ -1400,37 +1342,53 @@ function RunSkipButtons({
toolCall, toolCall,
onStateChange, onStateChange,
editedParams, editedParams,
actions,
}: { }: {
toolCall: CopilotToolCall toolCall: CopilotToolCall
onStateChange?: (state: any) => void onStateChange?: (state: any) => void
editedParams?: any editedParams?: any
actions: ToolUiAction[]
}) { }) {
const [isProcessing, setIsProcessing] = useState(false) const [isProcessing, setIsProcessing] = useState(false)
const [buttonsHidden, setButtonsHidden] = useState(false) const [buttonsHidden, setButtonsHidden] = useState(false)
const actionInProgressRef = useRef(false) const actionInProgressRef = useRef(false)
const { setToolCallState } = useCopilotStore() const { setToolCallState, addAutoAllowedTool } = useCopilotStore()
const onAction = async (action: ToolUiAction) => { const onRun = async () => {
// Prevent race condition - check ref synchronously // Prevent race condition - check ref synchronously
if (actionInProgressRef.current) return if (actionInProgressRef.current) return
actionInProgressRef.current = true actionInProgressRef.current = true
setIsProcessing(true) setIsProcessing(true)
setButtonsHidden(true) setButtonsHidden(true)
try { try {
const decision = actionDecision(action) await handleRun(toolCall, setToolCallState, onStateChange, editedParams)
if (decision === 'accepted') { } finally {
await handleRun(toolCall, setToolCallState, onStateChange, editedParams, { setIsProcessing(false)
remember: action.remember === true, actionInProgressRef.current = false
})
} else if (decision === 'rejected') {
await handleSkip(toolCall, setToolCallState, onStateChange)
} else {
setToolCallState(toolCall, ClientToolCallState.background)
onStateChange?.('background')
await sendToolDecision(toolCall.id, 'background')
} }
}
const onAlwaysAllow = async () => {
// Prevent race condition - check ref synchronously
if (actionInProgressRef.current) return
actionInProgressRef.current = true
setIsProcessing(true)
setButtonsHidden(true)
try {
await addAutoAllowedTool(toolCall.name)
await handleRun(toolCall, setToolCallState, onStateChange, editedParams)
} finally {
setIsProcessing(false)
actionInProgressRef.current = false
}
}
const onSkip = async () => {
// Prevent race condition - check ref synchronously
if (actionInProgressRef.current) return
actionInProgressRef.current = true
setIsProcessing(true)
setButtonsHidden(true)
try {
await handleSkip(toolCall, setToolCallState, onStateChange)
} finally { } finally {
setIsProcessing(false) setIsProcessing(false)
actionInProgressRef.current = false actionInProgressRef.current = false
@@ -1439,22 +1397,23 @@ function RunSkipButtons({
if (buttonsHidden) return null if (buttonsHidden) return null
// Show "Always Allow" for all tools that require confirmation
const showAlwaysAllow = true
// Standardized buttons for all interrupt tools: Allow, Always Allow, Skip
return ( return (
<div className='mt-[10px] flex gap-[6px]'> <div className='mt-[10px] flex gap-[6px]'>
{actions.map((action, index) => { <Button onClick={onRun} disabled={isProcessing} variant='tertiary'>
const variant = {isProcessing ? 'Allowing...' : 'Allow'}
action.kind === 'reject' ? 'default' : action.remember ? 'default' : 'tertiary' </Button>
return ( {showAlwaysAllow && (
<Button <Button onClick={onAlwaysAllow} disabled={isProcessing} variant='default'>
key={action.id} {isProcessing ? 'Allowing...' : 'Always Allow'}
onClick={() => onAction(action)} </Button>
disabled={isProcessing} )}
variant={variant} <Button onClick={onSkip} disabled={isProcessing} variant='default'>
> Skip
{isProcessing && index === 0 ? 'Working...' : action.label}
</Button> </Button>
)
})}
</div> </div>
) )
} }
@@ -1471,16 +1430,10 @@ export function ToolCall({
const liveToolCall = useCopilotStore((s) => const liveToolCall = useCopilotStore((s) =>
effectiveId ? s.toolCallsById[effectiveId] : undefined effectiveId ? s.toolCallsById[effectiveId] : undefined
) )
const rawToolCall = liveToolCall || toolCallProp const toolCall = liveToolCall || toolCallProp
const hasRealToolCall = !!rawToolCall
const toolCall: CopilotToolCall = // Guard: nothing to render without a toolCall
rawToolCall || if (!toolCall) return null
({
id: effectiveId || '',
name: '',
state: ClientToolCallState.generating,
params: {},
} as CopilotToolCall)
const isExpandablePending = const isExpandablePending =
toolCall?.state === 'pending' && toolCall?.state === 'pending' &&
@@ -1488,15 +1441,17 @@ export function ToolCall({
const [expanded, setExpanded] = useState(isExpandablePending) const [expanded, setExpanded] = useState(isExpandablePending)
const [showRemoveAutoAllow, setShowRemoveAutoAllow] = useState(false) const [showRemoveAutoAllow, setShowRemoveAutoAllow] = useState(false)
const [autoAllowRemovedForCall, setAutoAllowRemovedForCall] = useState(false)
// State for editable parameters // State for editable parameters
const params = (toolCall as any).parameters || (toolCall as any).input || toolCall.params || {} const params = (toolCall as any).parameters || (toolCall as any).input || toolCall.params || {}
const [editedParams, setEditedParams] = useState(params) const [editedParams, setEditedParams] = useState(params)
const paramsRef = useRef(params) const paramsRef = useRef(params)
const { setToolCallState } = useCopilotStore() // Check if this integration tool is auto-allowed
const isAutoAllowed = toolCall.ui?.autoAllowed === true && !autoAllowRemovedForCall const { removeAutoAllowedTool, setToolCallState } = useCopilotStore()
const isAutoAllowed = useCopilotStore(
(s) => isIntegrationTool(toolCall.name) && s.isToolAutoAllowed(toolCall.name)
)
// Update edited params when toolCall params change (deep comparison to avoid resetting user edits on ref change) // Update edited params when toolCall params change (deep comparison to avoid resetting user edits on ref change)
useEffect(() => { useEffect(() => {
@@ -1506,14 +1461,6 @@ export function ToolCall({
} }
}, [params]) }, [params])
useEffect(() => {
setAutoAllowRemovedForCall(false)
setShowRemoveAutoAllow(false)
}, [toolCall.id])
// Guard: nothing to render without a toolCall
if (!hasRealToolCall) return null
// Skip rendering some internal tools // Skip rendering some internal tools
if ( if (
toolCall.name === 'checkoff_todo' || toolCall.name === 'checkoff_todo' ||
@@ -1525,9 +1472,7 @@ export function ToolCall({
return null return null
// Special rendering for subagent tools - show as thinking text with tool calls at top level // Special rendering for subagent tools - show as thinking text with tool calls at top level
const isSubagentTool = const isSubagentTool = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.subagent === true
toolCall.execution?.target === 'go_subagent' ||
TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.subagent === true
// For ALL subagent tools, don't show anything until we have blocks with content // For ALL subagent tools, don't show anything until we have blocks with content
if (isSubagentTool) { if (isSubagentTool) {
@@ -1554,6 +1499,28 @@ export function ToolCall({
) )
} }
// Get current mode from store to determine if we should render integration tools
const mode = useCopilotStore.getState().mode
// Check if this is a completed/historical tool call (not pending/executing)
// Use string comparison to handle both enum values and string values from DB
const stateStr = String(toolCall.state)
const isCompletedToolCall =
stateStr === 'success' ||
stateStr === 'error' ||
stateStr === 'rejected' ||
stateStr === 'aborted'
// Allow rendering if:
// 1. Tool is in TOOL_DISPLAY_REGISTRY (client tools), OR
// 2. We're in build mode (integration tools are executed server-side), OR
// 3. Tool call is already completed (historical - should always render)
const isClientTool = !!TOOL_DISPLAY_REGISTRY[toolCall.name]
const isIntegrationToolInBuildMode = mode === 'build' && !isClientTool
if (!isClientTool && !isIntegrationToolInBuildMode && !isCompletedToolCall) {
return null
}
const toolUIConfig = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig const toolUIConfig = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig
// Check if tool has params table config (meaning it's expandable) // Check if tool has params table config (meaning it's expandable)
const hasParamsTable = !!toolUIConfig?.paramsTable const hasParamsTable = !!toolUIConfig?.paramsTable
@@ -1563,14 +1530,6 @@ export function ToolCall({
toolCall.name === 'make_api_request' || toolCall.name === 'make_api_request' ||
toolCall.name === 'set_global_workflow_variables' toolCall.name === 'set_global_workflow_variables'
const interruptActions =
(toolCall.ui?.actions && toolCall.ui.actions.length > 0
? toolCall.ui.actions
: [
{ id: 'allow_once', label: 'Allow', kind: 'accept' as const },
{ id: 'allow_always', label: 'Always Allow', kind: 'accept' as const, remember: true },
{ id: 'reject', label: 'Skip', kind: 'reject' as const },
]) as ToolUiAction[]
const showButtons = isCurrentMessage && shouldShowRunSkipButtons(toolCall) const showButtons = isCurrentMessage && shouldShowRunSkipButtons(toolCall)
// Check UI config for secondary action - only show for current message tool calls // Check UI config for secondary action - only show for current message tool calls
@@ -2028,12 +1987,9 @@ export function ToolCall({
<div className='mt-[10px]'> <div className='mt-[10px]'>
<Button <Button
onClick={async () => { onClick={async () => {
const removed = await removeAutoAllowedToolPreference(toolCall.name) await removeAutoAllowedTool(toolCall.name)
if (removed) {
setAutoAllowRemovedForCall(true)
setShowRemoveAutoAllow(false) setShowRemoveAutoAllow(false)
forceUpdate({}) forceUpdate({})
}
}} }}
variant='default' variant='default'
className='text-xs' className='text-xs'
@@ -2047,7 +2003,6 @@ export function ToolCall({
toolCall={toolCall} toolCall={toolCall}
onStateChange={handleStateChange} onStateChange={handleStateChange}
editedParams={editedParams} editedParams={editedParams}
actions={interruptActions}
/> />
)} )}
{/* Render subagent content as thinking text */} {/* Render subagent content as thinking text */}
@@ -2093,12 +2048,9 @@ export function ToolCall({
<div className='mt-[10px]'> <div className='mt-[10px]'>
<Button <Button
onClick={async () => { onClick={async () => {
const removed = await removeAutoAllowedToolPreference(toolCall.name) await removeAutoAllowedTool(toolCall.name)
if (removed) {
setAutoAllowRemovedForCall(true)
setShowRemoveAutoAllow(false) setShowRemoveAutoAllow(false)
forceUpdate({}) forceUpdate({})
}
}} }}
variant='default' variant='default'
className='text-xs' className='text-xs'
@@ -2112,7 +2064,6 @@ export function ToolCall({
toolCall={toolCall} toolCall={toolCall}
onStateChange={handleStateChange} onStateChange={handleStateChange}
editedParams={editedParams} editedParams={editedParams}
actions={interruptActions}
/> />
)} )}
{/* Render subagent content as thinking text */} {/* Render subagent content as thinking text */}
@@ -2136,7 +2087,7 @@ export function ToolCall({
} }
} }
const isEditWorkflow = isWorkflowEditSummaryTool(toolCall) const isEditWorkflow = toolCall.name === 'edit_workflow'
const shouldShowDetails = isRunWorkflow || (isExpandableTool && expanded) const shouldShowDetails = isRunWorkflow || (isExpandableTool && expanded)
const hasOperations = Array.isArray(params.operations) && params.operations.length > 0 const hasOperations = Array.isArray(params.operations) && params.operations.length > 0
const hideTextForEditWorkflow = isEditWorkflow && hasOperations const hideTextForEditWorkflow = isEditWorkflow && hasOperations
@@ -2158,12 +2109,9 @@ export function ToolCall({
<div className='mt-[10px]'> <div className='mt-[10px]'>
<Button <Button
onClick={async () => { onClick={async () => {
const removed = await removeAutoAllowedToolPreference(toolCall.name) await removeAutoAllowedTool(toolCall.name)
if (removed) {
setAutoAllowRemovedForCall(true)
setShowRemoveAutoAllow(false) setShowRemoveAutoAllow(false)
forceUpdate({}) forceUpdate({})
}
}} }}
variant='default' variant='default'
className='text-xs' className='text-xs'
@@ -2177,7 +2125,6 @@ export function ToolCall({
toolCall={toolCall} toolCall={toolCall}
onStateChange={handleStateChange} onStateChange={handleStateChange}
editedParams={editedParams} editedParams={editedParams}
actions={interruptActions}
/> />
) : showMoveToBackground ? ( ) : showMoveToBackground ? (
<div className='mt-[10px]'> <div className='mt-[10px]'>
@@ -2208,7 +2155,7 @@ export function ToolCall({
</Button> </Button>
</div> </div>
) : null} ) : null}
{/* Workflow edit summary - shows block changes after workflow_change(apply) */} {/* Workflow edit summary - shows block changes after edit_workflow completes */}
<WorkflowEditSummary toolCall={toolCall} /> <WorkflowEditSummary toolCall={toolCall} />
{/* Render subagent content as thinking text */} {/* Render subagent content as thinking text */}

View File

@@ -113,6 +113,7 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
clearPlanArtifact, clearPlanArtifact,
savePlanArtifact, savePlanArtifact,
loadAvailableModels, loadAvailableModels,
loadAutoAllowedTools,
resumeActiveStream, resumeActiveStream,
} = useCopilotStore() } = useCopilotStore()
@@ -124,6 +125,8 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
setCopilotWorkflowId, setCopilotWorkflowId,
loadChats, loadChats,
loadAvailableModels, loadAvailableModels,
loadAutoAllowedTools,
currentChat,
isSendingMessage, isSendingMessage,
resumeActiveStream, resumeActiveStream,
}) })
@@ -151,8 +154,6 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
planTodos, planTodos,
}) })
const renderedChatTitle = currentChat?.title || 'New Chat'
/** Gets markdown content for design document section (available in all modes once created) */ /** Gets markdown content for design document section (available in all modes once created) */
const designDocumentContent = useMemo(() => { const designDocumentContent = useMemo(() => {
if (streamingPlanContent) { if (streamingPlanContent) {
@@ -165,14 +166,6 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
return '' return ''
}, [streamingPlanContent]) }, [streamingPlanContent])
useEffect(() => {
logger.info('[TitleRender] Copilot header title changed', {
currentChatId: currentChat?.id || null,
currentChatTitle: currentChat?.title || null,
renderedTitle: renderedChatTitle,
})
}, [currentChat?.id, currentChat?.title, renderedChatTitle])
/** Focuses the copilot input */ /** Focuses the copilot input */
const focusInput = useCallback(() => { const focusInput = useCallback(() => {
userInputRef.current?.focus() userInputRef.current?.focus()
@@ -355,7 +348,7 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
{/* Header */} {/* Header */}
<div className='mx-[-1px] flex flex-shrink-0 items-center justify-between gap-[8px] rounded-[4px] border border-[var(--border)] bg-[var(--surface-4)] px-[12px] py-[6px]'> <div className='mx-[-1px] flex flex-shrink-0 items-center justify-between gap-[8px] rounded-[4px] border border-[var(--border)] bg-[var(--surface-4)] px-[12px] py-[6px]'>
<h2 className='min-w-0 flex-1 truncate font-medium text-[14px] text-[var(--text-primary)]'> <h2 className='min-w-0 flex-1 truncate font-medium text-[14px] text-[var(--text-primary)]'>
{renderedChatTitle} {currentChat?.title || 'New Chat'}
</h2> </h2>
<div className='flex items-center gap-[8px]'> <div className='flex items-center gap-[8px]'>
<Button variant='ghost' className='p-0' onClick={handleStartNewChat}> <Button variant='ghost' className='p-0' onClick={handleStartNewChat}>

View File

@@ -12,6 +12,8 @@ interface UseCopilotInitializationProps {
setCopilotWorkflowId: (workflowId: string | null) => Promise<void> setCopilotWorkflowId: (workflowId: string | null) => Promise<void>
loadChats: (forceRefresh?: boolean) => Promise<void> loadChats: (forceRefresh?: boolean) => Promise<void>
loadAvailableModels: () => Promise<void> loadAvailableModels: () => Promise<void>
loadAutoAllowedTools: () => Promise<void>
currentChat: any
isSendingMessage: boolean isSendingMessage: boolean
resumeActiveStream: () => Promise<boolean> resumeActiveStream: () => Promise<boolean>
} }
@@ -30,6 +32,8 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) {
setCopilotWorkflowId, setCopilotWorkflowId,
loadChats, loadChats,
loadAvailableModels, loadAvailableModels,
loadAutoAllowedTools,
currentChat,
isSendingMessage, isSendingMessage,
resumeActiveStream, resumeActiveStream,
} = props } = props
@@ -116,6 +120,17 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) {
}) })
}, [isSendingMessage, resumeActiveStream]) }, [isSendingMessage, resumeActiveStream])
/** Load auto-allowed tools once on mount - runs immediately, independent of workflow */
const hasLoadedAutoAllowedToolsRef = useRef(false)
useEffect(() => {
if (!hasLoadedAutoAllowedToolsRef.current) {
hasLoadedAutoAllowedToolsRef.current = true
loadAutoAllowedTools().catch((err) => {
logger.warn('[Copilot] Failed to load auto-allowed tools', err)
})
}
}, [loadAutoAllowedTools])
/** Load available models once on mount */ /** Load available models once on mount */
const hasLoadedModelsRef = useRef(false) const hasLoadedModelsRef = useRef(false)
useEffect(() => { useEffect(() => {

View File

@@ -340,7 +340,13 @@ export const Panel = memo(function Panel() {
* Register global keyboard shortcuts using the central commands registry. * Register global keyboard shortcuts using the central commands registry.
* *
* - Mod+Enter: Run / cancel workflow (matches the Run button behavior) * - Mod+Enter: Run / cancel workflow (matches the Run button behavior)
* - C: Focus Copilot tab
* - T: Focus Toolbar tab
* - E: Focus Editor tab
* - Mod+F: Focus Toolbar tab and search input * - Mod+F: Focus Toolbar tab and search input
*
* The tab-switching commands are disabled inside editable elements so typing
* in inputs or textareas is not interrupted.
*/ */
useRegisterGlobalCommands(() => useRegisterGlobalCommands(() =>
createCommands([ createCommands([
@@ -357,6 +363,33 @@ export const Panel = memo(function Panel() {
allowInEditable: false, allowInEditable: false,
}, },
}, },
{
id: 'focus-copilot-tab',
handler: () => {
setActiveTab('copilot')
},
overrides: {
allowInEditable: false,
},
},
{
id: 'focus-toolbar-tab',
handler: () => {
setActiveTab('toolbar')
},
overrides: {
allowInEditable: false,
},
},
{
id: 'focus-editor-tab',
handler: () => {
setActiveTab('editor')
},
overrides: {
allowInEditable: false,
},
},
{ {
id: 'focus-toolbar-search', id: 'focus-toolbar-search',
handler: () => { handler: () => {

View File

@@ -589,7 +589,6 @@ export async function executeScheduleJob(payload: ScheduleExecutionPayload) {
export const scheduleExecution = task({ export const scheduleExecution = task({
id: 'schedule-execution', id: 'schedule-execution',
machine: 'medium-1x',
retry: { retry: {
maxAttempts: 1, maxAttempts: 1,
}, },

View File

@@ -669,7 +669,6 @@ async function executeWebhookJobInternal(
export const webhookExecution = task({ export const webhookExecution = task({
id: 'webhook-execution', id: 'webhook-execution',
machine: 'medium-1x',
retry: { retry: {
maxAttempts: 1, maxAttempts: 1,
}, },

View File

@@ -197,6 +197,5 @@ export async function executeWorkflowJob(payload: WorkflowExecutionPayload) {
export const workflowExecutionTask = task({ export const workflowExecutionTask = task({
id: 'workflow-execution', id: 'workflow-execution',
machine: 'medium-1x',
run: executeWorkflowJob, run: executeWorkflowJob,
}) })

View File

@@ -10,11 +10,9 @@ import {
getReasoningEffortValuesForModel, getReasoningEffortValuesForModel,
getThinkingLevelsForModel, getThinkingLevelsForModel,
getVerbosityValuesForModel, getVerbosityValuesForModel,
MODELS_WITH_DEEP_RESEARCH,
MODELS_WITH_REASONING_EFFORT, MODELS_WITH_REASONING_EFFORT,
MODELS_WITH_THINKING, MODELS_WITH_THINKING,
MODELS_WITH_VERBOSITY, MODELS_WITH_VERBOSITY,
MODELS_WITHOUT_MEMORY,
providers, providers,
supportsTemperature, supportsTemperature,
} from '@/providers/utils' } from '@/providers/utils'
@@ -414,22 +412,12 @@ Return ONLY the JSON array.`,
title: 'Tools', title: 'Tools',
type: 'tool-input', type: 'tool-input',
defaultValue: [], defaultValue: [],
condition: {
field: 'model',
value: MODELS_WITH_DEEP_RESEARCH,
not: true,
},
}, },
{ {
id: 'skills', id: 'skills',
title: 'Skills', title: 'Skills',
type: 'skill-input', type: 'skill-input',
defaultValue: [], defaultValue: [],
condition: {
field: 'model',
value: MODELS_WITH_DEEP_RESEARCH,
not: true,
},
}, },
{ {
id: 'memoryType', id: 'memoryType',
@@ -443,11 +431,6 @@ Return ONLY the JSON array.`,
{ label: 'Sliding window (tokens)', id: 'sliding_window_tokens' }, { label: 'Sliding window (tokens)', id: 'sliding_window_tokens' },
], ],
defaultValue: 'none', defaultValue: 'none',
condition: {
field: 'model',
value: MODELS_WITHOUT_MEMORY,
not: true,
},
}, },
{ {
id: 'conversationId', id: 'conversationId',
@@ -461,7 +444,6 @@ Return ONLY the JSON array.`,
condition: { condition: {
field: 'memoryType', field: 'memoryType',
value: ['conversation', 'sliding_window', 'sliding_window_tokens'], value: ['conversation', 'sliding_window', 'sliding_window_tokens'],
and: { field: 'model', value: MODELS_WITHOUT_MEMORY, not: true },
}, },
}, },
{ {
@@ -472,7 +454,6 @@ Return ONLY the JSON array.`,
condition: { condition: {
field: 'memoryType', field: 'memoryType',
value: ['sliding_window'], value: ['sliding_window'],
and: { field: 'model', value: MODELS_WITHOUT_MEMORY, not: true },
}, },
}, },
{ {
@@ -483,7 +464,6 @@ Return ONLY the JSON array.`,
condition: { condition: {
field: 'memoryType', field: 'memoryType',
value: ['sliding_window_tokens'], value: ['sliding_window_tokens'],
and: { field: 'model', value: MODELS_WITHOUT_MEMORY, not: true },
}, },
}, },
{ {
@@ -497,13 +477,9 @@ Return ONLY the JSON array.`,
condition: () => ({ condition: () => ({
field: 'model', field: 'model',
value: (() => { value: (() => {
const deepResearch = new Set(MODELS_WITH_DEEP_RESEARCH.map((m) => m.toLowerCase()))
const allModels = Object.keys(getBaseModelProviders()) const allModels = Object.keys(getBaseModelProviders())
return allModels.filter( return allModels.filter(
(model) => (model) => supportsTemperature(model) && getMaxTemperature(model) === 1
supportsTemperature(model) &&
getMaxTemperature(model) === 1 &&
!deepResearch.has(model.toLowerCase())
) )
})(), })(),
}), }),
@@ -519,13 +495,9 @@ Return ONLY the JSON array.`,
condition: () => ({ condition: () => ({
field: 'model', field: 'model',
value: (() => { value: (() => {
const deepResearch = new Set(MODELS_WITH_DEEP_RESEARCH.map((m) => m.toLowerCase()))
const allModels = Object.keys(getBaseModelProviders()) const allModels = Object.keys(getBaseModelProviders())
return allModels.filter( return allModels.filter(
(model) => (model) => supportsTemperature(model) && getMaxTemperature(model) === 2
supportsTemperature(model) &&
getMaxTemperature(model) === 2 &&
!deepResearch.has(model.toLowerCase())
) )
})(), })(),
}), }),
@@ -536,11 +508,6 @@ Return ONLY the JSON array.`,
type: 'short-input', type: 'short-input',
placeholder: 'Enter max tokens (e.g., 4096)...', placeholder: 'Enter max tokens (e.g., 4096)...',
mode: 'advanced', mode: 'advanced',
condition: {
field: 'model',
value: MODELS_WITH_DEEP_RESEARCH,
not: true,
},
}, },
{ {
id: 'responseFormat', id: 'responseFormat',
@@ -548,11 +515,6 @@ Return ONLY the JSON array.`,
type: 'code', type: 'code',
placeholder: 'Enter JSON schema...', placeholder: 'Enter JSON schema...',
language: 'json', language: 'json',
condition: {
field: 'model',
value: MODELS_WITH_DEEP_RESEARCH,
not: true,
},
wandConfig: { wandConfig: {
enabled: true, enabled: true,
maintainHistory: true, maintainHistory: true,
@@ -645,16 +607,6 @@ Example 3 (Array Input):
generationType: 'json-schema', generationType: 'json-schema',
}, },
}, },
{
id: 'previousInteractionId',
title: 'Previous Interaction ID',
type: 'short-input',
placeholder: 'e.g., {{agent_1.interactionId}}',
condition: {
field: 'model',
value: MODELS_WITH_DEEP_RESEARCH,
},
},
], ],
tools: { tools: {
access: [ access: [
@@ -818,13 +770,5 @@ Example 3 (Array Input):
description: 'Provider timing information', description: 'Provider timing information',
}, },
cost: { type: 'json', description: 'Cost of the API call' }, cost: { type: 'json', description: 'Cost of the API call' },
interactionId: {
type: 'string',
description: 'Interaction ID for multi-turn deep research follow-ups',
condition: {
field: 'model',
value: MODELS_WITH_DEEP_RESEARCH,
},
},
}, },
} }

View File

@@ -394,7 +394,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
// Page Property Operations // Page Property Operations
{ label: 'List Page Properties', id: 'list_page_properties' }, { label: 'List Page Properties', id: 'list_page_properties' },
{ label: 'Create Page Property', id: 'create_page_property' }, { label: 'Create Page Property', id: 'create_page_property' },
{ label: 'Delete Page Property', id: 'delete_page_property' },
// Search Operations // Search Operations
{ label: 'Search Content', id: 'search' }, { label: 'Search Content', id: 'search' },
{ label: 'Search in Space', id: 'search_in_space' }, { label: 'Search in Space', id: 'search_in_space' },
@@ -415,9 +414,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
// Label Operations // Label Operations
{ label: 'List Labels', id: 'list_labels' }, { label: 'List Labels', id: 'list_labels' },
{ label: 'Add Label', id: 'add_label' }, { label: 'Add Label', id: 'add_label' },
{ label: 'Delete Label', id: 'delete_label' },
{ label: 'Get Pages by Label', id: 'get_pages_by_label' },
{ label: 'List Space Labels', id: 'list_space_labels' },
// Space Operations // Space Operations
{ label: 'Get Space', id: 'get_space' }, { label: 'Get Space', id: 'get_space' },
{ label: 'List Spaces', id: 'list_spaces' }, { label: 'List Spaces', id: 'list_spaces' },
@@ -489,8 +485,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
'search_in_space', 'search_in_space',
'get_space', 'get_space',
'list_spaces', 'list_spaces',
'get_pages_by_label',
'list_space_labels',
], ],
not: true, not: true,
}, },
@@ -506,8 +500,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
'list_labels', 'list_labels',
'upload_attachment', 'upload_attachment',
'add_label', 'add_label',
'delete_label',
'delete_page_property',
'get_page_children', 'get_page_children',
'get_page_ancestors', 'get_page_ancestors',
'list_page_versions', 'list_page_versions',
@@ -535,8 +527,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
'search_in_space', 'search_in_space',
'get_space', 'get_space',
'list_spaces', 'list_spaces',
'get_pages_by_label',
'list_space_labels',
], ],
not: true, not: true,
}, },
@@ -552,8 +542,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
'list_labels', 'list_labels',
'upload_attachment', 'upload_attachment',
'add_label', 'add_label',
'delete_label',
'delete_page_property',
'get_page_children', 'get_page_children',
'get_page_ancestors', 'get_page_ancestors',
'list_page_versions', 'list_page_versions',
@@ -578,7 +566,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
'search_in_space', 'search_in_space',
'create_blogpost', 'create_blogpost',
'list_blogposts_in_space', 'list_blogposts_in_space',
'list_space_labels',
], ],
}, },
}, },
@@ -614,14 +601,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
required: true, required: true,
condition: { field: 'operation', value: 'create_page_property' }, condition: { field: 'operation', value: 'create_page_property' },
}, },
{
id: 'propertyId',
title: 'Property ID',
type: 'short-input',
placeholder: 'Enter property ID to delete',
required: true,
condition: { field: 'operation', value: 'delete_page_property' },
},
{ {
id: 'title', id: 'title',
title: 'Title', title: 'Title',
@@ -715,7 +694,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
type: 'short-input', type: 'short-input',
placeholder: 'Enter label name', placeholder: 'Enter label name',
required: true, required: true,
condition: { field: 'operation', value: ['add_label', 'delete_label'] }, condition: { field: 'operation', value: 'add_label' },
}, },
{ {
id: 'labelPrefix', id: 'labelPrefix',
@@ -730,14 +709,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
value: () => 'global', value: () => 'global',
condition: { field: 'operation', value: 'add_label' }, condition: { field: 'operation', value: 'add_label' },
}, },
{
id: 'labelId',
title: 'Label ID',
type: 'short-input',
placeholder: 'Enter label ID',
required: true,
condition: { field: 'operation', value: 'get_pages_by_label' },
},
{ {
id: 'blogPostStatus', id: 'blogPostStatus',
title: 'Status', title: 'Status',
@@ -788,8 +759,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
'list_page_versions', 'list_page_versions',
'list_page_properties', 'list_page_properties',
'list_labels', 'list_labels',
'get_pages_by_label',
'list_space_labels',
], ],
}, },
}, },
@@ -811,8 +780,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
'list_page_versions', 'list_page_versions',
'list_page_properties', 'list_page_properties',
'list_labels', 'list_labels',
'get_pages_by_label',
'list_space_labels',
], ],
}, },
}, },
@@ -833,7 +800,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
// Property Tools // Property Tools
'confluence_list_page_properties', 'confluence_list_page_properties',
'confluence_create_page_property', 'confluence_create_page_property',
'confluence_delete_page_property',
// Search Tools // Search Tools
'confluence_search', 'confluence_search',
'confluence_search_in_space', 'confluence_search_in_space',
@@ -854,9 +820,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
// Label Tools // Label Tools
'confluence_list_labels', 'confluence_list_labels',
'confluence_add_label', 'confluence_add_label',
'confluence_delete_label',
'confluence_get_pages_by_label',
'confluence_list_space_labels',
// Space Tools // Space Tools
'confluence_get_space', 'confluence_get_space',
'confluence_list_spaces', 'confluence_list_spaces',
@@ -889,8 +852,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
return 'confluence_list_page_properties' return 'confluence_list_page_properties'
case 'create_page_property': case 'create_page_property':
return 'confluence_create_page_property' return 'confluence_create_page_property'
case 'delete_page_property':
return 'confluence_delete_page_property'
// Search Operations // Search Operations
case 'search': case 'search':
return 'confluence_search' return 'confluence_search'
@@ -926,12 +887,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
return 'confluence_list_labels' return 'confluence_list_labels'
case 'add_label': case 'add_label':
return 'confluence_add_label' return 'confluence_add_label'
case 'delete_label':
return 'confluence_delete_label'
case 'get_pages_by_label':
return 'confluence_get_pages_by_label'
case 'list_space_labels':
return 'confluence_list_space_labels'
// Space Operations // Space Operations
case 'get_space': case 'get_space':
return 'confluence_get_space' return 'confluence_get_space'
@@ -953,9 +908,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
versionNumber, versionNumber,
propertyKey, propertyKey,
propertyValue, propertyValue,
propertyId,
labelPrefix, labelPrefix,
labelId,
blogPostStatus, blogPostStatus,
purge, purge,
bodyFormat, bodyFormat,
@@ -1006,9 +959,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
} }
} }
// Operations that support generic cursor pagination. // Operations that support cursor pagination
// get_pages_by_label and list_space_labels have dedicated handlers
// below that pass cursor along with their required params (labelId, spaceId).
const supportsCursor = [ const supportsCursor = [
'list_attachments', 'list_attachments',
'list_spaces', 'list_spaces',
@@ -1045,35 +996,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
} }
} }
if (operation === 'delete_page_property') {
return {
credential,
pageId: effectivePageId,
operation,
propertyId,
...rest,
}
}
if (operation === 'get_pages_by_label') {
return {
credential,
operation,
labelId,
cursor: cursor || undefined,
...rest,
}
}
if (operation === 'list_space_labels') {
return {
credential,
operation,
cursor: cursor || undefined,
...rest,
}
}
if (operation === 'upload_attachment') { if (operation === 'upload_attachment') {
const normalizedFile = normalizeFileInput(attachmentFile, { single: true }) const normalizedFile = normalizeFileInput(attachmentFile, { single: true })
if (!normalizedFile) { if (!normalizedFile) {
@@ -1122,9 +1044,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
attachmentFileName: { type: 'string', description: 'Custom file name for attachment' }, attachmentFileName: { type: 'string', description: 'Custom file name for attachment' },
attachmentComment: { type: 'string', description: 'Comment for the attachment' }, attachmentComment: { type: 'string', description: 'Comment for the attachment' },
labelName: { type: 'string', description: 'Label name' }, labelName: { type: 'string', description: 'Label name' },
labelId: { type: 'string', description: 'Label identifier' },
labelPrefix: { type: 'string', description: 'Label prefix (global, my, team, system)' }, labelPrefix: { type: 'string', description: 'Label prefix (global, my, team, system)' },
propertyId: { type: 'string', description: 'Property identifier' },
blogPostStatus: { type: 'string', description: 'Blog post status (current or draft)' }, blogPostStatus: { type: 'string', description: 'Blog post status (current or draft)' },
purge: { type: 'boolean', description: 'Permanently delete instead of moving to trash' }, purge: { type: 'boolean', description: 'Permanently delete instead of moving to trash' },
bodyFormat: { type: 'string', description: 'Body format for comments' }, bodyFormat: { type: 'string', description: 'Body format for comments' },
@@ -1160,7 +1080,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
// Label Results // Label Results
labels: { type: 'array', description: 'List of labels' }, labels: { type: 'array', description: 'List of labels' },
labelName: { type: 'string', description: 'Label name' }, labelName: { type: 'string', description: 'Label name' },
labelId: { type: 'string', description: 'Label identifier' },
// Space Results // Space Results
spaces: { type: 'array', description: 'List of spaces' }, spaces: { type: 'array', description: 'List of spaces' },
spaceId: { type: 'string', description: 'Space identifier' }, spaceId: { type: 'string', description: 'Space identifier' },

View File

@@ -2,8 +2,8 @@
slug: enterprise slug: enterprise
title: 'Build with Sim for Enterprise' title: 'Build with Sim for Enterprise'
description: 'Access control, BYOK, self-hosted deployments, on-prem Copilot, SSO & SAML, whitelabeling, Admin API, and flexible data retention—enterprise features for teams with strict security and compliance requirements.' description: 'Access control, BYOK, self-hosted deployments, on-prem Copilot, SSO & SAML, whitelabeling, Admin API, and flexible data retention—enterprise features for teams with strict security and compliance requirements.'
date: 2026-02-11 date: 2026-01-23
updated: 2026-02-11 updated: 2026-01-23
authors: authors:
- vik - vik
readingTime: 10 readingTime: 10
@@ -13,8 +13,8 @@ ogAlt: 'Sim Enterprise features overview'
about: ['Enterprise Software', 'Security', 'Compliance', 'Self-Hosting'] about: ['Enterprise Software', 'Security', 'Compliance', 'Self-Hosting']
timeRequired: PT10M timeRequired: PT10M
canonical: https://sim.ai/studio/enterprise canonical: https://sim.ai/studio/enterprise
featured: true featured: false
draft: false draft: true
--- ---
We've been working with security teams at larger organizations to bring Sim into environments with strict compliance and data handling requirements. This post covers the enterprise capabilities we've built: granular access control, bring-your-own-keys, self-hosted deployments, on-prem Copilot, SSO & SAML, whitelabeling, compliance, and programmatic management via the Admin API. We've been working with security teams at larger organizations to bring Sim into environments with strict compliance and data handling requirements. This post covers the enterprise capabilities we've built: granular access control, bring-your-own-keys, self-hosted deployments, on-prem Copilot, SSO & SAML, whitelabeling, compliance, and programmatic management via the Admin API.

View File

@@ -999,7 +999,6 @@ export class AgentBlockHandler implements BlockHandler {
reasoningEffort: inputs.reasoningEffort, reasoningEffort: inputs.reasoningEffort,
verbosity: inputs.verbosity, verbosity: inputs.verbosity,
thinkingLevel: inputs.thinkingLevel, thinkingLevel: inputs.thinkingLevel,
previousInteractionId: inputs.previousInteractionId,
} }
} }
@@ -1070,7 +1069,6 @@ export class AgentBlockHandler implements BlockHandler {
reasoningEffort: providerRequest.reasoningEffort, reasoningEffort: providerRequest.reasoningEffort,
verbosity: providerRequest.verbosity, verbosity: providerRequest.verbosity,
thinkingLevel: providerRequest.thinkingLevel, thinkingLevel: providerRequest.thinkingLevel,
previousInteractionId: providerRequest.previousInteractionId,
}) })
return this.processProviderResponse(response, block, responseFormat) return this.processProviderResponse(response, block, responseFormat)
@@ -1271,7 +1269,6 @@ export class AgentBlockHandler implements BlockHandler {
content: result.content, content: result.content,
model: result.model, model: result.model,
...this.createResponseMetadata(result), ...this.createResponseMetadata(result),
...(result.interactionId && { interactionId: result.interactionId }),
} }
} }

View File

@@ -20,8 +20,6 @@ export interface AgentInputs {
conversationId?: string // Required for all non-none memory types conversationId?: string // Required for all non-none memory types
slidingWindowSize?: string // For message-based sliding window slidingWindowSize?: string // For message-based sliding window
slidingWindowTokens?: string // For token-based sliding window slidingWindowTokens?: string // For token-based sliding window
// Deep research multi-turn
previousInteractionId?: string // Interactions API previous interaction reference
// LLM parameters // LLM parameters
temperature?: string temperature?: string
maxTokens?: string maxTokens?: string

View File

@@ -20,8 +20,6 @@ export interface BuildPayloadParams {
fileAttachments?: Array<{ id: string; key: string; size: number; [key: string]: unknown }> fileAttachments?: Array<{ id: string; key: string; size: number; [key: string]: unknown }>
commands?: string[] commands?: string[]
chatId?: string chatId?: string
conversationId?: string
prefetch?: boolean
implicitFeedback?: string implicitFeedback?: string
} }
@@ -66,10 +64,6 @@ export async function buildCopilotRequestPayload(
fileAttachments, fileAttachments,
commands, commands,
chatId, chatId,
conversationId,
prefetch,
conversationHistory,
implicitFeedback,
} = params } = params
const selectedModel = options.selectedModel const selectedModel = options.selectedModel
@@ -160,12 +154,6 @@ export async function buildCopilotRequestPayload(
version: SIM_AGENT_VERSION, version: SIM_AGENT_VERSION,
...(contexts && contexts.length > 0 ? { context: contexts } : {}), ...(contexts && contexts.length > 0 ? { context: contexts } : {}),
...(chatId ? { chatId } : {}), ...(chatId ? { chatId } : {}),
...(conversationId ? { conversationId } : {}),
...(Array.isArray(conversationHistory) && conversationHistory.length > 0
? { conversationHistory }
: {}),
...(typeof prefetch === 'boolean' ? { prefetch } : {}),
...(implicitFeedback ? { implicitFeedback } : {}),
...(processedFileContents.length > 0 ? { fileAttachments: processedFileContents } : {}), ...(processedFileContents.length > 0 ? { fileAttachments: processedFileContents } : {}),
...(integrationTools.length > 0 ? { integrationTools } : {}), ...(integrationTools.length > 0 ? { integrationTools } : {}),
...(credentials ? { credentials } : {}), ...(credentials ? { credentials } : {}),

View File

@@ -1,21 +1,22 @@
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { STREAM_STORAGE_KEY } from '@/lib/copilot/constants' import { COPILOT_CONFIRM_API_PATH, STREAM_STORAGE_KEY } from '@/lib/copilot/constants'
import { asRecord } from '@/lib/copilot/orchestrator/sse-utils' import { asRecord } from '@/lib/copilot/orchestrator/sse-utils'
import type { SSEEvent } from '@/lib/copilot/orchestrator/types' import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
import { isBackgroundState, isRejectedState, isReviewState } from '@/lib/copilot/store-utils' import {
isBackgroundState,
isRejectedState,
isReviewState,
resolveToolDisplay,
} from '@/lib/copilot/store-utils'
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
import type { CopilotStore, CopilotStreamInfo, CopilotToolCall } from '@/stores/panel/copilot/types' import type { CopilotStore, CopilotStreamInfo, CopilotToolCall } from '@/stores/panel/copilot/types'
import { useVariablesStore } from '@/stores/panel/variables/store'
import { useEnvironmentStore } from '@/stores/settings/environment/store'
import { useWorkflowDiffStore } from '@/stores/workflow-diff/store'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import type { WorkflowState } from '@/stores/workflows/workflow/types'
import { appendTextBlock, beginThinkingBlock, finalizeThinkingBlock } from './content-blocks' import { appendTextBlock, beginThinkingBlock, finalizeThinkingBlock } from './content-blocks'
import { CLIENT_EXECUTABLE_RUN_TOOLS, executeRunToolOnClient } from './run-tool-execution' import { CLIENT_EXECUTABLE_RUN_TOOLS, executeRunToolOnClient } from './run-tool-execution'
import {
extractOperationListFromResultPayload,
extractToolExecutionMetadata,
extractToolUiMetadata,
isWorkflowChangeApplyCall,
mapServerStateToClientState,
resolveDisplayFromServerUi,
} from './tool-call-helpers'
import { applyToolEffects } from './tool-effects'
import type { ClientContentBlock, ClientStreamingContext } from './types' import type { ClientContentBlock, ClientStreamingContext } from './types'
const logger = createLogger('CopilotClientSseHandlers') const logger = createLogger('CopilotClientSseHandlers')
@@ -25,11 +26,21 @@ const MAX_BATCH_INTERVAL = 50
const MIN_BATCH_INTERVAL = 16 const MIN_BATCH_INTERVAL = 16
const MAX_QUEUE_SIZE = 5 const MAX_QUEUE_SIZE = 5
function isClientRunCapability(toolCall: CopilotToolCall): boolean { /**
if (toolCall.execution?.target === 'sim_client_capability') { * Send an auto-accept confirmation to the server for auto-allowed tools.
return toolCall.execution.capabilityId === 'workflow.run' || !toolCall.execution.capabilityId * The server-side orchestrator polls Redis for this decision.
} */
return CLIENT_EXECUTABLE_RUN_TOOLS.has(toolCall.name) export function sendAutoAcceptConfirmation(toolCallId: string): void {
fetch(COPILOT_CONFIRM_API_PATH, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ toolCallId, status: 'accepted' }),
}).catch((error) => {
logger.warn('Failed to send auto-accept confirmation', {
toolCallId,
error: error instanceof Error ? error.message : String(error),
})
})
} }
function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void { function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void {
@@ -219,86 +230,28 @@ export const sseHandlers: Record<string, SSEHandler> = {
} }
}, },
title_updated: (_data, _context, get, set) => { title_updated: (_data, _context, get, set) => {
const title = typeof _data.title === 'string' ? _data.title.trim() : '' const title = _data.title
const eventChatId = typeof _data.chatId === 'string' ? _data.chatId : undefined if (!title) return
const { currentChat, chats } = get() const { currentChat, chats } = get()
if (currentChat) {
logger.info('[Title] Received title_updated SSE event', {
eventTitle: title,
eventChatId: eventChatId || null,
currentChatId: currentChat?.id || null,
currentChatTitle: currentChat?.title || null,
chatCount: chats.length,
})
if (!title) {
logger.warn('[Title] Ignoring title_updated event with empty title', {
payload: _data,
})
return
}
if (!currentChat) {
logger.warn('[Title] Received title_updated event without an active currentChat', {
eventChatId: eventChatId || null,
title,
})
return
}
const targetChatId = eventChatId || currentChat.id
if (eventChatId && eventChatId !== currentChat.id) {
logger.warn('[Title] title_updated event chatId does not match currentChat', {
eventChatId,
currentChatId: currentChat.id,
})
}
set({ set({
currentChat: currentChat: { ...currentChat, title },
currentChat.id === targetChatId chats: chats.map((c) => (c.id === currentChat.id ? { ...c, title } : c)),
? { })
...currentChat,
title,
} }
: currentChat,
chats: chats.map((c) => (c.id === targetChatId ? { ...c, title } : c)),
})
const updatedState = get()
logger.info('[Title] Applied title_updated event to copilot store', {
targetChatId,
renderedCurrentChatId: updatedState.currentChat?.id || null,
renderedCurrentChatTitle: updatedState.currentChat?.title || null,
chatListTitle: updatedState.chats.find((c) => c.id === targetChatId)?.title || null,
})
}, },
'copilot.tool.result': (data, context, get, set) => { tool_result: (data, context, get, set) => {
try { try {
const eventData = asRecord(data?.data) const eventData = asRecord(data?.data)
const toolCallId: string | undefined = const toolCallId: string | undefined =
data?.toolCallId || data?.toolCallId || (eventData.id as string | undefined)
(eventData.id as string | undefined) ||
(eventData.callId as string | undefined)
const success: boolean | undefined = data?.success const success: boolean | undefined = data?.success
const failedDependency: boolean = data?.failedDependency === true const failedDependency: boolean = data?.failedDependency === true
const resultObj = asRecord(data?.result) const resultObj = asRecord(data?.result)
const skipped: boolean = resultObj.skipped === true const skipped: boolean = resultObj.skipped === true
if (!toolCallId) return if (!toolCallId) return
const uiMetadata = extractToolUiMetadata(eventData)
const executionMetadata = extractToolExecutionMetadata(eventData)
const serverState = (eventData.state as string | undefined) || undefined
const targetState = serverState
? mapServerStateToClientState(serverState)
: success
? ClientToolCallState.success
: failedDependency || skipped
? ClientToolCallState.rejected
: ClientToolCallState.error
const resultPayload = asRecord(data?.result || eventData.result || eventData.data || data?.data)
const { toolCallsById } = get() const { toolCallsById } = get()
const current = toolCallsById[toolCallId] const current = toolCallsById[toolCallId]
let paramsForCurrentToolCall: Record<string, unknown> | undefined = current?.params
if (current) { if (current) {
if ( if (
isRejectedState(current.state) || isRejectedState(current.state) ||
@@ -307,33 +260,16 @@ export const sseHandlers: Record<string, SSEHandler> = {
) { ) {
return return
} }
if ( const targetState = success
targetState === ClientToolCallState.success && ? ClientToolCallState.success
isWorkflowChangeApplyCall(current.name, paramsForCurrentToolCall) : failedDependency || skipped
) { ? ClientToolCallState.rejected
const operations = extractOperationListFromResultPayload(resultPayload || {}) : ClientToolCallState.error
if (operations && operations.length > 0) {
paramsForCurrentToolCall = {
...(current.params || {}),
operations,
}
}
}
const updatedMap = { ...toolCallsById } const updatedMap = { ...toolCallsById }
updatedMap[toolCallId] = { updatedMap[toolCallId] = {
...current, ...current,
ui: uiMetadata || current.ui,
execution: executionMetadata || current.execution,
params: paramsForCurrentToolCall,
state: targetState, state: targetState,
display: resolveDisplayFromServerUi( display: resolveToolDisplay(current.name, targetState, current.id, current.params),
current.name,
targetState,
current.id,
paramsForCurrentToolCall,
uiMetadata || current.ui
),
} }
set({ toolCallsById: updatedMap }) set({ toolCallsById: updatedMap })
@@ -376,11 +312,138 @@ export const sseHandlers: Record<string, SSEHandler> = {
} }
} }
applyToolEffects({ if (current.name === 'edit_workflow') {
effectsRaw: eventData.effects, try {
toolCall: updatedMap[toolCallId], const resultPayload = asRecord(
resultPayload, data?.result || eventData.result || eventData.data || data?.data
)
const workflowState = asRecord(resultPayload?.workflowState)
const hasWorkflowState = !!resultPayload?.workflowState
logger.info('[SSE] edit_workflow result received', {
hasWorkflowState,
blockCount: hasWorkflowState ? Object.keys(workflowState.blocks ?? {}).length : 0,
edgeCount: Array.isArray(workflowState.edges) ? workflowState.edges.length : 0,
}) })
if (hasWorkflowState) {
const diffStore = useWorkflowDiffStore.getState()
diffStore
.setProposedChanges(resultPayload.workflowState as WorkflowState)
.catch((err) => {
logger.error('[SSE] Failed to apply edit_workflow diff', {
error: err instanceof Error ? err.message : String(err),
})
})
}
} catch (err) {
logger.error('[SSE] edit_workflow result handling failed', {
error: err instanceof Error ? err.message : String(err),
})
}
}
// Deploy tools: update deployment status in workflow registry
if (
targetState === ClientToolCallState.success &&
(current.name === 'deploy_api' ||
current.name === 'deploy_chat' ||
current.name === 'deploy_mcp' ||
current.name === 'redeploy')
) {
try {
const resultPayload = asRecord(
data?.result || eventData.result || eventData.data || data?.data
)
const input = asRecord(current.params)
const workflowId =
(resultPayload?.workflowId as string) ||
(input?.workflowId as string) ||
useWorkflowRegistry.getState().activeWorkflowId
const isDeployed = resultPayload?.isDeployed !== false
if (workflowId) {
useWorkflowRegistry
.getState()
.setDeploymentStatus(workflowId, isDeployed, isDeployed ? new Date() : undefined)
logger.info('[SSE] Updated deployment status from tool result', {
toolName: current.name,
workflowId,
isDeployed,
})
}
} catch (err) {
logger.warn('[SSE] Failed to hydrate deployment status', {
error: err instanceof Error ? err.message : String(err),
})
}
}
// Environment variables: reload store after successful set
if (
targetState === ClientToolCallState.success &&
current.name === 'set_environment_variables'
) {
try {
useEnvironmentStore.getState().loadEnvironmentVariables()
logger.info('[SSE] Triggered environment variables reload')
} catch (err) {
logger.warn('[SSE] Failed to reload environment variables', {
error: err instanceof Error ? err.message : String(err),
})
}
}
// Workflow variables: reload store after successful set
if (
targetState === ClientToolCallState.success &&
current.name === 'set_global_workflow_variables'
) {
try {
const input = asRecord(current.params)
const workflowId =
(input?.workflowId as string) || useWorkflowRegistry.getState().activeWorkflowId
if (workflowId) {
useVariablesStore.getState().loadForWorkflow(workflowId)
logger.info('[SSE] Triggered workflow variables reload', { workflowId })
}
} catch (err) {
logger.warn('[SSE] Failed to reload workflow variables', {
error: err instanceof Error ? err.message : String(err),
})
}
}
// Generate API key: update deployment status with the new key
if (targetState === ClientToolCallState.success && current.name === 'generate_api_key') {
try {
const resultPayload = asRecord(
data?.result || eventData.result || eventData.data || data?.data
)
const input = asRecord(current.params)
const workflowId =
(input?.workflowId as string) || useWorkflowRegistry.getState().activeWorkflowId
const apiKey = (resultPayload?.apiKey || resultPayload?.key) as string | undefined
if (workflowId) {
const existingStatus = useWorkflowRegistry
.getState()
.getWorkflowDeploymentStatus(workflowId)
useWorkflowRegistry
.getState()
.setDeploymentStatus(
workflowId,
existingStatus?.isDeployed ?? false,
existingStatus?.deployedAt,
apiKey
)
logger.info('[SSE] Updated deployment status with API key', {
workflowId,
hasKey: !!apiKey,
})
}
} catch (err) {
logger.warn('[SSE] Failed to hydrate API key status', {
error: err instanceof Error ? err.message : String(err),
})
}
}
} }
for (let i = 0; i < context.contentBlocks.length; i++) { for (let i = 0; i < context.contentBlocks.length; i++) {
@@ -397,24 +460,16 @@ export const sseHandlers: Record<string, SSEHandler> = {
: failedDependency || skipped : failedDependency || skipped
? ClientToolCallState.rejected ? ClientToolCallState.rejected
: ClientToolCallState.error : ClientToolCallState.error
const paramsForBlock =
b.toolCall?.id === toolCallId
? paramsForCurrentToolCall || b.toolCall?.params
: b.toolCall?.params
context.contentBlocks[i] = { context.contentBlocks[i] = {
...b, ...b,
toolCall: { toolCall: {
...b.toolCall, ...b.toolCall,
params: paramsForBlock,
ui: uiMetadata || b.toolCall?.ui,
execution: executionMetadata || b.toolCall?.execution,
state: targetState, state: targetState,
display: resolveDisplayFromServerUi( display: resolveToolDisplay(
b.toolCall?.name, b.toolCall?.name,
targetState, targetState,
toolCallId, toolCallId,
paramsForBlock, b.toolCall?.params
uiMetadata || b.toolCall?.ui
), ),
}, },
} }
@@ -428,29 +483,106 @@ export const sseHandlers: Record<string, SSEHandler> = {
}) })
} }
}, },
'copilot.tool.call': (data, context, get, set) => { tool_error: (data, context, get, set) => {
try {
const errorData = asRecord(data?.data)
const toolCallId: string | undefined =
data?.toolCallId || (errorData.id as string | undefined)
const failedDependency: boolean = data?.failedDependency === true
if (!toolCallId) return
const { toolCallsById } = get()
const current = toolCallsById[toolCallId]
if (current) {
if (
isRejectedState(current.state) ||
isReviewState(current.state) ||
isBackgroundState(current.state)
) {
return
}
const targetState = failedDependency
? ClientToolCallState.rejected
: ClientToolCallState.error
const updatedMap = { ...toolCallsById }
updatedMap[toolCallId] = {
...current,
state: targetState,
display: resolveToolDisplay(current.name, targetState, current.id, current.params),
}
set({ toolCallsById: updatedMap })
}
for (let i = 0; i < context.contentBlocks.length; i++) {
const b = context.contentBlocks[i]
if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) {
if (
isRejectedState(b.toolCall?.state) ||
isReviewState(b.toolCall?.state) ||
isBackgroundState(b.toolCall?.state)
)
break
const targetState = failedDependency
? ClientToolCallState.rejected
: ClientToolCallState.error
context.contentBlocks[i] = {
...b,
toolCall: {
...b.toolCall,
state: targetState,
display: resolveToolDisplay(
b.toolCall?.name,
targetState,
toolCallId,
b.toolCall?.params
),
},
}
break
}
}
updateStreamingMessage(set, context)
} catch (error) {
logger.warn('Failed to process tool_error SSE event', {
error: error instanceof Error ? error.message : String(error),
})
}
},
tool_generating: (data, context, get, set) => {
const { toolCallId, toolName } = data
if (!toolCallId || !toolName) return
const { toolCallsById } = get()
if (!toolCallsById[toolCallId]) {
const isAutoAllowed = get().isToolAutoAllowed(toolName)
const initialState = isAutoAllowed
? ClientToolCallState.executing
: ClientToolCallState.pending
const tc: CopilotToolCall = {
id: toolCallId,
name: toolName,
state: initialState,
display: resolveToolDisplay(toolName, initialState, toolCallId),
}
const updated = { ...toolCallsById, [toolCallId]: tc }
set({ toolCallsById: updated })
logger.info('[toolCallsById] map updated', updated)
upsertToolCallBlock(context, tc)
updateStreamingMessage(set, context)
}
},
tool_call: (data, context, get, set) => {
const toolData = asRecord(data?.data) const toolData = asRecord(data?.data)
const id: string | undefined = const id: string | undefined = (toolData.id as string | undefined) || data?.toolCallId
(toolData.id as string | undefined) || const name: string | undefined = (toolData.name as string | undefined) || data?.toolName
(toolData.callId as string | undefined) ||
data?.toolCallId
const name: string | undefined =
(toolData.name as string | undefined) ||
(toolData.toolName as string | undefined) ||
data?.toolName
if (!id) return if (!id) return
const args = toolData.arguments as Record<string, unknown> | undefined const args = toolData.arguments as Record<string, unknown> | undefined
const isPartial = toolData.partial === true const isPartial = toolData.partial === true
const uiMetadata = extractToolUiMetadata(toolData)
const executionMetadata = extractToolExecutionMetadata(toolData)
const serverState = toolData.state
const { toolCallsById } = get() const { toolCallsById } = get()
const existing = toolCallsById[id] const existing = toolCallsById[id]
const toolName = name || existing?.name || 'unknown_tool' const toolName = name || existing?.name || 'unknown_tool'
let initialState = serverState const isAutoAllowed = get().isToolAutoAllowed(toolName)
? mapServerStateToClientState(serverState) let initialState = isAutoAllowed ? ClientToolCallState.executing : ClientToolCallState.pending
: ClientToolCallState.pending
// Avoid flickering back to pending on partial/duplicate events once a tool is executing. // Avoid flickering back to pending on partial/duplicate events once a tool is executing.
if ( if (
@@ -465,25 +597,15 @@ export const sseHandlers: Record<string, SSEHandler> = {
...existing, ...existing,
name: toolName, name: toolName,
state: initialState, state: initialState,
ui: uiMetadata || existing.ui,
execution: executionMetadata || existing.execution,
...(args ? { params: args } : {}), ...(args ? { params: args } : {}),
display: resolveDisplayFromServerUi( display: resolveToolDisplay(toolName, initialState, id, args || existing.params),
toolName,
initialState,
id,
args || existing.params,
uiMetadata || existing.ui
),
} }
: { : {
id, id,
name: toolName, name: toolName,
state: initialState, state: initialState,
ui: uiMetadata,
execution: executionMetadata,
...(args ? { params: args } : {}), ...(args ? { params: args } : {}),
display: resolveDisplayFromServerUi(toolName, initialState, id, args, uiMetadata), display: resolveToolDisplay(toolName, initialState, id, args),
} }
const updated = { ...toolCallsById, [id]: next } const updated = { ...toolCallsById, [id]: next }
set({ toolCallsById: updated }) set({ toolCallsById: updated })
@@ -496,12 +618,20 @@ export const sseHandlers: Record<string, SSEHandler> = {
return return
} }
const shouldInterrupt = next.ui?.showInterrupt === true // Auto-allowed tools: send confirmation to the server so it can proceed
// without waiting for the user to click "Allow".
if (isAutoAllowed) {
sendAutoAcceptConfirmation(id)
}
// Client-run capability: execution is delegated to the browser. // Client-executable run tools: execute on the client for real-time feedback
// We run immediately only when no interrupt is required. // (block pulsing, console logs, stop button). The server defers execution
if (isClientRunCapability(next) && !shouldInterrupt) { // for these tools in interactive mode; the client reports back via mark-complete.
executeRunToolOnClient(id, toolName, args || next.params || {}) if (
CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName) &&
initialState === ClientToolCallState.executing
) {
executeRunToolOnClient(id, toolName, args || existing?.params || {})
} }
// OAuth: dispatch event to open the OAuth connect modal // OAuth: dispatch event to open the OAuth connect modal
@@ -531,7 +661,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
return return
}, },
'copilot.phase.progress': (data, context, _get, set) => { reasoning: (data, context, _get, set) => {
const phase = (data && (data.phase || data?.data?.phase)) as string | undefined const phase = (data && (data.phase || data?.data?.phase)) as string | undefined
if (phase === 'start') { if (phase === 'start') {
beginThinkingBlock(context) beginThinkingBlock(context)
@@ -548,7 +678,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
appendThinkingContent(context, chunk) appendThinkingContent(context, chunk)
updateStreamingMessage(set, context) updateStreamingMessage(set, context)
}, },
'copilot.content': (data, context, get, set) => { content: (data, context, get, set) => {
if (!data.data) return if (!data.data) return
context.pendingContent += data.data context.pendingContent += data.data
@@ -763,7 +893,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
updateStreamingMessage(set, context) updateStreamingMessage(set, context)
} }
}, },
'copilot.phase.completed': (_data, context) => { done: (_data, context) => {
logger.info('[SSE] DONE EVENT RECEIVED', { logger.info('[SSE] DONE EVENT RECEIVED', {
doneEventCount: context.doneEventCount, doneEventCount: context.doneEventCount,
data: _data, data: _data,
@@ -774,7 +904,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
context.streamComplete = true context.streamComplete = true
} }
}, },
'copilot.error': (data, context, _get, set) => { error: (data, context, _get, set) => {
logger.error('Stream error:', data.error) logger.error('Stream error:', data.error)
set((state: CopilotStore) => ({ set((state: CopilotStore) => ({
messages: state.messages.map((msg) => messages: state.messages.map((msg) =>
@@ -789,7 +919,6 @@ export const sseHandlers: Record<string, SSEHandler> = {
})) }))
context.streamComplete = true context.streamComplete = true
}, },
'copilot.phase.started': () => {},
stream_end: (_data, context, _get, set) => { stream_end: (_data, context, _get, set) => {
if (context.pendingContent) { if (context.pendingContent) {
if (context.isInThinkingBlock && context.currentThinkingBlock) { if (context.isInThinkingBlock && context.currentThinkingBlock) {
@@ -804,8 +933,3 @@ export const sseHandlers: Record<string, SSEHandler> = {
}, },
default: () => {}, default: () => {},
} }
sseHandlers['copilot.tool.interrupt_required'] = sseHandlers['copilot.tool.call']
sseHandlers['copilot.workflow.patch'] = sseHandlers['copilot.tool.result']
sseHandlers['copilot.workflow.verify'] = sseHandlers['copilot.tool.result']
sseHandlers['copilot.tool.interrupt_resolved'] = sseHandlers['copilot.tool.result']

View File

@@ -15,7 +15,10 @@ const logger = createLogger('CopilotRunToolExecution')
* (block pulsing, logs, stop button, etc.). * (block pulsing, logs, stop button, etc.).
*/ */
export const CLIENT_EXECUTABLE_RUN_TOOLS = new Set([ export const CLIENT_EXECUTABLE_RUN_TOOLS = new Set([
'workflow_run', 'run_workflow',
'run_workflow_until_block',
'run_from_block',
'run_block',
]) ])
/** /**
@@ -71,44 +74,21 @@ async function doExecuteRunTool(
| Record<string, unknown> | Record<string, unknown>
| undefined | undefined
const runMode =
toolName === 'workflow_run' ? ((params.mode as string | undefined) || 'full').toLowerCase() : undefined
if (
toolName === 'workflow_run' &&
runMode !== 'full' &&
runMode !== 'until_block' &&
runMode !== 'from_block' &&
runMode !== 'block'
) {
const error = `Unsupported workflow_run mode: ${String(params.mode)}`
logger.warn('[RunTool] Execution prevented: unsupported workflow_run mode', {
toolCallId,
mode: params.mode,
})
setToolState(toolCallId, ClientToolCallState.error)
await reportCompletion(toolCallId, false, error)
return
}
const stopAfterBlockId = (() => { const stopAfterBlockId = (() => {
if (toolName === 'workflow_run' && runMode === 'until_block') { if (toolName === 'run_workflow_until_block')
return params.stopAfterBlockId as string | undefined return params.stopAfterBlockId as string | undefined
} if (toolName === 'run_block') return params.blockId as string | undefined
if (toolName === 'workflow_run' && runMode === 'block') {
return params.blockId as string | undefined
}
return undefined return undefined
})() })()
const runFromBlock = (() => { const runFromBlock = (() => {
if (toolName === 'workflow_run' && runMode === 'from_block' && params.startBlockId) { if (toolName === 'run_from_block' && params.startBlockId) {
return { return {
startBlockId: params.startBlockId as string, startBlockId: params.startBlockId as string,
executionId: (params.executionId as string | undefined) || 'latest', executionId: (params.executionId as string | undefined) || 'latest',
} }
} }
if (toolName === 'workflow_run' && runMode === 'block' && params.blockId) { if (toolName === 'run_block' && params.blockId) {
return { return {
startBlockId: params.blockId as string, startBlockId: params.blockId as string,
executionId: (params.executionId as string | undefined) || 'latest', executionId: (params.executionId as string | undefined) || 'latest',

View File

@@ -1,172 +0,0 @@
/**
* @vitest-environment node
*/
import { describe, expect, it, vi } from 'vitest'
import { applySseEvent } from '@/lib/copilot/client-sse/subagent-handlers'
import type { ClientStreamingContext } from '@/lib/copilot/client-sse/types'
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
import type { CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types'
type StoreSet = (
partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)
) => void
/**
 * Builds a minimal store stub exposing Zustand-like get/set over a
 * partial CopilotStore, pre-seeded with one assistant message and the
 * provided tool calls.
 */
function createTestStore(initialToolCalls: Record<string, CopilotToolCall>) {
  const state: Partial<CopilotStore> = {
    messages: [
      { id: 'assistant-msg', role: 'assistant', content: '', timestamp: new Date().toISOString() },
    ],
    toolCallsById: { ...initialToolCalls },
    currentChat: null,
    chats: [],
    activeStream: null,
    updatePlanTodoStatus: vi.fn(),
    handleNewChatCreation: vi.fn().mockResolvedValue(undefined),
  }

  const get = () => state as CopilotStore
  // Mirrors Zustand's set(): accepts either a partial or an updater function.
  const set: StoreSet = (partial) => {
    const update = typeof partial === 'function' ? partial(get()) : partial
    Object.assign(state, update)
  }

  return { get, set }
}
/** Returns a fresh streaming context, as created at the start of an SSE stream. */
function createStreamingContext(): ClientStreamingContext {
  const context: ClientStreamingContext = {
    messageId: 'assistant-msg',
    accumulatedContent: '',
    contentBlocks: [],
    currentTextBlock: null,
    isInThinkingBlock: false,
    currentThinkingBlock: null,
    isInDesignWorkflowBlock: false,
    designWorkflowContent: '',
    pendingContent: '',
    doneEventCount: 0,
    streamComplete: false,
    subAgentContent: {},
    subAgentToolCalls: {},
    subAgentBlocks: {},
    // Tests assert on context/store state directly, not on UI updates.
    suppressStreamingUpdates: true,
  }
  return context
}
// Smoke tests driving applySseEvent with the namespaced `copilot.*` event
// keys, asserting tool-call state transitions land in the store and context.
describe('client SSE copilot.* stream smoke', () => {
  it('processes main tool call/result events with copilot.* keys', async () => {
    const { get, set } = createTestStore({})
    const context = createStreamingContext()

    // Tool call announced as already executing.
    await applySseEvent(
      {
        type: 'copilot.tool.call',
        data: { id: 'main-tool-1', name: 'get_user_workflow', state: 'executing', arguments: {} },
      } as any,
      context,
      get,
      set
    )

    // Matching result; id/success/result appear both top-level and in `data`,
    // mirroring the server's event envelope.
    await applySseEvent(
      {
        type: 'copilot.tool.result',
        toolCallId: 'main-tool-1',
        success: true,
        result: { ok: true },
        data: {
          id: 'main-tool-1',
          name: 'get_user_workflow',
          phase: 'completed',
          state: 'success',
          success: true,
          result: { ok: true },
        },
      } as any,
      context,
      get,
      set
    )

    expect(get().toolCallsById['main-tool-1']).toBeDefined()
    expect(get().toolCallsById['main-tool-1'].state).toBe(ClientToolCallState.success)
    // The tool call must also be surfaced as a content block for rendering.
    expect(
      context.contentBlocks.some(
        (block) => block.type === 'tool_call' && block.toolCall?.id === 'main-tool-1'
      )
    ).toBe(true)
  })

  it('processes subagent start/tool/result/end with copilot.* keys', async () => {
    const parentToolCallId = 'parent-edit-tool'
    // Seed the store with the parent tool call the subagent attaches to.
    const { get, set } = createTestStore({
      [parentToolCallId]: {
        id: parentToolCallId,
        name: 'edit',
        state: ClientToolCallState.executing,
      },
    })
    const context = createStreamingContext()

    await applySseEvent(
      {
        type: 'copilot.subagent.started',
        subagent: 'edit',
        data: { tool_call_id: parentToolCallId },
      } as any,
      context,
      get,
      set
    )

    await applySseEvent(
      {
        type: 'copilot.tool.call',
        subagent: 'edit',
        data: {
          id: 'sub-tool-1',
          name: 'workflow_context_get',
          state: 'executing',
          arguments: { includeSchemas: false },
        },
      } as any,
      context,
      get,
      set
    )

    await applySseEvent(
      {
        type: 'copilot.tool.result',
        subagent: 'edit',
        data: {
          id: 'sub-tool-1',
          name: 'workflow_context_get',
          phase: 'completed',
          state: 'success',
          success: true,
          result: { contextPackId: 'pack-1' },
        },
      } as any,
      context,
      get,
      set
    )

    await applySseEvent(
      {
        type: 'copilot.subagent.completed',
        subagent: 'edit',
        data: {},
      } as any,
      context,
      get,
      set
    )

    // After completion the parent call aggregates the subagent's tool calls
    // and is no longer marked as streaming.
    const parentToolCall = get().toolCallsById[parentToolCallId]
    expect(parentToolCall).toBeDefined()
    expect(parentToolCall.subAgentStreaming).toBe(false)
    expect(parentToolCall.subAgentToolCalls?.length).toBe(1)
    expect(parentToolCall.subAgentToolCalls?.[0]?.id).toBe('sub-tool-1')
    expect(parentToolCall.subAgentToolCalls?.[0]?.state).toBe(ClientToolCallState.success)
  })
})

View File

@@ -6,23 +6,16 @@ import {
shouldSkipToolResultEvent, shouldSkipToolResultEvent,
} from '@/lib/copilot/orchestrator/sse-utils' } from '@/lib/copilot/orchestrator/sse-utils'
import type { SSEEvent } from '@/lib/copilot/orchestrator/types' import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
import { resolveToolDisplay } from '@/lib/copilot/store-utils'
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
import type { CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types' import type { CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types'
import { import {
type SSEHandler, type SSEHandler,
sendAutoAcceptConfirmation,
sseHandlers, sseHandlers,
updateStreamingMessage, updateStreamingMessage,
} from './handlers' } from './handlers'
import { CLIENT_EXECUTABLE_RUN_TOOLS, executeRunToolOnClient } from './run-tool-execution' import { CLIENT_EXECUTABLE_RUN_TOOLS, executeRunToolOnClient } from './run-tool-execution'
import {
extractOperationListFromResultPayload,
extractToolExecutionMetadata,
extractToolUiMetadata,
isWorkflowChangeApplyCall,
mapServerStateToClientState,
resolveDisplayFromServerUi,
} from './tool-call-helpers'
import { applyToolEffects } from './tool-effects'
import type { ClientStreamingContext } from './types' import type { ClientStreamingContext } from './types'
const logger = createLogger('CopilotClientSubagentHandlers') const logger = createLogger('CopilotClientSubagentHandlers')
@@ -31,13 +24,6 @@ type StoreSet = (
partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>) partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)
) => void ) => void
function isClientRunCapability(toolCall: CopilotToolCall): boolean {
if (toolCall.execution?.target === 'sim_client_capability') {
return toolCall.execution.capabilityId === 'workflow.run' || !toolCall.execution.capabilityId
}
return CLIENT_EXECUTABLE_RUN_TOOLS.has(toolCall.name)
}
export function appendSubAgentContent( export function appendSubAgentContent(
context: ClientStreamingContext, context: ClientStreamingContext,
parentToolCallId: string, parentToolCallId: string,
@@ -124,11 +110,11 @@ export function updateToolCallWithSubAgentData(
} }
export const subAgentSSEHandlers: Record<string, SSEHandler> = { export const subAgentSSEHandlers: Record<string, SSEHandler> = {
'copilot.phase.started': () => { start: () => {
// No-op: parent subagent association is handled by copilot.subagent.started. // Subagent start event - no action needed, parent is already tracked from subagent_start
}, },
'copilot.content': (data, context, get, set) => { content: (data, context, get, set) => {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
const contentStr = typeof data.data === 'string' ? data.data : data.content || '' const contentStr = typeof data.data === 'string' ? data.data : data.content || ''
logger.info('[SubAgent] content event', { logger.info('[SubAgent] content event', {
@@ -149,7 +135,7 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
updateToolCallWithSubAgentData(context, get, set, parentToolCallId) updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
}, },
'copilot.phase.progress': (data, context, get, set) => { reasoning: (data, context, get, set) => {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
const dataObj = asRecord(data?.data) const dataObj = asRecord(data?.data)
const phase = data?.phase || (dataObj.phase as string | undefined) const phase = data?.phase || (dataObj.phase as string | undefined)
@@ -165,7 +151,11 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
updateToolCallWithSubAgentData(context, get, set, parentToolCallId) updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
}, },
'copilot.tool.call': async (data, context, get, set) => { tool_generating: () => {
// Tool generating event - no action needed, we'll handle the actual tool_call
},
tool_call: async (data, context, get, set) => {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
if (!parentToolCallId) return if (!parentToolCallId) return
@@ -174,8 +164,6 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
const name: string | undefined = (toolData.name as string | undefined) || data?.toolName const name: string | undefined = (toolData.name as string | undefined) || data?.toolName
if (!id || !name) return if (!id || !name) return
const isPartial = toolData.partial === true const isPartial = toolData.partial === true
const uiMetadata = extractToolUiMetadata(toolData)
const executionMetadata = extractToolExecutionMetadata(toolData)
let args: Record<string, unknown> | undefined = (toolData.arguments || toolData.input) as let args: Record<string, unknown> | undefined = (toolData.arguments || toolData.input) as
| Record<string, unknown> | Record<string, unknown>
@@ -211,10 +199,9 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
const existingToolCall = const existingToolCall =
existingIndex >= 0 ? context.subAgentToolCalls[parentToolCallId][existingIndex] : undefined existingIndex >= 0 ? context.subAgentToolCalls[parentToolCallId][existingIndex] : undefined
const serverState = toolData.state // Auto-allowed tools skip pending state to avoid flashing interrupt buttons
let initialState = serverState const isAutoAllowed = get().isToolAutoAllowed(name)
? mapServerStateToClientState(serverState) let initialState = isAutoAllowed ? ClientToolCallState.executing : ClientToolCallState.pending
: ClientToolCallState.pending
// Avoid flickering back to pending on partial/duplicate events once a tool is executing. // Avoid flickering back to pending on partial/duplicate events once a tool is executing.
if ( if (
@@ -228,10 +215,8 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
id, id,
name, name,
state: initialState, state: initialState,
ui: uiMetadata,
execution: executionMetadata,
...(args ? { params: args } : {}), ...(args ? { params: args } : {}),
display: resolveDisplayFromServerUi(name, initialState, id, args, uiMetadata), display: resolveToolDisplay(name, initialState, id, args),
} }
if (existingIndex >= 0) { if (existingIndex >= 0) {
@@ -256,16 +241,21 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
return return
} }
const shouldInterrupt = subAgentToolCall.ui?.showInterrupt === true // Auto-allowed tools: send confirmation to the server so it can proceed
// without waiting for the user to click "Allow".
if (isAutoAllowed) {
sendAutoAcceptConfirmation(id)
}
// Client-run capability: execution is delegated to the browser. // Client-executable run tools: if auto-allowed, execute immediately for
// Execute immediately only for non-interrupting calls. // real-time feedback. For non-auto-allowed, the user must click "Allow"
if (isClientRunCapability(subAgentToolCall) && !shouldInterrupt) { // first — handleRun in tool-call.tsx triggers executeRunToolOnClient.
if (CLIENT_EXECUTABLE_RUN_TOOLS.has(name) && isAutoAllowed) {
executeRunToolOnClient(id, name, args || {}) executeRunToolOnClient(id, name, args || {})
} }
}, },
'copilot.tool.result': (data, context, get, set) => { tool_result: (data, context, get, set) => {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
if (!parentToolCallId) return if (!parentToolCallId) return
@@ -285,51 +275,17 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
if (!context.subAgentToolCalls[parentToolCallId]) return if (!context.subAgentToolCalls[parentToolCallId]) return
if (!context.subAgentBlocks[parentToolCallId]) return if (!context.subAgentBlocks[parentToolCallId]) return
const serverState = resultData.state const targetState = success ? ClientToolCallState.success : ClientToolCallState.error
const targetState = serverState
? mapServerStateToClientState(serverState)
: success
? ClientToolCallState.success
: ClientToolCallState.error
const uiMetadata = extractToolUiMetadata(resultData)
const executionMetadata = extractToolExecutionMetadata(resultData)
const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex( const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex(
(tc: CopilotToolCall) => tc.id === toolCallId (tc: CopilotToolCall) => tc.id === toolCallId
) )
if (existingIndex >= 0) { if (existingIndex >= 0) {
const existing = context.subAgentToolCalls[parentToolCallId][existingIndex] const existing = context.subAgentToolCalls[parentToolCallId][existingIndex]
let nextParams = existing.params
const resultPayload = asRecord(
data?.result || resultData.result || resultData.data || data?.data
)
if (
targetState === ClientToolCallState.success &&
isWorkflowChangeApplyCall(existing.name, existing.params as Record<string, unknown>) &&
resultPayload
) {
const operations = extractOperationListFromResultPayload(resultPayload)
if (operations && operations.length > 0) {
nextParams = {
...(existing.params || {}),
operations,
}
}
}
const updatedSubAgentToolCall = { const updatedSubAgentToolCall = {
...existing, ...existing,
params: nextParams,
ui: uiMetadata || existing.ui,
execution: executionMetadata || existing.execution,
state: targetState, state: targetState,
display: resolveDisplayFromServerUi( display: resolveToolDisplay(existing.name, targetState, toolCallId, existing.params),
existing.name,
targetState,
toolCallId,
nextParams,
uiMetadata || existing.ui
),
} }
context.subAgentToolCalls[parentToolCallId][existingIndex] = updatedSubAgentToolCall context.subAgentToolCalls[parentToolCallId][existingIndex] = updatedSubAgentToolCall
@@ -353,18 +309,12 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
state: targetState, state: targetState,
}) })
} }
applyToolEffects({
effectsRaw: resultData.effects,
toolCall: updatedSubAgentToolCall,
resultPayload,
})
} }
updateToolCallWithSubAgentData(context, get, set, parentToolCallId) updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
}, },
'copilot.phase.completed': (_data, context, get, set) => { done: (_data, context, get, set) => {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
if (!parentToolCallId) return if (!parentToolCallId) return
@@ -372,11 +322,6 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
}, },
} }
subAgentSSEHandlers['copilot.tool.interrupt_required'] = subAgentSSEHandlers['copilot.tool.call']
subAgentSSEHandlers['copilot.workflow.patch'] = subAgentSSEHandlers['copilot.tool.result']
subAgentSSEHandlers['copilot.workflow.verify'] = subAgentSSEHandlers['copilot.tool.result']
subAgentSSEHandlers['copilot.tool.interrupt_resolved'] = subAgentSSEHandlers['copilot.tool.result']
export async function applySseEvent( export async function applySseEvent(
rawData: SSEEvent, rawData: SSEEvent,
context: ClientStreamingContext, context: ClientStreamingContext,
@@ -389,7 +334,7 @@ export async function applySseEvent(
} }
const data = normalizedEvent const data = normalizedEvent
if (data.type === 'copilot.subagent.started') { if (data.type === 'subagent_start') {
const startData = asRecord(data.data) const startData = asRecord(data.data)
const toolCallId = startData.tool_call_id as string | undefined const toolCallId = startData.tool_call_id as string | undefined
if (toolCallId) { if (toolCallId) {
@@ -412,7 +357,7 @@ export async function applySseEvent(
return true return true
} }
if (data.type === 'copilot.subagent.completed') { if (data.type === 'subagent_end') {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
if (parentToolCallId) { if (parentToolCallId) {
const { toolCallsById } = get() const { toolCallsById } = get()

View File

@@ -1,134 +0,0 @@
import { asRecord } from '@/lib/copilot/orchestrator/sse-utils'
import { humanizedFallback, resolveToolDisplay } from '@/lib/copilot/store-utils'
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
import type { CopilotToolCall } from '@/stores/panel/copilot/types'
/**
 * Translates a server-reported tool state into the client-side
 * ClientToolCallState enum. Unknown or empty values default to `pending`.
 */
export function mapServerStateToClientState(state: unknown): ClientToolCallState {
  // Normalize whatever the server sent (string, enum value, undefined) to a key.
  const key = String(state || '')
  const table: Record<string, ClientToolCallState> = {
    generating: ClientToolCallState.generating,
    pending: ClientToolCallState.pending,
    awaiting_approval: ClientToolCallState.pending,
    executing: ClientToolCallState.executing,
    success: ClientToolCallState.success,
    rejected: ClientToolCallState.rejected,
    skipped: ClientToolCallState.rejected,
    aborted: ClientToolCallState.aborted,
    error: ClientToolCallState.error,
    failed: ClientToolCallState.error,
  }
  return table[key] ?? ClientToolCallState.pending
}
/**
 * Extracts server-provided UI metadata (title, labels, interrupt flags,
 * action buttons) from a tool event payload.
 * Returns undefined when the event carries no non-empty `ui` object.
 */
export function extractToolUiMetadata(
  data: Record<string, unknown>
): CopilotToolCall['ui'] | undefined {
  const ui = asRecord(data.ui)
  if (!ui || Object.keys(ui).length === 0) return undefined

  const str = (value: unknown): string | undefined =>
    typeof value === 'string' ? value : undefined

  let actions:
    | Array<{ id: string; label: string; kind: 'accept' | 'reject'; remember: boolean }>
    | undefined
  if (Array.isArray(ui.actions)) {
    actions = []
    for (const rawAction of ui.actions) {
      const action = asRecord(rawAction)
      const id = str(action.id)
      const label = str(action.label)
      // Actions without both an id and a label cannot be rendered; drop them.
      if (!id || !label) continue
      actions.push({
        id,
        label,
        kind: action.kind === 'reject' ? 'reject' : 'accept',
        remember: action.remember === true,
      })
    }
  }

  return {
    title: str(ui.title),
    phaseLabel: str(ui.phaseLabel),
    icon: str(ui.icon),
    showInterrupt: ui.showInterrupt === true,
    showRemember: ui.showRemember === true,
    // autoAllowed may arrive on the ui object or at the event's top level.
    autoAllowed: ui.autoAllowed === true || data.autoAllowed === true,
    actions,
  }
}
/**
 * Extracts the server-declared execution routing metadata from a tool event
 * payload, or undefined when no non-empty `execution` object is present.
 */
export function extractToolExecutionMetadata(
  data: Record<string, unknown>
): CopilotToolCall['execution'] | undefined {
  const execution = asRecord(data.execution)
  if (!execution || Object.keys(execution).length === 0) return undefined
  const target = execution.target
  const capabilityId = execution.capabilityId
  return {
    target: typeof target === 'string' ? target : undefined,
    capabilityId: typeof capabilityId === 'string' ? capabilityId : undefined,
  }
}
/** Human-readable verb used when prefixing a server-provided tool title. */
function displayVerb(state: ClientToolCallState): string {
  const verbByState: Partial<Record<ClientToolCallState, string>> = {
    [ClientToolCallState.success]: 'Completed',
    [ClientToolCallState.error]: 'Failed',
    [ClientToolCallState.rejected]: 'Skipped',
    [ClientToolCallState.aborted]: 'Aborted',
    [ClientToolCallState.generating]: 'Preparing',
    [ClientToolCallState.pending]: 'Waiting',
  }
  // Any other state (e.g. executing) reads as in-progress.
  return verbByState[state] ?? 'Running'
}
/**
 * Resolves display text/icon for a tool call, preferring server-provided UI
 * metadata (phaseLabel verbatim, then title with a state-verb prefix) over
 * the locally registered display; falls back to the humanized tool name.
 */
export function resolveDisplayFromServerUi(
  toolName: string,
  state: ClientToolCallState,
  toolCallId: string,
  params: Record<string, unknown> | undefined,
  ui?: CopilotToolCall['ui']
) {
  const localDisplay =
    resolveToolDisplay(toolName, state, toolCallId, params) || humanizedFallback(toolName, state)
  if (!localDisplay) return undefined

  // A server phase label is authoritative and used as-is.
  if (ui?.phaseLabel) {
    return { text: ui.phaseLabel, icon: localDisplay.icon }
  }
  // A server title gets a verb reflecting the current state.
  if (ui?.title) {
    return { text: `${displayVerb(state)} ${ui.title}`, icon: localDisplay.icon }
  }
  return localDisplay
}
/**
 * Returns true when a tool call is a `workflow_change` invocation that
 * applies changes: either mode is "apply" (case-insensitive) or a
 * non-empty proposalId is supplied.
 */
export function isWorkflowChangeApplyCall(
  toolName?: string,
  params?: Record<string, unknown>
): boolean {
  if (toolName !== 'workflow_change') return false
  const rawMode = params?.mode
  if (typeof rawMode === 'string' && rawMode.toLowerCase() === 'apply') return true
  const proposalId = params?.proposalId
  return typeof proposalId === 'string' && proposalId.length > 0
}
/**
 * Pulls the list of workflow operations out of a tool result payload,
 * checking `operations` first and then `compiledOperations`.
 * Returns undefined when neither key holds an array.
 */
export function extractOperationListFromResultPayload(
  resultPayload: Record<string, unknown>
): Array<Record<string, unknown>> | undefined {
  for (const key of ['operations', 'compiledOperations'] as const) {
    const candidate = resultPayload[key]
    if (Array.isArray(candidate)) return candidate as Array<Record<string, unknown>>
  }
  return undefined
}

View File

@@ -1,170 +0,0 @@
/**
* @vitest-environment node
*/
import { loggerMock } from '@sim/testing'
import { beforeEach, describe, expect, it, vi } from 'vitest'
vi.mock('@sim/logger', () => loggerMock)
// Hoisted so these spies exist before the vi.mock factories below execute
// (vi.mock calls are hoisted to the top of the module by vitest).
const mocked = vi.hoisted(() => ({
  setProposedChanges: vi.fn().mockResolvedValue(undefined),
  loadEnvironmentVariables: vi.fn(),
  loadVariablesForWorkflow: vi.fn(),
  getWorkflowDeploymentStatus: vi.fn().mockReturnValue(null),
  setDeploymentStatus: vi.fn(),
  // Mutable so individual tests can repoint the "active" workflow.
  registryState: {
    activeWorkflowId: 'workflow-active',
  },
}))

// Each store mock exposes only the getState() surface that tool-effects touches.
vi.mock('@/stores/workflow-diff/store', () => ({
  useWorkflowDiffStore: {
    getState: () => ({
      setProposedChanges: mocked.setProposedChanges,
    }),
  },
}))

vi.mock('@/stores/settings/environment/store', () => ({
  useEnvironmentStore: {
    getState: () => ({
      loadEnvironmentVariables: mocked.loadEnvironmentVariables,
    }),
  },
}))

vi.mock('@/stores/panel/variables/store', () => ({
  useVariablesStore: {
    getState: () => ({
      loadForWorkflow: mocked.loadVariablesForWorkflow,
    }),
  },
}))

vi.mock('@/stores/workflows/registry/store', () => ({
  useWorkflowRegistry: {
    getState: () => ({
      activeWorkflowId: mocked.registryState.activeWorkflowId,
      getWorkflowDeploymentStatus: mocked.getWorkflowDeploymentStatus,
      setDeploymentStatus: mocked.setDeploymentStatus,
    }),
  },
}))
import { applyToolEffects } from '@/lib/copilot/client-sse/tool-effects'
describe('applyToolEffects', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // Reset the mocked registry's active workflow between tests.
    mocked.registryState.activeWorkflowId = 'workflow-active'
  })

  it('applies workflow_change fallback diff when effects are absent', () => {
    const workflowState = {
      blocks: {
        start: { id: 'start', metadata: { id: 'start', type: 'start' }, inputs: {}, outputs: {} },
      },
      edges: [],
      loops: {},
      parallels: {},
    }
    // No effects in the event: the diff must still be proposed from the
    // tool's result payload (legacy/fallback path).
    applyToolEffects({
      effectsRaw: [],
      toolCall: {
        id: 'tool-1',
        name: 'workflow_change',
        state: 'success',
        params: { workflowId: 'workflow-123' },
      } as any,
      resultPayload: {
        workflowState,
      },
    })
    expect(mocked.setProposedChanges).toHaveBeenCalledTimes(1)
    expect(mocked.setProposedChanges).toHaveBeenCalledWith(workflowState)
  })

  it('applies workflow_change fallback diff from nested editResult.workflowState', () => {
    const workflowState = {
      blocks: {
        start: { id: 'start', metadata: { id: 'start', type: 'start' }, inputs: {}, outputs: {} },
      },
      edges: [],
      loops: {},
      parallels: {},
    }
    applyToolEffects({
      effectsRaw: [],
      toolCall: {
        id: 'tool-2',
        name: 'workflow_change',
        state: 'success',
      } as any,
      // State nested one level down; resolveWorkflowState must find it.
      resultPayload: {
        editResult: {
          workflowState,
        },
      },
    })
    expect(mocked.setProposedChanges).toHaveBeenCalledTimes(1)
    expect(mocked.setProposedChanges).toHaveBeenCalledWith(workflowState)
  })

  it('applies explicit workflow.diff.proposed effect', () => {
    const workflowState = {
      blocks: {
        start: { id: 'start', metadata: { id: 'start', type: 'start' }, inputs: {}, outputs: {} },
      },
      edges: [],
      loops: {},
      parallels: {},
    }
    applyToolEffects({
      effectsRaw: [
        {
          kind: 'workflow.diff.proposed',
          payload: {
            workflowState,
          },
        },
      ],
      toolCall: {
        id: 'tool-3',
        name: 'workflow_change',
        state: 'success',
      } as any,
    })
    expect(mocked.setProposedChanges).toHaveBeenCalledTimes(1)
    expect(mocked.setProposedChanges).toHaveBeenCalledWith(workflowState)
  })

  it('does not apply fallback diff for non-workflow_change tools', () => {
    const workflowState = {
      blocks: {},
      edges: [],
      loops: {},
      parallels: {},
    }
    applyToolEffects({
      effectsRaw: [],
      toolCall: {
        id: 'tool-4',
        name: 'list_workflows',
        state: 'success',
      } as any,
      resultPayload: {
        workflowState,
      },
    })
    // The result-payload fallback is scoped to workflow_change only.
    expect(mocked.setProposedChanges).not.toHaveBeenCalled()
  })
})

View File

@@ -1,180 +0,0 @@
import { createLogger } from '@sim/logger'
import { asRecord } from '@/lib/copilot/orchestrator/sse-utils'
import type { CopilotToolCall } from '@/stores/panel/copilot/types'
import { useVariablesStore } from '@/stores/panel/variables/store'
import { useEnvironmentStore } from '@/stores/settings/environment/store'
import { useWorkflowDiffStore } from '@/stores/workflow-diff/store'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import type { WorkflowState } from '@/stores/workflows/workflow/types'
const logger = createLogger('CopilotToolEffects')
type ParsedToolEffect = {
kind: string
payload: Record<string, unknown>
}
/** Coerces the value to a record, returning null when it has no keys. */
function asNonEmptyRecord(value: unknown): Record<string, unknown> | null {
  const record = asRecord(value)
  if (Object.keys(record).length === 0) return null
  return record
}
/**
 * Parses a raw `effects` payload into validated {kind, payload} entries.
 * Non-array input yields an empty list; entries without a string kind are
 * dropped.
 */
function parseToolEffects(raw: unknown): ParsedToolEffect[] {
  if (!Array.isArray(raw)) return []
  const parsed: ParsedToolEffect[] = []
  for (const item of raw) {
    const effect = asRecord(item)
    const kind = effect.kind
    if (typeof kind !== 'string' || kind.length === 0) continue
    parsed.push({
      kind,
      payload: asRecord(effect.payload) || {},
    })
  }
  return parsed
}
/**
 * Determines which workflow an effect applies to: an explicit payload id
 * wins, then the originating tool call's params, then the registry's
 * currently active workflow.
 */
function resolveWorkflowId(
  payload: Record<string, unknown>,
  toolCall?: CopilotToolCall
): string | undefined {
  if (typeof payload.workflowId === 'string' && payload.workflowId) {
    return payload.workflowId
  }
  const params = asRecord(toolCall?.params)
  const fromParams = typeof params?.workflowId === 'string' ? params.workflowId : undefined
  if (fromParams) return fromParams
  return useWorkflowRegistry.getState().activeWorkflowId || undefined
}
/**
 * Extracts a non-empty workflow state from the effect payload or, failing
 * that, from the tool result payload (directly, then nested under
 * `editResult`). Returns null when no candidate has any keys.
 */
function resolveWorkflowState(
  payload: Record<string, unknown>,
  resultPayload?: Record<string, unknown>
): WorkflowState | null {
  const candidates: unknown[] = [payload.workflowState]
  if (resultPayload) {
    candidates.push(resultPayload.workflowState)
    candidates.push(asRecord(resultPayload.editResult)?.workflowState)
  }
  for (const candidate of candidates) {
    const state = asNonEmptyRecord(candidate)
    if (state) return state as unknown as WorkflowState
  }
  return null
}
/**
 * Syncs a workflow's deployment status into the workflow registry from a
 * `workflow.deployment.sync` effect payload.
 *
 * Each field missing from the payload falls back to the registry's existing
 * status, so a partial sync never erases known deployment data. NOTE(review):
 * when neither the payload nor the registry knows `isDeployed`, it defaults
 * to true — presumably because this effect only fires after a deploy; verify
 * against the server-side emitter.
 */
function applyDeploymentSyncEffect(payload: Record<string, unknown>, toolCall?: CopilotToolCall): void {
  const workflowId = resolveWorkflowId(payload, toolCall)
  if (!workflowId) return
  const registry = useWorkflowRegistry.getState()
  const existingStatus = registry.getWorkflowDeploymentStatus(workflowId)
  const isDeployed =
    typeof payload.isDeployed === 'boolean'
      ? payload.isDeployed
      : (existingStatus?.isDeployed ?? true)
  // Accept deployedAt only when it parses to a valid Date; otherwise keep
  // the existing timestamp.
  const deployedAt = (() => {
    if (typeof payload.deployedAt === 'string' && payload.deployedAt) {
      const parsed = new Date(payload.deployedAt)
      if (!Number.isNaN(parsed.getTime())) return parsed
    }
    return existingStatus?.deployedAt
  })()
  // Empty-string API keys are treated as absent.
  const apiKey =
    typeof payload.apiKey === 'string' && payload.apiKey.length > 0
      ? payload.apiKey
      : existingStatus?.apiKey
  registry.setDeploymentStatus(workflowId, isDeployed, deployedAt, apiKey)
}
/**
 * Refreshes only the API key on a workflow's deployment status, preserving
 * the existing deployed flag and timestamp.
 */
function applyApiKeySyncEffect(payload: Record<string, unknown>, toolCall?: CopilotToolCall): void {
  const workflowId = resolveWorkflowId(payload, toolCall)
  if (!workflowId) return
  const incomingKey = typeof payload.apiKey === 'string' ? payload.apiKey : undefined
  const registry = useWorkflowRegistry.getState()
  const current = registry.getWorkflowDeploymentStatus(workflowId)
  // Keep current deployment flags; only the key may change.
  registry.setDeploymentStatus(
    workflowId,
    current?.isDeployed ?? false,
    current?.deployedAt,
    incomingKey || current?.apiKey
  )
}
/** Reloads the variables panel for the workflow targeted by the effect. */
function applyWorkflowVariablesReload(
  payload: Record<string, unknown>,
  toolCall?: CopilotToolCall
): void {
  const workflowId = resolveWorkflowId(payload, toolCall)
  if (workflowId) {
    useVariablesStore.getState().loadForWorkflow(workflowId)
  }
}
/**
 * Applies side effects emitted by a completed copilot tool call to the
 * relevant client stores (workflow diff, deployment registry, environment
 * variables, workflow variables).
 *
 * When no parseable effects are present, falls back to proposing a workflow
 * diff directly from a `workflow_change` tool's result payload, so events
 * that carry no explicit effects still update the diff view.
 */
export function applyToolEffects(params: {
  effectsRaw: unknown
  toolCall?: CopilotToolCall
  resultPayload?: Record<string, unknown>
}): void {
  const effects = parseToolEffects(params.effectsRaw)
  if (effects.length === 0) {
    // Fallback path: only workflow_change results imply a diff proposal.
    if (params.toolCall?.name === 'workflow_change' && params.resultPayload) {
      const workflowState = resolveWorkflowState({}, params.resultPayload)
      if (!workflowState) return
      // setProposedChanges is async; failures are logged, never thrown.
      useWorkflowDiffStore
        .getState()
        .setProposedChanges(workflowState)
        .catch((error) => {
          logger.error('Failed to apply fallback workflow diff from result payload', {
            error: error instanceof Error ? error.message : String(error),
          })
        })
    }
    return
  }
  for (const effect of effects) {
    switch (effect.kind) {
      case 'workflow.diff.proposed': {
        const workflowState = resolveWorkflowState(effect.payload, params.resultPayload)
        if (!workflowState) break
        useWorkflowDiffStore
          .getState()
          .setProposedChanges(workflowState)
          .catch((error) => {
            logger.error('Failed to apply workflow diff effect', {
              error: error instanceof Error ? error.message : String(error),
            })
          })
        break
      }
      case 'workflow.deployment.sync':
        applyDeploymentSyncEffect(effect.payload, params.toolCall)
        break
      case 'workflow.api_key.sync':
        applyApiKeySyncEffect(effect.payload, params.toolCall)
        break
      case 'environment.variables.reload':
        useEnvironmentStore.getState().loadEnvironmentVariables()
        break
      case 'workflow.variables.reload':
        applyWorkflowVariablesReload(effect.payload, params.toolCall)
        break
      default:
        // Forward-compatible: unknown effect kinds are ignored, not errors.
        logger.debug('Ignoring unknown tool effect', { kind: effect.kind })
        break
    }
  }
}

View File

@@ -101,6 +101,9 @@ export const COPILOT_CHECKPOINTS_API_PATH = '/api/copilot/checkpoints'
/** POST — revert to a checkpoint. */ /** POST — revert to a checkpoint. */
export const COPILOT_CHECKPOINTS_REVERT_API_PATH = '/api/copilot/checkpoints/revert' export const COPILOT_CHECKPOINTS_REVERT_API_PATH = '/api/copilot/checkpoints/revert'
/** GET/POST/DELETE — manage auto-allowed tools. */
export const COPILOT_AUTO_ALLOWED_TOOLS_API_PATH = '/api/copilot/auto-allowed-tools'
/** GET — fetch dynamically available copilot models. */ /** GET — fetch dynamically available copilot models. */
export const COPILOT_MODELS_API_PATH = '/api/copilot/models' export const COPILOT_MODELS_API_PATH = '/api/copilot/models'

View File

@@ -5,7 +5,7 @@ import { serializeMessagesForDB } from './serialization'
const logger = createLogger('CopilotMessagePersistence') const logger = createLogger('CopilotMessagePersistence')
export async function persistMessages(params: { interface PersistParams {
chatId: string chatId: string
messages: CopilotMessage[] messages: CopilotMessage[]
sensitiveCredentialIds?: Set<string> sensitiveCredentialIds?: Set<string>
@@ -13,24 +13,29 @@ export async function persistMessages(params: {
mode?: string mode?: string
model?: string model?: string
conversationId?: string conversationId?: string
}): Promise<boolean> { }
try {
/** Builds the JSON body used by both fetch and sendBeacon persistence paths. */
function buildPersistBody(params: PersistParams): string {
const dbMessages = serializeMessagesForDB( const dbMessages = serializeMessagesForDB(
params.messages, params.messages,
params.sensitiveCredentialIds ?? new Set<string>() params.sensitiveCredentialIds ?? new Set<string>()
) )
const response = await fetch(COPILOT_UPDATE_MESSAGES_API_PATH, { return JSON.stringify({
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
chatId: params.chatId, chatId: params.chatId,
messages: dbMessages, messages: dbMessages,
...(params.planArtifact !== undefined ? { planArtifact: params.planArtifact } : {}), ...(params.planArtifact !== undefined ? { planArtifact: params.planArtifact } : {}),
...(params.mode || params.model ...(params.mode || params.model ? { config: { mode: params.mode, model: params.model } } : {}),
? { config: { mode: params.mode, model: params.model } }
: {}),
...(params.conversationId ? { conversationId: params.conversationId } : {}), ...(params.conversationId ? { conversationId: params.conversationId } : {}),
}), })
}
export async function persistMessages(params: PersistParams): Promise<boolean> {
try {
const response = await fetch(COPILOT_UPDATE_MESSAGES_API_PATH, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: buildPersistBody(params),
}) })
return response.ok return response.ok
} catch (error) { } catch (error) {
@@ -41,3 +46,27 @@ export async function persistMessages(params: {
return false return false
} }
} }
/**
 * Persists messages using navigator.sendBeacon, which is reliable during page unload.
 * Unlike fetch, sendBeacon is guaranteed to be queued even when the page is being torn down.
 */
export function persistMessagesBeacon(params: PersistParams): boolean {
  try {
    const payload = new Blob([buildPersistBody(params)], { type: 'application/json' })
    const queued = navigator.sendBeacon(COPILOT_UPDATE_MESSAGES_API_PATH, payload)
    if (queued) return true
    logger.warn('sendBeacon returned false — browser may have rejected the request', {
      chatId: params.chatId,
    })
    return false
  } catch (error) {
    // Serialization or Blob construction failed; report and signal failure.
    logger.warn('Failed to persist messages via sendBeacon', {
      chatId: params.chatId,
      error: error instanceof Error ? error.message : String(error),
    })
    return false
  }
}

View File

@@ -0,0 +1,67 @@
/**
 * Tools that trigger an approval interrupt. In interactive mode the SSE
 * handlers wait for an explicit user decision (accept / reject / background)
 * before any of these tools is executed.
 */
export const INTERRUPT_TOOL_NAMES = [
  'set_global_workflow_variables',
  'run_workflow',
  'run_workflow_until_block',
  'run_from_block',
  'run_block',
  'manage_mcp_tool',
  'manage_custom_tool',
  'deploy_mcp',
  'deploy_chat',
  'deploy_api',
  'create_workspace_mcp_server',
  'set_environment_variables',
  'make_api_request',
  'oauth_request_access',
  'navigate_ui',
  'knowledge_base',
  'generate_api_key',
] as const

// Set view of INTERRUPT_TOOL_NAMES for O(1) membership checks.
export const INTERRUPT_TOOL_SET = new Set<string>(INTERRUPT_TOOL_NAMES)

/**
 * Subagent dispatcher tools. These are executed by the copilot backend
 * itself, not on the sim side — the SSE handlers skip local execution
 * for them.
 */
export const SUBAGENT_TOOL_NAMES = [
  'debug',
  'edit',
  'build',
  'plan',
  'test',
  'deploy',
  'auth',
  'research',
  'knowledge',
  'custom_tool',
  'tour',
  'info',
  'workflow',
  'evaluate',
  'superagent',
  'discovery',
] as const

// Set view of SUBAGENT_TOOL_NAMES for O(1) membership checks.
export const SUBAGENT_TOOL_SET = new Set<string>(SUBAGENT_TOOL_NAMES)

/**
 * Respond tools are internal to the copilot's subagent system.
 * They're used by subagents to signal completion and should NOT be executed by the sim side.
 * The copilot backend handles these internally.
 */
export const RESPOND_TOOL_NAMES = [
  'plan_respond',
  'edit_respond',
  'build_respond',
  'debug_respond',
  'info_respond',
  'research_respond',
  'deploy_respond',
  'superagent_respond',
  'discovery_respond',
  'tour_respond',
  'auth_respond',
  'workflow_respond',
  'knowledge_respond',
  'custom_tool_respond',
  'test_respond',
] as const

// Set view of RESPOND_TOOL_NAMES for O(1) membership checks.
export const RESPOND_TOOL_SET = new Set<string>(RESPOND_TOOL_NAMES)

View File

@@ -54,14 +54,14 @@ describe('sse-handlers tool lifecycle', () => {
} }
}) })
it('executes copilot.tool.call and emits copilot.tool.result + mark-complete', async () => { it('executes tool_call and emits tool_result + mark-complete', async () => {
executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } }) executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } })
markToolComplete.mockResolvedValueOnce(true) markToolComplete.mockResolvedValueOnce(true)
const onEvent = vi.fn() const onEvent = vi.fn()
await sseHandlers['copilot.tool.call']( await sseHandlers.tool_call(
{ {
type: 'copilot.tool.call', type: 'tool_call',
data: { id: 'tool-1', name: 'get_user_workflow', arguments: { workflowId: 'workflow-1' } }, data: { id: 'tool-1', name: 'get_user_workflow', arguments: { workflowId: 'workflow-1' } },
} as any, } as any,
context, context,
@@ -73,7 +73,7 @@ describe('sse-handlers tool lifecycle', () => {
expect(markToolComplete).toHaveBeenCalledTimes(1) expect(markToolComplete).toHaveBeenCalledTimes(1)
expect(onEvent).toHaveBeenCalledWith( expect(onEvent).toHaveBeenCalledWith(
expect.objectContaining({ expect.objectContaining({
type: 'copilot.tool.result', type: 'tool_result',
toolCallId: 'tool-1', toolCallId: 'tool-1',
success: true, success: true,
}) })
@@ -84,17 +84,17 @@ describe('sse-handlers tool lifecycle', () => {
expect(updated?.result?.output).toEqual({ ok: true }) expect(updated?.result?.output).toEqual({ ok: true })
}) })
it('skips duplicate copilot.tool.call after result', async () => { it('skips duplicate tool_call after result', async () => {
executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } }) executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } })
markToolComplete.mockResolvedValueOnce(true) markToolComplete.mockResolvedValueOnce(true)
const event = { const event = {
type: 'copilot.tool.call', type: 'tool_call',
data: { id: 'tool-dup', name: 'get_user_workflow', arguments: { workflowId: 'workflow-1' } }, data: { id: 'tool-dup', name: 'get_user_workflow', arguments: { workflowId: 'workflow-1' } },
} }
await sseHandlers['copilot.tool.call'](event as any, context, execContext, { interactive: false }) await sseHandlers.tool_call(event as any, context, execContext, { interactive: false })
await sseHandlers['copilot.tool.call'](event as any, context, execContext, { interactive: false }) await sseHandlers.tool_call(event as any, context, execContext, { interactive: false })
expect(executeToolServerSide).toHaveBeenCalledTimes(1) expect(executeToolServerSide).toHaveBeenCalledTimes(1)
expect(markToolComplete).toHaveBeenCalledTimes(1) expect(markToolComplete).toHaveBeenCalledTimes(1)

View File

@@ -1,12 +1,17 @@
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { STREAM_TIMEOUT_MS } from '@/lib/copilot/constants' import { STREAM_TIMEOUT_MS } from '@/lib/copilot/constants'
import { RESPOND_TOOL_SET, SUBAGENT_TOOL_SET } from '@/lib/copilot/orchestrator/config'
import { import {
asRecord, asRecord,
getEventData, getEventData,
markToolResultSeen, markToolResultSeen,
wasToolResultSeen, wasToolResultSeen,
} from '@/lib/copilot/orchestrator/sse-utils' } from '@/lib/copilot/orchestrator/sse-utils'
import { markToolComplete } from '@/lib/copilot/orchestrator/tool-executor' import {
isIntegrationTool,
isToolAvailableOnSimSide,
markToolComplete,
} from '@/lib/copilot/orchestrator/tool-executor'
import type { import type {
ContentBlock, ContentBlock,
ExecutionContext, ExecutionContext,
@@ -17,6 +22,7 @@ import type {
} from '@/lib/copilot/orchestrator/types' } from '@/lib/copilot/orchestrator/types'
import { import {
executeToolAndReport, executeToolAndReport,
isInterruptToolName,
waitForToolCompletion, waitForToolCompletion,
waitForToolDecision, waitForToolDecision,
} from './tool-execution' } from './tool-execution'
@@ -29,208 +35,12 @@ const logger = createLogger('CopilotSseHandlers')
* execution to the browser client instead of running executeWorkflow directly. * execution to the browser client instead of running executeWorkflow directly.
*/ */
const CLIENT_EXECUTABLE_RUN_TOOLS = new Set([ const CLIENT_EXECUTABLE_RUN_TOOLS = new Set([
'workflow_run', 'run_workflow',
'run_workflow_until_block',
'run_from_block',
'run_block',
]) ])
/**
 * Maps a server-reported tool state string onto the local ToolCallState
 * status. Unknown or empty states default to 'pending'.
 */
function mapServerStateToToolStatus(state: unknown): ToolCallState['status'] {
  const normalized = String(state || '')
  if (normalized === 'executing') return 'executing'
  if (normalized === 'success') return 'success'
  if (normalized === 'rejected' || normalized === 'skipped') return 'rejected'
  if (normalized === 'aborted') return 'skipped'
  if (normalized === 'error' || normalized === 'failed') return 'error'
  // 'generating', 'pending', 'awaiting_approval', and anything unrecognized.
  return 'pending'
}
/**
 * Resolves where a tool call should execute. Prefers explicit execution
 * metadata attached to the event; falls back to name-based defaults only
 * when that metadata is missing.
 */
function getExecutionTarget(
  toolData: Record<string, unknown>,
  toolName: string
): { target: string; capabilityId?: string } {
  const execution = asRecord(toolData.execution)
  const explicitTarget = typeof execution.target === 'string' ? execution.target : ''
  if (explicitTarget.length > 0) {
    const capabilityId =
      typeof execution.capabilityId === 'string' ? execution.capabilityId : undefined
    return { target: explicitTarget, capabilityId }
  }
  // Fallback only when metadata is missing.
  return CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName)
    ? { target: 'sim_client_capability', capabilityId: 'workflow.run' }
    : { target: 'sim_server' }
}
/** True when the event's UI metadata flags this tool call for an approval interrupt. */
function needsApproval(toolData: Record<string, unknown>): boolean {
  return asRecord(toolData.ui).showInterrupt === true
}
/**
 * Defers execution of a client-capability tool to the browser client and
 * waits for the client to report completion.
 *
 * Mirrors the reported outcome onto the local ToolCallState, then notifies
 * the backend via a fire-and-forget markToolComplete — the call must NOT be
 * awaited (see the deadlock note in executeToolAndReport), so failures are
 * only logged. Finally marks the result as seen to dedupe later SSE events.
 *
 * @param toolCall - Local state record for the tool call being tracked.
 * @param options  - Orchestrator options providing timeout and abort signal.
 * @param logScope - Label used to disambiguate log lines per call site.
 */
async function waitForClientCapabilityAndReport(
  toolCall: ToolCallState,
  options: OrchestratorOptions,
  logScope: string
): Promise<void> {
  toolCall.status = 'executing'
  const completion = await waitForToolCompletion(
    toolCall.id,
    options.timeout || STREAM_TIMEOUT_MS,
    options.abortSignal
  )
  if (completion?.status === 'background') {
    // Client moved the run to background; treat it locally as skipped (202).
    toolCall.status = 'skipped'
    toolCall.endTime = Date.now()
    markToolComplete(
      toolCall.id,
      toolCall.name,
      202,
      completion.message || 'Tool execution moved to background',
      { background: true }
    ).catch((err) => {
      logger.error(`markToolComplete fire-and-forget failed (${logScope} background)`, {
        toolCallId: toolCall.id,
        error: err instanceof Error ? err.message : String(err),
      })
    })
    markToolResultSeen(toolCall.id)
    return
  }
  if (completion?.status === 'rejected') {
    // Client rejected the execution; report 400 to unblock the backend.
    toolCall.status = 'rejected'
    toolCall.endTime = Date.now()
    markToolComplete(toolCall.id, toolCall.name, 400, completion.message || 'Tool execution rejected')
      .catch((err) => {
        logger.error(`markToolComplete fire-and-forget failed (${logScope} rejected)`, {
          toolCallId: toolCall.id,
          error: err instanceof Error ? err.message : String(err),
        })
      })
    markToolResultSeen(toolCall.id)
    return
  }
  // Anything other than an explicit success (including timeout/null) is an error.
  const success = completion?.status === 'success'
  toolCall.status = success ? 'success' : 'error'
  toolCall.endTime = Date.now()
  const msg = completion?.message || (success ? 'Tool completed' : 'Tool failed or timed out')
  markToolComplete(toolCall.id, toolCall.name, success ? 200 : 500, msg).catch((err) => {
    logger.error(`markToolComplete fire-and-forget failed (${logScope})`, {
      toolCallId: toolCall.id,
      toolName: toolCall.name,
      error: err instanceof Error ? err.message : String(err),
    })
  })
  markToolResultSeen(toolCall.id)
}
/**
 * Fire-and-forget notification to the backend that a tool call finished,
 * then marks the local result as seen so duplicate SSE events are ignored.
 * The markToolComplete promise is intentionally not awaited (see the
 * deadlock note in executeToolAndReport); failures are only logged.
 */
function markToolCallAndNotify(
  toolCall: ToolCallState,
  statusCode: number,
  message: string,
  data: Record<string, unknown> | undefined,
  logScope: string
): void {
  const logFailure = (err: unknown): void => {
    logger.error(`markToolComplete fire-and-forget failed (${logScope})`, {
      toolCallId: toolCall.id,
      error: err instanceof Error ? err.message : String(err),
    })
  }
  void markToolComplete(toolCall.id, toolCall.name, statusCode, message, data).catch(logFailure)
  markToolResultSeen(toolCall.id)
}
/**
 * Central execution policy for a tool-call event.
 *
 * Order of operations: mirror any server-reported state onto the local
 * status; in interactive mode, gate approval-required tools behind a user
 * decision; defer client-capability tools to the browser client; otherwise
 * execute eligible tools server-side unless autoExecuteTools is disabled.
 *
 * @param toolCall    - Local state record for this tool call.
 * @param toolName    - Tool name from the SSE event.
 * @param toolData    - Raw event payload (execution metadata, ui flags, state).
 * @param context     - Streaming context shared across handlers.
 * @param execContext - Execution context passed through to tool execution.
 * @param options     - Orchestrator options (interactive, timeout, abort, auto-exec).
 * @param logScope    - Label used to disambiguate log lines per call site.
 */
async function executeToolCallWithPolicy(
  toolCall: ToolCallState,
  toolName: string,
  toolData: Record<string, unknown>,
  context: StreamingContext,
  execContext: ExecutionContext,
  options: OrchestratorOptions,
  logScope: string
): Promise<void> {
  const execution = getExecutionTarget(toolData, toolName)
  const isInteractive = options.interactive === true
  // Approval is only meaningful when a user is present to answer the prompt.
  const requiresApproval = isInteractive && needsApproval(toolData)
  if (toolData.state) {
    toolCall.status = mapServerStateToToolStatus(toolData.state)
  }
  if (requiresApproval) {
    const decision = await waitForToolDecision(
      toolCall.id,
      options.timeout || STREAM_TIMEOUT_MS,
      options.abortSignal
    )
    if (decision?.status === 'accepted' || decision?.status === 'success') {
      // Continue below into normal execution path.
    } else if (decision?.status === 'rejected' || decision?.status === 'error') {
      // User declined (or decision errored): report 400 and stop here.
      toolCall.status = 'rejected'
      toolCall.endTime = Date.now()
      markToolCallAndNotify(
        toolCall,
        400,
        decision.message || 'Tool execution rejected',
        { skipped: true, reason: 'user_rejected' },
        `${logScope} rejected`
      )
      return
    } else if (decision?.status === 'background') {
      // User pushed the tool to background: report 202 and stop here.
      toolCall.status = 'skipped'
      toolCall.endTime = Date.now()
      markToolCallAndNotify(
        toolCall,
        202,
        decision.message || 'Tool execution moved to background',
        { background: true },
        `${logScope} background`
      )
      return
    } else {
      // Decision was null (timeout/abort).
      toolCall.status = 'rejected'
      toolCall.endTime = Date.now()
      markToolCallAndNotify(
        toolCall,
        408,
        'Tool approval timed out',
        { skipped: true, reason: 'timeout' },
        `${logScope} timeout`
      )
      return
    }
  }
  // Client-capability tools are executed by the browser in interactive mode;
  // we only wait for the client to report the outcome.
  if (execution.target === 'sim_client_capability' && isInteractive) {
    await waitForClientCapabilityAndReport(toolCall, options, logScope)
    return
  }
  // Non-interactive client-capability tools fall through to server-side
  // execution here, alongside regular sim_server tools.
  if (
    (execution.target === 'sim_server' || execution.target === 'sim_client_capability') &&
    options.autoExecuteTools !== false
  ) {
    await executeToolAndReport(toolCall.id, context, execContext, options)
  }
}
// Normalization + dedupe helpers live in sse-utils to keep server/client in sync. // Normalization + dedupe helpers live in sse-utils to keep server/client in sync.
function inferToolSuccess(data: Record<string, unknown> | undefined): { function inferToolSuccess(data: Record<string, unknown> | undefined): {
@@ -266,7 +76,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
context.chatId = asRecord(event.data).chatId as string | undefined context.chatId = asRecord(event.data).chatId as string | undefined
}, },
title_updated: () => {}, title_updated: () => {},
'copilot.tool.result': (event, context) => { tool_result: (event, context) => {
const data = getEventData(event) const data = getEventData(event)
const toolCallId = event.toolCallId || (data?.id as string | undefined) const toolCallId = event.toolCallId || (data?.id as string | undefined)
if (!toolCallId) return if (!toolCallId) return
@@ -275,11 +85,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
const { success, hasResultData, hasError } = inferToolSuccess(data) const { success, hasResultData, hasError } = inferToolSuccess(data)
current.status = data?.state current.status = success ? 'success' : 'error'
? mapServerStateToToolStatus(data.state)
: success
? 'success'
: 'error'
current.endTime = Date.now() current.endTime = Date.now()
if (hasResultData) { if (hasResultData) {
current.result = { current.result = {
@@ -292,7 +98,35 @@ export const sseHandlers: Record<string, SSEHandler> = {
current.error = (data?.error || resultObj.error) as string | undefined current.error = (data?.error || resultObj.error) as string | undefined
} }
}, },
'copilot.tool.call': async (event, context, execContext, options) => { tool_error: (event, context) => {
const data = getEventData(event)
const toolCallId = event.toolCallId || (data?.id as string | undefined)
if (!toolCallId) return
const current = context.toolCalls.get(toolCallId)
if (!current) return
current.status = 'error'
current.error = (data?.error as string | undefined) || 'Tool execution failed'
current.endTime = Date.now()
},
tool_generating: (event, context) => {
const data = getEventData(event)
const toolCallId =
event.toolCallId ||
(data?.toolCallId as string | undefined) ||
(data?.id as string | undefined)
const toolName =
event.toolName || (data?.toolName as string | undefined) || (data?.name as string | undefined)
if (!toolCallId || !toolName) return
if (!context.toolCalls.has(toolCallId)) {
context.toolCalls.set(toolCallId, {
id: toolCallId,
name: toolName,
status: 'pending',
startTime: Date.now(),
})
}
},
tool_call: async (event, context, execContext, options) => {
const toolData = getEventData(event) || ({} as Record<string, unknown>) const toolData = getEventData(event) || ({} as Record<string, unknown>)
const toolCallId = (toolData.id as string | undefined) || event.toolCallId const toolCallId = (toolData.id as string | undefined) || event.toolCallId
const toolName = (toolData.name as string | undefined) || event.toolName const toolName = (toolData.name as string | undefined) || event.toolName
@@ -322,7 +156,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
context.toolCalls.set(toolCallId, { context.toolCalls.set(toolCallId, {
id: toolCallId, id: toolCallId,
name: toolName, name: toolName,
status: toolData.state ? mapServerStateToToolStatus(toolData.state) : 'pending', status: 'pending',
params: args, params: args,
startTime: Date.now(), startTime: Date.now(),
}) })
@@ -336,17 +170,149 @@ export const sseHandlers: Record<string, SSEHandler> = {
const toolCall = context.toolCalls.get(toolCallId) const toolCall = context.toolCalls.get(toolCallId)
if (!toolCall) return if (!toolCall) return
await executeToolCallWithPolicy( // Subagent tools are executed by the copilot backend, not sim side.
toolCall, if (SUBAGENT_TOOL_SET.has(toolName)) {
toolName, return
toolData, }
context,
execContext, // Respond tools are internal to copilot's subagent system - skip execution.
options, // The copilot backend handles these internally to signal subagent completion.
'run tool' if (RESPOND_TOOL_SET.has(toolName)) {
toolCall.status = 'success'
toolCall.endTime = Date.now()
toolCall.result = {
success: true,
output: 'Internal respond tool - handled by copilot backend',
}
return
}
const isInterruptTool = isInterruptToolName(toolName)
const isInteractive = options.interactive === true
// Integration tools (user-installed) also require approval in interactive mode
const needsApproval = isInterruptTool || isIntegrationTool(toolName)
if (needsApproval && isInteractive) {
const decision = await waitForToolDecision(
toolCallId,
options.timeout || STREAM_TIMEOUT_MS,
options.abortSignal
) )
if (decision?.status === 'accepted' || decision?.status === 'success') {
// Client-executable run tools: defer execution to the browser client.
// The client calls executeWorkflowWithFullLogging for real-time feedback
// (block pulsing, logs, stop button) and reports completion via
// /api/copilot/confirm with status success/error. We poll Redis for
// that completion signal, then fire-and-forget markToolComplete to Go.
if (CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName)) {
toolCall.status = 'executing'
const completion = await waitForToolCompletion(
toolCallId,
options.timeout || STREAM_TIMEOUT_MS,
options.abortSignal
)
if (completion?.status === 'background') {
toolCall.status = 'skipped'
toolCall.endTime = Date.now()
markToolComplete(
toolCall.id,
toolCall.name,
202,
completion.message || 'Tool execution moved to background',
{ background: true }
).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (run tool background)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCallId)
return
}
const success = completion?.status === 'success'
toolCall.status = success ? 'success' : 'error'
toolCall.endTime = Date.now()
const msg =
completion?.message || (success ? 'Tool completed' : 'Tool failed or timed out')
// Fire-and-forget: tell Go backend the tool is done
// (must NOT await — see deadlock note in executeToolAndReport)
markToolComplete(toolCall.id, toolCall.name, success ? 200 : 500, msg).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (run tool)', {
toolCallId: toolCall.id,
toolName: toolCall.name,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCallId)
return
}
await executeToolAndReport(toolCallId, context, execContext, options)
return
}
if (decision?.status === 'rejected' || decision?.status === 'error') {
toolCall.status = 'rejected'
toolCall.endTime = Date.now()
// Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport
markToolComplete(
toolCall.id,
toolCall.name,
400,
decision.message || 'Tool execution rejected',
{ skipped: true, reason: 'user_rejected' }
).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (rejected)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
return
}
if (decision?.status === 'background') {
toolCall.status = 'skipped'
toolCall.endTime = Date.now()
// Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport
markToolComplete(
toolCall.id,
toolCall.name,
202,
decision.message || 'Tool execution moved to background',
{ background: true }
).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (background)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
return
}
// Decision was null — timed out or aborted.
// Do NOT fall through to auto-execute. Mark the tool as timed out
// and notify Go so it can unblock waitForExternalTool.
toolCall.status = 'rejected'
toolCall.endTime = Date.now()
markToolComplete(toolCall.id, toolCall.name, 408, 'Tool approval timed out', {
skipped: true,
reason: 'timeout',
}).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (timeout)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
return
}
if (options.autoExecuteTools !== false) {
await executeToolAndReport(toolCallId, context, execContext, options)
}
}, },
'copilot.phase.progress': (event, context) => { reasoning: (event, context) => {
const d = asRecord(event.data) const d = asRecord(event.data)
const phase = d.phase || asRecord(d.data).phase const phase = d.phase || asRecord(d.data).phase
if (phase === 'start') { if (phase === 'start') {
@@ -370,7 +336,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
if (!chunk || !context.currentThinkingBlock) return if (!chunk || !context.currentThinkingBlock) return
context.currentThinkingBlock.content = `${context.currentThinkingBlock.content || ''}${chunk}` context.currentThinkingBlock.content = `${context.currentThinkingBlock.content || ''}${chunk}`
}, },
'copilot.content': (event, context) => { content: (event, context) => {
// Go backend sends content as a plain string in event.data, not wrapped in an object. // Go backend sends content as a plain string in event.data, not wrapped in an object.
let chunk: string | undefined let chunk: string | undefined
if (typeof event.data === 'string') { if (typeof event.data === 'string') {
@@ -383,20 +349,20 @@ export const sseHandlers: Record<string, SSEHandler> = {
context.accumulatedContent += chunk context.accumulatedContent += chunk
addContentBlock(context, { type: 'text', content: chunk }) addContentBlock(context, { type: 'text', content: chunk })
}, },
'copilot.phase.completed': (event, context) => { done: (event, context) => {
const d = asRecord(event.data) const d = asRecord(event.data)
if (d.responseId) { if (d.responseId) {
context.conversationId = d.responseId as string context.conversationId = d.responseId as string
} }
context.streamComplete = true context.streamComplete = true
}, },
'copilot.phase.started': (event, context) => { start: (event, context) => {
const d = asRecord(event.data) const d = asRecord(event.data)
if (d.responseId) { if (d.responseId) {
context.conversationId = d.responseId as string context.conversationId = d.responseId as string
} }
}, },
'copilot.error': (event, context) => { error: (event, context) => {
const d = asRecord(event.data) const d = asRecord(event.data)
const message = (d.message || d.error || event.error) as string | undefined const message = (d.message || d.error || event.error) as string | undefined
if (message) { if (message) {
@@ -407,7 +373,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
} }
export const subAgentHandlers: Record<string, SSEHandler> = { export const subAgentHandlers: Record<string, SSEHandler> = {
'copilot.content': (event, context) => { content: (event, context) => {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
if (!parentToolCallId || !event.data) return if (!parentToolCallId || !event.data) return
// Go backend sends content as a plain string in event.data // Go backend sends content as a plain string in event.data
@@ -423,7 +389,7 @@ export const subAgentHandlers: Record<string, SSEHandler> = {
(context.subAgentContent[parentToolCallId] || '') + chunk (context.subAgentContent[parentToolCallId] || '') + chunk
addContentBlock(context, { type: 'subagent_text', content: chunk }) addContentBlock(context, { type: 'subagent_text', content: chunk })
}, },
'copilot.tool.call': async (event, context, execContext, options) => { tool_call: async (event, context, execContext, options) => {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
if (!parentToolCallId) return if (!parentToolCallId) return
const toolData = getEventData(event) || ({} as Record<string, unknown>) const toolData = getEventData(event) || ({} as Record<string, unknown>)
@@ -444,7 +410,7 @@ export const subAgentHandlers: Record<string, SSEHandler> = {
const toolCall: ToolCallState = { const toolCall: ToolCallState = {
id: toolCallId, id: toolCallId,
name: toolName, name: toolName,
status: toolData.state ? mapServerStateToToolStatus(toolData.state) : 'pending', status: 'pending',
params: args, params: args,
startTime: Date.now(), startTime: Date.now(),
} }
@@ -462,17 +428,159 @@ export const subAgentHandlers: Record<string, SSEHandler> = {
if (isPartial) return if (isPartial) return
await executeToolCallWithPolicy( // Respond tools are internal to copilot's subagent system - skip execution.
toolCall, if (RESPOND_TOOL_SET.has(toolName)) {
toolName, toolCall.status = 'success'
toolData, toolCall.endTime = Date.now()
context, toolCall.result = {
execContext, success: true,
options, output: 'Internal respond tool - handled by copilot backend',
'subagent run tool' }
return
}
// Tools that only exist on the Go backend (e.g. search_patterns,
// search_errors, remember_debug) should NOT be re-executed on the Sim side.
// The Go backend already executed them and will send its own tool_result
// SSE event with the real outcome. Trying to execute them here would fail
// with "Tool not found" and incorrectly mark the tool as failed.
if (!isToolAvailableOnSimSide(toolName)) {
return
}
// Interrupt tools and integration tools (user-installed) require approval
// in interactive mode, same as top-level handler.
const needsSubagentApproval = isInterruptToolName(toolName) || isIntegrationTool(toolName)
if (options.interactive === true && needsSubagentApproval) {
const decision = await waitForToolDecision(
toolCallId,
options.timeout || STREAM_TIMEOUT_MS,
options.abortSignal
) )
if (decision?.status === 'accepted' || decision?.status === 'success') {
await executeToolAndReport(toolCallId, context, execContext, options)
return
}
if (decision?.status === 'rejected' || decision?.status === 'error') {
toolCall.status = 'rejected'
toolCall.endTime = Date.now()
// Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport
markToolComplete(
toolCall.id,
toolCall.name,
400,
decision.message || 'Tool execution rejected',
{ skipped: true, reason: 'user_rejected' }
).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (subagent rejected)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
return
}
if (decision?.status === 'background') {
toolCall.status = 'skipped'
toolCall.endTime = Date.now()
// Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport
markToolComplete(
toolCall.id,
toolCall.name,
202,
decision.message || 'Tool execution moved to background',
{ background: true }
).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (subagent background)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
return
}
// Decision was null — timed out or aborted.
// Do NOT fall through to auto-execute.
toolCall.status = 'rejected'
toolCall.endTime = Date.now()
markToolComplete(toolCall.id, toolCall.name, 408, 'Tool approval timed out', {
skipped: true,
reason: 'timeout',
}).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (subagent timeout)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
return
}
// Client-executable run tools in interactive mode: defer to client.
// Same pattern as main handler: wait for client completion, then tell Go.
if (options.interactive === true && CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName)) {
toolCall.status = 'executing'
const completion = await waitForToolCompletion(
toolCallId,
options.timeout || STREAM_TIMEOUT_MS,
options.abortSignal
)
if (completion?.status === 'rejected') {
toolCall.status = 'rejected'
toolCall.endTime = Date.now()
markToolComplete(
toolCall.id,
toolCall.name,
400,
completion.message || 'Tool execution rejected'
).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (subagent run tool rejected)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCallId)
return
}
if (completion?.status === 'background') {
toolCall.status = 'skipped'
toolCall.endTime = Date.now()
markToolComplete(
toolCall.id,
toolCall.name,
202,
completion.message || 'Tool execution moved to background',
{ background: true }
).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (subagent run tool background)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCallId)
return
}
const success = completion?.status === 'success'
toolCall.status = success ? 'success' : 'error'
toolCall.endTime = Date.now()
const msg = completion?.message || (success ? 'Tool completed' : 'Tool failed or timed out')
markToolComplete(toolCall.id, toolCall.name, success ? 200 : 500, msg).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (subagent run tool)', {
toolCallId: toolCall.id,
toolName: toolCall.name,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCallId)
return
}
if (options.autoExecuteTools !== false) {
await executeToolAndReport(toolCallId, context, execContext, options)
}
}, },
'copilot.tool.result': (event, context) => { tool_result: (event, context) => {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
if (!parentToolCallId) return if (!parentToolCallId) return
const data = getEventData(event) const data = getEventData(event)
@@ -488,7 +596,7 @@ export const subAgentHandlers: Record<string, SSEHandler> = {
const { success, hasResultData, hasError } = inferToolSuccess(data) const { success, hasResultData, hasError } = inferToolSuccess(data)
const status = data?.state ? mapServerStateToToolStatus(data.state) : success ? 'success' : 'error' const status = success ? 'success' : 'error'
const endTime = Date.now() const endTime = Date.now()
const result = hasResultData ? { success, output: data?.result || data?.data } : undefined const result = hasResultData ? { success, output: data?.result || data?.data } : undefined
@@ -512,22 +620,8 @@ export const subAgentHandlers: Record<string, SSEHandler> = {
} }
} }
}, },
'copilot.phase.progress': () => {
// Subagent reasoning chunks are surfaced via copilot.content.
},
'copilot.phase.completed': () => {},
} }
sseHandlers['copilot.tool.interrupt_required'] = sseHandlers['copilot.tool.call']
sseHandlers['copilot.workflow.patch'] = sseHandlers['copilot.tool.result']
sseHandlers['copilot.workflow.verify'] = sseHandlers['copilot.tool.result']
sseHandlers['copilot.tool.interrupt_resolved'] = sseHandlers['copilot.tool.result']
subAgentHandlers['copilot.tool.interrupt_required'] = subAgentHandlers['copilot.tool.call']
subAgentHandlers['copilot.workflow.patch'] = subAgentHandlers['copilot.tool.result']
subAgentHandlers['copilot.workflow.verify'] = subAgentHandlers['copilot.tool.result']
subAgentHandlers['copilot.tool.interrupt_resolved'] = subAgentHandlers['copilot.tool.result']
export function handleSubagentRouting(event: SSEEvent, context: StreamingContext): boolean { export function handleSubagentRouting(event: SSEEvent, context: StreamingContext): boolean {
if (!event.subagent) return false if (!event.subagent) return false
if (!context.subAgentParentToolCallId) { if (!context.subAgentParentToolCallId) {

View File

@@ -4,6 +4,7 @@ import {
TOOL_DECISION_MAX_POLL_MS, TOOL_DECISION_MAX_POLL_MS,
TOOL_DECISION_POLL_BACKOFF, TOOL_DECISION_POLL_BACKOFF,
} from '@/lib/copilot/constants' } from '@/lib/copilot/constants'
import { INTERRUPT_TOOL_SET } from '@/lib/copilot/orchestrator/config'
import { getToolConfirmation } from '@/lib/copilot/orchestrator/persistence' import { getToolConfirmation } from '@/lib/copilot/orchestrator/persistence'
import { import {
asRecord, asRecord,
@@ -20,6 +21,10 @@ import type {
const logger = createLogger('CopilotSseToolExecution') const logger = createLogger('CopilotSseToolExecution')
export function isInterruptToolName(toolName: string): boolean {
return INTERRUPT_TOOL_SET.has(toolName)
}
export async function executeToolAndReport( export async function executeToolAndReport(
toolCallId: string, toolCallId: string,
context: StreamingContext, context: StreamingContext,
@@ -29,11 +34,9 @@ export async function executeToolAndReport(
const toolCall = context.toolCalls.get(toolCallId) const toolCall = context.toolCalls.get(toolCallId)
if (!toolCall) return if (!toolCall) return
const lockable = toolCall as typeof toolCall & { __simExecuting?: boolean } if (toolCall.status === 'executing') return
if (lockable.__simExecuting) return
if (wasToolResultSeen(toolCall.id)) return if (wasToolResultSeen(toolCall.id)) return
lockable.__simExecuting = true
toolCall.status = 'executing' toolCall.status = 'executing'
try { try {
const result = await executeToolServerSide(toolCall, execContext) const result = await executeToolServerSide(toolCall, execContext)
@@ -80,7 +83,7 @@ export async function executeToolAndReport(
}) })
const resultEvent: SSEEvent = { const resultEvent: SSEEvent = {
type: 'copilot.tool.result', type: 'tool_result',
toolCallId: toolCall.id, toolCallId: toolCall.id,
toolName: toolCall.name, toolName: toolCall.name,
success: result.success, success: result.success,
@@ -88,8 +91,6 @@ export async function executeToolAndReport(
data: { data: {
id: toolCall.id, id: toolCall.id,
name: toolCall.name, name: toolCall.name,
phase: 'completed',
state: result.success ? 'success' : 'error',
success: result.success, success: result.success,
result: result.output, result: result.output,
}, },
@@ -112,22 +113,15 @@ export async function executeToolAndReport(
}) })
const errorEvent: SSEEvent = { const errorEvent: SSEEvent = {
type: 'copilot.tool.result', type: 'tool_error',
toolCallId: toolCall.id, toolCallId: toolCall.id,
toolName: toolCall.name,
success: false,
data: { data: {
id: toolCall.id, id: toolCall.id,
name: toolCall.name, name: toolCall.name,
phase: 'completed',
state: 'error',
success: false,
error: toolCall.error, error: toolCall.error,
}, },
} }
await options?.onEvent?.(errorEvent) await options?.onEvent?.(errorEvent)
} finally {
delete lockable.__simExecuting
} }
} }

View File

@@ -11,10 +11,10 @@ import {
describe('sse-utils', () => { describe('sse-utils', () => {
it.concurrent('normalizes tool fields from string data', () => { it.concurrent('normalizes tool fields from string data', () => {
const event = { const event = {
type: 'copilot.tool.result', type: 'tool_result',
data: JSON.stringify({ data: JSON.stringify({
id: 'tool_1', id: 'tool_1',
name: 'workflow_change', name: 'edit_workflow',
success: true, success: true,
result: { ok: true }, result: { ok: true },
}), }),
@@ -22,62 +22,21 @@ describe('sse-utils', () => {
const normalized = normalizeSseEvent(event as any) const normalized = normalizeSseEvent(event as any)
expect(normalized.type).toBe('copilot.tool.result')
expect(normalized.toolCallId).toBe('tool_1') expect(normalized.toolCallId).toBe('tool_1')
expect(normalized.toolName).toBe('workflow_change') expect(normalized.toolName).toBe('edit_workflow')
expect(normalized.success).toBe(true) expect(normalized.success).toBe(true)
expect(normalized.result).toEqual({ ok: true }) expect(normalized.result).toEqual({ ok: true })
}) })
it.concurrent('maps copilot tool event aliases and preserves tool metadata', () => { it.concurrent('dedupes tool_call events', () => {
const event = { const event = { type: 'tool_call', data: { id: 'tool_call_1', name: 'plan' } }
type: 'copilot.tool.interrupt_required',
data: {
id: 'tool_legacy_1',
name: 'workflow_run',
state: 'pending',
ui: { showInterrupt: true },
},
}
const normalized = normalizeSseEvent(event as any)
expect(normalized.type).toBe('copilot.tool.interrupt_required')
expect(normalized.toolCallId).toBe('tool_legacy_1')
expect(normalized.toolName).toBe('workflow_run')
})
it.concurrent('keeps copilot content event type when payload is plain string', () => {
const event = {
type: 'copilot.content',
data: 'hello world',
}
const normalized = normalizeSseEvent(event as any)
expect(normalized.type).toBe('copilot.content')
expect(normalized.data).toBe('hello world')
})
it.concurrent('dedupes copilot tool call events', () => {
const event = { type: 'copilot.tool.call', data: { id: 'tool_call_1', name: 'plan' } }
expect(shouldSkipToolCallEvent(event as any)).toBe(false) expect(shouldSkipToolCallEvent(event as any)).toBe(false)
expect(shouldSkipToolCallEvent(event as any)).toBe(true) expect(shouldSkipToolCallEvent(event as any)).toBe(true)
}) })
it.concurrent('dedupes copilot tool result events', () => { it.concurrent('dedupes tool_result events', () => {
const event = { type: 'copilot.tool.result', data: { id: 'tool_result_1', name: 'plan' } } const event = { type: 'tool_result', data: { id: 'tool_result_1', name: 'plan' } }
expect(shouldSkipToolResultEvent(event as any)).toBe(false) expect(shouldSkipToolResultEvent(event as any)).toBe(false)
expect(shouldSkipToolResultEvent(event as any)).toBe(true) expect(shouldSkipToolResultEvent(event as any)).toBe(true)
}) })
it.concurrent('dedupes copilot workflow patch result events', () => {
const normalized = normalizeSseEvent({
type: 'copilot.workflow.patch',
data: { id: 'tool_result_aliased_1', name: 'workflow_change' },
} as any)
expect(shouldSkipToolResultEvent(normalized as any)).toBe(false)
expect(shouldSkipToolResultEvent(normalized as any)).toBe(true)
})
}) })

View File

@@ -101,21 +101,8 @@ export function wasToolResultSeen(toolCallId: string): boolean {
return seenToolResults.has(toolCallId) return seenToolResults.has(toolCallId)
} }
function isToolCallEventType(type: string): boolean {
return type === 'copilot.tool.call' || type === 'copilot.tool.interrupt_required'
}
function isToolResultEventType(type: string): boolean {
return (
type === 'copilot.tool.result' ||
type === 'copilot.workflow.patch' ||
type === 'copilot.workflow.verify' ||
type === 'copilot.tool.interrupt_resolved'
)
}
export function shouldSkipToolCallEvent(event: SSEEvent): boolean { export function shouldSkipToolCallEvent(event: SSEEvent): boolean {
if (!isToolCallEventType(String(event.type || ''))) return false if (event.type !== 'tool_call') return false
const toolCallId = getToolCallIdFromEvent(event) const toolCallId = getToolCallIdFromEvent(event)
if (!toolCallId) return false if (!toolCallId) return false
const eventData = getEventData(event) const eventData = getEventData(event)
@@ -128,7 +115,7 @@ export function shouldSkipToolCallEvent(event: SSEEvent): boolean {
} }
export function shouldSkipToolResultEvent(event: SSEEvent): boolean { export function shouldSkipToolResultEvent(event: SSEEvent): boolean {
if (!isToolResultEventType(String(event.type || ''))) return false if (event.type !== 'tool_result') return false
const toolCallId = getToolCallIdFromEvent(event) const toolCallId = getToolCallIdFromEvent(event)
if (!toolCallId) return false if (!toolCallId) return false
if (wasToolResultSeen(toolCallId)) return true if (wasToolResultSeen(toolCallId)) return true

View File

@@ -97,8 +97,8 @@ describe('stream-buffer', () => {
}) })
it.concurrent('replays events after a given event id', async () => { it.concurrent('replays events after a given event id', async () => {
await appendStreamEvent('stream-1', { type: 'copilot.content', data: 'hello' }) await appendStreamEvent('stream-1', { type: 'content', data: 'hello' })
await appendStreamEvent('stream-1', { type: 'copilot.content', data: 'world' }) await appendStreamEvent('stream-1', { type: 'content', data: 'world' })
const allEvents = await readStreamEvents('stream-1', 0) const allEvents = await readStreamEvents('stream-1', 0)
expect(allEvents.map((entry) => entry.event.data)).toEqual(['hello', 'world']) expect(allEvents.map((entry) => entry.event.data)).toEqual(['hello', 'world'])
@@ -109,8 +109,8 @@ describe('stream-buffer', () => {
it.concurrent('flushes buffered events for resume', async () => { it.concurrent('flushes buffered events for resume', async () => {
const writer = createStreamEventWriter('stream-2') const writer = createStreamEventWriter('stream-2')
await writer.write({ type: 'copilot.content', data: 'a' }) await writer.write({ type: 'content', data: 'a' })
await writer.write({ type: 'copilot.content', data: 'b' }) await writer.write({ type: 'content', data: 'b' })
await writer.flush() await writer.flush()
const events = await readStreamEvents('stream-2', 0) const events = await readStreamEvents('stream-2', 0)

View File

@@ -127,7 +127,7 @@ export async function runStreamLoop(
} }
// Standard subagent start/end handling. // Standard subagent start/end handling.
if (normalizedEvent.type === 'copilot.subagent.started') { if (normalizedEvent.type === 'subagent_start') {
const eventData = normalizedEvent.data as Record<string, unknown> | undefined const eventData = normalizedEvent.data as Record<string, unknown> | undefined
const toolCallId = eventData?.tool_call_id as string | undefined const toolCallId = eventData?.tool_call_id as string | undefined
if (toolCallId) { if (toolCallId) {
@@ -138,7 +138,7 @@ export async function runStreamLoop(
continue continue
} }
if (normalizedEvent.type === 'copilot.subagent.completed') { if (normalizedEvent.type === 'subagent_end') {
context.subAgentParentToolCallId = undefined context.subAgentParentToolCallId = undefined
continue continue
} }

View File

@@ -74,7 +74,7 @@ export async function orchestrateSubagentStream(
} }
// For direct subagent calls, events may have the subagent field set // For direct subagent calls, events may have the subagent field set
// but no copilot.subagent.started because this IS the top-level agent. // but no subagent_start because this IS the top-level agent.
// Skip subagent routing for events where the subagent field matches // Skip subagent routing for events where the subagent field matches
// the current agentId - these are top-level events. // the current agentId - these are top-level events.
if (event.subagent === agentId && !ctx.subAgentParentToolCallId) { if (event.subagent === agentId && !ctx.subAgentParentToolCallId) {

View File

@@ -220,8 +220,7 @@ export async function executeDeployMcp(
if (!workflowRecord.isDeployed) { if (!workflowRecord.isDeployed) {
return { return {
success: false, success: false,
error: error: 'Workflow must be deployed before adding as an MCP tool. Use deploy_api first.',
'Workflow must be deployed before adding as an MCP tool. Use workflow_deploy(mode: "api") first.',
} }
} }

View File

@@ -1,7 +1,7 @@
import { db } from '@sim/db' import { db } from '@sim/db'
import { customTools, workflow } from '@sim/db/schema' import { workflow } from '@sim/db/schema'
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { and, desc, eq, isNull, or } from 'drizzle-orm' import { eq } from 'drizzle-orm'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import type { import type {
ExecutionContext, ExecutionContext,
@@ -12,7 +12,6 @@ import { routeExecution } from '@/lib/copilot/tools/server/router'
import { env } from '@/lib/core/config/env' import { env } from '@/lib/core/config/env'
import { getBaseUrl } from '@/lib/core/utils/urls' import { getBaseUrl } from '@/lib/core/utils/urls'
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
import { upsertCustomTools } from '@/lib/workflows/custom-tools/operations'
import { getTool, resolveToolId } from '@/tools/utils' import { getTool, resolveToolId } from '@/tools/utils'
import { import {
executeCheckDeploymentStatus, executeCheckDeploymentStatus,
@@ -50,8 +49,6 @@ import type {
RunWorkflowParams, RunWorkflowParams,
RunWorkflowUntilBlockParams, RunWorkflowUntilBlockParams,
SetGlobalWorkflowVariablesParams, SetGlobalWorkflowVariablesParams,
WorkflowDeployParams,
WorkflowRunParams,
} from './param-types' } from './param-types'
import { PLATFORM_ACTIONS_CONTENT } from './platform-actions' import { PLATFORM_ACTIONS_CONTENT } from './platform-actions'
import { import {
@@ -79,332 +76,13 @@ import {
const logger = createLogger('CopilotToolExecutor') const logger = createLogger('CopilotToolExecutor')
type ManageCustomToolOperation = 'add' | 'edit' | 'delete' | 'list'
interface ManageCustomToolSchema {
type: 'function'
function: {
name: string
description?: string
parameters: Record<string, unknown>
}
}
interface ManageCustomToolParams {
operation?: string
toolId?: string
schema?: ManageCustomToolSchema
code?: string
title?: string
workspaceId?: string
}
async function executeManageCustomTool(
rawParams: Record<string, unknown>,
context: ExecutionContext
): Promise<ToolCallResult> {
const params = rawParams as ManageCustomToolParams
const operation = String(params.operation || '').toLowerCase() as ManageCustomToolOperation
const workspaceId = params.workspaceId || context.workspaceId
if (!operation) {
return { success: false, error: "Missing required 'operation' argument" }
}
try {
if (operation === 'list') {
const toolsForUser = workspaceId
? await db
.select()
.from(customTools)
.where(
or(
eq(customTools.workspaceId, workspaceId),
and(isNull(customTools.workspaceId), eq(customTools.userId, context.userId))
)
)
.orderBy(desc(customTools.createdAt))
: await db
.select()
.from(customTools)
.where(and(isNull(customTools.workspaceId), eq(customTools.userId, context.userId)))
.orderBy(desc(customTools.createdAt))
return {
success: true,
output: {
success: true,
operation,
tools: toolsForUser,
count: toolsForUser.length,
},
}
}
if (operation === 'add') {
if (!workspaceId) {
return {
success: false,
error: "workspaceId is required for operation 'add'",
}
}
if (!params.schema || !params.code) {
return {
success: false,
error: "Both 'schema' and 'code' are required for operation 'add'",
}
}
const title = params.title || params.schema.function?.name
if (!title) {
return { success: false, error: "Missing tool title or schema.function.name for 'add'" }
}
const resultTools = await upsertCustomTools({
tools: [
{
title,
schema: params.schema,
code: params.code,
},
],
workspaceId,
userId: context.userId,
})
const created = resultTools.find((tool) => tool.title === title)
return {
success: true,
output: {
success: true,
operation,
toolId: created?.id,
title,
message: `Created custom tool "${title}"`,
},
}
}
if (operation === 'edit') {
if (!workspaceId) {
return {
success: false,
error: "workspaceId is required for operation 'edit'",
}
}
if (!params.toolId) {
return { success: false, error: "'toolId' is required for operation 'edit'" }
}
if (!params.schema && !params.code) {
return {
success: false,
error: "At least one of 'schema' or 'code' is required for operation 'edit'",
}
}
const workspaceTool = await db
.select()
.from(customTools)
.where(and(eq(customTools.id, params.toolId), eq(customTools.workspaceId, workspaceId)))
.limit(1)
const legacyTool =
workspaceTool.length === 0
? await db
.select()
.from(customTools)
.where(
and(
eq(customTools.id, params.toolId),
isNull(customTools.workspaceId),
eq(customTools.userId, context.userId)
)
)
.limit(1)
: []
const existing = workspaceTool[0] || legacyTool[0]
if (!existing) {
return { success: false, error: `Custom tool not found: ${params.toolId}` }
}
const mergedSchema = params.schema || (existing.schema as ManageCustomToolSchema)
const mergedCode = params.code || existing.code
const title = params.title || mergedSchema.function?.name || existing.title
await upsertCustomTools({
tools: [
{
id: params.toolId,
title,
schema: mergedSchema,
code: mergedCode,
},
],
workspaceId,
userId: context.userId,
})
return {
success: true,
output: {
success: true,
operation,
toolId: params.toolId,
title,
message: `Updated custom tool "${title}"`,
},
}
}
if (operation === 'delete') {
if (!params.toolId) {
return { success: false, error: "'toolId' is required for operation 'delete'" }
}
const workspaceDelete =
workspaceId != null
? await db
.delete(customTools)
.where(
and(eq(customTools.id, params.toolId), eq(customTools.workspaceId, workspaceId))
)
.returning({ id: customTools.id })
: []
const legacyDelete =
workspaceDelete.length === 0
? await db
.delete(customTools)
.where(
and(
eq(customTools.id, params.toolId),
isNull(customTools.workspaceId),
eq(customTools.userId, context.userId)
)
)
.returning({ id: customTools.id })
: []
const deleted = workspaceDelete[0] || legacyDelete[0]
if (!deleted) {
return { success: false, error: `Custom tool not found: ${params.toolId}` }
}
return {
success: true,
output: {
success: true,
operation,
toolId: params.toolId,
message: 'Deleted custom tool',
},
}
}
return {
success: false,
error: `Unsupported operation for manage_custom_tool: ${operation}`,
}
} catch (error) {
logger.error('manage_custom_tool execution failed', {
operation,
workspaceId,
userId: context.userId,
error: error instanceof Error ? error.message : String(error),
})
return {
success: false,
error: error instanceof Error ? error.message : 'Failed to manage custom tool',
}
}
}
async function executeWorkflowRunUnified(
rawParams: Record<string, unknown>,
context: ExecutionContext
): Promise<ToolCallResult> {
const params = rawParams as WorkflowRunParams
const mode = params.mode || 'full'
switch (mode) {
case 'full':
return executeRunWorkflow(params as RunWorkflowParams, context)
case 'until_block':
if (!params.stopAfterBlockId) {
return { success: false, error: 'stopAfterBlockId is required for mode=until_block' }
}
return executeRunWorkflowUntilBlock(params as RunWorkflowUntilBlockParams, context)
case 'from_block':
if (!params.startBlockId) {
return { success: false, error: 'startBlockId is required for mode=from_block' }
}
return executeRunFromBlock(params as RunFromBlockParams, context)
case 'block':
if (!params.blockId) {
return { success: false, error: 'blockId is required for mode=block' }
}
return executeRunBlock(params as RunBlockParams, context)
default:
return {
success: false,
error: `Unsupported workflow_run mode: ${String(mode)}`,
}
}
}
async function executeWorkflowDeployUnified(
rawParams: Record<string, unknown>,
context: ExecutionContext
): Promise<ToolCallResult> {
const params = rawParams as unknown as WorkflowDeployParams
const mode = params.mode
if (!mode) {
return { success: false, error: 'mode is required for workflow_deploy' }
}
const scopedContext =
params.workflowId && params.workflowId !== context.workflowId
? { ...context, workflowId: params.workflowId }
: context
switch (mode) {
case 'status':
return executeCheckDeploymentStatus(params as CheckDeploymentStatusParams, scopedContext)
case 'redeploy':
return executeRedeploy(scopedContext)
case 'api':
return executeDeployApi(params as DeployApiParams, scopedContext)
case 'chat':
return executeDeployChat(params as DeployChatParams, scopedContext)
case 'mcp':
return executeDeployMcp(params as DeployMcpParams, scopedContext)
case 'list_mcp_servers':
return executeListWorkspaceMcpServers(params as ListWorkspaceMcpServersParams, scopedContext)
case 'create_mcp_server':
return executeCreateWorkspaceMcpServer(
params as CreateWorkspaceMcpServerParams,
scopedContext
)
default:
return {
success: false,
error: `Unsupported workflow_deploy mode: ${String(mode)}`,
}
}
}
const SERVER_TOOLS = new Set<string>([ const SERVER_TOOLS = new Set<string>([
'get_blocks_and_tools', 'get_blocks_and_tools',
'get_blocks_metadata', 'get_blocks_metadata',
'get_block_options', 'get_block_options',
'get_block_config', 'get_block_config',
'get_trigger_blocks', 'get_trigger_blocks',
'workflow_context_get', 'edit_workflow',
'workflow_context_expand',
'workflow_change',
'workflow_verify',
'get_workflow_console', 'get_workflow_console',
'search_documentation', 'search_documentation',
'search_online', 'search_online',
@@ -432,7 +110,11 @@ const SIM_WORKFLOW_TOOL_HANDLERS: Record<
get_block_outputs: (p, c) => executeGetBlockOutputs(p as GetBlockOutputsParams, c), get_block_outputs: (p, c) => executeGetBlockOutputs(p as GetBlockOutputsParams, c),
get_block_upstream_references: (p, c) => get_block_upstream_references: (p, c) =>
executeGetBlockUpstreamReferences(p as unknown as GetBlockUpstreamReferencesParams, c), executeGetBlockUpstreamReferences(p as unknown as GetBlockUpstreamReferencesParams, c),
workflow_run: (p, c) => executeWorkflowRunUnified(p, c), run_workflow: (p, c) => executeRunWorkflow(p as RunWorkflowParams, c),
run_workflow_until_block: (p, c) =>
executeRunWorkflowUntilBlock(p as unknown as RunWorkflowUntilBlockParams, c),
run_from_block: (p, c) => executeRunFromBlock(p as unknown as RunFromBlockParams, c),
run_block: (p, c) => executeRunBlock(p as unknown as RunBlockParams, c),
get_deployed_workflow_state: (p, c) => get_deployed_workflow_state: (p, c) =>
executeGetDeployedWorkflowState(p as GetDeployedWorkflowStateParams, c), executeGetDeployedWorkflowState(p as GetDeployedWorkflowStateParams, c),
generate_api_key: (p, c) => executeGenerateApiKey(p as unknown as GenerateApiKeyParams, c), generate_api_key: (p, c) => executeGenerateApiKey(p as unknown as GenerateApiKeyParams, c),
@@ -443,7 +125,10 @@ const SIM_WORKFLOW_TOOL_HANDLERS: Record<
}), }),
set_global_workflow_variables: (p, c) => set_global_workflow_variables: (p, c) =>
executeSetGlobalWorkflowVariables(p as SetGlobalWorkflowVariablesParams, c), executeSetGlobalWorkflowVariables(p as SetGlobalWorkflowVariablesParams, c),
workflow_deploy: (p, c) => executeWorkflowDeployUnified(p, c), deploy_api: (p, c) => executeDeployApi(p as DeployApiParams, c),
deploy_chat: (p, c) => executeDeployChat(p as DeployChatParams, c),
deploy_mcp: (p, c) => executeDeployMcp(p as DeployMcpParams, c),
redeploy: (_p, c) => executeRedeploy(c),
check_deployment_status: (p, c) => check_deployment_status: (p, c) =>
executeCheckDeploymentStatus(p as CheckDeploymentStatusParams, c), executeCheckDeploymentStatus(p as CheckDeploymentStatusParams, c),
list_workspace_mcp_servers: (p, c) => list_workspace_mcp_servers: (p, c) =>
@@ -476,19 +161,6 @@ const SIM_WORKFLOW_TOOL_HANDLERS: Record<
} }
} }
}, },
oauth_request_access: async (p, _c) => {
const providerName = (p.providerName || p.provider_name || 'the provider') as string
return {
success: true,
output: {
success: true,
status: 'requested',
providerName,
message: `Requested ${providerName} OAuth connection. The user should complete the OAuth modal in the UI, then retry credential-dependent actions.`,
},
}
},
manage_custom_tool: (p, c) => executeManageCustomTool(p, c),
} }
/** /**

View File

@@ -93,18 +93,6 @@ export interface RunBlockParams {
useDeployedState?: boolean useDeployedState?: boolean
} }
export interface WorkflowRunParams {
mode?: 'full' | 'until_block' | 'from_block' | 'block'
workflowId?: string
workflow_input?: unknown
input?: unknown
useDeployedState?: boolean
stopAfterBlockId?: string
startBlockId?: string
blockId?: string
executionId?: string
}
export interface GetDeployedWorkflowStateParams { export interface GetDeployedWorkflowStateParams {
workflowId?: string workflowId?: string
} }
@@ -181,39 +169,6 @@ export interface CreateWorkspaceMcpServerParams {
workflowIds?: string[] workflowIds?: string[]
} }
export interface WorkflowDeployParams {
mode:
| 'status'
| 'redeploy'
| 'api'
| 'chat'
| 'mcp'
| 'list_mcp_servers'
| 'create_mcp_server'
workflowId?: string
action?: 'deploy' | 'undeploy'
identifier?: string
title?: string
description?: string
customizations?: {
primaryColor?: string
secondaryColor?: string
welcomeMessage?: string
iconUrl?: string
}
authType?: 'none' | 'password' | 'public' | 'email' | 'sso'
password?: string
allowedEmails?: string[]
outputConfigs?: unknown[]
serverId?: string
toolName?: string
toolDescription?: string
parameterSchema?: Record<string, unknown>
name?: string
isPublic?: boolean
workflowIds?: string[]
}
// === Workflow Organization Params === // === Workflow Organization Params ===
export interface RenameWorkflowParams { export interface RenameWorkflowParams {

View File

@@ -1,22 +1,19 @@
export type SSEEventType = export type SSEEventType =
| 'chat_id' | 'chat_id'
| 'title_updated' | 'title_updated'
| 'content'
| 'reasoning'
| 'tool_call'
| 'tool_generating'
| 'tool_result'
| 'tool_error'
| 'subagent_start'
| 'subagent_end'
| 'structured_result' | 'structured_result'
| 'subagent_result' | 'subagent_result'
| 'stream_end' | 'done'
| 'copilot.phase.started' | 'error'
| 'copilot.phase.progress' | 'start'
| 'copilot.phase.completed'
| 'copilot.tool.call'
| 'copilot.tool.result'
| 'copilot.tool.interrupt_required'
| 'copilot.tool.interrupt_resolved'
| 'copilot.workflow.patch'
| 'copilot.workflow.verify'
| 'copilot.subagent.started'
| 'copilot.subagent.completed'
| 'copilot.content'
| 'copilot.error'
export interface SSEEvent { export interface SSEEvent {
type: SSEEventType type: SSEEventType

View File

@@ -592,40 +592,16 @@ const META_edit: ToolMetadata = {
}, },
} }
const META_workflow_change: ToolMetadata = { const META_edit_workflow: ToolMetadata = {
displayNames: { displayNames: {
[ClientToolCallState.generating]: { text: 'Planning workflow changes', icon: Loader2 }, [ClientToolCallState.generating]: { text: 'Editing your workflow', icon: Loader2 },
[ClientToolCallState.executing]: { text: 'Applying workflow changes', icon: Loader2 }, [ClientToolCallState.executing]: { text: 'Editing your workflow', icon: Loader2 },
[ClientToolCallState.success]: { text: 'Updated your workflow', icon: Grid2x2Check }, [ClientToolCallState.success]: { text: 'Edited your workflow', icon: Grid2x2Check },
[ClientToolCallState.error]: { text: 'Failed to update your workflow', icon: XCircle }, [ClientToolCallState.error]: { text: 'Failed to edit your workflow', icon: XCircle },
[ClientToolCallState.review]: { text: 'Review your workflow changes', icon: Grid2x2 }, [ClientToolCallState.review]: { text: 'Review your workflow changes', icon: Grid2x2 },
[ClientToolCallState.rejected]: { text: 'Rejected workflow changes', icon: Grid2x2X }, [ClientToolCallState.rejected]: { text: 'Rejected workflow changes', icon: Grid2x2X },
[ClientToolCallState.aborted]: { text: 'Aborted workflow changes', icon: MinusCircle }, [ClientToolCallState.aborted]: { text: 'Aborted editing your workflow', icon: MinusCircle },
[ClientToolCallState.pending]: { text: 'Planning workflow changes', icon: Loader2 }, [ClientToolCallState.pending]: { text: 'Editing your workflow', icon: Loader2 },
},
getDynamicText: (params, state) => {
const mode = typeof params?.mode === 'string' ? params.mode.toLowerCase() : ''
if (mode === 'dry_run') {
switch (state) {
case ClientToolCallState.success:
return 'Planned workflow changes'
case ClientToolCallState.executing:
case ClientToolCallState.generating:
case ClientToolCallState.pending:
return 'Planning workflow changes'
}
}
if (mode === 'apply' || typeof params?.proposalId === 'string') {
switch (state) {
case ClientToolCallState.success:
return 'Applied workflow changes'
case ClientToolCallState.executing:
case ClientToolCallState.generating:
case ClientToolCallState.pending:
return 'Applying workflow changes'
}
}
return undefined
}, },
uiConfig: { uiConfig: {
isSpecial: true, isSpecial: true,
@@ -633,42 +609,6 @@ const META_workflow_change: ToolMetadata = {
}, },
} }
const META_workflow_context_get: ToolMetadata = {
displayNames: {
[ClientToolCallState.generating]: { text: 'Gathering workflow context', icon: Loader2 },
[ClientToolCallState.pending]: { text: 'Gathering workflow context', icon: Loader2 },
[ClientToolCallState.executing]: { text: 'Gathering workflow context', icon: Loader2 },
[ClientToolCallState.success]: { text: 'Gathered workflow context', icon: FileText },
[ClientToolCallState.error]: { text: 'Failed to gather workflow context', icon: XCircle },
[ClientToolCallState.rejected]: { text: 'Skipped workflow context', icon: MinusCircle },
[ClientToolCallState.aborted]: { text: 'Aborted workflow context', icon: MinusCircle },
},
}
const META_workflow_context_expand: ToolMetadata = {
displayNames: {
[ClientToolCallState.generating]: { text: 'Expanding workflow schemas', icon: Loader2 },
[ClientToolCallState.pending]: { text: 'Expanding workflow schemas', icon: Loader2 },
[ClientToolCallState.executing]: { text: 'Expanding workflow schemas', icon: Loader2 },
[ClientToolCallState.success]: { text: 'Expanded workflow schemas', icon: FileText },
[ClientToolCallState.error]: { text: 'Failed to expand workflow schemas', icon: XCircle },
[ClientToolCallState.rejected]: { text: 'Skipped schema expansion', icon: MinusCircle },
[ClientToolCallState.aborted]: { text: 'Aborted schema expansion', icon: MinusCircle },
},
}
const META_workflow_verify: ToolMetadata = {
displayNames: {
[ClientToolCallState.generating]: { text: 'Verifying workflow', icon: Loader2 },
[ClientToolCallState.pending]: { text: 'Verifying workflow', icon: Loader2 },
[ClientToolCallState.executing]: { text: 'Verifying workflow', icon: Loader2 },
[ClientToolCallState.success]: { text: 'Verified workflow', icon: CheckCircle2 },
[ClientToolCallState.error]: { text: 'Workflow verification failed', icon: XCircle },
[ClientToolCallState.rejected]: { text: 'Skipped workflow verification', icon: MinusCircle },
[ClientToolCallState.aborted]: { text: 'Aborted workflow verification', icon: MinusCircle },
},
}
const META_evaluate: ToolMetadata = { const META_evaluate: ToolMetadata = {
displayNames: { displayNames: {
[ClientToolCallState.generating]: { text: 'Evaluating', icon: Loader2 }, [ClientToolCallState.generating]: { text: 'Evaluating', icon: Loader2 },
@@ -2601,12 +2541,7 @@ const TOOL_METADATA_BY_ID: Record<string, ToolMetadata> = {
deploy_chat: META_deploy_chat, deploy_chat: META_deploy_chat,
deploy_mcp: META_deploy_mcp, deploy_mcp: META_deploy_mcp,
edit: META_edit, edit: META_edit,
workflow_context_get: META_workflow_context_get, edit_workflow: META_edit_workflow,
workflow_context_expand: META_workflow_context_expand,
workflow_change: META_workflow_change,
workflow_verify: META_workflow_verify,
workflow_run: META_run_workflow,
workflow_deploy: META_deploy_api,
evaluate: META_evaluate, evaluate: META_evaluate,
get_block_config: META_get_block_config, get_block_config: META_get_block_config,
get_block_options: META_get_block_options, get_block_options: META_get_block_options,

View File

@@ -0,0 +1,680 @@
/**
 * A tool that executes immediately against the backend (no LLM orchestration).
 * `toolId` names the server-side implementation this definition maps to;
 * `inputSchema` is a JSON-Schema object describing accepted arguments.
 */
export type DirectToolDef = {
  name: string
  description: string
  inputSchema: { type: 'object'; properties?: Record<string, unknown>; required?: string[] }
  toolId: string
}
/**
 * A tool whose request is routed to an LLM-driven subagent for multi-step work.
 * `agentId` selects which subagent handles the request; `inputSchema` is a
 * JSON-Schema object describing accepted arguments.
 */
export type SubagentToolDef = {
  name: string
  description: string
  inputSchema: { type: 'object'; properties?: Record<string, unknown>; required?: string[] }
  agentId: string
}
/**
* Direct tools that execute immediately without LLM orchestration.
* These are fast database queries that don't need AI reasoning.
*/
export const DIRECT_TOOL_DEFS: DirectToolDef[] = [
  // --- Discovery: workspaces, workflows, folders ---
  {
    name: 'list_workspaces',
    toolId: 'list_user_workspaces',
    description:
      'List all workspaces the user has access to. Returns workspace IDs, names, and roles. Use this first to determine which workspace to operate in.',
    inputSchema: {
      type: 'object',
      properties: {},
    },
  },
  {
    name: 'list_workflows',
    toolId: 'list_user_workflows',
    description:
      'List all workflows the user has access to. Returns workflow IDs, names, workspace, and folder info. Use workspaceId/folderId to scope results.',
    inputSchema: {
      type: 'object',
      properties: {
        workspaceId: {
          type: 'string',
          description: 'Optional workspace ID to filter workflows.',
        },
        folderId: {
          type: 'string',
          description: 'Optional folder ID to filter workflows.',
        },
      },
    },
  },
  {
    name: 'list_folders',
    toolId: 'list_folders',
    description:
      'List all folders in a workspace. Returns folder IDs, names, and parent relationships for organizing workflows.',
    inputSchema: {
      type: 'object',
      properties: {
        workspaceId: {
          type: 'string',
          description: 'Workspace ID to list folders from.',
        },
      },
      required: ['workspaceId'],
    },
  },
  {
    name: 'get_workflow',
    toolId: 'get_user_workflow',
    description:
      'Get a workflow by ID. Returns the full workflow definition including all blocks, connections, and configuration.',
    inputSchema: {
      type: 'object',
      properties: {
        workflowId: {
          type: 'string',
          description: 'Workflow ID to retrieve.',
        },
      },
      required: ['workflowId'],
    },
  },
  // --- Creation & organization ---
  {
    name: 'create_workflow',
    toolId: 'create_workflow',
    description:
      'Create a new empty workflow. Returns the new workflow ID. Always call this FIRST before sim_build for new workflows. Use workspaceId to place it in a specific workspace.',
    inputSchema: {
      type: 'object',
      properties: {
        name: {
          type: 'string',
          description: 'Name for the new workflow.',
        },
        workspaceId: {
          type: 'string',
          description: 'Optional workspace ID. Uses default workspace if not provided.',
        },
        folderId: {
          type: 'string',
          description: 'Optional folder ID to place the workflow in.',
        },
        description: {
          type: 'string',
          description: 'Optional description for the workflow.',
        },
      },
      required: ['name'],
    },
  },
  {
    name: 'create_folder',
    toolId: 'create_folder',
    description:
      'Create a new folder for organizing workflows. Use parentId to create nested folder hierarchies.',
    inputSchema: {
      type: 'object',
      properties: {
        name: {
          type: 'string',
          description: 'Name for the new folder.',
        },
        workspaceId: {
          type: 'string',
          description: 'Optional workspace ID. Uses default workspace if not provided.',
        },
        parentId: {
          type: 'string',
          description: 'Optional parent folder ID for nested folders.',
        },
      },
      required: ['name'],
    },
  },
  {
    name: 'rename_workflow',
    toolId: 'rename_workflow',
    description: 'Rename an existing workflow.',
    inputSchema: {
      type: 'object',
      properties: {
        workflowId: {
          type: 'string',
          description: 'The workflow ID to rename.',
        },
        name: {
          type: 'string',
          description: 'The new name for the workflow.',
        },
      },
      required: ['workflowId', 'name'],
    },
  },
  {
    name: 'move_workflow',
    toolId: 'move_workflow',
    description:
      'Move a workflow into a different folder. Omit folderId or pass empty string to move to workspace root.',
    inputSchema: {
      type: 'object',
      properties: {
        workflowId: {
          type: 'string',
          description: 'The workflow ID to move.',
        },
        folderId: {
          type: 'string',
          description: 'Target folder ID. Omit or pass empty string to move to workspace root.',
        },
      },
      required: ['workflowId'],
    },
  },
  {
    name: 'move_folder',
    toolId: 'move_folder',
    description:
      'Move a folder into another folder. Omit parentId or pass empty string to move to workspace root.',
    inputSchema: {
      type: 'object',
      properties: {
        folderId: {
          type: 'string',
          description: 'The folder ID to move.',
        },
        parentId: {
          type: 'string',
          description:
            'Target parent folder ID. Omit or pass empty string to move to workspace root.',
        },
      },
      required: ['folderId'],
    },
  },
  // --- Execution: full and partial workflow runs ---
  {
    name: 'run_workflow',
    toolId: 'run_workflow',
    description:
      'Run a workflow and return its output. Works on both draft and deployed states. By default runs the draft (live) state.',
    inputSchema: {
      type: 'object',
      properties: {
        workflowId: {
          type: 'string',
          description: 'REQUIRED. The workflow ID to run.',
        },
        workflow_input: {
          type: 'object',
          description:
            'JSON object with input values. Keys should match the workflow start block input field names.',
        },
        useDeployedState: {
          type: 'boolean',
          description: 'When true, runs the deployed version instead of the draft. Default: false.',
        },
      },
      required: ['workflowId'],
    },
  },
  {
    name: 'run_workflow_until_block',
    toolId: 'run_workflow_until_block',
    description:
      'Run a workflow and stop after a specific block completes. Useful for testing partial execution or debugging specific blocks.',
    inputSchema: {
      type: 'object',
      properties: {
        workflowId: {
          type: 'string',
          description: 'REQUIRED. The workflow ID to run.',
        },
        stopAfterBlockId: {
          type: 'string',
          description:
            'REQUIRED. The block ID to stop after. Execution halts once this block completes.',
        },
        workflow_input: {
          type: 'object',
          description: 'JSON object with input values for the workflow.',
        },
        useDeployedState: {
          type: 'boolean',
          description: 'When true, runs the deployed version instead of the draft. Default: false.',
        },
      },
      required: ['workflowId', 'stopAfterBlockId'],
    },
  },
  {
    name: 'run_from_block',
    toolId: 'run_from_block',
    description:
      'Run a workflow starting from a specific block, using cached outputs from a prior execution for upstream blocks. The workflow must have been run at least once first.',
    inputSchema: {
      type: 'object',
      properties: {
        workflowId: {
          type: 'string',
          description: 'REQUIRED. The workflow ID to run.',
        },
        startBlockId: {
          type: 'string',
          description: 'REQUIRED. The block ID to start execution from.',
        },
        executionId: {
          type: 'string',
          description:
            'Optional. Specific execution ID to load the snapshot from. Uses latest if omitted.',
        },
        workflow_input: {
          type: 'object',
          description: 'Optional input values for the workflow.',
        },
        useDeployedState: {
          type: 'boolean',
          description: 'When true, runs the deployed version instead of the draft. Default: false.',
        },
      },
      required: ['workflowId', 'startBlockId'],
    },
  },
  {
    name: 'run_block',
    toolId: 'run_block',
    description:
      'Run a single block in isolation using cached outputs from a prior execution. Only the specified block executes — nothing upstream or downstream. The workflow must have been run at least once first.',
    inputSchema: {
      type: 'object',
      properties: {
        workflowId: {
          type: 'string',
          description: 'REQUIRED. The workflow ID.',
        },
        blockId: {
          type: 'string',
          description: 'REQUIRED. The block ID to run in isolation.',
        },
        executionId: {
          type: 'string',
          description:
            'Optional. Specific execution ID to load the snapshot from. Uses latest if omitted.',
        },
        workflow_input: {
          type: 'object',
          description: 'Optional input values for the workflow.',
        },
        useDeployedState: {
          type: 'boolean',
          description: 'When true, runs the deployed version instead of the draft. Default: false.',
        },
      },
      required: ['workflowId', 'blockId'],
    },
  },
  // --- Deployment helpers ---
  {
    name: 'get_deployed_workflow_state',
    toolId: 'get_deployed_workflow_state',
    description:
      'Get the deployed (production) state of a workflow. Returns the full workflow definition as deployed, or indicates if the workflow is not yet deployed.',
    inputSchema: {
      type: 'object',
      properties: {
        workflowId: {
          type: 'string',
          description: 'REQUIRED. The workflow ID to get the deployed state for.',
        },
      },
      required: ['workflowId'],
    },
  },
  {
    name: 'generate_api_key',
    toolId: 'generate_api_key',
    description:
      'Generate a new workspace API key for calling workflow API endpoints. The key is only shown once — tell the user to save it immediately.',
    inputSchema: {
      type: 'object',
      properties: {
        name: {
          type: 'string',
          description:
            'A descriptive name for the API key (e.g., "production-key", "dev-testing").',
        },
        workspaceId: {
          type: 'string',
          description: "Optional workspace ID. Defaults to user's default workspace.",
        },
      },
      required: ['name'],
    },
  },
]
/**
 * Subagent tools: each request is routed to an LLM-driven agent (`agentId`)
 * that plans and executes multi-step work on the user's behalf.
 * Descriptions are surfaced verbatim to the calling model, so they double as
 * routing guidance ("USE THIS WHEN" / "DO NOT USE" sections).
 */
export const SUBAGENT_TOOL_DEFS: SubagentToolDef[] = [
  {
    name: 'sim_build',
    agentId: 'build',
    description: `Build a workflow end-to-end in a single step. This is the fast mode equivalent for headless/MCP usage.
USE THIS WHEN:
- Building a new workflow from scratch
- Modifying an existing workflow
- You want to gather information and build in one pass without separate plan→edit steps
WORKFLOW ID (REQUIRED):
- For NEW workflows: First call create_workflow to get a workflowId, then pass it here
- For EXISTING workflows: Always pass the workflowId parameter
CAN DO:
- Gather information about blocks, credentials, patterns
- Search documentation and patterns for best practices
- Add, modify, or remove blocks
- Configure block settings and connections
- Set environment variables and workflow variables
CANNOT DO:
- Run or test workflows (use sim_test separately)
- Deploy workflows (use sim_deploy separately)
WORKFLOW:
1. Call create_workflow to get a workflowId (for new workflows)
2. Call sim_build with the request and workflowId
3. Build agent gathers info and builds in one pass
4. Call sim_test to verify it works
5. Optionally call sim_deploy to make it externally accessible`,
    inputSchema: {
      type: 'object',
      properties: {
        request: {
          type: 'string',
          description: 'What you want to build or modify in the workflow.',
        },
        workflowId: {
          type: 'string',
          description:
            'REQUIRED. The workflow ID. For new workflows, call create_workflow first to get this.',
        },
        context: { type: 'object' },
      },
      required: ['request', 'workflowId'],
    },
  },
  {
    name: 'sim_discovery',
    agentId: 'discovery',
    description: `Find workflows by their contents or functionality when the user doesn't know the exact name or ID.
USE THIS WHEN:
- User describes a workflow by what it does: "the one that sends emails", "my Slack notification workflow"
- User refers to workflow contents: "the workflow with the OpenAI block"
- User needs to search/match workflows by functionality or description
DO NOT USE (use direct tools instead):
- User knows the workflow name → use get_workflow
- User wants to list all workflows → use list_workflows
- User wants to list workspaces → use list_workspaces
- User wants to list folders → use list_folders`,
    inputSchema: {
      type: 'object',
      properties: {
        request: { type: 'string' },
        workspaceId: { type: 'string' },
        context: { type: 'object' },
      },
      required: ['request'],
    },
  },
  {
    name: 'sim_plan',
    agentId: 'plan',
    description: `Plan workflow changes by gathering required information. For most cases, prefer sim_build which combines planning and editing in one step.
USE THIS WHEN:
- You need fine-grained control over the build process
- You want to inspect the plan before executing it
WORKFLOW ID (REQUIRED):
- For NEW workflows: First call create_workflow to get a workflowId, then pass it here
- For EXISTING workflows: Always pass the workflowId parameter
This tool gathers information about available blocks, credentials, and the current workflow state.
RETURNS: A plan object containing block configurations, connections, and technical details.
IMPORTANT: Pass the returned plan EXACTLY to sim_edit - do not modify or summarize it.`,
    inputSchema: {
      type: 'object',
      properties: {
        request: {
          type: 'string',
          description: 'What you want to build or modify in the workflow.',
        },
        workflowId: {
          type: 'string',
          description:
            'REQUIRED. The workflow ID. For new workflows, call create_workflow first to get this.',
        },
        context: { type: 'object' },
      },
      required: ['request', 'workflowId'],
    },
  },
  {
    name: 'sim_edit',
    agentId: 'edit',
    description: `Execute a workflow plan from sim_plan. For most cases, prefer sim_build which combines planning and editing in one step.
WORKFLOW ID (REQUIRED):
- You MUST provide the workflowId parameter
PLAN (REQUIRED):
- Pass the EXACT plan object from sim_plan in the context.plan field
- Do NOT modify, summarize, or interpret the plan - pass it verbatim
After sim_edit completes, you can test immediately with sim_test, or deploy with sim_deploy to make it accessible externally.`,
    inputSchema: {
      type: 'object',
      properties: {
        message: { type: 'string', description: 'Optional additional instructions for the edit.' },
        workflowId: {
          type: 'string',
          description:
            'REQUIRED. The workflow ID to edit. Get this from create_workflow for new workflows.',
        },
        plan: {
          type: 'object',
          description: 'The plan object from sim_plan. Pass it EXACTLY as returned, do not modify.',
        },
        context: {
          type: 'object',
          description:
            'Additional context. Put the plan in context.plan if not using the plan field directly.',
        },
      },
      required: ['workflowId'],
    },
  },
  {
    name: 'sim_deploy',
    agentId: 'deploy',
    description: `Deploy a workflow to make it accessible externally. Workflows can be tested without deploying, but deployment is needed for API access, chat UIs, or MCP exposure.
DEPLOYMENT TYPES:
- "deploy as api" - REST API endpoint for programmatic access
- "deploy as chat" - Managed chat UI with auth options
- "deploy as mcp" - Expose as MCP tool on an MCP server for AI agents to call
MCP DEPLOYMENT FLOW:
The deploy subagent will automatically: list available MCP servers → create one if needed → deploy the workflow as an MCP tool to that server. You can specify server name, tool name, and tool description.
ALSO CAN:
- Get the deployed (production) state to compare with draft
- Generate workspace API keys for calling deployed workflows
- List and create MCP servers in the workspace`,
    inputSchema: {
      type: 'object',
      properties: {
        request: {
          type: 'string',
          description: 'The deployment request, e.g. "deploy as api" or "deploy as chat"',
        },
        workflowId: {
          type: 'string',
          description: 'REQUIRED. The workflow ID to deploy.',
        },
        context: { type: 'object' },
      },
      required: ['request', 'workflowId'],
    },
  },
  {
    name: 'sim_test',
    agentId: 'test',
    description: `Run a workflow and verify its outputs. Works on both deployed and undeployed (draft) workflows. Use after building to verify correctness.
Supports full and partial execution:
- Full run with test inputs
- Stop after a specific block (run_workflow_until_block)
- Run a single block in isolation (run_block)
- Resume from a specific block (run_from_block)`,
    inputSchema: {
      type: 'object',
      properties: {
        request: { type: 'string' },
        workflowId: {
          type: 'string',
          description: 'REQUIRED. The workflow ID to test.',
        },
        context: { type: 'object' },
      },
      required: ['request', 'workflowId'],
    },
  },
  {
    name: 'sim_debug',
    agentId: 'debug',
    description:
      'Diagnose errors or unexpected workflow behavior. Provide the error message and workflowId. Returns root cause analysis and fix suggestions.',
    inputSchema: {
      type: 'object',
      properties: {
        error: { type: 'string', description: 'The error message or description of the issue.' },
        workflowId: { type: 'string', description: 'REQUIRED. The workflow ID to debug.' },
        context: { type: 'object' },
      },
      required: ['error', 'workflowId'],
    },
  },
  {
    name: 'sim_auth',
    agentId: 'auth',
    description:
      'Check OAuth connection status, list connected services, and initiate new OAuth connections. Use when a workflow needs third-party service access (Google, Slack, GitHub, etc.). In MCP/headless mode, returns an authorization URL the user must open in their browser to complete the OAuth flow.',
    inputSchema: {
      type: 'object',
      properties: {
        request: { type: 'string' },
        context: { type: 'object' },
      },
      required: ['request'],
    },
  },
  {
    name: 'sim_knowledge',
    agentId: 'knowledge',
    description:
      'Manage knowledge bases for RAG-powered document retrieval. Supports listing, creating, updating, and deleting knowledge bases. Knowledge bases can be attached to agent blocks for context-aware responses.',
    inputSchema: {
      type: 'object',
      properties: {
        request: { type: 'string' },
        context: { type: 'object' },
      },
      required: ['request'],
    },
  },
  {
    name: 'sim_custom_tool',
    agentId: 'custom_tool',
    description:
      'Manage custom tools (reusable API integrations). Supports listing, creating, updating, and deleting custom tools. Custom tools can be added to agent blocks as callable functions.',
    inputSchema: {
      type: 'object',
      properties: {
        request: { type: 'string' },
        context: { type: 'object' },
      },
      required: ['request'],
    },
  },
  {
    name: 'sim_info',
    agentId: 'info',
    description:
      "Inspect a workflow's blocks, connections, outputs, variables, and metadata. Use for questions about the Sim platform itself — how blocks work, what integrations are available, platform concepts, etc. Always provide workflowId to scope results to a specific workflow.",
    inputSchema: {
      type: 'object',
      properties: {
        request: { type: 'string' },
        workflowId: { type: 'string' },
        context: { type: 'object' },
      },
      required: ['request'],
    },
  },
  {
    name: 'sim_workflow',
    agentId: 'workflow',
    description:
      'Manage workflow-level configuration: environment variables, settings, scheduling, and deployment status. Use for any data about a specific workflow — its settings, credentials, variables, or deployment state.',
    inputSchema: {
      type: 'object',
      properties: {
        request: { type: 'string' },
        workflowId: { type: 'string' },
        context: { type: 'object' },
      },
      required: ['request'],
    },
  },
  {
    name: 'sim_research',
    agentId: 'research',
    description:
      'Research external APIs and documentation. Use when you need to understand third-party services, external APIs, authentication flows, or data formats OUTSIDE of Sim. For questions about Sim itself, use sim_info instead.',
    inputSchema: {
      type: 'object',
      properties: {
        request: { type: 'string' },
        context: { type: 'object' },
      },
      required: ['request'],
    },
  },
  {
    name: 'sim_superagent',
    agentId: 'superagent',
    description:
      'Execute direct actions NOW: send an email, post to Slack, make an API call, etc. Use when the user wants to DO something immediately rather than build a workflow for it.',
    inputSchema: {
      type: 'object',
      properties: {
        request: { type: 'string' },
        context: { type: 'object' },
      },
      required: ['request'],
    },
  },
  {
    name: 'sim_platform',
    agentId: 'tour',
    description:
      'Get help with Sim platform navigation, keyboard shortcuts, and UI actions. Use when the user asks "how do I..." about the Sim editor, wants keyboard shortcuts, or needs to know what actions are available in the UI.',
    inputSchema: {
      type: 'object',
      properties: {
        request: { type: 'string' },
        context: { type: 'object' },
      },
      required: ['request'],
    },
  },
]

View File

@@ -109,7 +109,7 @@ function resolveSubBlockOptions(sb: SubBlockConfig): string[] | undefined {
return undefined return undefined
} }
// Return canonical option IDs/values expected by workflow_change compilation and apply // Return the actual option ID/value that edit_workflow expects, not the display label
return rawOptions return rawOptions
.map((opt: any) => { .map((opt: any) => {
if (!opt) return undefined if (!opt) return undefined

View File

@@ -11,13 +11,8 @@ import { makeApiRequestServerTool } from '@/lib/copilot/tools/server/other/make-
import { searchOnlineServerTool } from '@/lib/copilot/tools/server/other/search-online' import { searchOnlineServerTool } from '@/lib/copilot/tools/server/other/search-online'
import { getCredentialsServerTool } from '@/lib/copilot/tools/server/user/get-credentials' import { getCredentialsServerTool } from '@/lib/copilot/tools/server/user/get-credentials'
import { setEnvironmentVariablesServerTool } from '@/lib/copilot/tools/server/user/set-environment-variables' import { setEnvironmentVariablesServerTool } from '@/lib/copilot/tools/server/user/set-environment-variables'
import { editWorkflowServerTool } from '@/lib/copilot/tools/server/workflow/edit-workflow'
import { getWorkflowConsoleServerTool } from '@/lib/copilot/tools/server/workflow/get-workflow-console' import { getWorkflowConsoleServerTool } from '@/lib/copilot/tools/server/workflow/get-workflow-console'
import { workflowChangeServerTool } from '@/lib/copilot/tools/server/workflow/workflow-change'
import {
workflowContextExpandServerTool,
workflowContextGetServerTool,
} from '@/lib/copilot/tools/server/workflow/workflow-context'
import { workflowVerifyServerTool } from '@/lib/copilot/tools/server/workflow/workflow-verify'
import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas' import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas'
export { ExecuteResponseSuccessSchema } export { ExecuteResponseSuccessSchema }
@@ -32,6 +27,7 @@ const serverToolRegistry: Record<string, BaseServerTool> = {
[getBlockOptionsServerTool.name]: getBlockOptionsServerTool, [getBlockOptionsServerTool.name]: getBlockOptionsServerTool,
[getBlockConfigServerTool.name]: getBlockConfigServerTool, [getBlockConfigServerTool.name]: getBlockConfigServerTool,
[getTriggerBlocksServerTool.name]: getTriggerBlocksServerTool, [getTriggerBlocksServerTool.name]: getTriggerBlocksServerTool,
[editWorkflowServerTool.name]: editWorkflowServerTool,
[getWorkflowConsoleServerTool.name]: getWorkflowConsoleServerTool, [getWorkflowConsoleServerTool.name]: getWorkflowConsoleServerTool,
[searchDocumentationServerTool.name]: searchDocumentationServerTool, [searchDocumentationServerTool.name]: searchDocumentationServerTool,
[searchOnlineServerTool.name]: searchOnlineServerTool, [searchOnlineServerTool.name]: searchOnlineServerTool,
@@ -39,10 +35,6 @@ const serverToolRegistry: Record<string, BaseServerTool> = {
[getCredentialsServerTool.name]: getCredentialsServerTool, [getCredentialsServerTool.name]: getCredentialsServerTool,
[makeApiRequestServerTool.name]: makeApiRequestServerTool, [makeApiRequestServerTool.name]: makeApiRequestServerTool,
[knowledgeBaseServerTool.name]: knowledgeBaseServerTool, [knowledgeBaseServerTool.name]: knowledgeBaseServerTool,
[workflowContextGetServerTool.name]: workflowContextGetServerTool,
[workflowContextExpandServerTool.name]: workflowContextExpandServerTool,
[workflowChangeServerTool.name]: workflowChangeServerTool,
[workflowVerifyServerTool.name]: workflowVerifyServerTool,
} }
/** /**

View File

@@ -1,225 +0,0 @@
import crypto from 'crypto'
import { createLogger } from '@sim/logger'
import { getRedisClient } from '@/lib/core/config/redis'
// A stored value paired with its absolute expiry time (ms since epoch).
type StoreEntry<T> = {
  value: T
  expiresAt: number
}
const DEFAULT_TTL_MS = 30 * 60 * 1000 // entries live 30 minutes by default
const MAX_ENTRIES = 500 // size cap for the in-memory fallback store
const DEFAULT_TTL_SECONDS = Math.floor(DEFAULT_TTL_MS / 1000) // Redis EX takes seconds
const CONTEXT_PREFIX = 'copilot:workflow_change:context'
const PROPOSAL_PREFIX = 'copilot:workflow_change:proposal'
const logger = createLogger('WorkflowChangeStore')
/**
 * Small in-memory key/value store with per-entry expiry, used as a fallback
 * when Redis is unavailable. Capacity-bounded: once MAX_ENTRIES is reached,
 * the oldest entry (Map insertion order) is evicted on the next insert.
 */
class TTLStore<T> {
  private readonly data = new Map<string, StoreEntry<T>>()
  constructor(private readonly ttlMs = DEFAULT_TTL_MS) {}
  /** Store a value under a freshly generated UUID and return that id. */
  set(value: T): string {
    this.gc()
    // At capacity after pruning expired entries: evict the oldest insertion.
    if (this.data.size >= MAX_ENTRIES) {
      const oldest = this.data.keys().next().value as string | undefined
      if (oldest) {
        this.data.delete(oldest)
      }
    }
    const id = crypto.randomUUID()
    this.data.set(id, { value, expiresAt: Date.now() + this.ttlMs })
    return id
  }
  /** Return a live value, or null when absent or expired (expired entries are pruned). */
  get(id: string): T | null {
    const entry = this.data.get(id)
    if (!entry) return null
    if (entry.expiresAt <= Date.now()) {
      this.data.delete(id)
      return null
    }
    return entry.value
  }
  /** Insert or replace the value under a caller-chosen id, resetting its TTL. */
  upsert(id: string, value: T): void {
    this.gc()
    this.data.set(id, { value, expiresAt: Date.now() + this.ttlMs })
  }
  /** Remove every entry whose expiry has passed. */
  private gc(): void {
    const now = Date.now()
    for (const [id, entry] of this.data) {
      if (entry.expiresAt <= now) {
        this.data.delete(id)
      }
    }
  }
}
/**
 * Snapshot of a workflow plus the block schemas needed to reason about edits.
 * `snapshotHash` identifies the workflow revision this pack was built from.
 */
export type WorkflowContextPack = {
  workflowId: string
  snapshotHash: string
  // Serialized workflow graph as of the snapshot.
  workflowState: {
    blocks: Record<string, any>
    edges: Array<Record<string, any>>
    loops: Record<string, any>
    parallels: Record<string, any>
  }
  schemasByType: Record<string, any> // block schemas keyed by block type
  schemaRefsByType: Record<string, string> // schema reference ids keyed by block type
  summary: Record<string, any>
}
/**
 * A compiled set of workflow operations awaiting apply. `baseSnapshotHash`
 * ties the proposal to the workflow revision it was compiled against so a
 * stale proposal can be detected before applying.
 */
export type WorkflowChangeProposal = {
  workflowId: string
  baseSnapshotHash: string
  compiledOperations: Array<Record<string, any>>
  diffSummary: Record<string, any>
  warnings: string[]
  diagnostics: string[]
  touchedBlocks: string[]
  resolvedIds?: Record<string, string>
  acceptanceAssertions: string[]
  // Optional follow-up steps to run after the proposal is applied.
  postApply?: {
    verify?: boolean
    run?: Record<string, any>
    evaluator?: Record<string, any>
  }
  // Context carried forward for downstream agents.
  handoff?: {
    objective?: string
    constraints?: string[]
    resolvedIds?: Record<string, string>
    assumptions?: string[]
    unresolvedRisks?: string[]
  }
}
// In-memory fallbacks used when Redis is unavailable or a Redis call fails.
const contextPackStore = new TTLStore<WorkflowContextPack>()
const proposalStore = new TTLStore<WorkflowChangeProposal>()
/** Redis key under which a workflow context pack is stored. */
function getContextRedisKey(id: string): string {
  return `${CONTEXT_PREFIX}:${id}`
}
/** Redis key under which a workflow change proposal is stored. */
function getProposalRedisKey(id: string): string {
  return `${PROPOSAL_PREFIX}:${id}`
}
/**
 * Serialize `value` as JSON into Redis under `key` with the default TTL.
 * NOTE(review): relies on a non-null assertion — every visible caller checks
 * getRedisClient() before invoking this; confirm no unguarded call sites exist.
 */
async function writeRedisJson(key: string, value: unknown): Promise<void> {
  const redis = getRedisClient()!
  await redis.set(key, JSON.stringify(value), 'EX', DEFAULT_TTL_SECONDS)
}
/**
 * Read and JSON-parse a value from Redis. A missing key yields null; a
 * corrupt payload is logged, deleted (best-effort), and also yields null.
 */
async function readRedisJson<T>(key: string): Promise<T | null> {
  const client = getRedisClient()!
  const payload = await client.get(key)
  if (!payload) return null
  try {
    return JSON.parse(payload) as T
  } catch (error) {
    logger.warn('Failed parsing workflow change store JSON payload', { key, error })
    // Drop the unreadable entry so later reads fall through to the fallbacks.
    await client.del(key).catch(() => {})
    return null
  }
}
/**
 * Persist a context pack and return its id. Prefers Redis; falls back to the
 * in-memory store when Redis is absent or the write fails.
 */
export async function saveContextPack(pack: WorkflowContextPack): Promise<string> {
  if (!getRedisClient()) return contextPackStore.set(pack)
  const id = crypto.randomUUID()
  try {
    await writeRedisJson(getContextRedisKey(id), pack)
    return id
  } catch (error) {
    logger.warn('Redis write failed for workflow context pack, using memory fallback', { error })
    // The memory store generates its own id, so the Redis id is discarded here.
    return contextPackStore.set(pack)
  }
}
/**
 * Look up a context pack by id. Reads Redis first when available; any miss
 * or Redis failure falls through to the in-memory store.
 */
export async function getContextPack(id: string): Promise<WorkflowContextPack | null> {
  if (!getRedisClient()) return contextPackStore.get(id)
  try {
    const fromRedis = await readRedisJson<WorkflowContextPack>(getContextRedisKey(id))
    if (fromRedis) return fromRedis
  } catch (error) {
    logger.warn('Redis read failed for workflow context pack, using memory fallback', { error })
  }
  // Not found in Redis (or Redis errored) — the memory mirror may still have it.
  return contextPackStore.get(id)
}
/**
 * Merge `patch` into an existing context pack and persist the result.
 *
 * Nested collections (workflowState, schemasByType, schemaRefsByType, summary)
 * are replaced wholesale when present, never deep-merged; the explicit
 * `patch.x || existing.x` fallbacks restore fields that a spread of an
 * explicitly-undefined patch value would otherwise clobber.
 *
 * Returns the merged pack, or null when no pack exists under `id`. The memory
 * store is updated alongside Redis so reads survive a later Redis outage.
 */
export async function updateContextPack(
  id: string,
  patch: Partial<WorkflowContextPack>
): Promise<WorkflowContextPack | null> {
  const existing = await getContextPack(id)
  if (!existing) return null
  const merged: WorkflowContextPack = {
    ...existing,
    ...patch,
    workflowState: patch.workflowState || existing.workflowState,
    schemasByType: patch.schemasByType || existing.schemasByType,
    schemaRefsByType: patch.schemaRefsByType || existing.schemaRefsByType,
    summary: patch.summary || existing.summary,
  }
  if (!getRedisClient()) {
    contextPackStore.upsert(id, merged)
    return merged
  }
  try {
    await writeRedisJson(getContextRedisKey(id), merged)
    // Keep the memory mirror in sync with the successful Redis write.
    contextPackStore.upsert(id, merged)
    return merged
  } catch (error) {
    logger.warn('Redis update failed for workflow context pack, using memory fallback', { error })
    contextPackStore.upsert(id, merged)
    return merged
  }
}
/**
 * Persist a change proposal and return its id. Prefers Redis; falls back to
 * the in-memory store when Redis is absent or the write fails.
 */
export async function saveProposal(proposal: WorkflowChangeProposal): Promise<string> {
  if (!getRedisClient()) return proposalStore.set(proposal)
  const id = crypto.randomUUID()
  try {
    await writeRedisJson(getProposalRedisKey(id), proposal)
    return id
  } catch (error) {
    logger.warn('Redis write failed for workflow proposal, using memory fallback', { error })
    // The memory store assigns its own id on fallback.
    return proposalStore.set(proposal)
  }
}
/**
 * Fetch a change proposal by id. Reads Redis first when available; any miss
 * or Redis failure falls through to the in-memory store.
 */
export async function getProposal(id: string): Promise<WorkflowChangeProposal | null> {
  if (!getRedisClient()) return proposalStore.get(id)
  try {
    const cached = await readRedisJson<WorkflowChangeProposal>(getProposalRedisKey(id))
    if (cached) return cached
  } catch (error) {
    logger.warn('Redis read failed for workflow proposal, using memory fallback', { error })
  }
  return proposalStore.get(id)
}

View File

@@ -0,0 +1,298 @@
import { db } from '@sim/db'
import { workflow as workflowTable } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool'
import { applyAutoLayout } from '@/lib/workflows/autolayout'
import { extractAndPersistCustomTools } from '@/lib/workflows/persistence/custom-tools-persistence'
import {
loadWorkflowFromNormalizedTables,
saveWorkflowToNormalizedTables,
} from '@/lib/workflows/persistence/utils'
import { validateWorkflowState } from '@/lib/workflows/sanitization/validation'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
import { getUserPermissionConfig } from '@/ee/access-control/utils/permission-check'
import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils'
import { applyOperationsToWorkflowState } from './engine'
import type { EditWorkflowParams, ValidationError } from './types'
import { preValidateCredentialInputs, validateWorkflowSelectorIds } from './validation'
/**
 * Load a workflow's draft state from the normalized tables.
 *
 * Blocks persisted without a `type` cannot be edited or executed, so they are
 * dropped (with a warning) along with any edges touching them. Returns the
 * cleaned graph plus a map of explicitly-set sub-block values keyed by
 * blockId → subBlockId.
 *
 * Throws when the workflow row or its normalized data is missing.
 */
async function getCurrentWorkflowStateFromDb(
  workflowId: string
): Promise<{ workflowState: any; subBlockValues: Record<string, Record<string, any>> }> {
  const logger = createLogger('EditWorkflowServerTool')
  const [workflowRecord] = await db
    .select()
    .from(workflowTable)
    .where(eq(workflowTable.id, workflowId))
    .limit(1)
  if (!workflowRecord) throw new Error(`Workflow ${workflowId} not found in database`)
  const normalized = await loadWorkflowFromNormalizedTables(workflowId)
  if (!normalized) throw new Error('Workflow has no normalized data')
  // Identify blocks that came back without a type — these are unusable.
  const blocks: Record<string, any> = { ...normalized.blocks }
  const invalidIds: string[] = []
  for (const [id, block] of Object.entries(blocks) as Array<[string, any]>) {
    if (!block.type) {
      logger.warn(`Block ${id} loaded without type from database`, {
        blockKeys: Object.keys(block),
        blockName: block.name,
      })
      invalidIds.push(id)
    }
  }
  for (const id of invalidIds) {
    delete blocks[id]
  }
  // Keep the graph consistent: drop edges that touch a removed block.
  const edges = normalized.edges.filter(
    (edge: any) => !invalidIds.includes(edge.source) && !invalidIds.includes(edge.target)
  )
  const workflowState: any = {
    blocks,
    edges,
    loops: normalized.loops || {},
    parallels: normalized.parallels || {},
  }
  // Collect only sub-blocks with an explicit value, keyed blockId → subBlockId.
  const subBlockValues: Record<string, Record<string, any>> = {}
  for (const [blockId, block] of Object.entries(normalized.blocks)) {
    const values: Record<string, any> = {}
    for (const [subId, sub] of Object.entries((block as any).subBlocks || {})) {
      if ((sub as any).value !== undefined) {
        values[subId] = (sub as any).value
      }
    }
    subBlockValues[blockId] = values
  }
  return { workflowState, subBlockValues }
}
/**
 * Copilot server tool `edit_workflow`: applies a batch of edit operations to
 * a workflow and persists the result to the normalized workflow tables.
 *
 * Pipeline: validate params -> authorize (workspace 'write') -> load current
 * state (caller-supplied JSON snapshot, else DB) -> pre-validate
 * credential/apiKey inputs -> apply operations -> validate selector IDs and
 * the full state -> persist custom tools -> autolayout -> save and bump
 * lastSynced/updatedAt.
 *
 * Hard failures (bad params, authorization, invalid final state, save error)
 * throw; per-input validation failures and skipped operations do NOT throw —
 * they are reported in the returned object so the calling LLM can react.
 */
export const editWorkflowServerTool: BaseServerTool<EditWorkflowParams, unknown> = {
  name: 'edit_workflow',
  async execute(params: EditWorkflowParams, context?: { userId: string }): Promise<unknown> {
    const logger = createLogger('EditWorkflowServerTool')
    const { operations, workflowId, currentUserWorkflow } = params
    if (!Array.isArray(operations) || operations.length === 0) {
      throw new Error('operations are required and must be an array')
    }
    if (!workflowId) throw new Error('workflowId is required')
    if (!context?.userId) {
      throw new Error('Unauthorized workflow access')
    }
    // Requires write permission on the workflow's workspace.
    const authorization = await authorizeWorkflowByWorkspacePermission({
      workflowId,
      userId: context.userId,
      action: 'write',
    })
    if (!authorization.allowed) {
      throw new Error(authorization.message || 'Unauthorized workflow access')
    }
    logger.info('Executing edit_workflow', {
      operationCount: operations.length,
      workflowId,
      hasCurrentUserWorkflow: !!currentUserWorkflow,
    })
    // Get current workflow state: prefer the client-supplied snapshot so edits
    // apply on top of unsaved client changes; otherwise load from the DB.
    let workflowState: any
    if (currentUserWorkflow) {
      try {
        workflowState = JSON.parse(currentUserWorkflow)
      } catch (error) {
        logger.error('Failed to parse currentUserWorkflow', error)
        throw new Error('Invalid currentUserWorkflow format')
      }
    } else {
      const fromDb = await getCurrentWorkflowStateFromDb(workflowId)
      workflowState = fromDb.workflowState
    }
    // Get permission config for the user.
    // NOTE: context.userId is guaranteed by the guard above; the conditional
    // here (and below) is defensive.
    const permissionConfig = context?.userId ? await getUserPermissionConfig(context.userId) : null
    // Pre-validate credential and apiKey inputs before applying operations
    // This filters out invalid credentials and apiKeys for hosted models
    let operationsToApply = operations
    const credentialErrors: ValidationError[] = []
    if (context?.userId) {
      const { filteredOperations, errors: credErrors } = await preValidateCredentialInputs(
        operations,
        { userId: context.userId },
        workflowState
      )
      operationsToApply = filteredOperations
      credentialErrors.push(...credErrors)
    }
    // Apply operations directly to the workflow state
    const {
      state: modifiedWorkflowState,
      validationErrors,
      skippedItems,
    } = applyOperationsToWorkflowState(workflowState, operationsToApply, permissionConfig)
    // Add credential validation errors
    validationErrors.push(...credentialErrors)
    // Get workspaceId for selector validation (best effort — validation below
    // still runs with workspaceId undefined if this lookup fails)
    let workspaceId: string | undefined
    try {
      const [workflowRecord] = await db
        .select({ workspaceId: workflowTable.workspaceId })
        .from(workflowTable)
        .where(eq(workflowTable.id, workflowId))
        .limit(1)
      workspaceId = workflowRecord?.workspaceId ?? undefined
    } catch (error) {
      logger.warn('Failed to get workspaceId for selector validation', { error, workflowId })
    }
    // Validate selector IDs exist in the database; failures are logged but
    // never abort the edit
    if (context?.userId) {
      try {
        const selectorErrors = await validateWorkflowSelectorIds(modifiedWorkflowState, {
          userId: context.userId,
          workspaceId,
        })
        validationErrors.push(...selectorErrors)
      } catch (error) {
        logger.warn('Selector ID validation failed', {
          error: error instanceof Error ? error.message : String(error),
        })
      }
    }
    // Validate the workflow state; sanitize:true may produce a cleaned copy in
    // validation.sanitizedState
    const validation = validateWorkflowState(modifiedWorkflowState, { sanitize: true })
    if (!validation.valid) {
      logger.error('Edited workflow state is invalid', {
        errors: validation.errors,
        warnings: validation.warnings,
      })
      throw new Error(`Invalid edited workflow: ${validation.errors.join('; ')}`)
    }
    if (validation.warnings.length > 0) {
      logger.warn('Edited workflow validation warnings', {
        warnings: validation.warnings,
      })
    }
    // Extract and persist custom tools to database (reuse workspaceId from selector validation)
    if (context?.userId && workspaceId) {
      try {
        const finalWorkflowState = validation.sanitizedState || modifiedWorkflowState
        const { saved, errors } = await extractAndPersistCustomTools(
          finalWorkflowState,
          workspaceId,
          context.userId
        )
        if (saved > 0) {
          logger.info(`Persisted ${saved} custom tool(s) to database`, { workflowId })
        }
        if (errors.length > 0) {
          logger.warn('Some custom tools failed to persist', { errors, workflowId })
        }
      } catch (error) {
        logger.error('Failed to persist custom tools', { error, workflowId })
      }
    } else if (context?.userId && !workspaceId) {
      logger.warn('Workflow has no workspaceId, skipping custom tools persistence', {
        workflowId,
      })
    } else {
      logger.warn('No userId in context - skipping custom tools persistence', { workflowId })
    }
    logger.info('edit_workflow successfully applied operations', {
      operationCount: operations.length,
      blocksCount: Object.keys(modifiedWorkflowState.blocks).length,
      edgesCount: modifiedWorkflowState.edges.length,
      inputValidationErrors: validationErrors.length,
      skippedItemsCount: skippedItems.length,
      schemaValidationErrors: validation.errors.length,
      validationWarnings: validation.warnings.length,
    })
    // Format validation errors for LLM feedback
    const inputErrors =
      validationErrors.length > 0
        ? validationErrors.map((e) => `Block "${e.blockId}" (${e.blockType}): ${e.error}`)
        : undefined
    // Format skipped items for LLM feedback
    const skippedMessages =
      skippedItems.length > 0 ? skippedItems.map((item) => item.reason) : undefined
    // Persist the workflow state to the database
    const finalWorkflowState = validation.sanitizedState || modifiedWorkflowState
    // Apply autolayout to position blocks properly; on failure, fall back to
    // the blocks' existing positions
    const layoutResult = applyAutoLayout(finalWorkflowState.blocks, finalWorkflowState.edges, {
      horizontalSpacing: 250,
      verticalSpacing: 100,
      padding: { x: 100, y: 100 },
    })
    const layoutedBlocks =
      layoutResult.success && layoutResult.blocks ? layoutResult.blocks : finalWorkflowState.blocks
    if (!layoutResult.success) {
      logger.warn('Autolayout failed, using default positions', {
        workflowId,
        error: layoutResult.error,
      })
    }
    // Loops/parallels are regenerated from the laid-out blocks; the save marks
    // the workflow as not deployed
    const workflowStateForDb = {
      blocks: layoutedBlocks,
      edges: finalWorkflowState.edges,
      loops: generateLoopBlocks(layoutedBlocks as any),
      parallels: generateParallelBlocks(layoutedBlocks as any),
      lastSaved: Date.now(),
      isDeployed: false,
    }
    const saveResult = await saveWorkflowToNormalizedTables(workflowId, workflowStateForDb as any)
    if (!saveResult.success) {
      logger.error('Failed to persist workflow state to database', {
        workflowId,
        error: saveResult.error,
      })
      throw new Error(`Failed to save workflow: ${saveResult.error}`)
    }
    // Update workflow's lastSynced timestamp
    await db
      .update(workflowTable)
      .set({
        lastSynced: new Date(),
        updatedAt: new Date(),
      })
      .where(eq(workflowTable.id, workflowId))
    logger.info('Workflow state persisted to database', { workflowId })
    // Return the modified workflow state with autolayout applied
    return {
      success: true,
      workflowState: { ...finalWorkflowState, blocks: layoutedBlocks },
      // Include input validation errors so the LLM can see what was rejected
      ...(inputErrors && {
        inputValidationErrors: inputErrors,
        inputValidationMessage: `${inputErrors.length} input(s) were rejected due to validation errors. The workflow was still updated with valid inputs only. Errors: ${inputErrors.join('; ')}`,
      }),
      // Include skipped items so the LLM can see what operations were skipped
      ...(skippedMessages && {
        skippedItems: skippedMessages,
        skippedItemsMessage: `${skippedItems.length} operation(s) were skipped due to invalid references. Details: ${skippedMessages.join('; ')}`,
      }),
    }
  },
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,272 +0,0 @@
import { createLogger } from '@sim/logger'
import { z } from 'zod'
import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
import { getBlock } from '@/blocks/registry'
import { getContextPack, saveContextPack, updateContextPack } from './change-store'
import {
buildSchemasByType,
getAllKnownBlockTypes,
hashWorkflowState,
loadWorkflowStateFromDb,
summarizeWorkflowState,
} from './workflow-state'
const logger = createLogger('WorkflowContextServerTool')
// Input for workflow_context_get: the workflow to summarize plus knobs that
// control how many block schemas are inlined in the response.
const WorkflowContextGetInputSchema = z.object({
  workflowId: z.string(),
  objective: z.string().optional(),
  includeBlockTypes: z.array(z.string()).optional(),
  includeAllSchemas: z.boolean().optional(),
  schemaMode: z.enum(['minimal', 'workflow', 'all']).optional(),
})
type WorkflowContextGetParams = z.infer<typeof WorkflowContextGetInputSchema>
// Input for workflow_context_expand: an existing context pack to enrich with
// additional block schemas, named either directly or via schemaRefs.
const WorkflowContextExpandInputSchema = z.object({
  contextPackId: z.string(),
  blockTypes: z.array(z.string()).optional(),
  schemaRefs: z.array(z.string()).optional(),
})
type WorkflowContextExpandParams = z.infer<typeof WorkflowContextExpandInputSchema>
// Maps normalized aliases/shorthands (as produced by normalizeToken) to
// canonical block type IDs; consulted before the registry-derived index.
const BLOCK_TYPE_ALIAS_MAP: Record<string, string> = {
  start: 'start_trigger',
  starttrigger: 'start_trigger',
  starter: 'start_trigger',
  trigger: 'start_trigger',
  loop: 'loop',
  parallel: 'parallel',
  parallelai: 'parallel',
  hitl: 'human_in_the_loop',
  humanintheloop: 'human_in_the_loop',
  routerv2: 'router_v2',
}
/**
 * Canonicalizes a user-facing label for lookup: trims, lowercases, and strips
 * every non-alphanumeric character ("Start Trigger" -> "starttrigger").
 */
function normalizeToken(value: string): string {
  const lowered = value.trim().toLowerCase()
  return lowered.replace(/[^a-z0-9]+/g, '')
}
/**
 * Builds a lookup from normalized tokens to canonical block type IDs.
 * Each known type is indexed under its own normalized ID and, when the block
 * registry has an entry, under its normalized display name. The first
 * registration of a token wins.
 */
function buildBlockTypeIndex(knownTypes: string[]): Map<string, string> {
  const index = new Map<string, string>()
  const register = (label: string, canonicalType: string): void => {
    const token = normalizeToken(label)
    if (token && !index.has(token)) {
      index.set(token, canonicalType)
    }
  }
  for (const rawType of knownTypes) {
    const canonicalType = String(rawType || '').trim()
    if (!canonicalType) continue
    register(canonicalType, canonicalType)
    const blockConfig = getBlock(canonicalType)
    register(String(blockConfig?.name || '').trim(), canonicalType)
  }
  return index
}
/**
 * Resolves requested block type names (possibly aliases or display names) to
 * canonical type IDs. Aliases take precedence over the registry index; empty
 * tokens are ignored; anything unmatched is returned verbatim in `unresolved`.
 * Both result lists are de-duplicated.
 */
function resolveBlockTypes(
  requestedBlockTypes: string[],
  knownTypes: string[]
): { resolved: string[]; unresolved: string[] } {
  const typeIndex = buildBlockTypeIndex(knownTypes)
  const resolvedTypes = new Set<string>()
  const unresolvedTypes = new Set<string>()
  for (const requested of requestedBlockTypes) {
    const token = normalizeToken(String(requested || ''))
    if (!token) continue
    const canonical = BLOCK_TYPE_ALIAS_MAP[token] ?? typeIndex.get(token)
    if (canonical) {
      resolvedTypes.add(canonical)
    } else {
      unresolvedTypes.add(String(requested))
    }
  }
  return {
    resolved: [...resolvedTypes],
    unresolved: [...unresolvedTypes],
  }
}
/**
 * Extracts the block type from a schema ref of the form
 * "<blockType>@sha256:<hash>". Returns null for empty input or an empty
 * block-type segment; a ref with no '@' is treated as a bare block type.
 */
function parseSchemaRefToBlockType(schemaRef: string): string | null {
  if (!schemaRef) return null
  const atIndex = schemaRef.indexOf('@')
  const blockType = atIndex === -1 ? schemaRef : schemaRef.slice(0, atIndex)
  return blockType || null
}
/**
 * Converts the blockType -> schemaRef map into a catalog list sorted by block
 * type (locale-aware), for inclusion in tool responses.
 */
function buildAvailableBlockCatalog(
  schemaRefsByType: Record<string, string>
): Array<Record<string, any>> {
  const sortedTypes = Object.keys(schemaRefsByType).sort((a, b) => a.localeCompare(b))
  const catalog: Array<Record<string, any>> = []
  for (const blockType of sortedTypes) {
    catalog.push({ blockType, schemaRef: schemaRefsByType[blockType] })
  }
  return catalog
}
/**
 * Copilot server tool `workflow_context_get`: builds a "context pack" for a
 * workflow — a stored snapshot of its state plus serialized block schemas —
 * and returns a summary plus schema refs the LLM can use to plan edits.
 *
 * schemaMode controls which block schemas are inlined: 'minimal' (only
 * explicitly requested types, the default), 'workflow' (types present in the
 * workflow plus requested ones), or 'all' (every known type).
 * includeAllSchemas=true forces 'all'. Requires workspace 'read' permission.
 */
export const workflowContextGetServerTool: BaseServerTool<WorkflowContextGetParams, any> = {
  name: 'workflow_context_get',
  inputSchema: WorkflowContextGetInputSchema,
  async execute(params: WorkflowContextGetParams, context?: { userId: string }): Promise<any> {
    if (!context?.userId) {
      throw new Error('Unauthorized workflow access')
    }
    const authorization = await authorizeWorkflowByWorkspacePermission({
      workflowId: params.workflowId,
      userId: context.userId,
      action: 'read',
    })
    if (!authorization.allowed) {
      throw new Error(authorization.message || 'Unauthorized workflow access')
    }
    // Snapshot the current DB state and fingerprint it so later edits can
    // detect staleness via snapshotHash.
    const { workflowState } = await loadWorkflowStateFromDb(params.workflowId)
    const snapshotHash = hashWorkflowState(workflowState as unknown as Record<string, unknown>)
    const knownTypes = getAllKnownBlockTypes()
    const blockTypesInWorkflowRaw = Object.values(workflowState.blocks || {}).map((block: any) =>
      String(block?.type || '')
    )
    const requestedTypesRaw = params.includeBlockTypes || []
    const resolvedWorkflowTypes = resolveBlockTypes(blockTypesInWorkflowRaw, knownTypes).resolved
    const resolvedRequestedTypes = resolveBlockTypes(requestedTypesRaw, knownTypes)
    const schemaMode =
      params.includeAllSchemas === true ? 'all' : (params.schemaMode || 'minimal')
    // Pick which block types get full schemas inlined, per schemaMode.
    const candidateTypes =
      schemaMode === 'all'
        ? knownTypes
        : schemaMode === 'workflow'
          ? [...resolvedWorkflowTypes, ...resolvedRequestedTypes.resolved]
          : [...resolvedRequestedTypes.resolved]
    const { schemasByType, schemaRefsByType } = buildSchemasByType(candidateTypes)
    const suggestedSchemaTypes = [...new Set(resolvedWorkflowTypes.filter(Boolean))]
    const summary = summarizeWorkflowState(workflowState)
    // Persist the pack so workflow_context_expand can add schemas later.
    const packId = await saveContextPack({
      workflowId: params.workflowId,
      snapshotHash,
      workflowState,
      schemasByType,
      schemaRefsByType,
      summary: {
        ...summary,
        objective: params.objective || null,
      },
    })
    logger.info('Generated workflow context pack', {
      workflowId: params.workflowId,
      contextPackId: packId,
      schemaCount: Object.keys(schemaRefsByType).length,
    })
    return {
      success: true,
      contextPackId: packId,
      workflowId: params.workflowId,
      snapshotHash,
      schemaMode,
      summary: {
        ...summary,
        objective: params.objective || null,
      },
      schemaRefsByType,
      availableBlockCatalog: buildAvailableBlockCatalog(schemaRefsByType),
      suggestedSchemaTypes,
      unresolvedRequestedBlockTypes: resolvedRequestedTypes.unresolved,
      knownBlockTypes: knownTypes,
      inScopeSchemas: schemasByType,
    }
  },
}
/**
 * Copilot server tool `workflow_context_expand`: loads additional block
 * schemas into an existing context pack. Block types may be named directly
 * (`blockTypes`) or derived from schema refs ("<blockType>@sha256:<hash>").
 * Newly built schemas are merged into the stored pack (new entries overwrite
 * same-type entries); unknown types become warnings, not errors. Requires
 * workspace 'read' permission on the pack's workflow.
 */
export const workflowContextExpandServerTool: BaseServerTool<WorkflowContextExpandParams, any> = {
  name: 'workflow_context_expand',
  inputSchema: WorkflowContextExpandInputSchema,
  async execute(params: WorkflowContextExpandParams, context?: { userId: string }): Promise<any> {
    if (!context?.userId) {
      throw new Error('Unauthorized workflow access')
    }
    // The pack must exist (packs can expire) before we can authorize against
    // the workflow it references.
    const contextPack = await getContextPack(params.contextPackId)
    if (!contextPack) {
      throw new Error(`Context pack not found or expired: ${params.contextPackId}`)
    }
    const authorization = await authorizeWorkflowByWorkspacePermission({
      workflowId: contextPack.workflowId,
      userId: context.userId,
      action: 'read',
    })
    if (!authorization.allowed) {
      throw new Error(authorization.message || 'Unauthorized workflow access')
    }
    const knownTypes = getAllKnownBlockTypes()
    // Collect requested types from both inputs; schemaRefs contribute their
    // "<blockType>" prefix.
    const requestedBlockTypesRaw = new Set<string>()
    for (const blockType of params.blockTypes || []) {
      if (blockType) requestedBlockTypesRaw.add(String(blockType))
    }
    for (const schemaRef of params.schemaRefs || []) {
      const blockType = parseSchemaRefToBlockType(schemaRef)
      if (blockType) requestedBlockTypesRaw.add(blockType)
    }
    const resolvedTypes = resolveBlockTypes([...requestedBlockTypesRaw], knownTypes)
    const typesToExpand = resolvedTypes.resolved
    const { schemasByType, schemaRefsByType } = buildSchemasByType(typesToExpand)
    // Merge new schemas over whatever the pack already holds.
    const mergedSchemasByType = {
      ...(contextPack.schemasByType || {}),
      ...schemasByType,
    }
    const mergedSchemaRefsByType = {
      ...(contextPack.schemaRefsByType || {}),
      ...schemaRefsByType,
    }
    const updatedContextPack = await updateContextPack(params.contextPackId, {
      schemasByType: mergedSchemasByType,
      schemaRefsByType: mergedSchemaRefsByType,
    })
    const warnings =
      resolvedTypes.unresolved.length > 0
        ? [
            `Unknown block type(s): ${resolvedTypes.unresolved.join(', ')}. ` +
              'Use known block type IDs from knownBlockTypes.',
          ]
        : []
    return {
      success: true,
      contextPackId: params.contextPackId,
      workflowId: contextPack.workflowId,
      snapshotHash: contextPack.snapshotHash,
      schemasByType,
      schemaRefsByType,
      loadedSchemaTypes: Object.keys(updatedContextPack?.schemasByType || mergedSchemasByType).sort(),
      resolvedBlockTypes: resolvedTypes.resolved,
      unresolvedBlockTypes: resolvedTypes.unresolved,
      knownBlockTypes: knownTypes,
      warnings,
    }
  },
}

View File

@@ -1,286 +0,0 @@
import { db } from '@sim/db'
import { workflow as workflowTable } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { applyAutoLayout } from '@/lib/workflows/autolayout'
import { extractAndPersistCustomTools } from '@/lib/workflows/persistence/custom-tools-persistence'
import {
loadWorkflowFromNormalizedTables,
saveWorkflowToNormalizedTables,
} from '@/lib/workflows/persistence/utils'
import { validateWorkflowState } from '@/lib/workflows/sanitization/validation'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
import { getUserPermissionConfig } from '@/ee/access-control/utils/permission-check'
import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils'
import { applyOperationsToWorkflowState } from './engine'
import type { EditWorkflowOperation, ValidationError } from './types'
import { preValidateCredentialInputs, validateWorkflowSelectorIds } from './validation'
// Parameters for applyWorkflowOperations.
type ApplyWorkflowOperationsParams = {
  operations: EditWorkflowOperation[]
  workflowId: string
  userId: string
  // Optional JSON-serialized client-side workflow state; when provided it is
  // used as the base state instead of loading from the database.
  currentUserWorkflow?: string
}
/**
 * Loads a workflow's current state from the normalized tables.
 *
 * Blocks persisted without a `type` cannot be interpreted downstream, so they
 * are logged, dropped, and any edges touching them are filtered out. Also
 * collects each remaining block's defined sub-block values into a separate
 * blockId -> subBlockId -> value map.
 *
 * @throws if the workflow row or its normalized data is missing.
 */
async function getCurrentWorkflowStateFromDb(
  workflowId: string
): Promise<{ workflowState: any; subBlockValues: Record<string, Record<string, any>> }> {
  const logger = createLogger('WorkflowOperationApply')
  const [workflowRecord] = await db
    .select()
    .from(workflowTable)
    .where(eq(workflowTable.id, workflowId))
    .limit(1)
  if (!workflowRecord) throw new Error(`Workflow ${workflowId} not found in database`)
  const normalized = await loadWorkflowFromNormalizedTables(workflowId)
  if (!normalized) throw new Error('Workflow has no normalized data')
  // Identify blocks that came back without a type.
  const blocks: Record<string, any> = { ...normalized.blocks }
  const droppedBlockIds: string[] = []
  for (const [blockId, block] of Object.entries(blocks) as Array<[string, any]>) {
    if (block.type) continue
    logger.warn(`Block ${blockId} loaded without type from database`, {
      blockKeys: Object.keys(block),
      blockName: block.name,
    })
    droppedBlockIds.push(blockId)
  }
  // Drop the typeless blocks and every edge that references one.
  for (const blockId of droppedBlockIds) {
    delete blocks[blockId]
  }
  const droppedSet = new Set(droppedBlockIds)
  const edges = normalized.edges.filter(
    (edge: any) => !droppedSet.has(edge.source) && !droppedSet.has(edge.target)
  )
  const workflowState: any = {
    blocks,
    edges,
    loops: normalized.loops || {},
    parallels: normalized.parallels || {},
  }
  // Collect defined sub-block values per block (undefined values are omitted).
  const subBlockValues: Record<string, Record<string, any>> = {}
  for (const [blockId, block] of Object.entries(normalized.blocks)) {
    const values: Record<string, any> = {}
    for (const [subId, sub] of Object.entries((block as any).subBlocks || {})) {
      if ((sub as any).value !== undefined) values[subId] = (sub as any).value
    }
    subBlockValues[blockId] = values
  }
  return { workflowState, subBlockValues }
}
/**
 * Applies a batch of copilot edit operations to a workflow and persists the
 * result to the normalized tables.
 *
 * Pipeline: validate params -> authorize (workspace 'write') -> load current
 * state (caller-supplied JSON snapshot, else DB) -> pre-validate
 * credential/apiKey inputs -> apply operations -> validate selector IDs and
 * the full state -> persist custom tools -> autolayout -> save and bump
 * lastSynced/updatedAt. This mirrors editWorkflowServerTool's execute flow
 * but takes a plain params object with a required userId.
 *
 * Hard failures (bad params, authorization, invalid final state, save error)
 * throw; per-input validation failures and skipped operations are returned in
 * the result object instead.
 */
export async function applyWorkflowOperations(params: ApplyWorkflowOperationsParams): Promise<any> {
  const logger = createLogger('WorkflowOperationApply')
  const { operations, workflowId, currentUserWorkflow, userId } = params
  if (!Array.isArray(operations) || operations.length === 0) {
    throw new Error('operations are required and must be an array')
  }
  if (!workflowId) throw new Error('workflowId is required')
  if (!userId) throw new Error('Unauthorized workflow access')
  const authorization = await authorizeWorkflowByWorkspacePermission({
    workflowId,
    userId,
    action: 'write',
  })
  if (!authorization.allowed) {
    throw new Error(authorization.message || 'Unauthorized workflow access')
  }
  logger.info('Executing workflow operation apply', {
    operationCount: operations.length,
    workflowId,
    hasCurrentUserWorkflow: !!currentUserWorkflow,
  })
  // Get current workflow state: prefer the client-supplied snapshot so edits
  // apply on top of unsaved client changes; otherwise load from the DB.
  let workflowState: any
  if (currentUserWorkflow) {
    try {
      workflowState = JSON.parse(currentUserWorkflow)
    } catch (error) {
      logger.error('Failed to parse currentUserWorkflow', error)
      throw new Error('Invalid currentUserWorkflow format')
    }
  } else {
    const fromDb = await getCurrentWorkflowStateFromDb(workflowId)
    workflowState = fromDb.workflowState
  }
  // Get permission config for the user
  const permissionConfig = await getUserPermissionConfig(userId)
  // Pre-validate credential and apiKey inputs before applying operations
  // This filters out invalid credentials and apiKeys for hosted models
  let operationsToApply = operations
  const credentialErrors: ValidationError[] = []
  const { filteredOperations, errors: credErrors } = await preValidateCredentialInputs(
    operations,
    { userId },
    workflowState
  )
  operationsToApply = filteredOperations
  credentialErrors.push(...credErrors)
  // Apply operations directly to the workflow state
  const {
    state: modifiedWorkflowState,
    validationErrors,
    skippedItems,
  } = applyOperationsToWorkflowState(workflowState, operationsToApply, permissionConfig)
  // Add credential validation errors
  validationErrors.push(...credentialErrors)
  // Get workspaceId for selector validation (best effort — validation below
  // still runs with workspaceId undefined if this lookup fails)
  let workspaceId: string | undefined
  try {
    const [workflowRecord] = await db
      .select({ workspaceId: workflowTable.workspaceId })
      .from(workflowTable)
      .where(eq(workflowTable.id, workflowId))
      .limit(1)
    workspaceId = workflowRecord?.workspaceId ?? undefined
  } catch (error) {
    logger.warn('Failed to get workspaceId for selector validation', { error, workflowId })
  }
  // Validate selector IDs exist in the database; failures are logged but
  // never abort the edit
  try {
    const selectorErrors = await validateWorkflowSelectorIds(modifiedWorkflowState, {
      userId,
      workspaceId,
    })
    validationErrors.push(...selectorErrors)
  } catch (error) {
    logger.warn('Selector ID validation failed', {
      error: error instanceof Error ? error.message : String(error),
    })
  }
  // Validate the workflow state; sanitize:true may produce a cleaned copy in
  // validation.sanitizedState
  const validation = validateWorkflowState(modifiedWorkflowState, { sanitize: true })
  if (!validation.valid) {
    logger.error('Edited workflow state is invalid', {
      errors: validation.errors,
      warnings: validation.warnings,
    })
    throw new Error(`Invalid edited workflow: ${validation.errors.join('; ')}`)
  }
  if (validation.warnings.length > 0) {
    logger.warn('Edited workflow validation warnings', {
      warnings: validation.warnings,
    })
  }
  // Extract and persist custom tools to database (reuse workspaceId from selector validation)
  if (workspaceId) {
    try {
      const finalWorkflowState = validation.sanitizedState || modifiedWorkflowState
      const { saved, errors } = await extractAndPersistCustomTools(finalWorkflowState, workspaceId, userId)
      if (saved > 0) {
        logger.info(`Persisted ${saved} custom tool(s) to database`, { workflowId })
      }
      if (errors.length > 0) {
        logger.warn('Some custom tools failed to persist', { errors, workflowId })
      }
    } catch (error) {
      logger.error('Failed to persist custom tools', { error, workflowId })
    }
  } else {
    logger.warn('Workflow has no workspaceId, skipping custom tools persistence', {
      workflowId,
    })
  }
  logger.info('Workflow operation apply succeeded', {
    operationCount: operations.length,
    blocksCount: Object.keys(modifiedWorkflowState.blocks).length,
    edgesCount: modifiedWorkflowState.edges.length,
    inputValidationErrors: validationErrors.length,
    skippedItemsCount: skippedItems.length,
    schemaValidationErrors: validation.errors.length,
    validationWarnings: validation.warnings.length,
  })
  // Format validation errors for LLM feedback
  const inputErrors =
    validationErrors.length > 0
      ? validationErrors.map((e) => `Block "${e.blockId}" (${e.blockType}): ${e.error}`)
      : undefined
  // Format skipped items for LLM feedback
  const skippedMessages =
    skippedItems.length > 0 ? skippedItems.map((item) => item.reason) : undefined
  // Persist the workflow state to the database
  const finalWorkflowState = validation.sanitizedState || modifiedWorkflowState
  // Apply autolayout to position blocks properly; on failure, fall back to
  // the blocks' existing positions
  const layoutResult = applyAutoLayout(finalWorkflowState.blocks, finalWorkflowState.edges, {
    horizontalSpacing: 250,
    verticalSpacing: 100,
    padding: { x: 100, y: 100 },
  })
  const layoutedBlocks =
    layoutResult.success && layoutResult.blocks ? layoutResult.blocks : finalWorkflowState.blocks
  if (!layoutResult.success) {
    logger.warn('Autolayout failed, using default positions', {
      workflowId,
      error: layoutResult.error,
    })
  }
  // Loops/parallels are regenerated from the laid-out blocks; the save marks
  // the workflow as not deployed
  const workflowStateForDb = {
    blocks: layoutedBlocks,
    edges: finalWorkflowState.edges,
    loops: generateLoopBlocks(layoutedBlocks as any),
    parallels: generateParallelBlocks(layoutedBlocks as any),
    lastSaved: Date.now(),
    isDeployed: false,
  }
  const saveResult = await saveWorkflowToNormalizedTables(workflowId, workflowStateForDb as any)
  if (!saveResult.success) {
    logger.error('Failed to persist workflow state to database', {
      workflowId,
      error: saveResult.error,
    })
    throw new Error(`Failed to save workflow: ${saveResult.error}`)
  }
  // Update workflow's lastSynced timestamp
  await db
    .update(workflowTable)
    .set({
      lastSynced: new Date(),
      updatedAt: new Date(),
    })
    .where(eq(workflowTable.id, workflowId))
  logger.info('Workflow state persisted to database', { workflowId })
  // Return the modified workflow state with autolayout applied; include
  // per-input errors and skipped operations so the LLM can react
  return {
    success: true,
    workflowState: { ...finalWorkflowState, blocks: layoutedBlocks },
    ...(inputErrors && {
      inputValidationErrors: inputErrors,
      inputValidationMessage: `${inputErrors.length} input(s) were rejected due to validation errors. The workflow was still updated with valid inputs only. Errors: ${inputErrors.join('; ')}`,
    }),
    ...(skippedMessages && {
      skippedItems: skippedMessages,
      skippedItemsMessage: `${skippedItems.length} operation(s) were skipped due to invalid references. Details: ${skippedMessages.join('; ')}`,
    }),
  }
}

View File

@@ -1,497 +0,0 @@
import crypto from 'crypto'
import { db } from '@sim/db'
import { workflow as workflowTable } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils'
import { getAllBlockTypes, getBlock } from '@/blocks/registry'
import type { SubBlockConfig } from '@/blocks/types'
const logger = createLogger('WorkflowContextState')
// Container block types whose schemas are hand-rolled in serializeBlockSchema
// rather than registry-backed.
// NOTE(review): this constant appears unreferenced in this module — confirm
// whether external consumers rely on it before removing.
const CONTAINER_BLOCK_TYPES = ['loop', 'parallel'] as const
/**
 * Recursively rewrites a JSON-like value so every object's keys appear in
 * sorted order, yielding a canonical form for hashing/comparison. Arrays keep
 * their element order; primitives pass through unchanged.
 */
function stableSortValue(value: any): any {
  if (Array.isArray(value)) {
    return value.map(stableSortValue)
  }
  if (value && typeof value === 'object') {
    const sortedEntries = Object.keys(value)
      .sort()
      .map((key) => [key, stableSortValue(value[key])] as const)
    return Object.fromEntries(sortedEntries)
  }
  return value
}
/**
 * Produces a deterministic `sha256:<hex>` fingerprint of a workflow state by
 * hashing its canonical (key-sorted) JSON serialization, so logically equal
 * states hash identically regardless of key order.
 */
export function hashWorkflowState(state: Record<string, unknown>): string {
  const canonicalJson = JSON.stringify(stableSortValue(state))
  const digest = crypto.createHash('sha256').update(canonicalJson).digest('hex')
  return `sha256:${digest}`
}
/**
 * Normalizes a sub-block `options` value into a list of option tokens.
 * Objects contribute their string `id` (or, failing that, string `label`);
 * other non-null values are stringified. Nullish entries, non-string
 * ids/labels, and empty strings are dropped. Returns null for non-arrays or
 * when nothing survives.
 */
function normalizeOptions(options: unknown): string[] | null {
  if (!Array.isArray(options)) return null
  const tokens: string[] = []
  for (const option of options) {
    let token: string | null = null
    if (option != null) {
      if (typeof option === 'object') {
        const optionRecord = option as Record<string, unknown>
        if (typeof optionRecord.id === 'string') {
          token = optionRecord.id
        } else if (typeof optionRecord.label === 'string') {
          token = optionRecord.label
        }
      } else {
        token = String(option)
      }
    }
    if (token) tokens.push(token)
  }
  return tokens.length > 0 ? tokens : null
}
/**
 * Serializes a sub-block `required` setting for the schema payload.
 * Booleans pass through; conditional-required objects are reduced to their
 * field/operator/value keys; anything else (nullish or unexpected) becomes
 * false.
 */
function serializeRequired(required: SubBlockConfig['required']): boolean | Record<string, any> {
  if (typeof required === 'boolean') return required
  if (required && typeof required === 'object') {
    const source = required as Record<string, unknown>
    const picked: Record<string, any> = {}
    for (const key of ['field', 'operator', 'value'] as const) {
      if (source[key] !== undefined) {
        picked[key] = source[key]
      }
    }
    return picked
  }
  return false
}
/**
 * Serializes one registry sub-block config into the schema payload shape.
 * Function-valued `options` are marked dynamic (hasDynamicOptions) and
 * serialized as null; static options are normalized to string tokens.
 */
function serializeSubBlock(subBlock: SubBlockConfig): Record<string, unknown> {
  const hasDynamicOptions = typeof subBlock.options === 'function'
  return {
    id: subBlock.id,
    type: subBlock.type,
    title: subBlock.title,
    description: subBlock.description || null,
    mode: subBlock.mode || null,
    placeholder: subBlock.placeholder || null,
    hidden: Boolean(subBlock.hidden),
    multiSelect: Boolean(subBlock.multiSelect),
    required: serializeRequired(subBlock.required),
    hasDynamicOptions,
    options: hasDynamicOptions ? null : normalizeOptions(subBlock.options),
    defaultValue: subBlock.defaultValue ?? null,
    min: subBlock.min ?? null,
    max: subBlock.max ?? null,
  }
}
/**
 * Builds one container (loop/parallel) sub-block schema entry: every field
 * starts at the inert default used throughout these hand-rolled schemas, with
 * `overrides` merged on top. Spreading onto existing keys preserves their
 * original position, so key order matches the previous literal form.
 */
function containerSubBlock(
  id: string,
  type: string,
  title: string,
  description: string,
  overrides: Record<string, unknown> = {}
): Record<string, unknown> {
  return {
    id,
    type,
    title,
    description,
    mode: null,
    placeholder: null,
    hidden: false,
    multiSelect: false,
    required: false,
    hasDynamicOptions: false,
    options: null,
    defaultValue: null,
    min: null,
    max: null,
    ...overrides,
  }
}

/**
 * Serializes the copilot-facing schema for a block type.
 *
 * Loop and parallel containers have no block-registry entry, so their schemas
 * are hand-rolled here via containerSubBlock (previously six near-identical
 * 14-field literals); every other type is derived from the registry config
 * returned by getBlock. Returns null for unknown types.
 */
function serializeBlockSchema(blockType: string): Record<string, unknown> | null {
  if (blockType === 'loop') {
    return {
      blockType: 'loop',
      blockName: 'Loop',
      category: 'blocks',
      triggerAllowed: false,
      hasTriggersConfig: false,
      subBlocks: [
        containerSubBlock(
          'loopType',
          'dropdown',
          'Loop Type',
          'Loop mode: for, forEach, while, doWhile',
          { options: ['for', 'forEach', 'while', 'doWhile'], defaultValue: 'for' }
        ),
        containerSubBlock('iterations', 'short-input', 'Iterations', 'Iteration count for for-loops', {
          defaultValue: 1,
          min: 1,
        }),
        containerSubBlock('collection', 'long-input', 'Collection', 'Collection expression for forEach loops'),
        containerSubBlock('condition', 'long-input', 'Condition', 'Condition expression for while/doWhile loops'),
      ],
      outputKeys: ['index', 'item', 'items'],
      longDescription: null,
    }
  }
  if (blockType === 'parallel') {
    return {
      blockType: 'parallel',
      blockName: 'Parallel',
      category: 'blocks',
      triggerAllowed: false,
      hasTriggersConfig: false,
      subBlocks: [
        containerSubBlock(
          'parallelType',
          'dropdown',
          'Parallel Type',
          'Parallel mode: count or collection',
          { options: ['count', 'collection'], defaultValue: 'count' }
        ),
        containerSubBlock('count', 'short-input', 'Count', 'Branch count when parallelType is count', {
          defaultValue: 1,
          min: 1,
        }),
        containerSubBlock(
          'collection',
          'long-input',
          'Collection',
          'Collection expression when parallelType is collection'
        ),
      ],
      outputKeys: ['index', 'currentItem', 'items'],
      longDescription: null,
    }
  }
  const blockConfig = getBlock(blockType)
  if (!blockConfig) return null
  const subBlocks = Array.isArray(blockConfig.subBlocks)
    ? blockConfig.subBlocks.map(serializeSubBlock)
    : []
  const outputs = blockConfig.outputs || {}
  const outputKeys = Object.keys(outputs)
  return {
    blockType,
    blockName: blockConfig.name || blockType,
    category: blockConfig.category,
    triggerAllowed: Boolean(blockConfig.triggerAllowed || blockConfig.triggers?.enabled),
    hasTriggersConfig: Boolean(blockConfig.triggers?.enabled),
    subBlocks,
    outputKeys,
    longDescription: blockConfig.longDescription || null,
  }
}
/**
 * Serializes and content-addresses the schemas for a set of block types.
 * Falsy and duplicate types are skipped, as are types with no serializable
 * schema. Each surviving schema is canonicalized (key-sorted) and hashed;
 * the ref takes the form "<blockType>@sha256:<hex>".
 */
export function buildSchemasByType(blockTypes: string[]): {
  schemasByType: Record<string, any>
  schemaRefsByType: Record<string, string>
} {
  const schemasByType: Record<string, any> = {}
  const schemaRefsByType: Record<string, string> = {}
  for (const blockType of new Set(blockTypes.filter(Boolean))) {
    const schema = serializeBlockSchema(blockType)
    if (!schema) continue
    const canonicalSchema = stableSortValue(schema)
    const digest = crypto
      .createHash('sha256')
      .update(JSON.stringify(canonicalSchema))
      .digest('hex')
    schemasByType[blockType] = canonicalSchema
    schemaRefsByType[blockType] = `${blockType}@sha256:${digest}`
  }
  return { schemasByType, schemaRefsByType }
}
/**
 * Loads a workflow's state from the normalized tables for context building.
 *
 * Blocks persisted without a `type` cannot be interpreted downstream, so they
 * are dropped (with a single warning listing the dropped IDs) along with any
 * edges referencing them. Also returns the owning workspaceId when set.
 *
 * @throws if the workflow row or its normalized data is missing.
 */
export async function loadWorkflowStateFromDb(workflowId: string): Promise<{
  workflowState: {
    blocks: Record<string, any>
    edges: Array<Record<string, any>>
    loops: Record<string, any>
    parallels: Record<string, any>
  }
  workspaceId?: string
}> {
  const [workflowRecord] = await db
    .select({ workspaceId: workflowTable.workspaceId })
    .from(workflowTable)
    .where(eq(workflowTable.id, workflowId))
    .limit(1)
  if (!workflowRecord) {
    throw new Error(`Workflow ${workflowId} not found`)
  }
  const normalized = await loadWorkflowFromNormalizedTables(workflowId)
  if (!normalized) {
    throw new Error(`Workflow ${workflowId} has no normalized data`)
  }
  const blocks: Record<string, any> = { ...normalized.blocks }
  const droppedBlockIds = Object.keys(blocks).filter(
    (blockId) => !(blocks[blockId] as { type?: unknown })?.type
  )
  for (const blockId of droppedBlockIds) {
    delete blocks[blockId]
  }
  const droppedSet = new Set(droppedBlockIds)
  const edges = (normalized.edges || []).filter(
    (edge: any) => !droppedSet.has(edge.source) && !droppedSet.has(edge.target)
  )
  if (droppedBlockIds.length > 0) {
    logger.warn('Dropped blocks without type while loading workflow state', {
      workflowId,
      dropped: droppedBlockIds,
    })
  }
  return {
    workflowState: {
      blocks,
      edges,
      loops: normalized.loops || {},
      parallels: normalized.parallels || {},
    },
    workspaceId: workflowRecord.workspaceId || undefined,
  }
}
/**
 * Builds a deterministic, size-bounded summary of a workflow graph.
 *
 * The summary includes: per-type block counts, trigger blocks, a sorted block
 * inventory (capped at 160 entries), a sorted edge inventory (capped at 240),
 * duplicate block-name detection, subflow parent/child membership, and a
 * reference guide listing each block's output keys and example
 * `<referenceToken.outputKey>` templates.
 *
 * @param workflowState - Blocks/edges/loops/parallels of the workflow.
 * @returns A plain-object summary suitable for serialization.
 */
export function summarizeWorkflowState(workflowState: {
  blocks: Record<string, any>
  edges: Array<Record<string, any>>
  loops: Record<string, any>
  parallels: Record<string, any>
}): Record<string, unknown> {
  // Caps keep the summary bounded on very large workflows.
  const MAX_BLOCK_INVENTORY = 160
  const MAX_EDGE_INVENTORY = 240
  const blocks = workflowState.blocks || {}
  const edges = workflowState.edges || []
  const blockTypes: Record<string, number> = {}
  const triggerBlocks: Array<{ id: string; name: string; type: string }> = []
  const blockInventoryRaw: Array<{
    id: string
    name: string
    type: string
    parentId: string | null
    triggerMode: boolean
    enabled: boolean
  }> = []
  // Collapses a display name to lowercase alphanumerics so near-identical
  // names ("My Block" vs "my-block") map to the same token.
  const normalizeReferenceToken = (value: string): string =>
    value
      .toLowerCase()
      .replace(/[^a-z0-9]+/g, '')
      .trim()
  const dedupeStrings = (values: string[]): string[] => [...new Set(values.filter(Boolean))]
  // Output keys the start block always exposes on top of its schema outputs.
  const startOutputKeys = ['input', 'files', 'conversationId']
  const duplicateNameIndex = new Map<string, { name: string; blockIds: string[] }>()
  for (const [blockId, block] of Object.entries(blocks)) {
    const blockRecord = block as Record<string, unknown>
    const dataRecord = (blockRecord.data as Record<string, unknown> | undefined) || undefined
    const blockType = String(blockRecord.type || 'unknown')
    const blockName = String(blockRecord.name || blockType)
    const parentId = String(dataRecord?.parentId || '').trim() || null
    const normalizedName = normalizeReferenceToken(blockName)
    blockTypes[blockType] = (blockTypes[blockType] || 0) + 1
    if (blockRecord.triggerMode === true) {
      triggerBlocks.push({
        id: blockId,
        name: blockName,
        type: blockType,
      })
    }
    blockInventoryRaw.push({
      id: blockId,
      name: blockName,
      type: blockType,
      parentId,
      triggerMode: blockRecord.triggerMode === true,
      enabled: blockRecord.enabled !== false,
    })
    // Group block IDs by normalized name to surface duplicate display names.
    if (normalizedName) {
      const existing = duplicateNameIndex.get(normalizedName)
      if (existing) {
        existing.blockIds.push(blockId)
      } else {
        duplicateNameIndex.set(normalizedName, { name: blockName, blockIds: [blockId] })
      }
    }
  }
  // Sort a copy (by name, then id, for stable output) and cap the inventory.
  const blockInventory = [...blockInventoryRaw]
    .sort((a, b) => a.name.localeCompare(b.name) || a.id.localeCompare(b.id))
    .slice(0, MAX_BLOCK_INVENTORY)
  const blockInventoryTruncated = blockInventoryRaw.length > MAX_BLOCK_INVENTORY
  // Single id -> entry index shared by edge naming and subflow lookups;
  // replaces repeated O(n) linear scans over blockInventoryRaw.
  const blockEntryById = new Map(blockInventoryRaw.map((entry) => [entry.id, entry]))
  const edgeInventoryRaw = edges.map((edge: any) => {
    const source = String(edge.source || '')
    const target = String(edge.target || '')
    const sourceHandle = String(edge.sourceHandle || '').trim() || null
    const targetHandle = String(edge.targetHandle || '').trim() || null
    return {
      source,
      sourceName: blockEntryById.get(source)?.name || source,
      sourceHandle,
      target,
      targetName: blockEntryById.get(target)?.name || target,
      targetHandle,
    }
  })
  // Sort a copy so edgeInventoryRaw stays unmutated, mirroring the
  // blockInventory handling above.
  const edgeInventory = [...edgeInventoryRaw]
    .sort((a, b) => {
      const bySource = a.sourceName.localeCompare(b.sourceName)
      if (bySource !== 0) return bySource
      const byTarget = a.targetName.localeCompare(b.targetName)
      if (byTarget !== 0) return byTarget
      return a.source.localeCompare(b.source)
    })
    .slice(0, MAX_EDGE_INVENTORY)
  const edgeInventoryTruncated = edgeInventoryRaw.length > MAX_EDGE_INVENTORY
  // Only names shared by 2+ blocks are reported, most-duplicated first.
  const duplicateBlockNames = [...duplicateNameIndex.values()]
    .filter((entry) => entry.blockIds.length > 1)
    .map((entry) => ({
      name: entry.name,
      count: entry.blockIds.length,
      blockIds: entry.blockIds.sort(),
    }))
    .sort((a, b) => b.count - a.count || a.name.localeCompare(b.name))
  // parentId -> child block IDs, describing subflow membership.
  const subflowChildrenMap = new Map<string, string[]>()
  for (const block of blockInventoryRaw) {
    if (!block.parentId) continue
    const existing = subflowChildrenMap.get(block.parentId) || []
    existing.push(block.id)
    subflowChildrenMap.set(block.parentId, existing)
  }
  const subflowChildren = [...subflowChildrenMap.entries()]
    .map(([subflowId, childBlockIds]) => {
      const subflowBlock = blockEntryById.get(subflowId)
      return {
        subflowId,
        subflowName: subflowBlock?.name || subflowId,
        subflowType: subflowBlock?.type || 'unknown',
        childBlockIds: childBlockIds.sort(),
      }
    })
    .sort((a, b) => a.subflowName.localeCompare(b.subflowName))
  // For each inventoried block, list resolvable output keys and example
  // reference templates (e.g. `<myblock.input>`) the copilot can embed.
  const referenceGuide = blockInventory.map((entry) => {
    const blockSchema = getBlock(entry.type)
    const schemaOutputKeys = Object.keys(blockSchema?.outputs || {})
    const outputKeys =
      entry.type === 'start'
        ? dedupeStrings([...schemaOutputKeys, ...startOutputKeys])
        : dedupeStrings(schemaOutputKeys)
    const referenceToken =
      normalizeReferenceToken(entry.name) || normalizeReferenceToken(entry.type) || entry.id
    return {
      blockId: entry.id,
      blockName: entry.name,
      blockType: entry.type,
      parentId: entry.parentId,
      referenceToken,
      outputKeys,
      examples: outputKeys.slice(0, 4).map((key) => `<${referenceToken}.${key}>`),
    }
  })
  return {
    blockCount: Object.keys(blocks).length,
    edgeCount: edges.length,
    loopCount: Object.keys(workflowState.loops || {}).length,
    parallelCount: Object.keys(workflowState.parallels || {}).length,
    blockTypes,
    triggerBlocks,
    blockInventory,
    blockInventoryTruncated,
    edgeInventory,
    edgeInventoryTruncated,
    duplicateBlockNames,
    subflowChildren,
    referenceGuide,
  }
}
/** Returns every registered block type plus the container (subflow) types, deduplicated. */
export function getAllKnownBlockTypes(): string[] {
  const known = new Set<string>(getAllBlockTypes())
  for (const containerType of CONTAINER_BLOCK_TYPES) {
    known.add(containerType)
  }
  return Array.from(known)
}

View File

@@ -1,230 +0,0 @@
import { createLogger } from '@sim/logger'
import { z } from 'zod'
import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool'
import { validateWorkflowState } from '@/lib/workflows/sanitization/validation'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
import { hashWorkflowState, loadWorkflowStateFromDb } from './workflow-state'
const logger = createLogger('WorkflowVerifyServerTool')
// An acceptance item is either a bare assertion string or an object carrying
// the assertion plus an optional `kind` discriminator.
const AcceptanceItemSchema = z.union([
  z.string(),
  z.object({
    kind: z.string().optional(),
    assert: z.string(),
  }),
])
// Input schema for the workflow_verify tool. `.strict()` rejects unknown keys
// so malformed tool calls fail fast instead of being silently accepted.
const WorkflowVerifyInputSchema = z
  .object({
    workflowId: z.string(),
    acceptance: z.array(AcceptanceItemSchema).optional(),
    baseSnapshotHash: z.string().optional(),
  })
  .strict()
// Parameter type derived from the schema above.
type WorkflowVerifyParams = z.infer<typeof WorkflowVerifyInputSchema>
/** Lowercases a name and strips surrounding whitespace for case-insensitive matching. */
function normalizeName(value: string): string {
  return value.toLowerCase().trim()
}
/** Reduces a token to its lowercase alphanumeric characters for fuzzy block matching. */
function canonicalizeToken(value: string): string {
  // Trim + lowercase (normalizeName semantics, inlined), then drop every
  // character that is not a lowercase letter or digit.
  return value.trim().toLowerCase().replace(/[^a-z0-9]/g, '')
}
function resolveBlockToken(
workflowState: { blocks: Record<string, any> },
token: string
): string | null {
if (!token) return null
if (workflowState.blocks[token]) return token
const normalized = normalizeName(token)
const canonical = canonicalizeToken(token)
for (const [blockId, block] of Object.entries(workflowState.blocks || {})) {
const blockName = normalizeName(String((block as Record<string, unknown>).name || ''))
if (blockName === normalized) return blockId
if (canonicalizeToken(blockName) === canonical) return blockId
}
return null
}
function resolveBlocksByType(
workflowState: { blocks: Record<string, any> },
token: string
): string[] {
const normalized = normalizeName(token)
const canonical = canonicalizeToken(token)
const matches: string[] = []
for (const [blockId, block] of Object.entries(workflowState.blocks || {})) {
const blockType = normalizeName(String((block as Record<string, unknown>).type || ''))
if (!blockType) continue
if (blockType === normalized || canonicalizeToken(blockType) === canonical) {
matches.push(blockId)
}
}
return matches
}
/**
 * Checks that every consecutive pair in `blockPath` is connected by a direct
 * edge. Note: this verifies hop-by-hop adjacency as listed by the caller, not
 * transitive reachability. Paths with fewer than two blocks trivially pass.
 */
function hasPath(
  workflowState: { edges: Array<Record<string, any>> },
  blockPath: string[]
): boolean {
  if (blockPath.length < 2) return true
  // Index edges as source -> set of direct successors.
  const successors = new Map<string, Set<string>>()
  for (const edge of workflowState.edges || []) {
    const source = String(edge.source || '')
    const target = String(edge.target || '')
    if (!source || !target) continue
    let targets = successors.get(source)
    if (!targets) {
      targets = new Set<string>()
      successors.set(source, targets)
    }
    targets.add(target)
  }
  // Every hop must be a direct edge.
  return blockPath
    .slice(0, -1)
    .every((from, i) => successors.get(from)?.has(blockPath[i + 1]) ?? false)
}
/**
 * Evaluates structural acceptance assertions against a workflow state.
 *
 * Supported assertion formats (prefix-dispatched):
 *  - `block_exists:<idOrName>`       a block with that ID or name exists
 *  - `block_type_exists:<type>`      at least one block of that type exists
 *  - `trigger_exists:<type>`         a trigger-mode block of that type exists
 *  - `path_exists:<a> -> <b>`        each consecutive pair is directly connected
 *
 * Unknown formats are reported as warnings rather than failures so newer
 * assertion kinds cannot break older structural verification.
 *
 * @returns failures (failed assertions), warnings (unknown formats), and a
 *          per-assertion `checks` audit trail with resolution details.
 */
function evaluateAssertions(params: {
  workflowState: {
    blocks: Record<string, any>
    edges: Array<Record<string, any>>
  }
  assertions: string[]
}): { failures: string[]; warnings: string[]; checks: Array<Record<string, any>> } {
  const failures: string[] = []
  const warnings: string[] = []
  const checks: Array<Record<string, any>> = []
  for (const assertion of params.assertions) {
    // block_exists: resolve by ID, name, or canonical name.
    if (assertion.startsWith('block_exists:')) {
      const token = assertion.slice('block_exists:'.length).trim()
      const blockId = resolveBlockToken(params.workflowState, token)
      const passed = Boolean(blockId)
      checks.push({ assert: assertion, passed, resolvedBlockId: blockId || null })
      if (!passed) failures.push(`Assertion failed: ${assertion}`)
      continue
    }
    // block_type_exists: at least one block of the given type.
    if (assertion.startsWith('block_type_exists:')) {
      const token = assertion.slice('block_type_exists:'.length).trim()
      const matchedBlockIds = resolveBlocksByType(params.workflowState, token)
      const passed = matchedBlockIds.length > 0
      checks.push({ assert: assertion, passed, matchedBlockIds })
      if (!passed) failures.push(`Assertion failed: ${assertion}`)
      continue
    }
    // trigger_exists: a block with triggerMode === true of the given type.
    if (assertion.startsWith('trigger_exists:')) {
      const triggerType = normalizeName(assertion.slice('trigger_exists:'.length))
      const triggerBlock = Object.values(params.workflowState.blocks || {}).find((block: any) => {
        if (block?.triggerMode !== true) return false
        return normalizeName(String(block?.type || '')) === triggerType
      })
      const passed = Boolean(triggerBlock)
      checks.push({ assert: assertion, passed })
      if (!passed) failures.push(`Assertion failed: ${assertion}`)
      continue
    }
    // path_exists: tokens separated by '->'; every token must resolve AND
    // every consecutive pair must share a direct edge.
    if (assertion.startsWith('path_exists:')) {
      const rawPath = assertion.slice('path_exists:'.length).trim()
      const tokens = rawPath
        .split('->')
        .map((token) => token.trim())
        .filter(Boolean)
      const resolvedPath = tokens
        .map((token) => resolveBlockToken(params.workflowState, token))
        .filter((value): value is string => Boolean(value))
      const resolvedAll = resolvedPath.length === tokens.length
      const passed = resolvedAll && hasPath(params.workflowState, resolvedPath)
      checks.push({
        assert: assertion,
        passed,
        resolvedPath,
      })
      if (!passed) failures.push(`Assertion failed: ${assertion}`)
      continue
    }
    // Unknown assertion format should not fail structural verification.
    // Keep explicit visibility via warnings/check metadata.
    checks.push({ assert: assertion, passed: false, reason: 'unknown_assertion_type' })
    warnings.push(`Unknown assertion format: ${assertion}`)
  }
  return { failures, warnings, checks }
}
/**
 * Copilot server tool that verifies a workflow's persisted state.
 *
 * Verification pipeline:
 *  1. Workspace-permission check (read access) for the requesting user.
 *  2. Optional snapshot-hash comparison against `baseSnapshotHash` to detect
 *     concurrent edits since the caller last read the workflow.
 *  3. Structural validation of the workflow state (non-sanitizing).
 *  4. Evaluation of caller-supplied acceptance assertions.
 *
 * `verified` is true only when validation passes with zero errors and every
 * assertion holds.
 */
export const workflowVerifyServerTool: BaseServerTool<WorkflowVerifyParams, any> = {
  name: 'workflow_verify',
  inputSchema: WorkflowVerifyInputSchema,
  async execute(params: WorkflowVerifyParams, context?: { userId: string }): Promise<any> {
    // A user identity is mandatory; authorization is scoped to the workspace.
    if (!context?.userId) {
      throw new Error('Unauthorized workflow access')
    }
    const authorization = await authorizeWorkflowByWorkspacePermission({
      workflowId: params.workflowId,
      userId: context.userId,
      action: 'read',
    })
    if (!authorization.allowed) {
      throw new Error(authorization.message || 'Unauthorized workflow access')
    }
    const { workflowState } = await loadWorkflowStateFromDb(params.workflowId)
    const snapshotHash = hashWorkflowState(workflowState as unknown as Record<string, unknown>)
    // If the caller pinned a base snapshot, report a mismatch instead of
    // verifying stale expectations (a result, not a thrown error).
    if (params.baseSnapshotHash && params.baseSnapshotHash !== snapshotHash) {
      return {
        success: false,
        verified: false,
        reason: 'snapshot_mismatch',
        expected: params.baseSnapshotHash,
        current: snapshotHash,
      }
    }
    const validation = validateWorkflowState(workflowState as any, { sanitize: false })
    // Acceptance items may be raw strings or { assert } objects; normalize to strings.
    const assertions = (params.acceptance || []).map((item) =>
      typeof item === 'string' ? item : item.assert
    )
    const assertionResults = evaluateAssertions({
      workflowState,
      assertions,
    })
    const verified =
      validation.valid && assertionResults.failures.length === 0 && validation.errors.length === 0
    logger.info('Workflow verification complete', {
      workflowId: params.workflowId,
      verified,
      errorCount: validation.errors.length,
      warningCount: validation.warnings.length,
      assertionFailures: assertionResults.failures.length,
      assertionWarnings: assertionResults.warnings.length,
    })
    return {
      success: true,
      verified,
      snapshotHash,
      validation: {
        valid: validation.valid,
        errors: validation.errors,
        warnings: validation.warnings,
      },
      assertions: assertionResults.checks,
      failures: assertionResults.failures,
      warnings: assertionResults.warnings,
    }
  },
}

View File

@@ -5,7 +5,6 @@ import {
type GenerateContentConfig, type GenerateContentConfig,
type GenerateContentResponse, type GenerateContentResponse,
type GoogleGenAI, type GoogleGenAI,
type Interactions,
type Part, type Part,
type Schema, type Schema,
type ThinkingConfig, type ThinkingConfig,
@@ -28,7 +27,6 @@ import {
import type { FunctionCallResponse, ProviderRequest, ProviderResponse } from '@/providers/types' import type { FunctionCallResponse, ProviderRequest, ProviderResponse } from '@/providers/types'
import { import {
calculateCost, calculateCost,
isDeepResearchModel,
prepareToolExecution, prepareToolExecution,
prepareToolsWithUsageControl, prepareToolsWithUsageControl,
} from '@/providers/utils' } from '@/providers/utils'
@@ -383,468 +381,6 @@ export interface GeminiExecutionConfig {
providerType: GeminiProviderType providerType: GeminiProviderType
} }
const DEEP_RESEARCH_POLL_INTERVAL_MS = 10_000
const DEEP_RESEARCH_MAX_DURATION_MS = 60 * 60 * 1000
/**
* Sleeps for the specified number of milliseconds
*/
function sleep(ms: number): Promise<void> {
return new Promise((resolve) => setTimeout(resolve, ms))
}
/**
* Collapses a ProviderRequest into a single input string and optional system instruction
* for the Interactions API, which takes a flat input rather than a messages array.
*
* Deep research is single-turn only — it takes one research query and returns a report.
* Memory/conversation history is hidden in the UI for deep research models, so only
* the last user message is used as input. System messages are passed via system_instruction.
*/
function collapseMessagesToInput(request: ProviderRequest): {
input: string
systemInstruction: string | undefined
} {
const systemParts: string[] = []
const userParts: string[] = []
if (request.systemPrompt) {
systemParts.push(request.systemPrompt)
}
if (request.messages) {
for (const msg of request.messages) {
if (msg.role === 'system' && msg.content) {
systemParts.push(msg.content)
} else if (msg.role === 'user' && msg.content) {
userParts.push(msg.content)
}
}
}
return {
input:
userParts.length > 0
? userParts[userParts.length - 1]
: 'Please conduct research on the provided topic.',
systemInstruction: systemParts.length > 0 ? systemParts.join('\n\n') : undefined,
}
}
/**
* Extracts text content from a completed interaction's outputs array.
* The outputs array can contain text, thought, google_search_result, and other types.
* We concatenate all text outputs to get the full research report.
*/
function extractTextFromInteractionOutputs(outputs: Interactions.Interaction['outputs']): string {
if (!outputs || outputs.length === 0) return ''
const textParts: string[] = []
for (const output of outputs) {
if (output.type === 'text') {
const text = (output as Interactions.TextContent).text
if (text) textParts.push(text)
}
}
return textParts.join('\n\n')
}
/**
* Extracts token usage from an Interaction's Usage object.
* The Interactions API provides total_input_tokens, total_output_tokens, total_tokens,
* and total_reasoning_tokens (for thinking models).
*
* Also handles the raw API field name total_thought_tokens which the SDK may
* map to total_reasoning_tokens.
*/
function extractInteractionUsage(usage: Interactions.Usage | undefined): {
inputTokens: number
outputTokens: number
reasoningTokens: number
totalTokens: number
} {
if (!usage) {
return { inputTokens: 0, outputTokens: 0, reasoningTokens: 0, totalTokens: 0 }
}
const usageLogger = createLogger('DeepResearchUsage')
usageLogger.info('Raw interaction usage', { usage: JSON.stringify(usage) })
const inputTokens = usage.total_input_tokens ?? 0
const outputTokens = usage.total_output_tokens ?? 0
const reasoningTokens =
usage.total_reasoning_tokens ??
((usage as Record<string, unknown>).total_thought_tokens as number) ??
0
const totalTokens = usage.total_tokens ?? inputTokens + outputTokens
return { inputTokens, outputTokens, reasoningTokens, totalTokens }
}
/**
* Builds a standard ProviderResponse from a completed deep research interaction.
*/
function buildDeepResearchResponse(
content: string,
model: string,
usage: {
inputTokens: number
outputTokens: number
reasoningTokens: number
totalTokens: number
},
providerStartTime: number,
providerStartTimeISO: string,
interactionId?: string
): ProviderResponse {
const providerEndTime = Date.now()
const duration = providerEndTime - providerStartTime
return {
content,
model,
tokens: {
input: usage.inputTokens,
output: usage.outputTokens,
total: usage.totalTokens,
},
timing: {
startTime: providerStartTimeISO,
endTime: new Date(providerEndTime).toISOString(),
duration,
modelTime: duration,
toolsTime: 0,
firstResponseTime: duration,
iterations: 1,
timeSegments: [
{
type: 'model',
name: 'Deep research',
startTime: providerStartTime,
endTime: providerEndTime,
duration,
},
],
},
cost: calculateCost(model, usage.inputTokens, usage.outputTokens),
interactionId,
}
}
/**
* Creates a ReadableStream from a deep research streaming interaction.
*
* Deep research streaming returns InteractionSSEEvent chunks including:
* - interaction.start: initial interaction with ID
* - content.delta: incremental text and thought_summary updates
* - content.start / content.stop: output boundaries
* - interaction.complete: final event (outputs is undefined in streaming; must reconstruct)
* - error: error events
*
* We stream text deltas to the client and track usage from the interaction.complete event.
*/
function createDeepResearchStream(
stream: AsyncIterable<Interactions.InteractionSSEEvent>,
onComplete?: (
content: string,
usage: {
inputTokens: number
outputTokens: number
reasoningTokens: number
totalTokens: number
},
interactionId?: string
) => void
): ReadableStream<Uint8Array> {
const streamLogger = createLogger('DeepResearchStream')
let fullContent = ''
let completionUsage = { inputTokens: 0, outputTokens: 0, reasoningTokens: 0, totalTokens: 0 }
let completedInteractionId: string | undefined
return new ReadableStream({
async start(controller) {
try {
for await (const event of stream) {
if (event.event_type === 'content.delta') {
const delta = (event as Interactions.ContentDelta).delta
if (delta?.type === 'text' && 'text' in delta && delta.text) {
fullContent += delta.text
controller.enqueue(new TextEncoder().encode(delta.text))
}
} else if (event.event_type === 'interaction.complete') {
const interaction = (event as Interactions.InteractionEvent).interaction
if (interaction?.usage) {
completionUsage = extractInteractionUsage(interaction.usage)
}
completedInteractionId = interaction?.id
} else if (event.event_type === 'interaction.start') {
const interaction = (event as Interactions.InteractionEvent).interaction
if (interaction?.id) {
completedInteractionId = interaction.id
}
} else if (event.event_type === 'error') {
const errorEvent = event as { error?: { code?: string; message?: string } }
const message = errorEvent.error?.message ?? 'Unknown deep research stream error'
streamLogger.error('Deep research stream error', {
code: errorEvent.error?.code,
message,
})
controller.error(new Error(message))
return
}
}
onComplete?.(fullContent, completionUsage, completedInteractionId)
controller.close()
} catch (error) {
streamLogger.error('Error reading deep research stream', {
error: error instanceof Error ? error.message : String(error),
})
controller.error(error)
}
},
})
}
/**
* Executes a deep research request using the Interactions API.
*
* Deep research uses the Interactions API ({@link https://ai.google.dev/api/interactions-api}),
* a completely different surface from generateContent. It creates a background interaction
* that performs comprehensive research (up to 60 minutes).
*
* Supports both streaming and non-streaming modes:
* - Streaming: returns a StreamingExecution with a ReadableStream of text deltas
* - Non-streaming: polls until completion and returns a ProviderResponse
*
* Deep research does NOT support custom function calling tools, MCP servers,
* or structured output (response_format). These are gracefully ignored.
*/
export async function executeDeepResearchRequest(
config: GeminiExecutionConfig
): Promise<ProviderResponse | StreamingExecution> {
const { ai, model, request, providerType } = config
const logger = createLogger(providerType === 'google' ? 'GoogleProvider' : 'VertexProvider')
logger.info('Preparing deep research request', {
model,
hasSystemPrompt: !!request.systemPrompt,
hasMessages: !!request.messages?.length,
streaming: !!request.stream,
hasPreviousInteractionId: !!request.previousInteractionId,
})
if (request.tools?.length) {
logger.warn('Deep research does not support custom tools — ignoring tools parameter')
}
if (request.responseFormat) {
logger.warn(
'Deep research does not support structured output — ignoring responseFormat parameter'
)
}
const providerStartTime = Date.now()
const providerStartTimeISO = new Date(providerStartTime).toISOString()
try {
const { input, systemInstruction } = collapseMessagesToInput(request)
// Deep research requires background=true and store=true (store defaults to true,
// but we set it explicitly per API requirements)
const baseParams = {
agent: model as Interactions.CreateAgentInteractionParamsNonStreaming['agent'],
input,
background: true,
store: true,
...(systemInstruction && { system_instruction: systemInstruction }),
...(request.previousInteractionId && {
previous_interaction_id: request.previousInteractionId,
}),
agent_config: {
type: 'deep-research' as const,
thinking_summaries: 'auto' as const,
},
}
logger.info('Creating deep research interaction', {
inputLength: input.length,
hasSystemInstruction: !!systemInstruction,
streaming: !!request.stream,
})
// Streaming mode: create a streaming interaction and return a StreamingExecution
if (request.stream) {
const streamParams: Interactions.CreateAgentInteractionParamsStreaming = {
...baseParams,
stream: true,
}
const streamResponse = await ai.interactions.create(streamParams)
const firstResponseTime = Date.now() - providerStartTime
const streamingResult: StreamingExecution = {
stream: undefined as unknown as ReadableStream<Uint8Array>,
execution: {
success: true,
output: {
content: '',
model,
tokens: { input: 0, output: 0, total: 0 },
providerTiming: {
startTime: providerStartTimeISO,
endTime: new Date().toISOString(),
duration: Date.now() - providerStartTime,
modelTime: firstResponseTime,
toolsTime: 0,
firstResponseTime,
iterations: 1,
timeSegments: [
{
type: 'model',
name: 'Deep research (streaming)',
startTime: providerStartTime,
endTime: providerStartTime + firstResponseTime,
duration: firstResponseTime,
},
],
},
cost: {
input: 0,
output: 0,
total: 0,
pricing: { input: 0, output: 0, updatedAt: new Date().toISOString() },
},
},
logs: [],
metadata: {
startTime: providerStartTimeISO,
endTime: new Date().toISOString(),
duration: Date.now() - providerStartTime,
},
isStreaming: true,
},
}
streamingResult.stream = createDeepResearchStream(
streamResponse,
(content, usage, streamInteractionId) => {
streamingResult.execution.output.content = content
streamingResult.execution.output.tokens = {
input: usage.inputTokens,
output: usage.outputTokens,
total: usage.totalTokens,
}
streamingResult.execution.output.interactionId = streamInteractionId
const cost = calculateCost(model, usage.inputTokens, usage.outputTokens)
streamingResult.execution.output.cost = cost
const streamEndTime = Date.now()
if (streamingResult.execution.output.providerTiming) {
streamingResult.execution.output.providerTiming.endTime = new Date(
streamEndTime
).toISOString()
streamingResult.execution.output.providerTiming.duration =
streamEndTime - providerStartTime
const segments = streamingResult.execution.output.providerTiming.timeSegments
if (segments?.[0]) {
segments[0].endTime = streamEndTime
segments[0].duration = streamEndTime - providerStartTime
}
}
}
)
return streamingResult
}
// Non-streaming mode: create and poll
const createParams: Interactions.CreateAgentInteractionParamsNonStreaming = {
...baseParams,
stream: false,
}
const interaction = await ai.interactions.create(createParams)
const interactionId = interaction.id
logger.info('Deep research interaction created', { interactionId, status: interaction.status })
// Poll until a terminal status
const pollStartTime = Date.now()
let result: Interactions.Interaction = interaction
while (Date.now() - pollStartTime < DEEP_RESEARCH_MAX_DURATION_MS) {
if (result.status === 'completed') {
break
}
if (result.status === 'failed') {
throw new Error(`Deep research interaction failed: ${interactionId}`)
}
if (result.status === 'cancelled') {
throw new Error(`Deep research interaction was cancelled: ${interactionId}`)
}
logger.info('Deep research in progress, polling...', {
interactionId,
status: result.status,
elapsedMs: Date.now() - pollStartTime,
})
await sleep(DEEP_RESEARCH_POLL_INTERVAL_MS)
result = await ai.interactions.get(interactionId)
}
if (result.status !== 'completed') {
throw new Error(
`Deep research timed out after ${DEEP_RESEARCH_MAX_DURATION_MS / 1000}s (status: ${result.status})`
)
}
const content = extractTextFromInteractionOutputs(result.outputs)
const usage = extractInteractionUsage(result.usage)
logger.info('Deep research completed', {
interactionId,
contentLength: content.length,
inputTokens: usage.inputTokens,
outputTokens: usage.outputTokens,
reasoningTokens: usage.reasoningTokens,
totalTokens: usage.totalTokens,
durationMs: Date.now() - providerStartTime,
})
return buildDeepResearchResponse(
content,
model,
usage,
providerStartTime,
providerStartTimeISO,
interactionId
)
} catch (error) {
const providerEndTime = Date.now()
const duration = providerEndTime - providerStartTime
logger.error('Error in deep research request:', {
error: error instanceof Error ? error.message : String(error),
stack: error instanceof Error ? error.stack : undefined,
})
const enhancedError = error instanceof Error ? error : new Error(String(error))
Object.assign(enhancedError, {
timing: {
startTime: providerStartTimeISO,
endTime: new Date(providerEndTime).toISOString(),
duration,
},
})
throw enhancedError
}
}
/** /**
* Executes a request using the Gemini API * Executes a request using the Gemini API
* *
@@ -855,12 +391,6 @@ export async function executeGeminiRequest(
config: GeminiExecutionConfig config: GeminiExecutionConfig
): Promise<ProviderResponse | StreamingExecution> { ): Promise<ProviderResponse | StreamingExecution> {
const { ai, model, request, providerType } = config const { ai, model, request, providerType } = config
// Route deep research models to the interactions API
if (isDeepResearchModel(model)) {
return executeDeepResearchRequest(config)
}
const logger = createLogger(providerType === 'google' ? 'GoogleProvider' : 'VertexProvider') const logger = createLogger(providerType === 'google' ? 'GoogleProvider' : 'VertexProvider')
logger.info(`Preparing ${providerType} Gemini request`, { logger.info(`Preparing ${providerType} Gemini request`, {

View File

@@ -46,9 +46,6 @@ export interface ModelCapabilities {
levels: string[] levels: string[]
default?: string default?: string
} }
deepResearch?: boolean
/** Whether this model supports conversation memory. Defaults to true if omitted. */
memory?: boolean
} }
export interface ModelDefinition { export interface ModelDefinition {
@@ -828,7 +825,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
name: 'Google', name: 'Google',
description: "Google's Gemini models", description: "Google's Gemini models",
defaultModel: 'gemini-2.5-pro', defaultModel: 'gemini-2.5-pro',
modelPatterns: [/^gemini/, /^deep-research/], modelPatterns: [/^gemini/],
capabilities: { capabilities: {
toolUsageControl: true, toolUsageControl: true,
}, },
@@ -931,19 +928,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
}, },
contextWindow: 1000000, contextWindow: 1000000,
}, },
{
id: 'deep-research-pro-preview-12-2025',
pricing: {
input: 2.0,
output: 2.0,
updatedAt: '2026-02-10',
},
capabilities: {
deepResearch: true,
memory: false,
},
contextWindow: 1000000,
},
], ],
}, },
vertex: { vertex: {
@@ -1054,19 +1038,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
}, },
contextWindow: 1000000, contextWindow: 1000000,
}, },
{
id: 'vertex/deep-research-pro-preview-12-2025',
pricing: {
input: 2.0,
output: 2.0,
updatedAt: '2026-02-10',
},
capabilities: {
deepResearch: true,
memory: false,
},
contextWindow: 1000000,
},
], ],
}, },
deepseek: { deepseek: {
@@ -2509,37 +2480,6 @@ export function getThinkingLevelsForModel(modelId: string): string[] | null {
return capability?.levels ?? null return capability?.levels ?? null
} }
/**
* Get all models that support deep research capability
*/
export function getModelsWithDeepResearch(): string[] {
const models: string[] = []
for (const provider of Object.values(PROVIDER_DEFINITIONS)) {
for (const model of provider.models) {
if (model.capabilities.deepResearch) {
models.push(model.id)
}
}
}
return models
}
/**
* Get all models that explicitly disable memory support (memory: false).
* Models without this capability default to supporting memory.
*/
export function getModelsWithoutMemory(): string[] {
const models: string[] = []
for (const provider of Object.values(PROVIDER_DEFINITIONS)) {
for (const model of provider.models) {
if (model.capabilities.memory === false) {
models.push(model.id)
}
}
}
return models
}
/** /**
* Get the max output tokens for a specific model. * Get the max output tokens for a specific model.
* *

View File

@@ -95,8 +95,6 @@ export interface ProviderResponse {
total: number total: number
pricing: ModelPricing pricing: ModelPricing
} }
/** Interaction ID returned by the Interactions API (used for multi-turn deep research) */
interactionId?: string
} }
export type ToolUsageControl = 'auto' | 'force' | 'none' export type ToolUsageControl = 'auto' | 'force' | 'none'
@@ -171,8 +169,6 @@ export interface ProviderRequest {
verbosity?: string verbosity?: string
thinkingLevel?: string thinkingLevel?: string
isDeployedContext?: boolean isDeployedContext?: boolean
/** Previous interaction ID for multi-turn Interactions API requests (deep research follow-ups) */
previousInteractionId?: string
} }
export const providers: Record<string, ProviderConfig> = {} export const providers: Record<string, ProviderConfig> = {}

View File

@@ -12,8 +12,6 @@ import {
getMaxOutputTokensForModel as getMaxOutputTokensForModelFromDefinitions, getMaxOutputTokensForModel as getMaxOutputTokensForModelFromDefinitions,
getMaxTemperature as getMaxTempFromDefinitions, getMaxTemperature as getMaxTempFromDefinitions,
getModelPricing as getModelPricingFromDefinitions, getModelPricing as getModelPricingFromDefinitions,
getModelsWithDeepResearch,
getModelsWithoutMemory,
getModelsWithReasoningEffort, getModelsWithReasoningEffort,
getModelsWithTemperatureSupport, getModelsWithTemperatureSupport,
getModelsWithTempRange01, getModelsWithTempRange01,
@@ -955,8 +953,6 @@ export const MODELS_WITH_TEMPERATURE_SUPPORT = getModelsWithTemperatureSupport()
export const MODELS_WITH_REASONING_EFFORT = getModelsWithReasoningEffort() export const MODELS_WITH_REASONING_EFFORT = getModelsWithReasoningEffort()
export const MODELS_WITH_VERBOSITY = getModelsWithVerbosity() export const MODELS_WITH_VERBOSITY = getModelsWithVerbosity()
export const MODELS_WITH_THINKING = getModelsWithThinking() export const MODELS_WITH_THINKING = getModelsWithThinking()
export const MODELS_WITH_DEEP_RESEARCH = getModelsWithDeepResearch()
export const MODELS_WITHOUT_MEMORY = getModelsWithoutMemory()
export const PROVIDERS_WITH_TOOL_USAGE_CONTROL = getProvidersWithToolUsageControl() export const PROVIDERS_WITH_TOOL_USAGE_CONTROL = getProvidersWithToolUsageControl()
export function supportsTemperature(model: string): boolean { export function supportsTemperature(model: string): boolean {
@@ -975,10 +971,6 @@ export function supportsThinking(model: string): boolean {
return MODELS_WITH_THINKING.includes(model.toLowerCase()) return MODELS_WITH_THINKING.includes(model.toLowerCase())
} }
/** True when the model is one of the catalog's deep-research-capable models. */
export function isDeepResearchModel(model: string): boolean {
  const normalized = model.toLowerCase()
  return MODELS_WITH_DEEP_RESEARCH.some((id) => id === normalized)
}
/** /**
* Get the maximum temperature value for a model * Get the maximum temperature value for a model
* @returns Maximum temperature value (1 or 2) or undefined if temperature not supported * @returns Maximum temperature value (1 or 2) or undefined if temperature not supported

Binary file not shown.

Before

Width:  |  Height:  |  Size: 78 KiB

After

Width:  |  Height:  |  Size: 45 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 58 KiB

After

Width:  |  Height:  |  Size: 58 KiB

View File

@@ -18,6 +18,7 @@ import {
import { flushStreamingUpdates, stopStreamingUpdates } from '@/lib/copilot/client-sse/handlers' import { flushStreamingUpdates, stopStreamingUpdates } from '@/lib/copilot/client-sse/handlers'
import type { ClientContentBlock, ClientStreamingContext } from '@/lib/copilot/client-sse/types' import type { ClientContentBlock, ClientStreamingContext } from '@/lib/copilot/client-sse/types'
import { import {
COPILOT_AUTO_ALLOWED_TOOLS_API_PATH,
COPILOT_CHAT_API_PATH, COPILOT_CHAT_API_PATH,
COPILOT_CHAT_STREAM_API_PATH, COPILOT_CHAT_STREAM_API_PATH,
COPILOT_CHECKPOINTS_API_PATH, COPILOT_CHECKPOINTS_API_PATH,
@@ -38,6 +39,7 @@ import {
buildToolCallsById, buildToolCallsById,
normalizeMessagesForUI, normalizeMessagesForUI,
persistMessages, persistMessages,
persistMessagesBeacon,
saveMessageCheckpoint, saveMessageCheckpoint,
} from '@/lib/copilot/messages' } from '@/lib/copilot/messages'
import type { CopilotTransportMode } from '@/lib/copilot/models' import type { CopilotTransportMode } from '@/lib/copilot/models'
@@ -77,20 +79,34 @@ let _isPageUnloading = false
if (typeof window !== 'undefined') { if (typeof window !== 'undefined') {
window.addEventListener('beforeunload', () => { window.addEventListener('beforeunload', () => {
_isPageUnloading = true _isPageUnloading = true
// Emergency persistence: flush any pending streaming updates to the store and
// persist via sendBeacon (which is guaranteed to be queued during page teardown).
// Without this, thinking blocks and in-progress content are lost on refresh.
try {
const state = useCopilotStore.getState()
if (state.isSendingMessage && state.currentChat) {
// Flush batched streaming updates into the store messages
flushStreamingUpdates(useCopilotStore.setState.bind(useCopilotStore))
const flushedState = useCopilotStore.getState()
persistMessagesBeacon({
chatId: flushedState.currentChat!.id,
messages: flushedState.messages,
sensitiveCredentialIds: flushedState.sensitiveCredentialIds,
planArtifact: flushedState.streamingPlanContent || null,
mode: flushedState.mode,
model: flushedState.selectedModel,
})
}
} catch {
// Best-effort — don't let errors prevent page unload
}
}) })
} }
function isPageUnloading(): boolean { function isPageUnloading(): boolean {
return _isPageUnloading return _isPageUnloading
} }
/**
 * Determine whether a tool call represents a workflow edit.
 * True only for `workflow_change` calls that are in `apply` mode or that
 * carry a non-empty `proposalId`.
 */
function isWorkflowEditToolCall(name?: string, params?: Record<string, unknown>): boolean {
  if (name !== 'workflow_change') return false
  const rawMode = params?.mode
  if (typeof rawMode === 'string' && rawMode.toLowerCase() === 'apply') return true
  const proposalId = params?.proposalId
  return typeof proposalId === 'string' && proposalId.length > 0
}
function readActiveStreamFromStorage(): CopilotStreamInfo | null { function readActiveStreamFromStorage(): CopilotStreamInfo | null {
if (typeof window === 'undefined') return null if (typeof window === 'undefined') return null
try { try {
@@ -147,6 +163,41 @@ function updateActiveStreamEventId(
writeActiveStreamToStorage(next) writeActiveStreamToStorage(next)
} }
const AUTO_ALLOWED_TOOLS_STORAGE_KEY = 'copilot_auto_allowed_tools'
/**
 * Read the locally cached auto-allowed tool IDs.
 * Returns null when running server-side, when nothing is cached, or when
 * the cached value is missing/malformed; parse failures are logged.
 */
function readAutoAllowedToolsFromStorage(): string[] | null {
  if (typeof window === 'undefined') return null
  try {
    const stored = window.localStorage.getItem(AUTO_ALLOWED_TOOLS_STORAGE_KEY)
    if (!stored) return null
    const decoded: unknown = JSON.parse(stored)
    if (!Array.isArray(decoded)) return null
    // Drop any non-string entries rather than trusting the cache blindly.
    return decoded.filter((entry): entry is string => typeof entry === 'string')
  } catch (error) {
    logger.warn('[AutoAllowedTools] Failed to read local cache', {
      error: error instanceof Error ? error.message : String(error),
    })
    return null
  }
}
/**
 * Persist the auto-allowed tool IDs to localStorage as a JSON array.
 * No-op on the server; storage failures are logged and swallowed.
 */
function writeAutoAllowedToolsToStorage(tools: string[]): void {
  if (typeof window === 'undefined') return
  try {
    const serialized = JSON.stringify(tools)
    window.localStorage.setItem(AUTO_ALLOWED_TOOLS_STORAGE_KEY, serialized)
  } catch (error) {
    logger.warn('[AutoAllowedTools] Failed to write local cache', {
      error: error instanceof Error ? error.message : String(error),
    })
  }
}
/**
 * Check whether `toolId` appears in the auto-allowed list, comparing
 * entries with surrounding whitespace trimmed on both sides.
 */
function isToolAutoAllowedByList(toolId: string, autoAllowedTools: string[]): boolean {
  if (!toolId) return false
  const target = toolId.trim()
  for (const allowed of autoAllowedTools) {
    if (allowed?.trim() === target) return true
  }
  return false
}
/** /**
* Clear any lingering diff preview from a previous session. * Clear any lingering diff preview from a previous session.
* Called lazily when the store is first activated (setWorkflowId). * Called lazily when the store is first activated (setWorkflowId).
@@ -282,50 +333,6 @@ function parseModelKey(compositeKey: string): { provider: string; modelId: strin
return { provider: compositeKey.slice(0, slashIdx), modelId: compositeKey.slice(slashIdx + 1) } return { provider: compositeKey.slice(0, slashIdx), modelId: compositeKey.slice(slashIdx + 1) }
} }
/**
 * Convert legacy/variant Claude IDs into the canonical ID shape used by the model catalog.
 *
 * Examples:
 *  - claude-4.5-opus -> claude-opus-4-5
 *  - claude-opus-4.6 -> claude-opus-4-6
 *  - anthropic.claude-opus-4-5-20251101-v1:0 -> claude-opus-4-5 (match key only)
 *
 * Non-Claude IDs are returned lowercased/trimmed but otherwise unchanged.
 */
function canonicalizeModelMatchKey(modelId: string): string {
  if (!modelId) return modelId
  const normalized = modelId.trim().toLowerCase()

  // Ordered from most to least specific: whole-string matches first, then
  // embedded matches (e.g. vendor-prefixed Bedrock ARNs).
  const patterns: Array<{ regex: RegExp; tierIndex: number; versionIndex: number }> = [
    { regex: /^claude-(opus|sonnet|haiku)-(\d+(?:[.-]\d+)?)$/, tierIndex: 1, versionIndex: 2 },
    { regex: /^claude-(\d+(?:[.-]\d+)?)-(opus|sonnet|haiku)$/, tierIndex: 2, versionIndex: 1 },
    { regex: /claude-(opus|sonnet|haiku)-(\d+(?:[.-]\d+)?)/, tierIndex: 1, versionIndex: 2 },
    { regex: /claude-(\d+(?:[.-]\d+)?)-(opus|sonnet|haiku)/, tierIndex: 2, versionIndex: 1 },
  ]

  for (const { regex, tierIndex, versionIndex } of patterns) {
    const match = normalized.match(regex)
    if (match) {
      const tier = match[tierIndex]
      // Dots in version numbers become dashes: 4.5 -> 4-5.
      const version = match[versionIndex].replace(/\./g, '-')
      return `claude-${tier}-${version}`
    }
  }

  return normalized
}
const MODEL_PROVIDER_PRIORITY = [ const MODEL_PROVIDER_PRIORITY = [
'anthropic', 'anthropic',
'bedrock', 'bedrock',
@@ -366,23 +373,12 @@ function normalizeSelectedModelKey(selectedModel: string, models: AvailableModel
const { provider, modelId } = parseModelKey(selectedModel) const { provider, modelId } = parseModelKey(selectedModel)
const targetModelId = modelId || selectedModel const targetModelId = modelId || selectedModel
const targetMatchKey = canonicalizeModelMatchKey(targetModelId)
const matches = models.filter((m) => { const matches = models.filter((m) => m.id.endsWith(`/${targetModelId}`))
const candidateModelId = parseModelKey(m.id).modelId || m.id
const candidateMatchKey = canonicalizeModelMatchKey(candidateModelId)
return (
candidateModelId === targetModelId ||
m.id.endsWith(`/${targetModelId}`) ||
candidateMatchKey === targetMatchKey
)
})
if (matches.length === 0) return selectedModel if (matches.length === 0) return selectedModel
if (provider) { if (provider) {
const sameProvider = matches.find( const sameProvider = matches.find((m) => m.provider === provider)
(m) => m.provider === provider || m.id.startsWith(`${provider}/`)
)
if (sameProvider) return sameProvider.id if (sameProvider) return sameProvider.id
} }
@@ -452,6 +448,11 @@ function prepareSendContext(
.catch((err) => { .catch((err) => {
logger.warn('[Copilot] Failed to load sensitive credential IDs', err) logger.warn('[Copilot] Failed to load sensitive credential IDs', err)
}) })
get()
.loadAutoAllowedTools()
.catch((err) => {
logger.warn('[Copilot] Failed to load auto-allowed tools', err)
})
let newMessages: CopilotMessage[] let newMessages: CopilotMessage[]
if (revertState) { if (revertState) {
@@ -1004,6 +1005,8 @@ async function resumeFromLiveStream(
return false return false
} }
const cachedAutoAllowedTools = readAutoAllowedToolsFromStorage()
// Initial state (subset required for UI/streaming) // Initial state (subset required for UI/streaming)
const initialState = { const initialState = {
mode: 'build' as const, mode: 'build' as const,
@@ -1038,6 +1041,8 @@ const initialState = {
streamingPlanContent: '', streamingPlanContent: '',
toolCallsById: {} as Record<string, CopilotToolCall>, toolCallsById: {} as Record<string, CopilotToolCall>,
suppressAutoSelect: false, suppressAutoSelect: false,
autoAllowedTools: cachedAutoAllowedTools ?? ([] as string[]),
autoAllowedToolsLoaded: cachedAutoAllowedTools !== null,
activeStream: null as CopilotStreamInfo | null, activeStream: null as CopilotStreamInfo | null,
messageQueue: [] as import('./types').QueuedMessage[], messageQueue: [] as import('./types').QueuedMessage[],
suppressAbortContinueOption: false, suppressAbortContinueOption: false,
@@ -1076,6 +1081,8 @@ export const useCopilotStore = create<CopilotStore>()(
agentPrefetch: get().agentPrefetch, agentPrefetch: get().agentPrefetch,
availableModels: get().availableModels, availableModels: get().availableModels,
isLoadingModels: get().isLoadingModels, isLoadingModels: get().isLoadingModels,
autoAllowedTools: get().autoAllowedTools,
autoAllowedToolsLoaded: get().autoAllowedToolsLoaded,
}) })
}, },
@@ -1109,12 +1116,11 @@ export const useCopilotStore = create<CopilotStore>()(
const chatConfig = chat.config ?? {} const chatConfig = chat.config ?? {}
const chatMode = chatConfig.mode || get().mode const chatMode = chatConfig.mode || get().mode
const chatModel = chatConfig.model || get().selectedModel const chatModel = chatConfig.model || get().selectedModel
const normalizedChatModel = normalizeSelectedModelKey(chatModel, get().availableModels)
logger.debug('[Chat] Restoring chat config', { logger.debug('[Chat] Restoring chat config', {
chatId: chat.id, chatId: chat.id,
mode: chatMode, mode: chatMode,
model: normalizedChatModel, model: chatModel,
hasPlanArtifact: !!planArtifact, hasPlanArtifact: !!planArtifact,
}) })
@@ -1136,7 +1142,7 @@ export const useCopilotStore = create<CopilotStore>()(
showPlanTodos: false, showPlanTodos: false,
streamingPlanContent: planArtifact, streamingPlanContent: planArtifact,
mode: chatMode, mode: chatMode,
selectedModel: normalizedChatModel as CopilotStore['selectedModel'], selectedModel: chatModel as CopilotStore['selectedModel'],
suppressAutoSelect: false, suppressAutoSelect: false,
}) })
@@ -1309,10 +1315,6 @@ export const useCopilotStore = create<CopilotStore>()(
const refreshedConfig = updatedCurrentChat.config ?? {} const refreshedConfig = updatedCurrentChat.config ?? {}
const refreshedMode = refreshedConfig.mode || get().mode const refreshedMode = refreshedConfig.mode || get().mode
const refreshedModel = refreshedConfig.model || get().selectedModel const refreshedModel = refreshedConfig.model || get().selectedModel
const normalizedRefreshedModel = normalizeSelectedModelKey(
refreshedModel,
get().availableModels
)
const toolCallsById = buildToolCallsById(normalizedMessages) const toolCallsById = buildToolCallsById(normalizedMessages)
set({ set({
@@ -1321,7 +1323,7 @@ export const useCopilotStore = create<CopilotStore>()(
toolCallsById, toolCallsById,
streamingPlanContent: refreshedPlanArtifact, streamingPlanContent: refreshedPlanArtifact,
mode: refreshedMode, mode: refreshedMode,
selectedModel: normalizedRefreshedModel as CopilotStore['selectedModel'], selectedModel: refreshedModel as CopilotStore['selectedModel'],
}) })
} }
try { try {
@@ -1341,15 +1343,11 @@ export const useCopilotStore = create<CopilotStore>()(
const chatConfig = mostRecentChat.config ?? {} const chatConfig = mostRecentChat.config ?? {}
const chatMode = chatConfig.mode || get().mode const chatMode = chatConfig.mode || get().mode
const chatModel = chatConfig.model || get().selectedModel const chatModel = chatConfig.model || get().selectedModel
const normalizedChatModel = normalizeSelectedModelKey(
chatModel,
get().availableModels
)
logger.info('[Chat] Auto-selecting most recent chat with config', { logger.info('[Chat] Auto-selecting most recent chat with config', {
chatId: mostRecentChat.id, chatId: mostRecentChat.id,
mode: chatMode, mode: chatMode,
model: normalizedChatModel, model: chatModel,
hasPlanArtifact: !!planArtifact, hasPlanArtifact: !!planArtifact,
}) })
@@ -1361,7 +1359,7 @@ export const useCopilotStore = create<CopilotStore>()(
toolCallsById, toolCallsById,
streamingPlanContent: planArtifact, streamingPlanContent: planArtifact,
mode: chatMode, mode: chatMode,
selectedModel: normalizedChatModel as CopilotStore['selectedModel'], selectedModel: chatModel as CopilotStore['selectedModel'],
}) })
try { try {
await get().loadMessageCheckpoints(mostRecentChat.id) await get().loadMessageCheckpoints(mostRecentChat.id)
@@ -1390,6 +1388,16 @@ export const useCopilotStore = create<CopilotStore>()(
// Send a message (streaming only) // Send a message (streaming only)
sendMessage: async (message: string, options = {}) => { sendMessage: async (message: string, options = {}) => {
if (!get().autoAllowedToolsLoaded) {
try {
await get().loadAutoAllowedTools()
} catch (error) {
logger.warn('[Copilot] Failed to preload auto-allowed tools before send', {
error: error instanceof Error ? error.message : String(error),
})
}
}
const prepared = prepareSendContext(get, set, message, options as SendMessageOptionsInput) const prepared = prepareSendContext(get, set, message, options as SendMessageOptionsInput)
if (!prepared) return if (!prepared) return
@@ -1476,19 +1484,26 @@ export const useCopilotStore = create<CopilotStore>()(
// Immediately put all in-progress tools into aborted state // Immediately put all in-progress tools into aborted state
abortAllInProgressTools(set, get) abortAllInProgressTools(set, get)
// Persist whatever contentBlocks/text we have to keep ordering for reloads // Persist whatever contentBlocks/text we have to keep ordering for reloads.
// During page unload, use sendBeacon which is guaranteed to be queued even
// as the page tears down. Regular async fetch won't complete in time.
const { currentChat, streamingPlanContent, mode, selectedModel } = get() const { currentChat, streamingPlanContent, mode, selectedModel } = get()
if (currentChat) { if (currentChat) {
try { try {
const currentMessages = get().messages const currentMessages = get().messages
void persistMessages({ const persistParams = {
chatId: currentChat.id, chatId: currentChat.id,
messages: currentMessages, messages: currentMessages,
sensitiveCredentialIds: get().sensitiveCredentialIds, sensitiveCredentialIds: get().sensitiveCredentialIds,
planArtifact: streamingPlanContent || null, planArtifact: streamingPlanContent || null,
mode, mode,
model: selectedModel, model: selectedModel,
}) }
if (isPageUnloading()) {
persistMessagesBeacon(persistParams)
} else {
void persistMessages(persistParams)
}
} catch (error) { } catch (error) {
logger.warn('[Copilot] Failed to queue abort snapshot persistence', { logger.warn('[Copilot] Failed to queue abort snapshot persistence', {
error: error instanceof Error ? error.message : String(error), error: error instanceof Error ? error.message : String(error),
@@ -1656,7 +1671,7 @@ export const useCopilotStore = create<CopilotStore>()(
const b = blocks[bi] const b = blocks[bi]
if (b?.type === 'tool_call') { if (b?.type === 'tool_call') {
const tn = b.toolCall?.name const tn = b.toolCall?.name
if (isWorkflowEditToolCall(tn, b.toolCall?.params)) { if (tn === 'edit_workflow') {
id = b.toolCall?.id id = b.toolCall?.id
break outer break outer
} }
@@ -1665,9 +1680,7 @@ export const useCopilotStore = create<CopilotStore>()(
} }
// Fallback to map if not found in messages // Fallback to map if not found in messages
if (!id) { if (!id) {
const candidates = Object.values(toolCallsById).filter((t) => const candidates = Object.values(toolCallsById).filter((t) => t.name === 'edit_workflow')
isWorkflowEditToolCall(t.name, t.params)
)
id = candidates.length ? candidates[candidates.length - 1].id : undefined id = candidates.length ? candidates[candidates.length - 1].id : undefined
} }
} }
@@ -1962,7 +1975,7 @@ export const useCopilotStore = create<CopilotStore>()(
} }
if (!context.wasAborted && sseHandlers.stream_end) { if (!context.wasAborted && sseHandlers.stream_end) {
sseHandlers.stream_end({ type: 'copilot.phase.completed' }, context, get, set) sseHandlers.stream_end({ type: 'done' }, context, get, set)
} }
stopStreamingUpdates() stopStreamingUpdates()
@@ -2285,8 +2298,7 @@ export const useCopilotStore = create<CopilotStore>()(
}, },
setSelectedModel: async (model) => { setSelectedModel: async (model) => {
const normalizedModel = normalizeSelectedModelKey(model, get().availableModels) set({ selectedModel: model })
set({ selectedModel: normalizedModel as CopilotStore['selectedModel'] })
}, },
setAgentPrefetch: (prefetch) => set({ agentPrefetch: prefetch }), setAgentPrefetch: (prefetch) => set({ agentPrefetch: prefetch }),
loadAvailableModels: async () => { loadAvailableModels: async () => {
@@ -2360,6 +2372,74 @@ export const useCopilotStore = create<CopilotStore>()(
} }
}, },
// Fetch the user's auto-allowed tool IDs from the API and cache them in both
// the store and localStorage. On any failure the loaded flag is still set so
// callers do not block or retry indefinitely.
loadAutoAllowedTools: async () => {
try {
logger.debug('[AutoAllowedTools] Loading from API...')
const res = await fetch(COPILOT_AUTO_ALLOWED_TOOLS_API_PATH)
logger.debug('[AutoAllowedTools] Load response', { status: res.status, ok: res.ok })
if (res.ok) {
const data = await res.json()
// Defensive default: a missing field means "no tools auto-allowed".
const tools = data.autoAllowedTools ?? []
set({ autoAllowedTools: tools, autoAllowedToolsLoaded: true })
writeAutoAllowedToolsToStorage(tools)
logger.debug('[AutoAllowedTools] Loaded successfully', { count: tools.length, tools })
} else {
// Non-OK response: keep the previous list but mark as loaded.
set({ autoAllowedToolsLoaded: true })
logger.warn('[AutoAllowedTools] Load failed with status', { status: res.status })
}
} catch (err) {
set({ autoAllowedToolsLoaded: true })
logger.error('[AutoAllowedTools] Failed to load', { error: err })
}
},
// Add a tool to the server-side auto-allowed list, then sync the store and
// localStorage with the full list the API returns. Failures are logged;
// local state is not changed optimistically.
addAutoAllowedTool: async (toolId: string) => {
try {
logger.debug('[AutoAllowedTools] Adding tool...', { toolId })
const res = await fetch(COPILOT_AUTO_ALLOWED_TOOLS_API_PATH, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ toolId }),
})
logger.debug('[AutoAllowedTools] API response', { toolId, status: res.status, ok: res.ok })
if (res.ok) {
const data = await res.json()
logger.debug('[AutoAllowedTools] API returned', { toolId, tools: data.autoAllowedTools })
// The API is the source of truth: replace the whole list rather than appending.
const tools = data.autoAllowedTools ?? []
set({ autoAllowedTools: tools, autoAllowedToolsLoaded: true })
writeAutoAllowedToolsToStorage(tools)
logger.debug('[AutoAllowedTools] Added tool to store', { toolId })
}
} catch (err) {
logger.error('[AutoAllowedTools] Failed to add tool', { toolId, error: err })
}
},
// Remove a tool from the server-side auto-allowed list (toolId passed as a
// URL query parameter), then sync the store and localStorage with the full
// list the API returns. Failures are logged and swallowed.
removeAutoAllowedTool: async (toolId: string) => {
try {
const res = await fetch(
`${COPILOT_AUTO_ALLOWED_TOOLS_API_PATH}?toolId=${encodeURIComponent(toolId)}`,
{
method: 'DELETE',
}
)
if (res.ok) {
const data = await res.json()
// The API is the source of truth: replace the whole list.
const tools = data.autoAllowedTools ?? []
set({ autoAllowedTools: tools, autoAllowedToolsLoaded: true })
writeAutoAllowedToolsToStorage(tools)
logger.debug('[AutoAllowedTools] Removed tool', { toolId })
}
} catch (err) {
logger.error('[AutoAllowedTools] Failed to remove tool', { toolId, error: err })
}
},
// Synchronous check against the list currently held in the store; does not
// trigger a load — callers should ensure loadAutoAllowedTools ran first.
isToolAutoAllowed: (toolId: string) => {
const { autoAllowedTools } = get()
return isToolAutoAllowedByList(toolId, autoAllowedTools)
},
// Credential masking // Credential masking
loadSensitiveCredentialIds: async () => { loadSensitiveCredentialIds: async () => {
try { try {

View File

@@ -26,26 +26,6 @@ export interface CopilotToolCall {
params?: Record<string, unknown> params?: Record<string, unknown>
input?: Record<string, unknown> input?: Record<string, unknown>
display?: ClientToolDisplay display?: ClientToolDisplay
/** Server-provided UI contract for this tool call phase */
ui?: {
title?: string
phaseLabel?: string
icon?: string
showInterrupt?: boolean
showRemember?: boolean
autoAllowed?: boolean
actions?: Array<{
id: string
label: string
kind: 'accept' | 'reject'
remember?: boolean
}>
}
/** Server-provided execution routing contract */
execution?: {
target?: 'go' | 'go_subagent' | 'sim_server' | 'sim_client_capability' | string
capabilityId?: string
}
/** Content streamed from a subagent (e.g., debug agent) */ /** Content streamed from a subagent (e.g., debug agent) */
subAgentContent?: string subAgentContent?: string
/** Tool calls made by the subagent */ /** Tool calls made by the subagent */
@@ -187,6 +167,10 @@ export interface CopilotState {
// Per-message metadata captured at send-time for reliable stats // Per-message metadata captured at send-time for reliable stats
// Auto-allowed integration tools (tools that can run without confirmation)
autoAllowedTools: string[]
autoAllowedToolsLoaded: boolean
// Active stream metadata for reconnect/replay // Active stream metadata for reconnect/replay
activeStream: CopilotStreamInfo | null activeStream: CopilotStreamInfo | null
@@ -263,6 +247,11 @@ export interface CopilotActions {
abortSignal?: AbortSignal abortSignal?: AbortSignal
) => Promise<void> ) => Promise<void>
handleNewChatCreation: (newChatId: string) => Promise<void> handleNewChatCreation: (newChatId: string) => Promise<void>
loadAutoAllowedTools: () => Promise<void>
addAutoAllowedTool: (toolId: string) => Promise<void>
removeAutoAllowedTool: (toolId: string) => Promise<void>
isToolAutoAllowed: (toolId: string) => boolean
// Credential masking // Credential masking
loadSensitiveCredentialIds: () => Promise<void> loadSensitiveCredentialIds: () => Promise<void>
maskCredentialValue: (value: string) => string maskCredentialValue: (value: string) => string

View File

@@ -15,7 +15,7 @@ import {
captureBaselineSnapshot, captureBaselineSnapshot,
cloneWorkflowState, cloneWorkflowState,
createBatchedUpdater, createBatchedUpdater,
findLatestWorkflowEditToolCallId, findLatestEditWorkflowToolCallId,
getLatestUserMessageId, getLatestUserMessageId,
persistWorkflowStateToServer, persistWorkflowStateToServer,
} from './utils' } from './utils'
@@ -334,7 +334,7 @@ export const useWorkflowDiffStore = create<WorkflowDiffState & WorkflowDiffActio
}) })
} }
findLatestWorkflowEditToolCallId().then((toolCallId) => { findLatestEditWorkflowToolCallId().then((toolCallId) => {
if (toolCallId) { if (toolCallId) {
import('@/stores/panel/copilot/store') import('@/stores/panel/copilot/store')
.then(({ useCopilotStore }) => { .then(({ useCopilotStore }) => {
@@ -439,7 +439,7 @@ export const useWorkflowDiffStore = create<WorkflowDiffState & WorkflowDiffActio
}) })
} }
findLatestWorkflowEditToolCallId().then((toolCallId) => { findLatestEditWorkflowToolCallId().then((toolCallId) => {
if (toolCallId) { if (toolCallId) {
import('@/stores/panel/copilot/store') import('@/stores/panel/copilot/store')
.then(({ useCopilotStore }) => { .then(({ useCopilotStore }) => {

View File

@@ -126,20 +126,6 @@ export async function getLatestUserMessageId(): Promise<string | null> {
} }
export async function findLatestEditWorkflowToolCallId(): Promise<string | undefined> { export async function findLatestEditWorkflowToolCallId(): Promise<string | undefined> {
return findLatestWorkflowEditToolCallId()
}
/** Workflow-edit detection: `workflow_change` in apply mode or carrying a proposalId. */
function isWorkflowEditToolCall(name?: string, params?: Record<string, unknown>): boolean {
  if (name !== 'workflow_change') return false
  const mode = params?.mode
  const isApplyMode = typeof mode === 'string' && mode.toLowerCase() === 'apply'
  // Be permissive for incomplete events: apply calls always include proposalId.
  const hasProposal = typeof params?.proposalId === 'string' && params.proposalId.length > 0
  return isApplyMode || hasProposal
}
export async function findLatestWorkflowEditToolCallId(): Promise<string | undefined> {
try { try {
const { useCopilotStore } = await import('@/stores/panel/copilot/store') const { useCopilotStore } = await import('@/stores/panel/copilot/store')
const { messages, toolCallsById } = useCopilotStore.getState() const { messages, toolCallsById } = useCopilotStore.getState()
@@ -148,22 +134,17 @@ export async function findLatestWorkflowEditToolCallId(): Promise<string | undef
const message = messages[mi] const message = messages[mi]
if (message.role !== 'assistant' || !message.contentBlocks) continue if (message.role !== 'assistant' || !message.contentBlocks) continue
for (const block of message.contentBlocks) { for (const block of message.contentBlocks) {
if ( if (block?.type === 'tool_call' && block.toolCall?.name === 'edit_workflow') {
block?.type === 'tool_call' &&
isWorkflowEditToolCall(block.toolCall?.name, block.toolCall?.params)
) {
return block.toolCall?.id return block.toolCall?.id
} }
} }
} }
const fallback = Object.values(toolCallsById).filter((call) => const fallback = Object.values(toolCallsById).filter((call) => call.name === 'edit_workflow')
isWorkflowEditToolCall(call.name, call.params)
)
return fallback.length ? fallback[fallback.length - 1].id : undefined return fallback.length ? fallback[fallback.length - 1].id : undefined
} catch (error) { } catch (error) {
logger.warn('Failed to resolve workflow edit tool call id', { error }) logger.warn('Failed to resolve edit_workflow tool call id', { error })
return undefined return undefined
} }
} }

View File

@@ -1,114 +0,0 @@
import { TIMESTAMP_OUTPUT } from '@/tools/confluence/types'
import type { ToolConfig } from '@/tools/types'
/** Input parameters for the Confluence "delete label" tool. */
export interface ConfluenceDeleteLabelParams {
// OAuth access token for Confluence (hidden from the user in the UI).
accessToken: string
// Confluence domain, e.g. yourcompany.atlassian.net
domain: string
// ID of the page to remove the label from.
pageId: string
// Name of the label to remove.
labelName: string
// Optional Cloud ID; when omitted the backend resolves it from the domain.
cloudId?: string
}
/** Result shape produced by the tool's transformResponse. */
export interface ConfluenceDeleteLabelResponse {
success: boolean
output: {
// ISO 8601 timestamp of when the response was transformed.
ts: string
pageId: string
labelName: string
// Set unconditionally to true by transformResponse on a parsed response.
deleted: boolean
}
}
/**
 * Tool config for removing a label from a Confluence page.
 * The request is proxied through the app's /api/tools/confluence/labels
 * route; the OAuth token travels both as a Bearer header and in the body.
 */
export const confluenceDeleteLabelTool: ToolConfig<
ConfluenceDeleteLabelParams,
ConfluenceDeleteLabelResponse
> = {
id: 'confluence_delete_label',
name: 'Confluence Delete Label',
description: 'Remove a label from a Confluence page.',
version: '1.0.0',
// Requires an OAuth connection to the Confluence provider.
oauth: {
required: true,
provider: 'confluence',
},
params: {
accessToken: {
type: 'string',
required: true,
visibility: 'hidden',
description: 'OAuth access token for Confluence',
},
domain: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)',
},
pageId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'Confluence page ID to remove the label from',
},
labelName: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'Name of the label to remove',
},
cloudId: {
type: 'string',
required: false,
visibility: 'user-only',
description:
'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.',
},
},
request: {
// Internal proxy route; the real Confluence endpoint is resolved server-side.
url: () => '/api/tools/confluence/labels',
method: 'DELETE',
headers: (params: ConfluenceDeleteLabelParams) => ({
Accept: 'application/json',
Authorization: `Bearer ${params.accessToken}`,
}),
// IDs are trimmed defensively; values may come from LLM-filled params.
body: (params: ConfluenceDeleteLabelParams) => ({
domain: params.domain,
accessToken: params.accessToken,
pageId: params.pageId?.trim(),
labelName: params.labelName?.trim(),
cloudId: params.cloudId,
}),
},
// NOTE(review): success/deleted are hard-coded true and response.ok is not
// checked here — presumably the surrounding framework short-circuits error
// statuses before transformResponse runs; confirm.
transformResponse: async (response: Response) => {
const data = await response.json()
return {
success: true,
output: {
ts: new Date().toISOString(),
pageId: data.pageId ?? '',
labelName: data.labelName ?? '',
deleted: true,
},
}
},
// Output schema surfaced to the tool consumer.
outputs: {
ts: TIMESTAMP_OUTPUT,
pageId: {
type: 'string',
description: 'Page ID the label was removed from',
},
labelName: {
type: 'string',
description: 'Name of the removed label',
},
deleted: {
type: 'boolean',
description: 'Deletion status',
},
},
}

View File

@@ -1,105 +0,0 @@
import { TIMESTAMP_OUTPUT } from '@/tools/confluence/types'
import type { ToolConfig } from '@/tools/types'
/** Input parameters for the Confluence "delete page property" tool. */
export interface ConfluenceDeletePagePropertyParams {
// OAuth access token for Confluence (hidden from the user in the UI).
accessToken: string
// Confluence domain, e.g. yourcompany.atlassian.net
domain: string
// ID of the page containing the property.
pageId: string
// ID of the content property to delete.
propertyId: string
// Optional Cloud ID; when omitted the backend resolves it from the domain.
cloudId?: string
}
/** Result shape produced by the tool's transformResponse. */
export interface ConfluenceDeletePagePropertyResponse {
success: boolean
output: {
// ISO 8601 timestamp of when the response was transformed.
ts: string
pageId: string
propertyId: string
// Set unconditionally to true by transformResponse on a parsed response.
deleted: boolean
}
}
/**
 * Tool config for deleting a content property from a Confluence page by
 * property ID. The request is proxied through the app's
 * /api/tools/confluence/page-properties route.
 */
export const confluenceDeletePagePropertyTool: ToolConfig<
ConfluenceDeletePagePropertyParams,
ConfluenceDeletePagePropertyResponse
> = {
id: 'confluence_delete_page_property',
name: 'Confluence Delete Page Property',
description: 'Delete a content property from a Confluence page by its property ID.',
version: '1.0.0',
// Requires an OAuth connection to the Confluence provider.
oauth: {
required: true,
provider: 'confluence',
},
params: {
accessToken: {
type: 'string',
required: true,
visibility: 'hidden',
description: 'OAuth access token for Confluence',
},
domain: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)',
},
pageId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'The ID of the page containing the property',
},
propertyId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'The ID of the property to delete',
},
cloudId: {
type: 'string',
required: false,
visibility: 'user-only',
description:
'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.',
},
},
request: {
// Internal proxy route; the real Confluence endpoint is resolved server-side.
url: () => '/api/tools/confluence/page-properties',
method: 'DELETE',
headers: (params: ConfluenceDeletePagePropertyParams) => ({
Accept: 'application/json',
Authorization: `Bearer ${params.accessToken}`,
}),
// IDs are trimmed defensively; values may come from LLM-filled params.
body: (params: ConfluenceDeletePagePropertyParams) => ({
domain: params.domain,
accessToken: params.accessToken,
pageId: params.pageId?.trim(),
propertyId: params.propertyId?.trim(),
cloudId: params.cloudId,
}),
},
// NOTE(review): success/deleted are hard-coded true and response.ok is not
// checked here — presumably the surrounding framework short-circuits error
// statuses before transformResponse runs; confirm.
transformResponse: async (response: Response) => {
const data = await response.json()
return {
success: true,
output: {
ts: new Date().toISOString(),
pageId: data.pageId ?? '',
propertyId: data.propertyId ?? '',
deleted: true,
},
}
},
// Output schema surfaced to the tool consumer.
outputs: {
ts: TIMESTAMP_OUTPUT,
pageId: { type: 'string', description: 'ID of the page' },
propertyId: { type: 'string', description: 'ID of the deleted property' },
deleted: { type: 'boolean', description: 'Deletion status' },
},
}

View File

@@ -1,143 +0,0 @@
import { PAGE_ITEM_PROPERTIES, TIMESTAMP_OUTPUT } from '@/tools/confluence/types'
import type { ToolConfig } from '@/tools/types'
/** Input parameters for the Confluence "get pages by label" tool. */
export interface ConfluenceGetPagesByLabelParams {
// OAuth access token for Confluence (hidden from the user in the UI).
accessToken: string
// Confluence domain, e.g. yourcompany.atlassian.net
domain: string
// ID of the label whose pages should be listed.
labelId: string
// Max pages per request (tool default 50; see the tool's param description).
limit?: number
// Pagination cursor from a previous response.
cursor?: string
// Optional Cloud ID; when omitted the backend resolves it from the domain.
cloudId?: string
}
/** Result shape: pages carrying the label plus a pagination cursor. */
export interface ConfluenceGetPagesByLabelResponse {
success: boolean
output: {
// ISO 8601 timestamp of when the response was transformed.
ts: string
labelId: string
pages: Array<{
id: string
title: string
status: string | null
spaceId: string | null
parentId: string | null
authorId: string | null
createdAt: string | null
// Latest version metadata, when available.
version: {
number: number
message?: string
createdAt?: string
} | null
}>
// Cursor for the next page of results, or null when exhausted.
nextCursor: string | null
}
}
/**
 * Tool config for retrieving all Confluence pages that have a specific label.
 *
 * Requests are proxied through the internal `/api/tools/confluence/pages-by-label`
 * route, which performs the actual Confluence Cloud REST call.
 */
export const confluenceGetPagesByLabelTool: ToolConfig<
  ConfluenceGetPagesByLabelParams,
  ConfluenceGetPagesByLabelResponse
> = {
  id: 'confluence_get_pages_by_label',
  name: 'Confluence Get Pages by Label',
  description: 'Retrieve all pages that have a specific label applied.',
  version: '1.0.0',
  oauth: {
    required: true,
    provider: 'confluence',
  },
  params: {
    accessToken: {
      type: 'string',
      required: true,
      visibility: 'hidden',
      description: 'OAuth access token for Confluence',
    },
    domain: {
      type: 'string',
      required: true,
      visibility: 'user-only',
      description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)',
    },
    labelId: {
      type: 'string',
      required: true,
      visibility: 'user-or-llm',
      description: 'The ID of the label to get pages for',
    },
    limit: {
      type: 'number',
      required: false,
      visibility: 'user-or-llm',
      description: 'Maximum number of pages to return (default: 50, max: 250)',
    },
    cursor: {
      type: 'string',
      required: false,
      visibility: 'user-or-llm',
      description: 'Pagination cursor from previous response',
    },
    cloudId: {
      type: 'string',
      required: false,
      visibility: 'user-only',
      description:
        'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.',
    },
  },
  request: {
    // Builds the internal proxy URL with all query parameters encoded.
    url: (params: ConfluenceGetPagesByLabelParams) => {
      // NOTE(review): accessToken also travels in the query string; URLs are
      // commonly logged — confirm the internal route needs it there, otherwise
      // prefer the Authorization header alone.
      const query = new URLSearchParams({
        domain: params.domain,
        accessToken: params.accessToken,
        labelId: params.labelId,
        // `??` (not `||`) so an explicit numeric value is never silently
        // replaced by the default; max-250 enforcement is left to the backend.
        limit: String(params.limit ?? 50),
      })
      if (params.cursor) {
        query.set('cursor', params.cursor)
      }
      if (params.cloudId) {
        query.set('cloudId', params.cloudId)
      }
      return `/api/tools/confluence/pages-by-label?${query.toString()}`
    },
    method: 'GET',
    headers: (params: ConfluenceGetPagesByLabelParams) => ({
      Accept: 'application/json',
      Authorization: `Bearer ${params.accessToken}`,
    }),
  },
  // Normalizes the proxy payload; missing fields fall back to safe defaults.
  // NOTE(review): always reports success: true — assumes non-OK responses are
  // surfaced by the tool runner before transformResponse runs; confirm.
  transformResponse: async (response: Response) => {
    const data = await response.json()
    return {
      success: true,
      output: {
        ts: new Date().toISOString(),
        labelId: data.labelId ?? '',
        pages: data.pages ?? [],
        nextCursor: data.nextCursor ?? null,
      },
    }
  },
  outputs: {
    ts: TIMESTAMP_OUTPUT,
    labelId: { type: 'string', description: 'ID of the label' },
    pages: {
      type: 'array',
      description: 'Array of pages with this label',
      items: {
        type: 'object',
        properties: PAGE_ITEM_PROPERTIES,
      },
    },
    nextCursor: {
      type: 'string',
      description: 'Cursor for fetching the next page of results',
      optional: true,
    },
  },
}

View File

@@ -5,14 +5,11 @@ import { confluenceCreatePageTool } from '@/tools/confluence/create_page'
import { confluenceCreatePagePropertyTool } from '@/tools/confluence/create_page_property' import { confluenceCreatePagePropertyTool } from '@/tools/confluence/create_page_property'
import { confluenceDeleteAttachmentTool } from '@/tools/confluence/delete_attachment' import { confluenceDeleteAttachmentTool } from '@/tools/confluence/delete_attachment'
import { confluenceDeleteCommentTool } from '@/tools/confluence/delete_comment' import { confluenceDeleteCommentTool } from '@/tools/confluence/delete_comment'
import { confluenceDeleteLabelTool } from '@/tools/confluence/delete_label'
import { confluenceDeletePageTool } from '@/tools/confluence/delete_page' import { confluenceDeletePageTool } from '@/tools/confluence/delete_page'
import { confluenceDeletePagePropertyTool } from '@/tools/confluence/delete_page_property'
import { confluenceGetBlogPostTool } from '@/tools/confluence/get_blogpost' import { confluenceGetBlogPostTool } from '@/tools/confluence/get_blogpost'
import { confluenceGetPageAncestorsTool } from '@/tools/confluence/get_page_ancestors' import { confluenceGetPageAncestorsTool } from '@/tools/confluence/get_page_ancestors'
import { confluenceGetPageChildrenTool } from '@/tools/confluence/get_page_children' import { confluenceGetPageChildrenTool } from '@/tools/confluence/get_page_children'
import { confluenceGetPageVersionTool } from '@/tools/confluence/get_page_version' import { confluenceGetPageVersionTool } from '@/tools/confluence/get_page_version'
import { confluenceGetPagesByLabelTool } from '@/tools/confluence/get_pages_by_label'
import { confluenceGetSpaceTool } from '@/tools/confluence/get_space' import { confluenceGetSpaceTool } from '@/tools/confluence/get_space'
import { confluenceListAttachmentsTool } from '@/tools/confluence/list_attachments' import { confluenceListAttachmentsTool } from '@/tools/confluence/list_attachments'
import { confluenceListBlogPostsTool } from '@/tools/confluence/list_blogposts' import { confluenceListBlogPostsTool } from '@/tools/confluence/list_blogposts'
@@ -22,7 +19,6 @@ import { confluenceListLabelsTool } from '@/tools/confluence/list_labels'
import { confluenceListPagePropertiesTool } from '@/tools/confluence/list_page_properties' import { confluenceListPagePropertiesTool } from '@/tools/confluence/list_page_properties'
import { confluenceListPageVersionsTool } from '@/tools/confluence/list_page_versions' import { confluenceListPageVersionsTool } from '@/tools/confluence/list_page_versions'
import { confluenceListPagesInSpaceTool } from '@/tools/confluence/list_pages_in_space' import { confluenceListPagesInSpaceTool } from '@/tools/confluence/list_pages_in_space'
import { confluenceListSpaceLabelsTool } from '@/tools/confluence/list_space_labels'
import { confluenceListSpacesTool } from '@/tools/confluence/list_spaces' import { confluenceListSpacesTool } from '@/tools/confluence/list_spaces'
import { confluenceRetrieveTool } from '@/tools/confluence/retrieve' import { confluenceRetrieveTool } from '@/tools/confluence/retrieve'
import { confluenceSearchTool } from '@/tools/confluence/search' import { confluenceSearchTool } from '@/tools/confluence/search'
@@ -82,7 +78,6 @@ export {
// Page Properties Tools // Page Properties Tools
confluenceListPagePropertiesTool, confluenceListPagePropertiesTool,
confluenceCreatePagePropertyTool, confluenceCreatePagePropertyTool,
confluenceDeletePagePropertyTool,
// Blog Post Tools // Blog Post Tools
confluenceListBlogPostsTool, confluenceListBlogPostsTool,
confluenceGetBlogPostTool, confluenceGetBlogPostTool,
@@ -103,9 +98,6 @@ export {
// Label Tools // Label Tools
confluenceListLabelsTool, confluenceListLabelsTool,
confluenceAddLabelTool, confluenceAddLabelTool,
confluenceDeleteLabelTool,
confluenceGetPagesByLabelTool,
confluenceListSpaceLabelsTool,
// Space Tools // Space Tools
confluenceGetSpaceTool, confluenceGetSpaceTool,
confluenceListSpacesTool, confluenceListSpacesTool,

View File

@@ -1,134 +0,0 @@
import { LABEL_ITEM_PROPERTIES, TIMESTAMP_OUTPUT } from '@/tools/confluence/types'
import type { ToolConfig } from '@/tools/types'
/**
 * Input parameters for the `confluence_list_space_labels` tool.
 */
export interface ConfluenceListSpaceLabelsParams {
  /** OAuth access token for the Confluence REST API (sent as a Bearer token). */
  accessToken: string
  /** Confluence domain, e.g. `yourcompany.atlassian.net`. */
  domain: string
  /** ID of the space whose labels should be listed. */
  spaceId: string
  /** Maximum number of labels to return (tool description states default 25, max 250). */
  limit?: number
  /** Opaque pagination cursor returned by a previous call. */
  cursor?: string
  /** Confluence Cloud ID; when omitted it is resolved from `domain` server-side. */
  cloudId?: string
}
/**
 * Result shape produced by the `confluence_list_space_labels` tool.
 */
export interface ConfluenceListSpaceLabelsResponse {
  // Note: transformResponse in this file always sets this to true.
  success: boolean
  output: {
    /** ISO 8601 timestamp of when the response was transformed. */
    ts: string
    /** ID of the space that was queried. */
    spaceId: string
    /** Labels attached to the space. */
    labels: Array<{
      id: string
      name: string
      prefix: string
    }>
    /** Cursor for the next page of results, or null when exhausted. */
    nextCursor: string | null
  }
}
/**
 * Tool config for listing all labels associated with a Confluence space.
 *
 * Requests are proxied through the internal `/api/tools/confluence/space-labels`
 * route, which performs the actual Confluence Cloud REST call.
 */
export const confluenceListSpaceLabelsTool: ToolConfig<
  ConfluenceListSpaceLabelsParams,
  ConfluenceListSpaceLabelsResponse
> = {
  id: 'confluence_list_space_labels',
  name: 'Confluence List Space Labels',
  description: 'List all labels associated with a Confluence space.',
  version: '1.0.0',
  oauth: {
    required: true,
    provider: 'confluence',
  },
  params: {
    accessToken: {
      type: 'string',
      required: true,
      visibility: 'hidden',
      description: 'OAuth access token for Confluence',
    },
    domain: {
      type: 'string',
      required: true,
      visibility: 'user-only',
      description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)',
    },
    spaceId: {
      type: 'string',
      required: true,
      visibility: 'user-or-llm',
      description: 'The ID of the Confluence space to list labels from',
    },
    limit: {
      type: 'number',
      required: false,
      visibility: 'user-or-llm',
      description: 'Maximum number of labels to return (default: 25, max: 250)',
    },
    cursor: {
      type: 'string',
      required: false,
      visibility: 'user-or-llm',
      description: 'Pagination cursor from previous response',
    },
    cloudId: {
      type: 'string',
      required: false,
      visibility: 'user-only',
      description:
        'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.',
    },
  },
  request: {
    // Builds the internal proxy URL with all query parameters encoded.
    url: (params: ConfluenceListSpaceLabelsParams) => {
      // NOTE(review): accessToken also travels in the query string; URLs are
      // commonly logged — confirm the internal route needs it there, otherwise
      // prefer the Authorization header alone.
      const query = new URLSearchParams({
        domain: params.domain,
        accessToken: params.accessToken,
        spaceId: params.spaceId,
        // `??` (not `||`) so an explicit numeric value is never silently
        // replaced by the default; max-250 enforcement is left to the backend.
        limit: String(params.limit ?? 25),
      })
      if (params.cursor) {
        query.set('cursor', params.cursor)
      }
      if (params.cloudId) {
        query.set('cloudId', params.cloudId)
      }
      return `/api/tools/confluence/space-labels?${query.toString()}`
    },
    method: 'GET',
    headers: (params: ConfluenceListSpaceLabelsParams) => ({
      Accept: 'application/json',
      Authorization: `Bearer ${params.accessToken}`,
    }),
  },
  // Normalizes the proxy payload; missing fields fall back to safe defaults.
  // NOTE(review): always reports success: true — assumes non-OK responses are
  // surfaced by the tool runner before transformResponse runs; confirm.
  transformResponse: async (response: Response) => {
    const data = await response.json()
    return {
      success: true,
      output: {
        ts: new Date().toISOString(),
        spaceId: data.spaceId ?? '',
        labels: data.labels ?? [],
        nextCursor: data.nextCursor ?? null,
      },
    }
  },
  outputs: {
    ts: TIMESTAMP_OUTPUT,
    spaceId: { type: 'string', description: 'ID of the space' },
    labels: {
      type: 'array',
      description: 'Array of labels on the space',
      items: {
        type: 'object',
        properties: LABEL_ITEM_PROPERTIES,
      },
    },
    nextCursor: {
      type: 'string',
      description: 'Cursor for fetching the next page of results',
      optional: true,
    },
  },
}

View File

@@ -118,13 +118,10 @@ import {
confluenceCreatePageTool, confluenceCreatePageTool,
confluenceDeleteAttachmentTool, confluenceDeleteAttachmentTool,
confluenceDeleteCommentTool, confluenceDeleteCommentTool,
confluenceDeleteLabelTool,
confluenceDeletePagePropertyTool,
confluenceDeletePageTool, confluenceDeletePageTool,
confluenceGetBlogPostTool, confluenceGetBlogPostTool,
confluenceGetPageAncestorsTool, confluenceGetPageAncestorsTool,
confluenceGetPageChildrenTool, confluenceGetPageChildrenTool,
confluenceGetPagesByLabelTool,
confluenceGetPageVersionTool, confluenceGetPageVersionTool,
confluenceGetSpaceTool, confluenceGetSpaceTool,
confluenceListAttachmentsTool, confluenceListAttachmentsTool,
@@ -135,7 +132,6 @@ import {
confluenceListPagePropertiesTool, confluenceListPagePropertiesTool,
confluenceListPagesInSpaceTool, confluenceListPagesInSpaceTool,
confluenceListPageVersionsTool, confluenceListPageVersionsTool,
confluenceListSpaceLabelsTool,
confluenceListSpacesTool, confluenceListSpacesTool,
confluenceRetrieveTool, confluenceRetrieveTool,
confluenceSearchInSpaceTool, confluenceSearchInSpaceTool,
@@ -2671,10 +2667,6 @@ export const tools: Record<string, ToolConfig> = {
confluence_delete_attachment: confluenceDeleteAttachmentTool, confluence_delete_attachment: confluenceDeleteAttachmentTool,
confluence_list_labels: confluenceListLabelsTool, confluence_list_labels: confluenceListLabelsTool,
confluence_add_label: confluenceAddLabelTool, confluence_add_label: confluenceAddLabelTool,
confluence_get_pages_by_label: confluenceGetPagesByLabelTool,
confluence_list_space_labels: confluenceListSpaceLabelsTool,
confluence_delete_label: confluenceDeleteLabelTool,
confluence_delete_page_property: confluenceDeletePagePropertyTool,
confluence_get_space: confluenceGetSpaceTool, confluence_get_space: confluenceGetSpaceTool,
confluence_list_spaces: confluenceListSpacesTool, confluence_list_spaces: confluenceListSpacesTool,
cursor_list_agents: cursorListAgentsTool, cursor_list_agents: cursorListAgentsTool,