Compare commits


8 Commits

| Author | SHA1 | Message | Date |
| ------ | ---- | ------- | ---- |
| Siddharth Ganesan | c22bd2caaa | SSE interface | 2026-02-11 18:22:26 -08:00 |
| Siddharth Ganesan | 462aa15341 | Implement basic tooling for workflow.apply | 2026-02-11 16:30:11 -08:00 |
| Vikhyath Mondreti | 52aff4d60b | fix build | 2026-02-11 15:33:22 -08:00 |
| Waleed | 3a3bddd6f8 | fix(confl): use recommended query param pattern for confluence route (#3202); use unused var | 2026-02-11 14:59:26 -08:00 |
| Waleed | 639d50d6b9 | feat(confluence): added list space labels, delete label, delete page prop (#3201) | 2026-02-11 14:40:31 -08:00 |
| Waleed | cec74e09c2 | fix(variables): fix tag dropdown and cursor alignment in variables block (#3199) | 2026-02-11 14:40:31 -08:00 |
| Waleed | d5a756c9f2 | fix(hotkeys): remove C, T, E tab-switching hotkeys (#3197) | 2026-02-11 13:24:00 -08:00 |
| Waleed | f3e994baf0 | improvement(oom): increase trigger machine size (#3196) | 2026-02-11 13:11:28 -08:00 |
49 changed files with 4064 additions and 920 deletions

View File

@@ -41,9 +41,6 @@ Diese Tastenkombinationen wechseln zwischen den Panel-Tabs auf der rechten Seite
| Tastenkombination | Aktion |
|----------|--------|
| `C` | Copilot-Tab fokussieren |
| `T` | Toolbar-Tab fokussieren |
| `E` | Editor-Tab fokussieren |
| `Mod` + `F` | Toolbar-Suche fokussieren |
## Globale Navigation

View File

@@ -43,9 +43,6 @@ These shortcuts switch between panel tabs on the right side of the canvas.
| Shortcut | Action |
|----------|--------|
| `C` | Focus Copilot tab |
| `T` | Focus Toolbar tab |
| `E` | Focus Editor tab |
| `Mod` + `F` | Focus Toolbar search |
## Global Navigation

View File

@@ -399,6 +399,28 @@ Create a new custom property (metadata) on a Confluence page.
| ↳ `authorId` | string | Account ID of the version author |
| ↳ `createdAt` | string | ISO 8601 timestamp of version creation |
### `confluence_delete_page_property`
Delete a content property from a Confluence page by its property ID.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) |
| `pageId` | string | Yes | The ID of the page containing the property |
| `propertyId` | string | Yes | The ID of the property to delete |
| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `ts` | string | ISO 8601 timestamp of the operation |
| `pageId` | string | ID of the page |
| `propertyId` | string | ID of the deleted property |
| `deleted` | boolean | Deletion status |
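As a rough illustration of the contract documented above (editorial sketch, not part of the diff), the input and output shapes can be expressed as TypeScript types; the interface names and sample values are assumptions, only the field names come from the tables.

```ts
// Hypothetical typed sketch of the confluence_delete_page_property contract.
// Field names mirror the tables above; the interface names and sample values are invented.
interface DeletePagePropertyInput {
  domain: string // e.g. yourcompany.atlassian.net
  pageId: string // page containing the property
  propertyId: string // property to delete
  cloudId?: string // optional; resolved from the domain when omitted
}

interface DeletePagePropertyOutput {
  ts: string // ISO 8601 timestamp of the operation
  pageId: string
  propertyId: string
  deleted: boolean
}

const exampleInput: DeletePagePropertyInput = {
  domain: 'yourcompany.atlassian.net',
  pageId: '123456',
  propertyId: '789',
}
```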
### `confluence_search`
Search for content across Confluence pages, blog posts, and other content.
@@ -872,6 +894,90 @@ Add a label to a Confluence page for organization and categorization.
| `labelName` | string | Name of the added label |
| `labelId` | string | ID of the added label |
### `confluence_delete_label`
Remove a label from a Confluence page.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) |
| `pageId` | string | Yes | Confluence page ID to remove the label from |
| `labelName` | string | Yes | Name of the label to remove |
| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `ts` | string | ISO 8601 timestamp of the operation |
| `pageId` | string | Page ID the label was removed from |
| `labelName` | string | Name of the removed label |
| `deleted` | boolean | Deletion status |
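A minimal usage sketch for the tool described above, assuming a generic `runTool` helper (that helper and the sample arguments are hypothetical; only the field names are taken from the tables):

```ts
// Hedged sketch: result shape of confluence_delete_label per the output table above.
interface DeleteLabelOutput {
  ts: string
  pageId: string
  labelName: string
  deleted: boolean
}

// runTool is a placeholder for whatever executes the tool in your environment.
async function removeLabel(runTool: (name: string, args: object) => Promise<DeleteLabelOutput>) {
  const result = await runTool('confluence_delete_label', {
    domain: 'yourcompany.atlassian.net',
    pageId: '123456',
    labelName: 'obsolete',
  })
  if (!result.deleted) {
    throw new Error(`Label ${result.labelName} was not removed from page ${result.pageId}`)
  }
}
```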
### `confluence_get_pages_by_label`
Retrieve all pages that have a specific label applied.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) |
| `labelId` | string | Yes | The ID of the label to get pages for |
| `limit` | number | No | Maximum number of pages to return \(default: 50, max: 250\) |
| `cursor` | string | No | Pagination cursor from previous response |
| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `ts` | string | ISO 8601 timestamp of the operation |
| `labelId` | string | ID of the label |
| `pages` | array | Array of pages with this label |
| ↳ `id` | string | Unique page identifier |
| ↳ `title` | string | Page title |
| ↳ `status` | string | Page status \(e.g., current, archived, trashed, draft\) |
| ↳ `spaceId` | string | ID of the space containing the page |
| ↳ `parentId` | string | ID of the parent page \(null if top-level\) |
| ↳ `authorId` | string | Account ID of the page author |
| ↳ `createdAt` | string | ISO 8601 timestamp when the page was created |
| ↳ `version` | object | Page version information |
| ↳ `number` | number | Version number |
| ↳ `message` | string | Version message |
| ↳ `minorEdit` | boolean | Whether this is a minor edit |
| ↳ `authorId` | string | Account ID of the version author |
| ↳ `createdAt` | string | ISO 8601 timestamp of version creation |
| `nextCursor` | string | Cursor for fetching the next page of results |
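Since this tool paginates via `cursor`/`nextCursor`, a small sketch of draining all pages may help; the `runTool` helper, domain, and page-size choice are assumptions, while `limit`, `cursor`, and `nextCursor` come from the tables above.

```ts
// Hedged sketch of cursor pagination against confluence_get_pages_by_label.
interface PagesByLabelOutput {
  ts: string
  labelId: string
  pages: Array<{ id: string; title: string; status: string }>
  nextCursor: string | null
}

async function collectPagesByLabel(
  runTool: (name: string, args: object) => Promise<PagesByLabelOutput>,
  labelId: string
) {
  const pages: PagesByLabelOutput['pages'] = []
  let cursor: string | undefined
  do {
    const out = await runTool('confluence_get_pages_by_label', {
      domain: 'yourcompany.atlassian.net',
      labelId,
      limit: 50, // default per the docs; max 250
      cursor, // undefined on the first call
    })
    pages.push(...out.pages)
    cursor = out.nextCursor ?? undefined // stop when the API reports no further page
  } while (cursor)
  return pages
}
```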
### `confluence_list_space_labels`
List all labels associated with a Confluence space.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) |
| `spaceId` | string | Yes | The ID of the Confluence space to list labels from |
| `limit` | number | No | Maximum number of labels to return \(default: 25, max: 250\) |
| `cursor` | string | No | Pagination cursor from previous response |
| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `ts` | string | ISO 8601 timestamp of the operation |
| `spaceId` | string | ID of the space |
| `labels` | array | Array of labels on the space |
| ↳ `id` | string | Unique label identifier |
| ↳ `name` | string | Label name |
| ↳ `prefix` | string | Label prefix/type \(e.g., global, my, team\) |
| `nextCursor` | string | Cursor for fetching the next page of results |
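For reference, a sketch of what a response shaped like the output table above might look like; every value here is invented for illustration.

```ts
// Hypothetical confluence_list_space_labels response matching the documented fields.
const exampleSpaceLabelsOutput = {
  ts: '2026-02-11T22:40:31Z',
  spaceId: '98304',
  labels: [
    { id: '101', name: 'runbook', prefix: 'global' },
    { id: '102', name: 'team-infra', prefix: 'team' },
  ],
  nextCursor: null, // non-null when another page of labels is available
}
```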
### `confluence_get_space`
Get details about a specific Confluence space.

View File

@@ -42,9 +42,6 @@ Estos atajos cambian entre las pestañas del panel en el lado derecho del lienzo
| Atajo | Acción |
|----------|--------|
| `C` | Enfocar pestaña Copilot |
| `T` | Enfocar pestaña Barra de herramientas |
| `E` | Enfocar pestaña Editor |
| `Mod` + `F` | Enfocar búsqueda de Barra de herramientas |
## Navegación global

View File

@@ -42,9 +42,6 @@ Ces raccourcis permettent de basculer entre les onglets du panneau sur le côté
| Raccourci | Action |
|----------|--------|
| `C` | Activer l'onglet Copilot |
| `T` | Activer l'onglet Barre d'outils |
| `E` | Activer l'onglet Éditeur |
| `Mod` + `F` | Activer la recherche dans la barre d'outils |
## Navigation globale

View File

@@ -41,9 +41,6 @@ import { Callout } from 'fumadocs-ui/components/callout'
| ショートカット | 操作 |
|----------|--------|
| `C` | Copilotタブにフォーカス |
| `T` | Toolbarタブにフォーカス |
| `E` | Editorタブにフォーカス |
| `Mod` + `F` | Toolbar検索にフォーカス |
## グローバルナビゲーション

View File

@@ -41,9 +41,6 @@ import { Callout } from 'fumadocs-ui/components/callout'
| 快捷键 | 操作 |
|----------|--------|
| `C` | 聚焦 Copilot 标签页 |
| `T` | 聚焦 Toolbar 标签页 |
| `E` | 聚焦 Editor 标签页 |
| `Mod` + `F` | 聚焦 Toolbar 搜索 |
## 全局导航

View File

@@ -1,145 +1,81 @@
import { db } from '@sim/db'
import { settings } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import { env } from '@/lib/core/config/env'
const logger = createLogger('CopilotAutoAllowedToolsAPI')
/**
* GET - Fetch user's auto-allowed integration tools
*/
export async function GET() {
try {
const session = await getSession()
if (!session?.user?.id) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const userId = session.user.id
const [userSettings] = await db
.select()
.from(settings)
.where(eq(settings.userId, userId))
.limit(1)
if (userSettings) {
const autoAllowedTools = (userSettings.copilotAutoAllowedTools as string[]) || []
return NextResponse.json({ autoAllowedTools })
}
await db.insert(settings).values({
id: userId,
userId,
copilotAutoAllowedTools: [],
})
return NextResponse.json({ autoAllowedTools: [] })
} catch (error) {
logger.error('Failed to fetch auto-allowed tools', { error })
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
function copilotHeaders(): HeadersInit {
const headers: Record<string, string> = {
'Content-Type': 'application/json',
}
if (env.COPILOT_API_KEY) {
headers['x-api-key'] = env.COPILOT_API_KEY
}
return headers
}
/**
* POST - Add a tool to the auto-allowed list
*/
export async function POST(request: NextRequest) {
try {
const session = await getSession()
if (!session?.user?.id) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const userId = session.user.id
const body = await request.json()
if (!body.toolId || typeof body.toolId !== 'string') {
return NextResponse.json({ error: 'toolId must be a string' }, { status: 400 })
}
const toolId = body.toolId
const [existing] = await db.select().from(settings).where(eq(settings.userId, userId)).limit(1)
if (existing) {
const currentTools = (existing.copilotAutoAllowedTools as string[]) || []
if (!currentTools.includes(toolId)) {
const updatedTools = [...currentTools, toolId]
await db
.update(settings)
.set({
copilotAutoAllowedTools: updatedTools,
updatedAt: new Date(),
})
.where(eq(settings.userId, userId))
logger.info('Added tool to auto-allowed list', { userId, toolId })
return NextResponse.json({ success: true, autoAllowedTools: updatedTools })
}
return NextResponse.json({ success: true, autoAllowedTools: currentTools })
}
await db.insert(settings).values({
id: userId,
userId,
copilotAutoAllowedTools: [toolId],
})
logger.info('Created settings and added tool to auto-allowed list', { userId, toolId })
return NextResponse.json({ success: true, autoAllowedTools: [toolId] })
} catch (error) {
logger.error('Failed to add auto-allowed tool', { error })
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
}
}
/**
* DELETE - Remove a tool from the auto-allowed list
*/
export async function DELETE(request: NextRequest) {
const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly()
if (!isAuthenticated || !userId) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const toolIdFromQuery = new URL(request.url).searchParams.get('toolId') || undefined
const toolIdFromBody = await request
.json()
.then((body) => (typeof body?.toolId === 'string' ? body.toolId : undefined))
.catch(() => undefined)
const toolId = toolIdFromBody || toolIdFromQuery
if (!toolId) {
return NextResponse.json({ error: 'toolId is required' }, { status: 400 })
}
try {
const session = await getSession()
const res = await fetch(`${SIM_AGENT_API_URL}/api/tool-preferences/auto-allowed`, {
method: 'DELETE',
headers: copilotHeaders(),
body: JSON.stringify({
userId,
toolId,
}),
})
if (!session?.user?.id) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
const payload = await res.json().catch(() => ({}))
if (!res.ok) {
logger.warn('Failed to remove auto-allowed tool via copilot backend', {
status: res.status,
userId,
toolId,
})
return NextResponse.json(
{
success: false,
error: payload?.error || 'Failed to remove auto-allowed tool',
autoAllowedTools: [],
},
{ status: res.status }
)
}
const userId = session.user.id
const { searchParams } = new URL(request.url)
const toolId = searchParams.get('toolId')
if (!toolId) {
return NextResponse.json({ error: 'toolId query parameter is required' }, { status: 400 })
}
const [existing] = await db.select().from(settings).where(eq(settings.userId, userId)).limit(1)
if (existing) {
const currentTools = (existing.copilotAutoAllowedTools as string[]) || []
const updatedTools = currentTools.filter((t) => t !== toolId)
await db
.update(settings)
.set({
copilotAutoAllowedTools: updatedTools,
updatedAt: new Date(),
})
.where(eq(settings.userId, userId))
logger.info('Removed tool from auto-allowed list', { userId, toolId })
return NextResponse.json({ success: true, autoAllowedTools: updatedTools })
}
return NextResponse.json({ success: true, autoAllowedTools: [] })
return NextResponse.json({
success: true,
autoAllowedTools: Array.isArray(payload?.autoAllowedTools) ? payload.autoAllowedTools : [],
})
} catch (error) {
logger.error('Failed to remove auto-allowed tool', { error })
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
logger.error('Error removing auto-allowed tool', {
userId,
toolId,
error: error instanceof Error ? error.message : String(error),
})
return NextResponse.json(
{
success: false,
error: 'Failed to remove auto-allowed tool',
autoAllowedTools: [],
},
{ status: 500 }
)
}
}

View File

@@ -113,6 +113,7 @@ const ChatMessageSchema = z.object({
workflowId: z.string().optional(),
knowledgeId: z.string().optional(),
blockId: z.string().optional(),
blockIds: z.array(z.string()).optional(),
templateId: z.string().optional(),
executionId: z.string().optional(),
// For workflow_block, provide both workflowId and blockId
@@ -159,6 +160,20 @@ export async function POST(req: NextRequest) {
commands,
} = ChatMessageSchema.parse(body)
const normalizedContexts = Array.isArray(contexts)
? contexts.map((ctx) => {
if (ctx.kind !== 'blocks') return ctx
if (Array.isArray(ctx.blockIds) && ctx.blockIds.length > 0) return ctx
if (ctx.blockId) {
return {
...ctx,
blockIds: [ctx.blockId],
}
}
return ctx
})
: contexts
// Resolve workflowId - if not provided, use first workflow or find by name
const resolved = await resolveWorkflowIdForUser(
authenticatedUserId,
@@ -176,10 +191,10 @@ export async function POST(req: NextRequest) {
const userMessageIdToUse = userMessageId || crypto.randomUUID()
try {
logger.info(`[${tracker.requestId}] Received chat POST`, {
hasContexts: Array.isArray(contexts),
contextsCount: Array.isArray(contexts) ? contexts.length : 0,
contextsPreview: Array.isArray(contexts)
? contexts.map((c: any) => ({
hasContexts: Array.isArray(normalizedContexts),
contextsCount: Array.isArray(normalizedContexts) ? normalizedContexts.length : 0,
contextsPreview: Array.isArray(normalizedContexts)
? normalizedContexts.map((c: any) => ({
kind: c?.kind,
chatId: c?.chatId,
workflowId: c?.workflowId,
@@ -191,17 +206,25 @@ export async function POST(req: NextRequest) {
} catch {}
// Preprocess contexts server-side
let agentContexts: Array<{ type: string; content: string }> = []
if (Array.isArray(contexts) && contexts.length > 0) {
if (Array.isArray(normalizedContexts) && normalizedContexts.length > 0) {
try {
const { processContextsServer } = await import('@/lib/copilot/process-contents')
const processed = await processContextsServer(contexts as any, authenticatedUserId, message)
const processed = await processContextsServer(
normalizedContexts as any,
authenticatedUserId,
message
)
agentContexts = processed
logger.info(`[${tracker.requestId}] Contexts processed for request`, {
processedCount: agentContexts.length,
kinds: agentContexts.map((c) => c.type),
lengthPreview: agentContexts.map((c) => c.content?.length ?? 0),
})
if (Array.isArray(contexts) && contexts.length > 0 && agentContexts.length === 0) {
if (
Array.isArray(normalizedContexts) &&
normalizedContexts.length > 0 &&
agentContexts.length === 0
) {
logger.warn(
`[${tracker.requestId}] Contexts provided but none processed. Check executionId for logs contexts.`
)
@@ -246,11 +269,13 @@ export async function POST(req: NextRequest) {
mode,
model: selectedModel,
provider,
conversationId: effectiveConversationId,
conversationHistory,
contexts: agentContexts,
fileAttachments,
commands,
chatId: actualChatId,
prefetch,
implicitFeedback,
},
{
@@ -432,10 +457,15 @@ export async function POST(req: NextRequest) {
content: message,
timestamp: new Date().toISOString(),
...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }),
...(Array.isArray(contexts) && contexts.length > 0 && { contexts }),
...(Array.isArray(contexts) &&
contexts.length > 0 && {
contentBlocks: [{ type: 'contexts', contexts: contexts as any, timestamp: Date.now() }],
...(Array.isArray(normalizedContexts) &&
normalizedContexts.length > 0 && {
contexts: normalizedContexts,
}),
...(Array.isArray(normalizedContexts) &&
normalizedContexts.length > 0 && {
contentBlocks: [
{ type: 'contexts', contexts: normalizedContexts as any, timestamp: Date.now() },
],
}),
}

View File

@@ -1,7 +1,11 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { REDIS_TOOL_CALL_PREFIX, REDIS_TOOL_CALL_TTL_SECONDS } from '@/lib/copilot/constants'
import {
REDIS_TOOL_CALL_PREFIX,
REDIS_TOOL_CALL_TTL_SECONDS,
SIM_AGENT_API_URL,
} from '@/lib/copilot/constants'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
@@ -10,6 +14,7 @@ import {
createUnauthorizedResponse,
type NotificationStatus,
} from '@/lib/copilot/request-helpers'
import { env } from '@/lib/core/config/env'
import { getRedisClient } from '@/lib/core/config/redis'
const logger = createLogger('CopilotConfirmAPI')
@@ -21,6 +26,8 @@ const ConfirmationSchema = z.object({
errorMap: () => ({ message: 'Invalid notification status' }),
}),
message: z.string().optional(), // Optional message for background moves or additional context
toolName: z.string().optional(),
remember: z.boolean().optional(),
})
/**
@@ -57,6 +64,44 @@ async function updateToolCallStatus(
}
}
async function saveAutoAllowedToolPreference(userId: string, toolName: string): Promise<boolean> {
const headers: Record<string, string> = {
'Content-Type': 'application/json',
}
if (env.COPILOT_API_KEY) {
headers['x-api-key'] = env.COPILOT_API_KEY
}
try {
const response = await fetch(`${SIM_AGENT_API_URL}/api/tool-preferences/auto-allowed`, {
method: 'POST',
headers,
body: JSON.stringify({
userId,
toolId: toolName,
}),
})
if (!response.ok) {
logger.warn('Failed to persist auto-allowed tool preference', {
userId,
toolName,
status: response.status,
})
return false
}
return true
} catch (error) {
logger.error('Error persisting auto-allowed tool preference', {
userId,
toolName,
error: error instanceof Error ? error.message : String(error),
})
return false
}
}
/**
* POST /api/copilot/confirm
* Update tool call status (Accept/Reject)
@@ -74,7 +119,7 @@ export async function POST(req: NextRequest) {
}
const body = await req.json()
const { toolCallId, status, message } = ConfirmationSchema.parse(body)
const { toolCallId, status, message, toolName, remember } = ConfirmationSchema.parse(body)
// Update the tool call status in Redis
const updated = await updateToolCallStatus(toolCallId, status, message)
@@ -90,14 +135,22 @@ export async function POST(req: NextRequest) {
return createBadRequestResponse('Failed to update tool call status or tool call not found')
}
const duration = tracker.getDuration()
let rememberSaved = false
if (status === 'accepted' && remember === true && toolName && authenticatedUserId) {
rememberSaved = await saveAutoAllowedToolPreference(authenticatedUserId, toolName)
}
return NextResponse.json({
const response: Record<string, unknown> = {
success: true,
message: message || `Tool call ${toolCallId} has been ${status.toLowerCase()}`,
toolCallId,
status,
})
}
if (remember === true) {
response.rememberSaved = rememberSaved
}
return NextResponse.json(response)
} catch (error) {
const duration = tracker.getDuration()

View File

@@ -191,3 +191,84 @@ export async function GET(request: NextRequest) {
)
}
}
// Delete a label from a page
export async function DELETE(request: NextRequest) {
try {
const auth = await checkSessionOrInternalAuth(request)
if (!auth.success || !auth.userId) {
return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
}
const {
domain,
accessToken,
cloudId: providedCloudId,
pageId,
labelName,
} = await request.json()
if (!domain) {
return NextResponse.json({ error: 'Domain is required' }, { status: 400 })
}
if (!accessToken) {
return NextResponse.json({ error: 'Access token is required' }, { status: 400 })
}
if (!pageId) {
return NextResponse.json({ error: 'Page ID is required' }, { status: 400 })
}
if (!labelName) {
return NextResponse.json({ error: 'Label name is required' }, { status: 400 })
}
const pageIdValidation = validateAlphanumericId(pageId, 'pageId', 255)
if (!pageIdValidation.isValid) {
return NextResponse.json({ error: pageIdValidation.error }, { status: 400 })
}
const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken))
const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
if (!cloudIdValidation.isValid) {
return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
}
const encodedLabel = encodeURIComponent(labelName.trim())
const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/rest/api/content/${pageId}/label?name=${encodedLabel}`
const response = await fetch(url, {
method: 'DELETE',
headers: {
Accept: 'application/json',
Authorization: `Bearer ${accessToken}`,
},
})
if (!response.ok) {
const errorData = await response.json().catch(() => null)
logger.error('Confluence API error response:', {
status: response.status,
statusText: response.statusText,
error: JSON.stringify(errorData, null, 2),
})
const errorMessage =
errorData?.message || `Failed to delete Confluence label (${response.status})`
return NextResponse.json({ error: errorMessage }, { status: response.status })
}
return NextResponse.json({
pageId,
labelName,
deleted: true,
})
} catch (error) {
logger.error('Error deleting Confluence label:', error)
return NextResponse.json(
{ error: (error as Error).message || 'Internal server error' },
{ status: 500 }
)
}
}

View File

@@ -0,0 +1,103 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid'
import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation'
import { getConfluenceCloudId } from '@/tools/confluence/utils'
const logger = createLogger('ConfluencePagesByLabelAPI')
export const dynamic = 'force-dynamic'
export async function GET(request: NextRequest) {
try {
const auth = await checkSessionOrInternalAuth(request)
if (!auth.success || !auth.userId) {
return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
}
const { searchParams } = new URL(request.url)
const domain = searchParams.get('domain')
const accessToken = searchParams.get('accessToken')
const labelId = searchParams.get('labelId')
const providedCloudId = searchParams.get('cloudId')
const limit = searchParams.get('limit') || '50'
const cursor = searchParams.get('cursor')
if (!domain) {
return NextResponse.json({ error: 'Domain is required' }, { status: 400 })
}
if (!accessToken) {
return NextResponse.json({ error: 'Access token is required' }, { status: 400 })
}
if (!labelId) {
return NextResponse.json({ error: 'Label ID is required' }, { status: 400 })
}
const labelIdValidation = validateAlphanumericId(labelId, 'labelId', 255)
if (!labelIdValidation.isValid) {
return NextResponse.json({ error: labelIdValidation.error }, { status: 400 })
}
const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken))
const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
if (!cloudIdValidation.isValid) {
return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
}
const queryParams = new URLSearchParams()
queryParams.append('limit', String(Math.min(Number(limit), 250)))
if (cursor) {
queryParams.append('cursor', cursor)
}
const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/labels/${labelId}/pages?${queryParams.toString()}`
const response = await fetch(url, {
method: 'GET',
headers: {
Accept: 'application/json',
Authorization: `Bearer ${accessToken}`,
},
})
if (!response.ok) {
const errorData = await response.json().catch(() => null)
logger.error('Confluence API error response:', {
status: response.status,
statusText: response.statusText,
error: JSON.stringify(errorData, null, 2),
})
const errorMessage = errorData?.message || `Failed to get pages by label (${response.status})`
return NextResponse.json({ error: errorMessage }, { status: response.status })
}
const data = await response.json()
const pages = (data.results || []).map((page: any) => ({
id: page.id,
title: page.title,
status: page.status ?? null,
spaceId: page.spaceId ?? null,
parentId: page.parentId ?? null,
authorId: page.authorId ?? null,
createdAt: page.createdAt ?? null,
version: page.version ?? null,
}))
return NextResponse.json({
pages,
labelId,
nextCursor: data._links?.next
? new URL(data._links.next, 'https://placeholder').searchParams.get('cursor')
: null,
})
} catch (error) {
logger.error('Error getting pages by label:', error)
return NextResponse.json(
{ error: (error as Error).message || 'Internal server error' },
{ status: 500 }
)
}
}

View File

@@ -0,0 +1,98 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid'
import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation'
import { getConfluenceCloudId } from '@/tools/confluence/utils'
const logger = createLogger('ConfluenceSpaceLabelsAPI')
export const dynamic = 'force-dynamic'
export async function GET(request: NextRequest) {
try {
const auth = await checkSessionOrInternalAuth(request)
if (!auth.success || !auth.userId) {
return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
}
const { searchParams } = new URL(request.url)
const domain = searchParams.get('domain')
const accessToken = searchParams.get('accessToken')
const spaceId = searchParams.get('spaceId')
const providedCloudId = searchParams.get('cloudId')
const limit = searchParams.get('limit') || '25'
const cursor = searchParams.get('cursor')
if (!domain) {
return NextResponse.json({ error: 'Domain is required' }, { status: 400 })
}
if (!accessToken) {
return NextResponse.json({ error: 'Access token is required' }, { status: 400 })
}
if (!spaceId) {
return NextResponse.json({ error: 'Space ID is required' }, { status: 400 })
}
const spaceIdValidation = validateAlphanumericId(spaceId, 'spaceId', 255)
if (!spaceIdValidation.isValid) {
return NextResponse.json({ error: spaceIdValidation.error }, { status: 400 })
}
const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken))
const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
if (!cloudIdValidation.isValid) {
return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
}
const queryParams = new URLSearchParams()
queryParams.append('limit', String(Math.min(Number(limit), 250)))
if (cursor) {
queryParams.append('cursor', cursor)
}
const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/spaces/${spaceId}/labels?${queryParams.toString()}`
const response = await fetch(url, {
method: 'GET',
headers: {
Accept: 'application/json',
Authorization: `Bearer ${accessToken}`,
},
})
if (!response.ok) {
const errorData = await response.json().catch(() => null)
logger.error('Confluence API error response:', {
status: response.status,
statusText: response.statusText,
error: JSON.stringify(errorData, null, 2),
})
const errorMessage = errorData?.message || `Failed to list space labels (${response.status})`
return NextResponse.json({ error: errorMessage }, { status: response.status })
}
const data = await response.json()
const labels = (data.results || []).map((label: any) => ({
id: label.id,
name: label.name,
prefix: label.prefix || 'global',
}))
return NextResponse.json({
labels,
spaceId,
nextCursor: data._links?.next
? new URL(data._links.next, 'https://placeholder').searchParams.get('cursor')
: null,
})
} catch (error) {
logger.error('Error listing space labels:', error)
return NextResponse.json(
{ error: (error as Error).message || 'Internal server error' },
{ status: 500 }
)
}
}

View File

@@ -13,9 +13,6 @@ export type CommandId =
| 'goto-logs'
| 'open-search'
| 'run-workflow'
| 'focus-copilot-tab'
| 'focus-toolbar-tab'
| 'focus-editor-tab'
| 'clear-terminal-console'
| 'focus-toolbar-search'
| 'clear-notifications'
@@ -75,21 +72,6 @@ export const COMMAND_DEFINITIONS: Record<CommandId, CommandDefinition> = {
shortcut: 'Mod+Enter',
allowInEditable: false,
},
'focus-copilot-tab': {
id: 'focus-copilot-tab',
shortcut: 'C',
allowInEditable: false,
},
'focus-toolbar-tab': {
id: 'focus-toolbar-tab',
shortcut: 'T',
allowInEditable: false,
},
'focus-editor-tab': {
id: 'focus-editor-tab',
shortcut: 'E',
allowInEditable: false,
},
'clear-terminal-console': {
id: 'clear-terminal-console',
shortcut: 'Mod+D',

View File

@@ -14,6 +14,15 @@ const logger = createLogger('DiffControls')
const NOTIFICATION_WIDTH = 240
const NOTIFICATION_GAP = 16
function isWorkflowEditToolCall(name?: string, params?: Record<string, unknown>): boolean {
if (name === 'edit_workflow') return true
if (name !== 'workflow_change') return false
const mode = typeof params?.mode === 'string' ? params.mode.toLowerCase() : ''
if (mode === 'apply') return true
return typeof params?.proposalId === 'string' && params.proposalId.length > 0
}
export const DiffControls = memo(function DiffControls() {
const isTerminalResizing = useTerminalStore((state) => state.isResizing)
const isPanelResizing = usePanelStore((state) => state.isResizing)
@@ -64,7 +73,7 @@ export const DiffControls = memo(function DiffControls() {
const b = blocks[bi]
if (b?.type === 'tool_call') {
const tn = b.toolCall?.name
if (tn === 'edit_workflow') {
if (isWorkflowEditToolCall(tn, b.toolCall?.params)) {
id = b.toolCall?.id
break outer
}
@@ -72,7 +81,9 @@ export const DiffControls = memo(function DiffControls() {
}
}
if (!id) {
const candidates = Object.values(toolCallsById).filter((t) => t.name === 'edit_workflow')
const candidates = Object.values(toolCallsById).filter((t) =>
isWorkflowEditToolCall(t.name, t.params)
)
id = candidates.length ? candidates[candidates.length - 1].id : undefined
}
if (id) updatePreviewToolCallState('accepted', id)
@@ -102,7 +113,7 @@ export const DiffControls = memo(function DiffControls() {
const b = blocks[bi]
if (b?.type === 'tool_call') {
const tn = b.toolCall?.name
if (tn === 'edit_workflow') {
if (isWorkflowEditToolCall(tn, b.toolCall?.params)) {
id = b.toolCall?.id
break outer
}
@@ -110,7 +121,9 @@ export const DiffControls = memo(function DiffControls() {
}
}
if (!id) {
const candidates = Object.values(toolCallsById).filter((t) => t.name === 'edit_workflow')
const candidates = Object.values(toolCallsById).filter((t) =>
isWorkflowEditToolCall(t.name, t.params)
)
id = candidates.length ? candidates[candidates.length - 1].id : undefined
}
if (id) updatePreviewToolCallState('rejected', id)

View File

@@ -47,6 +47,28 @@ interface ParsedTags {
cleanContent: string
}
function getToolCallParams(toolCall?: CopilotToolCall): Record<string, unknown> {
const candidate = ((toolCall as any)?.parameters ||
(toolCall as any)?.input ||
(toolCall as any)?.params ||
{}) as Record<string, unknown>
return candidate && typeof candidate === 'object' ? candidate : {}
}
function isWorkflowChangeApplyMode(toolCall?: CopilotToolCall): boolean {
if (!toolCall || toolCall.name !== 'workflow_change') return false
const params = getToolCallParams(toolCall)
const mode = typeof params.mode === 'string' ? params.mode.toLowerCase() : ''
if (mode === 'apply') return true
return typeof params.proposalId === 'string' && params.proposalId.length > 0
}
function isWorkflowEditSummaryTool(toolCall?: CopilotToolCall): boolean {
if (!toolCall) return false
if (toolCall.name === 'edit_workflow') return true
return isWorkflowChangeApplyMode(toolCall)
}
/**
* Extracts plan steps from plan_respond tool calls in subagent blocks.
* @param blocks - The subagent content blocks to search
@@ -871,7 +893,10 @@ const SubagentContentRenderer = memo(function SubagentContentRenderer({
)
}
if (segment.type === 'tool' && segment.block.toolCall) {
if (toolCall.name === 'edit' && segment.block.toolCall.name === 'edit_workflow') {
if (
(toolCall.name === 'edit' || toolCall.name === 'build') &&
isWorkflowEditSummaryTool(segment.block.toolCall)
) {
return (
<div key={`tool-${segment.block.toolCall.id || index}`}>
<WorkflowEditSummary toolCall={segment.block.toolCall} />
@@ -968,12 +993,11 @@ const WorkflowEditSummary = memo(function WorkflowEditSummary({
}
}, [blocks])
if (toolCall.name !== 'edit_workflow') {
if (!isWorkflowEditSummaryTool(toolCall)) {
return null
}
const params =
(toolCall as any).parameters || (toolCall as any).input || (toolCall as any).params || {}
const params = getToolCallParams(toolCall)
let operations = Array.isArray(params.operations) ? params.operations : []
if (operations.length === 0 && Array.isArray((toolCall as any).operations)) {
@@ -1219,11 +1243,6 @@ const WorkflowEditSummary = memo(function WorkflowEditSummary({
)
})
/** Checks if a tool is server-side executed (not a client tool) */
function isIntegrationTool(toolName: string): boolean {
return !TOOL_DISPLAY_REGISTRY[toolName]
}
function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean {
if (!toolCall.name || toolCall.name === 'unknown_tool') {
return false
@@ -1233,59 +1252,96 @@ function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean {
return false
}
// Never show buttons for tools the user has marked as always-allowed
if (useCopilotStore.getState().isToolAutoAllowed(toolCall.name)) {
if (toolCall.ui?.showInterrupt !== true) {
return false
}
const hasInterrupt = !!TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.interrupt
if (hasInterrupt) {
return true
}
// Integration tools (user-installed) always require approval
if (isIntegrationTool(toolCall.name)) {
return true
}
return false
return true
}
const toolCallLogger = createLogger('CopilotToolCall')
async function sendToolDecision(
toolCallId: string,
status: 'accepted' | 'rejected' | 'background'
status: 'accepted' | 'rejected' | 'background',
options?: {
toolName?: string
remember?: boolean
}
) {
try {
await fetch('/api/copilot/confirm', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ toolCallId, status }),
body: JSON.stringify({
toolCallId,
status,
...(options?.toolName ? { toolName: options.toolName } : {}),
...(options?.remember ? { remember: true } : {}),
}),
})
} catch (error) {
toolCallLogger.warn('Failed to send tool decision', {
toolCallId,
status,
remember: options?.remember === true,
toolName: options?.toolName,
error: error instanceof Error ? error.message : String(error),
})
}
}
async function removeAutoAllowedToolPreference(toolName: string): Promise<boolean> {
try {
const response = await fetch(`/api/copilot/auto-allowed-tools?toolId=${encodeURIComponent(toolName)}`, {
method: 'DELETE',
})
return response.ok
} catch (error) {
toolCallLogger.warn('Failed to remove auto-allowed tool preference', {
toolName,
error: error instanceof Error ? error.message : String(error),
})
return false
}
}
type ToolUiAction = NonNullable<NonNullable<CopilotToolCall['ui']>['actions']>[number]
function actionDecision(action: ToolUiAction): 'accepted' | 'rejected' | 'background' {
const id = action.id.toLowerCase()
if (id.includes('background')) return 'background'
if (action.kind === 'reject') return 'rejected'
return 'accepted'
}
function isClientRunCapability(toolCall: CopilotToolCall): boolean {
if (toolCall.execution?.target === 'sim_client_capability') {
return toolCall.execution.capabilityId === 'workflow.run' || !toolCall.execution.capabilityId
}
return CLIENT_EXECUTABLE_RUN_TOOLS.has(toolCall.name)
}
async function handleRun(
toolCall: CopilotToolCall,
setToolCallState: any,
onStateChange?: any,
editedParams?: any
editedParams?: any,
options?: {
remember?: boolean
}
) {
setToolCallState(toolCall, 'executing', editedParams ? { params: editedParams } : undefined)
onStateChange?.('executing')
await sendToolDecision(toolCall.id, 'accepted')
await sendToolDecision(toolCall.id, 'accepted', {
toolName: toolCall.name,
remember: options?.remember === true,
})
// Client-executable run tools: execute on the client for real-time feedback
// (block pulsing, console logs, stop button). The server defers execution
// for these tools; the client reports back via mark-complete.
if (CLIENT_EXECUTABLE_RUN_TOOLS.has(toolCall.name)) {
if (isClientRunCapability(toolCall)) {
const params = editedParams || toolCall.params || {}
executeRunToolOnClient(toolCall.id, toolCall.name, params)
}
@@ -1298,6 +1354,9 @@ async function handleSkip(toolCall: CopilotToolCall, setToolCallState: any, onSt
}
function getDisplayName(toolCall: CopilotToolCall): string {
if (toolCall.ui?.phaseLabel) return toolCall.ui.phaseLabel
if (toolCall.ui?.title) return `${getStateVerb(toolCall.state)} ${toolCall.ui.title}`
const fromStore = (toolCall as any).display?.text
if (fromStore) return fromStore
const registryEntry = TOOL_DISPLAY_REGISTRY[toolCall.name]
@@ -1342,53 +1401,37 @@ function RunSkipButtons({
toolCall,
onStateChange,
editedParams,
actions,
}: {
toolCall: CopilotToolCall
onStateChange?: (state: any) => void
editedParams?: any
actions: ToolUiAction[]
}) {
const [isProcessing, setIsProcessing] = useState(false)
const [buttonsHidden, setButtonsHidden] = useState(false)
const actionInProgressRef = useRef(false)
const { setToolCallState, addAutoAllowedTool } = useCopilotStore()
const { setToolCallState } = useCopilotStore()
const onRun = async () => {
const onAction = async (action: ToolUiAction) => {
// Prevent race condition - check ref synchronously
if (actionInProgressRef.current) return
actionInProgressRef.current = true
setIsProcessing(true)
setButtonsHidden(true)
try {
await handleRun(toolCall, setToolCallState, onStateChange, editedParams)
} finally {
setIsProcessing(false)
actionInProgressRef.current = false
}
}
const onAlwaysAllow = async () => {
// Prevent race condition - check ref synchronously
if (actionInProgressRef.current) return
actionInProgressRef.current = true
setIsProcessing(true)
setButtonsHidden(true)
try {
await addAutoAllowedTool(toolCall.name)
await handleRun(toolCall, setToolCallState, onStateChange, editedParams)
} finally {
setIsProcessing(false)
actionInProgressRef.current = false
}
}
const onSkip = async () => {
// Prevent race condition - check ref synchronously
if (actionInProgressRef.current) return
actionInProgressRef.current = true
setIsProcessing(true)
setButtonsHidden(true)
try {
await handleSkip(toolCall, setToolCallState, onStateChange)
const decision = actionDecision(action)
if (decision === 'accepted') {
await handleRun(toolCall, setToolCallState, onStateChange, editedParams, {
remember: action.remember === true,
})
} else if (decision === 'rejected') {
await handleSkip(toolCall, setToolCallState, onStateChange)
} else {
setToolCallState(toolCall, ClientToolCallState.background)
onStateChange?.('background')
await sendToolDecision(toolCall.id, 'background')
}
} finally {
setIsProcessing(false)
actionInProgressRef.current = false
@@ -1397,23 +1440,22 @@ function RunSkipButtons({
if (buttonsHidden) return null
// Show "Always Allow" for all tools that require confirmation
const showAlwaysAllow = true
// Standardized buttons for all interrupt tools: Allow, Always Allow, Skip
return (
<div className='mt-[10px] flex gap-[6px]'>
<Button onClick={onRun} disabled={isProcessing} variant='tertiary'>
{isProcessing ? 'Allowing...' : 'Allow'}
</Button>
{showAlwaysAllow && (
<Button onClick={onAlwaysAllow} disabled={isProcessing} variant='default'>
{isProcessing ? 'Allowing...' : 'Always Allow'}
</Button>
)}
<Button onClick={onSkip} disabled={isProcessing} variant='default'>
Skip
</Button>
{actions.map((action, index) => {
const variant =
action.kind === 'reject' ? 'default' : action.remember ? 'default' : 'tertiary'
return (
<Button
key={action.id}
onClick={() => onAction(action)}
disabled={isProcessing}
variant={variant}
>
{isProcessing && index === 0 ? 'Working...' : action.label}
</Button>
)
})}
</div>
)
}
@@ -1430,10 +1472,16 @@ export function ToolCall({
const liveToolCall = useCopilotStore((s) =>
effectiveId ? s.toolCallsById[effectiveId] : undefined
)
const toolCall = liveToolCall || toolCallProp
// Guard: nothing to render without a toolCall
if (!toolCall) return null
const rawToolCall = liveToolCall || toolCallProp
const hasRealToolCall = !!rawToolCall
const toolCall: CopilotToolCall =
rawToolCall ||
({
id: effectiveId || '',
name: '',
state: ClientToolCallState.generating,
params: {},
} as CopilotToolCall)
const isExpandablePending =
toolCall?.state === 'pending' &&
@@ -1441,17 +1489,15 @@ export function ToolCall({
const [expanded, setExpanded] = useState(isExpandablePending)
const [showRemoveAutoAllow, setShowRemoveAutoAllow] = useState(false)
const [autoAllowRemovedForCall, setAutoAllowRemovedForCall] = useState(false)
// State for editable parameters
const params = (toolCall as any).parameters || (toolCall as any).input || toolCall.params || {}
const [editedParams, setEditedParams] = useState(params)
const paramsRef = useRef(params)
// Check if this integration tool is auto-allowed
const { removeAutoAllowedTool, setToolCallState } = useCopilotStore()
const isAutoAllowed = useCopilotStore(
(s) => isIntegrationTool(toolCall.name) && s.isToolAutoAllowed(toolCall.name)
)
const { setToolCallState } = useCopilotStore()
const isAutoAllowed = toolCall.ui?.autoAllowed === true && !autoAllowRemovedForCall
// Update edited params when toolCall params change (deep comparison to avoid resetting user edits on ref change)
useEffect(() => {
@@ -1461,6 +1507,14 @@ export function ToolCall({
}
}, [params])
useEffect(() => {
setAutoAllowRemovedForCall(false)
setShowRemoveAutoAllow(false)
}, [toolCall.id])
// Guard: nothing to render without a toolCall
if (!hasRealToolCall) return null
// Skip rendering some internal tools
if (
toolCall.name === 'checkoff_todo' ||
@@ -1472,7 +1526,9 @@ export function ToolCall({
return null
// Special rendering for subagent tools - show as thinking text with tool calls at top level
const isSubagentTool = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.subagent === true
const isSubagentTool =
toolCall.execution?.target === 'go_subagent' ||
TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.subagent === true
// For ALL subagent tools, don't show anything until we have blocks with content
if (isSubagentTool) {
@@ -1499,28 +1555,6 @@ export function ToolCall({
)
}
// Get current mode from store to determine if we should render integration tools
const mode = useCopilotStore.getState().mode
// Check if this is a completed/historical tool call (not pending/executing)
// Use string comparison to handle both enum values and string values from DB
const stateStr = String(toolCall.state)
const isCompletedToolCall =
stateStr === 'success' ||
stateStr === 'error' ||
stateStr === 'rejected' ||
stateStr === 'aborted'
// Allow rendering if:
// 1. Tool is in TOOL_DISPLAY_REGISTRY (client tools), OR
// 2. We're in build mode (integration tools are executed server-side), OR
// 3. Tool call is already completed (historical - should always render)
const isClientTool = !!TOOL_DISPLAY_REGISTRY[toolCall.name]
const isIntegrationToolInBuildMode = mode === 'build' && !isClientTool
if (!isClientTool && !isIntegrationToolInBuildMode && !isCompletedToolCall) {
return null
}
const toolUIConfig = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig
// Check if tool has params table config (meaning it's expandable)
const hasParamsTable = !!toolUIConfig?.paramsTable
@@ -1530,6 +1564,14 @@ export function ToolCall({
toolCall.name === 'make_api_request' ||
toolCall.name === 'set_global_workflow_variables'
const interruptActions =
(toolCall.ui?.actions && toolCall.ui.actions.length > 0
? toolCall.ui.actions
: [
{ id: 'allow_once', label: 'Allow', kind: 'accept' as const },
{ id: 'allow_always', label: 'Always Allow', kind: 'accept' as const, remember: true },
{ id: 'reject', label: 'Skip', kind: 'reject' as const },
]) as ToolUiAction[]
const showButtons = isCurrentMessage && shouldShowRunSkipButtons(toolCall)
// Check UI config for secondary action - only show for current message tool calls
@@ -1987,9 +2029,12 @@ export function ToolCall({
<div className='mt-[10px]'>
<Button
onClick={async () => {
await removeAutoAllowedTool(toolCall.name)
setShowRemoveAutoAllow(false)
forceUpdate({})
const removed = await removeAutoAllowedToolPreference(toolCall.name)
if (removed) {
setAutoAllowRemovedForCall(true)
setShowRemoveAutoAllow(false)
forceUpdate({})
}
}}
variant='default'
className='text-xs'
@@ -2003,6 +2048,7 @@ export function ToolCall({
toolCall={toolCall}
onStateChange={handleStateChange}
editedParams={editedParams}
actions={interruptActions}
/>
)}
{/* Render subagent content as thinking text */}
@@ -2048,9 +2094,12 @@ export function ToolCall({
<div className='mt-[10px]'>
<Button
onClick={async () => {
await removeAutoAllowedTool(toolCall.name)
setShowRemoveAutoAllow(false)
forceUpdate({})
const removed = await removeAutoAllowedToolPreference(toolCall.name)
if (removed) {
setAutoAllowRemovedForCall(true)
setShowRemoveAutoAllow(false)
forceUpdate({})
}
}}
variant='default'
className='text-xs'
@@ -2064,6 +2113,7 @@ export function ToolCall({
toolCall={toolCall}
onStateChange={handleStateChange}
editedParams={editedParams}
actions={interruptActions}
/>
)}
{/* Render subagent content as thinking text */}
@@ -2087,7 +2137,7 @@ export function ToolCall({
}
}
const isEditWorkflow = toolCall.name === 'edit_workflow'
const isEditWorkflow = isWorkflowEditSummaryTool(toolCall)
const shouldShowDetails = isRunWorkflow || (isExpandableTool && expanded)
const hasOperations = Array.isArray(params.operations) && params.operations.length > 0
const hideTextForEditWorkflow = isEditWorkflow && hasOperations
@@ -2109,9 +2159,12 @@ export function ToolCall({
<div className='mt-[10px]'>
<Button
onClick={async () => {
await removeAutoAllowedTool(toolCall.name)
setShowRemoveAutoAllow(false)
forceUpdate({})
const removed = await removeAutoAllowedToolPreference(toolCall.name)
if (removed) {
setAutoAllowRemovedForCall(true)
setShowRemoveAutoAllow(false)
forceUpdate({})
}
}}
variant='default'
className='text-xs'
@@ -2125,6 +2178,7 @@ export function ToolCall({
toolCall={toolCall}
onStateChange={handleStateChange}
editedParams={editedParams}
actions={interruptActions}
/>
) : showMoveToBackground ? (
<div className='mt-[10px]'>
@@ -2155,7 +2209,7 @@ export function ToolCall({
</Button>
</div>
) : null}
{/* Workflow edit summary - shows block changes after edit_workflow completes */}
{/* Workflow edit summary - shows block changes after edit_workflow/workflow_change(apply) */}
<WorkflowEditSummary toolCall={toolCall} />
{/* Render subagent content as thinking text */}

View File

@@ -113,7 +113,6 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
clearPlanArtifact,
savePlanArtifact,
loadAvailableModels,
loadAutoAllowedTools,
resumeActiveStream,
} = useCopilotStore()
@@ -125,8 +124,6 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
setCopilotWorkflowId,
loadChats,
loadAvailableModels,
loadAutoAllowedTools,
currentChat,
isSendingMessage,
resumeActiveStream,
})

View File

@@ -12,8 +12,6 @@ interface UseCopilotInitializationProps {
setCopilotWorkflowId: (workflowId: string | null) => Promise<void>
loadChats: (forceRefresh?: boolean) => Promise<void>
loadAvailableModels: () => Promise<void>
loadAutoAllowedTools: () => Promise<void>
currentChat: any
isSendingMessage: boolean
resumeActiveStream: () => Promise<boolean>
}
@@ -32,8 +30,6 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) {
setCopilotWorkflowId,
loadChats,
loadAvailableModels,
loadAutoAllowedTools,
currentChat,
isSendingMessage,
resumeActiveStream,
} = props
@@ -120,17 +116,6 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) {
})
}, [isSendingMessage, resumeActiveStream])
/** Load auto-allowed tools once on mount - runs immediately, independent of workflow */
const hasLoadedAutoAllowedToolsRef = useRef(false)
useEffect(() => {
if (!hasLoadedAutoAllowedToolsRef.current) {
hasLoadedAutoAllowedToolsRef.current = true
loadAutoAllowedTools().catch((err) => {
logger.warn('[Copilot] Failed to load auto-allowed tools', err)
})
}
}, [loadAutoAllowedTools])
/** Load available models once on mount */
const hasLoadedModelsRef = useRef(false)
useEffect(() => {

View File

@@ -340,13 +340,7 @@ export const Panel = memo(function Panel() {
* Register global keyboard shortcuts using the central commands registry.
*
* - Mod+Enter: Run / cancel workflow (matches the Run button behavior)
* - C: Focus Copilot tab
* - T: Focus Toolbar tab
* - E: Focus Editor tab
* - Mod+F: Focus Toolbar tab and search input
*
* The tab-switching commands are disabled inside editable elements so typing
* in inputs or textareas is not interrupted.
*/
useRegisterGlobalCommands(() =>
createCommands([
@@ -363,33 +357,6 @@ export const Panel = memo(function Panel() {
allowInEditable: false,
},
},
{
id: 'focus-copilot-tab',
handler: () => {
setActiveTab('copilot')
},
overrides: {
allowInEditable: false,
},
},
{
id: 'focus-toolbar-tab',
handler: () => {
setActiveTab('toolbar')
},
overrides: {
allowInEditable: false,
},
},
{
id: 'focus-editor-tab',
handler: () => {
setActiveTab('editor')
},
overrides: {
allowInEditable: false,
},
},
{
id: 'focus-toolbar-search',
handler: () => {

View File

@@ -589,6 +589,7 @@ export async function executeScheduleJob(payload: ScheduleExecutionPayload) {
export const scheduleExecution = task({
id: 'schedule-execution',
machine: 'medium-1x',
retry: {
maxAttempts: 1,
},

View File

@@ -669,6 +669,7 @@ async function executeWebhookJobInternal(
export const webhookExecution = task({
id: 'webhook-execution',
machine: 'medium-1x',
retry: {
maxAttempts: 1,
},

View File

@@ -197,5 +197,6 @@ export async function executeWorkflowJob(payload: WorkflowExecutionPayload) {
export const workflowExecutionTask = task({
id: 'workflow-execution',
machine: 'medium-1x',
run: executeWorkflowJob,
})

View File

@@ -394,6 +394,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
// Page Property Operations
{ label: 'List Page Properties', id: 'list_page_properties' },
{ label: 'Create Page Property', id: 'create_page_property' },
{ label: 'Delete Page Property', id: 'delete_page_property' },
// Search Operations
{ label: 'Search Content', id: 'search' },
{ label: 'Search in Space', id: 'search_in_space' },
@@ -414,6 +415,9 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
// Label Operations
{ label: 'List Labels', id: 'list_labels' },
{ label: 'Add Label', id: 'add_label' },
{ label: 'Delete Label', id: 'delete_label' },
{ label: 'Get Pages by Label', id: 'get_pages_by_label' },
{ label: 'List Space Labels', id: 'list_space_labels' },
// Space Operations
{ label: 'Get Space', id: 'get_space' },
{ label: 'List Spaces', id: 'list_spaces' },
@@ -485,6 +489,8 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
'search_in_space',
'get_space',
'list_spaces',
'get_pages_by_label',
'list_space_labels',
],
not: true,
},
@@ -500,6 +506,8 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
'list_labels',
'upload_attachment',
'add_label',
'delete_label',
'delete_page_property',
'get_page_children',
'get_page_ancestors',
'list_page_versions',
@@ -527,6 +535,8 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
'search_in_space',
'get_space',
'list_spaces',
'get_pages_by_label',
'list_space_labels',
],
not: true,
},
@@ -542,6 +552,8 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
'list_labels',
'upload_attachment',
'add_label',
'delete_label',
'delete_page_property',
'get_page_children',
'get_page_ancestors',
'list_page_versions',
@@ -566,6 +578,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
'search_in_space',
'create_blogpost',
'list_blogposts_in_space',
'list_space_labels',
],
},
},
@@ -601,6 +614,14 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
required: true,
condition: { field: 'operation', value: 'create_page_property' },
},
{
id: 'propertyId',
title: 'Property ID',
type: 'short-input',
placeholder: 'Enter property ID to delete',
required: true,
condition: { field: 'operation', value: 'delete_page_property' },
},
{
id: 'title',
title: 'Title',
@@ -694,7 +715,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
type: 'short-input',
placeholder: 'Enter label name',
required: true,
condition: { field: 'operation', value: 'add_label' },
condition: { field: 'operation', value: ['add_label', 'delete_label'] },
},
{
id: 'labelPrefix',
@@ -709,6 +730,14 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
value: () => 'global',
condition: { field: 'operation', value: 'add_label' },
},
{
id: 'labelId',
title: 'Label ID',
type: 'short-input',
placeholder: 'Enter label ID',
required: true,
condition: { field: 'operation', value: 'get_pages_by_label' },
},
{
id: 'blogPostStatus',
title: 'Status',
@@ -759,6 +788,8 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
'list_page_versions',
'list_page_properties',
'list_labels',
'get_pages_by_label',
'list_space_labels',
],
},
},
@@ -780,6 +811,8 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
'list_page_versions',
'list_page_properties',
'list_labels',
'get_pages_by_label',
'list_space_labels',
],
},
},
@@ -800,6 +833,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
// Property Tools
'confluence_list_page_properties',
'confluence_create_page_property',
'confluence_delete_page_property',
// Search Tools
'confluence_search',
'confluence_search_in_space',
@@ -820,6 +854,9 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
// Label Tools
'confluence_list_labels',
'confluence_add_label',
'confluence_delete_label',
'confluence_get_pages_by_label',
'confluence_list_space_labels',
// Space Tools
'confluence_get_space',
'confluence_list_spaces',
@@ -852,6 +889,8 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
return 'confluence_list_page_properties'
case 'create_page_property':
return 'confluence_create_page_property'
case 'delete_page_property':
return 'confluence_delete_page_property'
// Search Operations
case 'search':
return 'confluence_search'
@@ -887,6 +926,12 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
return 'confluence_list_labels'
case 'add_label':
return 'confluence_add_label'
case 'delete_label':
return 'confluence_delete_label'
case 'get_pages_by_label':
return 'confluence_get_pages_by_label'
case 'list_space_labels':
return 'confluence_list_space_labels'
// Space Operations
case 'get_space':
return 'confluence_get_space'
@@ -908,7 +953,9 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
versionNumber,
propertyKey,
propertyValue,
propertyId,
labelPrefix,
labelId,
blogPostStatus,
purge,
bodyFormat,
@@ -959,7 +1006,9 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
}
}
// Operations that support cursor pagination
// Operations that support generic cursor pagination.
// get_pages_by_label and list_space_labels have dedicated handlers
// below that pass cursor along with their required params (labelId, spaceId).
const supportsCursor = [
'list_attachments',
'list_spaces',
@@ -996,6 +1045,35 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
}
}
if (operation === 'delete_page_property') {
return {
credential,
pageId: effectivePageId,
operation,
propertyId,
...rest,
}
}
if (operation === 'get_pages_by_label') {
return {
credential,
operation,
labelId,
cursor: cursor || undefined,
...rest,
}
}
if (operation === 'list_space_labels') {
return {
credential,
operation,
cursor: cursor || undefined,
...rest,
}
}
if (operation === 'upload_attachment') {
const normalizedFile = normalizeFileInput(attachmentFile, { single: true })
if (!normalizedFile) {
@@ -1044,7 +1122,9 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
attachmentFileName: { type: 'string', description: 'Custom file name for attachment' },
attachmentComment: { type: 'string', description: 'Comment for the attachment' },
labelName: { type: 'string', description: 'Label name' },
labelId: { type: 'string', description: 'Label identifier' },
labelPrefix: { type: 'string', description: 'Label prefix (global, my, team, system)' },
propertyId: { type: 'string', description: 'Property identifier' },
blogPostStatus: { type: 'string', description: 'Blog post status (current or draft)' },
purge: { type: 'boolean', description: 'Permanently delete instead of moving to trash' },
bodyFormat: { type: 'string', description: 'Body format for comments' },
@@ -1080,6 +1160,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
// Label Results
labels: { type: 'array', description: 'List of labels' },
labelName: { type: 'string', description: 'Label name' },
labelId: { type: 'string', description: 'Label identifier' },
// Space Results
spaces: { type: 'array', description: 'List of spaces' },
spaceId: { type: 'string', description: 'Space identifier' },
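A minimal sketch of the request params the new get_pages_by_label branch above produces before the tool request is issued; the credential and label values are hypothetical, and cursor stays undefined until a follow-up page is requested:

const exampleGetPagesByLabelParams = {
  credential: 'confluence-oauth-credential-id', // hypothetical credential reference
  operation: 'get_pages_by_label',              // resolved to the confluence_get_pages_by_label tool
  labelId: '12345',                             // from the new Label ID field
  cursor: undefined,                            // populated only when paginating
}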

View File

@@ -1,90 +0,0 @@
---
slug: workflow-bench
title: 'Introducing Workflow Bench - Benchmarking Natural Language Workflow Building'
description: 'How we built a benchmark to measure how well AI models translate natural language instructions into executable workflows, and what we learned along the way'
date: 2026-02-11
updated: 2026-02-11
authors:
- sid
readingTime: 10
tags: [Benchmark, Evaluation, Workflows, Natural Language]
ogImage: /studio/workflow-bench/cover.png
ogAlt: 'Workflow Bench benchmark overview'
about: ['Benchmarking', 'Workflow Building', 'Natural Language']
timeRequired: PT10M
canonical: https://sim.ai/studio/workflow-bench
featured: false
draft: true
---
Building workflows from natural language sounds straightforward until you try to measure it. When a user says "send me a Slack message every morning with a summary of my unread emails," how do you evaluate whether the resulting workflow is correct? Is partial credit fair? What about workflows that are functionally equivalent but structurally different?
We built Workflow Bench to answer these questions. This post covers why we needed a dedicated benchmark, how we designed it, and what the results tell us about the current state of natural language workflow building.
## Why a Workflow Benchmark?
<!-- TODO: Motivation for building Workflow Bench -->
<!-- - Gap in existing benchmarks (code gen benchmarks don't capture workflow semantics) -->
<!-- - Need to track progress as we iterate on the copilot / natural language builder -->
<!-- - Workflows are structured artifacts, not just code — they have topology, block types, connections, configs -->
## What We're Measuring
<!-- TODO: Define the core evaluation dimensions -->
<!-- - Structural correctness (right blocks, right connections) -->
<!-- - Configuration accuracy (correct params, API mappings) -->
<!-- - Functional equivalence (does it do the same thing even if shaped differently?) -->
<!-- - Edge cases: loops, conditionals, parallel branches, error handling -->
## Benchmark Design
<!-- TODO: How the benchmark dataset is constructed -->
<!-- - Task categories and complexity tiers -->
<!-- - How ground truth workflows are defined -->
<!-- - Natural language prompt variations (terse vs. detailed, ambiguous vs. precise) -->
### Task Categories
<!-- TODO: Break down the types of workflows in the benchmark -->
<!-- - Simple linear (A → B → C) -->
<!-- - Branching / conditional -->
<!-- - Looping / iterative -->
<!-- - Parallel fan-out / fan-in -->
<!-- - Multi-trigger -->
### Scoring
<!-- TODO: Explain the scoring methodology -->
<!-- - How partial credit works -->
<!-- - Structural similarity metrics -->
<!-- - Config-level accuracy -->
<!-- - Overall composite score -->
## Evaluation Pipeline
<!-- TODO: How we run the benchmark end to end -->
<!-- - Prompt → model → workflow JSON → evaluator → score -->
<!-- - Automation and reproducibility -->
<!-- - How we handle non-determinism across runs -->
## Results
<!-- TODO: Present the benchmark results -->
<!-- - Model comparisons -->
<!-- - Performance by task category -->
<!-- - Where models struggle most -->
<!-- - Trends over time as we iterate -->
## What We Learned
<!-- TODO: Key takeaways from running the benchmark -->
<!-- - Surprising strengths and weaknesses -->
<!-- - How benchmark results influenced product decisions -->
<!-- - Common failure modes -->
## What's Next
<!-- TODO: Future directions -->
<!-- - Expanding the benchmark (more tasks, more complexity) -->
<!-- - Community contributions / open-sourcing -->
<!-- - Using the benchmark to guide copilot improvements -->

View File

@@ -20,6 +20,8 @@ export interface BuildPayloadParams {
fileAttachments?: Array<{ id: string; key: string; size: number; [key: string]: unknown }>
commands?: string[]
chatId?: string
conversationId?: string
prefetch?: boolean
implicitFeedback?: string
}
@@ -64,6 +66,10 @@ export async function buildCopilotRequestPayload(
fileAttachments,
commands,
chatId,
conversationId,
prefetch,
conversationHistory,
implicitFeedback,
} = params
const selectedModel = options.selectedModel
@@ -154,6 +160,12 @@ export async function buildCopilotRequestPayload(
version: SIM_AGENT_VERSION,
...(contexts && contexts.length > 0 ? { context: contexts } : {}),
...(chatId ? { chatId } : {}),
...(conversationId ? { conversationId } : {}),
...(Array.isArray(conversationHistory) && conversationHistory.length > 0
? { conversationHistory }
: {}),
...(typeof prefetch === 'boolean' ? { prefetch } : {}),
...(implicitFeedback ? { implicitFeedback } : {}),
...(processedFileContents.length > 0 ? { fileAttachments: processedFileContents } : {}),
...(integrationTools.length > 0 ? { integrationTools } : {}),
...(credentials ? { credentials } : {}),
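A minimal sketch of the conditional-spread pattern the payload builder relies on above: optional fields such as conversationId, prefetch, and conversationHistory are spread into the payload only when they carry a usable value. The names mirror the builder, but the values below are illustrative:

const conversationId: string | undefined = undefined
const prefetch: boolean | undefined = true
const payload = {
  version: '1.0.0', // illustrative stand-in for SIM_AGENT_VERSION
  ...(conversationId ? { conversationId } : {}),
  ...(typeof prefetch === 'boolean' ? { prefetch } : {}),
}
// -> { version: '1.0.0', prefetch: true }; conversationId is omitted because it is undefined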

View File

@@ -1,5 +1,5 @@
import { createLogger } from '@sim/logger'
import { COPILOT_CONFIRM_API_PATH, STREAM_STORAGE_KEY } from '@/lib/copilot/constants'
import { STREAM_STORAGE_KEY } from '@/lib/copilot/constants'
import { asRecord } from '@/lib/copilot/orchestrator/sse-utils'
import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
import {
@@ -26,21 +26,119 @@ const MAX_BATCH_INTERVAL = 50
const MIN_BATCH_INTERVAL = 16
const MAX_QUEUE_SIZE = 5
/**
* Send an auto-accept confirmation to the server for auto-allowed tools.
* The server-side orchestrator polls Redis for this decision.
*/
export function sendAutoAcceptConfirmation(toolCallId: string): void {
fetch(COPILOT_CONFIRM_API_PATH, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ toolCallId, status: 'accepted' }),
}).catch((error) => {
logger.warn('Failed to send auto-accept confirmation', {
toolCallId,
error: error instanceof Error ? error.message : String(error),
})
})
function isWorkflowEditToolCall(toolName?: string, params?: Record<string, unknown>): boolean {
if (toolName === 'edit_workflow') return true
if (toolName !== 'workflow_change') return false
const mode = typeof params?.mode === 'string' ? params.mode.toLowerCase() : ''
if (mode === 'apply') return true
return typeof params?.proposalId === 'string' && params.proposalId.length > 0
}
function isClientRunCapability(toolCall: CopilotToolCall): boolean {
if (toolCall.execution?.target === 'sim_client_capability') {
return toolCall.execution.capabilityId === 'workflow.run' || !toolCall.execution.capabilityId
}
return CLIENT_EXECUTABLE_RUN_TOOLS.has(toolCall.name)
}
function mapServerStateToClientState(state: unknown): ClientToolCallState {
switch (String(state || '')) {
case 'generating':
return ClientToolCallState.generating
case 'pending':
case 'awaiting_approval':
return ClientToolCallState.pending
case 'executing':
return ClientToolCallState.executing
case 'success':
return ClientToolCallState.success
case 'rejected':
case 'skipped':
return ClientToolCallState.rejected
case 'aborted':
return ClientToolCallState.aborted
case 'error':
case 'failed':
return ClientToolCallState.error
default:
return ClientToolCallState.pending
}
}
function extractToolUiMetadata(data: Record<string, unknown>): CopilotToolCall['ui'] | undefined {
const ui = asRecord(data.ui)
if (!ui || Object.keys(ui).length === 0) return undefined
const autoAllowedFromUi = ui.autoAllowed === true
const autoAllowedFromData = data.autoAllowed === true
return {
title: typeof ui.title === 'string' ? ui.title : undefined,
phaseLabel: typeof ui.phaseLabel === 'string' ? ui.phaseLabel : undefined,
icon: typeof ui.icon === 'string' ? ui.icon : undefined,
showInterrupt: ui.showInterrupt === true,
showRemember: ui.showRemember === true,
autoAllowed: autoAllowedFromUi || autoAllowedFromData,
actions: Array.isArray(ui.actions)
? ui.actions
.map((action) => {
const a = asRecord(action)
const id = typeof a.id === 'string' ? a.id : undefined
const label = typeof a.label === 'string' ? a.label : undefined
const kind: 'accept' | 'reject' = a.kind === 'reject' ? 'reject' : 'accept'
if (!id || !label) return null
return {
id,
label,
kind,
remember: a.remember === true,
}
})
.filter((a): a is NonNullable<typeof a> => !!a)
: undefined,
}
}
function extractToolExecutionMetadata(
data: Record<string, unknown>
): CopilotToolCall['execution'] | undefined {
const execution = asRecord(data.execution)
if (!execution || Object.keys(execution).length === 0) return undefined
return {
target: typeof execution.target === 'string' ? execution.target : undefined,
capabilityId: typeof execution.capabilityId === 'string' ? execution.capabilityId : undefined,
}
}
function isWorkflowChangeApplyCall(toolName?: string, params?: Record<string, unknown>): boolean {
if (toolName !== 'workflow_change') return false
const mode = typeof params?.mode === 'string' ? params.mode.toLowerCase() : ''
if (mode === 'apply') return true
return typeof params?.proposalId === 'string' && params.proposalId.length > 0
}
function extractWorkflowStateFromResultPayload(
resultPayload: Record<string, unknown>
): WorkflowState | null {
const directState = asRecord(resultPayload.workflowState)
if (directState) return directState as unknown as WorkflowState
const editResult = asRecord(resultPayload.editResult)
const nestedState = asRecord(editResult?.workflowState)
if (nestedState) return nestedState as unknown as WorkflowState
return null
}
function extractOperationListFromResultPayload(
resultPayload: Record<string, unknown>
): Array<Record<string, unknown>> | undefined {
const operations = resultPayload.operations
if (Array.isArray(operations)) return operations as Array<Record<string, unknown>>
const compiled = resultPayload.compiledOperations
if (Array.isArray(compiled)) return compiled as Array<Record<string, unknown>>
return undefined
}
function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void {
@@ -244,14 +342,28 @@ export const sseHandlers: Record<string, SSEHandler> = {
try {
const eventData = asRecord(data?.data)
const toolCallId: string | undefined =
data?.toolCallId || (eventData.id as string | undefined)
data?.toolCallId ||
(eventData.id as string | undefined) ||
(eventData.callId as string | undefined)
const success: boolean | undefined = data?.success
const failedDependency: boolean = data?.failedDependency === true
const resultObj = asRecord(data?.result)
const skipped: boolean = resultObj.skipped === true
if (!toolCallId) return
const uiMetadata = extractToolUiMetadata(eventData)
const executionMetadata = extractToolExecutionMetadata(eventData)
const serverState = (eventData.state as string | undefined) || undefined
const targetState = serverState
? mapServerStateToClientState(serverState)
: success
? ClientToolCallState.success
: failedDependency || skipped
? ClientToolCallState.rejected
: ClientToolCallState.error
const resultPayload = asRecord(data?.result || eventData.result || eventData.data || data?.data)
const { toolCallsById } = get()
const current = toolCallsById[toolCallId]
let paramsForCurrentToolCall: Record<string, unknown> | undefined = current?.params
if (current) {
if (
isRejectedState(current.state) ||
@@ -260,16 +372,32 @@ export const sseHandlers: Record<string, SSEHandler> = {
) {
return
}
const targetState = success
? ClientToolCallState.success
: failedDependency || skipped
? ClientToolCallState.rejected
: ClientToolCallState.error
if (
targetState === ClientToolCallState.success &&
isWorkflowChangeApplyCall(current.name, paramsForCurrentToolCall)
) {
const operations = extractOperationListFromResultPayload(resultPayload || {})
if (operations && operations.length > 0) {
paramsForCurrentToolCall = {
...(current.params || {}),
operations,
}
}
}
const updatedMap = { ...toolCallsById }
updatedMap[toolCallId] = {
...current,
ui: uiMetadata || current.ui,
execution: executionMetadata || current.execution,
params: paramsForCurrentToolCall,
state: targetState,
display: resolveToolDisplay(current.name, targetState, current.id, current.params),
display: resolveToolDisplay(
current.name,
targetState,
current.id,
paramsForCurrentToolCall
),
}
set({ toolCallsById: updatedMap })
@@ -312,31 +440,39 @@ export const sseHandlers: Record<string, SSEHandler> = {
}
}
if (current.name === 'edit_workflow') {
if (
targetState === ClientToolCallState.success &&
isWorkflowEditToolCall(current.name, paramsForCurrentToolCall)
) {
try {
const resultPayload = asRecord(
data?.result || eventData.result || eventData.data || data?.data
)
const workflowState = asRecord(resultPayload?.workflowState)
const hasWorkflowState = !!resultPayload?.workflowState
logger.info('[SSE] edit_workflow result received', {
const workflowState = resultPayload
? extractWorkflowStateFromResultPayload(resultPayload)
: null
const hasWorkflowState = !!workflowState
logger.info('[SSE] workflow edit result received', {
toolName: current.name,
hasWorkflowState,
blockCount: hasWorkflowState ? Object.keys(workflowState.blocks ?? {}).length : 0,
edgeCount: Array.isArray(workflowState.edges) ? workflowState.edges.length : 0,
blockCount: hasWorkflowState
? Object.keys((workflowState as any).blocks ?? {}).length
: 0,
edgeCount:
hasWorkflowState && Array.isArray((workflowState as any).edges)
? (workflowState as any).edges.length
: 0,
})
if (hasWorkflowState) {
if (workflowState) {
const diffStore = useWorkflowDiffStore.getState()
diffStore
.setProposedChanges(resultPayload.workflowState as WorkflowState)
.catch((err) => {
logger.error('[SSE] Failed to apply edit_workflow diff', {
error: err instanceof Error ? err.message : String(err),
})
diffStore.setProposedChanges(workflowState).catch((err) => {
logger.error('[SSE] Failed to apply workflow edit diff', {
error: err instanceof Error ? err.message : String(err),
toolName: current.name,
})
})
}
} catch (err) {
logger.error('[SSE] edit_workflow result handling failed', {
logger.error('[SSE] workflow edit result handling failed', {
error: err instanceof Error ? err.message : String(err),
toolName: current.name,
})
}
}
@@ -460,16 +596,23 @@ export const sseHandlers: Record<string, SSEHandler> = {
: failedDependency || skipped
? ClientToolCallState.rejected
: ClientToolCallState.error
const paramsForBlock =
b.toolCall?.id === toolCallId
? paramsForCurrentToolCall || b.toolCall?.params
: b.toolCall?.params
context.contentBlocks[i] = {
...b,
toolCall: {
...b.toolCall,
params: paramsForBlock,
ui: uiMetadata || b.toolCall?.ui,
execution: executionMetadata || b.toolCall?.execution,
state: targetState,
display: resolveToolDisplay(
b.toolCall?.name,
targetState,
toolCallId,
b.toolCall?.params
paramsForBlock
),
},
}
@@ -487,7 +630,9 @@ export const sseHandlers: Record<string, SSEHandler> = {
try {
const errorData = asRecord(data?.data)
const toolCallId: string | undefined =
data?.toolCallId || (errorData.id as string | undefined)
data?.toolCallId ||
(errorData.id as string | undefined) ||
(errorData.callId as string | undefined)
const failedDependency: boolean = data?.failedDependency === true
if (!toolCallId) return
const { toolCallsById } = get()
@@ -500,12 +645,18 @@ export const sseHandlers: Record<string, SSEHandler> = {
) {
return
}
const targetState = failedDependency
? ClientToolCallState.rejected
: ClientToolCallState.error
const targetState = errorData.state
? mapServerStateToClientState(errorData.state)
: failedDependency
? ClientToolCallState.rejected
: ClientToolCallState.error
const uiMetadata = extractToolUiMetadata(errorData)
const executionMetadata = extractToolExecutionMetadata(errorData)
const updatedMap = { ...toolCallsById }
updatedMap[toolCallId] = {
...current,
ui: uiMetadata || current.ui,
execution: executionMetadata || current.execution,
state: targetState,
display: resolveToolDisplay(current.name, targetState, current.id, current.params),
}
@@ -520,13 +671,19 @@ export const sseHandlers: Record<string, SSEHandler> = {
isBackgroundState(b.toolCall?.state)
)
break
const targetState = failedDependency
? ClientToolCallState.rejected
: ClientToolCallState.error
const targetState = errorData.state
? mapServerStateToClientState(errorData.state)
: failedDependency
? ClientToolCallState.rejected
: ClientToolCallState.error
const uiMetadata = extractToolUiMetadata(errorData)
const executionMetadata = extractToolExecutionMetadata(errorData)
context.contentBlocks[i] = {
...b,
toolCall: {
...b.toolCall,
ui: uiMetadata || b.toolCall?.ui,
execution: executionMetadata || b.toolCall?.execution,
state: targetState,
display: resolveToolDisplay(
b.toolCall?.name,
@@ -547,19 +704,26 @@ export const sseHandlers: Record<string, SSEHandler> = {
}
},
tool_generating: (data, context, get, set) => {
const { toolCallId, toolName } = data
const eventData = asRecord(data?.data)
const toolCallId =
data?.toolCallId ||
(eventData.id as string | undefined) ||
(eventData.callId as string | undefined)
const toolName =
data?.toolName ||
(eventData.name as string | undefined) ||
(eventData.toolName as string | undefined)
if (!toolCallId || !toolName) return
const { toolCallsById } = get()
if (!toolCallsById[toolCallId]) {
const isAutoAllowed = get().isToolAutoAllowed(toolName)
const initialState = isAutoAllowed
? ClientToolCallState.executing
: ClientToolCallState.pending
const initialState = ClientToolCallState.generating
const tc: CopilotToolCall = {
id: toolCallId,
name: toolName,
state: initialState,
ui: extractToolUiMetadata(eventData),
execution: extractToolExecutionMetadata(eventData),
display: resolveToolDisplay(toolName, initialState, toolCallId),
}
const updated = { ...toolCallsById, [toolCallId]: tc }
@@ -572,17 +736,27 @@ export const sseHandlers: Record<string, SSEHandler> = {
},
tool_call: (data, context, get, set) => {
const toolData = asRecord(data?.data)
const id: string | undefined = (toolData.id as string | undefined) || data?.toolCallId
const name: string | undefined = (toolData.name as string | undefined) || data?.toolName
const id: string | undefined =
(toolData.id as string | undefined) ||
(toolData.callId as string | undefined) ||
data?.toolCallId
const name: string | undefined =
(toolData.name as string | undefined) ||
(toolData.toolName as string | undefined) ||
data?.toolName
if (!id) return
const args = toolData.arguments as Record<string, unknown> | undefined
const isPartial = toolData.partial === true
const uiMetadata = extractToolUiMetadata(toolData)
const executionMetadata = extractToolExecutionMetadata(toolData)
const serverState = toolData.state
const { toolCallsById } = get()
const existing = toolCallsById[id]
const toolName = name || existing?.name || 'unknown_tool'
const isAutoAllowed = get().isToolAutoAllowed(toolName)
let initialState = isAutoAllowed ? ClientToolCallState.executing : ClientToolCallState.pending
let initialState = serverState
? mapServerStateToClientState(serverState)
: ClientToolCallState.pending
// Avoid flickering back to pending on partial/duplicate events once a tool is executing.
if (
@@ -597,6 +771,8 @@ export const sseHandlers: Record<string, SSEHandler> = {
...existing,
name: toolName,
state: initialState,
ui: uiMetadata || existing.ui,
execution: executionMetadata || existing.execution,
...(args ? { params: args } : {}),
display: resolveToolDisplay(toolName, initialState, id, args || existing.params),
}
@@ -604,6 +780,8 @@ export const sseHandlers: Record<string, SSEHandler> = {
id,
name: toolName,
state: initialState,
ui: uiMetadata,
execution: executionMetadata,
...(args ? { params: args } : {}),
display: resolveToolDisplay(toolName, initialState, id, args),
}
@@ -618,20 +796,12 @@ export const sseHandlers: Record<string, SSEHandler> = {
return
}
// Auto-allowed tools: send confirmation to the server so it can proceed
// without waiting for the user to click "Allow".
if (isAutoAllowed) {
sendAutoAcceptConfirmation(id)
}
const shouldInterrupt = next.ui?.showInterrupt === true
// Client-executable run tools: execute on the client for real-time feedback
// (block pulsing, console logs, stop button). The server defers execution
// for these tools in interactive mode; the client reports back via mark-complete.
if (
CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName) &&
initialState === ClientToolCallState.executing
) {
executeRunToolOnClient(id, toolName, args || existing?.params || {})
// Client-run capability: execution is delegated to the browser.
// We run immediately only when no interrupt is required.
if (isClientRunCapability(next) && !shouldInterrupt) {
executeRunToolOnClient(id, toolName, args || next.params || {})
}
// OAuth: dispatch event to open the OAuth connect modal
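An assumed example of the tool_call event payload these handlers consume; only the fields the extractors above read (callId, name, state, arguments, ui, execution) are shown, and every value is hypothetical:

const exampleToolCallEvent = {
  type: 'tool_call',
  data: {
    callId: 'tc_123',                  // picked up via the new callId fallback
    name: 'run_workflow',
    state: 'awaiting_approval',        // mapped to ClientToolCallState.pending
    arguments: { workflowId: 'wf_1' }, // stored as params on the tool call
    ui: {
      title: 'Run workflow',
      showInterrupt: true,             // defers client-run execution until the user approves
      autoAllowed: false,
      actions: [{ id: 'accept', label: 'Run', kind: 'accept' }],
    },
    execution: { target: 'sim_client_capability', capabilityId: 'workflow.run' },
  },
}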

View File

@@ -9,9 +9,10 @@ import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
import { resolveToolDisplay } from '@/lib/copilot/store-utils'
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
import type { CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types'
import { useWorkflowDiffStore } from '@/stores/workflow-diff/store'
import type { WorkflowState } from '@/stores/workflows/workflow/types'
import {
type SSEHandler,
sendAutoAcceptConfirmation,
sseHandlers,
updateStreamingMessage,
} from './handlers'
@@ -24,6 +25,113 @@ type StoreSet = (
partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)
) => void
function mapServerStateToClientState(state: unknown): ClientToolCallState {
switch (String(state || '')) {
case 'generating':
return ClientToolCallState.generating
case 'pending':
case 'awaiting_approval':
return ClientToolCallState.pending
case 'executing':
return ClientToolCallState.executing
case 'success':
return ClientToolCallState.success
case 'rejected':
case 'skipped':
return ClientToolCallState.rejected
case 'aborted':
return ClientToolCallState.aborted
case 'error':
case 'failed':
return ClientToolCallState.error
default:
return ClientToolCallState.pending
}
}
function extractToolUiMetadata(data: Record<string, unknown>): CopilotToolCall['ui'] | undefined {
const ui = asRecord(data.ui)
if (!ui || Object.keys(ui).length === 0) return undefined
const autoAllowedFromUi = ui.autoAllowed === true
const autoAllowedFromData = data.autoAllowed === true
return {
title: typeof ui.title === 'string' ? ui.title : undefined,
phaseLabel: typeof ui.phaseLabel === 'string' ? ui.phaseLabel : undefined,
icon: typeof ui.icon === 'string' ? ui.icon : undefined,
showInterrupt: ui.showInterrupt === true,
showRemember: ui.showRemember === true,
autoAllowed: autoAllowedFromUi || autoAllowedFromData,
actions: Array.isArray(ui.actions)
? ui.actions
.map((action) => {
const a = asRecord(action)
const id = typeof a.id === 'string' ? a.id : undefined
const label = typeof a.label === 'string' ? a.label : undefined
const kind: 'accept' | 'reject' = a.kind === 'reject' ? 'reject' : 'accept'
if (!id || !label) return null
return {
id,
label,
kind,
remember: a.remember === true,
}
})
.filter((a): a is NonNullable<typeof a> => !!a)
: undefined,
}
}
function extractToolExecutionMetadata(
data: Record<string, unknown>
): CopilotToolCall['execution'] | undefined {
const execution = asRecord(data.execution)
if (!execution || Object.keys(execution).length === 0) return undefined
return {
target: typeof execution.target === 'string' ? execution.target : undefined,
capabilityId: typeof execution.capabilityId === 'string' ? execution.capabilityId : undefined,
}
}
function isClientRunCapability(toolCall: CopilotToolCall): boolean {
if (toolCall.execution?.target === 'sim_client_capability') {
return toolCall.execution.capabilityId === 'workflow.run' || !toolCall.execution.capabilityId
}
return CLIENT_EXECUTABLE_RUN_TOOLS.has(toolCall.name)
}
function isWorkflowChangeApplyCall(toolCall: CopilotToolCall): boolean {
if (toolCall.name !== 'workflow_change') return false
const params = (toolCall.params || {}) as Record<string, unknown>
const mode = typeof params.mode === 'string' ? params.mode.toLowerCase() : ''
if (mode === 'apply') return true
return typeof params.proposalId === 'string' && params.proposalId.length > 0
}
function extractWorkflowStateFromResultPayload(
resultPayload: Record<string, unknown>
): WorkflowState | null {
const directState = asRecord(resultPayload.workflowState)
if (directState) return directState as unknown as WorkflowState
const editResult = asRecord(resultPayload.editResult)
const nestedState = asRecord(editResult?.workflowState)
if (nestedState) return nestedState as unknown as WorkflowState
return null
}
function extractOperationListFromResultPayload(
resultPayload: Record<string, unknown>
): Array<Record<string, unknown>> | undefined {
const operations = resultPayload.operations
if (Array.isArray(operations)) return operations as Array<Record<string, unknown>>
const compiled = resultPayload.compiledOperations
if (Array.isArray(compiled)) return compiled as Array<Record<string, unknown>>
return undefined
}
export function appendSubAgentContent(
context: ClientStreamingContext,
parentToolCallId: string,
@@ -164,6 +272,8 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
const name: string | undefined = (toolData.name as string | undefined) || data?.toolName
if (!id || !name) return
const isPartial = toolData.partial === true
const uiMetadata = extractToolUiMetadata(toolData)
const executionMetadata = extractToolExecutionMetadata(toolData)
let args: Record<string, unknown> | undefined = (toolData.arguments || toolData.input) as
| Record<string, unknown>
@@ -199,9 +309,10 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
const existingToolCall =
existingIndex >= 0 ? context.subAgentToolCalls[parentToolCallId][existingIndex] : undefined
// Auto-allowed tools skip pending state to avoid flashing interrupt buttons
const isAutoAllowed = get().isToolAutoAllowed(name)
let initialState = isAutoAllowed ? ClientToolCallState.executing : ClientToolCallState.pending
const serverState = toolData.state
let initialState = serverState
? mapServerStateToClientState(serverState)
: ClientToolCallState.pending
// Avoid flickering back to pending on partial/duplicate events once a tool is executing.
if (
@@ -215,6 +326,8 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
id,
name,
state: initialState,
ui: uiMetadata,
execution: executionMetadata,
...(args ? { params: args } : {}),
display: resolveToolDisplay(name, initialState, id, args),
}
@@ -241,16 +354,11 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
return
}
// Auto-allowed tools: send confirmation to the server so it can proceed
// without waiting for the user to click "Allow".
if (isAutoAllowed) {
sendAutoAcceptConfirmation(id)
}
const shouldInterrupt = subAgentToolCall.ui?.showInterrupt === true
// Client-executable run tools: if auto-allowed, execute immediately for
// real-time feedback. For non-auto-allowed, the user must click "Allow"
// first — handleRun in tool-call.tsx triggers executeRunToolOnClient.
if (CLIENT_EXECUTABLE_RUN_TOOLS.has(name) && isAutoAllowed) {
// Client-run capability: execution is delegated to the browser.
// Execute immediately only for non-interrupting calls.
if (isClientRunCapability(subAgentToolCall) && !shouldInterrupt) {
executeRunToolOnClient(id, name, args || {})
}
},
@@ -275,17 +383,45 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
if (!context.subAgentToolCalls[parentToolCallId]) return
if (!context.subAgentBlocks[parentToolCallId]) return
const targetState = success ? ClientToolCallState.success : ClientToolCallState.error
const serverState = resultData.state
const targetState = serverState
? mapServerStateToClientState(serverState)
: success
? ClientToolCallState.success
: ClientToolCallState.error
const uiMetadata = extractToolUiMetadata(resultData)
const executionMetadata = extractToolExecutionMetadata(resultData)
const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex(
(tc: CopilotToolCall) => tc.id === toolCallId
)
if (existingIndex >= 0) {
const existing = context.subAgentToolCalls[parentToolCallId][existingIndex]
let nextParams = existing.params
const resultPayload = asRecord(
data?.result || resultData.result || resultData.data || data?.data
)
if (
targetState === ClientToolCallState.success &&
isWorkflowChangeApplyCall(existing) &&
resultPayload
) {
const operations = extractOperationListFromResultPayload(resultPayload)
if (operations && operations.length > 0) {
nextParams = {
...(existing.params || {}),
operations,
}
}
}
const updatedSubAgentToolCall = {
...existing,
params: nextParams,
ui: uiMetadata || existing.ui,
execution: executionMetadata || existing.execution,
state: targetState,
display: resolveToolDisplay(existing.name, targetState, toolCallId, existing.params),
display: resolveToolDisplay(existing.name, targetState, toolCallId, nextParams),
}
context.subAgentToolCalls[parentToolCallId][existingIndex] = updatedSubAgentToolCall
@@ -309,6 +445,23 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
state: targetState,
})
}
if (
targetState === ClientToolCallState.success &&
resultPayload &&
isWorkflowChangeApplyCall(updatedSubAgentToolCall)
) {
const workflowState = extractWorkflowStateFromResultPayload(resultPayload)
if (workflowState) {
const diffStore = useWorkflowDiffStore.getState()
diffStore.setProposedChanges(workflowState).catch((error) => {
logger.error('[SubAgent] Failed to apply workflow_change diff', {
error: error instanceof Error ? error.message : String(error),
toolCallId,
})
})
}
}
}
updateToolCallWithSubAgentData(context, get, set, parentToolCallId)

View File

@@ -101,9 +101,6 @@ export const COPILOT_CHECKPOINTS_API_PATH = '/api/copilot/checkpoints'
/** POST — revert to a checkpoint. */
export const COPILOT_CHECKPOINTS_REVERT_API_PATH = '/api/copilot/checkpoints/revert'
/** GET/POST/DELETE — manage auto-allowed tools. */
export const COPILOT_AUTO_ALLOWED_TOOLS_API_PATH = '/api/copilot/auto-allowed-tools'
/** GET — fetch dynamically available copilot models. */
export const COPILOT_MODELS_API_PATH = '/api/copilot/models'

View File

@@ -1,67 +0,0 @@
export const INTERRUPT_TOOL_NAMES = [
'set_global_workflow_variables',
'run_workflow',
'run_workflow_until_block',
'run_from_block',
'run_block',
'manage_mcp_tool',
'manage_custom_tool',
'deploy_mcp',
'deploy_chat',
'deploy_api',
'create_workspace_mcp_server',
'set_environment_variables',
'make_api_request',
'oauth_request_access',
'navigate_ui',
'knowledge_base',
'generate_api_key',
] as const
export const INTERRUPT_TOOL_SET = new Set<string>(INTERRUPT_TOOL_NAMES)
export const SUBAGENT_TOOL_NAMES = [
'debug',
'edit',
'build',
'plan',
'test',
'deploy',
'auth',
'research',
'knowledge',
'custom_tool',
'tour',
'info',
'workflow',
'evaluate',
'superagent',
'discovery',
] as const
export const SUBAGENT_TOOL_SET = new Set<string>(SUBAGENT_TOOL_NAMES)
/**
* Respond tools are internal to the copilot's subagent system.
* They're used by subagents to signal completion and should NOT be executed by the sim side.
* The copilot backend handles these internally.
*/
export const RESPOND_TOOL_NAMES = [
'plan_respond',
'edit_respond',
'build_respond',
'debug_respond',
'info_respond',
'research_respond',
'deploy_respond',
'superagent_respond',
'discovery_respond',
'tour_respond',
'auth_respond',
'workflow_respond',
'knowledge_respond',
'custom_tool_respond',
'test_respond',
] as const
export const RESPOND_TOOL_SET = new Set<string>(RESPOND_TOOL_NAMES)

View File

@@ -1,17 +1,12 @@
import { createLogger } from '@sim/logger'
import { STREAM_TIMEOUT_MS } from '@/lib/copilot/constants'
import { RESPOND_TOOL_SET, SUBAGENT_TOOL_SET } from '@/lib/copilot/orchestrator/config'
import {
asRecord,
getEventData,
markToolResultSeen,
wasToolResultSeen,
} from '@/lib/copilot/orchestrator/sse-utils'
import {
isIntegrationTool,
isToolAvailableOnSimSide,
markToolComplete,
} from '@/lib/copilot/orchestrator/tool-executor'
import { markToolComplete } from '@/lib/copilot/orchestrator/tool-executor'
import type {
ContentBlock,
ExecutionContext,
@@ -22,7 +17,6 @@ import type {
} from '@/lib/copilot/orchestrator/types'
import {
executeToolAndReport,
isInterruptToolName,
waitForToolCompletion,
waitForToolDecision,
} from './tool-execution'
@@ -41,6 +35,113 @@ const CLIENT_EXECUTABLE_RUN_TOOLS = new Set([
'run_block',
])
function mapServerStateToToolStatus(state: unknown): ToolCallState['status'] {
switch (String(state || '')) {
case 'generating':
case 'pending':
case 'awaiting_approval':
return 'pending'
case 'executing':
return 'executing'
case 'success':
return 'success'
case 'rejected':
case 'skipped':
return 'rejected'
case 'aborted':
return 'skipped'
case 'error':
case 'failed':
return 'error'
default:
return 'pending'
}
}
function getExecutionTarget(
toolData: Record<string, unknown>,
toolName: string
): { target: string; capabilityId?: string } {
const execution = asRecord(toolData.execution)
if (typeof execution.target === 'string' && execution.target.length > 0) {
return {
target: execution.target,
capabilityId:
typeof execution.capabilityId === 'string' ? execution.capabilityId : undefined,
}
}
// Fallback only when metadata is missing.
if (CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName)) {
return { target: 'sim_client_capability', capabilityId: 'workflow.run' }
}
return { target: 'sim_server' }
}
function needsApproval(toolData: Record<string, unknown>): boolean {
const ui = asRecord(toolData.ui)
return ui.showInterrupt === true
}
async function waitForClientCapabilityAndReport(
toolCall: ToolCallState,
options: OrchestratorOptions,
logScope: string
): Promise<void> {
toolCall.status = 'executing'
const completion = await waitForToolCompletion(
toolCall.id,
options.timeout || STREAM_TIMEOUT_MS,
options.abortSignal
)
if (completion?.status === 'background') {
toolCall.status = 'skipped'
toolCall.endTime = Date.now()
markToolComplete(
toolCall.id,
toolCall.name,
202,
completion.message || 'Tool execution moved to background',
{ background: true }
).catch((err) => {
logger.error(`markToolComplete fire-and-forget failed (${logScope} background)`, {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
return
}
if (completion?.status === 'rejected') {
toolCall.status = 'rejected'
toolCall.endTime = Date.now()
markToolComplete(toolCall.id, toolCall.name, 400, completion.message || 'Tool execution rejected')
.catch((err) => {
logger.error(`markToolComplete fire-and-forget failed (${logScope} rejected)`, {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
return
}
const success = completion?.status === 'success'
toolCall.status = success ? 'success' : 'error'
toolCall.endTime = Date.now()
const msg = completion?.message || (success ? 'Tool completed' : 'Tool failed or timed out')
markToolComplete(toolCall.id, toolCall.name, success ? 200 : 500, msg).catch((err) => {
logger.error(`markToolComplete fire-and-forget failed (${logScope})`, {
toolCallId: toolCall.id,
toolName: toolCall.name,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
}
// Normalization + dedupe helpers live in sse-utils to keep server/client in sync.
function inferToolSuccess(data: Record<string, unknown> | undefined): {
@@ -85,7 +186,11 @@ export const sseHandlers: Record<string, SSEHandler> = {
const { success, hasResultData, hasError } = inferToolSuccess(data)
current.status = success ? 'success' : 'error'
current.status = data?.state
? mapServerStateToToolStatus(data.state)
: success
? 'success'
: 'error'
current.endTime = Date.now()
if (hasResultData) {
current.result = {
@@ -104,7 +209,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
if (!toolCallId) return
const current = context.toolCalls.get(toolCallId)
if (!current) return
current.status = 'error'
current.status = data?.state ? mapServerStateToToolStatus(data.state) : 'error'
current.error = (data?.error as string | undefined) || 'Tool execution failed'
current.endTime = Date.now()
},
@@ -121,7 +226,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
context.toolCalls.set(toolCallId, {
id: toolCallId,
name: toolName,
status: 'pending',
status: data?.state ? mapServerStateToToolStatus(data.state) : 'pending',
startTime: Date.now(),
})
}
@@ -156,7 +261,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
context.toolCalls.set(toolCallId, {
id: toolCallId,
name: toolName,
status: 'pending',
status: toolData.state ? mapServerStateToToolStatus(toolData.state) : 'pending',
params: args,
startTime: Date.now(),
})
@@ -170,83 +275,29 @@ export const sseHandlers: Record<string, SSEHandler> = {
const toolCall = context.toolCalls.get(toolCallId)
if (!toolCall) return
// Subagent tools are executed by the copilot backend, not sim side.
if (SUBAGENT_TOOL_SET.has(toolName)) {
return
}
// Respond tools are internal to copilot's subagent system - skip execution.
// The copilot backend handles these internally to signal subagent completion.
if (RESPOND_TOOL_SET.has(toolName)) {
toolCall.status = 'success'
toolCall.endTime = Date.now()
toolCall.result = {
success: true,
output: 'Internal respond tool - handled by copilot backend',
}
return
}
const isInterruptTool = isInterruptToolName(toolName)
const execution = getExecutionTarget(toolData, toolName)
const isInteractive = options.interactive === true
// Integration tools (user-installed) also require approval in interactive mode
const needsApproval = isInterruptTool || isIntegrationTool(toolName)
const requiresApproval = isInteractive && needsApproval(toolData)
if (toolData.state) {
toolCall.status = mapServerStateToToolStatus(toolData.state)
}
if (needsApproval && isInteractive) {
if (requiresApproval) {
const decision = await waitForToolDecision(
toolCallId,
options.timeout || STREAM_TIMEOUT_MS,
options.abortSignal
)
if (decision?.status === 'accepted' || decision?.status === 'success') {
// Client-executable run tools: defer execution to the browser client.
// The client calls executeWorkflowWithFullLogging for real-time feedback
// (block pulsing, logs, stop button) and reports completion via
// /api/copilot/confirm with status success/error. We poll Redis for
// that completion signal, then fire-and-forget markToolComplete to Go.
if (CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName)) {
toolCall.status = 'executing'
const completion = await waitForToolCompletion(
toolCallId,
options.timeout || STREAM_TIMEOUT_MS,
options.abortSignal
)
if (completion?.status === 'background') {
toolCall.status = 'skipped'
toolCall.endTime = Date.now()
markToolComplete(
toolCall.id,
toolCall.name,
202,
completion.message || 'Tool execution moved to background',
{ background: true }
).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (run tool background)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCallId)
return
}
const success = completion?.status === 'success'
toolCall.status = success ? 'success' : 'error'
toolCall.endTime = Date.now()
const msg =
completion?.message || (success ? 'Tool completed' : 'Tool failed or timed out')
// Fire-and-forget: tell Go backend the tool is done
// (must NOT await — see deadlock note in executeToolAndReport)
markToolComplete(toolCall.id, toolCall.name, success ? 200 : 500, msg).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (run tool)', {
toolCallId: toolCall.id,
toolName: toolCall.name,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCallId)
if (execution.target === 'sim_client_capability' && isInteractive) {
await waitForClientCapabilityAndReport(toolCall, options, 'run tool')
return
}
await executeToolAndReport(toolCallId, context, execContext, options)
if (execution.target === 'sim_server' || execution.target === 'sim_client_capability') {
if (options.autoExecuteTools !== false) {
await executeToolAndReport(toolCallId, context, execContext, options)
}
}
return
}
@@ -308,7 +359,15 @@ export const sseHandlers: Record<string, SSEHandler> = {
return
}
if (options.autoExecuteTools !== false) {
if (execution.target === 'sim_client_capability' && isInteractive) {
await waitForClientCapabilityAndReport(toolCall, options, 'run tool')
return
}
if (
(execution.target === 'sim_server' || execution.target === 'sim_client_capability') &&
options.autoExecuteTools !== false
) {
await executeToolAndReport(toolCallId, context, execContext, options)
}
},
@@ -410,7 +469,7 @@ export const subAgentHandlers: Record<string, SSEHandler> = {
const toolCall: ToolCallState = {
id: toolCallId,
name: toolName,
status: 'pending',
status: toolData.state ? mapServerStateToToolStatus(toolData.state) : 'pending',
params: args,
startTime: Date.now(),
}
@@ -428,37 +487,26 @@ export const subAgentHandlers: Record<string, SSEHandler> = {
if (isPartial) return
// Respond tools are internal to copilot's subagent system - skip execution.
if (RESPOND_TOOL_SET.has(toolName)) {
toolCall.status = 'success'
toolCall.endTime = Date.now()
toolCall.result = {
success: true,
output: 'Internal respond tool - handled by copilot backend',
}
return
}
const execution = getExecutionTarget(toolData, toolName)
const isInteractive = options.interactive === true
const requiresApproval = isInteractive && needsApproval(toolData)
// Tools that only exist on the Go backend (e.g. search_patterns,
// search_errors, remember_debug) should NOT be re-executed on the Sim side.
// The Go backend already executed them and will send its own tool_result
// SSE event with the real outcome. Trying to execute them here would fail
// with "Tool not found" and incorrectly mark the tool as failed.
if (!isToolAvailableOnSimSide(toolName)) {
return
}
// Interrupt tools and integration tools (user-installed) require approval
// in interactive mode, same as top-level handler.
const needsSubagentApproval = isInterruptToolName(toolName) || isIntegrationTool(toolName)
if (options.interactive === true && needsSubagentApproval) {
if (requiresApproval) {
const decision = await waitForToolDecision(
toolCallId,
options.timeout || STREAM_TIMEOUT_MS,
options.abortSignal
)
if (decision?.status === 'accepted' || decision?.status === 'success') {
await executeToolAndReport(toolCallId, context, execContext, options)
if (execution.target === 'sim_client_capability' && isInteractive) {
await waitForClientCapabilityAndReport(toolCall, options, 'subagent run tool')
return
}
if (execution.target === 'sim_server' || execution.target === 'sim_client_capability') {
if (options.autoExecuteTools !== false) {
await executeToolAndReport(toolCallId, context, execContext, options)
}
}
return
}
if (decision?.status === 'rejected' || decision?.status === 'error') {
@@ -517,66 +565,15 @@ export const subAgentHandlers: Record<string, SSEHandler> = {
return
}
// Client-executable run tools in interactive mode: defer to client.
// Same pattern as main handler: wait for client completion, then tell Go.
if (options.interactive === true && CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName)) {
toolCall.status = 'executing'
const completion = await waitForToolCompletion(
toolCallId,
options.timeout || STREAM_TIMEOUT_MS,
options.abortSignal
)
if (completion?.status === 'rejected') {
toolCall.status = 'rejected'
toolCall.endTime = Date.now()
markToolComplete(
toolCall.id,
toolCall.name,
400,
completion.message || 'Tool execution rejected'
).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (subagent run tool rejected)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCallId)
return
}
if (completion?.status === 'background') {
toolCall.status = 'skipped'
toolCall.endTime = Date.now()
markToolComplete(
toolCall.id,
toolCall.name,
202,
completion.message || 'Tool execution moved to background',
{ background: true }
).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (subagent run tool background)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCallId)
return
}
const success = completion?.status === 'success'
toolCall.status = success ? 'success' : 'error'
toolCall.endTime = Date.now()
const msg = completion?.message || (success ? 'Tool completed' : 'Tool failed or timed out')
markToolComplete(toolCall.id, toolCall.name, success ? 200 : 500, msg).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (subagent run tool)', {
toolCallId: toolCall.id,
toolName: toolCall.name,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCallId)
if (execution.target === 'sim_client_capability' && isInteractive) {
await waitForClientCapabilityAndReport(toolCall, options, 'subagent run tool')
return
}
if (options.autoExecuteTools !== false) {
if (
(execution.target === 'sim_server' || execution.target === 'sim_client_capability') &&
options.autoExecuteTools !== false
) {
await executeToolAndReport(toolCallId, context, execContext, options)
}
},
@@ -596,7 +593,7 @@ export const subAgentHandlers: Record<string, SSEHandler> = {
const { success, hasResultData, hasError } = inferToolSuccess(data)
const status = success ? 'success' : 'error'
const status = data?.state ? mapServerStateToToolStatus(data.state) : success ? 'success' : 'error'
const endTime = Date.now()
const result = hasResultData ? { success, output: data?.result || data?.data } : undefined
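An illustrative summary (not the actual implementation) of how the handlers above route a tool call once any required approval has been granted, based on the execution metadata and the orchestrator options:

type ExecutionTarget = 'sim_server' | 'sim_client_capability' | string

function routeToolCallSketch(
  target: ExecutionTarget,
  interactive: boolean,
  autoExecute: boolean
): string {
  if (target === 'sim_client_capability' && interactive) {
    // The browser executes the capability; the server polls for completion and reports back.
    return 'waitForClientCapabilityAndReport'
  }
  if ((target === 'sim_server' || target === 'sim_client_capability') && autoExecute) {
    // Execute on the Sim server and report the result.
    return 'executeToolAndReport'
  }
  // Any other target is skipped on the Sim side, presumably handled by the copilot backend.
  return 'defer'
}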

View File

@@ -4,7 +4,6 @@ import {
TOOL_DECISION_MAX_POLL_MS,
TOOL_DECISION_POLL_BACKOFF,
} from '@/lib/copilot/constants'
import { INTERRUPT_TOOL_SET } from '@/lib/copilot/orchestrator/config'
import { getToolConfirmation } from '@/lib/copilot/orchestrator/persistence'
import {
asRecord,
@@ -21,10 +20,6 @@ import type {
const logger = createLogger('CopilotSseToolExecution')
export function isInterruptToolName(toolName: string): boolean {
return INTERRUPT_TOOL_SET.has(toolName)
}
export async function executeToolAndReport(
toolCallId: string,
context: StreamingContext,
@@ -34,9 +29,11 @@ export async function executeToolAndReport(
const toolCall = context.toolCalls.get(toolCallId)
if (!toolCall) return
if (toolCall.status === 'executing') return
const lockable = toolCall as typeof toolCall & { __simExecuting?: boolean }
if (lockable.__simExecuting) return
if (wasToolResultSeen(toolCall.id)) return
lockable.__simExecuting = true
toolCall.status = 'executing'
try {
const result = await executeToolServerSide(toolCall, execContext)
@@ -122,6 +119,8 @@ export async function executeToolAndReport(
},
}
await options?.onEvent?.(errorEvent)
} finally {
delete lockable.__simExecuting
}
}

View File

@@ -1,7 +1,7 @@
import { db } from '@sim/db'
import { workflow } from '@sim/db/schema'
import { customTools, workflow } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { and, desc, eq, isNull, or } from 'drizzle-orm'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import type {
ExecutionContext,
@@ -12,6 +12,7 @@ import { routeExecution } from '@/lib/copilot/tools/server/router'
import { env } from '@/lib/core/config/env'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
import { upsertCustomTools } from '@/lib/workflows/custom-tools/operations'
import { getTool, resolveToolId } from '@/tools/utils'
import {
executeCheckDeploymentStatus,
@@ -76,6 +77,247 @@ import {
const logger = createLogger('CopilotToolExecutor')
type ManageCustomToolOperation = 'add' | 'edit' | 'delete' | 'list'
interface ManageCustomToolSchema {
type: 'function'
function: {
name: string
description?: string
parameters: Record<string, unknown>
}
}
interface ManageCustomToolParams {
operation?: string
toolId?: string
schema?: ManageCustomToolSchema
code?: string
title?: string
workspaceId?: string
}
async function executeManageCustomTool(
rawParams: Record<string, unknown>,
context: ExecutionContext
): Promise<ToolCallResult> {
const params = rawParams as ManageCustomToolParams
const operation = String(params.operation || '').toLowerCase() as ManageCustomToolOperation
const workspaceId = params.workspaceId || context.workspaceId
if (!operation) {
return { success: false, error: "Missing required 'operation' argument" }
}
try {
if (operation === 'list') {
const toolsForUser = workspaceId
? await db
.select()
.from(customTools)
.where(
or(
eq(customTools.workspaceId, workspaceId),
and(isNull(customTools.workspaceId), eq(customTools.userId, context.userId))
)
)
.orderBy(desc(customTools.createdAt))
: await db
.select()
.from(customTools)
.where(and(isNull(customTools.workspaceId), eq(customTools.userId, context.userId)))
.orderBy(desc(customTools.createdAt))
return {
success: true,
output: {
success: true,
operation,
tools: toolsForUser,
count: toolsForUser.length,
},
}
}
if (operation === 'add') {
if (!workspaceId) {
return {
success: false,
error: "workspaceId is required for operation 'add'",
}
}
if (!params.schema || !params.code) {
return {
success: false,
error: "Both 'schema' and 'code' are required for operation 'add'",
}
}
const title = params.title || params.schema.function?.name
if (!title) {
return { success: false, error: "Missing tool title or schema.function.name for 'add'" }
}
const resultTools = await upsertCustomTools({
tools: [
{
title,
schema: params.schema,
code: params.code,
},
],
workspaceId,
userId: context.userId,
})
const created = resultTools.find((tool) => tool.title === title)
return {
success: true,
output: {
success: true,
operation,
toolId: created?.id,
title,
message: `Created custom tool "${title}"`,
},
}
}
if (operation === 'edit') {
if (!workspaceId) {
return {
success: false,
error: "workspaceId is required for operation 'edit'",
}
}
if (!params.toolId) {
return { success: false, error: "'toolId' is required for operation 'edit'" }
}
if (!params.schema && !params.code) {
return {
success: false,
error: "At least one of 'schema' or 'code' is required for operation 'edit'",
}
}
const workspaceTool = await db
.select()
.from(customTools)
.where(and(eq(customTools.id, params.toolId), eq(customTools.workspaceId, workspaceId)))
.limit(1)
const legacyTool =
workspaceTool.length === 0
? await db
.select()
.from(customTools)
.where(
and(
eq(customTools.id, params.toolId),
isNull(customTools.workspaceId),
eq(customTools.userId, context.userId)
)
)
.limit(1)
: []
const existing = workspaceTool[0] || legacyTool[0]
if (!existing) {
return { success: false, error: `Custom tool not found: ${params.toolId}` }
}
const mergedSchema = params.schema || (existing.schema as ManageCustomToolSchema)
const mergedCode = params.code || existing.code
const title = params.title || mergedSchema.function?.name || existing.title
await upsertCustomTools({
tools: [
{
id: params.toolId,
title,
schema: mergedSchema,
code: mergedCode,
},
],
workspaceId,
userId: context.userId,
})
return {
success: true,
output: {
success: true,
operation,
toolId: params.toolId,
title,
message: `Updated custom tool "${title}"`,
},
}
}
if (operation === 'delete') {
if (!params.toolId) {
return { success: false, error: "'toolId' is required for operation 'delete'" }
}
const workspaceDelete =
workspaceId != null
? await db
.delete(customTools)
.where(
and(eq(customTools.id, params.toolId), eq(customTools.workspaceId, workspaceId))
)
.returning({ id: customTools.id })
: []
const legacyDelete =
workspaceDelete.length === 0
? await db
.delete(customTools)
.where(
and(
eq(customTools.id, params.toolId),
isNull(customTools.workspaceId),
eq(customTools.userId, context.userId)
)
)
.returning({ id: customTools.id })
: []
const deleted = workspaceDelete[0] || legacyDelete[0]
if (!deleted) {
return { success: false, error: `Custom tool not found: ${params.toolId}` }
}
return {
success: true,
output: {
success: true,
operation,
toolId: params.toolId,
message: 'Deleted custom tool',
},
}
}
return {
success: false,
error: `Unsupported operation for manage_custom_tool: ${operation}`,
}
} catch (error) {
logger.error('manage_custom_tool execution failed', {
operation,
workspaceId,
userId: context.userId,
error: error instanceof Error ? error.message : String(error),
})
return {
success: false,
error: error instanceof Error ? error.message : 'Failed to manage custom tool',
}
}
}
const SERVER_TOOLS = new Set<string>([
'get_blocks_and_tools',
'get_blocks_metadata',
@@ -83,6 +325,10 @@ const SERVER_TOOLS = new Set<string>([
'get_block_config',
'get_trigger_blocks',
'edit_workflow',
'workflow_context_get',
'workflow_context_expand',
'workflow_change',
'workflow_verify',
'get_workflow_console',
'search_documentation',
'search_online',
@@ -161,6 +407,19 @@ const SIM_WORKFLOW_TOOL_HANDLERS: Record<
}
}
},
oauth_request_access: async (p, _c) => {
const providerName = (p.providerName || p.provider_name || 'the provider') as string
return {
success: true,
output: {
success: true,
status: 'requested',
providerName,
message: `Requested ${providerName} OAuth connection. The user should complete the OAuth modal in the UI, then retry credential-dependent actions.`,
},
}
},
manage_custom_tool: (p, c) => executeManageCustomTool(p, c),
}
/**
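A hypothetical invocation sketch for the manage_custom_tool handler above, assuming executeManageCustomTool were exported; only the ExecutionContext fields it reads (userId, workspaceId) are filled in, and every value is illustrative:

const result = await executeManageCustomTool(
  {
    operation: 'add',
    title: 'lookup_order',
    schema: {
      type: 'function',
      function: { name: 'lookup_order', parameters: { type: 'object', properties: {} } },
    },
    code: 'return { ok: true }',
  },
  { userId: 'user_1', workspaceId: 'ws_1' } as ExecutionContext
)
// On success the handler returns
// { success: true, output: { success: true, operation: 'add', toolId, title: 'lookup_order',
//   message: 'Created custom tool "lookup_order"' } }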

View File

@@ -609,6 +609,83 @@ const META_edit_workflow: ToolMetadata = {
},
}
const META_workflow_change: ToolMetadata = {
displayNames: {
[ClientToolCallState.generating]: { text: 'Planning workflow changes', icon: Loader2 },
[ClientToolCallState.executing]: { text: 'Applying workflow changes', icon: Loader2 },
[ClientToolCallState.success]: { text: 'Updated your workflow', icon: Grid2x2Check },
[ClientToolCallState.error]: { text: 'Failed to update your workflow', icon: XCircle },
[ClientToolCallState.review]: { text: 'Review your workflow changes', icon: Grid2x2 },
[ClientToolCallState.rejected]: { text: 'Rejected workflow changes', icon: Grid2x2X },
[ClientToolCallState.aborted]: { text: 'Aborted workflow changes', icon: MinusCircle },
[ClientToolCallState.pending]: { text: 'Planning workflow changes', icon: Loader2 },
},
getDynamicText: (params, state) => {
const mode = typeof params?.mode === 'string' ? params.mode.toLowerCase() : ''
if (mode === 'dry_run') {
switch (state) {
case ClientToolCallState.success:
return 'Planned workflow changes'
case ClientToolCallState.executing:
case ClientToolCallState.generating:
case ClientToolCallState.pending:
return 'Planning workflow changes'
}
}
if (mode === 'apply' || typeof params?.proposalId === 'string') {
switch (state) {
case ClientToolCallState.success:
return 'Applied workflow changes'
case ClientToolCallState.executing:
case ClientToolCallState.generating:
case ClientToolCallState.pending:
return 'Applying workflow changes'
}
}
return undefined
},
uiConfig: {
isSpecial: true,
customRenderer: 'edit_summary',
},
}
const META_workflow_context_get: ToolMetadata = {
displayNames: {
[ClientToolCallState.generating]: { text: 'Gathering workflow context', icon: Loader2 },
[ClientToolCallState.pending]: { text: 'Gathering workflow context', icon: Loader2 },
[ClientToolCallState.executing]: { text: 'Gathering workflow context', icon: Loader2 },
[ClientToolCallState.success]: { text: 'Gathered workflow context', icon: FileText },
[ClientToolCallState.error]: { text: 'Failed to gather workflow context', icon: XCircle },
[ClientToolCallState.rejected]: { text: 'Skipped workflow context', icon: MinusCircle },
[ClientToolCallState.aborted]: { text: 'Aborted workflow context', icon: MinusCircle },
},
}
const META_workflow_context_expand: ToolMetadata = {
displayNames: {
[ClientToolCallState.generating]: { text: 'Expanding workflow schemas', icon: Loader2 },
[ClientToolCallState.pending]: { text: 'Expanding workflow schemas', icon: Loader2 },
[ClientToolCallState.executing]: { text: 'Expanding workflow schemas', icon: Loader2 },
[ClientToolCallState.success]: { text: 'Expanded workflow schemas', icon: FileText },
[ClientToolCallState.error]: { text: 'Failed to expand workflow schemas', icon: XCircle },
[ClientToolCallState.rejected]: { text: 'Skipped schema expansion', icon: MinusCircle },
[ClientToolCallState.aborted]: { text: 'Aborted schema expansion', icon: MinusCircle },
},
}
const META_workflow_verify: ToolMetadata = {
displayNames: {
[ClientToolCallState.generating]: { text: 'Verifying workflow', icon: Loader2 },
[ClientToolCallState.pending]: { text: 'Verifying workflow', icon: Loader2 },
[ClientToolCallState.executing]: { text: 'Verifying workflow', icon: Loader2 },
[ClientToolCallState.success]: { text: 'Verified workflow', icon: CheckCircle2 },
[ClientToolCallState.error]: { text: 'Workflow verification failed', icon: XCircle },
[ClientToolCallState.rejected]: { text: 'Skipped workflow verification', icon: MinusCircle },
[ClientToolCallState.aborted]: { text: 'Aborted workflow verification', icon: MinusCircle },
},
}
const META_evaluate: ToolMetadata = {
displayNames: {
[ClientToolCallState.generating]: { text: 'Evaluating', icon: Loader2 },
@@ -2542,6 +2619,10 @@ const TOOL_METADATA_BY_ID: Record<string, ToolMetadata> = {
deploy_mcp: META_deploy_mcp,
edit: META_edit,
edit_workflow: META_edit_workflow,
workflow_context_get: META_workflow_context_get,
workflow_context_expand: META_workflow_context_expand,
workflow_change: META_workflow_change,
workflow_verify: META_workflow_verify,
evaluate: META_evaluate,
get_block_config: META_get_block_config,
get_block_options: META_get_block_options,

View File

@@ -13,6 +13,12 @@ import { getCredentialsServerTool } from '@/lib/copilot/tools/server/user/get-cr
import { setEnvironmentVariablesServerTool } from '@/lib/copilot/tools/server/user/set-environment-variables'
import { editWorkflowServerTool } from '@/lib/copilot/tools/server/workflow/edit-workflow'
import { getWorkflowConsoleServerTool } from '@/lib/copilot/tools/server/workflow/get-workflow-console'
import { workflowChangeServerTool } from '@/lib/copilot/tools/server/workflow/workflow-change'
import {
workflowContextExpandServerTool,
workflowContextGetServerTool,
} from '@/lib/copilot/tools/server/workflow/workflow-context'
import { workflowVerifyServerTool } from '@/lib/copilot/tools/server/workflow/workflow-verify'
import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas'
export { ExecuteResponseSuccessSchema }
@@ -35,6 +41,10 @@ const serverToolRegistry: Record<string, BaseServerTool> = {
[getCredentialsServerTool.name]: getCredentialsServerTool,
[makeApiRequestServerTool.name]: makeApiRequestServerTool,
[knowledgeBaseServerTool.name]: knowledgeBaseServerTool,
[workflowContextGetServerTool.name]: workflowContextGetServerTool,
[workflowContextExpandServerTool.name]: workflowContextExpandServerTool,
[workflowChangeServerTool.name]: workflowChangeServerTool,
[workflowVerifyServerTool.name]: workflowVerifyServerTool,
}
/**

View File

@@ -0,0 +1,93 @@
import crypto from 'crypto'
type StoreEntry<T> = {
value: T
expiresAt: number
}
const DEFAULT_TTL_MS = 30 * 60 * 1000
const MAX_ENTRIES = 500
class TTLStore<T> {
private readonly data = new Map<string, StoreEntry<T>>()
constructor(private readonly ttlMs = DEFAULT_TTL_MS) {}
set(value: T): string {
this.gc()
if (this.data.size >= MAX_ENTRIES) {
const firstKey = this.data.keys().next().value as string | undefined
if (firstKey) {
this.data.delete(firstKey)
}
}
const id = crypto.randomUUID()
this.data.set(id, {
value,
expiresAt: Date.now() + this.ttlMs,
})
return id
}
get(id: string): T | null {
const entry = this.data.get(id)
if (!entry) return null
if (entry.expiresAt <= Date.now()) {
this.data.delete(id)
return null
}
return entry.value
}
private gc(): void {
const now = Date.now()
for (const [key, entry] of this.data.entries()) {
if (entry.expiresAt <= now) {
this.data.delete(key)
}
}
}
}
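/*
 * Eviction note: set() relies on Map preserving insertion order. Expired entries are
 * garbage-collected first, and once MAX_ENTRIES is reached the oldest surviving entry
 * is dropped, so the store stays bounded without an external cleanup job.
 */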
export type WorkflowContextPack = {
workflowId: string
snapshotHash: string
workflowState: {
blocks: Record<string, any>
edges: Array<Record<string, any>>
loops: Record<string, any>
parallels: Record<string, any>
}
schemasByType: Record<string, any>
schemaRefsByType: Record<string, string>
summary: Record<string, any>
}
export type WorkflowChangeProposal = {
workflowId: string
baseSnapshotHash: string
compiledOperations: Array<Record<string, any>>
diffSummary: Record<string, any>
warnings: string[]
diagnostics: string[]
touchedBlocks: string[]
}
const contextPackStore = new TTLStore<WorkflowContextPack>()
const proposalStore = new TTLStore<WorkflowChangeProposal>()
export function saveContextPack(pack: WorkflowContextPack): string {
return contextPackStore.set(pack)
}
export function getContextPack(id: string): WorkflowContextPack | null {
return contextPackStore.get(id)
}
export function saveProposal(proposal: WorkflowChangeProposal): string {
return proposalStore.set(proposal)
}
export function getProposal(id: string): WorkflowChangeProposal | null {
return proposalStore.get(id)
}
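/*
 * Usage sketch for the dry_run/apply handshake built on this store (values are hypothetical).
 * IDs are opaque UUIDs and entries silently expire after the TTL, so callers must handle a
 * null result from getProposal:
 *   const proposalId = saveProposal({
 *     workflowId: 'wf_123',
 *     baseSnapshotHash: 'sha256:...',
 *     compiledOperations: [],
 *     diffSummary: {},
 *     warnings: [],
 *     diagnostics: [],
 *     touchedBlocks: [],
 *   })
 *   const proposal = getProposal(proposalId) // null once expired or evicted
 */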

View File

@@ -0,0 +1,987 @@
import crypto from 'crypto'
import { createLogger } from '@sim/logger'
import { z } from 'zod'
import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool'
import { getCredentialsServerTool } from '@/lib/copilot/tools/server/user/get-credentials'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
import { getBlock } from '@/blocks/registry'
import { getUserPermissionConfig } from '@/ee/access-control/utils/permission-check'
import {
getContextPack,
getProposal,
saveProposal,
type WorkflowChangeProposal,
} from './change-store'
import { editWorkflowServerTool } from './edit-workflow'
import { applyOperationsToWorkflowState } from './edit-workflow/engine'
import { preValidateCredentialInputs } from './edit-workflow/validation'
import { hashWorkflowState, loadWorkflowStateFromDb } from './workflow-state'
const logger = createLogger('WorkflowChangeServerTool')
const TargetSchema = z
.object({
blockId: z.string().optional(),
alias: z.string().optional(),
match: z
.object({
type: z.string().optional(),
name: z.string().optional(),
})
.optional(),
})
.strict()
const CredentialSelectionSchema = z
.object({
strategy: z.enum(['first_connected', 'by_id', 'by_name']).optional(),
id: z.string().optional(),
name: z.string().optional(),
})
.strict()
const ChangeOperationSchema = z
.object({
op: z.enum(['set', 'unset', 'merge', 'append', 'remove', 'attach_credential']),
path: z.string().optional(),
value: z.any().optional(),
provider: z.string().optional(),
selection: CredentialSelectionSchema.optional(),
required: z.boolean().optional(),
})
.strict()
const MutationSchema = z
.object({
action: z.enum([
'ensure_block',
'patch_block',
'remove_block',
'connect',
'disconnect',
'ensure_variable',
'set_variable',
]),
target: TargetSchema.optional(),
type: z.string().optional(),
name: z.string().optional(),
inputs: z.record(z.any()).optional(),
triggerMode: z.boolean().optional(),
advancedMode: z.boolean().optional(),
enabled: z.boolean().optional(),
changes: z.array(ChangeOperationSchema).optional(),
from: TargetSchema.optional(),
to: TargetSchema.optional(),
handle: z.string().optional(),
toHandle: z.string().optional(),
mode: z.enum(['set', 'append', 'remove']).optional(),
})
.strict()
const LinkEndpointSchema = z
.object({
blockId: z.string().optional(),
alias: z.string().optional(),
match: z
.object({
type: z.string().optional(),
name: z.string().optional(),
})
.optional(),
handle: z.string().optional(),
})
.strict()
const LinkSchema = z
.object({
from: LinkEndpointSchema,
to: LinkEndpointSchema,
mode: z.enum(['set', 'append', 'remove']).optional(),
})
.strict()
const ChangeSpecSchema = z
.object({
objective: z.string().optional(),
constraints: z.record(z.any()).optional(),
resources: z.record(z.any()).optional(),
mutations: z.array(MutationSchema).optional(),
links: z.array(LinkSchema).optional(),
acceptance: z.array(z.any()).optional(),
})
.strict()
const WorkflowChangeInputSchema = z
.object({
mode: z.enum(['dry_run', 'apply']),
workflowId: z.string().optional(),
contextPackId: z.string().optional(),
proposalId: z.string().optional(),
baseSnapshotHash: z.string().optional(),
expectedSnapshotHash: z.string().optional(),
changeSpec: ChangeSpecSchema.optional(),
})
.strict()
type WorkflowChangeParams = z.input<typeof WorkflowChangeInputSchema>
type ChangeSpec = z.input<typeof ChangeSpecSchema>
type TargetRef = z.input<typeof TargetSchema>
type ChangeOperation = z.input<typeof ChangeOperationSchema>
type CredentialRecord = {
id: string
name: string
provider: string
isDefault?: boolean
}
type ConnectionTarget = {
block: string
handle?: string
}
type ConnectionState = Map<string, Map<string, ConnectionTarget[]>>
function createDraftBlockId(seed?: string): string {
const suffix = crypto.randomUUID().slice(0, 8)
const base = seed ? seed.replace(/[^a-zA-Z0-9]/g, '').slice(0, 24) : 'draft'
return `${base || 'draft'}_${suffix}`
}
function normalizeHandle(handle?: string): string {
if (!handle) return 'source'
if (handle === 'success') return 'source'
return handle
}
function deepClone<T>(value: T): T {
return JSON.parse(JSON.stringify(value))
}
function stableUnique(values: string[]): string[] {
return [...new Set(values.filter(Boolean))]
}
function buildConnectionState(workflowState: {
edges: Array<Record<string, any>>
}): ConnectionState {
const state: ConnectionState = new Map()
for (const edge of workflowState.edges || []) {
const source = String(edge.source || '')
const target = String(edge.target || '')
if (!source || !target) continue
const sourceHandle = normalizeHandle(String(edge.sourceHandle || 'source'))
const targetHandle = edge.targetHandle ? String(edge.targetHandle) : undefined
let handleMap = state.get(source)
if (!handleMap) {
handleMap = new Map()
state.set(source, handleMap)
}
const existing = handleMap.get(sourceHandle) || []
existing.push({ block: target, handle: targetHandle })
handleMap.set(sourceHandle, existing)
}
return state
}
function connectionStateToPayload(state: Map<string, ConnectionTarget[]>): Record<string, any> {
const payload: Record<string, any> = {}
for (const [handle, targets] of state.entries()) {
if (!targets || targets.length === 0) continue
const normalizedTargets = targets.map((target) => {
if (!target.handle || target.handle === 'target') {
return target.block
}
return { block: target.block, handle: target.handle }
})
payload[handle] = normalizedTargets.length === 1 ? normalizedTargets[0] : normalizedTargets
}
return payload
}
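/*
 * Payload shape sketch (handle names and block ids are hypothetical): a lone target
 * serializes to its block id, a target with a non-default handle becomes { block, handle },
 * and multiple targets become an array, e.g.
 *   { source: 'api_2', error: [{ block: 'notify_1', handle: 'input' }, 'logger_3'] }
 */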
function findMatchingBlockId(
workflowState: { blocks: Record<string, any> },
target: TargetRef
): string | null {
if (target.blockId && workflowState.blocks[target.blockId]) {
return target.blockId
}
if (target.match) {
const type = target.match.type
const name = target.match.name?.toLowerCase()
const matches = Object.entries(workflowState.blocks || {}).filter(([_, block]) => {
const blockType = String((block as Record<string, unknown>).type || '')
const blockName = String((block as Record<string, unknown>).name || '').toLowerCase()
const typeOk = type ? blockType === type : true
const nameOk = name ? blockName === name : true
return typeOk && nameOk
})
if (matches.length === 1) {
return matches[0][0]
}
if (matches.length > 1) {
throw new Error(
`ambiguous_target: target match resolved to ${matches.length} blocks (${matches.map(([id]) => id).join(', ')})`
)
}
}
return null
}
function getNestedValue(value: any, path: string[]): any {
let cursor = value
for (const segment of path) {
if (cursor == null || typeof cursor !== 'object') return undefined
cursor = cursor[segment]
}
return cursor
}
function setNestedValue(base: any, path: string[], nextValue: any): any {
if (path.length === 0) return nextValue
const out = Array.isArray(base) ? [...base] : { ...(base || {}) }
let cursor: any = out
for (let i = 0; i < path.length - 1; i++) {
const key = path[i]
const current = cursor[key]
cursor[key] =
current && typeof current === 'object'
? Array.isArray(current)
? [...current]
: { ...current }
: {}
cursor = cursor[key]
}
cursor[path[path.length - 1]] = nextValue
return out
}
function removeArrayItem(arr: unknown[], value: unknown): unknown[] {
return arr.filter((item) => JSON.stringify(item) !== JSON.stringify(value))
}
function selectCredentialId(
availableCredentials: CredentialRecord[],
provider: string,
selection: z.infer<typeof CredentialSelectionSchema> | undefined
): string | null {
const providerLower = provider.toLowerCase()
const providerMatches = availableCredentials.filter((credential) => {
const credentialProvider = credential.provider.toLowerCase()
return (
credentialProvider === providerLower || credentialProvider.startsWith(`${providerLower}-`)
)
})
const pool = providerMatches.length > 0 ? providerMatches : availableCredentials
const strategy = selection?.strategy || 'first_connected'
if (strategy === 'by_id') {
const id = selection?.id
if (!id) return null
return pool.find((credential) => credential.id === id)?.id || null
}
if (strategy === 'by_name') {
const name = selection?.name?.toLowerCase()
if (!name) return null
const exact = pool.find((credential) => credential.name.toLowerCase() === name)
if (exact) return exact.id
const partial = pool.find((credential) => credential.name.toLowerCase().includes(name))
return partial?.id || null
}
const defaultCredential = pool.find((credential) => credential.isDefault)
if (defaultCredential) return defaultCredential.id
return pool[0]?.id || null
}
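/*
 * Selection order: credentials whose provider equals the requested provider (or starts with
 * '<provider>-') are preferred; if none match, all connected credentials are considered.
 * 'by_id' and 'by_name' then look up the requested credential within that pool, while the
 * default 'first_connected' strategy returns the isDefault credential if present, otherwise
 * the first entry.
 */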
function selectCredentialFieldId(blockType: string, provider: string): string | null {
const blockConfig = getBlock(blockType)
if (!blockConfig) return null
const oauthFields = (blockConfig.subBlocks || []).filter(
(subBlock) => subBlock.type === 'oauth-input'
)
if (oauthFields.length === 0) return null
const providerKey = provider.replace(/[^a-zA-Z0-9]/g, '').toLowerCase()
const fieldMatch = oauthFields.find((subBlock) =>
subBlock.id
.replace(/[^a-zA-Z0-9]/g, '')
.toLowerCase()
.includes(providerKey)
)
if (fieldMatch) return fieldMatch.id
return oauthFields[0].id
}
function ensureConnectionTarget(
existing: ConnectionTarget[],
target: ConnectionTarget,
mode: 'set' | 'append' | 'remove'
): ConnectionTarget[] {
if (mode === 'set') {
return [target]
}
if (mode === 'remove') {
return existing.filter(
(item) =>
!(item.block === target.block && (item.handle || 'target') === (target.handle || 'target'))
)
}
const duplicate = existing.some(
(item) =>
item.block === target.block && (item.handle || 'target') === (target.handle || 'target')
)
if (duplicate) return existing
return [...existing, target]
}
async function compileChangeSpec(params: {
changeSpec: ChangeSpec
workflowState: {
blocks: Record<string, any>
edges: Array<Record<string, any>>
loops: Record<string, any>
parallels: Record<string, any>
}
userId: string
workflowId: string
}): Promise<{
operations: Array<Record<string, any>>
warnings: string[]
diagnostics: string[]
touchedBlocks: string[]
}> {
const { changeSpec, workflowState, userId, workflowId } = params
const operations: Array<Record<string, any>> = []
const diagnostics: string[] = []
const warnings: string[] = []
const touchedBlocks = new Set<string>()
const aliasMap = new Map<string, string>()
const workingState = deepClone(workflowState)
const connectionState = buildConnectionState(workingState)
const connectionTouchedSources = new Set<string>()
const plannedBlockTypes = new Map<string, string>()
// Seed aliases from existing block names.
for (const [blockId, block] of Object.entries(workingState.blocks || {})) {
const blockName = String((block as Record<string, unknown>).name || '')
if (!blockName) continue
const normalizedAlias = blockName.replace(/[^a-zA-Z0-9]/g, '')
if (normalizedAlias && !aliasMap.has(normalizedAlias)) {
aliasMap.set(normalizedAlias, blockId)
}
}
const credentialsResponse = await getCredentialsServerTool.execute({ workflowId }, { userId })
const availableCredentials: CredentialRecord[] =
credentialsResponse?.oauth?.connected?.credentials?.map((credential: any) => ({
id: String(credential.id || ''),
name: String(credential.name || ''),
provider: String(credential.provider || ''),
isDefault: Boolean(credential.isDefault),
})) || []
const resolveTarget = (
target: TargetRef | undefined,
allowCreateAlias = false
): string | null => {
if (!target) return null
if (target.blockId) {
if (workingState.blocks[target.blockId] || plannedBlockTypes.has(target.blockId)) {
return target.blockId
}
return allowCreateAlias ? target.blockId : null
}
if (target.alias) {
if (aliasMap.has(target.alias)) return aliasMap.get(target.alias) || null
// Fall back to matching the alias against block names, since findMatchingBlockId ignores alias.
const byMatch = findMatchingBlockId(workingState, { match: { name: target.alias } })
if (byMatch) {
aliasMap.set(target.alias, byMatch)
return byMatch
}
return allowCreateAlias ? target.alias : null
}
const matched = findMatchingBlockId(workingState, target)
if (matched) return matched
return null
}
const applyPatchChange = (
targetId: string,
blockType: string | null,
change: ChangeOperation,
paramsOut: Record<string, any>
): void => {
if (change.op === 'attach_credential') {
const provider = change.provider
if (!provider) {
diagnostics.push(`attach_credential on ${targetId} is missing provider`)
return
}
if (!blockType) {
diagnostics.push(`attach_credential on ${targetId} failed: unknown block type`)
return
}
const credentialFieldId = selectCredentialFieldId(blockType, provider)
if (!credentialFieldId) {
const msg = `No oauth input field found for block type "${blockType}" on ${targetId}`
if (change.required) diagnostics.push(msg)
else warnings.push(msg)
return
}
const credentialId = selectCredentialId(availableCredentials, provider, change.selection)
if (!credentialId) {
const msg = `No credential found for provider "${provider}" on ${targetId}`
if (change.required) diagnostics.push(msg)
else warnings.push(msg)
return
}
paramsOut.inputs = paramsOut.inputs || {}
paramsOut.inputs[credentialFieldId] = credentialId
return
}
if (!change.path) {
diagnostics.push(`${change.op} on ${targetId} requires a path`)
return
}
const pathSegments = change.path.split('.').filter(Boolean)
if (pathSegments.length === 0) {
diagnostics.push(`${change.op} on ${targetId} has an invalid path "${change.path}"`)
return
}
if (pathSegments[0] === 'inputs') {
const inputKey = pathSegments[1]
if (!inputKey) {
diagnostics.push(`${change.op} on ${targetId} has invalid input path "${change.path}"`)
return
}
const currentInputValue =
paramsOut.inputs?.[inputKey] ??
workingState.blocks[targetId]?.subBlocks?.[inputKey]?.value ??
null
let nextInputValue = currentInputValue
const nestedPath = pathSegments.slice(2)
if (change.op === 'set') {
nextInputValue =
nestedPath.length > 0
? setNestedValue(currentInputValue ?? {}, nestedPath, change.value)
: change.value
} else if (change.op === 'unset') {
nextInputValue =
nestedPath.length > 0 ? setNestedValue(currentInputValue ?? {}, nestedPath, null) : null
} else if (change.op === 'merge') {
if (nestedPath.length > 0) {
const baseObject = getNestedValue(currentInputValue ?? {}, nestedPath) || {}
if (
baseObject &&
typeof baseObject === 'object' &&
change.value &&
typeof change.value === 'object'
) {
nextInputValue = setNestedValue(currentInputValue ?? {}, nestedPath, {
...baseObject,
...(change.value as Record<string, unknown>),
})
} else {
diagnostics.push(`merge on ${targetId} at "${change.path}" requires object values`)
return
}
} else if (
currentInputValue &&
typeof currentInputValue === 'object' &&
!Array.isArray(currentInputValue) &&
change.value &&
typeof change.value === 'object' &&
!Array.isArray(change.value)
) {
nextInputValue = { ...currentInputValue, ...(change.value as Record<string, unknown>) }
} else if (currentInputValue == null && change.value && typeof change.value === 'object') {
nextInputValue = change.value
} else {
diagnostics.push(`merge on ${targetId} at "${change.path}" requires object values`)
return
}
} else if (change.op === 'append') {
const arr = Array.isArray(currentInputValue) ? [...currentInputValue] : []
arr.push(change.value)
nextInputValue = arr
} else if (change.op === 'remove') {
if (!Array.isArray(currentInputValue)) {
diagnostics.push(`remove on ${targetId} at "${change.path}" requires an array value`)
return
}
nextInputValue = removeArrayItem(currentInputValue, change.value)
}
paramsOut.inputs = paramsOut.inputs || {}
paramsOut.inputs[inputKey] = nextInputValue
return
}
if (pathSegments.length !== 1) {
diagnostics.push(
`Unsupported path "${change.path}" on ${targetId}. Use inputs.* or top-level field names.`
)
return
}
const topLevelField = pathSegments[0]
if (!['name', 'type', 'triggerMode', 'advancedMode', 'enabled'].includes(topLevelField)) {
diagnostics.push(`Unsupported top-level path "${change.path}" on ${targetId}`)
return
}
paramsOut[topLevelField] = change.op === 'unset' ? null : change.value
}
for (const mutation of changeSpec.mutations || []) {
if (mutation.action === 'ensure_block') {
const targetId = resolveTarget(mutation.target, true)
if (!targetId) {
diagnostics.push('ensure_block is missing a resolvable target')
continue
}
const existingBlock = workingState.blocks[targetId]
if (existingBlock) {
const editParams: Record<string, any> = {}
if (mutation.name) editParams.name = mutation.name
if (mutation.type) editParams.type = mutation.type
if (mutation.inputs) editParams.inputs = mutation.inputs
if (mutation.triggerMode !== undefined) editParams.triggerMode = mutation.triggerMode
if (mutation.advancedMode !== undefined) editParams.advancedMode = mutation.advancedMode
if (mutation.enabled !== undefined) editParams.enabled = mutation.enabled
operations.push({
operation_type: 'edit',
block_id: targetId,
params: editParams,
})
touchedBlocks.add(targetId)
} else {
if (!mutation.type || !mutation.name) {
diagnostics.push(`ensure_block for "${targetId}" requires type and name when creating`)
continue
}
const blockId =
mutation.target?.blockId || mutation.target?.alias || createDraftBlockId(mutation.name)
const addParams: Record<string, any> = {
type: mutation.type,
name: mutation.name,
}
if (mutation.inputs) addParams.inputs = mutation.inputs
if (mutation.triggerMode !== undefined) addParams.triggerMode = mutation.triggerMode
if (mutation.advancedMode !== undefined) addParams.advancedMode = mutation.advancedMode
if (mutation.enabled !== undefined) addParams.enabled = mutation.enabled
operations.push({
operation_type: 'add',
block_id: blockId,
params: addParams,
})
workingState.blocks[blockId] = {
id: blockId,
type: mutation.type,
name: mutation.name,
subBlocks: Object.fromEntries(
Object.entries(mutation.inputs || {}).map(([key, value]) => [
key,
{ id: key, value, type: 'short-input' },
])
),
triggerMode: mutation.triggerMode || false,
advancedMode: mutation.advancedMode || false,
enabled: mutation.enabled !== undefined ? mutation.enabled : true,
}
plannedBlockTypes.set(blockId, mutation.type)
touchedBlocks.add(blockId)
if (mutation.target?.alias) aliasMap.set(mutation.target.alias, blockId)
}
continue
}
if (mutation.action === 'patch_block') {
const targetId = resolveTarget(mutation.target)
if (!targetId) {
diagnostics.push('patch_block target could not be resolved')
continue
}
const blockType =
String(workingState.blocks[targetId]?.type || '') || plannedBlockTypes.get(targetId) || null
const editParams: Record<string, any> = {}
for (const change of mutation.changes || []) {
applyPatchChange(targetId, blockType, change, editParams)
}
if (Object.keys(editParams).length === 0) {
warnings.push(`patch_block for ${targetId} had no effective changes`)
continue
}
operations.push({
operation_type: 'edit',
block_id: targetId,
params: editParams,
})
touchedBlocks.add(targetId)
continue
}
if (mutation.action === 'remove_block') {
const targetId = resolveTarget(mutation.target)
if (!targetId) {
diagnostics.push('remove_block target could not be resolved')
continue
}
operations.push({
operation_type: 'delete',
block_id: targetId,
params: {},
})
touchedBlocks.add(targetId)
connectionState.delete(targetId)
for (const [source, handles] of connectionState.entries()) {
let removedEdge = false
for (const [handle, targets] of handles.entries()) {
const nextTargets = targets.filter((target) => target.block !== targetId)
if (nextTargets.length !== targets.length) {
handles.set(handle, nextTargets)
removedEdge = true
}
}
// Only re-emit connections for sources that actually lost an edge to the removed block.
if (removedEdge) connectionTouchedSources.add(source)
}
continue
}
if (mutation.action === 'connect' || mutation.action === 'disconnect') {
const from = resolveTarget(mutation.from)
const to = resolveTarget(mutation.to)
if (!from || !to) {
diagnostics.push(`${mutation.action} requires resolvable from/to targets`)
continue
}
const sourceHandle = normalizeHandle(mutation.handle)
const targetHandle = mutation.toHandle || 'target'
let sourceMap = connectionState.get(from)
if (!sourceMap) {
sourceMap = new Map()
connectionState.set(from, sourceMap)
}
const existingTargets = sourceMap.get(sourceHandle) || []
const mode = mutation.action === 'disconnect' ? 'remove' : mutation.mode || 'set'
const nextTargets = ensureConnectionTarget(
existingTargets,
{ block: to, handle: targetHandle },
mode
)
sourceMap.set(sourceHandle, nextTargets)
connectionTouchedSources.add(from)
touchedBlocks.add(from)
}
}
for (const link of changeSpec.links || []) {
const from = resolveTarget(
{
blockId: link.from.blockId,
alias: link.from.alias,
match: link.from.match,
},
true
)
const to = resolveTarget(
{
blockId: link.to.blockId,
alias: link.to.alias,
match: link.to.match,
},
true
)
if (!from || !to) {
diagnostics.push('link contains unresolved from/to target')
continue
}
const sourceHandle = normalizeHandle(link.from.handle)
const targetHandle = link.to.handle || 'target'
let sourceMap = connectionState.get(from)
if (!sourceMap) {
sourceMap = new Map()
connectionState.set(from, sourceMap)
}
const existingTargets = sourceMap.get(sourceHandle) || []
const nextTargets = ensureConnectionTarget(
existingTargets,
{ block: to, handle: targetHandle },
link.mode || 'set'
)
sourceMap.set(sourceHandle, nextTargets)
connectionTouchedSources.add(from)
touchedBlocks.add(from)
}
for (const sourceBlockId of stableUnique([...connectionTouchedSources])) {
if (!connectionState.has(sourceBlockId)) continue
const sourceConnections = connectionState.get(sourceBlockId)!
operations.push({
operation_type: 'edit',
block_id: sourceBlockId,
params: {
connections: connectionStateToPayload(sourceConnections),
},
})
}
return {
operations,
warnings,
diagnostics,
touchedBlocks: [...touchedBlocks],
}
}
function summarizeDiff(
beforeState: { blocks: Record<string, any>; edges: Array<Record<string, any>> },
afterState: { blocks: Record<string, any>; edges: Array<Record<string, any>> },
operations: Array<Record<string, any>>
): Record<string, any> {
const beforeBlocks = Object.keys(beforeState.blocks || {}).length
const afterBlocks = Object.keys(afterState.blocks || {}).length
const beforeEdges = (beforeState.edges || []).length
const afterEdges = (afterState.edges || []).length
const counts = operations.reduce<Record<string, number>>((acc, operation) => {
const opType = String(operation.operation_type || 'unknown')
acc[opType] = (acc[opType] || 0) + 1
return acc
}, {})
return {
operationCounts: counts,
blocks: {
before: beforeBlocks,
after: afterBlocks,
delta: afterBlocks - beforeBlocks,
},
edges: {
before: beforeEdges,
after: afterEdges,
delta: afterEdges - beforeEdges,
},
}
}
async function validateAndSimulateOperations(params: {
workflowState: {
blocks: Record<string, any>
edges: Array<Record<string, any>>
loops: Record<string, any>
parallels: Record<string, any>
}
operations: Array<Record<string, any>>
userId: string
}): Promise<{
operationsForApply: Array<Record<string, any>>
simulatedState: {
blocks: Record<string, any>
edges: Array<Record<string, any>>
loops: Record<string, any>
parallels: Record<string, any>
}
warnings: string[]
diagnostics: string[]
}> {
const diagnostics: string[] = []
const warnings: string[] = []
const permissionConfig = await getUserPermissionConfig(params.userId)
const { filteredOperations, errors: preValidationErrors } = await preValidateCredentialInputs(
params.operations as any,
{ userId: params.userId },
params.workflowState
)
for (const error of preValidationErrors) {
warnings.push(error.error)
}
const { state, validationErrors, skippedItems } = applyOperationsToWorkflowState(
params.workflowState,
filteredOperations as any,
permissionConfig
)
for (const validationError of validationErrors) {
warnings.push(validationError.error)
}
for (const skippedItem of skippedItems) {
warnings.push(skippedItem.reason)
}
if (Object.keys(state.blocks || {}).length === 0) {
diagnostics.push('Simulation produced an empty workflow state')
}
return {
operationsForApply: filteredOperations as Array<Record<string, any>>,
simulatedState: state,
warnings,
diagnostics,
}
}
export const workflowChangeServerTool: BaseServerTool<WorkflowChangeParams, any> = {
name: 'workflow_change',
inputSchema: WorkflowChangeInputSchema,
async execute(params: WorkflowChangeParams, context?: { userId: string }): Promise<any> {
if (!context?.userId) {
throw new Error('Unauthorized workflow access')
}
if (params.mode === 'dry_run') {
const workflowId = params.workflowId || getContextPack(params.contextPackId || '')?.workflowId
if (!workflowId) {
throw new Error('workflowId is required for dry_run')
}
if (!params.changeSpec) {
throw new Error('changeSpec is required for dry_run')
}
const authorization = await authorizeWorkflowByWorkspacePermission({
workflowId,
userId: context.userId,
action: 'write',
})
if (!authorization.allowed) {
throw new Error(authorization.message || 'Unauthorized workflow access')
}
const { workflowState } = await loadWorkflowStateFromDb(workflowId)
const currentHash = hashWorkflowState(workflowState as unknown as Record<string, unknown>)
const requestedHash = params.baseSnapshotHash
if (requestedHash && requestedHash !== currentHash) {
throw new Error(
`snapshot_mismatch: expected ${requestedHash} but current state is ${currentHash}`
)
}
const compileResult = await compileChangeSpec({
changeSpec: params.changeSpec,
workflowState,
userId: context.userId,
workflowId,
})
const simulation = await validateAndSimulateOperations({
workflowState,
operations: compileResult.operations,
userId: context.userId,
})
const diffSummary = summarizeDiff(
workflowState,
simulation.simulatedState,
simulation.operationsForApply
)
const diagnostics = [...compileResult.diagnostics, ...simulation.diagnostics]
const warnings = [...compileResult.warnings, ...simulation.warnings]
const proposal: WorkflowChangeProposal = {
workflowId,
baseSnapshotHash: currentHash,
compiledOperations: simulation.operationsForApply,
diffSummary,
warnings,
diagnostics,
touchedBlocks: compileResult.touchedBlocks,
}
const proposalId = saveProposal(proposal)
logger.info('Compiled workflow_change dry run', {
workflowId,
proposalId,
operationCount: proposal.compiledOperations.length,
warningCount: warnings.length,
diagnosticsCount: diagnostics.length,
})
return {
success: diagnostics.length === 0,
mode: 'dry_run',
workflowId,
proposalId,
baseSnapshotHash: currentHash,
compiledOperations: proposal.compiledOperations,
diffSummary,
warnings,
diagnostics,
touchedBlocks: proposal.touchedBlocks,
}
}
// apply mode
const proposalId = params.proposalId
if (!proposalId) {
throw new Error('proposalId is required for apply')
}
const proposal = getProposal(proposalId)
if (!proposal) {
throw new Error(`Proposal not found or expired: ${proposalId}`)
}
const authorization = await authorizeWorkflowByWorkspacePermission({
workflowId: proposal.workflowId,
userId: context.userId,
action: 'write',
})
if (!authorization.allowed) {
throw new Error(authorization.message || 'Unauthorized workflow access')
}
const { workflowState } = await loadWorkflowStateFromDb(proposal.workflowId)
const currentHash = hashWorkflowState(workflowState as unknown as Record<string, unknown>)
const expectedHash = params.expectedSnapshotHash || proposal.baseSnapshotHash
if (expectedHash && expectedHash !== currentHash) {
throw new Error(`snapshot_mismatch: expected ${expectedHash} but current is ${currentHash}`)
}
const applyResult = await editWorkflowServerTool.execute(
{
workflowId: proposal.workflowId,
operations: proposal.compiledOperations as any,
},
{ userId: context.userId }
)
const appliedWorkflowState = (applyResult as any)?.workflowState
const newSnapshotHash = appliedWorkflowState
? hashWorkflowState(appliedWorkflowState as Record<string, unknown>)
: null
return {
success: true,
mode: 'apply',
workflowId: proposal.workflowId,
proposalId,
baseSnapshotHash: proposal.baseSnapshotHash,
newSnapshotHash,
operations: proposal.compiledOperations,
workflowState: appliedWorkflowState || null,
appliedDiff: proposal.diffSummary,
warnings: proposal.warnings,
diagnostics: proposal.diagnostics,
editResult: applyResult,
}
},
}
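/*
 * Example input sketch for the two-phase flow above (ids, block types, and input keys are
 * hypothetical). Dry run compiles a change spec into operations and returns a proposalId:
 *   {
 *     mode: 'dry_run',
 *     workflowId: 'wf_123',
 *     changeSpec: {
 *       objective: 'Notify Slack when the API call succeeds',
 *       mutations: [
 *         { action: 'ensure_block', target: { alias: 'notifier' }, type: 'slack', name: 'Notify Slack' },
 *         { action: 'patch_block', target: { alias: 'notifier' }, changes: [
 *           { op: 'set', path: 'inputs.channel', value: '#alerts' },
 *           { op: 'attach_credential', provider: 'slack' },
 *         ] },
 *       ],
 *       links: [{ from: { match: { type: 'api' } }, to: { alias: 'notifier' }, mode: 'append' }],
 *     },
 *   }
 * Apply replays the stored proposal, guarded by the snapshot hash:
 *   { mode: 'apply', proposalId: '<from dry_run>', expectedSnapshotHash: '<baseSnapshotHash>' }
 */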

View File

@@ -0,0 +1,158 @@
import { createLogger } from '@sim/logger'
import { z } from 'zod'
import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
import { getContextPack, saveContextPack } from './change-store'
import {
buildSchemasByType,
getAllKnownBlockTypes,
hashWorkflowState,
loadWorkflowStateFromDb,
summarizeWorkflowState,
} from './workflow-state'
const logger = createLogger('WorkflowContextServerTool')
const WorkflowContextGetInputSchema = z.object({
workflowId: z.string(),
objective: z.string().optional(),
includeBlockTypes: z.array(z.string()).optional(),
includeAllSchemas: z.boolean().optional(),
})
type WorkflowContextGetParams = z.infer<typeof WorkflowContextGetInputSchema>
const WorkflowContextExpandInputSchema = z.object({
contextPackId: z.string(),
blockTypes: z.array(z.string()).optional(),
schemaRefs: z.array(z.string()).optional(),
})
type WorkflowContextExpandParams = z.infer<typeof WorkflowContextExpandInputSchema>
function parseSchemaRefToBlockType(schemaRef: string): string | null {
if (!schemaRef) return null
const [blockType] = schemaRef.split('@')
return blockType || null
}
function buildAvailableBlockCatalog(
schemaRefsByType: Record<string, string>
): Array<Record<string, any>> {
return Object.entries(schemaRefsByType)
.sort((a, b) => a[0].localeCompare(b[0]))
.map(([blockType, schemaRef]) => ({
blockType,
schemaRef,
}))
}
export const workflowContextGetServerTool: BaseServerTool<WorkflowContextGetParams, any> = {
name: 'workflow_context_get',
inputSchema: WorkflowContextGetInputSchema,
async execute(params: WorkflowContextGetParams, context?: { userId: string }): Promise<any> {
if (!context?.userId) {
throw new Error('Unauthorized workflow access')
}
const authorization = await authorizeWorkflowByWorkspacePermission({
workflowId: params.workflowId,
userId: context.userId,
action: 'read',
})
if (!authorization.allowed) {
throw new Error(authorization.message || 'Unauthorized workflow access')
}
const { workflowState } = await loadWorkflowStateFromDb(params.workflowId)
const snapshotHash = hashWorkflowState(workflowState as unknown as Record<string, unknown>)
const blockTypesInWorkflow = Object.values(workflowState.blocks || {}).map((block: any) =>
String(block?.type || '')
)
const requestedTypes = params.includeBlockTypes || []
const includeAllSchemas = params.includeAllSchemas === true
const candidateTypes = includeAllSchemas
? getAllKnownBlockTypes()
: [...blockTypesInWorkflow, ...requestedTypes]
const { schemasByType, schemaRefsByType } = buildSchemasByType(candidateTypes)
const summary = summarizeWorkflowState(workflowState)
const packId = saveContextPack({
workflowId: params.workflowId,
snapshotHash,
workflowState,
schemasByType,
schemaRefsByType,
summary: {
...summary,
objective: params.objective || null,
},
})
logger.info('Generated workflow context pack', {
workflowId: params.workflowId,
contextPackId: packId,
schemaCount: Object.keys(schemaRefsByType).length,
})
return {
success: true,
contextPackId: packId,
workflowId: params.workflowId,
snapshotHash,
summary: {
...summary,
objective: params.objective || null,
},
schemaRefsByType,
availableBlockCatalog: buildAvailableBlockCatalog(schemaRefsByType),
inScopeSchemas: schemasByType,
}
},
}
export const workflowContextExpandServerTool: BaseServerTool<WorkflowContextExpandParams, any> = {
name: 'workflow_context_expand',
inputSchema: WorkflowContextExpandInputSchema,
async execute(params: WorkflowContextExpandParams, context?: { userId: string }): Promise<any> {
if (!context?.userId) {
throw new Error('Unauthorized workflow access')
}
const contextPack = getContextPack(params.contextPackId)
if (!contextPack) {
throw new Error(`Context pack not found or expired: ${params.contextPackId}`)
}
const authorization = await authorizeWorkflowByWorkspacePermission({
workflowId: contextPack.workflowId,
userId: context.userId,
action: 'read',
})
if (!authorization.allowed) {
throw new Error(authorization.message || 'Unauthorized workflow access')
}
const requestedBlockTypes = new Set<string>()
for (const blockType of params.blockTypes || []) {
if (blockType) requestedBlockTypes.add(blockType)
}
for (const schemaRef of params.schemaRefs || []) {
const blockType = parseSchemaRefToBlockType(schemaRef)
if (blockType) requestedBlockTypes.add(blockType)
}
const typesToExpand = [...requestedBlockTypes]
const { schemasByType, schemaRefsByType } = buildSchemasByType(typesToExpand)
return {
success: true,
contextPackId: params.contextPackId,
workflowId: contextPack.workflowId,
snapshotHash: contextPack.snapshotHash,
schemasByType,
schemaRefsByType,
}
},
}
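/*
 * Call-sequence sketch (parameter values are hypothetical): workflow_context_get returns a
 * contextPackId plus schemaRefsByType entries shaped like 'slack@sha256:<hash>'; when full
 * schemas for additional block types are needed later, workflow_context_expand reuses the pack:
 *   await workflowContextGetServerTool.execute({ workflowId: 'wf_123' }, { userId })
 *   await workflowContextExpandServerTool.execute(
 *     { contextPackId: 'pack_abc', blockTypes: ['slack'], schemaRefs: ['api@sha256:<hash>'] },
 *     { userId }
 *   )
 */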

View File

@@ -0,0 +1,226 @@
import crypto from 'crypto'
import { db } from '@sim/db'
import { workflow as workflowTable } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils'
import { getAllBlockTypes, getBlock } from '@/blocks/registry'
import type { SubBlockConfig } from '@/blocks/types'
const logger = createLogger('WorkflowContextState')
function stableSortValue(value: any): any {
if (Array.isArray(value)) {
return value.map(stableSortValue)
}
if (value && typeof value === 'object') {
const sorted: Record<string, any> = {}
for (const key of Object.keys(value).sort()) {
sorted[key] = stableSortValue(value[key])
}
return sorted
}
return value
}
export function hashWorkflowState(state: Record<string, unknown>): string {
const stable = stableSortValue(state)
const payload = JSON.stringify(stable)
return `sha256:${crypto.createHash('sha256').update(payload).digest('hex')}`
}
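/*
 * Because keys are recursively sorted before hashing, two states that differ only in
 * property order produce the same hash, e.g.
 *   hashWorkflowState({ blocks: {}, edges: [] }) === hashWorkflowState({ edges: [], blocks: {} })
 * Array order still matters, so reordered edges yield a different hash.
 */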
function normalizeOptions(options: unknown): string[] | null {
if (!Array.isArray(options)) return null
const normalized = options
.map((option) => {
if (option == null) return null
if (typeof option === 'object') {
const optionRecord = option as Record<string, unknown>
const id = optionRecord.id
if (typeof id === 'string') return id
const label = optionRecord.label
if (typeof label === 'string') return label
return null
}
return String(option)
})
.filter((value): value is string => Boolean(value))
return normalized.length > 0 ? normalized : null
}
function serializeRequired(required: SubBlockConfig['required']): boolean | Record<string, any> {
if (typeof required === 'boolean') return required
if (!required) return false
if (typeof required === 'object') {
const out: Record<string, any> = {}
const record = required as Record<string, unknown>
for (const key of ['field', 'operator', 'value']) {
if (record[key] !== undefined) {
out[key] = record[key]
}
}
return out
}
return false
}
function serializeSubBlock(subBlock: SubBlockConfig): Record<string, unknown> {
const staticOptions =
typeof subBlock.options === 'function' ? null : normalizeOptions(subBlock.options)
return {
id: subBlock.id,
type: subBlock.type,
title: subBlock.title,
description: subBlock.description || null,
mode: subBlock.mode || null,
placeholder: subBlock.placeholder || null,
hidden: Boolean(subBlock.hidden),
multiSelect: Boolean(subBlock.multiSelect),
required: serializeRequired(subBlock.required),
hasDynamicOptions: typeof subBlock.options === 'function',
options: staticOptions,
defaultValue: subBlock.defaultValue ?? null,
min: subBlock.min ?? null,
max: subBlock.max ?? null,
}
}
function serializeBlockSchema(blockType: string): Record<string, unknown> | null {
const blockConfig = getBlock(blockType)
if (!blockConfig) return null
const subBlocks = Array.isArray(blockConfig.subBlocks)
? blockConfig.subBlocks.map(serializeSubBlock)
: []
const outputs = blockConfig.outputs || {}
const outputKeys = Object.keys(outputs)
return {
blockType,
blockName: blockConfig.name || blockType,
category: blockConfig.category,
triggerAllowed: Boolean(blockConfig.triggerAllowed || blockConfig.triggers?.enabled),
hasTriggersConfig: Boolean(blockConfig.triggers?.enabled),
subBlocks,
outputKeys,
longDescription: blockConfig.longDescription || null,
}
}
export function buildSchemasByType(blockTypes: string[]): {
schemasByType: Record<string, any>
schemaRefsByType: Record<string, string>
} {
const schemasByType: Record<string, any> = {}
const schemaRefsByType: Record<string, string> = {}
const uniqueTypes = [...new Set(blockTypes.filter(Boolean))]
for (const blockType of uniqueTypes) {
const schema = serializeBlockSchema(blockType)
if (!schema) continue
const stableSchema = stableSortValue(schema)
const schemaHash = crypto
.createHash('sha256')
.update(JSON.stringify(stableSchema))
.digest('hex')
schemasByType[blockType] = stableSchema
schemaRefsByType[blockType] = `${blockType}@sha256:${schemaHash}`
}
return { schemasByType, schemaRefsByType }
}
export async function loadWorkflowStateFromDb(workflowId: string): Promise<{
workflowState: {
blocks: Record<string, any>
edges: Array<Record<string, any>>
loops: Record<string, any>
parallels: Record<string, any>
}
workspaceId?: string
}> {
const [workflowRecord] = await db
.select({ workspaceId: workflowTable.workspaceId })
.from(workflowTable)
.where(eq(workflowTable.id, workflowId))
.limit(1)
if (!workflowRecord) {
throw new Error(`Workflow ${workflowId} not found`)
}
const normalized = await loadWorkflowFromNormalizedTables(workflowId)
if (!normalized) {
throw new Error(`Workflow ${workflowId} has no normalized data`)
}
const blocks = { ...normalized.blocks }
const invalidBlockIds: string[] = []
for (const [blockId, block] of Object.entries(blocks)) {
if (!(block as { type?: unknown })?.type) {
invalidBlockIds.push(blockId)
}
}
for (const blockId of invalidBlockIds) {
delete blocks[blockId]
}
const invalidSet = new Set(invalidBlockIds)
const edges = (normalized.edges || []).filter(
(edge: any) => !invalidSet.has(edge.source) && !invalidSet.has(edge.target)
)
if (invalidBlockIds.length > 0) {
logger.warn('Dropped blocks without type while loading workflow state', {
workflowId,
dropped: invalidBlockIds,
})
}
return {
workflowState: {
blocks,
edges,
loops: normalized.loops || {},
parallels: normalized.parallels || {},
},
workspaceId: workflowRecord.workspaceId || undefined,
}
}
export function summarizeWorkflowState(workflowState: {
blocks: Record<string, any>
edges: Array<Record<string, any>>
loops: Record<string, any>
parallels: Record<string, any>
}): Record<string, unknown> {
const blocks = workflowState.blocks || {}
const edges = workflowState.edges || []
const blockTypes: Record<string, number> = {}
const triggerBlocks: Array<{ id: string; name: string; type: string }> = []
for (const [blockId, block] of Object.entries(blocks)) {
const blockType = String((block as Record<string, unknown>).type || 'unknown')
blockTypes[blockType] = (blockTypes[blockType] || 0) + 1
if ((block as Record<string, unknown>).triggerMode === true) {
triggerBlocks.push({
id: blockId,
name: String((block as Record<string, unknown>).name || blockType),
type: blockType,
})
}
}
return {
blockCount: Object.keys(blocks).length,
edgeCount: edges.length,
loopCount: Object.keys(workflowState.loops || {}).length,
parallelCount: Object.keys(workflowState.parallels || {}).length,
blockTypes,
triggerBlocks,
}
}
export function getAllKnownBlockTypes(): string[] {
return getAllBlockTypes()
}

View File

@@ -0,0 +1,194 @@
import { createLogger } from '@sim/logger'
import { z } from 'zod'
import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool'
import { validateWorkflowState } from '@/lib/workflows/sanitization/validation'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
import { hashWorkflowState, loadWorkflowStateFromDb } from './workflow-state'
const logger = createLogger('WorkflowVerifyServerTool')
const AcceptanceItemSchema = z.union([
z.string(),
z.object({
kind: z.string().optional(),
assert: z.string(),
}),
])
const WorkflowVerifyInputSchema = z
.object({
workflowId: z.string(),
acceptance: z.array(AcceptanceItemSchema).optional(),
baseSnapshotHash: z.string().optional(),
})
.strict()
type WorkflowVerifyParams = z.infer<typeof WorkflowVerifyInputSchema>
function normalizeName(value: string): string {
return value.trim().toLowerCase()
}
function resolveBlockToken(
workflowState: { blocks: Record<string, any> },
token: string
): string | null {
if (!token) return null
if (workflowState.blocks[token]) return token
const normalized = normalizeName(token)
for (const [blockId, block] of Object.entries(workflowState.blocks || {})) {
const blockName = normalizeName(String((block as Record<string, unknown>).name || ''))
if (blockName === normalized) return blockId
}
return null
}
function hasPath(
workflowState: { edges: Array<Record<string, any>> },
blockPath: string[]
): boolean {
if (blockPath.length < 2) return true
const adjacency = new Map<string, string[]>()
for (const edge of workflowState.edges || []) {
const source = String(edge.source || '')
const target = String(edge.target || '')
if (!source || !target) continue
const existing = adjacency.get(source) || []
existing.push(target)
adjacency.set(source, existing)
}
for (let i = 0; i < blockPath.length - 1; i++) {
const from = blockPath[i]
const to = blockPath[i + 1]
const next = adjacency.get(from) || []
if (!next.includes(to)) return false
}
return true
}
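/*
 * Note: hasPath checks that each consecutive pair in blockPath is joined by a direct edge;
 * it does not search for transitive routes, so 'path_exists: A -> C' fails even when
 * A -> B -> C exists unless B is listed in the assertion.
 */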
function evaluateAssertions(params: {
workflowState: {
blocks: Record<string, any>
edges: Array<Record<string, any>>
}
assertions: string[]
}): { failures: string[]; checks: Array<Record<string, any>> } {
const failures: string[] = []
const checks: Array<Record<string, any>> = []
for (const assertion of params.assertions) {
if (assertion.startsWith('block_exists:')) {
const token = assertion.slice('block_exists:'.length).trim()
const blockId = resolveBlockToken(params.workflowState, token)
const passed = Boolean(blockId)
checks.push({ assert: assertion, passed, resolvedBlockId: blockId || null })
if (!passed) failures.push(`Assertion failed: ${assertion}`)
continue
}
if (assertion.startsWith('trigger_exists:')) {
const triggerType = normalizeName(assertion.slice('trigger_exists:'.length))
const triggerBlock = Object.values(params.workflowState.blocks || {}).find((block: any) => {
if (block?.triggerMode !== true) return false
return normalizeName(String(block?.type || '')) === triggerType
})
const passed = Boolean(triggerBlock)
checks.push({ assert: assertion, passed })
if (!passed) failures.push(`Assertion failed: ${assertion}`)
continue
}
if (assertion.startsWith('path_exists:')) {
const rawPath = assertion.slice('path_exists:'.length).trim()
const tokens = rawPath
.split('->')
.map((token) => token.trim())
.filter(Boolean)
const resolvedPath = tokens
.map((token) => resolveBlockToken(params.workflowState, token))
.filter((value): value is string => Boolean(value))
const resolvedAll = resolvedPath.length === tokens.length
const passed = resolvedAll && hasPath(params.workflowState, resolvedPath)
checks.push({
assert: assertion,
passed,
resolvedPath,
})
if (!passed) failures.push(`Assertion failed: ${assertion}`)
continue
}
// Unknown assertion format: record it as an explicit failure so it is surfaced rather than silently ignored.
checks.push({ assert: assertion, passed: false, reason: 'unknown_assertion_type' })
failures.push(`Unknown assertion format: ${assertion}`)
}
return { failures, checks }
}
export const workflowVerifyServerTool: BaseServerTool<WorkflowVerifyParams, any> = {
name: 'workflow_verify',
inputSchema: WorkflowVerifyInputSchema,
async execute(params: WorkflowVerifyParams, context?: { userId: string }): Promise<any> {
if (!context?.userId) {
throw new Error('Unauthorized workflow access')
}
const authorization = await authorizeWorkflowByWorkspacePermission({
workflowId: params.workflowId,
userId: context.userId,
action: 'read',
})
if (!authorization.allowed) {
throw new Error(authorization.message || 'Unauthorized workflow access')
}
const { workflowState } = await loadWorkflowStateFromDb(params.workflowId)
const snapshotHash = hashWorkflowState(workflowState as unknown as Record<string, unknown>)
if (params.baseSnapshotHash && params.baseSnapshotHash !== snapshotHash) {
return {
success: false,
verified: false,
reason: 'snapshot_mismatch',
expected: params.baseSnapshotHash,
current: snapshotHash,
}
}
const validation = validateWorkflowState(workflowState as any, { sanitize: false })
const assertions = (params.acceptance || []).map((item) =>
typeof item === 'string' ? item : item.assert
)
const assertionResults = evaluateAssertions({
workflowState,
assertions,
})
const verified =
validation.valid && assertionResults.failures.length === 0 && validation.errors.length === 0
logger.info('Workflow verification complete', {
workflowId: params.workflowId,
verified,
errorCount: validation.errors.length,
warningCount: validation.warnings.length,
assertionFailures: assertionResults.failures.length,
})
return {
success: true,
verified,
snapshotHash,
validation: {
valid: validation.valid,
errors: validation.errors,
warnings: validation.warnings,
},
assertions: assertionResults.checks,
failures: assertionResults.failures,
}
},
}
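/*
 * Acceptance sketch (values are hypothetical); each entry may be a plain string or an
 * { assert } object, and the supported prefixes are the three handled above:
 *   acceptance: [
 *     'block_exists: Notify Slack',
 *     'trigger_exists: schedule',
 *     { kind: 'connectivity', assert: 'path_exists: Start -> Notify Slack' },
 *   ]
 */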

View File

@@ -18,7 +18,6 @@ import {
import { flushStreamingUpdates, stopStreamingUpdates } from '@/lib/copilot/client-sse/handlers'
import type { ClientContentBlock, ClientStreamingContext } from '@/lib/copilot/client-sse/types'
import {
COPILOT_AUTO_ALLOWED_TOOLS_API_PATH,
COPILOT_CHAT_API_PATH,
COPILOT_CHAT_STREAM_API_PATH,
COPILOT_CHECKPOINTS_API_PATH,
@@ -84,6 +83,15 @@ function isPageUnloading(): boolean {
return _isPageUnloading
}
function isWorkflowEditToolCall(name?: string, params?: Record<string, unknown>): boolean {
if (name === 'edit_workflow') return true
if (name !== 'workflow_change') return false
const mode = typeof params?.mode === 'string' ? params.mode.toLowerCase() : ''
if (mode === 'apply') return true
return typeof params?.proposalId === 'string' && params.proposalId.length > 0
}
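/*
 * Treats three cases as workflow edits: any edit_workflow call, a workflow_change call with
 * mode 'apply', and a workflow_change call that carries a proposalId (the apply handshake).
 * workflow_change dry runs are excluded so they never trigger diff-preview handling.
 */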
function readActiveStreamFromStorage(): CopilotStreamInfo | null {
if (typeof window === 'undefined') return null
try {
@@ -140,41 +148,6 @@ function updateActiveStreamEventId(
writeActiveStreamToStorage(next)
}
const AUTO_ALLOWED_TOOLS_STORAGE_KEY = 'copilot_auto_allowed_tools'
function readAutoAllowedToolsFromStorage(): string[] | null {
if (typeof window === 'undefined') return null
try {
const raw = window.localStorage.getItem(AUTO_ALLOWED_TOOLS_STORAGE_KEY)
if (!raw) return null
const parsed = JSON.parse(raw)
if (!Array.isArray(parsed)) return null
return parsed.filter((item): item is string => typeof item === 'string')
} catch (error) {
logger.warn('[AutoAllowedTools] Failed to read local cache', {
error: error instanceof Error ? error.message : String(error),
})
return null
}
}
function writeAutoAllowedToolsToStorage(tools: string[]): void {
if (typeof window === 'undefined') return
try {
window.localStorage.setItem(AUTO_ALLOWED_TOOLS_STORAGE_KEY, JSON.stringify(tools))
} catch (error) {
logger.warn('[AutoAllowedTools] Failed to write local cache', {
error: error instanceof Error ? error.message : String(error),
})
}
}
function isToolAutoAllowedByList(toolId: string, autoAllowedTools: string[]): boolean {
if (!toolId) return false
const normalizedTarget = toolId.trim()
return autoAllowedTools.some((allowed) => allowed?.trim() === normalizedTarget)
}
/**
* Clear any lingering diff preview from a previous session.
* Called lazily when the store is first activated (setWorkflowId).
@@ -310,6 +283,50 @@ function parseModelKey(compositeKey: string): { provider: string; modelId: strin
return { provider: compositeKey.slice(0, slashIdx), modelId: compositeKey.slice(slashIdx + 1) }
}
/**
* Convert legacy/variant Claude IDs into the canonical ID shape used by the model catalog.
*
* Examples:
* - claude-4.5-opus -> claude-opus-4-5
* - claude-opus-4.6 -> claude-opus-4-6
* - anthropic.claude-opus-4-5-20251101-v1:0 -> claude-opus-4-5 (match key only)
*/
function canonicalizeModelMatchKey(modelId: string): string {
if (!modelId) return modelId
const normalized = modelId.trim().toLowerCase()
const toCanonicalClaude = (tier: string, version: string): string => {
const normalizedVersion = version.replace(/\./g, '-')
return `claude-${tier}-${normalizedVersion}`
}
const tierFirstExact = normalized.match(/^claude-(opus|sonnet|haiku)-(\d+(?:[.-]\d+)?)$/)
if (tierFirstExact) {
const [, tier, version] = tierFirstExact
return toCanonicalClaude(tier, version)
}
const versionFirstExact = normalized.match(/^claude-(\d+(?:[.-]\d+)?)-(opus|sonnet|haiku)$/)
if (versionFirstExact) {
const [, version, tier] = versionFirstExact
return toCanonicalClaude(tier, version)
}
const tierFirstEmbedded = normalized.match(/claude-(opus|sonnet|haiku)-(\d+(?:[.-]\d+)?)/)
if (tierFirstEmbedded) {
const [, tier, version] = tierFirstEmbedded
return toCanonicalClaude(tier, version)
}
const versionFirstEmbedded = normalized.match(/claude-(\d+(?:[.-]\d+)?)-(opus|sonnet|haiku)/)
if (versionFirstEmbedded) {
const [, version, tier] = versionFirstEmbedded
return toCanonicalClaude(tier, version)
}
return normalized
}
const MODEL_PROVIDER_PRIORITY = [
'anthropic',
'bedrock',
@@ -350,12 +367,23 @@ function normalizeSelectedModelKey(selectedModel: string, models: AvailableModel
const { provider, modelId } = parseModelKey(selectedModel)
const targetModelId = modelId || selectedModel
const targetMatchKey = canonicalizeModelMatchKey(targetModelId)
const matches = models.filter((m) => m.id.endsWith(`/${targetModelId}`))
const matches = models.filter((m) => {
const candidateModelId = parseModelKey(m.id).modelId || m.id
const candidateMatchKey = canonicalizeModelMatchKey(candidateModelId)
return (
candidateModelId === targetModelId ||
m.id.endsWith(`/${targetModelId}`) ||
candidateMatchKey === targetMatchKey
)
})
if (matches.length === 0) return selectedModel
if (provider) {
const sameProvider = matches.find((m) => m.provider === provider)
const sameProvider = matches.find(
(m) => m.provider === provider || m.id.startsWith(`${provider}/`)
)
if (sameProvider) return sameProvider.id
}
@@ -425,11 +453,6 @@ function prepareSendContext(
.catch((err) => {
logger.warn('[Copilot] Failed to load sensitive credential IDs', err)
})
get()
.loadAutoAllowedTools()
.catch((err) => {
logger.warn('[Copilot] Failed to load auto-allowed tools', err)
})
let newMessages: CopilotMessage[]
if (revertState) {
@@ -982,8 +1005,6 @@ async function resumeFromLiveStream(
return false
}
const cachedAutoAllowedTools = readAutoAllowedToolsFromStorage()
// Initial state (subset required for UI/streaming)
const initialState = {
mode: 'build' as const,
@@ -1018,8 +1039,6 @@ const initialState = {
streamingPlanContent: '',
toolCallsById: {} as Record<string, CopilotToolCall>,
suppressAutoSelect: false,
autoAllowedTools: cachedAutoAllowedTools ?? ([] as string[]),
autoAllowedToolsLoaded: cachedAutoAllowedTools !== null,
activeStream: null as CopilotStreamInfo | null,
messageQueue: [] as import('./types').QueuedMessage[],
suppressAbortContinueOption: false,
@@ -1058,8 +1077,6 @@ export const useCopilotStore = create<CopilotStore>()(
agentPrefetch: get().agentPrefetch,
availableModels: get().availableModels,
isLoadingModels: get().isLoadingModels,
autoAllowedTools: get().autoAllowedTools,
autoAllowedToolsLoaded: get().autoAllowedToolsLoaded,
})
},
@@ -1093,11 +1110,12 @@ export const useCopilotStore = create<CopilotStore>()(
const chatConfig = chat.config ?? {}
const chatMode = chatConfig.mode || get().mode
const chatModel = chatConfig.model || get().selectedModel
const normalizedChatModel = normalizeSelectedModelKey(chatModel, get().availableModels)
logger.debug('[Chat] Restoring chat config', {
chatId: chat.id,
mode: chatMode,
model: chatModel,
model: normalizedChatModel,
hasPlanArtifact: !!planArtifact,
})
@@ -1119,7 +1137,7 @@ export const useCopilotStore = create<CopilotStore>()(
showPlanTodos: false,
streamingPlanContent: planArtifact,
mode: chatMode,
selectedModel: chatModel as CopilotStore['selectedModel'],
selectedModel: normalizedChatModel as CopilotStore['selectedModel'],
suppressAutoSelect: false,
})
@@ -1292,6 +1310,10 @@ export const useCopilotStore = create<CopilotStore>()(
const refreshedConfig = updatedCurrentChat.config ?? {}
const refreshedMode = refreshedConfig.mode || get().mode
const refreshedModel = refreshedConfig.model || get().selectedModel
const normalizedRefreshedModel = normalizeSelectedModelKey(
refreshedModel,
get().availableModels
)
const toolCallsById = buildToolCallsById(normalizedMessages)
set({
@@ -1300,7 +1322,7 @@ export const useCopilotStore = create<CopilotStore>()(
toolCallsById,
streamingPlanContent: refreshedPlanArtifact,
mode: refreshedMode,
selectedModel: refreshedModel as CopilotStore['selectedModel'],
selectedModel: normalizedRefreshedModel as CopilotStore['selectedModel'],
})
}
try {
@@ -1320,11 +1342,15 @@ export const useCopilotStore = create<CopilotStore>()(
const chatConfig = mostRecentChat.config ?? {}
const chatMode = chatConfig.mode || get().mode
const chatModel = chatConfig.model || get().selectedModel
const normalizedChatModel = normalizeSelectedModelKey(
chatModel,
get().availableModels
)
logger.info('[Chat] Auto-selecting most recent chat with config', {
chatId: mostRecentChat.id,
mode: chatMode,
model: chatModel,
model: normalizedChatModel,
hasPlanArtifact: !!planArtifact,
})
@@ -1336,7 +1362,7 @@ export const useCopilotStore = create<CopilotStore>()(
toolCallsById,
streamingPlanContent: planArtifact,
mode: chatMode,
selectedModel: chatModel as CopilotStore['selectedModel'],
selectedModel: normalizedChatModel as CopilotStore['selectedModel'],
})
try {
await get().loadMessageCheckpoints(mostRecentChat.id)
@@ -1365,16 +1391,6 @@ export const useCopilotStore = create<CopilotStore>()(
// Send a message (streaming only)
sendMessage: async (message: string, options = {}) => {
if (!get().autoAllowedToolsLoaded) {
try {
await get().loadAutoAllowedTools()
} catch (error) {
logger.warn('[Copilot] Failed to preload auto-allowed tools before send', {
error: error instanceof Error ? error.message : String(error),
})
}
}
const prepared = prepareSendContext(get, set, message, options as SendMessageOptionsInput)
if (!prepared) return
@@ -1641,7 +1657,7 @@ export const useCopilotStore = create<CopilotStore>()(
const b = blocks[bi]
if (b?.type === 'tool_call') {
const tn = b.toolCall?.name
if (tn === 'edit_workflow') {
if (isWorkflowEditToolCall(tn, b.toolCall?.params)) {
id = b.toolCall?.id
break outer
}
@@ -1650,7 +1666,9 @@ export const useCopilotStore = create<CopilotStore>()(
}
// Fallback to map if not found in messages
if (!id) {
const candidates = Object.values(toolCallsById).filter((t) => t.name === 'edit_workflow')
const candidates = Object.values(toolCallsById).filter((t) =>
isWorkflowEditToolCall(t.name, t.params)
)
id = candidates.length ? candidates[candidates.length - 1].id : undefined
}
}
@@ -2268,7 +2286,8 @@ export const useCopilotStore = create<CopilotStore>()(
},
setSelectedModel: async (model) => {
set({ selectedModel: model })
const normalizedModel = normalizeSelectedModelKey(model, get().availableModels)
set({ selectedModel: normalizedModel as CopilotStore['selectedModel'] })
},
setAgentPrefetch: (prefetch) => set({ agentPrefetch: prefetch }),
loadAvailableModels: async () => {
@@ -2342,74 +2361,6 @@ export const useCopilotStore = create<CopilotStore>()(
}
},
loadAutoAllowedTools: async () => {
try {
logger.debug('[AutoAllowedTools] Loading from API...')
const res = await fetch(COPILOT_AUTO_ALLOWED_TOOLS_API_PATH)
logger.debug('[AutoAllowedTools] Load response', { status: res.status, ok: res.ok })
if (res.ok) {
const data = await res.json()
const tools = data.autoAllowedTools ?? []
set({ autoAllowedTools: tools, autoAllowedToolsLoaded: true })
writeAutoAllowedToolsToStorage(tools)
logger.debug('[AutoAllowedTools] Loaded successfully', { count: tools.length, tools })
} else {
set({ autoAllowedToolsLoaded: true })
logger.warn('[AutoAllowedTools] Load failed with status', { status: res.status })
}
} catch (err) {
set({ autoAllowedToolsLoaded: true })
logger.error('[AutoAllowedTools] Failed to load', { error: err })
}
},
addAutoAllowedTool: async (toolId: string) => {
try {
logger.debug('[AutoAllowedTools] Adding tool...', { toolId })
const res = await fetch(COPILOT_AUTO_ALLOWED_TOOLS_API_PATH, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ toolId }),
})
logger.debug('[AutoAllowedTools] API response', { toolId, status: res.status, ok: res.ok })
if (res.ok) {
const data = await res.json()
logger.debug('[AutoAllowedTools] API returned', { toolId, tools: data.autoAllowedTools })
const tools = data.autoAllowedTools ?? []
set({ autoAllowedTools: tools, autoAllowedToolsLoaded: true })
writeAutoAllowedToolsToStorage(tools)
logger.debug('[AutoAllowedTools] Added tool to store', { toolId })
}
} catch (err) {
logger.error('[AutoAllowedTools] Failed to add tool', { toolId, error: err })
}
},
removeAutoAllowedTool: async (toolId: string) => {
try {
const res = await fetch(
`${COPILOT_AUTO_ALLOWED_TOOLS_API_PATH}?toolId=${encodeURIComponent(toolId)}`,
{
method: 'DELETE',
}
)
if (res.ok) {
const data = await res.json()
const tools = data.autoAllowedTools ?? []
set({ autoAllowedTools: tools, autoAllowedToolsLoaded: true })
writeAutoAllowedToolsToStorage(tools)
logger.debug('[AutoAllowedTools] Removed tool', { toolId })
}
} catch (err) {
logger.error('[AutoAllowedTools] Failed to remove tool', { toolId, error: err })
}
},
isToolAutoAllowed: (toolId: string) => {
const { autoAllowedTools } = get()
return isToolAutoAllowedByList(toolId, autoAllowedTools)
},
// Credential masking
loadSensitiveCredentialIds: async () => {
try {


@@ -26,6 +26,26 @@ export interface CopilotToolCall {
params?: Record<string, unknown>
input?: Record<string, unknown>
display?: ClientToolDisplay
/** Server-provided UI contract for this tool call phase */
ui?: {
title?: string
phaseLabel?: string
icon?: string
showInterrupt?: boolean
showRemember?: boolean
autoAllowed?: boolean
actions?: Array<{
id: string
label: string
kind: 'accept' | 'reject'
remember?: boolean
}>
}
/** Server-provided execution routing contract */
execution?: {
target?: 'go' | 'go_subagent' | 'sim_server' | 'sim_client_capability' | string
capabilityId?: string
}
/** Content streamed from a subagent (e.g., debug agent) */
subAgentContent?: string
/** Tool calls made by the subagent */
@@ -167,10 +187,6 @@ export interface CopilotState {
// Per-message metadata captured at send-time for reliable stats
// Auto-allowed integration tools (tools that can run without confirmation)
autoAllowedTools: string[]
autoAllowedToolsLoaded: boolean
// Active stream metadata for reconnect/replay
activeStream: CopilotStreamInfo | null
@@ -247,11 +263,6 @@ export interface CopilotActions {
abortSignal?: AbortSignal
) => Promise<void>
handleNewChatCreation: (newChatId: string) => Promise<void>
loadAutoAllowedTools: () => Promise<void>
addAutoAllowedTool: (toolId: string) => Promise<void>
removeAutoAllowedTool: (toolId: string) => Promise<void>
isToolAutoAllowed: (toolId: string) => boolean
// Credential masking
loadSensitiveCredentialIds: () => Promise<void>
maskCredentialValue: (value: string) => string
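
A hedged example of a tool call shaped to the `ui` and `execution` contracts added to `CopilotToolCall` above. The `id`, `name`, and `params` fields are assumed from how tool calls are read elsewhere in the store; every value here is a placeholder.

```typescript
// Placeholder object exercising the new server-provided contracts; only the
// ui/execution shapes come from the interface above, the values are invented.
const exampleToolCall = {
  id: 'tc_001',
  name: 'workflow_change',
  params: { mode: 'apply', proposalId: 'prop_123' },
  ui: {
    title: 'Apply workflow changes',
    phaseLabel: 'Awaiting confirmation',
    showInterrupt: true,
    showRemember: true,
    autoAllowed: false,
    actions: [
      { id: 'accept', label: 'Apply', kind: 'accept' as const, remember: true },
      { id: 'reject', label: 'Skip', kind: 'reject' as const },
    ],
  },
  execution: {
    target: 'sim_server',
  },
}
```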


@@ -15,7 +15,7 @@ import {
captureBaselineSnapshot,
cloneWorkflowState,
createBatchedUpdater,
findLatestEditWorkflowToolCallId,
findLatestWorkflowEditToolCallId,
getLatestUserMessageId,
persistWorkflowStateToServer,
} from './utils'
@@ -334,7 +334,7 @@ export const useWorkflowDiffStore = create<WorkflowDiffState & WorkflowDiffActio
})
}
findLatestEditWorkflowToolCallId().then((toolCallId) => {
findLatestWorkflowEditToolCallId().then((toolCallId) => {
if (toolCallId) {
import('@/stores/panel/copilot/store')
.then(({ useCopilotStore }) => {
@@ -439,7 +439,7 @@ export const useWorkflowDiffStore = create<WorkflowDiffState & WorkflowDiffActio
})
}
findLatestEditWorkflowToolCallId().then((toolCallId) => {
findLatestWorkflowEditToolCallId().then((toolCallId) => {
if (toolCallId) {
import('@/stores/panel/copilot/store')
.then(({ useCopilotStore }) => {


@@ -126,6 +126,21 @@ export async function getLatestUserMessageId(): Promise<string | null> {
}
export async function findLatestEditWorkflowToolCallId(): Promise<string | undefined> {
return findLatestWorkflowEditToolCallId()
}
function isWorkflowEditToolCall(name?: string, params?: Record<string, unknown>): boolean {
if (name === 'edit_workflow') return true
if (name !== 'workflow_change') return false
const mode = typeof params?.mode === 'string' ? params.mode.toLowerCase() : ''
if (mode === 'apply') return true
// Be permissive for legacy/incomplete events: apply calls always include proposalId.
return typeof params?.proposalId === 'string' && params.proposalId.length > 0
}
export async function findLatestWorkflowEditToolCallId(): Promise<string | undefined> {
try {
const { useCopilotStore } = await import('@/stores/panel/copilot/store')
const { messages, toolCallsById } = useCopilotStore.getState()
@@ -134,17 +149,22 @@ export async function findLatestEditWorkflowToolCallId(): Promise<string | undef
const message = messages[mi]
if (message.role !== 'assistant' || !message.contentBlocks) continue
for (const block of message.contentBlocks) {
if (block?.type === 'tool_call' && block.toolCall?.name === 'edit_workflow') {
if (
block?.type === 'tool_call' &&
isWorkflowEditToolCall(block.toolCall?.name, block.toolCall?.params)
) {
return block.toolCall?.id
}
}
}
const fallback = Object.values(toolCallsById).filter((call) => call.name === 'edit_workflow')
const fallback = Object.values(toolCallsById).filter((call) =>
isWorkflowEditToolCall(call.name, call.params)
)
return fallback.length ? fallback[fallback.length - 1].id : undefined
} catch (error) {
logger.warn('Failed to resolve edit_workflow tool call id', { error })
logger.warn('Failed to resolve workflow edit tool call id', { error })
return undefined
}
}
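
Assuming the `isWorkflowEditToolCall` predicate defined above, a few illustrative inputs (placeholder values) and the classification they receive:

```typescript
isWorkflowEditToolCall('edit_workflow')                               // true  (legacy tool name)
isWorkflowEditToolCall('workflow_change', { mode: 'apply' })          // true  (explicit apply mode)
isWorkflowEditToolCall('workflow_change', { proposalId: 'prop_123' }) // true  (apply inferred from proposalId)
isWorkflowEditToolCall('workflow_change', { mode: 'propose' })        // false (not an apply call)
isWorkflowEditToolCall('list_workflows')                              // false (unrelated tool)
```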


@@ -0,0 +1,114 @@
import { TIMESTAMP_OUTPUT } from '@/tools/confluence/types'
import type { ToolConfig } from '@/tools/types'
export interface ConfluenceDeleteLabelParams {
accessToken: string
domain: string
pageId: string
labelName: string
cloudId?: string
}
export interface ConfluenceDeleteLabelResponse {
success: boolean
output: {
ts: string
pageId: string
labelName: string
deleted: boolean
}
}
export const confluenceDeleteLabelTool: ToolConfig<
ConfluenceDeleteLabelParams,
ConfluenceDeleteLabelResponse
> = {
id: 'confluence_delete_label',
name: 'Confluence Delete Label',
description: 'Remove a label from a Confluence page.',
version: '1.0.0',
oauth: {
required: true,
provider: 'confluence',
},
params: {
accessToken: {
type: 'string',
required: true,
visibility: 'hidden',
description: 'OAuth access token for Confluence',
},
domain: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)',
},
pageId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'Confluence page ID to remove the label from',
},
labelName: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'Name of the label to remove',
},
cloudId: {
type: 'string',
required: false,
visibility: 'user-only',
description:
'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.',
},
},
request: {
url: () => '/api/tools/confluence/labels',
method: 'DELETE',
headers: (params: ConfluenceDeleteLabelParams) => ({
Accept: 'application/json',
Authorization: `Bearer ${params.accessToken}`,
}),
body: (params: ConfluenceDeleteLabelParams) => ({
domain: params.domain,
accessToken: params.accessToken,
pageId: params.pageId?.trim(),
labelName: params.labelName?.trim(),
cloudId: params.cloudId,
}),
},
transformResponse: async (response: Response) => {
const data = await response.json()
return {
success: true,
output: {
ts: new Date().toISOString(),
pageId: data.pageId ?? '',
labelName: data.labelName ?? '',
deleted: true,
},
}
},
outputs: {
ts: TIMESTAMP_OUTPUT,
pageId: {
type: 'string',
description: 'Page ID the label was removed from',
},
labelName: {
type: 'string',
description: 'Name of the removed label',
},
deleted: {
type: 'boolean',
description: 'Deletion status',
},
},
}
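
A minimal sketch of the HTTP call described by the config above, issued directly against the proxy route. The path, method, Accept/Authorization headers, and body fields are taken from the config; the parameter values, the added Content-Type header, and the direct-fetch style are illustrative assumptions, since the project's tool runner may compose the request differently.

```typescript
// Sketch only: values are placeholders; a real call goes through the tool runner.
async function deleteLabelSketch(accessToken: string) {
  const res = await fetch('/api/tools/confluence/labels', {
    method: 'DELETE',
    headers: {
      Accept: 'application/json',
      Authorization: `Bearer ${accessToken}`,
      'Content-Type': 'application/json', // assumed for the JSON body
    },
    body: JSON.stringify({
      domain: 'yourcompany.atlassian.net',
      accessToken,
      pageId: '123456',
      labelName: 'obsolete',
    }),
  })
  const data = await res.json()
  // Mirrors transformResponse above: report the page, the label, and a deleted flag.
  return {
    ts: new Date().toISOString(),
    pageId: data.pageId ?? '',
    labelName: data.labelName ?? '',
    deleted: true,
  }
}
```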


@@ -0,0 +1,105 @@
import { TIMESTAMP_OUTPUT } from '@/tools/confluence/types'
import type { ToolConfig } from '@/tools/types'
export interface ConfluenceDeletePagePropertyParams {
accessToken: string
domain: string
pageId: string
propertyId: string
cloudId?: string
}
export interface ConfluenceDeletePagePropertyResponse {
success: boolean
output: {
ts: string
pageId: string
propertyId: string
deleted: boolean
}
}
export const confluenceDeletePagePropertyTool: ToolConfig<
ConfluenceDeletePagePropertyParams,
ConfluenceDeletePagePropertyResponse
> = {
id: 'confluence_delete_page_property',
name: 'Confluence Delete Page Property',
description: 'Delete a content property from a Confluence page by its property ID.',
version: '1.0.0',
oauth: {
required: true,
provider: 'confluence',
},
params: {
accessToken: {
type: 'string',
required: true,
visibility: 'hidden',
description: 'OAuth access token for Confluence',
},
domain: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)',
},
pageId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'The ID of the page containing the property',
},
propertyId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'The ID of the property to delete',
},
cloudId: {
type: 'string',
required: false,
visibility: 'user-only',
description:
'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.',
},
},
request: {
url: () => '/api/tools/confluence/page-properties',
method: 'DELETE',
headers: (params: ConfluenceDeletePagePropertyParams) => ({
Accept: 'application/json',
Authorization: `Bearer ${params.accessToken}`,
}),
body: (params: ConfluenceDeletePagePropertyParams) => ({
domain: params.domain,
accessToken: params.accessToken,
pageId: params.pageId?.trim(),
propertyId: params.propertyId?.trim(),
cloudId: params.cloudId,
}),
},
transformResponse: async (response: Response) => {
const data = await response.json()
return {
success: true,
output: {
ts: new Date().toISOString(),
pageId: data.pageId ?? '',
propertyId: data.propertyId ?? '',
deleted: true,
},
}
},
outputs: {
ts: TIMESTAMP_OUTPUT,
pageId: { type: 'string', description: 'ID of the page' },
propertyId: { type: 'string', description: 'ID of the deleted property' },
deleted: { type: 'boolean', description: 'Deletion status' },
},
}


@@ -0,0 +1,143 @@
import { PAGE_ITEM_PROPERTIES, TIMESTAMP_OUTPUT } from '@/tools/confluence/types'
import type { ToolConfig } from '@/tools/types'
export interface ConfluenceGetPagesByLabelParams {
accessToken: string
domain: string
labelId: string
limit?: number
cursor?: string
cloudId?: string
}
export interface ConfluenceGetPagesByLabelResponse {
success: boolean
output: {
ts: string
labelId: string
pages: Array<{
id: string
title: string
status: string | null
spaceId: string | null
parentId: string | null
authorId: string | null
createdAt: string | null
version: {
number: number
message?: string
createdAt?: string
} | null
}>
nextCursor: string | null
}
}
export const confluenceGetPagesByLabelTool: ToolConfig<
ConfluenceGetPagesByLabelParams,
ConfluenceGetPagesByLabelResponse
> = {
id: 'confluence_get_pages_by_label',
name: 'Confluence Get Pages by Label',
description: 'Retrieve all pages that have a specific label applied.',
version: '1.0.0',
oauth: {
required: true,
provider: 'confluence',
},
params: {
accessToken: {
type: 'string',
required: true,
visibility: 'hidden',
description: 'OAuth access token for Confluence',
},
domain: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)',
},
labelId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'The ID of the label to get pages for',
},
limit: {
type: 'number',
required: false,
visibility: 'user-or-llm',
description: 'Maximum number of pages to return (default: 50, max: 250)',
},
cursor: {
type: 'string',
required: false,
visibility: 'user-or-llm',
description: 'Pagination cursor from previous response',
},
cloudId: {
type: 'string',
required: false,
visibility: 'user-only',
description:
'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.',
},
},
request: {
url: (params: ConfluenceGetPagesByLabelParams) => {
const query = new URLSearchParams({
domain: params.domain,
accessToken: params.accessToken,
labelId: params.labelId,
limit: String(params.limit || 50),
})
if (params.cursor) {
query.set('cursor', params.cursor)
}
if (params.cloudId) {
query.set('cloudId', params.cloudId)
}
return `/api/tools/confluence/pages-by-label?${query.toString()}`
},
method: 'GET',
headers: (params: ConfluenceGetPagesByLabelParams) => ({
Accept: 'application/json',
Authorization: `Bearer ${params.accessToken}`,
}),
},
transformResponse: async (response: Response) => {
const data = await response.json()
return {
success: true,
output: {
ts: new Date().toISOString(),
labelId: data.labelId ?? '',
pages: data.pages ?? [],
nextCursor: data.nextCursor ?? null,
},
}
},
outputs: {
ts: TIMESTAMP_OUTPUT,
labelId: { type: 'string', description: 'ID of the label' },
pages: {
type: 'array',
description: 'Array of pages with this label',
items: {
type: 'object',
properties: PAGE_ITEM_PROPERTIES,
},
},
nextCursor: {
type: 'string',
description: 'Cursor for fetching the next page of results',
optional: true,
},
},
}
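
A hedged sketch of paging through the route above by following `nextCursor` until it is exhausted. The query parameters and response fields mirror the config; the function, placeholder domain, and direct-fetch style are assumptions.

```typescript
// Sketch only: collects every page carrying a label by looping on nextCursor.
async function collectPagesByLabelSketch(accessToken: string, labelId: string) {
  const domain = 'yourcompany.atlassian.net' // placeholder domain
  const pages: Array<{ id: string; title: string }> = []
  let cursor: string | undefined
  do {
    const query = new URLSearchParams({ domain, accessToken, labelId, limit: '50' })
    if (cursor) query.set('cursor', cursor)
    const res = await fetch(`/api/tools/confluence/pages-by-label?${query.toString()}`, {
      headers: { Accept: 'application/json', Authorization: `Bearer ${accessToken}` },
    })
    const data = await res.json()
    pages.push(...(data.pages ?? []))
    cursor = data.nextCursor ?? undefined
  } while (cursor)
  return pages
}
```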


@@ -5,11 +5,14 @@ import { confluenceCreatePageTool } from '@/tools/confluence/create_page'
import { confluenceCreatePagePropertyTool } from '@/tools/confluence/create_page_property'
import { confluenceDeleteAttachmentTool } from '@/tools/confluence/delete_attachment'
import { confluenceDeleteCommentTool } from '@/tools/confluence/delete_comment'
import { confluenceDeleteLabelTool } from '@/tools/confluence/delete_label'
import { confluenceDeletePageTool } from '@/tools/confluence/delete_page'
import { confluenceDeletePagePropertyTool } from '@/tools/confluence/delete_page_property'
import { confluenceGetBlogPostTool } from '@/tools/confluence/get_blogpost'
import { confluenceGetPageAncestorsTool } from '@/tools/confluence/get_page_ancestors'
import { confluenceGetPageChildrenTool } from '@/tools/confluence/get_page_children'
import { confluenceGetPageVersionTool } from '@/tools/confluence/get_page_version'
import { confluenceGetPagesByLabelTool } from '@/tools/confluence/get_pages_by_label'
import { confluenceGetSpaceTool } from '@/tools/confluence/get_space'
import { confluenceListAttachmentsTool } from '@/tools/confluence/list_attachments'
import { confluenceListBlogPostsTool } from '@/tools/confluence/list_blogposts'
@@ -19,6 +22,7 @@ import { confluenceListLabelsTool } from '@/tools/confluence/list_labels'
import { confluenceListPagePropertiesTool } from '@/tools/confluence/list_page_properties'
import { confluenceListPageVersionsTool } from '@/tools/confluence/list_page_versions'
import { confluenceListPagesInSpaceTool } from '@/tools/confluence/list_pages_in_space'
import { confluenceListSpaceLabelsTool } from '@/tools/confluence/list_space_labels'
import { confluenceListSpacesTool } from '@/tools/confluence/list_spaces'
import { confluenceRetrieveTool } from '@/tools/confluence/retrieve'
import { confluenceSearchTool } from '@/tools/confluence/search'
@@ -78,6 +82,7 @@ export {
// Page Properties Tools
confluenceListPagePropertiesTool,
confluenceCreatePagePropertyTool,
confluenceDeletePagePropertyTool,
// Blog Post Tools
confluenceListBlogPostsTool,
confluenceGetBlogPostTool,
@@ -98,6 +103,9 @@ export {
// Label Tools
confluenceListLabelsTool,
confluenceAddLabelTool,
confluenceDeleteLabelTool,
confluenceGetPagesByLabelTool,
confluenceListSpaceLabelsTool,
// Space Tools
confluenceGetSpaceTool,
confluenceListSpacesTool,


@@ -0,0 +1,134 @@
import { LABEL_ITEM_PROPERTIES, TIMESTAMP_OUTPUT } from '@/tools/confluence/types'
import type { ToolConfig } from '@/tools/types'
export interface ConfluenceListSpaceLabelsParams {
accessToken: string
domain: string
spaceId: string
limit?: number
cursor?: string
cloudId?: string
}
export interface ConfluenceListSpaceLabelsResponse {
success: boolean
output: {
ts: string
spaceId: string
labels: Array<{
id: string
name: string
prefix: string
}>
nextCursor: string | null
}
}
export const confluenceListSpaceLabelsTool: ToolConfig<
ConfluenceListSpaceLabelsParams,
ConfluenceListSpaceLabelsResponse
> = {
id: 'confluence_list_space_labels',
name: 'Confluence List Space Labels',
description: 'List all labels associated with a Confluence space.',
version: '1.0.0',
oauth: {
required: true,
provider: 'confluence',
},
params: {
accessToken: {
type: 'string',
required: true,
visibility: 'hidden',
description: 'OAuth access token for Confluence',
},
domain: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)',
},
spaceId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'The ID of the Confluence space to list labels from',
},
limit: {
type: 'number',
required: false,
visibility: 'user-or-llm',
description: 'Maximum number of labels to return (default: 25, max: 250)',
},
cursor: {
type: 'string',
required: false,
visibility: 'user-or-llm',
description: 'Pagination cursor from previous response',
},
cloudId: {
type: 'string',
required: false,
visibility: 'user-only',
description:
'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.',
},
},
request: {
url: (params: ConfluenceListSpaceLabelsParams) => {
const query = new URLSearchParams({
domain: params.domain,
accessToken: params.accessToken,
spaceId: params.spaceId,
limit: String(params.limit || 25),
})
if (params.cursor) {
query.set('cursor', params.cursor)
}
if (params.cloudId) {
query.set('cloudId', params.cloudId)
}
return `/api/tools/confluence/space-labels?${query.toString()}`
},
method: 'GET',
headers: (params: ConfluenceListSpaceLabelsParams) => ({
Accept: 'application/json',
Authorization: `Bearer ${params.accessToken}`,
}),
},
transformResponse: async (response: Response) => {
const data = await response.json()
return {
success: true,
output: {
ts: new Date().toISOString(),
spaceId: data.spaceId ?? '',
labels: data.labels ?? [],
nextCursor: data.nextCursor ?? null,
},
}
},
outputs: {
ts: TIMESTAMP_OUTPUT,
spaceId: { type: 'string', description: 'ID of the space' },
labels: {
type: 'array',
description: 'Array of labels on the space',
items: {
type: 'object',
properties: LABEL_ITEM_PROPERTIES,
},
},
nextCursor: {
type: 'string',
description: 'Cursor for fetching the next page of results',
optional: true,
},
},
}


@@ -118,10 +118,13 @@ import {
confluenceCreatePageTool,
confluenceDeleteAttachmentTool,
confluenceDeleteCommentTool,
confluenceDeleteLabelTool,
confluenceDeletePagePropertyTool,
confluenceDeletePageTool,
confluenceGetBlogPostTool,
confluenceGetPageAncestorsTool,
confluenceGetPageChildrenTool,
confluenceGetPagesByLabelTool,
confluenceGetPageVersionTool,
confluenceGetSpaceTool,
confluenceListAttachmentsTool,
@@ -132,6 +135,7 @@ import {
confluenceListPagePropertiesTool,
confluenceListPagesInSpaceTool,
confluenceListPageVersionsTool,
confluenceListSpaceLabelsTool,
confluenceListSpacesTool,
confluenceRetrieveTool,
confluenceSearchInSpaceTool,
@@ -2667,6 +2671,10 @@ export const tools: Record<string, ToolConfig> = {
confluence_delete_attachment: confluenceDeleteAttachmentTool,
confluence_list_labels: confluenceListLabelsTool,
confluence_add_label: confluenceAddLabelTool,
confluence_get_pages_by_label: confluenceGetPagesByLabelTool,
confluence_list_space_labels: confluenceListSpaceLabelsTool,
confluence_delete_label: confluenceDeleteLabelTool,
confluence_delete_page_property: confluenceDeletePagePropertyTool,
confluence_get_space: confluenceGetSpaceTool,
confluence_list_spaces: confluenceListSpacesTool,
cursor_list_agents: cursorListAgentsTool,