Update request tracing, agent skills, and tool handlers

This commit is contained in:
Siddharth Ganesan
2026-04-10 02:15:18 -07:00
parent 949601ca02
commit 91301df20a
35 changed files with 1079 additions and 244 deletions

View File

@@ -1,3 +1,6 @@
import { db } from '@sim/db'
import { user } from '@sim/db/schema'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { env } from '@/lib/core/config/env'
@@ -12,6 +15,19 @@ function getMothershipUrl(environment: string): string | null {
return ENV_URLS[environment] ?? null
}
async function isAdminRequestAuthorized() {
const session = await getSession()
if (!session?.user?.id) return false
const [currentUser] = await db
.select({ role: user.role })
.from(user)
.where(eq(user.id, session.user.id))
.limit(1)
return currentUser?.role === 'admin'
}
/**
* Proxy to the mothership admin API.
*
@@ -23,8 +39,7 @@ function getMothershipUrl(environment: string): string | null {
* (e.g. requestId for GET /traces) are forwarded.
*/
export async function POST(req: NextRequest) {
const session = await getSession()
if (!session?.user || session.user.role !== 'admin') {
if (!(await isAdminRequestAuthorized())) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
@@ -75,8 +90,7 @@ export async function POST(req: NextRequest) {
}
export async function GET(req: NextRequest) {
const session = await getSession()
if (!session?.user || session.user.role !== 'admin') {
if (!(await isAdminRequestAuthorized())) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}

View File

@@ -143,8 +143,8 @@ When the user refers to a workflow by name or description ("the email one", "my
### Key Rules
- You can test workflows immediately after building — deployment is only needed for external access (API, chat, MCP).
- All copilot tools (build, plan, edit, deploy, test, debug) require workflowId.
- If the user reports errors → use \`sim_debug\` first, don't guess.
- All workflow-scoped copilot tools require \`workflowId\`.
- If the user reports errors, route through \`sim_workflow\` and ask it to reproduce, inspect logs, and fix the issue end to end.
- Variable syntax: \`<blockname.field>\` for block outputs, \`{{ENV_VAR}}\` for env vars.
`

View File

@@ -2,7 +2,6 @@ import type { ComponentType, SVGProps } from 'react'
import {
Asterisk,
Blimp,
Bug,
Calendar,
Database,
Eye,
@@ -55,7 +54,6 @@ const TOOL_ICONS: Record<string, IconComponent> = {
agent: AgentIcon,
custom_tool: Wrench,
research: Search,
debug: Bug,
context_compaction: Asterisk,
open_resource: Eye,
file: File,

View File

@@ -62,6 +62,7 @@ import {
isResourceToolName,
} from '@/lib/copilot/resources/extraction'
import { VFS_DIR_TO_RESOURCE } from '@/lib/copilot/resources/types'
import { isToolHiddenInUi } from '@/lib/copilot/tools/client/hidden-tools'
import {
cancelRunToolExecution,
executeRunToolOnClient,
@@ -1575,7 +1576,7 @@ export function useChat(
? payload.name
: 'unknown'
const isPartial = payload.partial === true
if (name === ToolSearchToolRegex.id) {
if (name === ToolSearchToolRegex.id || isToolHiddenInUi(name)) {
break
}
const ui = getToolUI(payload)

View File

@@ -2,7 +2,6 @@ import {
Agent,
Auth,
CreateWorkflow,
Debug,
Deploy,
EditContent,
EditWorkflow,
@@ -188,7 +187,6 @@ export const SUBAGENT_LABELS: Record<string, string> = {
table: 'Table Agent',
custom_tool: 'Custom Tool Agent',
superagent: 'Superagent',
debug: 'Debug Agent',
run: 'Run Agent',
agent: 'Tools Agent',
job: 'Job Agent',
@@ -315,7 +313,6 @@ export const TOOL_UI_METADATA: Record<string, ToolUIMetadata> = {
phase: 'subagent',
},
[Research.id]: { title: 'Research Agent', phaseLabel: 'Research', phase: 'subagent' },
[Debug.id]: { title: 'Debug Agent', phaseLabel: 'Debug', phase: 'subagent' },
[OpenResource.id]: {
title: 'Opening resource',
phaseLabel: 'Resource',

View File

@@ -315,7 +315,7 @@ function OverviewTab({
{r.error ? (
<Badge variant='red'>Error</Badge>
) : r.aborted ? (
<Badge variant='yellow'>Abort</Badge>
<Badge variant='amber'>Abort</Badge>
) : (
<Badge variant='green'>OK</Badge>
)}
@@ -694,7 +694,7 @@ function TraceDetail({ trace }: { trace: TraceData }) {
trace.outcome === 'success'
? 'green'
: trace.outcome === 'cancelled'
? 'yellow'
? 'amber'
: 'red'
}
>

View File

@@ -14,12 +14,11 @@ import {
} from '@/components/emcn'
import { cn } from '@/lib/core/utils/cn'
import {
getEffectiveBlockOutputPaths,
getEffectiveBlockOutputType,
getOutputPathsFromSchema,
} from '@/lib/workflows/blocks/block-outputs'
import { getBlockReferenceTags } from '@/lib/workflows/blocks/block-reference-tags'
import { hasTriggerCapability } from '@/lib/workflows/triggers/trigger-utils'
import { TRIGGER_TYPES } from '@/lib/workflows/triggers/triggers'
import { KeyboardNavigationHandler } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/tag-dropdown/components/keyboard-navigation-handler'
import type {
BlockTagGroup,
@@ -177,17 +176,6 @@ const ensureRootTag = (tags: string[], rootTag: string): string[] => {
return [rootTag, ...tags]
}
/**
* Gets a subblock value from the store.
*
* @param blockId - The block identifier
* @param property - The property name to retrieve
* @returns The value from the subblock store
*/
const getSubBlockValue = (blockId: string, property: string): any => {
return useSubBlockStore.getState().getValue(blockId, property)
}
/**
* Gets the output type for a specific path in a block's outputs.
*
@@ -1055,53 +1043,19 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
return { tags: [], variableInfoMap: emptyVariableInfoMap, blockTagGroups: [] }
}
const blockName = sourceBlock.name || sourceBlock.type
const normalizedBlockName = normalizeName(blockName)
const mergedSubBlocks = getMergedSubBlocks(activeSourceBlockId)
let blockTags: string[]
if (sourceBlock.type === 'variables') {
const variablesValue = getSubBlockValue(activeSourceBlockId, 'variables')
if (variablesValue && Array.isArray(variablesValue) && variablesValue.length > 0) {
const validAssignments = variablesValue.filter((assignment: { variableName?: string }) =>
assignment?.variableName?.trim()
)
blockTags = validAssignments.map(
(assignment: { variableName: string }) =>
`${normalizedBlockName}.${assignment.variableName.trim()}`
)
} else {
blockTags = [normalizedBlockName]
}
} else {
const sourceBlockConfig = getBlock(sourceBlock.type)
const isTriggerCapable = sourceBlockConfig ? hasTriggerCapability(sourceBlockConfig) : false
const effectiveTriggerMode = Boolean(sourceBlock.triggerMode && isTriggerCapable)
const outputPaths = getEffectiveBlockOutputPaths(sourceBlock.type, mergedSubBlocks, {
triggerMode: effectiveTriggerMode,
preferToolOutputs: !effectiveTriggerMode,
})
const allTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
if (sourceBlock.type === 'human_in_the_loop' && activeSourceBlockId === blockId) {
blockTags = allTags.filter(
(tag) => tag.endsWith('.url') || tag.endsWith('.resumeEndpoint')
)
} else if (allTags.length === 0) {
blockTags = [normalizedBlockName]
} else {
blockTags = allTags
}
}
blockTags = ensureRootTag(blockTags, normalizedBlockName)
const shouldShowRootTag =
sourceBlock.type === TRIGGER_TYPES.GENERIC_WEBHOOK || sourceBlock.type === 'start_trigger'
if (!shouldShowRootTag) {
blockTags = blockTags.filter((tag) => tag !== normalizedBlockName)
}
const blockName = sourceBlock.name || sourceBlock.type
const blockTags = getBlockReferenceTags({
block: {
id: activeSourceBlockId,
type: sourceBlock.type,
name: sourceBlock.name,
triggerMode: sourceBlock.triggerMode,
subBlocks: mergedSubBlocks,
},
currentBlockId: blockId,
subBlocks: mergedSubBlocks,
})
const blockTagGroups: BlockTagGroup[] = [
{
@@ -1331,57 +1285,19 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
continue
}
const blockName = accessibleBlock.name || accessibleBlock.type
const normalizedBlockName = normalizeName(blockName)
const mergedSubBlocks = getMergedSubBlocks(accessibleBlockId)
let blockTags: string[]
if (accessibleBlock.type === 'variables') {
const variablesValue = getSubBlockValue(accessibleBlockId, 'variables')
if (variablesValue && Array.isArray(variablesValue) && variablesValue.length > 0) {
const validAssignments = variablesValue.filter((assignment: { variableName?: string }) =>
assignment?.variableName?.trim()
)
blockTags = validAssignments.map(
(assignment: { variableName: string }) =>
`${normalizedBlockName}.${assignment.variableName.trim()}`
)
} else {
blockTags = [normalizedBlockName]
}
} else {
const accessibleBlockConfig = getBlock(accessibleBlock.type)
const isTriggerCapable = accessibleBlockConfig
? hasTriggerCapability(accessibleBlockConfig)
: false
const effectiveTriggerMode = Boolean(accessibleBlock.triggerMode && isTriggerCapable)
const outputPaths = getEffectiveBlockOutputPaths(accessibleBlock.type, mergedSubBlocks, {
triggerMode: effectiveTriggerMode,
preferToolOutputs: !effectiveTriggerMode,
})
const allTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
if (accessibleBlock.type === 'human_in_the_loop' && accessibleBlockId === blockId) {
blockTags = allTags.filter(
(tag) => tag.endsWith('.url') || tag.endsWith('.resumeEndpoint')
)
} else if (allTags.length === 0) {
blockTags = [normalizedBlockName]
} else {
blockTags = allTags
}
}
blockTags = ensureRootTag(blockTags, normalizedBlockName)
const shouldShowRootTag =
accessibleBlock.type === TRIGGER_TYPES.GENERIC_WEBHOOK ||
accessibleBlock.type === 'start_trigger'
if (!shouldShowRootTag) {
blockTags = blockTags.filter((tag) => tag !== normalizedBlockName)
}
const blockName = accessibleBlock.name || accessibleBlock.type
const blockTags = getBlockReferenceTags({
block: {
id: accessibleBlockId,
type: accessibleBlock.type,
name: accessibleBlock.name,
triggerMode: accessibleBlock.triggerMode,
subBlocks: mergedSubBlocks,
},
currentBlockId: blockId,
subBlocks: mergedSubBlocks,
})
blockTagGroups.push({
blockName,

View File

@@ -177,6 +177,7 @@ export interface ExecutionContext {
userId?: string
isDeployedContext?: boolean
enforceCredentialAccess?: boolean
copilotToolExecution?: boolean
permissionConfig?: PermissionGroupConfig | null
permissionConfigLoaded?: boolean

View File

@@ -60,4 +60,32 @@ describe('display-message', () => {
},
])
})
it('hides load_agent_skill blocks from display output', () => {
const display = toDisplayMessage({
id: 'msg-2',
role: 'assistant',
content: '',
timestamp: '2024-01-01T00:00:00.000Z',
contentBlocks: [
{
type: 'tool',
phase: 'call',
toolCall: {
id: 'tool-hidden',
name: 'load_agent_skill',
state: 'success',
display: { title: 'Loading skill' },
},
},
{
type: 'text',
channel: 'assistant',
content: 'visible text',
},
],
})
expect(display.contentBlocks).toEqual([{ type: 'text', content: 'visible text' }])
})
})

View File

@@ -4,6 +4,7 @@ import {
MothershipStreamV1SpanLifecycleEvent,
MothershipStreamV1ToolOutcome,
} from '@/lib/copilot/generated/mothership-stream-v1'
import { isToolHiddenInUi } from '@/lib/copilot/tools/client/hidden-tools'
import {
type ChatContextKind,
type ChatMessage,
@@ -29,6 +30,7 @@ const STATE_TO_STATUS: Record<string, ToolCallStatus> = {
function toToolCallInfo(block: PersistedContentBlock): ToolCallInfo | undefined {
const tc = block.toolCall
if (!tc) return undefined
if (isToolHiddenInUi(tc.name)) return undefined
const status: ToolCallStatus = STATE_TO_STATUS[tc.state] ?? ToolCallStatus.error
return {
id: tc.id,
@@ -42,7 +44,7 @@ function toToolCallInfo(block: PersistedContentBlock): ToolCallInfo | undefined
}
}
function toDisplayBlock(block: PersistedContentBlock): ContentBlock {
function toDisplayBlock(block: PersistedContentBlock): ContentBlock | undefined {
switch (block.type) {
case MothershipStreamV1EventType.text:
if (block.lane === 'subagent') {
@@ -53,6 +55,7 @@ function toDisplayBlock(block: PersistedContentBlock): ContentBlock {
}
return { type: ContentBlockType.text, content: block.content }
case MothershipStreamV1EventType.tool:
if (!toToolCallInfo(block)) return undefined
return { type: ContentBlockType.tool_call, toolCall: toToolCallInfo(block) }
case MothershipStreamV1EventType.span:
if (block.lifecycle === MothershipStreamV1SpanLifecycleEvent.end) {
@@ -110,7 +113,9 @@ export function toDisplayMessage(msg: PersistedMessage): ChatMessage {
}
if (msg.contentBlocks && msg.contentBlocks.length > 0) {
display.contentBlocks = msg.contentBlocks.map(toDisplayBlock)
display.contentBlocks = msg.contentBlocks
.map(toDisplayBlock)
.filter((block): block is ContentBlock => !!block)
}
const attachments = toDisplayAttachment(msg.fileAttachments)

View File

@@ -3,7 +3,8 @@
*/
import { beforeEach, describe, expect, it, vi } from 'vitest'
const { mockGetHighestPrioritySubscription } = vi.hoisted(() => ({
const { mockCreateUserToolSchema, mockGetHighestPrioritySubscription } = vi.hoisted(() => ({
mockCreateUserToolSchema: vi.fn(() => ({ type: 'object', properties: {} })),
mockGetHighestPrioritySubscription: vi.fn(),
}))
@@ -56,7 +57,7 @@ vi.mock('@/tools/utils', () => ({
}))
vi.mock('@/tools/params', () => ({
createUserToolSchema: vi.fn(() => ({ type: 'object', properties: {} })),
createUserToolSchema: mockCreateUserToolSchema,
}))
import { buildIntegrationToolSchemas } from './payload'
@@ -64,6 +65,7 @@ import { buildIntegrationToolSchemas } from './payload'
describe('buildIntegrationToolSchemas', () => {
beforeEach(() => {
vi.clearAllMocks()
mockCreateUserToolSchema.mockReturnValue({ type: 'object', properties: {} })
})
it('appends the email footer prompt for free users', async () => {
@@ -108,4 +110,19 @@ describe('buildIntegrationToolSchemas', () => {
expect(gmailTool?.executeLocally).toBe(false)
expect(runTool?.executeLocally).toBe(true)
})
it('uses copilot-facing file schemas for integration tools', async () => {
mockGetHighestPrioritySubscription.mockResolvedValue({ plan: 'pro', status: 'active' })
await buildIntegrationToolSchemas('user-copilot')
expect(mockCreateUserToolSchema).toHaveBeenCalledWith(
expect.objectContaining({ id: 'gmail_send' }),
{ surface: 'copilot' }
)
expect(mockCreateUserToolSchema).toHaveBeenCalledWith(
expect.objectContaining({ id: 'brandfetch_search' }),
{ surface: 'copilot' }
)
})
})

View File

@@ -42,6 +42,10 @@ export interface ToolSchema {
oauth?: { required: boolean; provider: string }
}
interface BuildIntegrationToolSchemasOptions {
schemaSurface?: 'default' | 'copilot'
}
/**
* Build deferred integration tool schemas from the Sim tool registry.
* Shared by the interactive chat payload builder and the non-interactive
@@ -49,7 +53,8 @@ export interface ToolSchema {
*/
export async function buildIntegrationToolSchemas(
userId: string,
messageId?: string
messageId?: string,
options: BuildIntegrationToolSchemasOptions = { schemaSurface: 'copilot' }
): Promise<ToolSchema[]> {
const reqLogger = logger.withMetadata({ messageId })
const integrationTools: ToolSchema[] = []
@@ -70,7 +75,9 @@ export async function buildIntegrationToolSchemas(
for (const [toolId, toolConfig] of Object.entries(latestTools)) {
try {
const userSchema = createUserToolSchema(toolConfig)
const userSchema = createUserToolSchema(toolConfig, {
surface: options.schemaSurface ?? 'copilot',
})
const strippedName = stripVersionSuffix(toolId)
const catalogEntry = getToolEntry(strippedName)
integrationTools.push({
@@ -192,7 +199,9 @@ export async function buildCopilotRequestPayload(
const payloadLogger = logger.withMetadata({ messageId: userMessageId })
if (effectiveMode === 'build') {
integrationTools = await buildIntegrationToolSchemas(userId, userMessageId)
integrationTools = await buildIntegrationToolSchemas(userId, userMessageId, {
schemaSurface: 'copilot',
})
// Discover MCP tools from workspace servers and include as deferred tools
if (workflowId) {

View File

@@ -15,7 +15,7 @@ export type RequestTraceV1SpanSource = 'sim' | 'go'
* This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema
* via the `definition` "RequestTraceV1SpanStatus".
*/
export type RequestTraceV1SpanStatus = 'ok' | 'error' | 'cancelled'
export type RequestTraceV1SpanStatus = 'ok' | 'error' | 'cancelled' | 'pending'
/**
* Trace report sent from Sim to Go after a request completes.
@@ -49,8 +49,8 @@ export interface RequestTraceV1CostSummary {
*/
export interface RequestTraceV1Span {
attributes?: MothershipStreamV1AdditionalPropertiesMap
durationMs?: number
endMs?: number
durationMs: number
endMs: number
kind?: string
name: string
parentName?: string
@@ -129,4 +129,5 @@ export const RequestTraceV1SpanStatus = {
ok: 'ok',
error: 'error',
cancelled: 'cancelled',
pending: 'pending',
} as const

View File

@@ -18,7 +18,6 @@ export interface ToolCatalogEntry {
| 'create_job'
| 'create_workflow'
| 'create_workspace_mcp_server'
| 'debug'
| 'delete_file'
| 'delete_folder'
| 'delete_workflow'
@@ -82,6 +81,7 @@ export interface ToolCatalogEntry {
| 'search_library_docs'
| 'search_online'
| 'search_patterns'
| 'set_block_enabled'
| 'set_environment_variables'
| 'set_global_workflow_variables'
| 'superagent'
@@ -107,7 +107,6 @@ export interface ToolCatalogEntry {
| 'create_job'
| 'create_workflow'
| 'create_workspace_mcp_server'
| 'debug'
| 'delete_file'
| 'delete_folder'
| 'delete_workflow'
@@ -171,6 +170,7 @@ export interface ToolCatalogEntry {
| 'search_library_docs'
| 'search_online'
| 'search_patterns'
| 'set_block_enabled'
| 'set_environment_variables'
| 'set_global_workflow_variables'
| 'superagent'
@@ -189,7 +189,6 @@ export interface ToolCatalogEntry {
subagentId?:
| 'agent'
| 'auth'
| 'debug'
| 'deploy'
| 'file'
| 'job'
@@ -448,31 +447,6 @@ export const CreateWorkspaceMcpServer: ToolCatalogEntry = {
requiredPermission: 'admin',
}
export const Debug: ToolCatalogEntry = {
id: 'debug',
name: 'debug',
executor: 'subagent',
mode: 'async',
parameters: {
properties: {
context: {
description:
'Pre-gathered context: workflow state JSON, block schemas, error logs. The debug agent will skip re-reading anything included here.',
type: 'string',
},
request: {
description:
'What to debug. Include error messages, block IDs, and any context about the failure.',
type: 'string',
},
},
required: ['request'],
type: 'object',
},
subagentId: 'debug',
internal: true,
}
export const DeleteFile: ToolCatalogEntry = {
id: 'delete_file',
name: 'delete_file',
@@ -2309,6 +2283,33 @@ export const SearchPatterns: ToolCatalogEntry = {
},
}
export const SetBlockEnabled: ToolCatalogEntry = {
id: 'set_block_enabled',
name: 'set_block_enabled',
executor: 'sim',
mode: 'async',
parameters: {
type: 'object',
properties: {
blockId: {
type: 'string',
description: 'The block ID whose enabled state should be changed.',
},
enabled: {
type: 'boolean',
description: 'Set to true to enable the block, or false to disable it.',
},
workflowId: {
type: 'string',
description:
'Optional workflow ID to edit. If not provided, uses the current workflow in context.',
},
},
required: ['blockId', 'enabled'],
},
requiredPermission: 'write',
}
export const SetEnvironmentVariables: ToolCatalogEntry = {
id: 'set_environment_variables',
name: 'set_environment_variables',
@@ -3055,7 +3056,6 @@ export const TOOL_CATALOG: Record<string, ToolCatalogEntry> = {
[CreateJob.id]: CreateJob,
[CreateWorkflow.id]: CreateWorkflow,
[CreateWorkspaceMcpServer.id]: CreateWorkspaceMcpServer,
[Debug.id]: Debug,
[DeleteFile.id]: DeleteFile,
[DeleteFolder.id]: DeleteFolder,
[DeleteWorkflow.id]: DeleteWorkflow,
@@ -3119,6 +3119,7 @@ export const TOOL_CATALOG: Record<string, ToolCatalogEntry> = {
[SearchLibraryDocs.id]: SearchLibraryDocs,
[SearchOnline.id]: SearchOnline,
[SearchPatterns.id]: SearchPatterns,
[SetBlockEnabled.id]: SetBlockEnabled,
[SetEnvironmentVariables.id]: SetEnvironmentVariables,
[SetGlobalWorkflowVariables.id]: SetGlobalWorkflowVariables,
[Superagent.id]: Superagent,

View File

@@ -266,25 +266,6 @@ export const TOOL_RUNTIME_SCHEMAS: Record<string, ToolRuntimeSchemaEntry> = {
},
resultSchema: undefined,
},
debug: {
parameters: {
properties: {
context: {
description:
'Pre-gathered context: workflow state JSON, block schemas, error logs. The debug agent will skip re-reading anything included here.',
type: 'string',
},
request: {
description:
'What to debug. Include error messages, block IDs, and any context about the failure.',
type: 'string',
},
},
required: ['request'],
type: 'object',
},
resultSchema: undefined,
},
delete_file: {
parameters: {
type: 'object',
@@ -2078,6 +2059,28 @@ export const TOOL_RUNTIME_SCHEMAS: Record<string, ToolRuntimeSchemaEntry> = {
},
resultSchema: undefined,
},
set_block_enabled: {
parameters: {
type: 'object',
properties: {
blockId: {
type: 'string',
description: 'The block ID whose enabled state should be changed.',
},
enabled: {
type: 'boolean',
description: 'Set to true to enable the block, or false to disable it.',
},
workflowId: {
type: 'string',
description:
'Optional workflow ID to edit. If not provided, uses the current workflow in context.',
},
},
required: ['blockId', 'enabled'],
},
resultSchema: undefined,
},
set_environment_variables: {
parameters: {
type: 'object',

View File

@@ -127,6 +127,33 @@ describe('sse-handlers tool lifecycle', () => {
expect(updated?.result?.output).toEqual({ ok: true })
})
it('does not add hidden tool calls to content blocks', async () => {
executeTool.mockResolvedValueOnce({ success: true, output: { skill: 'ok' } })
await sseHandlers.tool(
{
type: MothershipStreamV1EventType.tool,
payload: {
toolCallId: 'tool-hidden',
toolName: 'load_agent_skill',
arguments: { skill_name: 'markdown-writing' },
executor: MothershipStreamV1ToolExecutor.sim,
mode: MothershipStreamV1ToolMode.async,
phase: MothershipStreamV1ToolPhase.call,
},
} satisfies StreamEvent,
context,
execContext,
{ interactive: false, timeout: 1000 }
)
await new Promise((resolve) => setTimeout(resolve, 0))
expect(executeTool).toHaveBeenCalledTimes(1)
expect(context.contentBlocks).toEqual([])
expect(context.toolCalls.get('tool-hidden')?.name).toBe('load_agent_skill')
})
it('updates stored params when a subagent generating event is followed by the final tool call', async () => {
executeTool.mockResolvedValueOnce({ success: true, output: { ok: true } })
context.subAgentParentToolCallId = 'parent-1'

View File

@@ -22,6 +22,7 @@ import type {
ToolCallState,
} from '@/lib/copilot/request/types'
import { getToolEntry, isSimExecuted } from '@/lib/copilot/tool-executor'
import { isToolHiddenInUi } from '@/lib/copilot/tools/client/hidden-tools'
import { isWorkflowToolName } from '@/lib/copilot/tools/workflow-tools'
import type { ToolScope } from './types'
import {
@@ -235,6 +236,7 @@ function registerSubagentToolCall(
if (!context.subAgentToolCalls[parentToolCallId]) {
context.subAgentToolCalls[parentToolCallId] = []
}
const hideFromUi = isToolHiddenInUi(toolName)
let toolCall = context.toolCalls.get(toolCallId)
if (toolCall) {
if (!toolCall.name && toolName) toolCall.name = toolName
@@ -249,11 +251,13 @@ function registerSubagentToolCall(
}
context.toolCalls.set(toolCallId, toolCall)
const parentToolCall = context.toolCalls.get(parentToolCallId)
addContentBlock(context, {
type: 'tool_call',
toolCall,
calledBy: parentToolCall?.name,
})
if (!hideFromUi) {
addContentBlock(context, {
type: 'tool_call',
toolCall,
calledBy: parentToolCall?.name,
})
}
}
const subagentToolCalls = context.subAgentToolCalls[parentToolCallId]
@@ -273,9 +277,11 @@ function registerMainToolCall(
args: Record<string, unknown> | undefined,
existing: ToolCallState | undefined
): void {
const hideFromUi = isToolHiddenInUi(toolName)
if (existing) {
if (args && !existing.params) existing.params = args
if (
!hideFromUi &&
!context.contentBlocks.some((b) => b.type === 'tool_call' && b.toolCall?.id === toolCallId)
) {
addContentBlock(context, { type: 'tool_call', toolCall: existing })
@@ -289,7 +295,9 @@ function registerMainToolCall(
startTime: Date.now(),
}
context.toolCalls.set(toolCallId, created)
addContentBlock(context, { type: 'tool_call', toolCall: created })
if (!hideFromUi) {
addContentBlock(context, { type: 'tool_call', toolCall: created })
}
}
}

View File

@@ -384,6 +384,7 @@ async function buildExecutionContext(
const { userId, workflowId, workspaceId, chatId, executionId, runId, abortSignal } = params
const userTimezone =
typeof requestPayload?.userTimezone === 'string' ? requestPayload.userTimezone : undefined
const requestMode = typeof requestPayload?.mode === 'string' ? requestPayload.mode : undefined
let execContext: ExecutionContext
if (workflowId) {
@@ -400,6 +401,8 @@ async function buildExecutionContext(
}
if (userTimezone) execContext.userTimezone = userTimezone
execContext.copilotToolExecution = true
if (requestMode) execContext.requestMode = requestMode
execContext.executionId = executionId
execContext.runId = runId
execContext.abortSignal = abortSignal

View File

@@ -25,10 +25,13 @@ export class TraceCollector {
attributes?: Record<string, unknown>,
parent?: RequestTraceV1Span
): RequestTraceV1Span {
const startMs = Date.now()
const span: RequestTraceV1Span = {
name,
kind,
startMs: Date.now(),
startMs,
endMs: startMs,
durationMs: 0,
status: RequestTraceV1SpanStatus.ok,
source: RequestTraceV1SpanSource.sim,
...(parent

View File

@@ -113,6 +113,9 @@ function buildAppToolParams(
chatId: context.chatId,
executionId: context.executionId,
runId: context.runId,
copilotToolExecution: context.copilotToolExecution,
requestMode: context.requestMode,
currentAgentId: context.currentAgentId,
enforceCredentialAccess: true,
}

View File

@@ -45,6 +45,7 @@ import {
RunFromBlock,
RunWorkflow,
RunWorkflowUntilBlock,
SetBlockEnabled,
SetGlobalWorkflowVariables,
UpdateJobHistory,
UpdateWorkspaceMcpServer,
@@ -96,6 +97,7 @@ import {
executeRunFromBlock,
executeRunWorkflow,
executeRunWorkflowUntilBlock,
executeSetBlockEnabled,
executeSetGlobalWorkflowVariables,
} from '../tools/handlers/workflow/mutations'
import {
@@ -147,6 +149,7 @@ function buildHandlerMap(): Record<string, ToolHandler> {
[RunWorkflowUntilBlock.id]: h(executeRunWorkflowUntilBlock),
[RunFromBlock.id]: h(executeRunFromBlock),
[RunBlock.id]: h(executeRunBlock),
[SetBlockEnabled.id]: h(executeSetBlockEnabled),
[GenerateApiKey.id]: h(executeGenerateApiKey),
[SetGlobalWorkflowVariables.id]: h(executeSetGlobalWorkflowVariables),

View File

@@ -7,6 +7,9 @@ export interface ToolExecutionContext {
chatId?: string
executionId?: string
runId?: string
copilotToolExecution?: boolean
requestMode?: string
currentAgentId?: string
abortSignal?: AbortSignal
userTimezone?: string
userPermission?: string

View File

@@ -0,0 +1,9 @@
const HIDDEN_TOOL_NAMES = new Set(['tool_search_tool_regex', 'load_agent_skill'])
export function isToolHiddenInUi(toolName: string | undefined): boolean {
return !!toolName && HIDDEN_TOOL_NAMES.has(toolName)
}
export function getHiddenToolNames(): ReadonlySet<string> {
return HIDDEN_TOOL_NAMES
}

View File

@@ -25,6 +25,7 @@ import {
} from 'lucide-react'
import { Read as ReadTool } from '@/lib/copilot/generated/tool-catalog-v1'
import { VFS_DIR_TO_RESOURCE } from '@/lib/copilot/resources/types'
import { isToolHiddenInUi } from '@/lib/copilot/tools/client/hidden-tools'
import {
ClientToolCallState,
type ClientToolDisplay,
@@ -36,8 +37,6 @@ const logger = createLogger('CopilotStoreUtils')
/** Respond tools are internal handoff tools shown with a friendly generic label. */
const HIDDEN_TOOL_SUFFIX = '_respond'
const INTERNAL_RESPOND_TOOL = 'respond'
const HIDDEN_TOOL_NAMES = new Set(['tool_search_tool_regex'])
/** UI metadata sent by the copilot on SSE tool_call events. */
export interface ServerToolUI {
title?: string
@@ -85,7 +84,7 @@ export function resolveToolDisplay(
serverUI?: ServerToolUI
): ClientToolDisplay | undefined {
if (!toolName) return undefined
if (HIDDEN_TOOL_NAMES.has(toolName)) return undefined
if (isToolHiddenInUi(toolName)) return undefined
const specialDisplay = specialToolDisplay(toolName, state, params)
if (specialDisplay) return specialDisplay

View File

@@ -1,7 +1,6 @@
import type { LucideIcon } from 'lucide-react'
import {
BookOpen,
Bug,
Check,
CheckCircle,
Database,
@@ -2126,26 +2125,6 @@ const META_superagent: ToolMetadata = {
},
}
const META_debug: ToolMetadata = {
displayNames: {
[ClientToolCallState.generating]: { text: 'Debugging', icon: Loader2 },
[ClientToolCallState.pending]: { text: 'Debugging', icon: Loader2 },
[ClientToolCallState.executing]: { text: 'Debugging', icon: Loader2 },
[ClientToolCallState.success]: { text: 'Debugged', icon: Bug },
[ClientToolCallState.error]: { text: 'Failed to debug', icon: XCircle },
[ClientToolCallState.rejected]: { text: 'Skipped debugging', icon: XCircle },
[ClientToolCallState.aborted]: { text: 'Aborted debugging', icon: XCircle },
},
uiConfig: {
subagent: {
streamingLabel: 'Debugging',
completedLabel: 'Debugged',
shouldCollapse: true,
outputArtifacts: [],
},
},
}
const META_table: ToolMetadata = {
displayNames: {
[ClientToolCallState.generating]: { text: 'Managing tables', icon: Loader2 },
@@ -2345,7 +2324,6 @@ const TOOL_METADATA_BY_ID: Record<string, ToolMetadata> = {
create_workflow: META_create_workflow,
agent: META_agent,
custom_tool: META_custom_tool,
debug: META_debug,
deploy: META_deploy,
deploy_api: META_deploy_api,
deploy_chat: META_deploy_chat,

View File

@@ -103,6 +103,12 @@ export interface SetGlobalWorkflowVariablesParams {
operations?: VariableOperation[]
}
export interface SetBlockEnabledParams {
workflowId?: string
blockId: string
enabled: boolean
}
// === Deployment Params ===
export interface DeployApiParams {

View File

@@ -1,7 +1,10 @@
import { db, workflow as workflowTable } from '@sim/db'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { createWorkspaceApiKey } from '@/lib/api-key/auth'
import { AuditAction, AuditResourceType, recordAudit } from '@/lib/audit/log'
import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/request/types'
import { env } from '@/lib/core/config/env'
import { generateRequestId } from '@/lib/core/utils/request'
import { generateId } from '@/lib/core/utils/uuid'
import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow'
@@ -10,7 +13,10 @@ import {
getLatestExecutionState,
} from '@/lib/workflows/executor/execution-state'
import { performDeleteFolder, performDeleteWorkflow } from '@/lib/workflows/orchestration'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils'
import {
loadWorkflowFromNormalizedTables,
saveWorkflowToNormalizedTables,
} from '@/lib/workflows/persistence/utils'
import { sanitizeForCopilot } from '@/lib/workflows/sanitization/json-sanitizer'
import {
checkForCircularReference,
@@ -22,6 +28,7 @@ import {
updateWorkflowRecord,
} from '@/lib/workflows/utils'
import { hasExecutionResult } from '@/executor/utils/errors'
import type { BlockState, WorkflowState } from '@/stores/workflows/workflow/types'
import { ensureWorkflowAccess, ensureWorkspaceAccess, getDefaultWorkspaceId } from '../access'
function stripBinaryFields(value: unknown): unknown {
@@ -71,6 +78,72 @@ function buildExecutionError(error: unknown): ToolCallResult {
return { success: false, error: message }
}
function isBlockProtected(blockId: string, blocksById: Record<string, BlockState>): boolean {
const block = blocksById[blockId]
if (!block) return false
if (block.locked) return true
const visited = new Set<string>()
let parentId = block.data?.parentId
while (parentId && !visited.has(parentId)) {
visited.add(parentId)
if (blocksById[parentId]?.locked) return true
parentId = blocksById[parentId]?.data?.parentId
}
return false
}
function hasDisabledAncestor(blockId: string, blocksById: Record<string, BlockState>): boolean {
const visited = new Set<string>()
let parentId = blocksById[blockId]?.data?.parentId
while (parentId && !visited.has(parentId)) {
visited.add(parentId)
const parent = blocksById[parentId]
if (!parent) return false
if (parent.enabled === false) return true
parentId = parent.data?.parentId
}
return false
}
/**
 * Collects the IDs of every block nested (directly or transitively) inside
 * a container, by scanning `data.parentId` links.
 *
 * Iterative depth-first walk with a seen-set, so parent-link cycles in
 * malformed state cannot loop forever.
 */
function findDescendants(containerId: string, blocksById: Record<string, BlockState>): string[] {
  const found: string[] = []
  const seen = new Set<string>()
  const pending: string[] = [containerId]
  while (pending.length > 0) {
    const parentId = pending.pop() as string
    if (seen.has(parentId)) continue
    seen.add(parentId)
    // Linear scan per container: block maps are small enough in practice.
    Object.entries(blocksById).forEach(([childId, child]) => {
      if (child.data?.parentId === parentId) {
        found.push(childId)
        pending.push(childId)
      }
    })
  }
  return found
}
/**
 * Fire-and-forget notification to the socket server that a workflow's
 * state changed. Failures are logged at warn level and never surfaced to
 * the caller — the mutation has already been persisted by this point.
 */
function notifyWorkflowUpdated(workflowId: string): void {
  const baseUrl = env.SOCKET_SERVER_URL || 'http://localhost:3002'
  const request: RequestInit = {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'x-api-key': env.INTERNAL_API_SECRET,
    },
    body: JSON.stringify({ workflowId }),
  }
  void fetch(`${baseUrl}/api/workflow-updated`, request).catch((error) => {
    logger.warn('Failed to notify socket server of workflow update', { workflowId, error })
  })
}
import type {
CreateFolderParams,
CreateWorkflowParams,
@@ -85,6 +158,7 @@ import type {
RunFromBlockParams,
RunWorkflowParams,
RunWorkflowUntilBlockParams,
SetBlockEnabledParams,
SetGlobalWorkflowVariablesParams,
UpdateWorkflowParams,
VariableOperation,
@@ -666,6 +740,137 @@ export async function executeUpdateWorkflow(
}
}
/**
 * Copilot tool handler: enable or disable a single workflow block.
 *
 * Loads the workflow's normalized state, flips `enabled` on the target
 * block — and, for loop/parallel containers, on all non-locked
 * descendants — persists the new state, bumps the workflow record's
 * sync timestamps, and pings the socket server so clients refresh.
 *
 * Guard rails checked before any write:
 * - caller needs write access to the workflow (ensureWorkflowAccess)
 * - locked blocks, or blocks inside locked containers, are rejected
 * - a block cannot be enabled while an ancestor container is disabled
 * - toggling to the current state short-circuits as a successful no-op
 *
 * @param params  blockId plus the desired `enabled` flag; workflowId may
 *                come from params or fall back to the execution context
 * @param context copilot execution context (user, workflow, abort signal)
 * @returns ToolCallResult carrying the resulting workflow state (raw and
 *          copilot-sanitized) on success, or `{ success: false, error }`
 */
export async function executeSetBlockEnabled(
  params: SetBlockEnabledParams,
  context: ExecutionContext
): Promise<ToolCallResult> {
  try {
    // Validate inputs first so callers get actionable errors cheaply.
    const workflowId = params.workflowId || context.workflowId
    if (!workflowId) {
      return { success: false, error: 'workflowId is required' }
    }
    if (!params.blockId) {
      return { success: false, error: 'blockId is required' }
    }
    if (typeof params.enabled !== 'boolean') {
      return { success: false, error: 'enabled must be a boolean' }
    }
    // Throws if the user lacks write permission on this workflow.
    const { workflow: workflowRecord } = await ensureWorkflowAccess(
      workflowId,
      context.userId,
      'write'
    )
    // Bail out early if the copilot run was cancelled mid-flight.
    assertWorkflowMutationNotAborted(context)
    const normalized = await loadWorkflowFromNormalizedTables(workflowId)
    if (!normalized) {
      return { success: false, error: `Workflow ${workflowId} has no normalized state` }
    }
    // Reconstruct an in-memory WorkflowState snapshot from the tables.
    const currentState: WorkflowState = {
      blocks: normalized.blocks as Record<string, BlockState>,
      edges: normalized.edges || [],
      loops: normalized.loops || {},
      parallels: normalized.parallels || {},
      lastSaved: Date.now(),
    }
    const currentBlocks = currentState.blocks
    const targetBlock = currentBlocks[params.blockId]
    if (!targetBlock) {
      return {
        success: false,
        error: `Block ${params.blockId} not found in workflow ${workflowId}`,
      }
    }
    // Locked blocks (directly, or via a locked ancestor) are immutable.
    if (isBlockProtected(params.blockId, currentBlocks)) {
      return {
        success: false,
        error: `Block ${params.blockId} is locked or inside a locked container and cannot be updated`,
      }
    }
    // No-op toggle: report success with the unchanged state, skip writes.
    if (targetBlock.enabled === params.enabled) {
      return {
        success: true,
        output: {
          workflowId,
          workflowName: workflowRecord.name,
          blockId: params.blockId,
          enabled: params.enabled,
          affectedBlockIds: [params.blockId],
          workflowState: currentState,
          copilotSanitizedWorkflowState: sanitizeForCopilot(currentState),
          message: `Block ${params.blockId} is already ${params.enabled ? 'enabled' : 'disabled'}`,
        },
      }
    }
    // Enabling a block under a disabled container would have no effect;
    // force the caller to enable the container first.
    if (params.enabled && hasDisabledAncestor(params.blockId, currentBlocks)) {
      return {
        success: false,
        error: `Cannot enable block ${params.blockId} while one of its parent containers is disabled. Enable the parent first.`,
      }
    }
    // Containers cascade the toggle to their (non-locked) descendants.
    const affectedBlockIds = new Set<string>([params.blockId])
    if (targetBlock.type === 'loop' || targetBlock.type === 'parallel') {
      for (const descendantId of findDescendants(params.blockId, currentBlocks)) {
        if (!isBlockProtected(descendantId, currentBlocks)) {
          affectedBlockIds.add(descendantId)
        }
      }
    }
    // Build the next state immutably: copy the block map and replace only
    // the affected entries.
    const nextBlocks: Record<string, BlockState> = { ...currentBlocks }
    for (const blockId of affectedBlockIds) {
      nextBlocks[blockId] = {
        ...nextBlocks[blockId],
        enabled: params.enabled,
      }
    }
    const nextState: WorkflowState = {
      ...currentState,
      blocks: nextBlocks,
      lastSaved: Date.now(),
    }
    // Re-check abort just before persisting, since loading may have taken time.
    assertWorkflowMutationNotAborted(context)
    const saveResult = await saveWorkflowToNormalizedTables(workflowId, nextState)
    if (!saveResult.success) {
      return {
        success: false,
        error: saveResult.error || `Failed to persist enabled state for block ${params.blockId}`,
      }
    }
    // Touch the workflow row so sync/ordering metadata reflects the change.
    await db
      .update(workflowTable)
      .set({
        lastSynced: new Date(),
        updatedAt: new Date(),
      })
      .where(eq(workflowTable.id, workflowId))
    // Best-effort realtime refresh for connected clients (non-blocking).
    notifyWorkflowUpdated(workflowId)
    return {
      success: true,
      output: {
        workflowId,
        workflowName: workflowRecord.name,
        blockId: params.blockId,
        enabled: params.enabled,
        affectedBlockIds: Array.from(affectedBlockIds),
        workflowState: nextState,
        copilotSanitizedWorkflowState: sanitizeForCopilot(nextState),
      },
    }
  } catch (error) {
    // All failures collapse into a ToolCallResult; the copilot never sees
    // a thrown exception from this handler.
    return { success: false, error: error instanceof Error ? error.message : String(error) }
  }
}
export async function executeDeleteWorkflow(
params: DeleteWorkflowParams,
context: ExecutionContext

View File

@@ -4,6 +4,7 @@ import { mcpService } from '@/lib/mcp/service'
import { listWorkspaceFiles } from '@/lib/uploads/contexts/workspace'
import { getEffectiveBlockOutputPaths } from '@/lib/workflows/blocks/block-outputs'
import { BlockPathCalculator } from '@/lib/workflows/blocks/block-path-calculator'
import { getBlockReferenceTags } from '@/lib/workflows/blocks/block-reference-tags'
import { listCustomTools } from '@/lib/workflows/custom-tools/operations'
import {
loadDeployedWorkflowState,
@@ -335,27 +336,29 @@ export async function executeGetBlockUpstreamReferences(
const blockName = block.name || block.type
let accessContext: 'inside' | 'outside' | undefined
let outputPaths: string[]
let formattedOutputs: string[]
if (block.type === 'loop' || block.type === 'parallel') {
const isInside =
(block.type === 'loop' && containingLoopIds.has(accessibleBlockId)) ||
(block.type === 'parallel' && containingParallelIds.has(accessibleBlockId))
accessContext = isInside ? 'inside' : 'outside'
outputPaths = isInside
const outputPaths = isInside
? getSubflowInsidePaths(block.type, accessibleBlockId, loops, parallels)
: ['results']
formattedOutputs = formatOutputsWithPrefix(outputPaths, blockName)
} else {
const blockConfig = getBlock(block.type)
const isTriggerCapable = blockConfig ? hasTriggerCapability(blockConfig) : false
const triggerMode = Boolean(block.triggerMode && isTriggerCapable)
outputPaths = getEffectiveBlockOutputPaths(block.type, block.subBlocks, {
triggerMode,
preferToolOutputs: !triggerMode,
formattedOutputs = getBlockReferenceTags({
block: {
id: accessibleBlockId,
type: block.type,
name: block.name,
triggerMode: block.triggerMode,
subBlocks: block.subBlocks,
},
currentBlockId: blockId,
})
}
const formattedOutputs = formatOutputsWithPrefix(outputPaths, blockName)
const entry: AccessibleBlockEntry = {
blockId: accessibleBlockId,
blockName,

View File

@@ -397,22 +397,6 @@ Supports full and partial execution:
},
annotations: { destructiveHint: false, openWorldHint: true },
},
{
name: 'sim_debug',
agentId: 'debug',
description:
'Diagnose errors or unexpected workflow behavior. Provide the error message and workflowId. Returns root cause analysis and fix suggestions.',
inputSchema: {
type: 'object',
properties: {
error: { type: 'string', description: 'The error message or description of the issue.' },
workflowId: { type: 'string', description: 'REQUIRED. The workflow ID to debug.' },
context: { type: 'object' },
},
required: ['error', 'workflowId'],
},
annotations: { readOnlyHint: true },
},
{
name: 'sim_auth',
agentId: 'auth',

View File

@@ -0,0 +1,76 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Hoisted so the mock function exists before the mocked module is evaluated.
const { mockGetBlock } = vi.hoisted(() => ({
  mockGetBlock: vi.fn(),
}))
// Stub the block registry; each test configures mockGetBlock's return
// value to shape the outputs under test.
vi.mock('@/blocks/registry', () => ({
  getBlock: mockGetBlock,
  getAllBlocks: vi.fn(() => ({})),
}))
import { getBlockReferenceTags } from '@/lib/workflows/blocks/block-reference-tags'
describe('getBlockReferenceTags', () => {
  beforeEach(() => {
    // Default registry stub: a block exposing two plain string outputs.
    mockGetBlock.mockReset()
    mockGetBlock.mockReturnValue({
      outputs: {
        content: { type: 'string' },
        model: { type: 'string' },
      },
      subBlocks: [],
    })
  })
  // An agent block with a structured responseFormat should surface the
  // schema's property names as tags, not the registry's default outputs.
  it('returns agent responseFormat fields instead of default outputs', () => {
    const tags = getBlockReferenceTags({
      block: {
        id: 'agent-1',
        type: 'agent',
        name: 'Classify Email',
        subBlocks: {
          responseFormat: {
            value: {
              name: 'email_classification',
              schema: {
                type: 'object',
                properties: {
                  isImportant: { type: 'boolean' },
                  draftReply: { type: 'string' },
                  reason: { type: 'string' },
                },
                required: ['isImportant', 'draftReply', 'reason'],
                additionalProperties: false,
              },
              strict: true,
            },
          },
        },
      },
    })
    // Tag prefix is the normalized (lowercased, de-spaced) block name.
    expect(tags).toEqual([
      'classifyemail.isImportant',
      'classifyemail.draftReply',
      'classifyemail.reason',
    ])
  })
  // A variables block exposes one tag per named variable assignment.
  it('returns variables block assignments as block tags', () => {
    const tags = getBlockReferenceTags({
      block: {
        id: 'variables-1',
        type: 'variables',
        name: 'Workflow Vars',
        subBlocks: {
          variables: {
            value: [{ variableName: 'currentDraft' }, { variableName: 'needsRevision' }],
          },
        },
      },
    })
    expect(tags).toEqual(['workflowvars.currentDraft', 'workflowvars.needsRevision'])
  })
})

View File

@@ -0,0 +1,83 @@
import { getEffectiveBlockOutputPaths } from '@/lib/workflows/blocks/block-outputs'
import { hasTriggerCapability } from '@/lib/workflows/triggers/trigger-utils'
import { TRIGGER_TYPES } from '@/lib/workflows/triggers/triggers'
import { getBlock } from '@/blocks'
import { normalizeName } from '@/executor/constants'
/** Minimal shape of a workflow block needed to compute its reference tags. */
interface ReferenceableBlock {
  id: string
  type: string
  // Display name; the block type is used as a fallback prefix when absent.
  name?: string
  // When true (and the block type supports triggers), trigger outputs apply.
  triggerMode?: boolean
  subBlocks?: Record<string, { value?: unknown }>
}

/** Input to getBlockReferenceTags. */
interface GetBlockReferenceTagsOptions {
  block: ReferenceableBlock
  // ID of the block currently being edited; a human_in_the_loop block
  // referencing itself only exposes its resume url/resumeEndpoint tags.
  currentBlockId?: string
  // Optional sub-block override; falls back to block.subBlocks when omitted.
  subBlocks?: Record<string, { value?: unknown }>
}
/**
* Returns the exact reference tags shown in the workflow tag dropdown for a block.
*/
export function getBlockReferenceTags({
block,
currentBlockId,
subBlocks,
}: GetBlockReferenceTagsOptions): string[] {
const blockName = block.name || block.type
const normalizedBlockName = normalizeName(blockName)
const mergedSubBlocks = subBlocks ?? block.subBlocks
if (block.type === 'variables') {
const variablesValue = mergedSubBlocks?.variables?.value
if (Array.isArray(variablesValue) && variablesValue.length > 0) {
const validAssignments = variablesValue.filter((assignment: { variableName?: string }) =>
assignment?.variableName?.trim()
)
if (validAssignments.length > 0) {
return validAssignments.map(
(assignment: { variableName: string }) =>
`${normalizedBlockName}.${assignment.variableName.trim()}`
)
}
}
return [normalizedBlockName]
}
const blockConfig = getBlock(block.type)
if (!blockConfig) {
return []
}
const isTriggerCapable = hasTriggerCapability(blockConfig)
const effectiveTriggerMode = Boolean(block.triggerMode && isTriggerCapable)
const outputPaths = getEffectiveBlockOutputPaths(block.type, mergedSubBlocks, {
triggerMode: effectiveTriggerMode,
preferToolOutputs: !effectiveTriggerMode,
})
const allTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
let blockTags: string[]
if (block.type === 'human_in_the_loop' && block.id === currentBlockId) {
blockTags = allTags.filter((tag) => tag.endsWith('.url') || tag.endsWith('.resumeEndpoint'))
} else if (allTags.length === 0) {
blockTags = [normalizedBlockName]
} else {
blockTags = allTags
}
if (!blockTags.includes(normalizedBlockName)) {
blockTags = [normalizedBlockName, ...blockTags]
}
const shouldShowRootTag =
block.type === TRIGGER_TYPES.GENERIC_WEBHOOK || block.type === 'start_trigger'
if (!shouldShowRootTag) {
blockTags = blockTags.filter((tag) => tag !== normalizedBlockName)
}
return blockTags
}

View File

@@ -28,6 +28,7 @@ const {
mockGenerateInternalToken,
mockSecureFetchWithPinnedIP,
mockValidateUrlWithDNS,
mockResolveWorkspaceFileReference,
} = vi.hoisted(() => ({
mockIsHosted: { value: false },
mockEnv: { NEXT_PUBLIC_APP_URL: 'http://localhost:3000' } as Record<string, string | undefined>,
@@ -44,6 +45,7 @@ const {
mockGenerateInternalToken: vi.fn(),
mockSecureFetchWithPinnedIP: vi.fn(),
mockValidateUrlWithDNS: vi.fn(),
mockResolveWorkspaceFileReference: vi.fn(),
}))
// Mock feature flags
@@ -86,6 +88,10 @@ vi.mock('@/lib/core/rate-limiter/hosted-key', () => ({
getHostedKeyRateLimiter: () => mockRateLimiterFns,
}))
vi.mock('@/lib/uploads/contexts/workspace/workspace-file-manager', () => ({
resolveWorkspaceFileReference: (...args: unknown[]) => mockResolveWorkspaceFileReference(...args),
}))
// Mock the tools registry to avoid loading the full 4500+ line registry file.
// Only the tools actually exercised in tests are provided.
vi.mock('@/tools/registry', () => {
@@ -188,6 +194,44 @@ vi.mock('@/tools/registry', () => {
params: {},
request: { url: '/api/tools/gmail/send', method: 'POST' },
},
test_single_file_tool: {
id: 'test_single_file_tool',
name: 'Test Single File Tool',
description: 'Accepts a single file parameter',
version: '1.0.0',
params: {
attachment: { type: 'file', required: true },
},
request: {
url: '/api/tools/test/single-file',
method: 'POST',
headers: () => ({ 'Content-Type': 'application/json' }),
body: (p: any) => ({ attachment: p.attachment }),
},
transformResponse: async (response: any) => {
const data = await response.json()
return { success: true, output: data }
},
},
test_file_array_tool: {
id: 'test_file_array_tool',
name: 'Test File Array Tool',
description: 'Accepts an array of file parameters',
version: '1.0.0',
params: {
attachments: { type: 'file[]', required: true },
},
request: {
url: '/api/tools/test/file-array',
method: 'POST',
headers: () => ({ 'Content-Type': 'application/json' }),
body: (p: any) => ({ attachments: p.attachments }),
},
transformResponse: async (response: any) => {
const data = await response.json()
return { success: true, output: data }
},
},
google_drive_list: {
id: 'google_drive_list',
name: 'Google Drive List',
@@ -747,6 +791,197 @@ describe('Automatic Internal Route Detection', () => {
})
})
// End-to-end tests for copilot file-parameter normalization: canonical
// workspace file IDs (and partial file objects) passed to tools should be
// resolved into full file objects before the tool's HTTP request is built.
describe('Copilot File Parameter Normalization', () => {
  let cleanupEnvVars: () => void
  beforeEach(() => {
    process.env.NEXT_PUBLIC_APP_URL = 'http://localhost:3000'
    cleanupEnvVars = setupEnvVars({ NEXT_PUBLIC_APP_URL: 'http://localhost:3000' })
    mockResolveWorkspaceFileReference.mockReset()
  })
  afterEach(() => {
    vi.resetAllMocks()
    cleanupEnvVars()
  })
  it('resolves canonical file IDs for single-file params during copilot execution', async () => {
    // Workspace lookup returns the full file record for the given ID.
    mockResolveWorkspaceFileReference.mockResolvedValue({
      id: 'wf_123',
      name: 'brief.pdf',
      path: '/api/files/wf_123',
      size: 512,
      type: 'application/pdf',
      key: 'uploads/wf_123',
    })
    // The fetch stub asserts on the outgoing body: the bare ID must have
    // been expanded into a full file object (path mapped to url).
    global.fetch = Object.assign(
      vi.fn().mockImplementation(async (_url, options) => {
        const body = JSON.parse(options?.body as string)
        expect(body.attachment).toEqual({
          id: 'wf_123',
          name: 'brief.pdf',
          url: '/api/files/wf_123',
          size: 512,
          type: 'application/pdf',
          key: 'uploads/wf_123',
          context: 'workspace',
        })
        return {
          ok: true,
          status: 200,
          statusText: 'OK',
          headers: new Headers(),
          json: () => Promise.resolve({ ok: true }),
          text: () => Promise.resolve(JSON.stringify({ ok: true })),
          clone: vi.fn().mockReturnThis(),
        }
      }),
      { preconnect: vi.fn() }
    ) as typeof fetch
    // copilotToolExecution flags the call as originating from the copilot.
    const context = createToolExecutionContext({
      workspaceId: 'workspace-456',
      copilotToolExecution: true,
    } as any)
    const result = await executeTool(
      'test_single_file_tool',
      { attachment: 'wf_123' },
      false,
      context
    )
    expect(result.success).toBe(true)
    expect(mockResolveWorkspaceFileReference).toHaveBeenCalledWith('workspace-456', 'wf_123')
  })
  it('resolves file-array params from strings and partial file objects, while preserving full file objects', async () => {
    // Lookup fabricates a deterministic record per requested ID.
    mockResolveWorkspaceFileReference.mockImplementation(
      async (_workspaceId: string, fileId: string) => ({
        id: fileId,
        name: `${fileId}.txt`,
        path: `/api/files/${fileId}`,
        size: 128,
        type: 'text/plain',
        key: `uploads/${fileId}`,
      })
    )
    // Already-complete file objects must pass through untouched.
    const existingFileObject = {
      id: 'wf_existing',
      name: 'existing.txt',
      url: '/api/files/wf_existing',
      size: 64,
      type: 'text/plain',
      key: 'uploads/wf_existing',
      context: 'workspace',
    }
    // Partial objects (id only + extras) are re-resolved by their id.
    const partialFileObject = {
      id: 'wf_partial',
      name: 'partial.txt',
    }
    global.fetch = Object.assign(
      vi.fn().mockImplementation(async (_url, options) => {
        const body = JSON.parse(options?.body as string)
        // Array order must be preserved; only strings and partials resolved.
        expect(body.attachments).toEqual([
          {
            id: 'wf_1',
            name: 'wf_1.txt',
            url: '/api/files/wf_1',
            size: 128,
            type: 'text/plain',
            key: 'uploads/wf_1',
            context: 'workspace',
          },
          {
            id: 'wf_partial',
            name: 'wf_partial.txt',
            url: '/api/files/wf_partial',
            size: 128,
            type: 'text/plain',
            key: 'uploads/wf_partial',
            context: 'workspace',
          },
          existingFileObject,
          {
            id: 'wf_2',
            name: 'wf_2.txt',
            url: '/api/files/wf_2',
            size: 128,
            type: 'text/plain',
            key: 'uploads/wf_2',
            context: 'workspace',
          },
        ])
        return {
          ok: true,
          status: 200,
          statusText: 'OK',
          headers: new Headers(),
          json: () => Promise.resolve({ ok: true }),
          text: () => Promise.resolve(JSON.stringify({ ok: true })),
          clone: vi.fn().mockReturnThis(),
        }
      }),
      { preconnect: vi.fn() }
    ) as typeof fetch
    const context = createToolExecutionContext({
      workspaceId: 'workspace-456',
      copilotToolExecution: true,
    } as any)
    const result = await executeTool(
      'test_file_array_tool',
      { attachments: ['wf_1', partialFileObject, existingFileObject, 'wf_2'] },
      false,
      context
    )
    expect(result.success).toBe(true)
    // Three lookups: two strings + one partial; the complete object skips it.
    expect(mockResolveWorkspaceFileReference).toHaveBeenCalledTimes(3)
  })
  it('does not resolve file params outside copilot execution', async () => {
    global.fetch = Object.assign(
      vi.fn().mockImplementation(async (_url, options) => {
        const body = JSON.parse(options?.body as string)
        // Without copilotToolExecution the raw string is forwarded as-is.
        expect(body.attachment).toBe('wf_123')
        return {
          ok: true,
          status: 200,
          statusText: 'OK',
          headers: new Headers(),
          json: () => Promise.resolve({ ok: true }),
          text: () => Promise.resolve(JSON.stringify({ ok: true })),
          clone: vi.fn().mockReturnThis(),
        }
      }),
      { preconnect: vi.fn() }
    ) as typeof fetch
    const context = createToolExecutionContext({
      workspaceId: 'workspace-456',
    } as any)
    const result = await executeTool(
      'test_single_file_tool',
      { attachment: 'wf_123' },
      false,
      context
    )
    expect(result.success).toBe(true)
    expect(mockResolveWorkspaceFileReference).not.toHaveBeenCalled()
  })
})
describe('Centralized Error Handling', () => {
let cleanupEnvVars: () => void

View File

@@ -11,11 +11,13 @@ import {
import { PlatformEvents } from '@/lib/core/telemetry'
import { generateRequestId } from '@/lib/core/utils/request'
import { getBaseUrl, getInternalApiBaseUrl } from '@/lib/core/utils/urls'
import { isUserFile } from '@/lib/core/utils/user-file'
import { SIM_VIA_HEADER, serializeCallChain } from '@/lib/execution/call-chain'
import { parseMcpToolId } from '@/lib/mcp/utils'
import { resolveWorkspaceFileReference } from '@/lib/uploads/contexts/workspace/workspace-file-manager'
import { isCustomTool, isMcpTool } from '@/executor/constants'
import { resolveSkillContent } from '@/executor/handlers/agent/skills-resolver'
import type { ExecutionContext } from '@/executor/types'
import type { ExecutionContext, UserFile } from '@/executor/types'
import type { ErrorInfo } from '@/tools/error-extractors'
import { extractErrorMessage } from '@/tools/error-extractors'
import type {
@@ -39,6 +41,7 @@ interface ToolExecutionScope {
callChain?: string[]
isDeployedContext?: boolean
enforceCredentialAccess?: boolean
copilotToolExecution?: boolean
}
function resolveToolScope(
@@ -57,6 +60,108 @@ function resolveToolScope(
| undefined,
enforceCredentialAccess: (executionContext?.enforceCredentialAccess ??
ctx?.enforceCredentialAccess) as boolean | undefined,
copilotToolExecution: (executionContext?.copilotToolExecution ?? ctx?.copilotToolExecution) as
| boolean
| undefined,
}
}
/**
 * Maps a workspace file record onto the executor's UserFile shape.
 * Prefers the record's explicit `url`, falling back to its serve `path`,
 * and always tags the result with the 'workspace' context.
 */
function toUserFileFromWorkspaceRecord(record: {
  id: string
  name: string
  path: string
  url?: string
  size: number
  type: string
  key: string
}): UserFile {
  const { id, name, path, url, size, type, key } = record
  return {
    id,
    name,
    url: url ?? path,
    size,
    type,
    key,
    context: 'workspace',
  }
}
/**
 * Resolves one copilot-supplied file parameter value into a full UserFile.
 *
 * Accepted inputs:
 * - an already-complete UserFile → returned unchanged
 * - a canonical workspace file ID string → looked up in the workspace
 * - a partial object carrying a string `id` → looked up, with any
 *   caller-supplied `context`/`base64` strings preserved on the result
 * - anything else → passed through untouched
 *
 * @throws when the referenced workspace file cannot be found
 */
async function resolveCopilotFileReference(
  value: unknown,
  workspaceId: string,
  paramId: string
): Promise<UserFile | unknown> {
  if (isUserFile(value)) {
    return value
  }

  let referenceId: string | null = null
  if (typeof value === 'string') {
    referenceId = value
  } else if (value && typeof value === 'object') {
    const maybeId = (value as Record<string, unknown>).id
    if (typeof maybeId === 'string') {
      referenceId = maybeId
    }
  }
  if (!referenceId) {
    return value
  }

  const fileRecord = await resolveWorkspaceFileReference(workspaceId, referenceId)
  if (!fileRecord) {
    throw new Error(
      `Could not resolve workspace file reference "${referenceId}" for parameter "${paramId}"`
    )
  }

  const resolvedFile = toUserFileFromWorkspaceRecord(fileRecord)
  if (!value || typeof value !== 'object') {
    return resolvedFile
  }

  // Partial object input: keep its context/base64 strings if present.
  const partial = value as Record<string, unknown>
  return {
    ...resolvedFile,
    context: typeof partial.context === 'string' ? partial.context : resolvedFile.context,
    base64: typeof partial.base64 === 'string' ? partial.base64 : undefined,
  }
}
/**
 * In-place normalization of tool params for copilot-originated calls:
 * values of params typed `file` / `file[]` are resolved from canonical
 * workspace references into full file objects before execution.
 *
 * A no-op unless the scope is flagged as a copilot tool execution; null
 * and undefined values are left untouched.
 *
 * @throws when a file param is present but the scope has no workspaceId
 */
async function normalizeCopilotFileParams(
  tool: ToolConfig,
  params: Record<string, unknown>,
  scope: ToolExecutionScope
): Promise<void> {
  if (!scope.copilotToolExecution) {
    return
  }

  for (const [paramId, paramDef] of Object.entries(tool.params || {})) {
    const value = params[paramId]
    if (value === undefined || value === null) {
      continue
    }

    const paramType = paramDef?.type
    const isSingleFile = paramType === 'file'
    const isFileArray = paramType === 'file[]'
    if (!isSingleFile && !isFileArray) {
      continue
    }

    const workspaceId = scope.workspaceId
    if (!workspaceId) {
      throw new Error(`Missing workspaceId while resolving file parameter "${paramId}"`)
    }

    if (isSingleFile) {
      params[paramId] = await resolveCopilotFileReference(value, workspaceId, paramId)
    } else {
      // Non-array values are wrapped so a single reference still resolves.
      const entries = Array.isArray(value) ? value : [value]
      params[paramId] = await Promise.all(
        entries.map((entry) => resolveCopilotFileReference(entry, workspaceId, paramId))
      )
    }
  }
}
@@ -683,6 +788,8 @@ export async function executeTool(
throw new Error(`Tool not found: ${toolId}`)
}
await normalizeCopilotFileParams(tool, contextParams, scope)
// Inject hosted API key if tool supports it and user didn't provide one
const hostedKeyInfo = await injectHostedKeyIfNeeded(
tool,

View File

@@ -141,6 +141,61 @@ describe('Tool Parameters Utils', () => {
expect(schema.required).not.toContain('accessToken')
expect(schema.properties).toHaveProperty('message')
})
// Default surface: a 'file' param keeps its raw type and description.
it.concurrent('keeps shared file params unchanged by default', () => {
  const toolWithFileParam = {
    ...mockToolConfig,
    id: 'file_schema_tool',
    params: {
      attachment: {
        type: 'file',
        required: true,
        visibility: 'user-or-llm' as ParameterVisibility,
        description: 'Attachment file',
      },
    },
  }
  const schema = createUserToolSchema(toolWithFileParam)
  expect(schema.properties.attachment).toMatchObject({
    type: 'file',
    description: 'Attachment file',
  })
})
// Copilot surface: 'file' / 'file[]' params are expanded into structured
// file-object schemas whose descriptions steer the model toward canonical
// workspace file IDs.
it.concurrent('expands file params for copilot-facing schemas', () => {
  const toolWithFileParams = {
    ...mockToolConfig,
    id: 'copilot_file_schema_tool',
    params: {
      attachment: {
        type: 'file',
        required: true,
        visibility: 'user-or-llm' as ParameterVisibility,
        description: 'Attachment file',
      },
      attachments: {
        type: 'file[]',
        required: false,
        visibility: 'user-or-llm' as ParameterVisibility,
        description: 'Attachment files',
      },
    },
  }
  const schema = createUserToolSchema(toolWithFileParams, { surface: 'copilot' })
  expect(schema.properties.attachment).toMatchObject({
    type: 'object',
    required: ['id', 'name', 'url', 'size', 'type', 'key'],
  })
  expect(schema.properties.attachment.description).toContain('canonical workspace file IDs')
  expect(schema.properties.attachments).toMatchObject({
    type: 'array',
  })
  expect(schema.properties.attachments.description).toContain('canonical workspace file IDs')
})
})
describe('createExecutionToolSchema', () => {

View File

@@ -123,6 +123,10 @@ export interface ToolSchema {
required: string[]
}
/** Options controlling how a user-facing tool schema is generated. */
export interface UserToolSchemaOptions {
  // 'copilot' expands file/file[] params into full file-object schemas;
  // 'default' (or omitted) leaves them as their raw param types.
  surface?: 'default' | 'copilot'
}
export interface LLMToolSchemaResult {
schema: ToolSchema
enrichedDescription?: string
@@ -390,8 +394,15 @@ export function getToolParametersConfig(
function buildParameterSchema(
toolId: string,
paramId: string,
param: ToolParamDefinition
param: ToolParamDefinition,
options: UserToolSchemaOptions = {}
): SchemaProperty {
const surface = options.surface ?? 'default'
if (surface === 'copilot' && (param.type === 'file' || param.type === 'file[]')) {
return buildCopilotFileParameterSchema(param)
}
let schemaType = param.type
if (schemaType === 'json' || schemaType === 'any') {
schemaType = 'object'
@@ -416,7 +427,50 @@ function buildParameterSchema(
return propertySchema
}
export function createUserToolSchema(toolConfig: ToolConfig): ToolSchema {
/**
 * Builds the copilot-facing JSON schema for a `file` or `file[]` tool
 * parameter: a full file-object schema (or an array of them) whose
 * description steers callers toward canonical workspace file IDs that the
 * runtime resolves before tool execution.
 */
function buildCopilotFileParameterSchema(param: ToolParamDefinition): SchemaProperty {
  const isSingleFile = param.type === 'file'
  const fallbackDescription = isSingleFile
    ? 'A file object for tool execution.'
    : 'An array of file objects for tool execution.'
  const description = `${param.description || fallbackDescription} For copilot and mothership tool calls, prefer passing canonical workspace file IDs such as "wf_123". The runtime will resolve them into full file objects before tool execution.`

  // One shared property map for both the single-file and array variants.
  const fileObjectProperties = {
    id: { type: 'string', description: 'Canonical workspace file ID.' },
    name: { type: 'string', description: 'File name.' },
    url: { type: 'string', description: 'File URL or serve path.' },
    size: { type: 'number', description: 'File size in bytes.' },
    type: { type: 'string', description: 'MIME type.' },
    key: { type: 'string', description: 'Internal storage key.' },
    context: { type: 'string', description: 'Optional file context.' },
    base64: { type: 'string', description: 'Optional base64-encoded file contents.' },
  }

  if (isSingleFile) {
    return {
      type: 'object',
      description,
      properties: fileObjectProperties,
      required: ['id', 'name', 'url', 'size', 'type', 'key'],
    }
  }
  return {
    type: 'array',
    description,
    items: {
      type: 'object',
      description: 'A file object.',
      properties: fileObjectProperties,
    },
  }
}
export function createUserToolSchema(
toolConfig: ToolConfig,
options: UserToolSchemaOptions = {}
): ToolSchema {
const schema: ToolSchema = {
type: 'object',
properties: {},
@@ -430,7 +484,7 @@ export function createUserToolSchema(toolConfig: ToolConfig): ToolSchema {
continue
}
const propertySchema = buildParameterSchema(toolConfig.id, paramId, param)
const propertySchema = buildParameterSchema(toolConfig.id, paramId, param, options)
schema.properties[paramId] = propertySchema
if (param.required) {