mirror of
https://github.com/simstudioai/sim.git
synced 2026-04-06 03:00:16 -04:00
feat(copilot): copilot mcp + server side copilot execution (#3173)
* v0 * v1 * Basic ss tes * Ss tests * Stuff * Add mcp * mcp v1 * Improvement * Fix * BROKEN * Checkpoint * Streaming * Fix abort * Things are broken * Streaming seems to work but copilot is dumb * Fix edge issue * LUAAAA * Fix stream buffer * Fix lint * Checkpoint * Initial temp state, in the middle of a refactor * Initial test shows diff store still working * Tool refactor * First cleanup pass complete - untested * Continued cleanup * Refactor * Refactor complete - no testing yet * Fix - cursor makes me sad * Fix mcp * Clean up mcp * Updated mcp * Add respond to subagents * Fix definitions * Add tools * Add tools * Add copilot mcp tracking * Fix lint * Fix mcp * Fix * Updates * Clean up mcp * Fix copilot mcp tool names to be sim prefixed * Add opus 4.6 * Fix discovery tool * Fix * Remove logs * Fix go side tool rendering * Update docs * Fix hydration * Fix tool call resolution * Fix * Fix lint * Fix superagent and autoallow integrations * Fix always allow * Update block * Remove plan docs * Fix hardcoded ff * Fix dropped provider * Fix lint * Fix tests * Fix dead messages array * Fix discovery * Fix run workflow * Fix run block * Fix run from block in copilot * Fix lint * Fix skip and mtb * Fix typing * Fix tool call * Bump api version * Fix bun lock * Nuke bad files
This commit is contained in:
committed by
GitHub
parent
e5d30494cb
commit
190f12fd77
@@ -14,7 +14,7 @@ export type UsageLogCategory = 'model' | 'fixed'
|
||||
/**
|
||||
* Usage log source types
|
||||
*/
|
||||
export type UsageLogSource = 'workflow' | 'wand' | 'copilot'
|
||||
export type UsageLogSource = 'workflow' | 'wand' | 'copilot' | 'mcp_copilot'
|
||||
|
||||
/**
|
||||
* Metadata for 'model' category charges
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { COPILOT_CHAT_API_PATH, COPILOT_CHAT_STREAM_API_PATH } from '@/lib/copilot/constants'
|
||||
import type { CopilotMode, CopilotModelId, CopilotTransportMode } from '@/lib/copilot/models'
|
||||
|
||||
const logger = createLogger('CopilotAPI')
|
||||
@@ -82,6 +83,7 @@ export interface SendMessageRequest {
|
||||
executionId?: string
|
||||
}>
|
||||
commands?: string[]
|
||||
resumeFromEventId?: number
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -120,7 +122,7 @@ export async function sendStreamingMessage(
|
||||
request: SendMessageRequest
|
||||
): Promise<StreamingResponse> {
|
||||
try {
|
||||
const { abortSignal, ...requestBody } = request
|
||||
const { abortSignal, resumeFromEventId, ...requestBody } = request
|
||||
try {
|
||||
const preview = Array.isArray((requestBody as any).contexts)
|
||||
? (requestBody as any).contexts.map((c: any) => ({
|
||||
@@ -136,9 +138,56 @@ export async function sendStreamingMessage(
|
||||
? (requestBody as any).contexts.length
|
||||
: 0,
|
||||
contextsPreview: preview,
|
||||
resumeFromEventId,
|
||||
})
|
||||
} catch {}
|
||||
const response = await fetch('/api/copilot/chat', {
|
||||
} catch (error) {
|
||||
logger.warn('Failed to log streaming message context preview', {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
|
||||
const streamId = request.userMessageId
|
||||
if (typeof resumeFromEventId === 'number') {
|
||||
if (!streamId) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'streamId is required to resume a stream',
|
||||
status: 400,
|
||||
}
|
||||
}
|
||||
const url = `${COPILOT_CHAT_STREAM_API_PATH}?streamId=${encodeURIComponent(
|
||||
streamId
|
||||
)}&from=${encodeURIComponent(String(resumeFromEventId))}`
|
||||
const response = await fetch(url, {
|
||||
method: 'GET',
|
||||
signal: abortSignal,
|
||||
credentials: 'include',
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorMessage = await handleApiError(response, 'Failed to resume streaming message')
|
||||
return {
|
||||
success: false,
|
||||
error: errorMessage,
|
||||
status: response.status,
|
||||
}
|
||||
}
|
||||
|
||||
if (!response.body) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'No response body received',
|
||||
status: 500,
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
stream: response.body,
|
||||
}
|
||||
}
|
||||
|
||||
const response = await fetch(COPILOT_CHAT_API_PATH, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ ...requestBody, stream: true }),
|
||||
|
||||
66
apps/sim/lib/copilot/chat-context.ts
Normal file
66
apps/sim/lib/copilot/chat-context.ts
Normal file
@@ -0,0 +1,66 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { CopilotFiles } from '@/lib/uploads'
|
||||
import { createFileContent } from '@/lib/uploads/utils/file-utils'
|
||||
|
||||
const logger = createLogger('CopilotChatContext')
|
||||
|
||||
/**
|
||||
* Build conversation history from stored chat messages.
|
||||
*/
|
||||
export function buildConversationHistory(
|
||||
messages: unknown[],
|
||||
conversationId?: string
|
||||
): { history: unknown[]; conversationId?: string } {
|
||||
const history = Array.isArray(messages) ? messages : []
|
||||
return {
|
||||
history,
|
||||
...(conversationId ? { conversationId } : {}),
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Raw file-attachment descriptor as received from the client.
 * Both `name`/`filename` and `mimeType`/`media_type` variants exist —
 * presumably different callers send different shapes; confirm before removing either.
 */
export interface FileAttachmentInput {
  id: string
  key: string
  name?: string
  filename?: string
  mimeType?: string
  media_type?: string
  size: number
}

/** Processed file-content block included in the copilot request payload. */
export interface FileContent {
  type: string
  // Remaining shape is provider-specific; kept open-ended
  [key: string]: unknown
}
|
||||
|
||||
/**
|
||||
* Process file attachments into content for the payload.
|
||||
*/
|
||||
export async function processFileAttachments(
|
||||
fileAttachments: FileAttachmentInput[],
|
||||
userId: string
|
||||
): Promise<FileContent[]> {
|
||||
if (!Array.isArray(fileAttachments) || fileAttachments.length === 0) return []
|
||||
|
||||
const processedFileContents: FileContent[] = []
|
||||
const requestId = `copilot-${userId}-${Date.now()}`
|
||||
const processedAttachments = await CopilotFiles.processCopilotAttachments(
|
||||
fileAttachments as Parameters<typeof CopilotFiles.processCopilotAttachments>[0],
|
||||
requestId
|
||||
)
|
||||
|
||||
for (const { buffer, attachment } of processedAttachments) {
|
||||
const fileContent = createFileContent(buffer, attachment.media_type)
|
||||
if (fileContent) {
|
||||
processedFileContents.push(fileContent as FileContent)
|
||||
}
|
||||
}
|
||||
|
||||
logger.debug('Processed file attachments for payload', {
|
||||
userId,
|
||||
inputCount: fileAttachments.length,
|
||||
outputCount: processedFileContents.length,
|
||||
})
|
||||
|
||||
return processedFileContents
|
||||
}
|
||||
69
apps/sim/lib/copilot/chat-lifecycle.ts
Normal file
69
apps/sim/lib/copilot/chat-lifecycle.ts
Normal file
@@ -0,0 +1,69 @@
|
||||
import { db } from '@sim/db'
|
||||
import { copilotChats } from '@sim/db/schema'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { and, eq } from 'drizzle-orm'
|
||||
|
||||
const logger = createLogger('CopilotChatLifecycle')
|
||||
|
||||
/** Result of resolving or creating a copilot chat session. */
export interface ChatLoadResult {
  // Empty string when chat creation failed — callers should check before use
  chatId: string
  // Null when the requested chat was not found (or creation failed)
  chat: typeof copilotChats.$inferSelect | null
  conversationHistory: unknown[]
  // True when a new chat row was created (or creation was attempted)
  isNew: boolean
}
|
||||
|
||||
/**
|
||||
* Resolve or create a copilot chat session.
|
||||
* If chatId is provided, loads the existing chat. Otherwise creates a new one.
|
||||
*/
|
||||
export async function resolveOrCreateChat(params: {
|
||||
chatId?: string
|
||||
userId: string
|
||||
workflowId: string
|
||||
model: string
|
||||
}): Promise<ChatLoadResult> {
|
||||
const { chatId, userId, workflowId, model } = params
|
||||
|
||||
if (chatId) {
|
||||
const [chat] = await db
|
||||
.select()
|
||||
.from(copilotChats)
|
||||
.where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, userId)))
|
||||
.limit(1)
|
||||
|
||||
return {
|
||||
chatId,
|
||||
chat: chat ?? null,
|
||||
conversationHistory: chat && Array.isArray(chat.messages) ? chat.messages : [],
|
||||
isNew: false,
|
||||
}
|
||||
}
|
||||
|
||||
const [newChat] = await db
|
||||
.insert(copilotChats)
|
||||
.values({
|
||||
userId,
|
||||
workflowId,
|
||||
title: null,
|
||||
model,
|
||||
messages: [],
|
||||
})
|
||||
.returning()
|
||||
|
||||
if (!newChat) {
|
||||
logger.warn('Failed to create new copilot chat row', { userId, workflowId })
|
||||
return {
|
||||
chatId: '',
|
||||
chat: null,
|
||||
conversationHistory: [],
|
||||
isNew: true,
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
chatId: newChat.id,
|
||||
chat: newChat,
|
||||
conversationHistory: [],
|
||||
isNew: true,
|
||||
}
|
||||
}
|
||||
209
apps/sim/lib/copilot/chat-payload.ts
Normal file
209
apps/sim/lib/copilot/chat-payload.ts
Normal file
@@ -0,0 +1,209 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { processFileAttachments } from '@/lib/copilot/chat-context'
|
||||
import { getCopilotModel } from '@/lib/copilot/config'
|
||||
import { SIM_AGENT_VERSION } from '@/lib/copilot/constants'
|
||||
import { getCredentialsServerTool } from '@/lib/copilot/tools/server/user/get-credentials'
|
||||
import type { CopilotProviderConfig } from '@/lib/copilot/types'
|
||||
import { env } from '@/lib/core/config/env'
|
||||
import { tools } from '@/tools/registry'
|
||||
import { getLatestVersionTools, stripVersionSuffix } from '@/tools/utils'
|
||||
|
||||
const logger = createLogger('CopilotChatPayload')
|
||||
|
||||
/** Inputs for building the copilot backend request payload. */
export interface BuildPayloadParams {
  message: string
  workflowId: string
  userId: string
  // Client-generated id of the user message; forwarded as `messageId`
  userMessageId: string
  // UI mode; 'agent' is normalized to 'build' before use
  mode: string
  model: string
  conversationHistory?: unknown[]
  contexts?: Array<{ type: string; content: string }>
  fileAttachments?: Array<{ id: string; key: string; size: number; [key: string]: unknown }>
  commands?: string[]
  chatId?: string
  // NOTE(review): not read by buildCopilotRequestPayload in this file — confirm still needed
  implicitFeedback?: string
}
|
||||
|
||||
/** Tool schema entry forwarded to the copilot backend. */
interface ToolSchema {
  name: string
  description: string
  input_schema: Record<string, unknown>
  // Presumably signals lazy schema loading on the backend — named only; confirm
  defer_loading?: boolean
  executeLocally?: boolean
  oauth?: { required: boolean; provider: string }
}

/** Credentials bundle attached to build-mode payloads. */
interface CredentialsPayload {
  // Keyed by OAuth provider id
  oauth: Record<
    string,
    { accessToken: string; accountId: string; name: string; expiresAt?: string }
  >
  // Names of configured environment variables (names only, not values)
  apiKeys: string[]
  metadata?: {
    connectedOAuth: Array<{ provider: string; name: string; scopes?: string[] }>
    configuredApiKeys: string[]
  }
}
|
||||
|
||||
function buildProviderConfig(selectedModel: string): CopilotProviderConfig | undefined {
|
||||
const defaults = getCopilotModel('chat')
|
||||
const envModel = env.COPILOT_MODEL || defaults.model
|
||||
const providerEnv = env.COPILOT_PROVIDER
|
||||
|
||||
if (!providerEnv) return undefined
|
||||
|
||||
if (providerEnv === 'azure-openai') {
|
||||
return {
|
||||
provider: 'azure-openai',
|
||||
model: envModel,
|
||||
apiKey: env.AZURE_OPENAI_API_KEY,
|
||||
apiVersion: 'preview',
|
||||
endpoint: env.AZURE_OPENAI_ENDPOINT,
|
||||
}
|
||||
}
|
||||
|
||||
if (providerEnv === 'azure-anthropic') {
|
||||
return {
|
||||
provider: 'azure-anthropic',
|
||||
model: envModel,
|
||||
apiKey: env.AZURE_ANTHROPIC_API_KEY,
|
||||
apiVersion: env.AZURE_ANTHROPIC_API_VERSION,
|
||||
endpoint: env.AZURE_ANTHROPIC_ENDPOINT,
|
||||
}
|
||||
}
|
||||
|
||||
if (providerEnv === 'vertex') {
|
||||
return {
|
||||
provider: 'vertex',
|
||||
model: envModel,
|
||||
apiKey: env.COPILOT_API_KEY,
|
||||
vertexProject: env.VERTEX_PROJECT,
|
||||
vertexLocation: env.VERTEX_LOCATION,
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
provider: providerEnv as Exclude<string, 'azure-openai' | 'vertex'>,
|
||||
model: selectedModel,
|
||||
apiKey: env.COPILOT_API_KEY,
|
||||
} as CopilotProviderConfig
|
||||
}
|
||||
|
||||
/**
 * Build the request payload for the copilot backend.
 *
 * Normalizes the UI mode ('agent' is treated as 'build', and 'build' is sent
 * over the wire as 'agent'), processes file attachments, and — in build mode
 * only — gathers user credentials and integration tool schemas. Optional
 * sections are omitted from the payload when empty.
 */
export async function buildCopilotRequestPayload(
  params: BuildPayloadParams,
  options: {
    providerConfig?: CopilotProviderConfig
    selectedModel: string
  }
): Promise<Record<string, unknown>> {
  const {
    message,
    workflowId,
    userId,
    userMessageId,
    mode,
    contexts,
    fileAttachments,
    commands,
    chatId,
  } = params

  const selectedModel = options.selectedModel
  // Caller-supplied provider config wins; otherwise derive from env
  const providerConfig = options.providerConfig ?? buildProviderConfig(selectedModel)

  // 'agent' (UI) -> 'build' (internal) -> 'agent' (wire)
  const effectiveMode = mode === 'agent' ? 'build' : mode
  const transportMode = effectiveMode === 'build' ? 'agent' : effectiveMode

  const processedFileContents = await processFileAttachments(fileAttachments ?? [], userId)

  const integrationTools: ToolSchema[] = []
  let credentials: CredentialsPayload | null = null

  if (effectiveMode === 'build') {
    // function_execute sandbox tool is now defined in Go — no need to send it

    // Best effort: a credentials failure degrades the payload, never aborts it
    try {
      const rawCredentials = await getCredentialsServerTool.execute({ workflowId }, { userId })

      const oauthMap: CredentialsPayload['oauth'] = {}
      const connectedOAuth: Array<{ provider: string; name: string; scopes?: string[] }> = []
      // Only credentials with a live access token are forwarded
      for (const cred of rawCredentials?.oauth?.connected?.credentials ?? []) {
        if (cred.accessToken) {
          oauthMap[cred.provider] = {
            accessToken: cred.accessToken,
            accountId: cred.id,
            name: cred.name,
          }
          connectedOAuth.push({ provider: cred.provider, name: cred.name })
        }
      }

      credentials = {
        oauth: oauthMap,
        apiKeys: rawCredentials?.environment?.variableNames ?? [],
        metadata: {
          connectedOAuth,
          configuredApiKeys: rawCredentials?.environment?.variableNames ?? [],
        },
      }
    } catch (error) {
      logger.warn('Failed to fetch credentials for build payload', {
        error: error instanceof Error ? error.message : String(error),
      })
    }

    // Best effort: individual tool-schema failures skip just that tool
    try {
      const { createUserToolSchema } = await import('@/tools/params')
      const latestTools = getLatestVersionTools(tools)

      for (const [toolId, toolConfig] of Object.entries(latestTools)) {
        try {
          const userSchema = createUserToolSchema(toolConfig)
          // Version suffix is stripped so the backend sees stable tool names
          const strippedName = stripVersionSuffix(toolId)
          integrationTools.push({
            name: strippedName,
            description: toolConfig.description || toolConfig.name || strippedName,
            input_schema: userSchema as unknown as Record<string, unknown>,
            defer_loading: true,
            ...(toolConfig.oauth?.required && {
              oauth: {
                required: true,
                provider: toolConfig.oauth.provider,
              },
            }),
          })
        } catch (toolError) {
          logger.warn('Failed to build schema for tool, skipping', {
            toolId,
            error: toolError instanceof Error ? toolError.message : String(toolError),
          })
        }
      }
    } catch (error) {
      logger.warn('Failed to build tool schemas for payload', {
        error: error instanceof Error ? error.message : String(error),
      })
    }
  }

  // Optional sections are spread in only when non-empty
  return {
    message,
    workflowId,
    userId,
    model: selectedModel,
    mode: transportMode,
    messageId: userMessageId,
    version: SIM_AGENT_VERSION,
    ...(providerConfig ? { provider: providerConfig } : {}),
    ...(contexts && contexts.length > 0 ? { context: contexts } : {}),
    ...(chatId ? { chatId } : {}),
    ...(processedFileContents.length > 0 ? { fileAttachments: processedFileContents } : {}),
    ...(integrationTools.length > 0 ? { integrationTools } : {}),
    ...(credentials ? { credentials } : {}),
    ...(commands && commands.length > 0 ? { commands } : {}),
  }
}
|
||||
147
apps/sim/lib/copilot/client-sse/content-blocks.ts
Normal file
147
apps/sim/lib/copilot/client-sse/content-blocks.ts
Normal file
@@ -0,0 +1,147 @@
|
||||
import type {
|
||||
ChatContext,
|
||||
CopilotMessage,
|
||||
MessageFileAttachment,
|
||||
} from '@/stores/panel/copilot/types'
|
||||
import type { ClientContentBlock, ClientStreamingContext } from './types'
|
||||
|
||||
// Content-block type discriminators used throughout this module
const TEXT_BLOCK_TYPE = 'text'
const THINKING_BLOCK_TYPE = 'thinking'
// Inline options tag appended so the UI offers a one-click "Continue"
const CONTINUE_OPTIONS_TAG = '<options>{"1":"Continue"}</options>'
|
||||
|
||||
export function createUserMessage(
|
||||
content: string,
|
||||
fileAttachments?: MessageFileAttachment[],
|
||||
contexts?: ChatContext[],
|
||||
messageId?: string
|
||||
): CopilotMessage {
|
||||
return {
|
||||
id: messageId || crypto.randomUUID(),
|
||||
role: 'user',
|
||||
content,
|
||||
timestamp: new Date().toISOString(),
|
||||
...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }),
|
||||
...(contexts && contexts.length > 0 && { contexts }),
|
||||
...(contexts &&
|
||||
contexts.length > 0 && {
|
||||
contentBlocks: [{ type: 'contexts', contexts, timestamp: Date.now() }],
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
export function createStreamingMessage(): CopilotMessage {
|
||||
return {
|
||||
id: crypto.randomUUID(),
|
||||
role: 'assistant',
|
||||
content: '',
|
||||
timestamp: new Date().toISOString(),
|
||||
}
|
||||
}
|
||||
|
||||
export function createErrorMessage(
|
||||
messageId: string,
|
||||
content: string,
|
||||
errorType?: 'usage_limit' | 'unauthorized' | 'forbidden' | 'rate_limit' | 'upgrade_required'
|
||||
): CopilotMessage {
|
||||
return {
|
||||
id: messageId,
|
||||
role: 'assistant',
|
||||
content,
|
||||
timestamp: new Date().toISOString(),
|
||||
contentBlocks: [
|
||||
{
|
||||
type: 'text',
|
||||
content,
|
||||
timestamp: Date.now(),
|
||||
},
|
||||
],
|
||||
errorType,
|
||||
}
|
||||
}
|
||||
|
||||
export function appendTextBlock(context: ClientStreamingContext, text: string) {
|
||||
if (!text) return
|
||||
context.accumulatedContent += text
|
||||
if (context.currentTextBlock && context.contentBlocks.length > 0) {
|
||||
const lastBlock = context.contentBlocks[context.contentBlocks.length - 1]
|
||||
if (lastBlock.type === TEXT_BLOCK_TYPE && lastBlock === context.currentTextBlock) {
|
||||
lastBlock.content += text
|
||||
return
|
||||
}
|
||||
}
|
||||
const newBlock: ClientContentBlock = { type: 'text', content: text, timestamp: Date.now() }
|
||||
context.currentTextBlock = newBlock
|
||||
context.contentBlocks.push(newBlock)
|
||||
}
|
||||
|
||||
export function appendContinueOption(content: string): string {
|
||||
if (/<options>/i.test(content)) return content
|
||||
const suffix = content.trim().length > 0 ? '\n\n' : ''
|
||||
return `${content}${suffix}${CONTINUE_OPTIONS_TAG}`
|
||||
}
|
||||
|
||||
export function appendContinueOptionBlock(blocks: ClientContentBlock[]): ClientContentBlock[] {
|
||||
if (!Array.isArray(blocks)) return blocks
|
||||
const hasOptions = blocks.some(
|
||||
(block) =>
|
||||
block?.type === TEXT_BLOCK_TYPE &&
|
||||
typeof block.content === 'string' &&
|
||||
/<options>/i.test(block.content)
|
||||
)
|
||||
if (hasOptions) return blocks
|
||||
return [
|
||||
...blocks,
|
||||
{
|
||||
type: TEXT_BLOCK_TYPE,
|
||||
content: CONTINUE_OPTIONS_TAG,
|
||||
timestamp: Date.now(),
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
export function stripContinueOption(content: string): string {
|
||||
if (!content || !content.includes(CONTINUE_OPTIONS_TAG)) return content
|
||||
const next = content.replace(CONTINUE_OPTIONS_TAG, '')
|
||||
return next.replace(/\n{2,}\s*$/g, '\n').trimEnd()
|
||||
}
|
||||
|
||||
export function stripContinueOptionFromBlocks(blocks: ClientContentBlock[]): ClientContentBlock[] {
|
||||
if (!Array.isArray(blocks)) return blocks
|
||||
return blocks.flatMap((block) => {
|
||||
if (
|
||||
block?.type === TEXT_BLOCK_TYPE &&
|
||||
typeof block.content === 'string' &&
|
||||
block.content.includes(CONTINUE_OPTIONS_TAG)
|
||||
) {
|
||||
const nextContent = stripContinueOption(block.content)
|
||||
if (!nextContent.trim()) return []
|
||||
return [{ ...block, content: nextContent }]
|
||||
}
|
||||
return [block]
|
||||
})
|
||||
}
|
||||
|
||||
export function beginThinkingBlock(context: ClientStreamingContext) {
|
||||
if (!context.currentThinkingBlock) {
|
||||
const newBlock: ClientContentBlock = {
|
||||
type: 'thinking',
|
||||
content: '',
|
||||
timestamp: Date.now(),
|
||||
startTime: Date.now(),
|
||||
}
|
||||
context.currentThinkingBlock = newBlock
|
||||
context.contentBlocks.push(newBlock)
|
||||
}
|
||||
context.isInThinkingBlock = true
|
||||
context.currentTextBlock = null
|
||||
}
|
||||
|
||||
export function finalizeThinkingBlock(context: ClientStreamingContext) {
|
||||
if (context.currentThinkingBlock) {
|
||||
context.currentThinkingBlock.duration =
|
||||
Date.now() - (context.currentThinkingBlock.startTime || Date.now())
|
||||
}
|
||||
context.isInThinkingBlock = false
|
||||
context.currentThinkingBlock = null
|
||||
context.currentTextBlock = null
|
||||
}
|
||||
935
apps/sim/lib/copilot/client-sse/handlers.ts
Normal file
935
apps/sim/lib/copilot/client-sse/handlers.ts
Normal file
@@ -0,0 +1,935 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { COPILOT_CONFIRM_API_PATH, STREAM_STORAGE_KEY } from '@/lib/copilot/constants'
|
||||
import { asRecord } from '@/lib/copilot/orchestrator/sse-utils'
|
||||
import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
|
||||
import {
|
||||
isBackgroundState,
|
||||
isRejectedState,
|
||||
isReviewState,
|
||||
resolveToolDisplay,
|
||||
} from '@/lib/copilot/store-utils'
|
||||
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
|
||||
import type { CopilotStore, CopilotStreamInfo, CopilotToolCall } from '@/stores/panel/copilot/types'
|
||||
import { useVariablesStore } from '@/stores/panel/variables/store'
|
||||
import { useEnvironmentStore } from '@/stores/settings/environment/store'
|
||||
import { useWorkflowDiffStore } from '@/stores/workflow-diff/store'
|
||||
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
|
||||
import type { WorkflowState } from '@/stores/workflows/workflow/types'
|
||||
import { appendTextBlock, beginThinkingBlock, finalizeThinkingBlock } from './content-blocks'
|
||||
import { CLIENT_EXECUTABLE_RUN_TOOLS, executeRunToolOnClient } from './run-tool-execution'
|
||||
import type { ClientContentBlock, ClientStreamingContext } from './types'
|
||||
|
||||
const logger = createLogger('CopilotClientSseHandlers')
|
||||
const TEXT_BLOCK_TYPE = 'text'

// Streaming UI batching: batches are delayed at least MIN_BATCH_INTERVAL ms,
// and flushed immediately when the previous batch is older than
// MAX_BATCH_INTERVAL ms or the queue reaches MAX_QUEUE_SIZE entries
const MAX_BATCH_INTERVAL = 50
const MIN_BATCH_INTERVAL = 16
const MAX_QUEUE_SIZE = 5
|
||||
|
||||
/**
|
||||
* Send an auto-accept confirmation to the server for auto-allowed tools.
|
||||
* The server-side orchestrator polls Redis for this decision.
|
||||
*/
|
||||
export function sendAutoAcceptConfirmation(toolCallId: string): void {
|
||||
fetch(COPILOT_CONFIRM_API_PATH, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ toolCallId, status: 'accepted' }),
|
||||
}).catch((error) => {
|
||||
logger.warn('Failed to send auto-accept confirmation', {
|
||||
toolCallId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void {
|
||||
if (typeof window === 'undefined') return
|
||||
try {
|
||||
if (!info) {
|
||||
window.sessionStorage.removeItem(STREAM_STORAGE_KEY)
|
||||
return
|
||||
}
|
||||
window.sessionStorage.setItem(STREAM_STORAGE_KEY, JSON.stringify(info))
|
||||
} catch (error) {
|
||||
logger.warn('Failed to write active stream to storage', {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Store setter accepted by the SSE handlers: a partial state object or an
// updater function from current state to a partial
type StoreSet = (
  partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)
) => void

/** Handler invoked for a single SSE event during a copilot stream. */
export type SSEHandler = (
  data: SSEEvent,
  context: ClientStreamingContext,
  get: () => CopilotStore,
  set: StoreSet
) => Promise<void> | void
|
||||
|
||||
// Pending per-message streaming contexts awaiting a batched store update
const streamingUpdateQueue = new Map<string, ClientStreamingContext>()
// Handle of the scheduled rAF flush, or null when none is pending
let streamingUpdateRAF: number | null = null
// performance.now() timestamp of the last flushed batch
let lastBatchTime = 0

/** Cancel any scheduled flush and drop all queued streaming updates. */
export function stopStreamingUpdates() {
  if (streamingUpdateRAF !== null) {
    cancelAnimationFrame(streamingUpdateRAF)
    streamingUpdateRAF = null
  }
  streamingUpdateQueue.clear()
}
|
||||
|
||||
function createOptimizedContentBlocks(contentBlocks: ClientContentBlock[]): ClientContentBlock[] {
|
||||
const result: ClientContentBlock[] = new Array(contentBlocks.length)
|
||||
for (let i = 0; i < contentBlocks.length; i++) {
|
||||
const block = contentBlocks[i]
|
||||
result[i] = { ...block }
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
export function flushStreamingUpdates(set: StoreSet) {
|
||||
if (streamingUpdateRAF !== null) {
|
||||
cancelAnimationFrame(streamingUpdateRAF)
|
||||
streamingUpdateRAF = null
|
||||
}
|
||||
if (streamingUpdateQueue.size === 0) return
|
||||
|
||||
const updates = new Map(streamingUpdateQueue)
|
||||
streamingUpdateQueue.clear()
|
||||
|
||||
set((state: CopilotStore) => {
|
||||
if (updates.size === 0) return state
|
||||
return {
|
||||
messages: state.messages.map((msg) => {
|
||||
const update = updates.get(msg.id)
|
||||
if (update) {
|
||||
return {
|
||||
...msg,
|
||||
content: '',
|
||||
contentBlocks:
|
||||
update.contentBlocks.length > 0
|
||||
? createOptimizedContentBlocks(update.contentBlocks)
|
||||
: [],
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/**
 * Queue a streaming context for a batched store update.
 *
 * Updates are coalesced per message id and applied on the next animation
 * frame. A flush is scheduled immediately when the queue is full or the last
 * batch is older than MAX_BATCH_INTERVAL; otherwise it is delayed so batches
 * are at least MIN_BATCH_INTERVAL apart.
 */
export function updateStreamingMessage(set: StoreSet, context: ClientStreamingContext) {
  if (context.suppressStreamingUpdates) return
  const now = performance.now()
  // Later contexts for the same message overwrite earlier ones
  streamingUpdateQueue.set(context.messageId, context)
  const timeSinceLastBatch = now - lastBatchTime
  const shouldFlushImmediately =
    streamingUpdateQueue.size >= MAX_QUEUE_SIZE || timeSinceLastBatch > MAX_BATCH_INTERVAL

  // Only one flush may be in flight at a time
  if (streamingUpdateRAF === null) {
    const scheduleUpdate = () => {
      streamingUpdateRAF = requestAnimationFrame(() => {
        // Snapshot and clear the queue before touching the store
        const updates = new Map(streamingUpdateQueue)
        streamingUpdateQueue.clear()
        streamingUpdateRAF = null
        lastBatchTime = performance.now()
        set((state: CopilotStore) => {
          if (updates.size === 0) return state
          const messages = state.messages
          const lastMessage = messages[messages.length - 1]
          const lastMessageUpdate = lastMessage ? updates.get(lastMessage.id) : null
          // Fast path: single update targeting the last message (the common
          // streaming case) avoids mapping the whole message array
          if (updates.size === 1 && lastMessageUpdate) {
            const newMessages = [...messages]
            newMessages[messages.length - 1] = {
              ...lastMessage,
              content: '',
              contentBlocks:
                lastMessageUpdate.contentBlocks.length > 0
                  ? createOptimizedContentBlocks(lastMessageUpdate.contentBlocks)
                  : [],
            }
            return { messages: newMessages }
          }
          return {
            messages: messages.map((msg) => {
              const update = updates.get(msg.id)
              if (update) {
                return {
                  ...msg,
                  content: '',
                  contentBlocks:
                    update.contentBlocks.length > 0
                      ? createOptimizedContentBlocks(update.contentBlocks)
                      : [],
                }
              }
              return msg
            }),
          }
        })
      })
    }
    if (shouldFlushImmediately) scheduleUpdate()
    else setTimeout(scheduleUpdate, Math.max(0, MIN_BATCH_INTERVAL - timeSinceLastBatch))
  }
}
|
||||
|
||||
export function upsertToolCallBlock(context: ClientStreamingContext, toolCall: CopilotToolCall) {
|
||||
let found = false
|
||||
for (let i = 0; i < context.contentBlocks.length; i++) {
|
||||
const b = context.contentBlocks[i]
|
||||
if (b.type === 'tool_call' && b.toolCall?.id === toolCall.id) {
|
||||
context.contentBlocks[i] = { ...b, toolCall }
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
context.contentBlocks.push({ type: 'tool_call', toolCall, timestamp: Date.now() })
|
||||
}
|
||||
}
|
||||
|
||||
// Remove <thinking>/</thinking> tags from streamed text. The second pass
// differs only in using [^&]* instead of [^>]* — NOTE(review): presumably a
// fallback for tags the first pass missed, but because [^&]* can span '>'
// characters it may delete more than a single tag; confirm intent.
function stripThinkingTags(text: string): string {
  return text.replace(/<\/?thinking[^>]*>/gi, '').replace(/<\/?thinking[^&]*>/gi, '')
}
|
||||
|
||||
function appendThinkingContent(context: ClientStreamingContext, text: string) {
|
||||
if (!text) return
|
||||
const cleanedText = stripThinkingTags(text)
|
||||
if (!cleanedText) return
|
||||
if (context.currentThinkingBlock) {
|
||||
context.currentThinkingBlock.content += cleanedText
|
||||
} else {
|
||||
const newBlock: ClientContentBlock = {
|
||||
type: 'thinking',
|
||||
content: cleanedText,
|
||||
timestamp: Date.now(),
|
||||
startTime: Date.now(),
|
||||
}
|
||||
context.currentThinkingBlock = newBlock
|
||||
context.contentBlocks.push(newBlock)
|
||||
}
|
||||
context.isInThinkingBlock = true
|
||||
context.currentTextBlock = null
|
||||
}
|
||||
|
||||
export const sseHandlers: Record<string, SSEHandler> = {
|
||||
chat_id: async (data, context, get, set) => {
|
||||
context.newChatId = data.chatId
|
||||
const { currentChat, activeStream } = get()
|
||||
if (!currentChat && context.newChatId) {
|
||||
await get().handleNewChatCreation(context.newChatId)
|
||||
}
|
||||
if (activeStream && context.newChatId && !activeStream.chatId) {
|
||||
const updatedStream = { ...activeStream, chatId: context.newChatId }
|
||||
set({ activeStream: updatedStream })
|
||||
writeActiveStreamToStorage(updatedStream)
|
||||
}
|
||||
},
|
||||
title_updated: (_data, _context, get, set) => {
|
||||
const title = _data.title
|
||||
if (!title) return
|
||||
const { currentChat, chats } = get()
|
||||
if (currentChat) {
|
||||
set({
|
||||
currentChat: { ...currentChat, title },
|
||||
chats: chats.map((c) => (c.id === currentChat.id ? { ...c, title } : c)),
|
||||
})
|
||||
}
|
||||
},
|
||||
  /**
   * Handles a `tool_result` SSE event: moves the matching tool call to a
   * terminal state (success / rejected / error), mirrors that state into the
   * streaming message's content blocks, and runs tool-specific follow-up
   * hydration (todos, workflow diff, deployment status, env/workflow vars,
   * API keys). All failures are logged, never thrown.
   */
  tool_result: (data, context, get, set) => {
    try {
      const eventData = asRecord(data?.data)
      // The tool call id may arrive at the top level or nested in the payload.
      const toolCallId: string | undefined =
        data?.toolCallId || (eventData.id as string | undefined)
      const success: boolean | undefined = data?.success
      const failedDependency: boolean = data?.failedDependency === true
      const resultObj = asRecord(data?.result)
      const skipped: boolean = resultObj.skipped === true
      if (!toolCallId) return
      const { toolCallsById } = get()
      const current = toolCallsById[toolCallId]
      if (current) {
        // User-decided / review / background states are final from the
        // client's point of view; don't let a late server result override them.
        if (
          isRejectedState(current.state) ||
          isReviewState(current.state) ||
          isBackgroundState(current.state)
        ) {
          return
        }
        // failedDependency and skipped both render as "rejected" to the user.
        const targetState = success
          ? ClientToolCallState.success
          : failedDependency || skipped
            ? ClientToolCallState.rejected
            : ClientToolCallState.error
        const updatedMap = { ...toolCallsById }
        updatedMap[toolCallId] = {
          ...current,
          state: targetState,
          display: resolveToolDisplay(current.name, targetState, current.id, current.params),
        }
        set({ toolCallsById: updatedMap })

        // Todo checkoff: mark the referenced plan todo completed.
        if (targetState === ClientToolCallState.success && current.name === 'checkoff_todo') {
          try {
            const result = asRecord(data?.result) || asRecord(eventData.result)
            const input = asRecord(current.params || current.input)
            // Tool args and results use inconsistent key names; try all of them.
            const todoId = (input.id || input.todoId || result.id || result.todoId) as
              | string
              | undefined
            if (todoId) {
              get().updatePlanTodoStatus(todoId, 'completed')
            }
          } catch (error) {
            logger.warn('Failed to process checkoff_todo tool result', {
              error: error instanceof Error ? error.message : String(error),
              toolCallId,
            })
          }
        }

        // Todo progress: mark the referenced plan todo as executing.
        if (
          targetState === ClientToolCallState.success &&
          current.name === 'mark_todo_in_progress'
        ) {
          try {
            const result = asRecord(data?.result) || asRecord(eventData.result)
            const input = asRecord(current.params || current.input)
            const todoId = (input.id || input.todoId || result.id || result.todoId) as
              | string
              | undefined
            if (todoId) {
              get().updatePlanTodoStatus(todoId, 'executing')
            }
          } catch (error) {
            logger.warn('Failed to process mark_todo_in_progress tool result', {
              error: error instanceof Error ? error.message : String(error),
              toolCallId,
            })
          }
        }

        // edit_workflow: if the result carries a workflow state, feed it to the
        // diff store as proposed changes (async, errors logged).
        if (current.name === 'edit_workflow') {
          try {
            const resultPayload = asRecord(
              data?.result || eventData.result || eventData.data || data?.data
            )
            const workflowState = asRecord(resultPayload?.workflowState)
            const hasWorkflowState = !!resultPayload?.workflowState
            logger.info('[SSE] edit_workflow result received', {
              hasWorkflowState,
              blockCount: hasWorkflowState ? Object.keys(workflowState.blocks ?? {}).length : 0,
              edgeCount: Array.isArray(workflowState.edges) ? workflowState.edges.length : 0,
            })
            if (hasWorkflowState) {
              const diffStore = useWorkflowDiffStore.getState()
              diffStore
                .setProposedChanges(resultPayload.workflowState as WorkflowState)
                .catch((err) => {
                  logger.error('[SSE] Failed to apply edit_workflow diff', {
                    error: err instanceof Error ? err.message : String(err),
                  })
                })
            }
          } catch (err) {
            logger.error('[SSE] edit_workflow result handling failed', {
              error: err instanceof Error ? err.message : String(err),
            })
          }
        }

        // Deploy tools: update deployment status in workflow registry
        if (
          targetState === ClientToolCallState.success &&
          (current.name === 'deploy_api' ||
            current.name === 'deploy_chat' ||
            current.name === 'deploy_mcp' ||
            current.name === 'redeploy')
        ) {
          try {
            const resultPayload = asRecord(
              data?.result || eventData.result || eventData.data || data?.data
            )
            const input = asRecord(current.params)
            // Prefer the id from the result, then the tool args, then the
            // currently active workflow.
            const workflowId =
              (resultPayload?.workflowId as string) ||
              (input?.workflowId as string) ||
              useWorkflowRegistry.getState().activeWorkflowId
            // Treat "deployed" as the default unless explicitly false.
            const isDeployed = resultPayload?.isDeployed !== false
            if (workflowId) {
              useWorkflowRegistry
                .getState()
                .setDeploymentStatus(workflowId, isDeployed, isDeployed ? new Date() : undefined)
              logger.info('[SSE] Updated deployment status from tool result', {
                toolName: current.name,
                workflowId,
                isDeployed,
              })
            }
          } catch (err) {
            logger.warn('[SSE] Failed to hydrate deployment status', {
              error: err instanceof Error ? err.message : String(err),
            })
          }
        }

        // Environment variables: reload store after successful set
        if (
          targetState === ClientToolCallState.success &&
          current.name === 'set_environment_variables'
        ) {
          try {
            useEnvironmentStore.getState().loadEnvironmentVariables()
            logger.info('[SSE] Triggered environment variables reload')
          } catch (err) {
            logger.warn('[SSE] Failed to reload environment variables', {
              error: err instanceof Error ? err.message : String(err),
            })
          }
        }

        // Workflow variables: reload store after successful set
        if (
          targetState === ClientToolCallState.success &&
          current.name === 'set_global_workflow_variables'
        ) {
          try {
            const input = asRecord(current.params)
            const workflowId =
              (input?.workflowId as string) || useWorkflowRegistry.getState().activeWorkflowId
            if (workflowId) {
              useVariablesStore.getState().loadForWorkflow(workflowId)
              logger.info('[SSE] Triggered workflow variables reload', { workflowId })
            }
          } catch (err) {
            logger.warn('[SSE] Failed to reload workflow variables', {
              error: err instanceof Error ? err.message : String(err),
            })
          }
        }

        // Generate API key: update deployment status with the new key
        if (targetState === ClientToolCallState.success && current.name === 'generate_api_key') {
          try {
            const resultPayload = asRecord(
              data?.result || eventData.result || eventData.data || data?.data
            )
            const input = asRecord(current.params)
            const workflowId =
              (input?.workflowId as string) || useWorkflowRegistry.getState().activeWorkflowId
            const apiKey = (resultPayload?.apiKey || resultPayload?.key) as string | undefined
            if (workflowId) {
              // Preserve the existing deployed/deployedAt fields; only the key changes.
              const existingStatus = useWorkflowRegistry
                .getState()
                .getWorkflowDeploymentStatus(workflowId)
              useWorkflowRegistry
                .getState()
                .setDeploymentStatus(
                  workflowId,
                  existingStatus?.isDeployed ?? false,
                  existingStatus?.deployedAt,
                  apiKey
                )
              logger.info('[SSE] Updated deployment status with API key', {
                workflowId,
                hasKey: !!apiKey,
              })
            }
          } catch (err) {
            logger.warn('[SSE] Failed to hydrate API key status', {
              error: err instanceof Error ? err.message : String(err),
            })
          }
        }
      }

      // Mirror the terminal state into the streaming message's content blocks
      // so the rendered transcript matches the store.
      for (let i = 0; i < context.contentBlocks.length; i++) {
        const b = context.contentBlocks[i]
        if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) {
          // Same final-state guard as above, applied to the block copy.
          if (
            isRejectedState(b.toolCall?.state) ||
            isReviewState(b.toolCall?.state) ||
            isBackgroundState(b.toolCall?.state)
          )
            break
          const targetState = success
            ? ClientToolCallState.success
            : failedDependency || skipped
              ? ClientToolCallState.rejected
              : ClientToolCallState.error
          context.contentBlocks[i] = {
            ...b,
            toolCall: {
              ...b.toolCall,
              state: targetState,
              display: resolveToolDisplay(
                b.toolCall?.name,
                targetState,
                toolCallId,
                b.toolCall?.params
              ),
            },
          }
          break
        }
      }
      updateStreamingMessage(set, context)
    } catch (error) {
      logger.warn('Failed to process tool_result SSE event', {
        error: error instanceof Error ? error.message : String(error),
      })
    }
  },
|
||||
  /**
   * Handles a `tool_error` SSE event: moves the matching tool call to
   * `error` (or `rejected` when the failure was a dependency failure) in both
   * the store map and the streaming message's content blocks. Final states
   * (rejected / review / background) are never overridden.
   */
  tool_error: (data, context, get, set) => {
    try {
      const errorData = asRecord(data?.data)
      // The tool call id may arrive at the top level or nested in the payload.
      const toolCallId: string | undefined =
        data?.toolCallId || (errorData.id as string | undefined)
      const failedDependency: boolean = data?.failedDependency === true
      if (!toolCallId) return
      const { toolCallsById } = get()
      const current = toolCallsById[toolCallId]
      if (current) {
        // Don't let a late server error override a user/final state.
        if (
          isRejectedState(current.state) ||
          isReviewState(current.state) ||
          isBackgroundState(current.state)
        ) {
          return
        }
        // Dependency failures render as "rejected"; everything else as "error".
        const targetState = failedDependency
          ? ClientToolCallState.rejected
          : ClientToolCallState.error
        const updatedMap = { ...toolCallsById }
        updatedMap[toolCallId] = {
          ...current,
          state: targetState,
          display: resolveToolDisplay(current.name, targetState, current.id, current.params),
        }
        set({ toolCallsById: updatedMap })
      }
      // Mirror the terminal state into the streaming message's content blocks.
      for (let i = 0; i < context.contentBlocks.length; i++) {
        const b = context.contentBlocks[i]
        if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) {
          if (
            isRejectedState(b.toolCall?.state) ||
            isReviewState(b.toolCall?.state) ||
            isBackgroundState(b.toolCall?.state)
          )
            break
          const targetState = failedDependency
            ? ClientToolCallState.rejected
            : ClientToolCallState.error
          context.contentBlocks[i] = {
            ...b,
            toolCall: {
              ...b.toolCall,
              state: targetState,
              display: resolveToolDisplay(
                b.toolCall?.name,
                targetState,
                toolCallId,
                b.toolCall?.params
              ),
            },
          }
          break
        }
      }
      updateStreamingMessage(set, context)
    } catch (error) {
      logger.warn('Failed to process tool_error SSE event', {
        error: error instanceof Error ? error.message : String(error),
      })
    }
  },
|
||||
tool_generating: (data, context, get, set) => {
|
||||
const { toolCallId, toolName } = data
|
||||
if (!toolCallId || !toolName) return
|
||||
const { toolCallsById } = get()
|
||||
|
||||
if (!toolCallsById[toolCallId]) {
|
||||
const isAutoAllowed = get().isToolAutoAllowed(toolName)
|
||||
const initialState = isAutoAllowed
|
||||
? ClientToolCallState.executing
|
||||
: ClientToolCallState.pending
|
||||
const tc: CopilotToolCall = {
|
||||
id: toolCallId,
|
||||
name: toolName,
|
||||
state: initialState,
|
||||
display: resolveToolDisplay(toolName, initialState, toolCallId),
|
||||
}
|
||||
const updated = { ...toolCallsById, [toolCallId]: tc }
|
||||
set({ toolCallsById: updated })
|
||||
logger.info('[toolCallsById] map updated', updated)
|
||||
|
||||
upsertToolCallBlock(context, tc)
|
||||
updateStreamingMessage(set, context)
|
||||
}
|
||||
},
|
||||
  /**
   * Handles a `tool_call` SSE event (possibly partial): upserts the tool call
   * into the store, then — for complete events only — auto-confirms
   * auto-allowed tools, kicks off client-side execution for run tools, and
   * opens the OAuth connect modal for `oauth_request_access`.
   */
  tool_call: (data, context, get, set) => {
    const toolData = asRecord(data?.data)
    // id/name may arrive nested in the payload or at the top level.
    const id: string | undefined = (toolData.id as string | undefined) || data?.toolCallId
    const name: string | undefined = (toolData.name as string | undefined) || data?.toolName
    if (!id) return
    const args = toolData.arguments as Record<string, unknown> | undefined
    const isPartial = toolData.partial === true
    const { toolCallsById } = get()

    const existing = toolCallsById[id]
    const toolName = name || existing?.name || 'unknown_tool'
    const isAutoAllowed = get().isToolAutoAllowed(toolName)
    let initialState = isAutoAllowed ? ClientToolCallState.executing : ClientToolCallState.pending

    // Avoid flickering back to pending on partial/duplicate events once a tool is executing.
    if (
      existing?.state === ClientToolCallState.executing &&
      initialState === ClientToolCallState.pending
    ) {
      initialState = ClientToolCallState.executing
    }

    // Merge into the existing entry when present; otherwise create a new one.
    // Params are only overwritten when this event actually carries arguments.
    const next: CopilotToolCall = existing
      ? {
          ...existing,
          name: toolName,
          state: initialState,
          ...(args ? { params: args } : {}),
          display: resolveToolDisplay(toolName, initialState, id, args || existing.params),
        }
      : {
          id,
          name: toolName,
          state: initialState,
          ...(args ? { params: args } : {}),
          display: resolveToolDisplay(toolName, initialState, id, args),
        }
    const updated = { ...toolCallsById, [id]: next }
    set({ toolCallsById: updated })
    logger.info(`[toolCallsById] → ${initialState}`, { id, name: toolName, params: args })

    upsertToolCallBlock(context, next)
    updateStreamingMessage(set, context)

    // Partial events only update UI state; side effects wait for the full call.
    if (isPartial) {
      return
    }

    // Auto-allowed tools: send confirmation to the server so it can proceed
    // without waiting for the user to click "Allow".
    if (isAutoAllowed) {
      sendAutoAcceptConfirmation(id)
    }

    // Client-executable run tools: execute on the client for real-time feedback
    // (block pulsing, console logs, stop button). The server defers execution
    // for these tools in interactive mode; the client reports back via mark-complete.
    if (
      CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName) &&
      initialState === ClientToolCallState.executing
    ) {
      executeRunToolOnClient(id, toolName, args || existing?.params || {})
    }

    // OAuth: dispatch event to open the OAuth connect modal
    if (toolName === 'oauth_request_access' && args && typeof window !== 'undefined') {
      try {
        window.dispatchEvent(
          new CustomEvent('open-oauth-connect', {
            detail: {
              // Args may use camelCase or snake_case keys depending on source.
              providerName: (args.providerName || args.provider_name || '') as string,
              serviceId: (args.serviceId || args.service_id || '') as string,
              providerId: (args.providerId || args.provider_id || '') as string,
              requiredScopes: (args.requiredScopes || args.required_scopes || []) as string[],
              newScopes: (args.newScopes || args.new_scopes || []) as string[],
            },
          })
        )
        logger.info('[SSE] Dispatched OAuth connect event', {
          providerId: args.providerId || args.provider_id,
          providerName: args.providerName || args.provider_name,
        })
      } catch (err) {
        logger.warn('[SSE] Failed to dispatch OAuth connect event', {
          error: err instanceof Error ? err.message : String(err),
        })
      }
    }

    return
  },
|
||||
reasoning: (data, context, _get, set) => {
|
||||
const phase = (data && (data.phase || data?.data?.phase)) as string | undefined
|
||||
if (phase === 'start') {
|
||||
beginThinkingBlock(context)
|
||||
updateStreamingMessage(set, context)
|
||||
return
|
||||
}
|
||||
if (phase === 'end') {
|
||||
finalizeThinkingBlock(context)
|
||||
updateStreamingMessage(set, context)
|
||||
return
|
||||
}
|
||||
const chunk: string = typeof data?.data === 'string' ? data.data : data?.content || ''
|
||||
if (!chunk) return
|
||||
appendThinkingContent(context, chunk)
|
||||
updateStreamingMessage(set, context)
|
||||
},
|
||||
  /**
   * Handles a `content` SSE event: buffers incoming text and incrementally
   * parses inline control tags out of it — <thinking>, <design_workflow>,
   * <marktodo>id</marktodo> and <checkofftodo>id</checkofftodo>. Text outside
   * tags goes to text blocks; thinking/design content goes to their own
   * accumulators; todo tags are consumed (side effect on plan state) and
   * removed from the rendered output. A trailing partial tag is kept in
   * `pendingContent` until the next chunk completes it.
   */
  content: (data, context, get, set) => {
    if (!data.data) return
    context.pendingContent += data.data

    let contentToProcess = context.pendingContent
    let hasProcessedContent = false

    const thinkingStartRegex = /<thinking>/
    const thinkingEndRegex = /<\/thinking>/
    const designWorkflowStartRegex = /<design_workflow>/
    const designWorkflowEndRegex = /<\/design_workflow>/

    // Splits off a trailing '<...' fragment that could be the start of one of
    // `tags`, so it can be buffered until the next chunk arrives.
    const splitTrailingPartialTag = (
      text: string,
      tags: string[]
    ): { text: string; remaining: string } => {
      const partialIndex = text.lastIndexOf('<')
      if (partialIndex < 0) {
        return { text, remaining: '' }
      }
      const possibleTag = text.substring(partialIndex)
      const matchesTagStart = tags.some((tag) => tag.startsWith(possibleTag))
      if (!matchesTagStart) {
        return { text, remaining: '' }
      }
      return {
        text: text.substring(0, partialIndex),
        remaining: possibleTag,
      }
    }

    while (contentToProcess.length > 0) {
      // Inside <design_workflow>: everything up to the closing tag is plan
      // content, streamed into streamingPlanContent as it arrives.
      if (context.isInDesignWorkflowBlock) {
        const endMatch = designWorkflowEndRegex.exec(contentToProcess)
        if (endMatch) {
          const designContent = contentToProcess.substring(0, endMatch.index)
          context.designWorkflowContent += designContent
          context.isInDesignWorkflowBlock = false

          logger.info('[design_workflow] Tag complete, setting plan content', {
            contentLength: context.designWorkflowContent.length,
          })
          set({ streamingPlanContent: context.designWorkflowContent })

          contentToProcess = contentToProcess.substring(endMatch.index + endMatch[0].length)
          hasProcessedContent = true
        } else {
          // No closing tag yet — consume all but a possible partial '</design_workflow>'.
          const { text, remaining } = splitTrailingPartialTag(contentToProcess, [
            '</design_workflow>',
          ])
          context.designWorkflowContent += text

          set({ streamingPlanContent: context.designWorkflowContent })

          contentToProcess = remaining
          hasProcessedContent = true
          if (remaining) {
            break
          }
        }
        continue
      }

      // Plain-text mode: look for design_workflow openings and todo tags first.
      if (!context.isInThinkingBlock && !context.isInDesignWorkflowBlock) {
        const designStartMatch = designWorkflowStartRegex.exec(contentToProcess)
        if (designStartMatch) {
          const textBeforeDesign = contentToProcess.substring(0, designStartMatch.index)
          if (textBeforeDesign) {
            appendTextBlock(context, textBeforeDesign)
            hasProcessedContent = true
          }
          context.isInDesignWorkflowBlock = true
          context.designWorkflowContent = ''
          contentToProcess = contentToProcess.substring(
            designStartMatch.index + designStartMatch[0].length
          )
          hasProcessedContent = true
          continue
        }

        // Find the earliest todo tag of either kind.
        const nextMarkIndex = contentToProcess.indexOf('<marktodo>')
        const nextCheckIndex = contentToProcess.indexOf('<checkofftodo>')
        const hasMark = nextMarkIndex >= 0
        const hasCheck = nextCheckIndex >= 0

        const nextTagIndex =
          hasMark && hasCheck
            ? Math.min(nextMarkIndex, nextCheckIndex)
            : hasMark
              ? nextMarkIndex
              : hasCheck
                ? nextCheckIndex
                : -1

        if (nextTagIndex >= 0) {
          const isMarkTodo = hasMark && nextMarkIndex === nextTagIndex
          const tagStart = isMarkTodo ? '<marktodo>' : '<checkofftodo>'
          const tagEnd = isMarkTodo ? '</marktodo>' : '</checkofftodo>'
          const closingIndex = contentToProcess.indexOf(tagEnd, nextTagIndex + tagStart.length)

          // Closing tag not streamed yet — wait for more content.
          if (closingIndex === -1) {
            break
          }

          const todoId = contentToProcess
            .substring(nextTagIndex + tagStart.length, closingIndex)
            .trim()
          logger.info(
            isMarkTodo ? '[TODO] Detected marktodo tag' : '[TODO] Detected checkofftodo tag',
            { todoId }
          )

          if (todoId) {
            try {
              get().updatePlanTodoStatus(todoId, isMarkTodo ? 'executing' : 'completed')
              logger.info(
                isMarkTodo
                  ? '[TODO] Successfully marked todo in progress'
                  : '[TODO] Successfully checked off todo',
                { todoId }
              )
            } catch (e) {
              logger.error(
                isMarkTodo
                  ? '[TODO] Failed to mark todo in progress'
                  : '[TODO] Failed to checkoff todo',
                { todoId, error: e }
              )
            }
          } else {
            logger.warn('[TODO] Empty todoId extracted from todo tag', { tagType: tagStart })
          }

          // Remove the tag from the rendered text, collapsing the surrounding
          // newlines to at most one so the transcript doesn't show a gap.
          let beforeTag = contentToProcess.substring(0, nextTagIndex)
          let afterTag = contentToProcess.substring(closingIndex + tagEnd.length)

          const hadNewlineBefore = /(\r?\n)+$/.test(beforeTag)
          const hadNewlineAfter = /^(\r?\n)+/.test(afterTag)

          beforeTag = beforeTag.replace(/(\r?\n)+$/, '')
          afterTag = afterTag.replace(/^(\r?\n)+/, '')

          contentToProcess =
            beforeTag + (hadNewlineBefore && hadNewlineAfter ? '\n' : '') + afterTag
          context.currentTextBlock = null
          hasProcessedContent = true
          continue
        }
      }

      if (context.isInThinkingBlock) {
        const endMatch = thinkingEndRegex.exec(contentToProcess)
        if (endMatch) {
          const thinkingContent = contentToProcess.substring(0, endMatch.index)
          appendThinkingContent(context, thinkingContent)
          finalizeThinkingBlock(context)
          contentToProcess = contentToProcess.substring(endMatch.index + endMatch[0].length)
          hasProcessedContent = true
        } else {
          // No closing tag yet — consume all but a possible partial '</thinking>'.
          const { text, remaining } = splitTrailingPartialTag(contentToProcess, ['</thinking>'])
          if (text) {
            appendThinkingContent(context, text)
            hasProcessedContent = true
          }
          contentToProcess = remaining
          if (remaining) {
            break
          }
        }
      } else {
        const startMatch = thinkingStartRegex.exec(contentToProcess)
        if (startMatch) {
          const textBeforeThinking = contentToProcess.substring(0, startMatch.index)
          if (textBeforeThinking) {
            appendTextBlock(context, textBeforeThinking)
            hasProcessedContent = true
          }
          context.isInThinkingBlock = true
          context.currentTextBlock = null
          contentToProcess = contentToProcess.substring(startMatch.index + startMatch[0].length)
          hasProcessedContent = true
        } else {
          // No tag found: emit everything as text, but hold back a trailing
          // '<...' fragment (within the last 50 chars) that might be a tag start.
          let partialTagIndex = contentToProcess.lastIndexOf('<')

          const partialMarkTodo = contentToProcess.lastIndexOf('<marktodo')
          const partialCheckoffTodo = contentToProcess.lastIndexOf('<checkofftodo')

          if (partialMarkTodo > partialTagIndex) {
            partialTagIndex = partialMarkTodo
          }
          if (partialCheckoffTodo > partialTagIndex) {
            partialTagIndex = partialCheckoffTodo
          }

          let textToAdd = contentToProcess
          let remaining = ''
          if (partialTagIndex >= 0 && partialTagIndex > contentToProcess.length - 50) {
            textToAdd = contentToProcess.substring(0, partialTagIndex)
            remaining = contentToProcess.substring(partialTagIndex)
          }
          if (textToAdd) {
            appendTextBlock(context, textToAdd)
            hasProcessedContent = true
          }
          contentToProcess = remaining
          break
        }
      }
    }

    // Whatever wasn't consumed (partial tags) waits for the next chunk.
    context.pendingContent = contentToProcess
    if (hasProcessedContent) {
      updateStreamingMessage(set, context)
    }
  },
|
||||
done: (_data, context) => {
|
||||
logger.info('[SSE] DONE EVENT RECEIVED', {
|
||||
doneEventCount: context.doneEventCount,
|
||||
data: _data,
|
||||
})
|
||||
context.doneEventCount++
|
||||
if (context.doneEventCount >= 1) {
|
||||
logger.info('[SSE] Setting streamComplete = true, stream will terminate')
|
||||
context.streamComplete = true
|
||||
}
|
||||
},
|
||||
error: (data, context, _get, set) => {
|
||||
logger.error('Stream error:', data.error)
|
||||
set((state: CopilotStore) => ({
|
||||
messages: state.messages.map((msg) =>
|
||||
msg.id === context.messageId
|
||||
? {
|
||||
...msg,
|
||||
content: context.accumulatedContent || 'An error occurred.',
|
||||
error: data.error,
|
||||
}
|
||||
: msg
|
||||
),
|
||||
}))
|
||||
context.streamComplete = true
|
||||
},
|
||||
stream_end: (_data, context, _get, set) => {
|
||||
if (context.pendingContent) {
|
||||
if (context.isInThinkingBlock && context.currentThinkingBlock) {
|
||||
appendThinkingContent(context, context.pendingContent)
|
||||
} else if (context.pendingContent.trim()) {
|
||||
appendTextBlock(context, context.pendingContent)
|
||||
}
|
||||
context.pendingContent = ''
|
||||
}
|
||||
finalizeThinkingBlock(context)
|
||||
updateStreamingMessage(set, context)
|
||||
},
|
||||
  // Fallback handler: unrecognized SSE event types are intentionally ignored.
  default: () => {},
|
||||
}
|
||||
3
apps/sim/lib/copilot/client-sse/index.ts
Normal file
3
apps/sim/lib/copilot/client-sse/index.ts
Normal file
@@ -0,0 +1,3 @@
|
||||
export type { SSEHandler } from './handlers'
|
||||
export { sseHandlers } from './handlers'
|
||||
export { applySseEvent, subAgentSSEHandlers } from './subagent-handlers'
|
||||
221
apps/sim/lib/copilot/client-sse/run-tool-execution.ts
Normal file
221
apps/sim/lib/copilot/client-sse/run-tool-execution.ts
Normal file
@@ -0,0 +1,221 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { v4 as uuidv4 } from 'uuid'
|
||||
import { COPILOT_CONFIRM_API_PATH } from '@/lib/copilot/constants'
|
||||
import { resolveToolDisplay } from '@/lib/copilot/store-utils'
|
||||
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
|
||||
import { executeWorkflowWithFullLogging } from '@/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils'
|
||||
import { useExecutionStore } from '@/stores/execution/store'
|
||||
import { useCopilotStore } from '@/stores/panel/copilot/store'
|
||||
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
|
||||
|
||||
const logger = createLogger('CopilotRunToolExecution')
|
||||
|
||||
/**
|
||||
* Run tools that execute client-side for real-time feedback
|
||||
* (block pulsing, logs, stop button, etc.).
|
||||
*/
|
||||
export const CLIENT_EXECUTABLE_RUN_TOOLS = new Set([
|
||||
'run_workflow',
|
||||
'run_workflow_until_block',
|
||||
'run_from_block',
|
||||
'run_block',
|
||||
])
|
||||
|
||||
/**
|
||||
* Execute a run tool on the client side using the streaming execute endpoint.
|
||||
* This gives full interactive feedback: block pulsing, console logs, stop button.
|
||||
*
|
||||
* Mirrors staging's RunWorkflowClientTool.handleAccept():
|
||||
* 1. Execute via executeWorkflowWithFullLogging
|
||||
* 2. Update client tool state directly (success/error)
|
||||
* 3. Report completion to server via /api/copilot/confirm (Redis),
|
||||
* where the server-side handler picks it up and tells Go
|
||||
*/
|
||||
export function executeRunToolOnClient(
|
||||
toolCallId: string,
|
||||
toolName: string,
|
||||
params: Record<string, unknown>
|
||||
): void {
|
||||
doExecuteRunTool(toolCallId, toolName, params).catch((err) => {
|
||||
logger.error('[RunTool] Unhandled error in client-side run tool execution', {
|
||||
toolCallId,
|
||||
toolName,
|
||||
error: err instanceof Error ? err.message : String(err),
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
/**
 * Executes a run tool (run_workflow / run_workflow_until_block /
 * run_from_block / run_block) on the client. Guards against concurrent
 * execution and missing workflow, maps tool params to execution options,
 * runs the workflow, then updates the tool call state and reports the
 * outcome to the server via reportCompletion.
 */
async function doExecuteRunTool(
  toolCallId: string,
  toolName: string,
  params: Record<string, unknown>
): Promise<void> {
  const { isExecuting, setIsExecuting } = useExecutionStore.getState()

  // Only one client-side execution at a time.
  if (isExecuting) {
    logger.warn('[RunTool] Execution prevented: already executing', { toolCallId, toolName })
    setToolState(toolCallId, ClientToolCallState.error)
    await reportCompletion(toolCallId, false, 'Workflow is already executing. Try again later')
    return
  }

  const { activeWorkflowId } = useWorkflowRegistry.getState()
  if (!activeWorkflowId) {
    logger.warn('[RunTool] Execution prevented: no active workflow', { toolCallId, toolName })
    setToolState(toolCallId, ClientToolCallState.error)
    await reportCompletion(toolCallId, false, 'No active workflow found')
    return
  }

  // Extract params for all tool types
  const workflowInput = (params.workflow_input || params.input || undefined) as
    | Record<string, unknown>
    | undefined

  // Where execution should stop: the explicit stop block for
  // run_workflow_until_block, or the single target block for run_block.
  const stopAfterBlockId = (() => {
    if (toolName === 'run_workflow_until_block')
      return params.stopAfterBlockId as string | undefined
    if (toolName === 'run_block') return params.blockId as string | undefined
    return undefined
  })()

  // Where execution should start (resuming from a prior execution's state);
  // 'latest' means reuse the most recent execution snapshot.
  const runFromBlock = (() => {
    if (toolName === 'run_from_block' && params.startBlockId) {
      return {
        startBlockId: params.startBlockId as string,
        executionId: (params.executionId as string | undefined) || 'latest',
      }
    }
    if (toolName === 'run_block' && params.blockId) {
      return {
        startBlockId: params.blockId as string,
        executionId: (params.executionId as string | undefined) || 'latest',
      }
    }
    return undefined
  })()

  setIsExecuting(true)
  const executionId = uuidv4()
  const executionStartTime = new Date().toISOString()

  logger.info('[RunTool] Starting client-side workflow execution', {
    toolCallId,
    toolName,
    executionId,
    activeWorkflowId,
    hasInput: !!workflowInput,
    stopAfterBlockId,
    runFromBlock: runFromBlock ? { startBlockId: runFromBlock.startBlockId } : undefined,
  })

  try {
    const result = await executeWorkflowWithFullLogging({
      workflowInput,
      executionId,
      stopAfterBlockId,
      runFromBlock,
    })

    // Determine success (same logic as staging's RunWorkflowClientTool).
    // The result may carry success/error either at the top level or nested
    // under an `execution` object; default is "succeeded" when neither shape
    // is present.
    let succeeded = true
    let errorMessage: string | undefined
    try {
      if (result && typeof result === 'object' && 'success' in (result as any)) {
        succeeded = Boolean((result as any).success)
        if (!succeeded) {
          errorMessage = (result as any)?.error || (result as any)?.output?.error
        }
      } else if (
        result &&
        typeof result === 'object' &&
        'execution' in (result as any) &&
        (result as any).execution
      ) {
        succeeded = Boolean((result as any).execution.success)
        if (!succeeded) {
          errorMessage =
            (result as any).execution?.error || (result as any).execution?.output?.error
        }
      }
    } catch {}

    if (succeeded) {
      logger.info('[RunTool] Workflow execution succeeded', { toolCallId, toolName })
      setToolState(toolCallId, ClientToolCallState.success)
      await reportCompletion(
        toolCallId,
        true,
        `Workflow execution completed. Started at: ${executionStartTime}`
      )
    } else {
      const msg = errorMessage || 'Workflow execution failed'
      logger.error('[RunTool] Workflow execution failed', { toolCallId, toolName, error: msg })
      setToolState(toolCallId, ClientToolCallState.error)
      await reportCompletion(toolCallId, false, msg)
    }
  } catch (err) {
    const msg = err instanceof Error ? err.message : String(err)
    logger.error('[RunTool] Workflow execution threw', { toolCallId, toolName, error: msg })
    setToolState(toolCallId, ClientToolCallState.error)
    await reportCompletion(toolCallId, false, msg)
  } finally {
    // Always release the global executing flag, even on failure.
    setIsExecuting(false)
  }
}
|
||||
|
||||
/** Update the tool call state directly in the copilot store (like staging's setState). */
|
||||
function setToolState(toolCallId: string, state: ClientToolCallState): void {
|
||||
try {
|
||||
const store = useCopilotStore.getState()
|
||||
const current = store.toolCallsById[toolCallId]
|
||||
if (!current) return
|
||||
const updated = {
|
||||
...store.toolCallsById,
|
||||
[toolCallId]: {
|
||||
...current,
|
||||
state,
|
||||
display: resolveToolDisplay(current.name, state, toolCallId, current.params),
|
||||
},
|
||||
}
|
||||
useCopilotStore.setState({ toolCallsById: updated })
|
||||
} catch (err) {
|
||||
logger.warn('[RunTool] Failed to update tool state', {
|
||||
toolCallId,
|
||||
state,
|
||||
error: err instanceof Error ? err.message : String(err),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Report tool completion to the server via the existing /api/copilot/confirm endpoint.
|
||||
* This writes {status: 'success'|'error', message} to Redis. The server-side handler
|
||||
* is polling Redis via waitForToolCompletion() and will pick this up, then fire-and-forget
|
||||
* markToolComplete to the Go backend.
|
||||
*/
|
||||
async function reportCompletion(
|
||||
toolCallId: string,
|
||||
success: boolean,
|
||||
message?: string
|
||||
): Promise<void> {
|
||||
try {
|
||||
const res = await fetch(COPILOT_CONFIRM_API_PATH, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
toolCallId,
|
||||
status: success ? 'success' : 'error',
|
||||
message: message || (success ? 'Tool completed' : 'Tool failed'),
|
||||
}),
|
||||
})
|
||||
if (!res.ok) {
|
||||
logger.warn('[RunTool] reportCompletion failed', { toolCallId, status: res.status })
|
||||
}
|
||||
} catch (err) {
|
||||
logger.error('[RunTool] reportCompletion error', {
|
||||
toolCallId,
|
||||
error: err instanceof Error ? err.message : String(err),
|
||||
})
|
||||
}
|
||||
}
|
||||
416
apps/sim/lib/copilot/client-sse/subagent-handlers.ts
Normal file
416
apps/sim/lib/copilot/client-sse/subagent-handlers.ts
Normal file
@@ -0,0 +1,416 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import {
|
||||
asRecord,
|
||||
normalizeSseEvent,
|
||||
shouldSkipToolCallEvent,
|
||||
shouldSkipToolResultEvent,
|
||||
} from '@/lib/copilot/orchestrator/sse-utils'
|
||||
import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
|
||||
import { resolveToolDisplay } from '@/lib/copilot/store-utils'
|
||||
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
|
||||
import type { CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types'
|
||||
import {
|
||||
type SSEHandler,
|
||||
sendAutoAcceptConfirmation,
|
||||
sseHandlers,
|
||||
updateStreamingMessage,
|
||||
} from './handlers'
|
||||
import { CLIENT_EXECUTABLE_RUN_TOOLS, executeRunToolOnClient } from './run-tool-execution'
|
||||
import type { ClientStreamingContext } from './types'
|
||||
|
||||
const logger = createLogger('CopilotClientSubagentHandlers')
|
||||
|
||||
type StoreSet = (
|
||||
partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)
|
||||
) => void
|
||||
|
||||
export function appendSubAgentContent(
|
||||
context: ClientStreamingContext,
|
||||
parentToolCallId: string,
|
||||
text: string
|
||||
) {
|
||||
if (!context.subAgentContent[parentToolCallId]) {
|
||||
context.subAgentContent[parentToolCallId] = ''
|
||||
}
|
||||
if (!context.subAgentBlocks[parentToolCallId]) {
|
||||
context.subAgentBlocks[parentToolCallId] = []
|
||||
}
|
||||
context.subAgentContent[parentToolCallId] += text
|
||||
const blocks = context.subAgentBlocks[parentToolCallId]
|
||||
const lastBlock = blocks[blocks.length - 1]
|
||||
if (lastBlock && lastBlock.type === 'subagent_text') {
|
||||
lastBlock.content = (lastBlock.content || '') + text
|
||||
} else {
|
||||
blocks.push({
|
||||
type: 'subagent_text',
|
||||
content: text,
|
||||
timestamp: Date.now(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Mirror the accumulated subagent data (content, tool calls, blocks) from the
 * streaming context onto the parent tool call in the copilot store, then push
 * the change into the streaming message's content blocks.
 *
 * Marks the parent as `subAgentStreaming: true`; the stream-end path
 * (subagent_end in applySseEvent) is responsible for clearing that flag.
 */
export function updateToolCallWithSubAgentData(
  context: ClientStreamingContext,
  get: () => CopilotStore,
  set: StoreSet,
  parentToolCallId: string
) {
  const { toolCallsById } = get()
  const parentToolCall = toolCallsById[parentToolCallId]
  if (!parentToolCall) {
    // Parent missing from the store — nothing to update; log ids for debugging.
    logger.warn('[SubAgent] updateToolCallWithSubAgentData: parent tool call not found', {
      parentToolCallId,
      availableToolCallIds: Object.keys(toolCallsById),
    })
    return
  }

  const blocks = context.subAgentBlocks[parentToolCallId] ?? []

  // Build a fresh object (no in-place mutation) so store subscribers re-render.
  const updatedToolCall: CopilotToolCall = {
    ...parentToolCall,
    subAgentContent: context.subAgentContent[parentToolCallId] || '',
    subAgentToolCalls: context.subAgentToolCalls[parentToolCallId] ?? [],
    subAgentBlocks: blocks,
    subAgentStreaming: true,
  }

  logger.info('[SubAgent] Updating tool call with subagent data', {
    parentToolCallId,
    parentToolName: parentToolCall.name,
    subAgentContentLength: updatedToolCall.subAgentContent?.length,
    subAgentBlocksCount: updatedToolCall.subAgentBlocks?.length,
    subAgentToolCallsCount: updatedToolCall.subAgentToolCalls?.length,
  })

  const updatedMap = { ...toolCallsById, [parentToolCallId]: updatedToolCall }
  set({ toolCallsById: updatedMap })

  // Also swap the updated tool call into the streaming message's content blocks
  // so the in-progress message renders the latest subagent state.
  let foundInContentBlocks = false
  for (let i = 0; i < context.contentBlocks.length; i++) {
    const b = context.contentBlocks[i]
    if (b.type === 'tool_call' && b.toolCall?.id === parentToolCallId) {
      context.contentBlocks[i] = { ...b, toolCall: updatedToolCall }
      foundInContentBlocks = true
      break
    }
  }

  if (!foundInContentBlocks) {
    // Not fatal — the store was still updated above — but worth surfacing.
    logger.warn('[SubAgent] Parent tool call not found in contentBlocks', {
      parentToolCallId,
      contentBlocksCount: context.contentBlocks.length,
      toolCallBlockIds: context.contentBlocks
        .filter((b) => b.type === 'tool_call')
        .map((b) => b.toolCall?.id),
    })
  }

  updateStreamingMessage(set, context)
}
|
||||
|
||||
/**
 * SSE event handlers for events emitted while a subagent session is active
 * (i.e. between subagent_start and subagent_end). Keyed by event type;
 * dispatched from applySseEvent when `data.subagent` is set.
 */
export const subAgentSSEHandlers: Record<string, SSEHandler> = {
  start: () => {
    // Subagent start event - no action needed, parent is already tracked from subagent_start
  },

  // Streamed text from the subagent: append to the parent's accumulators.
  content: (data, context, get, set) => {
    const parentToolCallId = context.subAgentParentToolCallId
    // Payload may arrive as `data.data` (string) or `data.content`.
    const contentStr = typeof data.data === 'string' ? data.data : data.content || ''
    logger.info('[SubAgent] content event', {
      parentToolCallId,
      hasData: !!contentStr,
      dataPreview: contentStr ? contentStr.substring(0, 50) : null,
    })
    if (!parentToolCallId || !contentStr) {
      logger.warn('[SubAgent] content missing parentToolCallId or data', {
        parentToolCallId,
        hasData: !!contentStr,
      })
      return
    }

    appendSubAgentContent(context, parentToolCallId, contentStr)

    updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
  },

  // Reasoning chunks are folded into the same text stream; phase markers
  // ('start'/'end') carry no text and are ignored.
  reasoning: (data, context, get, set) => {
    const parentToolCallId = context.subAgentParentToolCallId
    const dataObj = asRecord(data?.data)
    const phase = data?.phase || (dataObj.phase as string | undefined)
    if (!parentToolCallId) return

    if (phase === 'start' || phase === 'end') return

    const chunk = typeof data?.data === 'string' ? data.data : data?.content || ''
    if (!chunk) return

    appendSubAgentContent(context, parentToolCallId, chunk)

    updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
  },

  tool_generating: () => {
    // Tool generating event - no action needed, we'll handle the actual tool_call
  },

  // A tool call issued by the subagent. May arrive multiple times for the same
  // id (partial argument streaming); each arrival upserts the tool call.
  tool_call: async (data, context, get, set) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) return

    const toolData = asRecord(data?.data)
    // Id/name may live in the nested payload or on the event envelope.
    const id: string | undefined = (toolData.id as string | undefined) || data?.toolCallId
    const name: string | undefined = (toolData.name as string | undefined) || data?.toolName
    if (!id || !name) return
    const isPartial = toolData.partial === true

    let args: Record<string, unknown> | undefined = (toolData.arguments || toolData.input) as
      | Record<string, unknown>
      | undefined

    // Arguments sometimes arrive as a JSON string; best-effort parse.
    if (typeof args === 'string') {
      try {
        args = JSON.parse(args) as Record<string, unknown>
      } catch {
        logger.warn('[SubAgent] Failed to parse arguments string', { args })
      }
    }

    logger.info('[SubAgent] tool_call received', {
      id,
      name,
      hasArgs: !!args,
      argsKeys: args ? Object.keys(args) : [],
      toolDataKeys: Object.keys(toolData),
      dataKeys: Object.keys(data ?? {}),
    })

    if (!context.subAgentToolCalls[parentToolCallId]) {
      context.subAgentToolCalls[parentToolCallId] = []
    }
    if (!context.subAgentBlocks[parentToolCallId]) {
      context.subAgentBlocks[parentToolCallId] = []
    }

    const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex(
      (tc: CopilotToolCall) => tc.id === id
    )
    const existingToolCall =
      existingIndex >= 0 ? context.subAgentToolCalls[parentToolCallId][existingIndex] : undefined

    // Auto-allowed tools skip pending state to avoid flashing interrupt buttons
    const isAutoAllowed = get().isToolAutoAllowed(name)
    let initialState = isAutoAllowed ? ClientToolCallState.executing : ClientToolCallState.pending

    // Avoid flickering back to pending on partial/duplicate events once a tool is executing.
    if (
      existingToolCall?.state === ClientToolCallState.executing &&
      initialState === ClientToolCallState.pending
    ) {
      initialState = ClientToolCallState.executing
    }

    const subAgentToolCall: CopilotToolCall = {
      id,
      name,
      state: initialState,
      ...(args ? { params: args } : {}),
      display: resolveToolDisplay(name, initialState, id, args),
    }

    if (existingIndex >= 0) {
      // Upsert: replace the earlier (possibly partial) entry.
      context.subAgentToolCalls[parentToolCallId][existingIndex] = subAgentToolCall
    } else {
      context.subAgentToolCalls[parentToolCallId].push(subAgentToolCall)

      context.subAgentBlocks[parentToolCallId].push({
        type: 'subagent_tool_call',
        toolCall: subAgentToolCall,
        timestamp: Date.now(),
      })
    }

    // Subagent tool calls are also registered in the flat store map so they
    // can be looked up by id like top-level tool calls.
    const { toolCallsById } = get()
    const updated = { ...toolCallsById, [id]: subAgentToolCall }
    set({ toolCallsById: updated })

    updateToolCallWithSubAgentData(context, get, set, parentToolCallId)

    if (isPartial) {
      return
    }

    // Auto-allowed tools: send confirmation to the server so it can proceed
    // without waiting for the user to click "Allow".
    if (isAutoAllowed) {
      sendAutoAcceptConfirmation(id)
    }

    // Client-executable run tools: if auto-allowed, execute immediately for
    // real-time feedback. For non-auto-allowed, the user must click "Allow"
    // first — handleRun in tool-call.tsx triggers executeRunToolOnClient.
    if (CLIENT_EXECUTABLE_RUN_TOOLS.has(name) && isAutoAllowed) {
      executeRunToolOnClient(id, name, args || {})
    }
  },

  // Result of a subagent tool call: transition the matching entry to
  // success/error and propagate the change everywhere it is rendered.
  tool_result: (data, context, get, set) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) return

    const resultData = asRecord(data?.data)
    const toolCallId: string | undefined = data?.toolCallId || (resultData.id as string | undefined)
    // Determine success: explicit `success` field takes priority; otherwise
    // infer from presence of result data vs error (same logic as server-side
    // inferToolSuccess). The Go backend uses `*bool` with omitempty so
    // `success` is present when explicitly set, and absent for non-tool events.
    const hasExplicitSuccess = data?.success !== undefined || resultData.success !== undefined
    const explicitSuccess = data?.success ?? resultData.success
    const hasResultData = data?.result !== undefined || resultData.result !== undefined
    const hasError = !!data?.error || !!resultData.error
    const success: boolean = hasExplicitSuccess ? !!explicitSuccess : hasResultData && !hasError
    if (!toolCallId) return

    if (!context.subAgentToolCalls[parentToolCallId]) return
    if (!context.subAgentBlocks[parentToolCallId]) return

    const targetState = success ? ClientToolCallState.success : ClientToolCallState.error
    const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex(
      (tc: CopilotToolCall) => tc.id === toolCallId
    )

    if (existingIndex >= 0) {
      const existing = context.subAgentToolCalls[parentToolCallId][existingIndex]
      const updatedSubAgentToolCall = {
        ...existing,
        state: targetState,
        display: resolveToolDisplay(existing.name, targetState, toolCallId, existing.params),
      }
      context.subAgentToolCalls[parentToolCallId][existingIndex] = updatedSubAgentToolCall

      // Keep the block list entry in sync with the updated tool call.
      for (const block of context.subAgentBlocks[parentToolCallId]) {
        if (block.type === 'subagent_tool_call' && block.toolCall?.id === toolCallId) {
          block.toolCall = updatedSubAgentToolCall
          break
        }
      }

      // Mirror the state change into the flat store map, if registered there.
      const { toolCallsById } = get()
      if (toolCallsById[toolCallId]) {
        const updatedMap = {
          ...toolCallsById,
          [toolCallId]: updatedSubAgentToolCall,
        }
        set({ toolCallsById: updatedMap })
        logger.info('[SubAgent] Updated subagent tool call state in toolCallsById', {
          toolCallId,
          name: existing.name,
          state: targetState,
        })
      }
    }

    updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
  },

  // Subagent stream finished: flush the final accumulated state to the parent.
  done: (_data, context, get, set) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) return

    updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
  },
}
|
||||
|
||||
/**
 * Apply one SSE event to the client streaming context and copilot store.
 *
 * Routing order:
 *  1. dedup/skip filters (shouldSkipToolCallEvent / shouldSkipToolResultEvent)
 *  2. subagent session lifecycle (subagent_start / subagent_end)
 *  3. events tagged with `subagent` → subAgentSSEHandlers
 *  4. everything else → sseHandlers (with a default fallback)
 *
 * @returns true while the stream should keep being consumed; false once
 *          a handler has marked `context.streamComplete`.
 */
export async function applySseEvent(
  rawData: SSEEvent,
  context: ClientStreamingContext,
  get: () => CopilotStore,
  set: (next: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)) => void
): Promise<boolean> {
  const normalizedEvent = normalizeSseEvent(rawData)
  if (shouldSkipToolCallEvent(normalizedEvent) || shouldSkipToolResultEvent(normalizedEvent)) {
    return true
  }
  const data = normalizedEvent

  if (data.type === 'subagent_start') {
    const startData = asRecord(data.data)
    const toolCallId = startData.tool_call_id as string | undefined
    if (toolCallId) {
      // Remember which parent tool call subsequent subagent events belong to.
      context.subAgentParentToolCallId = toolCallId
      const { toolCallsById } = get()
      const parentToolCall = toolCallsById[toolCallId]
      if (parentToolCall) {
        const updatedToolCall: CopilotToolCall = {
          ...parentToolCall,
          subAgentStreaming: true,
        }
        const updatedMap = { ...toolCallsById, [toolCallId]: updatedToolCall }
        set({ toolCallsById: updatedMap })
      }
      logger.info('[SSE] Subagent session started', {
        subagent: data.subagent,
        parentToolCallId: toolCallId,
      })
    }
    return true
  }

  if (data.type === 'subagent_end') {
    const parentToolCallId = context.subAgentParentToolCallId
    if (parentToolCallId) {
      const { toolCallsById } = get()
      const parentToolCall = toolCallsById[parentToolCallId]
      if (parentToolCall) {
        // Final flush of accumulated subagent data; clears the streaming flag.
        const updatedToolCall: CopilotToolCall = {
          ...parentToolCall,
          subAgentContent: context.subAgentContent[parentToolCallId] || '',
          subAgentToolCalls: context.subAgentToolCalls[parentToolCallId] ?? [],
          subAgentBlocks: context.subAgentBlocks[parentToolCallId] ?? [],
          subAgentStreaming: false,
        }
        const updatedMap = { ...toolCallsById, [parentToolCallId]: updatedToolCall }
        set({ toolCallsById: updatedMap })
        logger.info('[SSE] Subagent session ended', {
          subagent: data.subagent,
          parentToolCallId,
          contentLength: context.subAgentContent[parentToolCallId]?.length || 0,
          toolCallCount: context.subAgentToolCalls[parentToolCallId]?.length || 0,
        })
      }
    }
    // Always clear the session marker, even if the parent was never found.
    context.subAgentParentToolCallId = undefined
    return true
  }

  if (data.subagent) {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) {
      // Subagent event arriving outside a session — drop it but keep streaming.
      logger.warn('[SSE] Subagent event without parent tool call ID', {
        type: data.type,
        subagent: data.subagent,
      })
      return true
    }

    logger.info('[SSE] Processing subagent event', {
      type: data.type,
      subagent: data.subagent,
      parentToolCallId,
      hasHandler: !!subAgentSSEHandlers[data.type],
    })

    const subAgentHandler = subAgentSSEHandlers[data.type]
    if (subAgentHandler) {
      await subAgentHandler(data, context, get, set)
    } else {
      logger.warn('[SSE] No handler for subagent event type', { type: data.type })
    }
    return !context.streamComplete
  }

  // Top-level (non-subagent) events fall through to the main handler table.
  const handler = sseHandlers[data.type] || sseHandlers.default
  await handler(data, context, get, set)
  return !context.streamComplete
}
|
||||
45
apps/sim/lib/copilot/client-sse/types.ts
Normal file
45
apps/sim/lib/copilot/client-sse/types.ts
Normal file
@@ -0,0 +1,45 @@
|
||||
import type {
|
||||
ChatContext,
|
||||
CopilotToolCall,
|
||||
SubAgentContentBlock,
|
||||
} from '@/stores/panel/copilot/types'
|
||||
|
||||
/**
 * A content block used in copilot messages and during streaming.
 * Uses a literal type union for `type` to stay compatible with CopilotMessage.
 */
export type ContentBlockType = 'text' | 'thinking' | 'tool_call' | 'contexts'

/** One renderable block of a copilot message (text, thinking, tool call, or contexts). */
export interface ClientContentBlock {
  type: ContentBlockType
  /** Text payload for 'text'/'thinking' blocks. */
  content?: string
  timestamp: number
  /** Present for 'tool_call' blocks. */
  toolCall?: CopilotToolCall | null
  startTime?: number
  duration?: number
  /** Present for 'contexts' blocks. */
  contexts?: ChatContext[]
}

/**
 * Mutable per-stream state accumulated while consuming a copilot SSE stream.
 * One instance lives for the duration of a single assistant message.
 */
export interface StreamingContext {
  messageId: string
  /** Full raw text received so far. */
  accumulatedContent: string
  /** Ordered blocks built up for the in-progress message. */
  contentBlocks: ClientContentBlock[]
  /** Open text block that new text chunks append to, if any. */
  currentTextBlock: ClientContentBlock | null
  isInThinkingBlock: boolean
  currentThinkingBlock: ClientContentBlock | null
  isInDesignWorkflowBlock: boolean
  designWorkflowContent: string
  pendingContent: string
  /** Set when the server creates a new chat mid-stream. */
  newChatId?: string
  doneEventCount: number
  /** Set by handlers once the stream should stop being consumed. */
  streamComplete?: boolean
  wasAborted?: boolean
  suppressContinueOption?: boolean
  /** Parent tool call id of the currently active subagent session, if any. */
  subAgentParentToolCallId?: string
  /** Accumulated subagent text, keyed by parent tool call id. */
  subAgentContent: Record<string, string>
  /** Tool calls issued by subagents, keyed by parent tool call id. */
  subAgentToolCalls: Record<string, CopilotToolCall[]>
  /** Ordered subagent blocks, keyed by parent tool call id. */
  subAgentBlocks: Record<string, SubAgentContentBlock[]>
  suppressStreamingUpdates?: boolean
}

/** Alias kept for naming symmetry with other client-sse modules. */
export type ClientStreamingContext = StreamingContext
|
||||
@@ -109,14 +109,14 @@ function parseBooleanEnv(value: string | undefined): boolean | null {
|
||||
export const DEFAULT_COPILOT_CONFIG: CopilotConfig = {
|
||||
chat: {
|
||||
defaultProvider: 'anthropic',
|
||||
defaultModel: 'claude-3-7-sonnet-latest',
|
||||
defaultModel: 'claude-4.6-opus',
|
||||
temperature: 0.1,
|
||||
maxTokens: 8192,
|
||||
systemPrompt: AGENT_MODE_SYSTEM_PROMPT,
|
||||
},
|
||||
rag: {
|
||||
defaultProvider: 'anthropic',
|
||||
defaultModel: 'claude-3-7-sonnet-latest',
|
||||
defaultModel: 'claude-4.6-opus',
|
||||
temperature: 0.1,
|
||||
maxTokens: 2000,
|
||||
embeddingModel: 'text-embedding-3-small',
|
||||
|
||||
@@ -1,2 +1,115 @@
|
||||
import { env } from '@/lib/core/config/env'
|
||||
|
||||
export const SIM_AGENT_API_URL_DEFAULT = 'https://copilot.sim.ai'
|
||||
export const SIM_AGENT_VERSION = '1.0.3'
|
||||
export const SIM_AGENT_VERSION = '3.0.0'
|
||||
|
||||
/** Resolved copilot backend URL — reads from env with fallback to default. */
|
||||
const rawAgentUrl = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT
|
||||
export const SIM_AGENT_API_URL =
|
||||
rawAgentUrl.startsWith('http://') || rawAgentUrl.startsWith('https://')
|
||||
? rawAgentUrl
|
||||
: SIM_AGENT_API_URL_DEFAULT
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Redis key prefixes
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Redis key prefix for tool call confirmation payloads (polled by waitForToolDecision). */
|
||||
export const REDIS_TOOL_CALL_PREFIX = 'tool_call:'
|
||||
|
||||
/** Redis key prefix for copilot SSE stream buffers. */
|
||||
export const REDIS_COPILOT_STREAM_PREFIX = 'copilot_stream:'
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Timeouts
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Default timeout for the copilot orchestration stream loop (5 min). */
|
||||
export const ORCHESTRATION_TIMEOUT_MS = 300_000
|
||||
|
||||
/** Timeout for the client-side streaming response handler (10 min). */
|
||||
export const STREAM_TIMEOUT_MS = 600_000
|
||||
|
||||
/** TTL for Redis tool call confirmation entries (24 h). */
|
||||
export const REDIS_TOOL_CALL_TTL_SECONDS = 86_400
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tool decision polling
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Initial poll interval when waiting for a user tool decision. */
|
||||
export const TOOL_DECISION_INITIAL_POLL_MS = 100
|
||||
|
||||
/** Maximum poll interval when waiting for a user tool decision. */
|
||||
export const TOOL_DECISION_MAX_POLL_MS = 3_000
|
||||
|
||||
/** Backoff multiplier for the tool decision poll interval. */
|
||||
export const TOOL_DECISION_POLL_BACKOFF = 1.5
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Stream resume
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Maximum number of resume attempts before giving up. */
|
||||
export const MAX_RESUME_ATTEMPTS = 3
|
||||
|
||||
/** SessionStorage key for persisting active stream metadata across page reloads. */
|
||||
export const STREAM_STORAGE_KEY = 'copilot_active_stream'
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Client-side streaming batching
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Delay (ms) before processing the next queued message after stream completion. */
|
||||
export const QUEUE_PROCESS_DELAY_MS = 100
|
||||
|
||||
/** Delay (ms) before invalidating subscription queries after stream completion. */
|
||||
export const SUBSCRIPTION_INVALIDATE_DELAY_MS = 1_000
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// UI helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Maximum character length for an optimistic chat title derived from a user message. */
|
||||
export const OPTIMISTIC_TITLE_MAX_LENGTH = 50
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Copilot API paths (client-side fetch targets)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** POST — send a chat message to the copilot. */
|
||||
export const COPILOT_CHAT_API_PATH = '/api/copilot/chat'
|
||||
|
||||
/** GET — resume/replay a copilot SSE stream. */
|
||||
export const COPILOT_CHAT_STREAM_API_PATH = '/api/copilot/chat/stream'
|
||||
|
||||
/** POST — persist chat messages / plan artifact / config. */
|
||||
export const COPILOT_UPDATE_MESSAGES_API_PATH = '/api/copilot/chat/update-messages'
|
||||
|
||||
/** DELETE — delete a copilot chat. */
|
||||
export const COPILOT_DELETE_CHAT_API_PATH = '/api/copilot/chat/delete'
|
||||
|
||||
/** POST — confirm or reject a tool call. */
|
||||
export const COPILOT_CONFIRM_API_PATH = '/api/copilot/confirm'
|
||||
|
||||
/** POST — forward diff-accepted/rejected stats to the copilot backend. */
|
||||
export const COPILOT_STATS_API_PATH = '/api/copilot/stats'
|
||||
|
||||
/** GET — load checkpoints for a chat. */
|
||||
export const COPILOT_CHECKPOINTS_API_PATH = '/api/copilot/checkpoints'
|
||||
|
||||
/** POST — revert to a checkpoint. */
|
||||
export const COPILOT_CHECKPOINTS_REVERT_API_PATH = '/api/copilot/checkpoints/revert'
|
||||
|
||||
/** GET/POST/DELETE — manage auto-allowed tools. */
|
||||
export const COPILOT_AUTO_ALLOWED_TOOLS_API_PATH = '/api/copilot/auto-allowed-tools'
|
||||
|
||||
/** GET — fetch user credentials for masking. */
|
||||
export const COPILOT_CREDENTIALS_API_PATH = '/api/copilot/credentials'
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Dedup limits
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Maximum entries in the in-memory SSE tool-event dedup cache. */
|
||||
export const STREAM_BUFFER_MAX_DEDUP_ENTRIES = 1_000
|
||||
|
||||
129
apps/sim/lib/copilot/messages/checkpoints.ts
Normal file
129
apps/sim/lib/copilot/messages/checkpoints.ts
Normal file
@@ -0,0 +1,129 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { COPILOT_CHECKPOINTS_API_PATH } from '@/lib/copilot/constants'
|
||||
import type { CopilotMessage, CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types'
|
||||
import { mergeSubblockState } from '@/stores/workflows/utils'
|
||||
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
|
||||
import type { WorkflowState } from '@/stores/workflows/workflow/types'
|
||||
|
||||
const logger = createLogger('CopilotMessageCheckpoints')
|
||||
|
||||
export function buildCheckpointWorkflowState(workflowId: string): WorkflowState | null {
|
||||
const rawState = useWorkflowStore.getState().getWorkflowState()
|
||||
if (!rawState) return null
|
||||
|
||||
const blocksWithSubblockValues = mergeSubblockState(rawState.blocks, workflowId)
|
||||
|
||||
const filteredBlocks = Object.entries(blocksWithSubblockValues).reduce(
|
||||
(acc, [blockId, block]) => {
|
||||
if (block?.type && block?.name) {
|
||||
acc[blockId] = {
|
||||
...block,
|
||||
id: block.id || blockId,
|
||||
enabled: block.enabled !== undefined ? block.enabled : true,
|
||||
horizontalHandles: block.horizontalHandles !== undefined ? block.horizontalHandles : true,
|
||||
height: block.height !== undefined ? block.height : 90,
|
||||
subBlocks: block.subBlocks ?? {},
|
||||
outputs: block.outputs ?? {},
|
||||
data: block.data ?? {},
|
||||
position: block.position || { x: 0, y: 0 },
|
||||
}
|
||||
}
|
||||
return acc
|
||||
},
|
||||
{} as WorkflowState['blocks']
|
||||
)
|
||||
|
||||
return {
|
||||
blocks: filteredBlocks,
|
||||
edges: rawState.edges ?? [],
|
||||
loops: rawState.loops ?? {},
|
||||
parallels: rawState.parallels ?? {},
|
||||
lastSaved: rawState.lastSaved || Date.now(),
|
||||
deploymentStatuses: rawState.deploymentStatuses ?? {},
|
||||
}
|
||||
}
|
||||
|
||||
export async function saveMessageCheckpoint(
|
||||
messageId: string,
|
||||
get: () => CopilotStore,
|
||||
set: (partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)) => void
|
||||
): Promise<boolean> {
|
||||
const { workflowId, currentChat, messageSnapshots, messageCheckpoints } = get()
|
||||
if (!workflowId || !currentChat?.id) return false
|
||||
|
||||
const snapshot = messageSnapshots[messageId]
|
||||
if (!snapshot) return false
|
||||
|
||||
const nextSnapshots = { ...messageSnapshots }
|
||||
delete nextSnapshots[messageId]
|
||||
set({ messageSnapshots: nextSnapshots })
|
||||
|
||||
try {
|
||||
const response = await fetch(COPILOT_CHECKPOINTS_API_PATH, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
workflowId,
|
||||
chatId: currentChat.id,
|
||||
messageId,
|
||||
workflowState: JSON.stringify(snapshot),
|
||||
}),
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to create checkpoint: ${response.statusText}`)
|
||||
}
|
||||
|
||||
const result = await response.json()
|
||||
const newCheckpoint = result.checkpoint
|
||||
if (newCheckpoint) {
|
||||
const existingCheckpoints = messageCheckpoints[messageId] ?? []
|
||||
const updatedCheckpoints = {
|
||||
...messageCheckpoints,
|
||||
[messageId]: [newCheckpoint, ...existingCheckpoints],
|
||||
}
|
||||
set({ messageCheckpoints: updatedCheckpoints })
|
||||
}
|
||||
|
||||
return true
|
||||
} catch (error) {
|
||||
logger.error('Failed to create checkpoint from snapshot:', error)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
export function extractToolCallsRecursively(
|
||||
toolCall: CopilotToolCall,
|
||||
map: Record<string, CopilotToolCall>
|
||||
): void {
|
||||
if (!toolCall?.id) return
|
||||
map[toolCall.id] = toolCall
|
||||
|
||||
if (Array.isArray(toolCall.subAgentBlocks)) {
|
||||
for (const block of toolCall.subAgentBlocks) {
|
||||
if (block?.type === 'subagent_tool_call' && block.toolCall?.id) {
|
||||
extractToolCallsRecursively(block.toolCall, map)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (Array.isArray(toolCall.subAgentToolCalls)) {
|
||||
for (const subTc of toolCall.subAgentToolCalls) {
|
||||
extractToolCallsRecursively(subTc, map)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export function buildToolCallsById(messages: CopilotMessage[]): Record<string, CopilotToolCall> {
|
||||
const toolCallsById: Record<string, CopilotToolCall> = {}
|
||||
for (const msg of messages) {
|
||||
if (msg.contentBlocks) {
|
||||
for (const block of msg.contentBlocks) {
|
||||
if (block?.type === 'tool_call' && block.toolCall?.id) {
|
||||
extractToolCallsRecursively(block.toolCall, toolCallsById)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return toolCallsById
|
||||
}
|
||||
28
apps/sim/lib/copilot/messages/credential-masking.ts
Normal file
28
apps/sim/lib/copilot/messages/credential-masking.ts
Normal file
@@ -0,0 +1,28 @@
|
||||
export function maskCredentialIdsInValue<T>(value: T, credentialIds: Set<string>): T {
|
||||
if (!value || credentialIds.size === 0) return value
|
||||
|
||||
if (typeof value === 'string') {
|
||||
let masked = value as string
|
||||
const sortedIds = Array.from(credentialIds).sort((a, b) => b.length - a.length)
|
||||
for (const id of sortedIds) {
|
||||
if (id && masked.includes(id)) {
|
||||
masked = masked.split(id).join('••••••••')
|
||||
}
|
||||
}
|
||||
return masked as unknown as T
|
||||
}
|
||||
|
||||
if (Array.isArray(value)) {
|
||||
return value.map((item) => maskCredentialIdsInValue(item, credentialIds)) as T
|
||||
}
|
||||
|
||||
if (typeof value === 'object') {
|
||||
const masked: Record<string, unknown> = {}
|
||||
for (const key of Object.keys(value as Record<string, unknown>)) {
|
||||
masked[key] = maskCredentialIdsInValue((value as Record<string, unknown>)[key], credentialIds)
|
||||
}
|
||||
return masked as T
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
4
apps/sim/lib/copilot/messages/index.ts
Normal file
4
apps/sim/lib/copilot/messages/index.ts
Normal file
@@ -0,0 +1,4 @@
|
||||
export * from './checkpoints'
|
||||
export * from './credential-masking'
|
||||
export * from './persist'
|
||||
export * from './serialization'
|
||||
43
apps/sim/lib/copilot/messages/persist.ts
Normal file
43
apps/sim/lib/copilot/messages/persist.ts
Normal file
@@ -0,0 +1,43 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { COPILOT_UPDATE_MESSAGES_API_PATH } from '@/lib/copilot/constants'
|
||||
import type { CopilotMessage } from '@/stores/panel/copilot/types'
|
||||
import { serializeMessagesForDB } from './serialization'
|
||||
|
||||
const logger = createLogger('CopilotMessagePersistence')
|
||||
|
||||
export async function persistMessages(params: {
|
||||
chatId: string
|
||||
messages: CopilotMessage[]
|
||||
sensitiveCredentialIds?: Set<string>
|
||||
planArtifact?: string | null
|
||||
mode?: string
|
||||
model?: string
|
||||
conversationId?: string
|
||||
}): Promise<boolean> {
|
||||
try {
|
||||
const dbMessages = serializeMessagesForDB(
|
||||
params.messages,
|
||||
params.sensitiveCredentialIds ?? new Set<string>()
|
||||
)
|
||||
const response = await fetch(COPILOT_UPDATE_MESSAGES_API_PATH, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
chatId: params.chatId,
|
||||
messages: dbMessages,
|
||||
...(params.planArtifact !== undefined ? { planArtifact: params.planArtifact } : {}),
|
||||
...(params.mode || params.model
|
||||
? { config: { mode: params.mode, model: params.model } }
|
||||
: {}),
|
||||
...(params.conversationId ? { conversationId: params.conversationId } : {}),
|
||||
}),
|
||||
})
|
||||
return response.ok
|
||||
} catch (error) {
|
||||
logger.warn('Failed to persist messages', {
|
||||
chatId: params.chatId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
return false
|
||||
}
|
||||
}
|
||||
200
apps/sim/lib/copilot/messages/serialization.ts
Normal file
200
apps/sim/lib/copilot/messages/serialization.ts
Normal file
@@ -0,0 +1,200 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { resolveToolDisplay } from '@/lib/copilot/store-utils'
|
||||
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
|
||||
import type { CopilotMessage, CopilotToolCall } from '@/stores/panel/copilot/types'
|
||||
import { maskCredentialIdsInValue } from './credential-masking'
|
||||
|
||||
const logger = createLogger('CopilotMessageSerialization')
|
||||
|
||||
const TERMINAL_STATES = new Set<string>([
|
||||
ClientToolCallState.success,
|
||||
ClientToolCallState.error,
|
||||
ClientToolCallState.rejected,
|
||||
ClientToolCallState.aborted,
|
||||
ClientToolCallState.review,
|
||||
ClientToolCallState.background,
|
||||
])
|
||||
|
||||
/**
|
||||
* Clears streaming flags and normalizes non-terminal tool call states to 'aborted'.
|
||||
* This ensures that tool calls loaded from DB after a refresh/abort don't render
|
||||
* as in-progress with shimmer animations or interrupt buttons.
|
||||
*/
|
||||
export function clearStreamingFlags(toolCall: CopilotToolCall): void {
|
||||
if (!toolCall) return
|
||||
|
||||
toolCall.subAgentStreaming = false
|
||||
|
||||
// Normalize non-terminal states when loading from DB.
|
||||
// 'executing' → 'success': the server was running it, assume it completed.
|
||||
// 'pending'/'generating' → 'aborted': never reached execution.
|
||||
if (toolCall.state && !TERMINAL_STATES.has(toolCall.state)) {
|
||||
const normalized =
|
||||
toolCall.state === ClientToolCallState.executing
|
||||
? ClientToolCallState.success
|
||||
: ClientToolCallState.aborted
|
||||
toolCall.state = normalized
|
||||
toolCall.display = resolveToolDisplay(toolCall.name, normalized, toolCall.id, toolCall.params)
|
||||
}
|
||||
|
||||
if (Array.isArray(toolCall.subAgentBlocks)) {
|
||||
for (const block of toolCall.subAgentBlocks) {
|
||||
if (block?.type === 'subagent_tool_call' && block.toolCall) {
|
||||
clearStreamingFlags(block.toolCall)
|
||||
}
|
||||
}
|
||||
}
|
||||
if (Array.isArray(toolCall.subAgentToolCalls)) {
|
||||
for (const subTc of toolCall.subAgentToolCalls) {
|
||||
clearStreamingFlags(subTc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export function normalizeMessagesForUI(messages: CopilotMessage[]): CopilotMessage[] {
|
||||
try {
|
||||
for (const message of messages) {
|
||||
if (message.role === 'assistant') {
|
||||
logger.debug('[normalizeMessagesForUI] Loading assistant message', {
|
||||
id: message.id,
|
||||
hasContent: !!message.content?.trim(),
|
||||
contentBlockCount: message.contentBlocks?.length || 0,
|
||||
contentBlockTypes: message.contentBlocks?.map((b) => b?.type) ?? [],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
for (const message of messages) {
|
||||
if (message.contentBlocks) {
|
||||
for (const block of message.contentBlocks) {
|
||||
if (block?.type === 'tool_call' && block.toolCall) {
|
||||
clearStreamingFlags(block.toolCall)
|
||||
}
|
||||
}
|
||||
}
|
||||
if (message.toolCalls) {
|
||||
for (const toolCall of message.toolCalls) {
|
||||
clearStreamingFlags(toolCall)
|
||||
}
|
||||
}
|
||||
}
|
||||
return messages
|
||||
} catch (error) {
|
||||
logger.warn('[normalizeMessagesForUI] Failed to normalize messages', {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
return messages
|
||||
}
|
||||
}
|
||||
|
||||
export function deepClone<T>(obj: T): T {
|
||||
try {
|
||||
const json = JSON.stringify(obj)
|
||||
if (!json || json === 'undefined') {
|
||||
logger.warn('[deepClone] JSON.stringify returned empty for object', {
|
||||
type: typeof obj,
|
||||
isArray: Array.isArray(obj),
|
||||
length: Array.isArray(obj) ? obj.length : undefined,
|
||||
})
|
||||
return obj
|
||||
}
|
||||
const parsed = JSON.parse(json)
|
||||
if (Array.isArray(obj) && (!Array.isArray(parsed) || parsed.length !== obj.length)) {
|
||||
logger.warn('[deepClone] Array clone mismatch', {
|
||||
originalLength: obj.length,
|
||||
clonedLength: Array.isArray(parsed) ? parsed.length : 'not array',
|
||||
})
|
||||
}
|
||||
return parsed
|
||||
} catch (err) {
|
||||
logger.error('[deepClone] Failed to clone object', {
|
||||
error: String(err),
|
||||
type: typeof obj,
|
||||
isArray: Array.isArray(obj),
|
||||
})
|
||||
return obj
|
||||
}
|
||||
}
|
||||
|
||||
export function serializeMessagesForDB(
|
||||
messages: CopilotMessage[],
|
||||
credentialIds: Set<string>
|
||||
): CopilotMessage[] {
|
||||
const result = messages
|
||||
.map((msg) => {
|
||||
let timestamp: string = msg.timestamp
|
||||
if (typeof timestamp !== 'string') {
|
||||
const ts = timestamp as unknown
|
||||
timestamp = ts instanceof Date ? ts.toISOString() : new Date().toISOString()
|
||||
}
|
||||
|
||||
const serialized: CopilotMessage = {
|
||||
id: msg.id,
|
||||
role: msg.role,
|
||||
content: msg.content || '',
|
||||
timestamp,
|
||||
}
|
||||
|
||||
if (Array.isArray(msg.contentBlocks) && msg.contentBlocks.length > 0) {
|
||||
serialized.contentBlocks = deepClone(msg.contentBlocks)
|
||||
}
|
||||
|
||||
if (Array.isArray(msg.toolCalls) && msg.toolCalls.length > 0) {
|
||||
serialized.toolCalls = deepClone(msg.toolCalls)
|
||||
}
|
||||
|
||||
if (Array.isArray(msg.fileAttachments) && msg.fileAttachments.length > 0) {
|
||||
serialized.fileAttachments = deepClone(msg.fileAttachments)
|
||||
}
|
||||
|
||||
if (Array.isArray(msg.contexts) && msg.contexts.length > 0) {
|
||||
serialized.contexts = deepClone(msg.contexts)
|
||||
}
|
||||
|
||||
if (Array.isArray(msg.citations) && msg.citations.length > 0) {
|
||||
serialized.citations = deepClone(msg.citations)
|
||||
}
|
||||
|
||||
if (msg.errorType) {
|
||||
serialized.errorType = msg.errorType
|
||||
}
|
||||
|
||||
return maskCredentialIdsInValue(serialized, credentialIds)
|
||||
})
|
||||
.filter((msg) => {
|
||||
if (msg.role === 'assistant') {
|
||||
const hasContent = typeof msg.content === 'string' && msg.content.trim().length > 0
|
||||
const hasTools = Array.isArray(msg.toolCalls) && msg.toolCalls.length > 0
|
||||
const hasBlocks = Array.isArray(msg.contentBlocks) && msg.contentBlocks.length > 0
|
||||
return hasContent || hasTools || hasBlocks
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
for (const msg of messages) {
|
||||
if (msg.role === 'assistant') {
|
||||
logger.debug('[serializeMessagesForDB] Input assistant message', {
|
||||
id: msg.id,
|
||||
hasContent: !!msg.content?.trim(),
|
||||
contentBlockCount: msg.contentBlocks?.length || 0,
|
||||
contentBlockTypes: msg.contentBlocks?.map((b) => b?.type) ?? [],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
logger.debug('[serializeMessagesForDB] Serialized messages', {
|
||||
inputCount: messages.length,
|
||||
outputCount: result.length,
|
||||
sample:
|
||||
result.length > 0
|
||||
? {
|
||||
role: result[result.length - 1].role,
|
||||
hasContent: !!result[result.length - 1].content,
|
||||
contentBlockCount: result[result.length - 1].contentBlocks?.length || 0,
|
||||
toolCallCount: result[result.length - 1].toolCalls?.length || 0,
|
||||
}
|
||||
: null,
|
||||
})
|
||||
|
||||
return result
|
||||
}
|
||||
@@ -18,6 +18,7 @@ export const COPILOT_MODEL_IDS = [
|
||||
'claude-4-sonnet',
|
||||
'claude-4.5-haiku',
|
||||
'claude-4.5-sonnet',
|
||||
'claude-4.6-opus',
|
||||
'claude-4.5-opus',
|
||||
'claude-4.1-opus',
|
||||
'gemini-3-pro',
|
||||
|
||||
67
apps/sim/lib/copilot/orchestrator/config.ts
Normal file
67
apps/sim/lib/copilot/orchestrator/config.ts
Normal file
@@ -0,0 +1,67 @@
|
||||
/**
 * Tool names that require an explicit user decision before execution.
 * In interactive mode the orchestrator waits for an approve/reject/background
 * decision on these (see sse-handlers' needsApproval flow); they cover
 * workflow runs, deployments, environment/credential changes, and outbound
 * requests.
 */
export const INTERRUPT_TOOL_NAMES = [
  'set_global_workflow_variables',
  'run_workflow',
  'run_workflow_until_block',
  'run_from_block',
  'run_block',
  'manage_mcp_tool',
  'manage_custom_tool',
  'deploy_mcp',
  'deploy_chat',
  'deploy_api',
  'create_workspace_mcp_server',
  'set_environment_variables',
  'make_api_request',
  'oauth_request_access',
  'navigate_ui',
  'knowledge_base',
  'generate_api_key',
] as const

// Set form for O(1) membership checks in the stream handlers.
export const INTERRUPT_TOOL_SET = new Set<string>(INTERRUPT_TOOL_NAMES)

/**
 * Subagent tools are executed by the copilot backend itself — the sim side
 * only renders their progress and must never attempt to run them locally.
 */
export const SUBAGENT_TOOL_NAMES = [
  'debug',
  'edit',
  'build',
  'plan',
  'test',
  'deploy',
  'auth',
  'research',
  'knowledge',
  'custom_tool',
  'tour',
  'info',
  'workflow',
  'evaluate',
  'superagent',
  'discovery',
] as const

// Set form for O(1) membership checks in the stream handlers.
export const SUBAGENT_TOOL_SET = new Set<string>(SUBAGENT_TOOL_NAMES)

/**
 * Respond tools are internal to the copilot's subagent system.
 * They're used by subagents to signal completion and should NOT be executed by the sim side.
 * The copilot backend handles these internally.
 */
export const RESPOND_TOOL_NAMES = [
  'plan_respond',
  'edit_respond',
  'build_respond',
  'debug_respond',
  'info_respond',
  'research_respond',
  'deploy_respond',
  'superagent_respond',
  'discovery_respond',
  'tour_respond',
  'auth_respond',
  'workflow_respond',
  'knowledge_respond',
  'custom_tool_respond',
  'test_respond',
] as const

// Set form for O(1) membership checks in the stream handlers.
export const RESPOND_TOOL_SET = new Set<string>(RESPOND_TOOL_NAMES)
|
||||
70
apps/sim/lib/copilot/orchestrator/index.ts
Normal file
70
apps/sim/lib/copilot/orchestrator/index.ts
Normal file
@@ -0,0 +1,70 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
|
||||
import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor'
|
||||
import type { OrchestratorOptions, OrchestratorResult } from '@/lib/copilot/orchestrator/types'
|
||||
import { env } from '@/lib/core/config/env'
|
||||
import { buildToolCallSummaries, createStreamingContext, runStreamLoop } from './stream-core'
|
||||
|
||||
const logger = createLogger('CopilotOrchestrator')

/**
 * Options for one server-side copilot stream, scoping the run to a user and
 * workflow, optionally continuing an existing chat.
 */
export interface OrchestrateStreamOptions extends OrchestratorOptions {
  userId: string
  workflowId: string
  chatId?: string
}

/**
 * Drives one copilot chat-completion stream end to end on the server:
 * prepares the tool execution context, runs the SSE stream loop against the
 * sim agent API, then assembles the accumulated content, content blocks, and
 * tool-call summaries into an OrchestratorResult.
 *
 * Never throws: on failure the error is passed to options.onError and a
 * failed result (success: false, error message set) is returned. On success
 * options.onComplete receives the result before it is returned.
 */
export async function orchestrateCopilotStream(
  requestPayload: Record<string, unknown>,
  options: OrchestrateStreamOptions
): Promise<OrchestratorResult> {
  const { userId, workflowId, chatId } = options
  // Per-user/per-workflow state needed to execute tools server-side.
  const execContext = await prepareExecutionContext(userId, workflowId)

  const payloadMsgId = requestPayload?.messageId
  const context = createStreamingContext({
    chatId,
    // Reuse the caller-supplied message id when present so events correlate;
    // otherwise mint a fresh one.
    messageId: typeof payloadMsgId === 'string' ? payloadMsgId : crypto.randomUUID(),
  })

  try {
    await runStreamLoop(
      `${SIM_AGENT_API_URL}/api/chat-completion-streaming`,
      {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          // The API-key header is only attached when a key is configured.
          ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}),
        },
        body: JSON.stringify(requestPayload),
      },
      context,
      execContext,
      options
    )

    const result: OrchestratorResult = {
      // Any error collected during streaming marks the whole run as failed.
      success: context.errors.length === 0,
      content: context.accumulatedContent,
      contentBlocks: context.contentBlocks,
      toolCalls: buildToolCallSummaries(context),
      chatId: context.chatId,
      conversationId: context.conversationId,
      errors: context.errors.length ? context.errors : undefined,
    }
    await options.onComplete?.(result)
    return result
  } catch (error) {
    const err = error instanceof Error ? error : new Error('Copilot orchestration failed')
    logger.error('Copilot orchestration failed', { error: err.message })
    await options.onError?.(err)
    // Return whatever chat/conversation ids the stream established before
    // failing so the caller can still reference the partial session.
    return {
      success: false,
      content: '',
      contentBlocks: [],
      toolCalls: [],
      chatId: context.chatId,
      conversationId: context.conversationId,
      error: err.message,
    }
  }
}
|
||||
29
apps/sim/lib/copilot/orchestrator/persistence.ts
Normal file
29
apps/sim/lib/copilot/orchestrator/persistence.ts
Normal file
@@ -0,0 +1,29 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { REDIS_TOOL_CALL_PREFIX } from '@/lib/copilot/constants'
|
||||
import { getRedisClient } from '@/lib/core/config/redis'
|
||||
|
||||
const logger = createLogger('CopilotOrchestratorPersistence')
|
||||
|
||||
/**
|
||||
* Get a tool call confirmation status from Redis.
|
||||
*/
|
||||
export async function getToolConfirmation(toolCallId: string): Promise<{
|
||||
status: string
|
||||
message?: string
|
||||
timestamp?: string
|
||||
} | null> {
|
||||
const redis = getRedisClient()
|
||||
if (!redis) return null
|
||||
|
||||
try {
|
||||
const data = await redis.get(`${REDIS_TOOL_CALL_PREFIX}${toolCallId}`)
|
||||
if (!data) return null
|
||||
return JSON.parse(data) as { status: string; message?: string; timestamp?: string }
|
||||
} catch (error) {
|
||||
logger.error('Failed to read tool confirmation', {
|
||||
toolCallId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
return null
|
||||
}
|
||||
}
|
||||
102
apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts
Normal file
102
apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts
Normal file
@@ -0,0 +1,102 @@
|
||||
/**
 * @vitest-environment node
 *
 * Unit tests for the sse-handlers tool lifecycle: a tool_call event should
 * be executed exactly once via the (mocked) tool-executor, reported back
 * through onEvent as a tool_result, and deduplicated on redelivery.
 */

import { loggerMock } from '@sim/testing'
import { beforeEach, describe, expect, it, vi } from 'vitest'

vi.mock('@sim/logger', () => loggerMock)

// vi.hoisted lifts these mock fns above the vi.mock factory below, so the
// mocked module and the test assertions share the same instances.
const { executeToolServerSide, markToolComplete, isIntegrationTool, isToolAvailableOnSimSide } =
  vi.hoisted(() => ({
    executeToolServerSide: vi.fn(),
    markToolComplete: vi.fn(),
    isIntegrationTool: vi.fn().mockReturnValue(false),
    isToolAvailableOnSimSide: vi.fn().mockReturnValue(true),
  }))

vi.mock('@/lib/copilot/orchestrator/tool-executor', () => ({
  executeToolServerSide,
  markToolComplete,
  isIntegrationTool,
  isToolAvailableOnSimSide,
}))

// Imported after the mocks above so sse-handlers binds the mocked executor.
import { sseHandlers } from '@/lib/copilot/orchestrator/sse-handlers'
import type { ExecutionContext, StreamingContext } from '@/lib/copilot/orchestrator/types'

describe('sse-handlers tool lifecycle', () => {
  let context: StreamingContext
  let execContext: ExecutionContext

  beforeEach(() => {
    vi.clearAllMocks()
    // Fresh, empty streaming context for every test.
    context = {
      chatId: undefined,
      conversationId: undefined,
      messageId: 'msg-1',
      accumulatedContent: '',
      contentBlocks: [],
      toolCalls: new Map(),
      currentThinkingBlock: null,
      isInThinkingBlock: false,
      subAgentParentToolCallId: undefined,
      subAgentContent: {},
      subAgentToolCalls: {},
      pendingContent: '',
      streamComplete: false,
      wasAborted: false,
      errors: [],
    }
    execContext = {
      userId: 'user-1',
      workflowId: 'workflow-1',
    }
  })

  it('executes tool_call and emits tool_result + mark-complete', async () => {
    executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } })
    markToolComplete.mockResolvedValueOnce(true)
    const onEvent = vi.fn()

    await sseHandlers.tool_call(
      {
        type: 'tool_call',
        data: { id: 'tool-1', name: 'get_user_workflow', arguments: { workflowId: 'workflow-1' } },
      } as any,
      context,
      execContext,
      { onEvent, interactive: false, timeout: 1000 }
    )

    expect(executeToolServerSide).toHaveBeenCalledTimes(1)
    expect(markToolComplete).toHaveBeenCalledTimes(1)
    expect(onEvent).toHaveBeenCalledWith(
      expect.objectContaining({
        type: 'tool_result',
        toolCallId: 'tool-1',
        success: true,
      })
    )

    // The context's tool-call record should reflect the completed execution.
    const updated = context.toolCalls.get('tool-1')
    expect(updated?.status).toBe('success')
    expect(updated?.result?.output).toEqual({ ok: true })
  })

  it('skips duplicate tool_call after result', async () => {
    executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } })
    markToolComplete.mockResolvedValueOnce(true)

    const event = {
      type: 'tool_call',
      data: { id: 'tool-dup', name: 'get_user_workflow', arguments: { workflowId: 'workflow-1' } },
    }

    // Second delivery of the same event must be a no-op (dedupe after result).
    await sseHandlers.tool_call(event as any, context, execContext, { interactive: false })
    await sseHandlers.tool_call(event as any, context, execContext, { interactive: false })

    expect(executeToolServerSide).toHaveBeenCalledTimes(1)
    expect(markToolComplete).toHaveBeenCalledTimes(1)
  })
})
|
||||
635
apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts
Normal file
635
apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts
Normal file
@@ -0,0 +1,635 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { STREAM_TIMEOUT_MS } from '@/lib/copilot/constants'
|
||||
import { RESPOND_TOOL_SET, SUBAGENT_TOOL_SET } from '@/lib/copilot/orchestrator/config'
|
||||
import {
|
||||
asRecord,
|
||||
getEventData,
|
||||
markToolResultSeen,
|
||||
wasToolResultSeen,
|
||||
} from '@/lib/copilot/orchestrator/sse-utils'
|
||||
import {
|
||||
isIntegrationTool,
|
||||
isToolAvailableOnSimSide,
|
||||
markToolComplete,
|
||||
} from '@/lib/copilot/orchestrator/tool-executor'
|
||||
import type {
|
||||
ContentBlock,
|
||||
ExecutionContext,
|
||||
OrchestratorOptions,
|
||||
SSEEvent,
|
||||
StreamingContext,
|
||||
ToolCallState,
|
||||
} from '@/lib/copilot/orchestrator/types'
|
||||
import {
|
||||
executeToolAndReport,
|
||||
isInterruptToolName,
|
||||
waitForToolCompletion,
|
||||
waitForToolDecision,
|
||||
} from './tool-execution'
|
||||
|
||||
const logger = createLogger('CopilotSseHandlers')
|
||||
|
||||
/**
 * Run tools that can be executed client-side for real-time feedback
 * (block pulsing, logs, stop button). When interactive, the server defers
 * execution to the browser client instead of running executeWorkflow
 * directly, and polls Redis for the client's completion signal.
 */
const CLIENT_EXECUTABLE_RUN_TOOLS = new Set([
  'run_workflow',
  'run_workflow_until_block',
  'run_from_block',
  'run_block',
])
|
||||
|
||||
// Normalization + dedupe helpers live in sse-utils to keep server/client in sync.
|
||||
|
||||
function inferToolSuccess(data: Record<string, unknown> | undefined): {
|
||||
success: boolean
|
||||
hasResultData: boolean
|
||||
hasError: boolean
|
||||
} {
|
||||
const resultObj = asRecord(data?.result)
|
||||
const hasExplicitSuccess = data?.success !== undefined || resultObj.success !== undefined
|
||||
const explicitSuccess = data?.success ?? resultObj.success
|
||||
const hasResultData = data?.result !== undefined || data?.data !== undefined
|
||||
const hasError = !!data?.error || !!resultObj.error
|
||||
const success = hasExplicitSuccess ? !!explicitSuccess : hasResultData && !hasError
|
||||
return { success, hasResultData, hasError }
|
||||
}
|
||||
|
||||
/**
 * Signature shared by every SSE event handler: the parsed event, the
 * streaming context being accumulated, the execution context (user/workflow
 * scope), and the orchestrator options. Handlers may be sync or async.
 */
export type SSEHandler = (
  event: SSEEvent,
  context: StreamingContext,
  execContext: ExecutionContext,
  options: OrchestratorOptions
) => void | Promise<void>

/** Appends a content block to the stream, stamped with the current time. */
function addContentBlock(context: StreamingContext, block: Omit<ContentBlock, 'timestamp'>): void {
  context.contentBlocks.push({
    ...block,
    timestamp: Date.now(),
  })
}
|
||||
|
||||
/**
 * Top-level SSE event handlers, keyed by event type. Each handler mutates
 * the StreamingContext in place; tool_call additionally drives server-side
 * tool execution and the interactive approval flow.
 */
export const sseHandlers: Record<string, SSEHandler> = {
  // Server assigned (or confirmed) the chat id for this stream.
  chat_id: (event, context) => {
    context.chatId = asRecord(event.data).chatId as string | undefined
  },
  // Intentionally ignored by the orchestrator; nothing to accumulate.
  title_updated: () => {},
  // Result for a previously-announced tool call: record status/output/error.
  tool_result: (event, context) => {
    const data = getEventData(event)
    const toolCallId = event.toolCallId || (data?.id as string | undefined)
    if (!toolCallId) return
    const current = context.toolCalls.get(toolCallId)
    if (!current) return

    const { success, hasResultData, hasError } = inferToolSuccess(data)

    current.status = success ? 'success' : 'error'
    current.endTime = Date.now()
    if (hasResultData) {
      current.result = {
        success,
        output: data?.result || data?.data,
      }
    }
    if (hasError) {
      const resultObj = asRecord(data?.result)
      current.error = (data?.error || resultObj.error) as string | undefined
    }
  },
  // Explicit failure for a known tool call.
  tool_error: (event, context) => {
    const data = getEventData(event)
    const toolCallId = event.toolCallId || (data?.id as string | undefined)
    if (!toolCallId) return
    const current = context.toolCalls.get(toolCallId)
    if (!current) return
    current.status = 'error'
    current.error = (data?.error as string | undefined) || 'Tool execution failed'
    current.endTime = Date.now()
  },
  // Early announcement that a tool call is being generated: register a
  // pending entry so later events have something to attach to.
  tool_generating: (event, context) => {
    const data = getEventData(event)
    const toolCallId =
      event.toolCallId ||
      (data?.toolCallId as string | undefined) ||
      (data?.id as string | undefined)
    const toolName =
      event.toolName || (data?.toolName as string | undefined) || (data?.name as string | undefined)
    if (!toolCallId || !toolName) return
    if (!context.toolCalls.has(toolCallId)) {
      context.toolCalls.set(toolCallId, {
        id: toolCallId,
        name: toolName,
        status: 'pending',
        startTime: Date.now(),
      })
    }
  },
  // Full tool-call event: register/complete the call record, then decide
  // whether to skip (subagent/respond tools), request user approval
  // (interrupt/integration tools in interactive mode), or auto-execute.
  tool_call: async (event, context, execContext, options) => {
    const toolData = getEventData(event) || ({} as Record<string, unknown>)
    const toolCallId = (toolData.id as string | undefined) || event.toolCallId
    const toolName = (toolData.name as string | undefined) || event.toolName
    if (!toolCallId || !toolName) return

    const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as
      | Record<string, unknown>
      | undefined
    const isPartial = toolData.partial === true
    const existing = context.toolCalls.get(toolCallId)

    // If we've already completed this tool call, ignore late/duplicate tool_call events
    // to avoid resetting UI/state back to pending and re-executing.
    if (
      existing?.endTime ||
      (existing && existing.status !== 'pending' && existing.status !== 'executing')
    ) {
      if (!existing.params && args) {
        existing.params = args
      }
      return
    }

    if (existing) {
      if (args && !existing.params) existing.params = args
    } else {
      context.toolCalls.set(toolCallId, {
        id: toolCallId,
        name: toolName,
        status: 'pending',
        params: args,
        startTime: Date.now(),
      })
      const created = context.toolCalls.get(toolCallId)!
      addContentBlock(context, { type: 'tool_call', toolCall: created })
    }

    // Partial events only carry incremental arguments; don't execute yet.
    if (isPartial) return
    if (wasToolResultSeen(toolCallId)) return

    const toolCall = context.toolCalls.get(toolCallId)
    if (!toolCall) return

    // Subagent tools are executed by the copilot backend, not sim side.
    if (SUBAGENT_TOOL_SET.has(toolName)) {
      return
    }

    // Respond tools are internal to copilot's subagent system - skip execution.
    // The copilot backend handles these internally to signal subagent completion.
    if (RESPOND_TOOL_SET.has(toolName)) {
      toolCall.status = 'success'
      toolCall.endTime = Date.now()
      toolCall.result = {
        success: true,
        output: 'Internal respond tool - handled by copilot backend',
      }
      return
    }

    const isInterruptTool = isInterruptToolName(toolName)
    const isInteractive = options.interactive === true
    // Integration tools (user-installed) also require approval in interactive mode
    const needsApproval = isInterruptTool || isIntegrationTool(toolName)

    if (needsApproval && isInteractive) {
      const decision = await waitForToolDecision(
        toolCallId,
        options.timeout || STREAM_TIMEOUT_MS,
        options.abortSignal
      )
      if (decision?.status === 'accepted' || decision?.status === 'success') {
        // Client-executable run tools: defer execution to the browser client.
        // The client calls executeWorkflowWithFullLogging for real-time feedback
        // (block pulsing, logs, stop button) and reports completion via
        // /api/copilot/confirm with status success/error. We poll Redis for
        // that completion signal, then fire-and-forget markToolComplete to Go.
        if (CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName)) {
          toolCall.status = 'executing'
          const completion = await waitForToolCompletion(
            toolCallId,
            options.timeout || STREAM_TIMEOUT_MS,
            options.abortSignal
          )
          if (completion?.status === 'background') {
            toolCall.status = 'skipped'
            toolCall.endTime = Date.now()
            markToolComplete(
              toolCall.id,
              toolCall.name,
              202,
              completion.message || 'Tool execution moved to background',
              { background: true }
            ).catch((err) => {
              logger.error('markToolComplete fire-and-forget failed (run tool background)', {
                toolCallId: toolCall.id,
                error: err instanceof Error ? err.message : String(err),
              })
            })
            markToolResultSeen(toolCallId)
            return
          }
          const success = completion?.status === 'success'
          toolCall.status = success ? 'success' : 'error'
          toolCall.endTime = Date.now()
          const msg =
            completion?.message || (success ? 'Tool completed' : 'Tool failed or timed out')
          // Fire-and-forget: tell Go backend the tool is done
          // (must NOT await — see deadlock note in executeToolAndReport)
          markToolComplete(toolCall.id, toolCall.name, success ? 200 : 500, msg).catch((err) => {
            logger.error('markToolComplete fire-and-forget failed (run tool)', {
              toolCallId: toolCall.id,
              toolName: toolCall.name,
              error: err instanceof Error ? err.message : String(err),
            })
          })
          markToolResultSeen(toolCallId)
          return
        }
        await executeToolAndReport(toolCallId, context, execContext, options)
        return
      }

      if (decision?.status === 'rejected' || decision?.status === 'error') {
        toolCall.status = 'rejected'
        toolCall.endTime = Date.now()
        // Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport
        markToolComplete(
          toolCall.id,
          toolCall.name,
          400,
          decision.message || 'Tool execution rejected',
          { skipped: true, reason: 'user_rejected' }
        ).catch((err) => {
          logger.error('markToolComplete fire-and-forget failed (rejected)', {
            toolCallId: toolCall.id,
            error: err instanceof Error ? err.message : String(err),
          })
        })
        markToolResultSeen(toolCall.id)
        return
      }

      if (decision?.status === 'background') {
        toolCall.status = 'skipped'
        toolCall.endTime = Date.now()
        // Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport
        markToolComplete(
          toolCall.id,
          toolCall.name,
          202,
          decision.message || 'Tool execution moved to background',
          { background: true }
        ).catch((err) => {
          logger.error('markToolComplete fire-and-forget failed (background)', {
            toolCallId: toolCall.id,
            error: err instanceof Error ? err.message : String(err),
          })
        })
        markToolResultSeen(toolCall.id)
        return
      }

      // Decision was null — timed out or aborted.
      // Do NOT fall through to auto-execute. Mark the tool as timed out
      // and notify Go so it can unblock waitForExternalTool.
      toolCall.status = 'rejected'
      toolCall.endTime = Date.now()
      markToolComplete(toolCall.id, toolCall.name, 408, 'Tool approval timed out', {
        skipped: true,
        reason: 'timeout',
      }).catch((err) => {
        logger.error('markToolComplete fire-and-forget failed (timeout)', {
          toolCallId: toolCall.id,
          error: err instanceof Error ? err.message : String(err),
        })
      })
      markToolResultSeen(toolCall.id)
      return
    }

    if (options.autoExecuteTools !== false) {
      await executeToolAndReport(toolCallId, context, execContext, options)
    }
  },
  // Reasoning stream: 'start' opens a thinking block, 'end' commits it to
  // contentBlocks, anything else appends a content chunk to the open block.
  reasoning: (event, context) => {
    const d = asRecord(event.data)
    const phase = d.phase || asRecord(d.data).phase
    if (phase === 'start') {
      context.isInThinkingBlock = true
      context.currentThinkingBlock = {
        type: 'thinking',
        content: '',
        timestamp: Date.now(),
      }
      return
    }
    if (phase === 'end') {
      if (context.currentThinkingBlock) {
        context.contentBlocks.push(context.currentThinkingBlock)
      }
      context.isInThinkingBlock = false
      context.currentThinkingBlock = null
      return
    }
    const chunk = (d.data || d.content || event.content) as string | undefined
    if (!chunk || !context.currentThinkingBlock) return
    context.currentThinkingBlock.content = `${context.currentThinkingBlock.content || ''}${chunk}`
  },
  // Assistant text chunk: accumulate and record as a text content block.
  content: (event, context) => {
    // Go backend sends content as a plain string in event.data, not wrapped in an object.
    let chunk: string | undefined
    if (typeof event.data === 'string') {
      chunk = event.data
    } else {
      const d = asRecord(event.data)
      chunk = (d.content || d.data || event.content) as string | undefined
    }
    if (!chunk) return
    context.accumulatedContent += chunk
    addContentBlock(context, { type: 'text', content: chunk })
  },
  // Stream finished; capture the conversation id when provided.
  done: (event, context) => {
    const d = asRecord(event.data)
    if (d.responseId) {
      context.conversationId = d.responseId as string
    }
    context.streamComplete = true
  },
  // Stream opened; capture the conversation id when provided.
  start: (event, context) => {
    const d = asRecord(event.data)
    if (d.responseId) {
      context.conversationId = d.responseId as string
    }
  },
  // Stream-level error: record it and end the stream.
  error: (event, context) => {
    const d = asRecord(event.data)
    const message = (d.message || d.error || event.error) as string | undefined
    if (message) {
      context.errors.push(message)
    }
    context.streamComplete = true
  },
}
|
||||
|
||||
/**
 * SSE event handlers applied while a subagent stream is active (events are
 * routed here via handleSubagentRouting). Subagent output is attributed to the
 * parent tool call recorded in context.subAgentParentToolCallId: text chunks
 * accumulate in context.subAgentContent, and subagent tool calls are tracked
 * both in context.subAgentToolCalls (per parent) and in context.toolCalls
 * (where execution/approval machinery looks them up).
 */
export const subAgentHandlers: Record<string, SSEHandler> = {
  // Streamed text from the subagent. Appends to the per-parent transcript and
  // emits a 'subagent_text' content block.
  content: (event, context) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId || !event.data) return
    // Go backend sends content as a plain string in event.data
    let chunk: string | undefined
    if (typeof event.data === 'string') {
      chunk = event.data
    } else {
      const d = asRecord(event.data)
      chunk = (d.content || d.data || event.content) as string | undefined
    }
    if (!chunk) return
    context.subAgentContent[parentToolCallId] =
      (context.subAgentContent[parentToolCallId] || '') + chunk
    addContentBlock(context, { type: 'subagent_text', content: chunk })
  },
  // A tool call emitted by the subagent. Registers it, then decides between:
  // internal respond tools (synthesized success), Go-only tools (no-op, Go
  // reports its own result), approval-gated tools (poll for a user decision),
  // client-executable run tools (wait for the client to finish), or plain
  // server-side auto-execution.
  tool_call: async (event, context, execContext, options) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) return
    const toolData = getEventData(event) || ({} as Record<string, unknown>)
    const toolCallId = (toolData.id as string | undefined) || event.toolCallId
    const toolName = (toolData.name as string | undefined) || event.toolName
    if (!toolCallId || !toolName) return
    const isPartial = toolData.partial === true
    const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as
      | Record<string, unknown>
      | undefined

    const existing = context.toolCalls.get(toolCallId)
    // Ignore late/duplicate tool_call events once we already have a result.
    if (wasToolResultSeen(toolCallId) || existing?.endTime) {
      return
    }

    const toolCall: ToolCallState = {
      id: toolCallId,
      name: toolName,
      status: 'pending',
      params: args,
      startTime: Date.now(),
    }

    // Store in both places - but do NOT overwrite existing tool call state for the same id.
    // NOTE(review): when a partial event already registered this id, the map keeps
    // the OLD object, while the status/result mutations below land on the NEW
    // local `toolCall` (and the final, complete args are dropped) — confirm
    // this is intended.
    if (!context.subAgentToolCalls[parentToolCallId]) {
      context.subAgentToolCalls[parentToolCallId] = []
    }
    if (!context.subAgentToolCalls[parentToolCallId].some((tc) => tc.id === toolCallId)) {
      context.subAgentToolCalls[parentToolCallId].push(toolCall)
    }
    if (!context.toolCalls.has(toolCallId)) {
      context.toolCalls.set(toolCallId, toolCall)
    }

    // Partial events only stream arguments; wait for the final event to act.
    if (isPartial) return

    // Respond tools are internal to copilot's subagent system - skip execution.
    if (RESPOND_TOOL_SET.has(toolName)) {
      toolCall.status = 'success'
      toolCall.endTime = Date.now()
      toolCall.result = {
        success: true,
        output: 'Internal respond tool - handled by copilot backend',
      }
      return
    }

    // Tools that only exist on the Go backend (e.g. search_patterns,
    // search_errors, remember_debug) should NOT be re-executed on the Sim side.
    // The Go backend already executed them and will send its own tool_result
    // SSE event with the real outcome. Trying to execute them here would fail
    // with "Tool not found" and incorrectly mark the tool as failed.
    if (!isToolAvailableOnSimSide(toolName)) {
      return
    }

    // Interrupt tools and integration tools (user-installed) require approval
    // in interactive mode, same as top-level handler.
    const needsSubagentApproval = isInterruptToolName(toolName) || isIntegrationTool(toolName)
    if (options.interactive === true && needsSubagentApproval) {
      const decision = await waitForToolDecision(
        toolCallId,
        options.timeout || STREAM_TIMEOUT_MS,
        options.abortSignal
      )
      if (decision?.status === 'accepted' || decision?.status === 'success') {
        await executeToolAndReport(toolCallId, context, execContext, options)
        return
      }
      if (decision?.status === 'rejected' || decision?.status === 'error') {
        toolCall.status = 'rejected'
        toolCall.endTime = Date.now()
        // Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport
        markToolComplete(
          toolCall.id,
          toolCall.name,
          400,
          decision.message || 'Tool execution rejected',
          { skipped: true, reason: 'user_rejected' }
        ).catch((err) => {
          logger.error('markToolComplete fire-and-forget failed (subagent rejected)', {
            toolCallId: toolCall.id,
            error: err instanceof Error ? err.message : String(err),
          })
        })
        markToolResultSeen(toolCall.id)
        return
      }
      if (decision?.status === 'background') {
        toolCall.status = 'skipped'
        toolCall.endTime = Date.now()
        // Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport
        markToolComplete(
          toolCall.id,
          toolCall.name,
          202,
          decision.message || 'Tool execution moved to background',
          { background: true }
        ).catch((err) => {
          logger.error('markToolComplete fire-and-forget failed (subagent background)', {
            toolCallId: toolCall.id,
            error: err instanceof Error ? err.message : String(err),
          })
        })
        markToolResultSeen(toolCall.id)
        return
      }

      // Decision was null — timed out or aborted.
      // Do NOT fall through to auto-execute.
      toolCall.status = 'rejected'
      toolCall.endTime = Date.now()
      markToolComplete(toolCall.id, toolCall.name, 408, 'Tool approval timed out', {
        skipped: true,
        reason: 'timeout',
      }).catch((err) => {
        logger.error('markToolComplete fire-and-forget failed (subagent timeout)', {
          toolCallId: toolCall.id,
          error: err instanceof Error ? err.message : String(err),
        })
      })
      markToolResultSeen(toolCall.id)
      return
    }

    // Client-executable run tools in interactive mode: defer to client.
    // Same pattern as main handler: wait for client completion, then tell Go.
    if (options.interactive === true && CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName)) {
      toolCall.status = 'executing'
      const completion = await waitForToolCompletion(
        toolCallId,
        options.timeout || STREAM_TIMEOUT_MS,
        options.abortSignal
      )
      if (completion?.status === 'rejected') {
        toolCall.status = 'rejected'
        toolCall.endTime = Date.now()
        markToolComplete(
          toolCall.id,
          toolCall.name,
          400,
          completion.message || 'Tool execution rejected'
        ).catch((err) => {
          logger.error('markToolComplete fire-and-forget failed (subagent run tool rejected)', {
            toolCallId: toolCall.id,
            error: err instanceof Error ? err.message : String(err),
          })
        })
        markToolResultSeen(toolCallId)
        return
      }
      if (completion?.status === 'background') {
        toolCall.status = 'skipped'
        toolCall.endTime = Date.now()
        markToolComplete(
          toolCall.id,
          toolCall.name,
          202,
          completion.message || 'Tool execution moved to background',
          { background: true }
        ).catch((err) => {
          logger.error('markToolComplete fire-and-forget failed (subagent run tool background)', {
            toolCallId: toolCall.id,
            error: err instanceof Error ? err.message : String(err),
          })
        })
        markToolResultSeen(toolCallId)
        return
      }
      // Any other outcome (success / error / null timeout) is terminal.
      const success = completion?.status === 'success'
      toolCall.status = success ? 'success' : 'error'
      toolCall.endTime = Date.now()
      const msg = completion?.message || (success ? 'Tool completed' : 'Tool failed or timed out')
      markToolComplete(toolCall.id, toolCall.name, success ? 200 : 500, msg).catch((err) => {
        logger.error('markToolComplete fire-and-forget failed (subagent run tool)', {
          toolCallId: toolCall.id,
          toolName: toolCall.name,
          error: err instanceof Error ? err.message : String(err),
        })
      })
      markToolResultSeen(toolCallId)
      return
    }

    // Default path: execute server-side unless auto-execution is disabled.
    if (options.autoExecuteTools !== false) {
      await executeToolAndReport(toolCallId, context, execContext, options)
    }
  },
  // A result reported by the Go backend for a subagent tool call. Mirrors
  // status/result/error onto both tracking structures.
  tool_result: (event, context) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) return
    const data = getEventData(event)
    const toolCallId = event.toolCallId || (data?.id as string | undefined)
    if (!toolCallId) return

    // Update in subAgentToolCalls.
    const toolCalls = context.subAgentToolCalls[parentToolCallId] || []
    const subAgentToolCall = toolCalls.find((tc) => tc.id === toolCallId)

    // Also update in main toolCalls (where we added it for execution).
    const mainToolCall = context.toolCalls.get(toolCallId)

    const { success, hasResultData, hasError } = inferToolSuccess(data)

    const status = success ? 'success' : 'error'
    const endTime = Date.now()
    const result = hasResultData ? { success, output: data?.result || data?.data } : undefined

    if (subAgentToolCall) {
      subAgentToolCall.status = status
      subAgentToolCall.endTime = endTime
      if (result) subAgentToolCall.result = result
      if (hasError) {
        const resultObj = asRecord(data?.result)
        subAgentToolCall.error = (data?.error || resultObj.error) as string | undefined
      }
    }

    if (mainToolCall) {
      mainToolCall.status = status
      mainToolCall.endTime = endTime
      if (result) mainToolCall.result = result
      if (hasError) {
        const resultObj = asRecord(data?.result)
        mainToolCall.error = (data?.error || resultObj.error) as string | undefined
      }
    }
  },
}
|
||||
|
||||
export function handleSubagentRouting(event: SSEEvent, context: StreamingContext): boolean {
|
||||
if (!event.subagent) return false
|
||||
if (!context.subAgentParentToolCallId) {
|
||||
logger.warn('Subagent event missing parent tool call', {
|
||||
type: event.type,
|
||||
subagent: event.subagent,
|
||||
})
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
2
apps/sim/lib/copilot/orchestrator/sse-handlers/index.ts
Normal file
2
apps/sim/lib/copilot/orchestrator/sse-handlers/index.ts
Normal file
@@ -0,0 +1,2 @@
|
||||
export type { SSEHandler } from './handlers'
|
||||
export { handleSubagentRouting, sseHandlers, subAgentHandlers } from './handlers'
|
||||
184
apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts
Normal file
184
apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts
Normal file
@@ -0,0 +1,184 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import {
|
||||
TOOL_DECISION_INITIAL_POLL_MS,
|
||||
TOOL_DECISION_MAX_POLL_MS,
|
||||
TOOL_DECISION_POLL_BACKOFF,
|
||||
} from '@/lib/copilot/constants'
|
||||
import { INTERRUPT_TOOL_SET } from '@/lib/copilot/orchestrator/config'
|
||||
import { getToolConfirmation } from '@/lib/copilot/orchestrator/persistence'
|
||||
import {
|
||||
asRecord,
|
||||
markToolResultSeen,
|
||||
wasToolResultSeen,
|
||||
} from '@/lib/copilot/orchestrator/sse-utils'
|
||||
import { executeToolServerSide, markToolComplete } from '@/lib/copilot/orchestrator/tool-executor'
|
||||
import type {
|
||||
ExecutionContext,
|
||||
OrchestratorOptions,
|
||||
SSEEvent,
|
||||
StreamingContext,
|
||||
} from '@/lib/copilot/orchestrator/types'
|
||||
|
||||
const logger = createLogger('CopilotSseToolExecution')
|
||||
|
||||
export function isInterruptToolName(toolName: string): boolean {
|
||||
return INTERRUPT_TOOL_SET.has(toolName)
|
||||
}
|
||||
|
||||
/**
 * Executes a pending tool call server-side and reports the outcome.
 *
 * Flow: look up the ToolCallState by id, run it via executeToolServerSide,
 * record status/result/timing on the state object, mark the result as seen
 * (dedupe against later SSE tool_result events), notify the copilot backend
 * via markToolComplete (fire-and-forget — see deadlock note below), and
 * finally surface a tool_result / tool_error event through options.onEvent.
 *
 * No-ops when the tool call id is unknown, already executing, or already has
 * a reported result.
 *
 * @param toolCallId  id of the tool call registered in context.toolCalls
 * @param context     streaming context holding tool call state
 * @param execContext execution context; may be mutated (workflowId/workspaceId)
 * @param options     optional orchestrator options (onEvent callback)
 */
export async function executeToolAndReport(
  toolCallId: string,
  context: StreamingContext,
  execContext: ExecutionContext,
  options?: OrchestratorOptions
): Promise<void> {
  const toolCall = context.toolCalls.get(toolCallId)
  if (!toolCall) return

  // Guard against re-entry and against duplicate execution after a result.
  if (toolCall.status === 'executing') return
  if (wasToolResultSeen(toolCall.id)) return

  toolCall.status = 'executing'
  try {
    const result = await executeToolServerSide(toolCall, execContext)
    toolCall.status = result.success ? 'success' : 'error'
    toolCall.result = result
    toolCall.error = result.error
    toolCall.endTime = Date.now()

    // If create_workflow was successful, update the execution context with the new workflowId.
    // This ensures subsequent tools in the same stream have access to the workflowId.
    const output = asRecord(result.output)
    if (
      toolCall.name === 'create_workflow' &&
      result.success &&
      output.workflowId &&
      !execContext.workflowId
    ) {
      execContext.workflowId = output.workflowId as string
      if (output.workspaceId) {
        execContext.workspaceId = output.workspaceId as string
      }
    }

    markToolResultSeen(toolCall.id)

    // Fire-and-forget: notify the copilot backend that the tool completed.
    // IMPORTANT: We must NOT await this — the Go backend may block on the
    // mark-complete handler until it can write back on the SSE stream, but
    // the SSE reader (our for-await loop) is paused while we're in this
    // handler. Awaiting here would deadlock: sim waits for Go's response,
    // Go waits for sim to drain the SSE stream.
    markToolComplete(
      toolCall.id,
      toolCall.name,
      result.success ? 200 : 500,
      result.error || (result.success ? 'Tool completed' : 'Tool failed'),
      result.output
    ).catch((err) => {
      logger.error('markToolComplete fire-and-forget failed', {
        toolCallId: toolCall.id,
        toolName: toolCall.name,
        error: err instanceof Error ? err.message : String(err),
      })
    })

    // Surface the result to the caller; tool metadata is duplicated in
    // event.data so consumers of either shape can read it.
    const resultEvent: SSEEvent = {
      type: 'tool_result',
      toolCallId: toolCall.id,
      toolName: toolCall.name,
      success: result.success,
      result: result.output,
      data: {
        id: toolCall.id,
        name: toolCall.name,
        success: result.success,
        result: result.output,
      },
    }
    await options?.onEvent?.(resultEvent)
  } catch (error) {
    // Unexpected failure inside executeToolServerSide itself.
    toolCall.status = 'error'
    toolCall.error = error instanceof Error ? error.message : String(error)
    toolCall.endTime = Date.now()

    markToolResultSeen(toolCall.id)

    // Fire-and-forget (same reasoning as above).
    markToolComplete(toolCall.id, toolCall.name, 500, toolCall.error).catch((err) => {
      logger.error('markToolComplete fire-and-forget failed', {
        toolCallId: toolCall.id,
        toolName: toolCall.name,
        error: err instanceof Error ? err.message : String(err),
      })
    })

    const errorEvent: SSEEvent = {
      type: 'tool_error',
      toolCallId: toolCall.id,
      data: {
        id: toolCall.id,
        name: toolCall.name,
        error: toolCall.error,
      },
    }
    await options?.onEvent?.(errorEvent)
  }
}
|
||||
|
||||
export async function waitForToolDecision(
|
||||
toolCallId: string,
|
||||
timeoutMs: number,
|
||||
abortSignal?: AbortSignal
|
||||
): Promise<{ status: string; message?: string } | null> {
|
||||
const start = Date.now()
|
||||
let interval = TOOL_DECISION_INITIAL_POLL_MS
|
||||
const maxInterval = TOOL_DECISION_MAX_POLL_MS
|
||||
while (Date.now() - start < timeoutMs) {
|
||||
if (abortSignal?.aborted) return null
|
||||
const decision = await getToolConfirmation(toolCallId)
|
||||
if (decision?.status) {
|
||||
return decision
|
||||
}
|
||||
await new Promise((resolve) => setTimeout(resolve, interval))
|
||||
interval = Math.min(interval * TOOL_DECISION_POLL_BACKOFF, maxInterval)
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* Wait for a tool completion signal (success/error/rejected) from the client.
|
||||
* Unlike waitForToolDecision which returns on any status, this ignores the
|
||||
* initial 'accepted' status and only returns on terminal statuses:
|
||||
* - success: client finished executing successfully
|
||||
* - error: client execution failed
|
||||
* - rejected: user clicked Skip (subagent run tools where user hasn't auto-allowed)
|
||||
*
|
||||
* Used for client-executable run tools: the client executes the workflow
|
||||
* and posts success/error to /api/copilot/confirm when done. The server
|
||||
* polls here until that completion signal arrives.
|
||||
*/
|
||||
export async function waitForToolCompletion(
|
||||
toolCallId: string,
|
||||
timeoutMs: number,
|
||||
abortSignal?: AbortSignal
|
||||
): Promise<{ status: string; message?: string } | null> {
|
||||
const start = Date.now()
|
||||
let interval = TOOL_DECISION_INITIAL_POLL_MS
|
||||
const maxInterval = TOOL_DECISION_MAX_POLL_MS
|
||||
while (Date.now() - start < timeoutMs) {
|
||||
if (abortSignal?.aborted) return null
|
||||
const decision = await getToolConfirmation(toolCallId)
|
||||
// Return on completion/terminal statuses, not intermediate 'accepted'
|
||||
if (
|
||||
decision?.status === 'success' ||
|
||||
decision?.status === 'error' ||
|
||||
decision?.status === 'rejected' ||
|
||||
decision?.status === 'background'
|
||||
) {
|
||||
return decision
|
||||
}
|
||||
await new Promise((resolve) => setTimeout(resolve, interval))
|
||||
interval = Math.min(interval * TOOL_DECISION_POLL_BACKOFF, maxInterval)
|
||||
}
|
||||
return null
|
||||
}
|
||||
71
apps/sim/lib/copilot/orchestrator/sse-parser.ts
Normal file
71
apps/sim/lib/copilot/orchestrator/sse-parser.ts
Normal file
@@ -0,0 +1,71 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
|
||||
|
||||
const logger = createLogger('CopilotSseParser')
|
||||
|
||||
/**
|
||||
* Parses SSE streams from the copilot backend into typed events.
|
||||
*/
|
||||
export async function* parseSSEStream(
|
||||
reader: ReadableStreamDefaultReader<Uint8Array>,
|
||||
decoder: TextDecoder,
|
||||
abortSignal?: AbortSignal
|
||||
): AsyncGenerator<SSEEvent> {
|
||||
let buffer = ''
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
if (abortSignal?.aborted) {
|
||||
logger.info('SSE stream aborted by signal')
|
||||
break
|
||||
}
|
||||
|
||||
const { done, value } = await reader.read()
|
||||
if (done) break
|
||||
|
||||
buffer += decoder.decode(value, { stream: true })
|
||||
const lines = buffer.split('\n')
|
||||
buffer = lines.pop() || ''
|
||||
|
||||
for (const line of lines) {
|
||||
if (!line.trim()) continue
|
||||
if (!line.startsWith('data: ')) continue
|
||||
|
||||
const jsonStr = line.slice(6)
|
||||
if (jsonStr === '[DONE]') continue
|
||||
|
||||
try {
|
||||
const event = JSON.parse(jsonStr) as SSEEvent
|
||||
if (event?.type) {
|
||||
yield event
|
||||
}
|
||||
} catch (error) {
|
||||
logger.warn('Failed to parse SSE event', {
|
||||
preview: jsonStr.slice(0, 200),
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (buffer.trim() && buffer.startsWith('data: ')) {
|
||||
try {
|
||||
const event = JSON.parse(buffer.slice(6)) as SSEEvent
|
||||
if (event?.type) {
|
||||
yield event
|
||||
}
|
||||
} catch (error) {
|
||||
logger.warn('Failed to parse final SSE buffer', {
|
||||
preview: buffer.slice(0, 200),
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
try {
|
||||
reader.releaseLock()
|
||||
} catch {
|
||||
logger.warn('Failed to release SSE reader lock')
|
||||
}
|
||||
}
|
||||
}
|
||||
42
apps/sim/lib/copilot/orchestrator/sse-utils.test.ts
Normal file
42
apps/sim/lib/copilot/orchestrator/sse-utils.test.ts
Normal file
@@ -0,0 +1,42 @@
|
||||
/**
|
||||
* @vitest-environment node
|
||||
*/
|
||||
import { describe, expect, it } from 'vitest'
|
||||
import {
|
||||
normalizeSseEvent,
|
||||
shouldSkipToolCallEvent,
|
||||
shouldSkipToolResultEvent,
|
||||
} from '@/lib/copilot/orchestrator/sse-utils'
|
||||
|
||||
// NOTE: the dedupe tests below rely on module-level Set state inside
// sse-utils (seen ids persist for the life of the module), which is why the
// second identical call is expected to be skipped.
describe('sse-utils', () => {
  it.concurrent('normalizes tool fields from string data', () => {
    // event.data arrives as a JSON string, as the Go backend sends it.
    const event = {
      type: 'tool_result',
      data: JSON.stringify({
        id: 'tool_1',
        name: 'edit_workflow',
        success: true,
        result: { ok: true },
      }),
    }

    const normalized = normalizeSseEvent(event as any)

    // Tool metadata must be hoisted to the top level of the event.
    expect(normalized.toolCallId).toBe('tool_1')
    expect(normalized.toolName).toBe('edit_workflow')
    expect(normalized.success).toBe(true)
    expect(normalized.result).toEqual({ ok: true })
  })

  it.concurrent('dedupes tool_call events', () => {
    const event = { type: 'tool_call', data: { id: 'tool_call_1', name: 'plan' } }
    // First sighting is processed; an identical re-delivery is skipped.
    expect(shouldSkipToolCallEvent(event as any)).toBe(false)
    expect(shouldSkipToolCallEvent(event as any)).toBe(true)
  })

  it.concurrent('dedupes tool_result events', () => {
    const event = { type: 'tool_result', data: { id: 'tool_result_1', name: 'plan' } }
    expect(shouldSkipToolResultEvent(event as any)).toBe(false)
    expect(shouldSkipToolResultEvent(event as any)).toBe(true)
  })
})
|
||||
124
apps/sim/lib/copilot/orchestrator/sse-utils.ts
Normal file
124
apps/sim/lib/copilot/orchestrator/sse-utils.ts
Normal file
@@ -0,0 +1,124 @@
|
||||
import { STREAM_BUFFER_MAX_DEDUP_ENTRIES } from '@/lib/copilot/constants'
|
||||
import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
|
||||
|
||||
type EventDataObject = Record<string, unknown> | undefined
|
||||
|
||||
/** Safely cast event.data to a record for property access. */
|
||||
export const asRecord = (data: unknown): Record<string, unknown> =>
|
||||
(data && typeof data === 'object' && !Array.isArray(data) ? data : {}) as Record<string, unknown>
|
||||
|
||||
/**
|
||||
* In-memory tool event dedupe with bounded size.
|
||||
*
|
||||
* NOTE: Process-local only. In a multi-instance setup (e.g., ECS),
|
||||
* each task maintains its own dedupe cache.
|
||||
*/
|
||||
const seenToolCalls = new Set<string>()
|
||||
const seenToolResults = new Set<string>()
|
||||
|
||||
function addToSet(set: Set<string>, id: string): void {
|
||||
if (set.size >= STREAM_BUFFER_MAX_DEDUP_ENTRIES) {
|
||||
const first = set.values().next().value
|
||||
if (first) set.delete(first)
|
||||
}
|
||||
set.add(id)
|
||||
}
|
||||
|
||||
const parseEventData = (data: unknown): EventDataObject => {
|
||||
if (!data) return undefined
|
||||
if (typeof data !== 'string') {
|
||||
return data as EventDataObject
|
||||
}
|
||||
try {
|
||||
return JSON.parse(data) as EventDataObject
|
||||
} catch {
|
||||
return undefined
|
||||
}
|
||||
}
|
||||
|
||||
const hasToolFields = (data: EventDataObject): boolean => {
|
||||
if (!data) return false
|
||||
return (
|
||||
data.id !== undefined ||
|
||||
data.toolCallId !== undefined ||
|
||||
data.name !== undefined ||
|
||||
data.success !== undefined ||
|
||||
data.result !== undefined ||
|
||||
data.arguments !== undefined
|
||||
)
|
||||
}
|
||||
|
||||
export const getEventData = (event: SSEEvent): EventDataObject => {
|
||||
const topLevel = parseEventData(event.data)
|
||||
if (!topLevel) return undefined
|
||||
if (hasToolFields(topLevel)) return topLevel
|
||||
const nested = parseEventData(topLevel.data)
|
||||
return nested || topLevel
|
||||
}
|
||||
|
||||
function getToolCallIdFromEvent(event: SSEEvent): string | undefined {
|
||||
const data = getEventData(event)
|
||||
return (
|
||||
event.toolCallId || (data?.id as string | undefined) || (data?.toolCallId as string | undefined)
|
||||
)
|
||||
}
|
||||
|
||||
/** Normalizes SSE events so tool metadata is available at the top level. */
|
||||
export function normalizeSseEvent(event: SSEEvent): SSEEvent {
|
||||
if (!event) return event
|
||||
const data = getEventData(event)
|
||||
if (!data) return event
|
||||
const toolCallId =
|
||||
event.toolCallId || (data.id as string | undefined) || (data.toolCallId as string | undefined)
|
||||
const toolName =
|
||||
event.toolName || (data.name as string | undefined) || (data.toolName as string | undefined)
|
||||
const success = event.success ?? (data.success as boolean | undefined)
|
||||
const result = event.result ?? data.result
|
||||
const normalizedData = typeof event.data === 'string' ? data : event.data
|
||||
return {
|
||||
...event,
|
||||
data: normalizedData,
|
||||
toolCallId,
|
||||
toolName,
|
||||
success,
|
||||
result,
|
||||
}
|
||||
}
|
||||
|
||||
function markToolCallSeen(toolCallId: string): void {
|
||||
addToSet(seenToolCalls, toolCallId)
|
||||
}
|
||||
|
||||
function wasToolCallSeen(toolCallId: string): boolean {
|
||||
return seenToolCalls.has(toolCallId)
|
||||
}
|
||||
|
||||
export function markToolResultSeen(toolCallId: string): void {
|
||||
addToSet(seenToolResults, toolCallId)
|
||||
}
|
||||
|
||||
export function wasToolResultSeen(toolCallId: string): boolean {
|
||||
return seenToolResults.has(toolCallId)
|
||||
}
|
||||
|
||||
export function shouldSkipToolCallEvent(event: SSEEvent): boolean {
|
||||
if (event.type !== 'tool_call') return false
|
||||
const toolCallId = getToolCallIdFromEvent(event)
|
||||
if (!toolCallId) return false
|
||||
const eventData = getEventData(event)
|
||||
if (eventData?.partial === true) return false
|
||||
if (wasToolResultSeen(toolCallId) || wasToolCallSeen(toolCallId)) {
|
||||
return true
|
||||
}
|
||||
markToolCallSeen(toolCallId)
|
||||
return false
|
||||
}
|
||||
|
||||
export function shouldSkipToolResultEvent(event: SSEEvent): boolean {
|
||||
if (event.type !== 'tool_result') return false
|
||||
const toolCallId = getToolCallIdFromEvent(event)
|
||||
if (!toolCallId) return false
|
||||
if (wasToolResultSeen(toolCallId)) return true
|
||||
markToolResultSeen(toolCallId)
|
||||
return false
|
||||
}
|
||||
119
apps/sim/lib/copilot/orchestrator/stream-buffer.test.ts
Normal file
119
apps/sim/lib/copilot/orchestrator/stream-buffer.test.ts
Normal file
@@ -0,0 +1,119 @@
|
||||
/**
|
||||
* @vitest-environment node
|
||||
*/
|
||||
|
||||
import { loggerMock } from '@sim/testing'
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
|
||||
vi.mock('@sim/logger', () => loggerMock)
|
||||
|
||||
type StoredEntry = { score: number; value: string }
|
||||
|
||||
/**
 * In-memory stand-in for the Redis client used by stream-buffer.
 * `events` maps a sorted-set key to score/value entries; `counters` maps a
 * sequence key to its current value.
 */
const createRedisStub = () => {
  const events = new Map<string, StoredEntry[]>()
  const counters = new Map<string, number>()

  // ZRANGEBYSCORE-style read: entries with min <= score <= max, ascending.
  const readEntries = (key: string, min: number, max: number) => {
    const list = events.get(key) || []
    return list
      .filter((entry) => entry.score >= min && entry.score <= max)
      .sort((a, b) => a.score - b.score)
      .map((entry) => entry.value)
  }

  return {
    del: vi.fn().mockResolvedValue(1),
    hset: vi.fn().mockResolvedValue(1),
    hgetall: vi.fn().mockResolvedValue({}),
    expire: vi.fn().mockResolvedValue(1),
    // Mimics the append Lua script: bump the sequence counter, wrap the event
    // with its new id, and store it in the events set scored by that id.
    eval: vi
      .fn()
      .mockImplementation(
        (
          _lua: string,
          _keysCount: number,
          seqKey: string,
          eventsKey: string,
          _ttl: number,
          _limit: number,
          streamId: string,
          eventJson: string
        ) => {
          const current = counters.get(seqKey) || 0
          const next = current + 1
          counters.set(seqKey, next)
          const entry = JSON.stringify({ eventId: next, streamId, event: JSON.parse(eventJson) })
          const list = events.get(eventsKey) || []
          list.push({ score: next, value: entry })
          events.set(eventsKey, list)
          return next
        }
      ),
    // Counter increment used for sequence-id reservation.
    incrby: vi.fn().mockImplementation((key: string, amount: number) => {
      const current = counters.get(key) || 0
      const next = current + amount
      counters.set(key, next)
      return next
    }),
    zrangebyscore: vi.fn().mockImplementation((key: string, min: string, max: string) => {
      const minVal = Number(min)
      const maxVal = max === '+inf' ? Number.POSITIVE_INFINITY : Number(max)
      return Promise.resolve(readEntries(key, minVal, maxVal))
    }),
    // Chainable pipeline; only zadd records data, the rest are no-ops.
    pipeline: vi.fn().mockImplementation(() => {
      const api: Record<string, any> = {}
      api.zadd = vi.fn().mockImplementation((key: string, ...args: Array<string | number>) => {
        // args alternate (score, member) pairs, as ioredis zadd takes them.
        const list = events.get(key) || []
        for (let i = 0; i < args.length; i += 2) {
          list.push({ score: Number(args[i]), value: String(args[i + 1]) })
        }
        events.set(key, list)
        return api
      })
      api.expire = vi.fn().mockReturnValue(api)
      api.zremrangebyrank = vi.fn().mockReturnValue(api)
      api.exec = vi.fn().mockResolvedValue([])
      return api
    }),
  }
}
|
||||
|
||||
let mockRedis: ReturnType<typeof createRedisStub>
|
||||
|
||||
vi.mock('@/lib/core/config/redis', () => ({
|
||||
getRedisClient: () => mockRedis,
|
||||
}))
|
||||
|
||||
import {
|
||||
appendStreamEvent,
|
||||
createStreamEventWriter,
|
||||
readStreamEvents,
|
||||
} from '@/lib/copilot/orchestrator/stream-buffer'
|
||||
|
||||
describe('stream-buffer', () => {
  beforeEach(() => {
    // Fresh stub per test so sequence counters and stored events don't leak.
    mockRedis = createRedisStub()
    vi.clearAllMocks()
  })

  it.concurrent('replays events after a given event id', async () => {
    await appendStreamEvent('stream-1', { type: 'content', data: 'hello' })
    await appendStreamEvent('stream-1', { type: 'content', data: 'world' })

    // afterEventId=0 returns everything, in append order.
    const allEvents = await readStreamEvents('stream-1', 0)
    expect(allEvents.map((entry) => entry.event.data)).toEqual(['hello', 'world'])

    // afterEventId=1 skips the first event.
    const replayed = await readStreamEvents('stream-1', 1)
    expect(replayed.map((entry) => entry.event.data)).toEqual(['world'])
  })

  it.concurrent('flushes buffered events for resume', async () => {
    // Writer batches events internally; flush must persist them for readers.
    const writer = createStreamEventWriter('stream-2')
    await writer.write({ type: 'content', data: 'a' })
    await writer.write({ type: 'content', data: 'b' })
    await writer.flush()

    const events = await readStreamEvents('stream-2', 0)
    expect(events.map((entry) => entry.event.data)).toEqual(['a', 'b'])
  })
})
|
||||
309
apps/sim/lib/copilot/orchestrator/stream-buffer.ts
Normal file
309
apps/sim/lib/copilot/orchestrator/stream-buffer.ts
Normal file
@@ -0,0 +1,309 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { REDIS_COPILOT_STREAM_PREFIX } from '@/lib/copilot/constants'
|
||||
import { env } from '@/lib/core/config/env'
|
||||
import { getRedisClient } from '@/lib/core/config/redis'
|
||||
|
||||
const logger = createLogger('CopilotStreamBuffer')
|
||||
|
||||
/**
 * Fallback tuning for the Redis-backed copilot stream buffer; each value can
 * be overridden via environment variables in getStreamBufferConfig().
 */
const STREAM_DEFAULTS = {
  ttlSeconds: 60 * 60, // lifetime of stream keys in Redis (used with EXPIRE)
  eventLimit: 5000, // max buffered events per stream before oldest are trimmed
  reserveBatch: 200, // presumably ids reserved per INCRBY batch — TODO confirm against the writer
  flushIntervalMs: 15, // NOTE(review): looks like the writer's flush cadence — confirm in createStreamEventWriter
  flushMaxBatch: 200, // NOTE(review): looks like max events per flush — confirm in createStreamEventWriter
}
|
||||
|
||||
/** Resolved stream-buffer tuning values; fallbacks come from STREAM_DEFAULTS. */
export type StreamBufferConfig = {
  ttlSeconds: number
  eventLimit: number
  reserveBatch: number
  flushIntervalMs: number
  flushMaxBatch: number
}
|
||||
|
||||
const parseNumber = (value: number | string | undefined, fallback: number): number => {
|
||||
if (typeof value === 'number' && Number.isFinite(value)) return value
|
||||
const parsed = Number(value)
|
||||
return Number.isFinite(parsed) ? parsed : fallback
|
||||
}
|
||||
|
||||
export function getStreamBufferConfig(): StreamBufferConfig {
|
||||
return {
|
||||
ttlSeconds: parseNumber(env.COPILOT_STREAM_TTL_SECONDS, STREAM_DEFAULTS.ttlSeconds),
|
||||
eventLimit: parseNumber(env.COPILOT_STREAM_EVENT_LIMIT, STREAM_DEFAULTS.eventLimit),
|
||||
reserveBatch: parseNumber(env.COPILOT_STREAM_RESERVE_BATCH, STREAM_DEFAULTS.reserveBatch),
|
||||
flushIntervalMs: parseNumber(
|
||||
env.COPILOT_STREAM_FLUSH_INTERVAL_MS,
|
||||
STREAM_DEFAULTS.flushIntervalMs
|
||||
),
|
||||
flushMaxBatch: parseNumber(env.COPILOT_STREAM_FLUSH_MAX_BATCH, STREAM_DEFAULTS.flushMaxBatch),
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Atomic append script: INCR the per-stream sequence counter, wrap the event
 * JSON with its new id and stream id, ZADD it into the events sorted set
 * (scored by the id), refresh TTLs on both keys, and trim the set down to the
 * newest `limit` entries. Returns the new event id.
 */
const APPEND_STREAM_EVENT_LUA = `
local seqKey = KEYS[1]
local eventsKey = KEYS[2]
local ttl = tonumber(ARGV[1])
local limit = tonumber(ARGV[2])
local streamId = ARGV[3]
local eventJson = ARGV[4]

local id = redis.call('INCR', seqKey)
local entry = '{"eventId":' .. id .. ',"streamId":' .. cjson.encode(streamId) .. ',"event":' .. eventJson .. '}'
redis.call('ZADD', eventsKey, id, entry)
redis.call('EXPIRE', eventsKey, ttl)
redis.call('EXPIRE', seqKey, ttl)
if limit > 0 then
redis.call('ZREMRANGEBYRANK', eventsKey, 0, -limit-1)
end
return id
`
|
||||
|
||||
function getStreamKeyPrefix(streamId: string) {
|
||||
return `${REDIS_COPILOT_STREAM_PREFIX}${streamId}`
|
||||
}
|
||||
|
||||
function getEventsKey(streamId: string) {
|
||||
return `${getStreamKeyPrefix(streamId)}:events`
|
||||
}
|
||||
|
||||
function getSeqKey(streamId: string) {
|
||||
return `${getStreamKeyPrefix(streamId)}:seq`
|
||||
}
|
||||
|
||||
function getMetaKey(streamId: string) {
|
||||
return `${getStreamKeyPrefix(streamId)}:meta`
|
||||
}
|
||||
|
||||
/** Lifecycle state of a buffered copilot stream. */
export type StreamStatus = 'active' | 'complete' | 'error'

/**
 * Stream metadata persisted in the Redis meta hash
 * (written as string fields via HSET — see setStreamMeta).
 */
export type StreamMeta = {
  status: StreamStatus
  // Owner of the stream, when known.
  userId?: string
  // ISO-8601 timestamp of the last meta update.
  updatedAt?: string
  // Error detail, set when the stream failed.
  error?: string
}
|
||||
|
||||
/** A single buffered event, wrapped with its allocated sequence ID. */
export type StreamEventEntry = {
  // Monotonically increasing ID within the stream; 0 when Redis is unavailable
  // or the write failed (see appendStreamEvent / createStreamEventWriter).
  eventId: number
  streamId: string
  // Raw SSE event payload.
  event: Record<string, unknown>
}

/**
 * Batching writer for stream events: write() queues an event, flush() forces
 * the pending batch to Redis, close() stops accepting writes and drains.
 */
export type StreamEventWriter = {
  write: (event: Record<string, unknown>) => Promise<StreamEventEntry>
  flush: () => Promise<void>
  close: () => Promise<void>
}
|
||||
|
||||
export async function resetStreamBuffer(streamId: string): Promise<void> {
|
||||
const redis = getRedisClient()
|
||||
if (!redis) return
|
||||
try {
|
||||
await redis.del(getEventsKey(streamId), getSeqKey(streamId), getMetaKey(streamId))
|
||||
} catch (error) {
|
||||
logger.warn('Failed to reset stream buffer', {
|
||||
streamId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
export async function setStreamMeta(streamId: string, meta: StreamMeta): Promise<void> {
|
||||
const redis = getRedisClient()
|
||||
if (!redis) return
|
||||
try {
|
||||
const config = getStreamBufferConfig()
|
||||
const payload: Record<string, string> = {
|
||||
status: meta.status,
|
||||
updatedAt: meta.updatedAt || new Date().toISOString(),
|
||||
}
|
||||
if (meta.userId) payload.userId = meta.userId
|
||||
if (meta.error) payload.error = meta.error
|
||||
await redis.hset(getMetaKey(streamId), payload)
|
||||
await redis.expire(getMetaKey(streamId), config.ttlSeconds)
|
||||
} catch (error) {
|
||||
logger.warn('Failed to update stream meta', {
|
||||
streamId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Read a stream's metadata hash.
 *
 * @returns The stored meta, or null when Redis is unavailable, the hash is
 *   missing/empty, or the read fails (failures are logged, never thrown).
 */
export async function getStreamMeta(streamId: string): Promise<StreamMeta | null> {
  const redis = getRedisClient()
  if (!redis) return null
  try {
    const meta = await redis.hgetall(getMetaKey(streamId))
    if (!meta || Object.keys(meta).length === 0) return null
    // NOTE(review): hgetall yields plain string fields; this cast assumes the
    // hash was written by setStreamMeta and that 'status' holds a valid
    // StreamStatus — no runtime validation is performed. Confirm no other
    // writer touches this key.
    return meta as StreamMeta
  } catch (error) {
    logger.warn('Failed to read stream meta', {
      streamId,
      error: error instanceof Error ? error.message : String(error),
    })
    return null
  }
}
|
||||
|
||||
/**
 * Append a single event to a stream's Redis buffer via the atomic Lua script
 * (allocate ID, ZADD, refresh TTLs, trim to the event limit).
 *
 * Degrades gracefully: when Redis is unavailable or the write fails, the
 * event is returned with eventId 0 so callers can keep streaming unbuffered.
 */
export async function appendStreamEvent(
  streamId: string,
  event: Record<string, unknown>
): Promise<StreamEventEntry> {
  const redis = getRedisClient()
  if (!redis) {
    return { eventId: 0, streamId, event }
  }

  try {
    const config = getStreamBufferConfig()
    const eventJson = JSON.stringify(event)
    // EVAL args: 2 keys (seq, events) then ttl, limit, streamId, payload —
    // must stay in sync with KEYS[1..2]/ARGV[1..4] in APPEND_STREAM_EVENT_LUA.
    const nextId = await redis.eval(
      APPEND_STREAM_EVENT_LUA,
      2,
      getSeqKey(streamId),
      getEventsKey(streamId),
      config.ttlSeconds,
      config.eventLimit,
      streamId,
      eventJson
    )
    // The eval reply may arrive as a number or a string; normalize to number.
    const eventId = typeof nextId === 'number' ? nextId : Number(nextId)
    return { eventId, streamId, event }
  } catch (error) {
    logger.warn('Failed to append stream event', {
      streamId,
      error: error instanceof Error ? error.message : String(error),
    })
    return { eventId: 0, streamId, event }
  }
}
|
||||
|
||||
/**
 * Create a batching writer that queues events in memory and flushes them to
 * the stream's Redis sorted set in pipelined batches.
 *
 * Event IDs are pre-reserved in chunks via INCRBY on the sequence key, so
 * concurrent writers allocate disjoint ID ranges. A flush is triggered when
 * the pending batch reaches flushMaxBatch, by a flushIntervalMs timer, and on
 * close(). When Redis is unavailable a no-op writer is returned (eventId 0).
 */
export function createStreamEventWriter(streamId: string): StreamEventWriter {
  const redis = getRedisClient()
  if (!redis) {
    // No Redis: hand back a writer whose operations are all no-ops.
    return {
      write: async (event) => ({ eventId: 0, streamId, event }),
      flush: async () => {},
      close: async () => {},
    }
  }

  const config = getStreamBufferConfig()
  // Events queued locally, not yet written to Redis.
  let pending: StreamEventEntry[] = []
  // Next event ID to hand out; 0 means no ID range has been reserved yet.
  let nextEventId = 0
  // Highest ID in the currently reserved range.
  let maxReservedId = 0
  let flushTimer: ReturnType<typeof setTimeout> | null = null
  // Arm the interval-based flush unless one is already scheduled.
  const scheduleFlush = () => {
    if (flushTimer) return
    flushTimer = setTimeout(() => {
      flushTimer = null
      void flush()
    }, config.flushIntervalMs)
  }

  // Reserve a block of sequence IDs (at least minCount, normally reserveBatch)
  // from the shared Redis counter.
  const reserveIds = async (minCount: number) => {
    const reserveCount = Math.max(config.reserveBatch, minCount)
    const newMax = await redis.incrby(getSeqKey(streamId), reserveCount)
    const startId = newMax - reserveCount + 1
    if (nextEventId === 0 || nextEventId > maxReservedId) {
      nextEventId = startId
      maxReservedId = newMax
    }
  }

  let flushPromise: Promise<void> | null = null
  let closed = false

  // Write the current pending batch in one pipeline; on failure the batch is
  // re-queued ahead of any events that arrived in the meantime so ID order
  // is preserved.
  const doFlush = async () => {
    if (pending.length === 0) return
    const batch = pending
    pending = []
    try {
      const key = getEventsKey(streamId)
      const zaddArgs: (string | number)[] = []
      for (const entry of batch) {
        zaddArgs.push(entry.eventId, JSON.stringify(entry))
      }
      const pipeline = redis.pipeline()
      pipeline.zadd(key, ...(zaddArgs as [number, string]))
      pipeline.expire(key, config.ttlSeconds)
      pipeline.expire(getSeqKey(streamId), config.ttlSeconds)
      // NOTE(review): unlike the Lua append path, this trim has no
      // eventLimit > 0 guard — with eventLimit 0 the rank range becomes
      // (0, -1) and every entry would be removed. Confirm eventLimit is
      // always positive.
      pipeline.zremrangebyrank(key, 0, -config.eventLimit - 1)
      await pipeline.exec()
    } catch (error) {
      logger.warn('Failed to flush stream events', {
        streamId,
        error: error instanceof Error ? error.message : String(error),
      })
      pending = batch.concat(pending)
    }
  }

  // Serialize flushes: concurrent callers await the in-flight flush, and any
  // events still pending afterwards get a follow-up flush scheduled.
  const flush = async () => {
    if (flushPromise) {
      await flushPromise
      return
    }
    flushPromise = doFlush()
    try {
      await flushPromise
    } finally {
      flushPromise = null
      if (pending.length > 0) scheduleFlush()
    }
  }

  // Queue one event, reserving a fresh ID range when the current one runs out.
  const write = async (event: Record<string, unknown>) => {
    if (closed) return { eventId: 0, streamId, event }
    if (nextEventId === 0 || nextEventId > maxReservedId) {
      await reserveIds(1)
    }
    const eventId = nextEventId++
    const entry: StreamEventEntry = { eventId, streamId, event }
    pending.push(entry)
    if (pending.length >= config.flushMaxBatch) {
      await flush()
    } else {
      scheduleFlush()
    }
    return entry
  }

  // Stop accepting writes, cancel any scheduled timer, and drain the queue.
  const close = async () => {
    closed = true
    if (flushTimer) {
      clearTimeout(flushTimer)
      flushTimer = null
    }
    await flush()
  }

  return { write, flush, close }
}
|
||||
|
||||
export async function readStreamEvents(
|
||||
streamId: string,
|
||||
afterEventId: number
|
||||
): Promise<StreamEventEntry[]> {
|
||||
const redis = getRedisClient()
|
||||
if (!redis) return []
|
||||
try {
|
||||
const raw = await redis.zrangebyscore(getEventsKey(streamId), afterEventId + 1, '+inf')
|
||||
return raw
|
||||
.map((entry) => {
|
||||
try {
|
||||
return JSON.parse(entry) as StreamEventEntry
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
})
|
||||
.filter((entry): entry is StreamEventEntry => Boolean(entry))
|
||||
} catch (error) {
|
||||
logger.warn('Failed to read stream events', {
|
||||
streamId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
return []
|
||||
}
|
||||
}
|
||||
182
apps/sim/lib/copilot/orchestrator/stream-core.ts
Normal file
182
apps/sim/lib/copilot/orchestrator/stream-core.ts
Normal file
@@ -0,0 +1,182 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { ORCHESTRATION_TIMEOUT_MS } from '@/lib/copilot/constants'
|
||||
import {
|
||||
handleSubagentRouting,
|
||||
sseHandlers,
|
||||
subAgentHandlers,
|
||||
} from '@/lib/copilot/orchestrator/sse-handlers'
|
||||
import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser'
|
||||
import {
|
||||
normalizeSseEvent,
|
||||
shouldSkipToolCallEvent,
|
||||
shouldSkipToolResultEvent,
|
||||
} from '@/lib/copilot/orchestrator/sse-utils'
|
||||
import type {
|
||||
ExecutionContext,
|
||||
OrchestratorOptions,
|
||||
SSEEvent,
|
||||
StreamingContext,
|
||||
ToolCallSummary,
|
||||
} from '@/lib/copilot/orchestrator/types'
|
||||
|
||||
const logger = createLogger('CopilotStreamCore')
|
||||
|
||||
/**
 * Options for the shared stream processing loop.
 */
export interface StreamLoopOptions extends OrchestratorOptions {
  /**
   * Called for each normalized event BEFORE standard handler dispatch.
   * Return true to skip the default handler for this event; any falsy return
   * lets the default sseHandlers/subAgentHandlers dispatch run.
   */
  onBeforeDispatch?: (event: SSEEvent, context: StreamingContext) => boolean | undefined
}
|
||||
|
||||
/**
|
||||
* Create a fresh StreamingContext.
|
||||
*/
|
||||
export function createStreamingContext(overrides?: Partial<StreamingContext>): StreamingContext {
|
||||
return {
|
||||
chatId: undefined,
|
||||
conversationId: undefined,
|
||||
messageId: crypto.randomUUID(),
|
||||
accumulatedContent: '',
|
||||
contentBlocks: [],
|
||||
toolCalls: new Map(),
|
||||
currentThinkingBlock: null,
|
||||
isInThinkingBlock: false,
|
||||
subAgentParentToolCallId: undefined,
|
||||
subAgentContent: {},
|
||||
subAgentToolCalls: {},
|
||||
pendingContent: '',
|
||||
streamComplete: false,
|
||||
wasAborted: false,
|
||||
errors: [],
|
||||
...overrides,
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Run the SSE stream processing loop.
 *
 * Handles: fetch -> parse -> normalize -> dedupe -> subagent routing -> handler dispatch.
 * Callers provide the fetch URL/options and can intercept events via onBeforeDispatch.
 *
 * On timeout, 'Request timed out' is pushed onto context.errors, the stream is
 * marked complete, and the body reader is cancelled so the parse loop ends.
 * The loop exits as soon as anything sets context.streamComplete.
 *
 * @throws When the backend responds non-OK or without a body.
 */
export async function runStreamLoop(
  fetchUrl: string,
  fetchOptions: RequestInit,
  context: StreamingContext,
  execContext: ExecutionContext,
  options: StreamLoopOptions
): Promise<void> {
  const { timeout = ORCHESTRATION_TIMEOUT_MS, abortSignal } = options

  const response = await fetch(fetchUrl, {
    ...fetchOptions,
    signal: abortSignal,
  })

  if (!response.ok) {
    const errorText = await response.text().catch(() => '')
    throw new Error(
      `Copilot backend error (${response.status}): ${errorText || response.statusText}`
    )
  }

  if (!response.body) {
    throw new Error('Copilot backend response missing body')
  }

  const reader = response.body.getReader()
  const decoder = new TextDecoder()

  // Hard deadline: record the timeout and cancel the reader so the async
  // iterator below terminates instead of hanging.
  const timeoutId = setTimeout(() => {
    context.errors.push('Request timed out')
    context.streamComplete = true
    reader.cancel().catch(() => {})
  }, timeout)

  try {
    for await (const event of parseSSEStream(reader, decoder, abortSignal)) {
      if (abortSignal?.aborted) {
        context.wasAborted = true
        break
      }

      const normalizedEvent = normalizeSseEvent(event)

      // Skip duplicate tool events.
      const shouldSkipToolCall = shouldSkipToolCallEvent(normalizedEvent)
      const shouldSkipToolResult = shouldSkipToolResultEvent(normalizedEvent)

      // Forward every non-duplicate event to the caller's observer; observer
      // failures are logged but never abort the stream.
      if (!shouldSkipToolCall && !shouldSkipToolResult) {
        try {
          await options.onEvent?.(normalizedEvent)
        } catch (error) {
          logger.warn('Failed to forward SSE event', {
            type: normalizedEvent.type,
            error: error instanceof Error ? error.message : String(error),
          })
        }
      }

      // Let the caller intercept before standard dispatch.
      if (options.onBeforeDispatch?.(normalizedEvent, context)) {
        if (context.streamComplete) break
        continue
      }

      // Standard subagent start/end handling: track the parent tool call so
      // subsequent events can be routed to the subagent accumulators.
      if (normalizedEvent.type === 'subagent_start') {
        const eventData = normalizedEvent.data as Record<string, unknown> | undefined
        const toolCallId = eventData?.tool_call_id as string | undefined
        if (toolCallId) {
          context.subAgentParentToolCallId = toolCallId
          context.subAgentContent[toolCallId] = ''
          context.subAgentToolCalls[toolCallId] = []
        }
        continue
      }

      if (normalizedEvent.type === 'subagent_end') {
        context.subAgentParentToolCallId = undefined
        continue
      }

      // Subagent event routing.
      if (handleSubagentRouting(normalizedEvent, context)) {
        const handler = subAgentHandlers[normalizedEvent.type]
        if (handler) {
          await handler(normalizedEvent, context, execContext, options)
        }
        if (context.streamComplete) break
        continue
      }

      // Main event handler dispatch.
      const handler = sseHandlers[normalizedEvent.type]
      if (handler) {
        await handler(normalizedEvent, context, execContext, options)
      }
      if (context.streamComplete) break
    }
  } finally {
    clearTimeout(timeoutId)
  }
}
|
||||
|
||||
/**
|
||||
* Build a ToolCallSummary array from the streaming context.
|
||||
*/
|
||||
export function buildToolCallSummaries(context: StreamingContext): ToolCallSummary[] {
|
||||
return Array.from(context.toolCalls.values()).map((toolCall) => ({
|
||||
id: toolCall.id,
|
||||
name: toolCall.name,
|
||||
status: toolCall.status,
|
||||
params: toolCall.params,
|
||||
result: toolCall.result?.output,
|
||||
error: toolCall.error,
|
||||
durationMs:
|
||||
toolCall.endTime && toolCall.startTime ? toolCall.endTime - toolCall.startTime : undefined,
|
||||
}))
|
||||
}
|
||||
137
apps/sim/lib/copilot/orchestrator/subagent.ts
Normal file
137
apps/sim/lib/copilot/orchestrator/subagent.ts
Normal file
@@ -0,0 +1,137 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
|
||||
import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor'
|
||||
import type {
|
||||
ExecutionContext,
|
||||
OrchestratorOptions,
|
||||
SSEEvent,
|
||||
StreamingContext,
|
||||
ToolCallSummary,
|
||||
} from '@/lib/copilot/orchestrator/types'
|
||||
import { env } from '@/lib/core/config/env'
|
||||
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
|
||||
import { buildToolCallSummaries, createStreamingContext, runStreamLoop } from './stream-core'
|
||||
|
||||
const logger = createLogger('CopilotSubagentOrchestrator')
|
||||
|
||||
/**
 * Options for orchestrating a direct subagent stream. Replaces the base
 * onComplete so it receives the subagent-specific result.
 */
export interface SubagentOrchestratorOptions extends Omit<OrchestratorOptions, 'onComplete'> {
  userId: string
  // When set, the execution context is prepared against this workflow;
  // otherwise a minimal env-only context is built.
  workflowId?: string
  workspaceId?: string
  onComplete?: (result: SubagentOrchestratorResult) => void | Promise<void>
}

/**
 * Outcome of a subagent stream: accumulated text, tool-call summaries, and
 * the optional structured result payload emitted by the agent.
 */
export interface SubagentOrchestratorResult {
  success: boolean
  content: string
  toolCalls: ToolCallSummary[]
  structuredResult?: {
    type?: string
    summary?: string
    data?: unknown
    success?: boolean
  }
  // Fatal error message when orchestration threw.
  error?: string
  // Non-fatal errors accumulated while streaming, when any occurred.
  errors?: string[]
}
|
||||
|
||||
export async function orchestrateSubagentStream(
|
||||
agentId: string,
|
||||
requestPayload: Record<string, unknown>,
|
||||
options: SubagentOrchestratorOptions
|
||||
): Promise<SubagentOrchestratorResult> {
|
||||
const { userId, workflowId, workspaceId } = options
|
||||
const execContext = await buildExecutionContext(userId, workflowId, workspaceId)
|
||||
|
||||
const msgId = requestPayload?.messageId
|
||||
const context = createStreamingContext({
|
||||
messageId: typeof msgId === 'string' ? msgId : crypto.randomUUID(),
|
||||
})
|
||||
|
||||
let structuredResult: SubagentOrchestratorResult['structuredResult']
|
||||
|
||||
try {
|
||||
await runStreamLoop(
|
||||
`${SIM_AGENT_API_URL}/api/subagent/${agentId}`,
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}),
|
||||
},
|
||||
body: JSON.stringify({ ...requestPayload, userId, stream: true }),
|
||||
},
|
||||
context,
|
||||
execContext,
|
||||
{
|
||||
...options,
|
||||
onBeforeDispatch: (event: SSEEvent, ctx: StreamingContext) => {
|
||||
// Handle structured_result / subagent_result - subagent-specific.
|
||||
if (event.type === 'structured_result' || event.type === 'subagent_result') {
|
||||
structuredResult = normalizeStructuredResult(event.data)
|
||||
ctx.streamComplete = true
|
||||
return true // skip default dispatch
|
||||
}
|
||||
|
||||
// For direct subagent calls, events may have the subagent field set
|
||||
// but no subagent_start because this IS the top-level agent.
|
||||
// Skip subagent routing for events where the subagent field matches
|
||||
// the current agentId - these are top-level events.
|
||||
if (event.subagent === agentId && !ctx.subAgentParentToolCallId) {
|
||||
return false // let default dispatch handle it
|
||||
}
|
||||
|
||||
return false // let default dispatch handle it
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
const result: SubagentOrchestratorResult = {
|
||||
success: context.errors.length === 0 && !context.wasAborted,
|
||||
content: context.accumulatedContent,
|
||||
toolCalls: buildToolCallSummaries(context),
|
||||
structuredResult,
|
||||
errors: context.errors.length ? context.errors : undefined,
|
||||
}
|
||||
await options.onComplete?.(result)
|
||||
return result
|
||||
} catch (error) {
|
||||
const err = error instanceof Error ? error : new Error('Subagent orchestration failed')
|
||||
logger.error('Subagent orchestration failed', { error: err.message, agentId })
|
||||
await options.onError?.(err)
|
||||
return {
|
||||
success: false,
|
||||
content: context.accumulatedContent,
|
||||
toolCalls: [],
|
||||
error: err.message,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function normalizeStructuredResult(data: unknown): SubagentOrchestratorResult['structuredResult'] {
|
||||
if (!data || typeof data !== 'object') return undefined
|
||||
const d = data as Record<string, unknown>
|
||||
return {
|
||||
type: (d.result_type || d.type) as string | undefined,
|
||||
summary: d.summary as string | undefined,
|
||||
data: d.data ?? d,
|
||||
success: d.success as boolean | undefined,
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Build the ExecutionContext for tool execution.
 *
 * With a workflowId, delegates to prepareExecutionContext. Without one, falls
 * back to a minimal context carrying only the user's effective decrypted env
 * vars for the (optional) workspace.
 */
async function buildExecutionContext(
  userId: string,
  workflowId?: string,
  workspaceId?: string
): Promise<ExecutionContext> {
  if (workflowId) {
    return prepareExecutionContext(userId, workflowId)
  }
  const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId)
  return {
    userId,
    // workflowId is always falsy on this path; keep the empty-string shape.
    workflowId: workflowId || '',
    workspaceId,
    decryptedEnvVars,
  }
}
|
||||
129
apps/sim/lib/copilot/orchestrator/tool-executor/access.ts
Normal file
129
apps/sim/lib/copilot/orchestrator/tool-executor/access.ts
Normal file
@@ -0,0 +1,129 @@
|
||||
import { db } from '@sim/db'
|
||||
import { permissions, workflow, workspace } from '@sim/db/schema'
|
||||
import { and, asc, desc, eq, inArray, or } from 'drizzle-orm'
|
||||
|
||||
type WorkflowRecord = typeof workflow.$inferSelect
|
||||
|
||||
export async function ensureWorkflowAccess(
|
||||
workflowId: string,
|
||||
userId: string
|
||||
): Promise<{
|
||||
workflow: WorkflowRecord
|
||||
workspaceId?: string | null
|
||||
}> {
|
||||
const [workflowRecord] = await db
|
||||
.select()
|
||||
.from(workflow)
|
||||
.where(eq(workflow.id, workflowId))
|
||||
.limit(1)
|
||||
if (!workflowRecord) {
|
||||
throw new Error(`Workflow ${workflowId} not found`)
|
||||
}
|
||||
|
||||
if (workflowRecord.userId === userId) {
|
||||
return { workflow: workflowRecord, workspaceId: workflowRecord.workspaceId }
|
||||
}
|
||||
|
||||
if (workflowRecord.workspaceId) {
|
||||
const [permissionRow] = await db
|
||||
.select({ permissionType: permissions.permissionType })
|
||||
.from(permissions)
|
||||
.where(
|
||||
and(
|
||||
eq(permissions.entityType, 'workspace'),
|
||||
eq(permissions.entityId, workflowRecord.workspaceId),
|
||||
eq(permissions.userId, userId)
|
||||
)
|
||||
)
|
||||
.limit(1)
|
||||
if (permissionRow) {
|
||||
return { workflow: workflowRecord, workspaceId: workflowRecord.workspaceId }
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error('Unauthorized workflow access')
|
||||
}
|
||||
|
||||
export async function getDefaultWorkspaceId(userId: string): Promise<string> {
|
||||
const workspaces = await db
|
||||
.select({ workspaceId: workspace.id })
|
||||
.from(permissions)
|
||||
.innerJoin(workspace, eq(permissions.entityId, workspace.id))
|
||||
.where(and(eq(permissions.userId, userId), eq(permissions.entityType, 'workspace')))
|
||||
.orderBy(desc(workspace.createdAt))
|
||||
.limit(1)
|
||||
|
||||
const workspaceId = workspaces[0]?.workspaceId
|
||||
if (!workspaceId) {
|
||||
throw new Error('No workspace found for user')
|
||||
}
|
||||
|
||||
return workspaceId
|
||||
}
|
||||
|
||||
/**
 * Verify the user can access a workspace, optionally requiring write access.
 *
 * Write access: workspace owner, or an 'admin'/'write' permission.
 * Read access: any of the above, or a 'read' permission.
 *
 * NOTE(review): the lookup inner-joins workspace onto the user's own
 * permission row, so a workspace owner with no permission row of their own
 * would hit the "not found" branch — confirm owners always receive a
 * permissions row.
 *
 * @throws When no permission row exists, or access is insufficient.
 */
export async function ensureWorkspaceAccess(
  workspaceId: string,
  userId: string,
  requireWrite: boolean
): Promise<void> {
  const [row] = await db
    .select({
      permissionType: permissions.permissionType,
      ownerId: workspace.ownerId,
    })
    .from(permissions)
    .innerJoin(workspace, eq(permissions.entityId, workspace.id))
    .where(
      and(
        eq(permissions.entityType, 'workspace'),
        eq(permissions.entityId, workspaceId),
        eq(permissions.userId, userId)
      )
    )
    .limit(1)

  if (!row) {
    throw new Error(`Workspace ${workspaceId} not found`)
  }

  const isOwner = row.ownerId === userId
  const permissionType = row.permissionType
  const canWrite = isOwner || permissionType === 'admin' || permissionType === 'write'

  if (requireWrite && !canWrite) {
    throw new Error('Write or admin access required for this workspace')
  }

  // Read path: any write-capable role or an explicit 'read' permission passes;
  // any other permission value is rejected.
  if (!requireWrite && !canWrite && permissionType !== 'read') {
    throw new Error('Access denied to workspace')
  }
}
|
||||
|
||||
export async function getAccessibleWorkflowsForUser(
|
||||
userId: string,
|
||||
options?: { workspaceId?: string; folderId?: string }
|
||||
) {
|
||||
const workspaceIds = await db
|
||||
.select({ entityId: permissions.entityId })
|
||||
.from(permissions)
|
||||
.where(and(eq(permissions.userId, userId), eq(permissions.entityType, 'workspace')))
|
||||
|
||||
const workspaceIdList = workspaceIds.map((row) => row.entityId)
|
||||
|
||||
const workflowConditions = [eq(workflow.userId, userId)]
|
||||
if (workspaceIdList.length > 0) {
|
||||
workflowConditions.push(inArray(workflow.workspaceId, workspaceIdList))
|
||||
}
|
||||
if (options?.workspaceId) {
|
||||
workflowConditions.push(eq(workflow.workspaceId, options.workspaceId))
|
||||
}
|
||||
if (options?.folderId) {
|
||||
workflowConditions.push(eq(workflow.folderId, options.folderId))
|
||||
}
|
||||
|
||||
return db
|
||||
.select()
|
||||
.from(workflow)
|
||||
.where(or(...workflowConditions))
|
||||
.orderBy(asc(workflow.sortOrder), asc(workflow.createdAt), asc(workflow.id))
|
||||
}
|
||||
@@ -0,0 +1,317 @@
|
||||
import crypto from 'crypto'
|
||||
import { db } from '@sim/db'
|
||||
import { chat, workflowMcpTool } from '@sim/db/schema'
|
||||
import { and, eq } from 'drizzle-orm'
|
||||
import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema'
|
||||
import { deployWorkflow, undeployWorkflow } from '@/lib/workflows/persistence/utils'
|
||||
import { checkChatAccess, checkWorkflowAccessForChatCreation } from '@/app/api/chat/utils'
|
||||
import { ensureWorkflowAccess } from '../access'
|
||||
import type { DeployApiParams, DeployChatParams, DeployMcpParams } from '../param-types'
|
||||
|
||||
/**
 * Deploy or undeploy a workflow's HTTP API endpoint.
 *
 * Resolves the workflow from params or the execution context, verifies the
 * caller's access, then delegates to deployWorkflow/undeployWorkflow.
 * Never throws: failures are reported via { success: false, error }.
 */
export async function executeDeployApi(
  params: DeployApiParams,
  context: ExecutionContext
): Promise<ToolCallResult> {
  try {
    const workflowId = params.workflowId || context.workflowId
    if (!workflowId) {
      return { success: false, error: 'workflowId is required' }
    }
    // Any action other than 'undeploy' is treated as a deploy request.
    const action = params.action === 'undeploy' ? 'undeploy' : 'deploy'
    const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId)

    if (action === 'undeploy') {
      const result = await undeployWorkflow({ workflowId })
      if (!result.success) {
        return { success: false, error: result.error || 'Failed to undeploy workflow' }
      }
      return { success: true, output: { workflowId, isDeployed: false } }
    }

    const result = await deployWorkflow({
      workflowId,
      deployedBy: context.userId,
      workflowName: workflowRecord.name || undefined,
    })
    if (!result.success) {
      return { success: false, error: result.error || 'Failed to deploy workflow' }
    }

    const baseUrl = getBaseUrl()
    return {
      success: true,
      output: {
        workflowId,
        isDeployed: true,
        deployedAt: result.deployedAt,
        version: result.version,
        apiEndpoint: `${baseUrl}/api/workflows/${workflowId}/run`,
        baseUrl,
      },
    }
  } catch (error) {
    return { success: false, error: error instanceof Error ? error.message : String(error) }
  }
}
|
||||
|
||||
/**
 * Deploy or undeploy a workflow's hosted chat interface.
 *
 * Deploy (re)deploys the workflow itself, then creates or updates the chat
 * record (identifier, title, auth, customizations). Undeploy deletes the
 * workflow's chat record. Never throws: failures are reported via
 * { success: false, error }.
 */
export async function executeDeployChat(
  params: DeployChatParams,
  context: ExecutionContext
): Promise<ToolCallResult> {
  try {
    const workflowId = params.workflowId || context.workflowId
    if (!workflowId) {
      return { success: false, error: 'workflowId is required' }
    }

    // Any action other than 'undeploy' is treated as a deploy request.
    const action = params.action === 'undeploy' ? 'undeploy' : 'deploy'
    if (action === 'undeploy') {
      const existing = await db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1)
      if (!existing.length) {
        return { success: false, error: 'No active chat deployment found for this workflow' }
      }
      const { hasAccess } = await checkChatAccess(existing[0].id, context.userId)
      if (!hasAccess) {
        return { success: false, error: 'Unauthorized chat access' }
      }
      await db.delete(chat).where(eq(chat.id, existing[0].id))
      return { success: true, output: { success: true, action: 'undeploy', isDeployed: false } }
    }

    const { hasAccess } = await checkWorkflowAccessForChatCreation(workflowId, context.userId)
    if (!hasAccess) {
      return { success: false, error: 'Workflow not found or access denied' }
    }

    // An existing chat deployment for this workflow supplies defaults below.
    const existing = await db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1)
    const existingDeployment = existing[0] || null

    const identifier = String(params.identifier || existingDeployment?.identifier || '').trim()
    const title = String(params.title || existingDeployment?.title || '').trim()
    if (!identifier || !title) {
      return { success: false, error: 'Chat identifier and title are required' }
    }

    // The identifier is embedded in the public chat URL, so restrict its charset.
    const identifierPattern = /^[a-z0-9-]+$/
    if (!identifierPattern.test(identifier)) {
      return {
        success: false,
        error: 'Identifier can only contain lowercase letters, numbers, and hyphens',
      }
    }

    // Identifiers must be unique across all chats, except our own record.
    const existingIdentifier = await db
      .select()
      .from(chat)
      .where(eq(chat.identifier, identifier))
      .limit(1)
    if (existingIdentifier.length > 0 && existingIdentifier[0].id !== existingDeployment?.id) {
      return { success: false, error: 'Identifier already in use' }
    }

    // The workflow itself must be (re)deployed before exposing the chat.
    const deployResult = await deployWorkflow({
      workflowId,
      deployedBy: context.userId,
    })
    if (!deployResult.success) {
      return { success: false, error: deployResult.error || 'Failed to deploy workflow' }
    }

    const existingCustomizations =
      (existingDeployment?.customizations as
        | { primaryColor?: string; welcomeMessage?: string }
        | undefined) || {}

    // Merge precedence: explicit params > existing deployment > defaults.
    const payload = {
      workflowId,
      identifier,
      title,
      description: String(params.description || existingDeployment?.description || ''),
      customizations: {
        primaryColor:
          params.customizations?.primaryColor ||
          existingCustomizations.primaryColor ||
          'var(--brand-primary-hover-hex)',
        welcomeMessage:
          params.customizations?.welcomeMessage ||
          existingCustomizations.welcomeMessage ||
          'Hi there! How can I help you today?',
      },
      authType: params.authType || existingDeployment?.authType || 'public',
      password: params.password,
      allowedEmails: params.allowedEmails || existingDeployment?.allowedEmails || [],
      outputConfigs: params.outputConfigs || existingDeployment?.outputConfigs || [],
    }

    if (existingDeployment) {
      // Update in place; allowedEmails only apply to email/sso auth types.
      await db
        .update(chat)
        .set({
          identifier: payload.identifier,
          title: payload.title,
          description: payload.description,
          customizations: payload.customizations,
          authType: payload.authType,
          password: payload.password || existingDeployment.password,
          allowedEmails:
            payload.authType === 'email' || payload.authType === 'sso' ? payload.allowedEmails : [],
          outputConfigs: payload.outputConfigs,
          updatedAt: new Date(),
        })
        .where(eq(chat.id, existingDeployment.id))
    } else {
      await db.insert(chat).values({
        id: crypto.randomUUID(),
        workflowId,
        userId: context.userId,
        identifier: payload.identifier,
        title: payload.title,
        description: payload.description,
        customizations: payload.customizations,
        isActive: true,
        authType: payload.authType,
        password: payload.password || null,
        allowedEmails:
          payload.authType === 'email' || payload.authType === 'sso' ? payload.allowedEmails : [],
        outputConfigs: payload.outputConfigs,
        createdAt: new Date(),
        updatedAt: new Date(),
      })
    }

    const baseUrl = getBaseUrl()
    return {
      success: true,
      output: {
        success: true,
        action: 'deploy',
        isDeployed: true,
        identifier,
        chatUrl: `${baseUrl}/chat/${identifier}`,
        apiEndpoint: `${baseUrl}/api/workflows/${workflowId}/run`,
        baseUrl,
      },
    }
  } catch (error) {
    return { success: false, error: error instanceof Error ? error.message : String(error) }
  }
}
|
||||
|
||||
/**
 * Register (or update) a deployed workflow as a tool on a workspace MCP server.
 *
 * Requires the workflow to already be API-deployed and an explicit serverId.
 * The tool name is sanitized, and an existing (serverId, workflowId) tool is
 * updated in place rather than duplicated. Never throws: failures are
 * reported via { success: false, error }.
 */
export async function executeDeployMcp(
  params: DeployMcpParams,
  context: ExecutionContext
): Promise<ToolCallResult> {
  try {
    const workflowId = params.workflowId || context.workflowId
    if (!workflowId) {
      return { success: false, error: 'workflowId is required' }
    }

    const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId)
    const workspaceId = workflowRecord.workspaceId
    if (!workspaceId) {
      return { success: false, error: 'workspaceId is required' }
    }

    if (!workflowRecord.isDeployed) {
      return {
        success: false,
        error: 'Workflow must be deployed before adding as an MCP tool. Use deploy_api first.',
      }
    }

    const serverId = params.serverId
    if (!serverId) {
      return {
        success: false,
        error: 'serverId is required. Use list_workspace_mcp_servers to get available servers.',
      }
    }

    // One tool per (server, workflow) pair: update when it already exists.
    const existingTool = await db
      .select()
      .from(workflowMcpTool)
      .where(
        and(eq(workflowMcpTool.serverId, serverId), eq(workflowMcpTool.workflowId, workflowId))
      )
      .limit(1)

    const toolName = sanitizeToolName(
      params.toolName || workflowRecord.name || `workflow_${workflowId}`
    )
    const toolDescription =
      params.toolDescription ||
      workflowRecord.description ||
      `Execute ${workflowRecord.name} workflow`
    const parameterSchema = params.parameterSchema || {}

    const baseUrl = getBaseUrl()
    const mcpServerUrl = `${baseUrl}/api/mcp/serve/${serverId}`

    if (existingTool.length > 0) {
      const toolId = existingTool[0].id
      await db
        .update(workflowMcpTool)
        .set({
          toolName,
          toolDescription,
          parameterSchema,
          updatedAt: new Date(),
        })
        .where(eq(workflowMcpTool.id, toolId))
      return {
        success: true,
        output: { toolId, toolName, toolDescription, updated: true, mcpServerUrl, baseUrl },
      }
    }

    const toolId = crypto.randomUUID()
    await db.insert(workflowMcpTool).values({
      id: toolId,
      serverId,
      workflowId,
      toolName,
      toolDescription,
      parameterSchema,
      createdAt: new Date(),
      updatedAt: new Date(),
    })

    return {
      success: true,
      output: { toolId, toolName, toolDescription, updated: false, mcpServerUrl, baseUrl },
    }
  } catch (error) {
    return { success: false, error: error instanceof Error ? error.message : String(error) }
  }
}
|
||||
|
||||
export async function executeRedeploy(context: ExecutionContext): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workflowId = context.workflowId
|
||||
if (!workflowId) {
|
||||
return { success: false, error: 'workflowId is required' }
|
||||
}
|
||||
await ensureWorkflowAccess(workflowId, context.userId)
|
||||
|
||||
const result = await deployWorkflow({ workflowId, deployedBy: context.userId })
|
||||
if (!result.success) {
|
||||
return { success: false, error: result.error || 'Failed to redeploy workflow' }
|
||||
}
|
||||
const baseUrl = getBaseUrl()
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
workflowId,
|
||||
deployedAt: result.deployedAt || null,
|
||||
version: result.version,
|
||||
apiEndpoint: `${baseUrl}/api/workflows/${workflowId}/run`,
|
||||
baseUrl,
|
||||
},
|
||||
}
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,2 @@
|
||||
export * from './deploy'
|
||||
export * from './manage'
|
||||
@@ -0,0 +1,226 @@
|
||||
import crypto from 'crypto'
|
||||
import { db } from '@sim/db'
|
||||
import { chat, workflow, workflowMcpServer, workflowMcpTool } from '@sim/db/schema'
|
||||
import { eq, inArray } from 'drizzle-orm'
|
||||
import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types'
|
||||
import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema'
|
||||
import { hasValidStartBlock } from '@/lib/workflows/triggers/trigger-utils.server'
|
||||
import { ensureWorkflowAccess } from '../access'
|
||||
import type {
|
||||
CheckDeploymentStatusParams,
|
||||
CreateWorkspaceMcpServerParams,
|
||||
ListWorkspaceMcpServersParams,
|
||||
} from '../param-types'
|
||||
|
||||
export async function executeCheckDeploymentStatus(
|
||||
params: CheckDeploymentStatusParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workflowId = params.workflowId || context.workflowId
|
||||
if (!workflowId) {
|
||||
return { success: false, error: 'workflowId is required' }
|
||||
}
|
||||
const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId)
|
||||
const workspaceId = workflowRecord.workspaceId
|
||||
|
||||
const [apiDeploy, chatDeploy] = await Promise.all([
|
||||
db.select().from(workflow).where(eq(workflow.id, workflowId)).limit(1),
|
||||
db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1),
|
||||
])
|
||||
|
||||
const isApiDeployed = apiDeploy[0]?.isDeployed || false
|
||||
const apiDetails = {
|
||||
isDeployed: isApiDeployed,
|
||||
deployedAt: apiDeploy[0]?.deployedAt || null,
|
||||
endpoint: isApiDeployed ? `/api/workflows/${workflowId}/execute` : null,
|
||||
apiKey: workflowRecord.workspaceId ? 'Workspace API keys' : 'Personal API keys',
|
||||
needsRedeployment: false,
|
||||
}
|
||||
|
||||
const isChatDeployed = !!chatDeploy[0]
|
||||
const chatCustomizations =
|
||||
(chatDeploy[0]?.customizations as
|
||||
| { welcomeMessage?: string; primaryColor?: string }
|
||||
| undefined) || {}
|
||||
const chatDetails = {
|
||||
isDeployed: isChatDeployed,
|
||||
chatId: chatDeploy[0]?.id || null,
|
||||
identifier: chatDeploy[0]?.identifier || null,
|
||||
chatUrl: isChatDeployed ? `/chat/${chatDeploy[0]?.identifier}` : null,
|
||||
title: chatDeploy[0]?.title || null,
|
||||
description: chatDeploy[0]?.description || null,
|
||||
authType: chatDeploy[0]?.authType || null,
|
||||
allowedEmails: chatDeploy[0]?.allowedEmails || null,
|
||||
outputConfigs: chatDeploy[0]?.outputConfigs || null,
|
||||
welcomeMessage: chatCustomizations.welcomeMessage || null,
|
||||
primaryColor: chatCustomizations.primaryColor || null,
|
||||
hasPassword: Boolean(chatDeploy[0]?.password),
|
||||
}
|
||||
|
||||
const mcpDetails: {
|
||||
isDeployed: boolean
|
||||
servers: Array<{
|
||||
serverId: string
|
||||
serverName: string
|
||||
toolName: string
|
||||
toolDescription: string | null
|
||||
parameterSchema: unknown
|
||||
toolId: string
|
||||
}>
|
||||
} = { isDeployed: false, servers: [] }
|
||||
if (workspaceId) {
|
||||
const servers = await db
|
||||
.select({
|
||||
serverId: workflowMcpServer.id,
|
||||
serverName: workflowMcpServer.name,
|
||||
toolName: workflowMcpTool.toolName,
|
||||
toolDescription: workflowMcpTool.toolDescription,
|
||||
parameterSchema: workflowMcpTool.parameterSchema,
|
||||
toolId: workflowMcpTool.id,
|
||||
})
|
||||
.from(workflowMcpTool)
|
||||
.innerJoin(workflowMcpServer, eq(workflowMcpTool.serverId, workflowMcpServer.id))
|
||||
.where(eq(workflowMcpTool.workflowId, workflowId))
|
||||
|
||||
if (servers.length > 0) {
|
||||
mcpDetails.isDeployed = true
|
||||
mcpDetails.servers = servers
|
||||
}
|
||||
}
|
||||
|
||||
const isDeployed = apiDetails.isDeployed || chatDetails.isDeployed || mcpDetails.isDeployed
|
||||
return {
|
||||
success: true,
|
||||
output: { isDeployed, api: apiDetails, chat: chatDetails, mcp: mcpDetails },
|
||||
}
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeListWorkspaceMcpServers(
|
||||
params: ListWorkspaceMcpServersParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workflowId = params.workflowId || context.workflowId
|
||||
if (!workflowId) {
|
||||
return { success: false, error: 'workflowId is required' }
|
||||
}
|
||||
const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId)
|
||||
const workspaceId = workflowRecord.workspaceId
|
||||
if (!workspaceId) {
|
||||
return { success: false, error: 'workspaceId is required' }
|
||||
}
|
||||
|
||||
const servers = await db
|
||||
.select({
|
||||
id: workflowMcpServer.id,
|
||||
name: workflowMcpServer.name,
|
||||
description: workflowMcpServer.description,
|
||||
})
|
||||
.from(workflowMcpServer)
|
||||
.where(eq(workflowMcpServer.workspaceId, workspaceId))
|
||||
|
||||
const serverIds = servers.map((server) => server.id)
|
||||
const tools =
|
||||
serverIds.length > 0
|
||||
? await db
|
||||
.select({
|
||||
serverId: workflowMcpTool.serverId,
|
||||
toolName: workflowMcpTool.toolName,
|
||||
})
|
||||
.from(workflowMcpTool)
|
||||
.where(inArray(workflowMcpTool.serverId, serverIds))
|
||||
: []
|
||||
|
||||
const toolNamesByServer: Record<string, string[]> = {}
|
||||
for (const tool of tools) {
|
||||
if (!toolNamesByServer[tool.serverId]) {
|
||||
toolNamesByServer[tool.serverId] = []
|
||||
}
|
||||
toolNamesByServer[tool.serverId].push(tool.toolName)
|
||||
}
|
||||
|
||||
const serversWithToolNames = servers.map((server) => ({
|
||||
...server,
|
||||
toolCount: toolNamesByServer[server.id]?.length || 0,
|
||||
toolNames: toolNamesByServer[server.id] || [],
|
||||
}))
|
||||
|
||||
return { success: true, output: { servers: serversWithToolNames, count: servers.length } }
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Create a new MCP server in the workflow's workspace and optionally register
 * a set of already-deployed workflows as tools on it.
 *
 * Workflows listed in `params.workflowIds` are silently skipped (not errored)
 * when they belong to another workspace, are not deployed, or lack a valid
 * start block.
 *
 * @param params - Server name/description/visibility plus optional workflowIds
 *   to register as tools.
 * @param context - Copilot execution context (userId, workflowId, ...).
 * @returns ToolCallResult whose output holds the created server row and the
 *   list of tools that were registered.
 */
export async function executeCreateWorkspaceMcpServer(
  params: CreateWorkspaceMcpServerParams,
  context: ExecutionContext
): Promise<ToolCallResult> {
  try {
    const workflowId = params.workflowId || context.workflowId
    if (!workflowId) {
      return { success: false, error: 'workflowId is required' }
    }
    // Access check doubles as the workspace resolver.
    const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId)
    const workspaceId = workflowRecord.workspaceId
    if (!workspaceId) {
      return { success: false, error: 'workspaceId is required' }
    }

    const name = params.name?.trim()
    if (!name) {
      return { success: false, error: 'name is required' }
    }

    const serverId = crypto.randomUUID()
    const [server] = await db
      .insert(workflowMcpServer)
      .values({
        id: serverId,
        workspaceId,
        createdBy: context.userId,
        name,
        description: params.description?.trim() || null,
        isPublic: params.isPublic ?? false,
        createdAt: new Date(),
        updatedAt: new Date(),
      })
      .returning()

    const workflowIds: string[] = params.workflowIds || []
    const addedTools: Array<{ workflowId: string; toolName: string }> = []

    if (workflowIds.length > 0) {
      const workflows = await db.select().from(workflow).where(inArray(workflow.id, workflowIds))

      for (const wf of workflows) {
        // Only deployed workflows from this same workspace may be exposed as tools.
        if (wf.workspaceId !== workspaceId || !wf.isDeployed) {
          continue
        }
        // A workflow needs a valid start block to be invocable as an MCP tool.
        const hasStartBlock = await hasValidStartBlock(wf.id)
        if (!hasStartBlock) {
          continue
        }
        const toolName = sanitizeToolName(wf.name || `workflow_${wf.id}`)
        await db.insert(workflowMcpTool).values({
          id: crypto.randomUUID(),
          serverId,
          workflowId: wf.id,
          toolName,
          toolDescription: wf.description || `Execute ${wf.name} workflow`,
          parameterSchema: {},
          createdAt: new Date(),
          updatedAt: new Date(),
        })
        addedTools.push({ workflowId: wf.id, toolName })
      }
    }

    return { success: true, output: { server, addedTools } }
  } catch (error) {
    return { success: false, error: error instanceof Error ? error.message : String(error) }
  }
}
|
||||
341
apps/sim/lib/copilot/orchestrator/tool-executor/index.ts
Normal file
341
apps/sim/lib/copilot/orchestrator/tool-executor/index.ts
Normal file
@@ -0,0 +1,341 @@
|
||||
import { db } from '@sim/db'
|
||||
import { workflow } from '@sim/db/schema'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { eq } from 'drizzle-orm'
|
||||
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
|
||||
import type {
|
||||
ExecutionContext,
|
||||
ToolCallResult,
|
||||
ToolCallState,
|
||||
} from '@/lib/copilot/orchestrator/types'
|
||||
import { routeExecution } from '@/lib/copilot/tools/server/router'
|
||||
import { env } from '@/lib/core/config/env'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
|
||||
import { getTool, resolveToolId } from '@/tools/utils'
|
||||
import {
|
||||
executeCheckDeploymentStatus,
|
||||
executeCreateWorkspaceMcpServer,
|
||||
executeDeployApi,
|
||||
executeDeployChat,
|
||||
executeDeployMcp,
|
||||
executeListWorkspaceMcpServers,
|
||||
executeRedeploy,
|
||||
} from './deployment-tools'
|
||||
import { executeIntegrationToolDirect } from './integration-tools'
|
||||
import type {
|
||||
CheckDeploymentStatusParams,
|
||||
CreateFolderParams,
|
||||
CreateWorkflowParams,
|
||||
CreateWorkspaceMcpServerParams,
|
||||
DeployApiParams,
|
||||
DeployChatParams,
|
||||
DeployMcpParams,
|
||||
GenerateApiKeyParams,
|
||||
GetBlockOutputsParams,
|
||||
GetBlockUpstreamReferencesParams,
|
||||
GetDeployedWorkflowStateParams,
|
||||
GetUserWorkflowParams,
|
||||
GetWorkflowDataParams,
|
||||
GetWorkflowFromNameParams,
|
||||
ListFoldersParams,
|
||||
ListUserWorkflowsParams,
|
||||
ListWorkspaceMcpServersParams,
|
||||
MoveFolderParams,
|
||||
MoveWorkflowParams,
|
||||
RenameWorkflowParams,
|
||||
RunBlockParams,
|
||||
RunFromBlockParams,
|
||||
RunWorkflowParams,
|
||||
RunWorkflowUntilBlockParams,
|
||||
SetGlobalWorkflowVariablesParams,
|
||||
} from './param-types'
|
||||
import { PLATFORM_ACTIONS_CONTENT } from './platform-actions'
|
||||
import {
|
||||
executeCreateFolder,
|
||||
executeCreateWorkflow,
|
||||
executeGenerateApiKey,
|
||||
executeGetBlockOutputs,
|
||||
executeGetBlockUpstreamReferences,
|
||||
executeGetDeployedWorkflowState,
|
||||
executeGetUserWorkflow,
|
||||
executeGetWorkflowData,
|
||||
executeGetWorkflowFromName,
|
||||
executeListFolders,
|
||||
executeListUserWorkflows,
|
||||
executeListUserWorkspaces,
|
||||
executeMoveFolder,
|
||||
executeMoveWorkflow,
|
||||
executeRenameWorkflow,
|
||||
executeRunBlock,
|
||||
executeRunFromBlock,
|
||||
executeRunWorkflow,
|
||||
executeRunWorkflowUntilBlock,
|
||||
executeSetGlobalWorkflowVariables,
|
||||
} from './workflow-tools'
|
||||
|
||||
const logger = createLogger('CopilotToolExecutor')

/**
 * Copilot server tools dispatched through the server tool router
 * (routeExecution) rather than the workflow tool handlers or the
 * integration tool registry.
 */
const SERVER_TOOLS = new Set<string>([
  'get_blocks_and_tools',
  'get_blocks_metadata',
  'get_block_options',
  'get_block_config',
  'get_trigger_blocks',
  'edit_workflow',
  'get_workflow_console',
  'search_documentation',
  'search_online',
  'set_environment_variables',
  'get_credentials',
  'make_api_request',
  'knowledge_base',
])
|
||||
|
||||
/**
 * Workflow-manipulation tools implemented natively in TypeScript, keyed by
 * copilot tool name. Each handler casts the untyped LLM-provided params to
 * its specific param interface before delegating to the executor function.
 */
const SIM_WORKFLOW_TOOL_HANDLERS: Record<
  string,
  (params: Record<string, unknown>, context: ExecutionContext) => Promise<ToolCallResult>
> = {
  get_user_workflow: (p, c) => executeGetUserWorkflow(p as GetUserWorkflowParams, c),
  get_workflow_from_name: (p, c) => executeGetWorkflowFromName(p as GetWorkflowFromNameParams, c),
  list_user_workflows: (p, c) => executeListUserWorkflows(p as ListUserWorkflowsParams, c),
  list_user_workspaces: (_p, c) => executeListUserWorkspaces(c),
  list_folders: (p, c) => executeListFolders(p as ListFoldersParams, c),
  create_workflow: (p, c) => executeCreateWorkflow(p as CreateWorkflowParams, c),
  create_folder: (p, c) => executeCreateFolder(p as CreateFolderParams, c),
  rename_workflow: (p, c) => executeRenameWorkflow(p as unknown as RenameWorkflowParams, c),
  move_workflow: (p, c) => executeMoveWorkflow(p as unknown as MoveWorkflowParams, c),
  move_folder: (p, c) => executeMoveFolder(p as unknown as MoveFolderParams, c),
  get_workflow_data: (p, c) => executeGetWorkflowData(p as GetWorkflowDataParams, c),
  get_block_outputs: (p, c) => executeGetBlockOutputs(p as GetBlockOutputsParams, c),
  get_block_upstream_references: (p, c) =>
    executeGetBlockUpstreamReferences(p as unknown as GetBlockUpstreamReferencesParams, c),
  run_workflow: (p, c) => executeRunWorkflow(p as RunWorkflowParams, c),
  run_workflow_until_block: (p, c) =>
    executeRunWorkflowUntilBlock(p as unknown as RunWorkflowUntilBlockParams, c),
  run_from_block: (p, c) => executeRunFromBlock(p as unknown as RunFromBlockParams, c),
  run_block: (p, c) => executeRunBlock(p as unknown as RunBlockParams, c),
  get_deployed_workflow_state: (p, c) =>
    executeGetDeployedWorkflowState(p as GetDeployedWorkflowStateParams, c),
  generate_api_key: (p, c) => executeGenerateApiKey(p as unknown as GenerateApiKeyParams, c),
  // Static content tool: returns the platform quick-reference text.
  get_platform_actions: () =>
    Promise.resolve({
      success: true,
      output: { content: PLATFORM_ACTIONS_CONTENT },
    }),
  set_global_workflow_variables: (p, c) =>
    executeSetGlobalWorkflowVariables(p as SetGlobalWorkflowVariablesParams, c),
  deploy_api: (p, c) => executeDeployApi(p as DeployApiParams, c),
  deploy_chat: (p, c) => executeDeployChat(p as DeployChatParams, c),
  deploy_mcp: (p, c) => executeDeployMcp(p as DeployMcpParams, c),
  redeploy: (_p, c) => executeRedeploy(c),
  check_deployment_status: (p, c) =>
    executeCheckDeploymentStatus(p as CheckDeploymentStatusParams, c),
  list_workspace_mcp_servers: (p, c) =>
    executeListWorkspaceMcpServers(p as ListWorkspaceMcpServersParams, c),
  create_workspace_mcp_server: (p, c) =>
    executeCreateWorkspaceMcpServer(p as CreateWorkspaceMcpServerParams, c),
  // OAuth connections require a browser flow, so this tool only returns guidance
  // pointing the user at the workspace UI; it never performs the OAuth itself.
  oauth_get_auth_link: async (p, _c) => {
    const providerName = (p.providerName || p.provider_name || 'the provider') as string
    try {
      const baseUrl = getBaseUrl()
      const settingsUrl = `${baseUrl}/workspace`
      return {
        success: true,
        output: {
          message: `To connect ${providerName}, the user must authorize via their browser.`,
          oauth_url: settingsUrl,
          instructions: `Open ${settingsUrl} in a browser and go to the workflow editor to connect ${providerName} credentials.`,
          provider: providerName,
          baseUrl,
        },
      }
    } catch {
      // If getBaseUrl throws, fall back to generic guidance without a URL.
      return {
        success: true,
        output: {
          message: `To connect ${providerName}, the user must authorize via their browser.`,
          instructions: `Open the Sim workspace in a browser and go to the workflow editor to connect ${providerName} credentials.`,
          provider: providerName,
        },
      }
    }
  },
}
|
||||
|
||||
/**
|
||||
* Check whether a tool can be executed on the Sim (TypeScript) side.
|
||||
*
|
||||
* Tools that are only available on the Go backend (e.g. search_patterns,
|
||||
* search_errors, remember_debug) will return false. The subagent tool_call
|
||||
* handler uses this to decide whether to execute a tool locally or let the
|
||||
* Go backend's own tool_result SSE event handle it.
|
||||
*/
|
||||
export function isToolAvailableOnSimSide(toolName: string): boolean {
|
||||
if (SERVER_TOOLS.has(toolName)) return true
|
||||
if (toolName in SIM_WORKFLOW_TOOL_HANDLERS) return true
|
||||
const resolvedToolName = resolveToolId(toolName)
|
||||
return !!getTool(resolvedToolName)
|
||||
}
|
||||
|
||||
/**
|
||||
* Check whether a tool is a user-installed integration tool (e.g. Gmail, Slack).
|
||||
* These tools exist in the tool registry but are NOT copilot server tools or
|
||||
* known workflow manipulation tools. They should require user approval in
|
||||
* interactive mode.
|
||||
*/
|
||||
export function isIntegrationTool(toolName: string): boolean {
|
||||
if (SERVER_TOOLS.has(toolName)) return false
|
||||
if (toolName in SIM_WORKFLOW_TOOL_HANDLERS) return false
|
||||
const resolvedToolName = resolveToolId(toolName)
|
||||
return !!getTool(resolvedToolName)
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a tool server-side without calling internal routes.
|
||||
*/
|
||||
export async function executeToolServerSide(
|
||||
toolCall: ToolCallState,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
const toolName = toolCall.name
|
||||
const resolvedToolName = resolveToolId(toolName)
|
||||
|
||||
if (SERVER_TOOLS.has(toolName)) {
|
||||
return executeServerToolDirect(toolName, toolCall.params || {}, context)
|
||||
}
|
||||
|
||||
if (toolName in SIM_WORKFLOW_TOOL_HANDLERS) {
|
||||
return executeSimWorkflowTool(toolName, toolCall.params || {}, context)
|
||||
}
|
||||
|
||||
const toolConfig = getTool(resolvedToolName)
|
||||
if (!toolConfig) {
|
||||
logger.warn('Tool not found in registry', { toolName, resolvedToolName })
|
||||
return {
|
||||
success: false,
|
||||
error: `Tool not found: ${toolName}`,
|
||||
}
|
||||
}
|
||||
|
||||
return executeIntegrationToolDirect(toolCall, toolConfig, context)
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a server tool directly via the server tool router.
|
||||
*/
|
||||
async function executeServerToolDirect(
|
||||
toolName: string,
|
||||
params: Record<string, unknown>,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
// Inject workflowId from context if not provided in params
|
||||
// This is needed for tools like set_environment_variables that require workflowId
|
||||
const enrichedParams = { ...params }
|
||||
if (!enrichedParams.workflowId && context.workflowId) {
|
||||
enrichedParams.workflowId = context.workflowId
|
||||
}
|
||||
|
||||
const result = await routeExecution(toolName, enrichedParams, { userId: context.userId })
|
||||
return { success: true, output: result }
|
||||
} catch (error) {
|
||||
logger.error('Server tool execution failed', {
|
||||
toolName,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : 'Server tool execution failed',
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async function executeSimWorkflowTool(
|
||||
toolName: string,
|
||||
params: Record<string, unknown>,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
const handler = SIM_WORKFLOW_TOOL_HANDLERS[toolName]
|
||||
if (!handler) return { success: false, error: `Unsupported workflow tool: ${toolName}` }
|
||||
return handler(params, context)
|
||||
}
|
||||
|
||||
/** Timeout for the mark-complete POST to the copilot backend (30 s). */
const MARK_COMPLETE_TIMEOUT_MS = 30_000

/**
 * Notify the copilot backend that a tool has completed.
 *
 * @param toolCallId - Backend-assigned id of the tool call being resolved.
 * @param toolName - Name of the tool that ran.
 * @param status - Numeric status code describing the outcome.
 * @param message - Optional human-readable result summary.
 * @param data - Optional structured result payload.
 * @returns true when the backend acknowledged the completion; false on any
 *   failure or timeout (errors are logged, never thrown).
 */
export async function markToolComplete(
  toolCallId: string,
  toolName: string,
  status: number,
  message?: unknown,
  data?: unknown
): Promise<boolean> {
  try {
    // Abort the request if the backend does not respond within the timeout.
    const controller = new AbortController()
    const timeoutId = setTimeout(() => controller.abort(), MARK_COMPLETE_TIMEOUT_MS)

    try {
      const response = await fetch(`${SIM_AGENT_API_URL}/api/tools/mark-complete`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}),
        },
        body: JSON.stringify({
          id: toolCallId,
          name: toolName,
          status,
          message,
          data,
        }),
        signal: controller.signal,
      })

      if (!response.ok) {
        logger.warn('Mark-complete call failed', { toolCallId, toolName, status: response.status })
        return false
      }

      return true
    } finally {
      // Always clear the timer, whether the fetch succeeded, failed, or aborted.
      clearTimeout(timeoutId)
    }
  } catch (error) {
    // AbortController cancellation surfaces as a DOMException named 'AbortError'.
    const isTimeout = error instanceof DOMException && error.name === 'AbortError'
    logger.error('Mark-complete call failed', {
      toolCallId,
      toolName,
      timedOut: isTimeout,
      error: error instanceof Error ? error.message : String(error),
    })
    return false
  }
}
|
||||
|
||||
/**
|
||||
* Prepare execution context with cached environment values.
|
||||
*/
|
||||
export async function prepareExecutionContext(
|
||||
userId: string,
|
||||
workflowId: string
|
||||
): Promise<ExecutionContext> {
|
||||
const workflowResult = await db
|
||||
.select({ workspaceId: workflow.workspaceId })
|
||||
.from(workflow)
|
||||
.where(eq(workflow.id, workflowId))
|
||||
.limit(1)
|
||||
const workspaceId = workflowResult[0]?.workspaceId ?? undefined
|
||||
|
||||
const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId)
|
||||
|
||||
return {
|
||||
userId,
|
||||
workflowId,
|
||||
workspaceId,
|
||||
decryptedEnvVars,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,105 @@
|
||||
import { db } from '@sim/db'
|
||||
import { account, workflow } from '@sim/db/schema'
|
||||
import { and, eq } from 'drizzle-orm'
|
||||
import type {
|
||||
ExecutionContext,
|
||||
ToolCallResult,
|
||||
ToolCallState,
|
||||
} from '@/lib/copilot/orchestrator/types'
|
||||
import { generateRequestId } from '@/lib/core/utils/request'
|
||||
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
|
||||
import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils'
|
||||
import { resolveEnvVarReferences } from '@/executor/utils/reference-validation'
|
||||
import { executeTool } from '@/tools'
|
||||
import { resolveToolId } from '@/tools/utils'
|
||||
|
||||
/**
 * Execute a registry (integration) tool directly, resolving env-var
 * references, OAuth access tokens, and required API keys before invoking
 * executeTool.
 *
 * @param toolCall - The copilot tool call (name + LLM-provided params).
 * @param toolConfig - The tool's registry config (oauth / apiKey requirements).
 * @param context - Execution context carrying userId/workflowId and cached env vars.
 * @returns ToolCallResult mirroring executeTool's success/output/error.
 */
export async function executeIntegrationToolDirect(
  toolCall: ToolCallState,
  toolConfig: {
    oauth?: { required?: boolean; provider?: string }
    params?: { apiKey?: { required?: boolean } }
  },
  context: ExecutionContext
): Promise<ToolCallResult> {
  const { userId, workflowId } = context
  const toolName = resolveToolId(toolCall.name)
  const toolArgs = toolCall.params || {}

  // Resolve the workspace when the context lacks it so env vars are scoped correctly.
  let workspaceId = context.workspaceId
  if (!workspaceId && workflowId) {
    const workflowResult = await db
      .select({ workspaceId: workflow.workspaceId })
      .from(workflow)
      .where(eq(workflow.id, workflowId))
      .limit(1)
    workspaceId = workflowResult[0]?.workspaceId ?? undefined
  }

  // Prefer env vars cached on the context; otherwise decrypt them now.
  const decryptedEnvVars =
    context.decryptedEnvVars || (await getEffectiveDecryptedEnv(userId, workspaceId))

  // Deep resolution walks nested objects to replace {{ENV_VAR}} references.
  // Safe because tool arguments originate from the LLM (not direct user input)
  // and env vars belong to the user themselves.
  const executionParams = resolveEnvVarReferences(toolArgs, decryptedEnvVars, {
    deep: true,
  }) as Record<string, unknown>

  // OAuth-gated tools need an access token from the user's linked account.
  if (toolConfig.oauth?.required && toolConfig.oauth.provider) {
    const provider = toolConfig.oauth.provider
    const accounts = await db
      .select()
      .from(account)
      .where(and(eq(account.providerId, provider), eq(account.userId, userId)))
      .limit(1)

    if (!accounts.length) {
      return {
        success: false,
        error: `No ${provider} account connected. Please connect your account first.`,
      }
    }

    const acc = accounts[0]
    const requestId = generateRequestId()
    // refreshTokenIfNeeded returns the current (possibly refreshed) access token.
    const { accessToken } = await refreshTokenIfNeeded(requestId, acc, acc.id)

    if (!accessToken) {
      return {
        success: false,
        error: `OAuth token not available for ${provider}. Please reconnect your account.`,
      }
    }

    executionParams.accessToken = accessToken
  }

  if (toolConfig.params?.apiKey?.required && !executionParams.apiKey) {
    return {
      success: false,
      error: `API key not provided for ${toolName}. Use {{YOUR_API_KEY_ENV_VAR}} to reference your environment variable.`,
    }
  }

  // Context metadata passed through to the tool implementation.
  executionParams._context = {
    workflowId,
    userId,
  }

  // function_execute runs user code and needs extra execution scaffolding.
  // Block/workflow data is empty because this runs outside a workflow execution.
  if (toolName === 'function_execute') {
    executionParams.envVars = decryptedEnvVars
    executionParams.workflowVariables = {}
    executionParams.blockData = {}
    executionParams.blockNameMapping = {}
    executionParams.language = executionParams.language || 'javascript'
    executionParams.timeout = executionParams.timeout || 30000
  }

  const result = await executeTool(toolName, executionParams)

  return {
    success: result.success,
    output: result.output,
    error: result.error,
  }
}
|
||||
187
apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts
Normal file
187
apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts
Normal file
@@ -0,0 +1,187 @@
|
||||
/**
|
||||
* Typed parameter interfaces for tool executor functions.
|
||||
* Replaces Record<string, any> with specific shapes based on actual property access.
|
||||
*/
|
||||
|
||||
// === Workflow Query Params ===
|
||||
|
||||
export interface GetUserWorkflowParams {
|
||||
workflowId?: string
|
||||
}
|
||||
|
||||
export interface GetWorkflowFromNameParams {
|
||||
workflow_name?: string
|
||||
}
|
||||
|
||||
export interface ListUserWorkflowsParams {
|
||||
workspaceId?: string
|
||||
folderId?: string
|
||||
}
|
||||
|
||||
export interface GetWorkflowDataParams {
|
||||
workflowId?: string
|
||||
data_type?: string
|
||||
dataType?: string
|
||||
}
|
||||
|
||||
export interface GetBlockOutputsParams {
|
||||
workflowId?: string
|
||||
blockIds?: string[]
|
||||
}
|
||||
|
||||
export interface GetBlockUpstreamReferencesParams {
|
||||
workflowId?: string
|
||||
blockIds: string[]
|
||||
}
|
||||
|
||||
export interface ListFoldersParams {
|
||||
workspaceId?: string
|
||||
}
|
||||
|
||||
// === Workflow Mutation Params ===
|
||||
|
||||
export interface CreateWorkflowParams {
|
||||
name?: string
|
||||
workspaceId?: string
|
||||
folderId?: string
|
||||
description?: string
|
||||
}
|
||||
|
||||
export interface CreateFolderParams {
|
||||
name?: string
|
||||
workspaceId?: string
|
||||
parentId?: string
|
||||
}
|
||||
|
||||
export interface RunWorkflowParams {
|
||||
workflowId?: string
|
||||
workflow_input?: unknown
|
||||
input?: unknown
|
||||
/** When true, runs the deployed version instead of the draft. Default: false (draft). */
|
||||
useDeployedState?: boolean
|
||||
}
|
||||
|
||||
export interface RunWorkflowUntilBlockParams {
|
||||
workflowId?: string
|
||||
workflow_input?: unknown
|
||||
input?: unknown
|
||||
/** The block ID to stop after. Execution halts once this block completes. */
|
||||
stopAfterBlockId: string
|
||||
/** When true, runs the deployed version instead of the draft. Default: false (draft). */
|
||||
useDeployedState?: boolean
|
||||
}
|
||||
|
||||
export interface RunFromBlockParams {
|
||||
workflowId?: string
|
||||
/** The block ID to start execution from. */
|
||||
startBlockId: string
|
||||
/** Optional execution ID to load the snapshot from. If omitted, uses the latest execution. */
|
||||
executionId?: string
|
||||
workflow_input?: unknown
|
||||
input?: unknown
|
||||
useDeployedState?: boolean
|
||||
}
|
||||
|
||||
export interface RunBlockParams {
|
||||
workflowId?: string
|
||||
/** The block ID to run. Only this block executes using cached upstream outputs. */
|
||||
blockId: string
|
||||
/** Optional execution ID to load the snapshot from. If omitted, uses the latest execution. */
|
||||
executionId?: string
|
||||
workflow_input?: unknown
|
||||
input?: unknown
|
||||
useDeployedState?: boolean
|
||||
}
|
||||
|
||||
export interface GetDeployedWorkflowStateParams {
|
||||
workflowId?: string
|
||||
}
|
||||
|
||||
export interface GenerateApiKeyParams {
|
||||
name: string
|
||||
workspaceId?: string
|
||||
}
|
||||
|
||||
export interface VariableOperation {
|
||||
name: string
|
||||
operation: 'add' | 'edit' | 'delete'
|
||||
value?: unknown
|
||||
type?: string
|
||||
}
|
||||
|
||||
export interface SetGlobalWorkflowVariablesParams {
|
||||
workflowId?: string
|
||||
operations?: VariableOperation[]
|
||||
}
|
||||
|
||||
// === Deployment Params ===

/** Parameters for deploying or undeploying a workflow as an API endpoint. */
export interface DeployApiParams {
  workflowId?: string
  /** Deployment action to perform. */
  action?: 'deploy' | 'undeploy'
}

/** Parameters for deploying a workflow as a hosted chat interface. */
export interface DeployChatParams {
  workflowId?: string
  action?: 'deploy' | 'undeploy' | 'update'
  /** Chat deployment identifier — presumably used to address an existing deployment; confirm with handler. */
  identifier?: string
  title?: string
  description?: string
  /** Visual customizations for the chat UI. */
  customizations?: {
    primaryColor?: string
    secondaryColor?: string
    welcomeMessage?: string
    iconUrl?: string
  }
  /** How visitors authenticate to the deployed chat. */
  authType?: 'none' | 'password' | 'public' | 'email' | 'sso'
  password?: string
  subdomain?: string
  allowedEmails?: string[]
  outputConfigs?: unknown[]
}

/** Parameters for exposing a workflow as a tool on an MCP server. */
export interface DeployMcpParams {
  workflowId?: string
  action?: 'deploy' | 'undeploy'
  /** Name the workflow is exposed under as an MCP tool. */
  toolName?: string
  toolDescription?: string
  /** Target MCP server ID. */
  serverId?: string
  /** Schema describing the MCP tool's parameters. */
  parameterSchema?: Record<string, unknown>
}

/** Parameters for checking a workflow's deployment status. */
export interface CheckDeploymentStatusParams {
  workflowId?: string
}

/** Parameters for listing MCP servers available to a workspace. */
export interface ListWorkspaceMcpServersParams {
  workspaceId?: string
  workflowId?: string
}

/** Parameters for creating a workspace MCP server. */
export interface CreateWorkspaceMcpServerParams {
  workflowId?: string
  name?: string
  description?: string
  toolName?: string
  toolDescription?: string
  serverName?: string
  /** Whether the server is publicly accessible. */
  isPublic?: boolean
  /** Workflows to expose on the server. */
  workflowIds?: string[]
}
|
||||
|
||||
// === Workflow Organization Params ===

/** Parameters for renaming a workflow. */
export interface RenameWorkflowParams {
  /** Workflow to rename (required — no context fallback). */
  workflowId: string
  /** New workflow name. */
  name: string
}

/** Parameters for moving a workflow into a folder (or back to the workspace root). */
export interface MoveWorkflowParams {
  /** Workflow to move (required — no context fallback). */
  workflowId: string
  /** Destination folder, or null to place the workflow at the workspace root. */
  folderId: string | null
}

/** Parameters for re-parenting a folder. */
export interface MoveFolderParams {
  /** Folder to move. */
  folderId: string
  /** New parent folder, or null to move the folder to the top level. */
  parentId: string | null
}
|
||||
@@ -0,0 +1,117 @@
|
||||
/**
 * Static content for the get_platform_actions tool.
 * Contains the Sim platform quick reference and keyboard shortcuts.
 *
 * NOTE: this is a plain template literal served as-is — any edit to the
 * markdown below directly changes the tool's output, so keep it in sync with
 * the UI when shortcuts or menus change.
 */
export const PLATFORM_ACTIONS_CONTENT = `# Sim Platform Quick Reference & Keyboard Shortcuts

## Keyboard Shortcuts
**Mod** = Cmd (macOS) / Ctrl (Windows/Linux). Shortcuts work when canvas is focused.

### Workflow Actions
| Shortcut | Action |
|----------|--------|
| Mod+Enter | Run workflow (or cancel if running) |
| Mod+Z | Undo |
| Mod+Shift+Z | Redo |
| Mod+C | Copy selected blocks |
| Mod+V | Paste blocks |
| Delete/Backspace | Delete selected blocks or edges |
| Shift+L | Auto-layout canvas |
| Mod+Shift+F | Fit to view |
| Mod+Shift+Enter | Accept Copilot changes |

### Panel Navigation
| Shortcut | Action |
|----------|--------|
| C | Focus Copilot tab |
| T | Focus Toolbar tab |
| E | Focus Editor tab |
| Mod+F | Focus Toolbar search |

### Global Navigation
| Shortcut | Action |
|----------|--------|
| Mod+K | Open search |
| Mod+Shift+A | Add new agent workflow |
| Mod+Y | Go to templates |
| Mod+L | Go to logs |

### Utility
| Shortcut | Action |
|----------|--------|
| Mod+D | Clear terminal console |
| Mod+E | Clear notifications |

### Mouse Controls
| Action | Control |
|--------|---------|
| Pan/move canvas | Left-drag on empty space, scroll, or trackpad |
| Select multiple blocks | Right-drag to draw selection box |
| Drag block | Left-drag on block header |
| Add to selection | Mod+Click on blocks |

## Quick Reference — Workspaces
| Action | How |
|--------|-----|
| Create workspace | Click workspace dropdown → New Workspace |
| Switch workspaces | Click workspace dropdown → Select workspace |
| Invite team members | Sidebar → Invite |
| Rename/Duplicate/Export/Delete workspace | Right-click workspace → action |

## Quick Reference — Workflows
| Action | How |
|--------|-----|
| Create workflow | Click + button in sidebar |
| Reorder/move workflows | Drag workflow up/down or onto a folder |
| Import workflow | Click import button in sidebar → Select file |
| Multi-select workflows | Mod+Click or Shift+Click workflows in sidebar |
| Open in new tab | Right-click workflow → Open in New Tab |
| Rename/Color/Duplicate/Export/Delete | Right-click workflow → action |

## Quick Reference — Blocks
| Action | How |
|--------|-----|
| Add a block | Drag from Toolbar panel, or right-click canvas → Add Block |
| Multi-select blocks | Mod+Click additional blocks, or shift-drag selection box |
| Copy/Paste blocks | Mod+C / Mod+V |
| Duplicate/Delete blocks | Right-click → action |
| Rename a block | Click block name in header |
| Enable/Disable block | Right-click → Enable/Disable |
| Lock/Unlock block | Hover block → Click lock icon (Admin only) |
| Toggle handle orientation | Right-click → Toggle Handles |
| Configure a block | Select block → use Editor panel on right |

## Quick Reference — Connections
| Action | How |
|--------|-----|
| Create connection | Drag from output handle to input handle |
| Delete connection | Click edge to select → Delete key |
| Use output in another block | Drag connection tag into input field |

## Quick Reference — Running & Testing
| Action | How |
|--------|-----|
| Run workflow | Click Run Workflow button or Mod+Enter |
| Stop workflow | Click Stop button or Mod+Enter while running |
| Test with chat | Use Chat panel on the right side |
| Run from block | Hover block → Click play button, or right-click → Run from block |
| Run until block | Right-click block → Run until block |
| View execution logs | Open terminal panel at bottom, or Mod+L |
| Filter/Search/Copy/Clear logs | Terminal panel controls |

## Quick Reference — Deployment
| Action | How |
|--------|-----|
| Deploy workflow | Click Deploy button in panel |
| Update deployment | Click Update when changes are detected |
| Revert deployment | Previous versions in Deploy tab → Promote to live |
| Copy API endpoint | Deploy tab → API → Copy API cURL |

## Quick Reference — Variables
| Action | How |
|--------|-----|
| Add/Edit/Delete workflow variable | Panel → Variables → Add Variable |
| Add environment variable | Settings → Environment Variables → Add |
| Reference workflow variable | Use <blockName.itemName> syntax |
| Reference environment variable | Use {{ENV_VAR}} syntax |
`
|
||||
@@ -0,0 +1,2 @@
|
||||
/** Barrel module aggregating the workflow tool mutation and query executors. */
export * from './mutations'
export * from './queries'
|
||||
@@ -0,0 +1,624 @@
|
||||
import crypto from 'crypto'
|
||||
import { db } from '@sim/db'
|
||||
import { apiKey, workflow, workflowFolder } from '@sim/db/schema'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { and, eq, isNull, max } from 'drizzle-orm'
|
||||
import { nanoid } from 'nanoid'
|
||||
import { createApiKey } from '@/lib/api-key/auth'
|
||||
import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types'
|
||||
import { generateRequestId } from '@/lib/core/utils/request'
|
||||
import { buildDefaultWorkflowArtifacts } from '@/lib/workflows/defaults'
|
||||
import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow'
|
||||
import {
|
||||
getExecutionState,
|
||||
getLatestExecutionState,
|
||||
} from '@/lib/workflows/executor/execution-state'
|
||||
import { saveWorkflowToNormalizedTables } from '@/lib/workflows/persistence/utils'
|
||||
import { ensureWorkflowAccess, ensureWorkspaceAccess, getDefaultWorkspaceId } from '../access'
|
||||
import type {
|
||||
CreateFolderParams,
|
||||
CreateWorkflowParams,
|
||||
GenerateApiKeyParams,
|
||||
MoveFolderParams,
|
||||
MoveWorkflowParams,
|
||||
RenameWorkflowParams,
|
||||
RunBlockParams,
|
||||
RunFromBlockParams,
|
||||
RunWorkflowParams,
|
||||
RunWorkflowUntilBlockParams,
|
||||
SetGlobalWorkflowVariablesParams,
|
||||
VariableOperation,
|
||||
} from '../param-types'
|
||||
|
||||
const logger = createLogger('WorkflowMutations')
|
||||
|
||||
export async function executeCreateWorkflow(
|
||||
params: CreateWorkflowParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const name = typeof params?.name === 'string' ? params.name.trim() : ''
|
||||
if (!name) {
|
||||
return { success: false, error: 'name is required' }
|
||||
}
|
||||
if (name.length > 200) {
|
||||
return { success: false, error: 'Workflow name must be 200 characters or less' }
|
||||
}
|
||||
const description = typeof params?.description === 'string' ? params.description : null
|
||||
if (description && description.length > 2000) {
|
||||
return { success: false, error: 'Description must be 2000 characters or less' }
|
||||
}
|
||||
|
||||
const workspaceId = params?.workspaceId || (await getDefaultWorkspaceId(context.userId))
|
||||
const folderId = params?.folderId || null
|
||||
|
||||
await ensureWorkspaceAccess(workspaceId, context.userId, true)
|
||||
|
||||
const workflowId = crypto.randomUUID()
|
||||
const now = new Date()
|
||||
|
||||
const folderCondition = folderId ? eq(workflow.folderId, folderId) : isNull(workflow.folderId)
|
||||
const [maxResult] = await db
|
||||
.select({ maxOrder: max(workflow.sortOrder) })
|
||||
.from(workflow)
|
||||
.where(and(eq(workflow.workspaceId, workspaceId), folderCondition))
|
||||
const sortOrder = (maxResult?.maxOrder ?? 0) + 1
|
||||
|
||||
await db.insert(workflow).values({
|
||||
id: workflowId,
|
||||
userId: context.userId,
|
||||
workspaceId,
|
||||
folderId,
|
||||
sortOrder,
|
||||
name,
|
||||
description,
|
||||
color: '#3972F6',
|
||||
lastSynced: now,
|
||||
createdAt: now,
|
||||
updatedAt: now,
|
||||
isDeployed: false,
|
||||
runCount: 0,
|
||||
variables: {},
|
||||
})
|
||||
|
||||
const { workflowState } = buildDefaultWorkflowArtifacts()
|
||||
const saveResult = await saveWorkflowToNormalizedTables(workflowId, workflowState)
|
||||
if (!saveResult.success) {
|
||||
throw new Error(saveResult.error || 'Failed to save workflow state')
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
workflowId,
|
||||
workflowName: name,
|
||||
workspaceId,
|
||||
folderId,
|
||||
},
|
||||
}
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeCreateFolder(
|
||||
params: CreateFolderParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const name = typeof params?.name === 'string' ? params.name.trim() : ''
|
||||
if (!name) {
|
||||
return { success: false, error: 'name is required' }
|
||||
}
|
||||
if (name.length > 200) {
|
||||
return { success: false, error: 'Folder name must be 200 characters or less' }
|
||||
}
|
||||
|
||||
const workspaceId = params?.workspaceId || (await getDefaultWorkspaceId(context.userId))
|
||||
const parentId = params?.parentId || null
|
||||
|
||||
await ensureWorkspaceAccess(workspaceId, context.userId, true)
|
||||
|
||||
const [maxResult] = await db
|
||||
.select({ maxOrder: max(workflowFolder.sortOrder) })
|
||||
.from(workflowFolder)
|
||||
.where(
|
||||
and(
|
||||
eq(workflowFolder.workspaceId, workspaceId),
|
||||
parentId ? eq(workflowFolder.parentId, parentId) : isNull(workflowFolder.parentId)
|
||||
)
|
||||
)
|
||||
const sortOrder = (maxResult?.maxOrder ?? 0) + 1
|
||||
|
||||
const folderId = crypto.randomUUID()
|
||||
await db.insert(workflowFolder).values({
|
||||
id: folderId,
|
||||
userId: context.userId,
|
||||
workspaceId,
|
||||
parentId,
|
||||
name,
|
||||
sortOrder,
|
||||
createdAt: new Date(),
|
||||
updatedAt: new Date(),
|
||||
})
|
||||
|
||||
return { success: true, output: { folderId, name, workspaceId, parentId } }
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeRunWorkflow(
|
||||
params: RunWorkflowParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workflowId = params.workflowId || context.workflowId
|
||||
if (!workflowId) {
|
||||
return { success: false, error: 'workflowId is required' }
|
||||
}
|
||||
|
||||
const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId)
|
||||
|
||||
const useDraftState = !params.useDeployedState
|
||||
|
||||
const result = await executeWorkflow(
|
||||
{
|
||||
id: workflowRecord.id,
|
||||
userId: workflowRecord.userId,
|
||||
workspaceId: workflowRecord.workspaceId,
|
||||
variables: workflowRecord.variables || {},
|
||||
},
|
||||
generateRequestId(),
|
||||
params.workflow_input || params.input || undefined,
|
||||
context.userId,
|
||||
{ enabled: true, useDraftState }
|
||||
)
|
||||
|
||||
return {
|
||||
success: result.success,
|
||||
output: {
|
||||
executionId: result.metadata?.executionId,
|
||||
success: result.success,
|
||||
output: result.output,
|
||||
logs: result.logs,
|
||||
},
|
||||
error: result.success ? undefined : result.error || 'Workflow execution failed',
|
||||
}
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeSetGlobalWorkflowVariables(
|
||||
params: SetGlobalWorkflowVariablesParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workflowId = params.workflowId || context.workflowId
|
||||
if (!workflowId) {
|
||||
return { success: false, error: 'workflowId is required' }
|
||||
}
|
||||
const operations: VariableOperation[] = Array.isArray(params.operations)
|
||||
? params.operations
|
||||
: []
|
||||
const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId)
|
||||
|
||||
interface WorkflowVariable {
|
||||
id: string
|
||||
workflowId?: string
|
||||
name: string
|
||||
type: string
|
||||
value?: unknown
|
||||
}
|
||||
const currentVarsRecord = (workflowRecord.variables as Record<string, unknown>) || {}
|
||||
const byName: Record<string, WorkflowVariable> = {}
|
||||
Object.values(currentVarsRecord).forEach((v) => {
|
||||
if (v && typeof v === 'object' && 'id' in v && 'name' in v) {
|
||||
const variable = v as WorkflowVariable
|
||||
byName[String(variable.name)] = variable
|
||||
}
|
||||
})
|
||||
|
||||
for (const op of operations) {
|
||||
const key = String(op?.name || '')
|
||||
if (!key) continue
|
||||
const nextType = op?.type || byName[key]?.type || 'plain'
|
||||
const coerceValue = (value: unknown, type: string): unknown => {
|
||||
if (value === undefined) return value
|
||||
if (type === 'number') {
|
||||
const n = Number(value)
|
||||
return Number.isNaN(n) ? value : n
|
||||
}
|
||||
if (type === 'boolean') {
|
||||
const v = String(value).trim().toLowerCase()
|
||||
if (v === 'true') return true
|
||||
if (v === 'false') return false
|
||||
return value
|
||||
}
|
||||
if (type === 'array' || type === 'object') {
|
||||
try {
|
||||
const parsed = JSON.parse(String(value))
|
||||
if (type === 'array' && Array.isArray(parsed)) return parsed
|
||||
if (type === 'object' && parsed && typeof parsed === 'object' && !Array.isArray(parsed))
|
||||
return parsed
|
||||
} catch (error) {
|
||||
logger.warn('Failed to parse JSON value for variable coercion', {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
return value
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
if (op.operation === 'delete') {
|
||||
delete byName[key]
|
||||
continue
|
||||
}
|
||||
const typedValue = coerceValue(op.value, nextType)
|
||||
if (op.operation === 'add') {
|
||||
byName[key] = {
|
||||
id: crypto.randomUUID(),
|
||||
workflowId,
|
||||
name: key,
|
||||
type: nextType,
|
||||
value: typedValue,
|
||||
}
|
||||
continue
|
||||
}
|
||||
if (op.operation === 'edit') {
|
||||
if (!byName[key]) {
|
||||
byName[key] = {
|
||||
id: crypto.randomUUID(),
|
||||
workflowId,
|
||||
name: key,
|
||||
type: nextType,
|
||||
value: typedValue,
|
||||
}
|
||||
} else {
|
||||
byName[key] = {
|
||||
...byName[key],
|
||||
type: nextType,
|
||||
value: typedValue,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const nextVarsRecord = Object.fromEntries(Object.values(byName).map((v) => [String(v.id), v]))
|
||||
|
||||
await db
|
||||
.update(workflow)
|
||||
.set({ variables: nextVarsRecord, updatedAt: new Date() })
|
||||
.where(eq(workflow.id, workflowId))
|
||||
|
||||
return { success: true, output: { updated: Object.values(byName).length } }
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeRenameWorkflow(
|
||||
params: RenameWorkflowParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workflowId = params.workflowId
|
||||
if (!workflowId) {
|
||||
return { success: false, error: 'workflowId is required' }
|
||||
}
|
||||
const name = typeof params.name === 'string' ? params.name.trim() : ''
|
||||
if (!name) {
|
||||
return { success: false, error: 'name is required' }
|
||||
}
|
||||
if (name.length > 200) {
|
||||
return { success: false, error: 'Workflow name must be 200 characters or less' }
|
||||
}
|
||||
|
||||
await ensureWorkflowAccess(workflowId, context.userId)
|
||||
|
||||
await db
|
||||
.update(workflow)
|
||||
.set({ name, updatedAt: new Date() })
|
||||
.where(eq(workflow.id, workflowId))
|
||||
|
||||
return { success: true, output: { workflowId, name } }
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeMoveWorkflow(
|
||||
params: MoveWorkflowParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workflowId = params.workflowId
|
||||
if (!workflowId) {
|
||||
return { success: false, error: 'workflowId is required' }
|
||||
}
|
||||
|
||||
await ensureWorkflowAccess(workflowId, context.userId)
|
||||
|
||||
const folderId = params.folderId || null
|
||||
|
||||
await db
|
||||
.update(workflow)
|
||||
.set({ folderId, updatedAt: new Date() })
|
||||
.where(eq(workflow.id, workflowId))
|
||||
|
||||
return { success: true, output: { workflowId, folderId } }
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeMoveFolder(
|
||||
params: MoveFolderParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const folderId = params.folderId
|
||||
if (!folderId) {
|
||||
return { success: false, error: 'folderId is required' }
|
||||
}
|
||||
|
||||
const parentId = params.parentId || null
|
||||
|
||||
if (parentId === folderId) {
|
||||
return { success: false, error: 'A folder cannot be moved into itself' }
|
||||
}
|
||||
|
||||
await db
|
||||
.update(workflowFolder)
|
||||
.set({ parentId, updatedAt: new Date() })
|
||||
.where(eq(workflowFolder.id, folderId))
|
||||
|
||||
return { success: true, output: { folderId, parentId } }
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeRunWorkflowUntilBlock(
|
||||
params: RunWorkflowUntilBlockParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workflowId = params.workflowId || context.workflowId
|
||||
if (!workflowId) {
|
||||
return { success: false, error: 'workflowId is required' }
|
||||
}
|
||||
if (!params.stopAfterBlockId) {
|
||||
return { success: false, error: 'stopAfterBlockId is required' }
|
||||
}
|
||||
|
||||
const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId)
|
||||
|
||||
const useDraftState = !params.useDeployedState
|
||||
|
||||
const result = await executeWorkflow(
|
||||
{
|
||||
id: workflowRecord.id,
|
||||
userId: workflowRecord.userId,
|
||||
workspaceId: workflowRecord.workspaceId,
|
||||
variables: workflowRecord.variables || {},
|
||||
},
|
||||
generateRequestId(),
|
||||
params.workflow_input || params.input || undefined,
|
||||
context.userId,
|
||||
{ enabled: true, useDraftState, stopAfterBlockId: params.stopAfterBlockId }
|
||||
)
|
||||
|
||||
return {
|
||||
success: result.success,
|
||||
output: {
|
||||
executionId: result.metadata?.executionId,
|
||||
success: result.success,
|
||||
stoppedAfterBlockId: params.stopAfterBlockId,
|
||||
output: result.output,
|
||||
logs: result.logs,
|
||||
},
|
||||
error: result.success ? undefined : result.error || 'Workflow execution failed',
|
||||
}
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeGenerateApiKey(
|
||||
params: GenerateApiKeyParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const name = typeof params.name === 'string' ? params.name.trim() : ''
|
||||
if (!name) {
|
||||
return { success: false, error: 'name is required' }
|
||||
}
|
||||
if (name.length > 200) {
|
||||
return { success: false, error: 'API key name must be 200 characters or less' }
|
||||
}
|
||||
|
||||
const workspaceId = params.workspaceId || (await getDefaultWorkspaceId(context.userId))
|
||||
await ensureWorkspaceAccess(workspaceId, context.userId, true)
|
||||
|
||||
const existingKey = await db
|
||||
.select({ id: apiKey.id })
|
||||
.from(apiKey)
|
||||
.where(
|
||||
and(
|
||||
eq(apiKey.workspaceId, workspaceId),
|
||||
eq(apiKey.name, name),
|
||||
eq(apiKey.type, 'workspace')
|
||||
)
|
||||
)
|
||||
.limit(1)
|
||||
|
||||
if (existingKey.length > 0) {
|
||||
return {
|
||||
success: false,
|
||||
error: `A workspace API key named "${name}" already exists. Choose a different name.`,
|
||||
}
|
||||
}
|
||||
|
||||
const { key: plainKey, encryptedKey } = await createApiKey(true)
|
||||
if (!encryptedKey) {
|
||||
return { success: false, error: 'Failed to encrypt API key for storage' }
|
||||
}
|
||||
|
||||
const [newKey] = await db
|
||||
.insert(apiKey)
|
||||
.values({
|
||||
id: nanoid(),
|
||||
workspaceId,
|
||||
userId: context.userId,
|
||||
createdBy: context.userId,
|
||||
name,
|
||||
key: encryptedKey,
|
||||
type: 'workspace',
|
||||
createdAt: new Date(),
|
||||
updatedAt: new Date(),
|
||||
})
|
||||
.returning({ id: apiKey.id, name: apiKey.name, createdAt: apiKey.createdAt })
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
id: newKey.id,
|
||||
name: newKey.name,
|
||||
key: plainKey,
|
||||
workspaceId,
|
||||
message:
|
||||
'API key created successfully. Copy this key now — it will not be shown again. Use this key in the x-api-key header when calling workflow API endpoints.',
|
||||
},
|
||||
}
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeRunFromBlock(
|
||||
params: RunFromBlockParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workflowId = params.workflowId || context.workflowId
|
||||
if (!workflowId) {
|
||||
return { success: false, error: 'workflowId is required' }
|
||||
}
|
||||
if (!params.startBlockId) {
|
||||
return { success: false, error: 'startBlockId is required' }
|
||||
}
|
||||
|
||||
const snapshot = params.executionId
|
||||
? await getExecutionState(params.executionId)
|
||||
: await getLatestExecutionState(workflowId)
|
||||
|
||||
if (!snapshot) {
|
||||
return {
|
||||
success: false,
|
||||
error: params.executionId
|
||||
? `No execution state found for execution ${params.executionId}. Run the full workflow first.`
|
||||
: `No execution state found for workflow ${workflowId}. Run the full workflow first to create a snapshot.`,
|
||||
}
|
||||
}
|
||||
|
||||
const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId)
|
||||
const useDraftState = !params.useDeployedState
|
||||
|
||||
const result = await executeWorkflow(
|
||||
{
|
||||
id: workflowRecord.id,
|
||||
userId: workflowRecord.userId,
|
||||
workspaceId: workflowRecord.workspaceId,
|
||||
variables: workflowRecord.variables || {},
|
||||
},
|
||||
generateRequestId(),
|
||||
params.workflow_input || params.input || undefined,
|
||||
context.userId,
|
||||
{
|
||||
enabled: true,
|
||||
useDraftState,
|
||||
runFromBlock: { startBlockId: params.startBlockId, sourceSnapshot: snapshot },
|
||||
}
|
||||
)
|
||||
|
||||
return {
|
||||
success: result.success,
|
||||
output: {
|
||||
executionId: result.metadata?.executionId,
|
||||
success: result.success,
|
||||
startBlockId: params.startBlockId,
|
||||
output: result.output,
|
||||
logs: result.logs,
|
||||
},
|
||||
error: result.success ? undefined : result.error || 'Workflow execution failed',
|
||||
}
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeRunBlock(
|
||||
params: RunBlockParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workflowId = params.workflowId || context.workflowId
|
||||
if (!workflowId) {
|
||||
return { success: false, error: 'workflowId is required' }
|
||||
}
|
||||
if (!params.blockId) {
|
||||
return { success: false, error: 'blockId is required' }
|
||||
}
|
||||
|
||||
const snapshot = params.executionId
|
||||
? await getExecutionState(params.executionId)
|
||||
: await getLatestExecutionState(workflowId)
|
||||
|
||||
if (!snapshot) {
|
||||
return {
|
||||
success: false,
|
||||
error: params.executionId
|
||||
? `No execution state found for execution ${params.executionId}. Run the full workflow first.`
|
||||
: `No execution state found for workflow ${workflowId}. Run the full workflow first to create a snapshot.`,
|
||||
}
|
||||
}
|
||||
|
||||
const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId)
|
||||
const useDraftState = !params.useDeployedState
|
||||
|
||||
const result = await executeWorkflow(
|
||||
{
|
||||
id: workflowRecord.id,
|
||||
userId: workflowRecord.userId,
|
||||
workspaceId: workflowRecord.workspaceId,
|
||||
variables: workflowRecord.variables || {},
|
||||
},
|
||||
generateRequestId(),
|
||||
params.workflow_input || params.input || undefined,
|
||||
context.userId,
|
||||
{
|
||||
enabled: true,
|
||||
useDraftState,
|
||||
runFromBlock: { startBlockId: params.blockId, sourceSnapshot: snapshot },
|
||||
stopAfterBlockId: params.blockId,
|
||||
}
|
||||
)
|
||||
|
||||
return {
|
||||
success: result.success,
|
||||
output: {
|
||||
executionId: result.metadata?.executionId,
|
||||
success: result.success,
|
||||
blockId: params.blockId,
|
||||
output: result.output,
|
||||
logs: result.logs,
|
||||
},
|
||||
error: result.success ? undefined : result.error || 'Workflow execution failed',
|
||||
}
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,615 @@
|
||||
import { db } from '@sim/db'
|
||||
import { customTools, permissions, workflow, workflowFolder, workspace } from '@sim/db/schema'
|
||||
import { and, asc, desc, eq, isNull, or } from 'drizzle-orm'
|
||||
import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types'
|
||||
import {
|
||||
formatNormalizedWorkflowForCopilot,
|
||||
normalizeWorkflowName,
|
||||
} from '@/lib/copilot/tools/shared/workflow-utils'
|
||||
import { mcpService } from '@/lib/mcp/service'
|
||||
import { listWorkspaceFiles } from '@/lib/uploads/contexts/workspace'
|
||||
import { getBlockOutputPaths } from '@/lib/workflows/blocks/block-outputs'
|
||||
import { BlockPathCalculator } from '@/lib/workflows/blocks/block-path-calculator'
|
||||
import {
|
||||
loadDeployedWorkflowState,
|
||||
loadWorkflowFromNormalizedTables,
|
||||
} from '@/lib/workflows/persistence/utils'
|
||||
import { isInputDefinitionTrigger } from '@/lib/workflows/triggers/input-definition-triggers'
|
||||
import { normalizeName } from '@/executor/constants'
|
||||
import type { Loop, Parallel } from '@/stores/workflows/workflow/types'
|
||||
import {
|
||||
ensureWorkflowAccess,
|
||||
ensureWorkspaceAccess,
|
||||
getAccessibleWorkflowsForUser,
|
||||
getDefaultWorkspaceId,
|
||||
} from '../access'
|
||||
import type {
|
||||
GetBlockOutputsParams,
|
||||
GetBlockUpstreamReferencesParams,
|
||||
GetDeployedWorkflowStateParams,
|
||||
GetUserWorkflowParams,
|
||||
GetWorkflowDataParams,
|
||||
GetWorkflowFromNameParams,
|
||||
ListFoldersParams,
|
||||
ListUserWorkflowsParams,
|
||||
} from '../param-types'
|
||||
|
||||
export async function executeGetUserWorkflow(
|
||||
params: GetUserWorkflowParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workflowId = params.workflowId || context.workflowId
|
||||
if (!workflowId) {
|
||||
return { success: false, error: 'workflowId is required' }
|
||||
}
|
||||
|
||||
const { workflow: workflowRecord, workspaceId } = await ensureWorkflowAccess(
|
||||
workflowId,
|
||||
context.userId
|
||||
)
|
||||
|
||||
const normalized = await loadWorkflowFromNormalizedTables(workflowId)
|
||||
const userWorkflow = formatNormalizedWorkflowForCopilot(normalized)
|
||||
if (!userWorkflow) {
|
||||
return { success: false, error: 'Workflow has no normalized data' }
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
workflowId,
|
||||
workflowName: workflowRecord.name || '',
|
||||
workspaceId,
|
||||
userWorkflow,
|
||||
},
|
||||
}
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeGetWorkflowFromName(
|
||||
params: GetWorkflowFromNameParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workflowName = typeof params.workflow_name === 'string' ? params.workflow_name.trim() : ''
|
||||
if (!workflowName) {
|
||||
return { success: false, error: 'workflow_name is required' }
|
||||
}
|
||||
|
||||
const workflows = await getAccessibleWorkflowsForUser(context.userId)
|
||||
|
||||
const targetName = normalizeWorkflowName(workflowName)
|
||||
const match = workflows.find((w) => normalizeWorkflowName(w.name) === targetName)
|
||||
if (!match) {
|
||||
return { success: false, error: `Workflow not found: ${workflowName}` }
|
||||
}
|
||||
|
||||
const normalized = await loadWorkflowFromNormalizedTables(match.id)
|
||||
const userWorkflow = formatNormalizedWorkflowForCopilot(normalized)
|
||||
if (!userWorkflow) {
|
||||
return { success: false, error: 'Workflow has no normalized data' }
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
workflowId: match.id,
|
||||
workflowName: match.name || '',
|
||||
workspaceId: match.workspaceId,
|
||||
userWorkflow,
|
||||
},
|
||||
}
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeListUserWorkflows(
|
||||
params: ListUserWorkflowsParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workspaceId = params?.workspaceId as string | undefined
|
||||
const folderId = params?.folderId as string | undefined
|
||||
|
||||
const workflows = await getAccessibleWorkflowsForUser(context.userId, { workspaceId, folderId })
|
||||
|
||||
const workflowList = workflows.map((w) => ({
|
||||
workflowId: w.id,
|
||||
workflowName: w.name || '',
|
||||
workspaceId: w.workspaceId,
|
||||
folderId: w.folderId,
|
||||
}))
|
||||
|
||||
return { success: true, output: { workflows: workflowList } }
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeListUserWorkspaces(
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workspaces = await db
|
||||
.select({
|
||||
workspaceId: workspace.id,
|
||||
workspaceName: workspace.name,
|
||||
ownerId: workspace.ownerId,
|
||||
permissionType: permissions.permissionType,
|
||||
})
|
||||
.from(permissions)
|
||||
.innerJoin(workspace, eq(permissions.entityId, workspace.id))
|
||||
.where(and(eq(permissions.userId, context.userId), eq(permissions.entityType, 'workspace')))
|
||||
.orderBy(desc(workspace.createdAt))
|
||||
|
||||
const output = workspaces.map((row) => ({
|
||||
workspaceId: row.workspaceId,
|
||||
workspaceName: row.workspaceName,
|
||||
role: row.ownerId === context.userId ? 'owner' : row.permissionType,
|
||||
}))
|
||||
|
||||
return { success: true, output: { workspaces: output } }
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeListFolders(
|
||||
params: ListFoldersParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workspaceId =
|
||||
(params?.workspaceId as string | undefined) || (await getDefaultWorkspaceId(context.userId))
|
||||
|
||||
await ensureWorkspaceAccess(workspaceId, context.userId, false)
|
||||
|
||||
const folders = await db
|
||||
.select({
|
||||
folderId: workflowFolder.id,
|
||||
folderName: workflowFolder.name,
|
||||
parentId: workflowFolder.parentId,
|
||||
sortOrder: workflowFolder.sortOrder,
|
||||
})
|
||||
.from(workflowFolder)
|
||||
.where(eq(workflowFolder.workspaceId, workspaceId))
|
||||
.orderBy(asc(workflowFolder.sortOrder), asc(workflowFolder.createdAt))
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
workspaceId,
|
||||
folders,
|
||||
},
|
||||
}
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeGetWorkflowData(
|
||||
params: GetWorkflowDataParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workflowId = params.workflowId || context.workflowId
|
||||
const dataType = params.data_type || params.dataType || ''
|
||||
if (!workflowId) {
|
||||
return { success: false, error: 'workflowId is required' }
|
||||
}
|
||||
if (!dataType) {
|
||||
return { success: false, error: 'data_type is required' }
|
||||
}
|
||||
|
||||
const { workflow: workflowRecord, workspaceId } = await ensureWorkflowAccess(
|
||||
workflowId,
|
||||
context.userId
|
||||
)
|
||||
|
||||
if (dataType === 'global_variables') {
|
||||
const variablesRecord = (workflowRecord.variables as Record<string, unknown>) || {}
|
||||
const variables = Object.values(variablesRecord).map((v) => {
|
||||
const variable = v as Record<string, unknown> | null
|
||||
return {
|
||||
id: String(variable?.id || ''),
|
||||
name: String(variable?.name || ''),
|
||||
value: variable?.value,
|
||||
}
|
||||
})
|
||||
return { success: true, output: { variables } }
|
||||
}
|
||||
|
||||
if (dataType === 'custom_tools') {
|
||||
if (!workspaceId) {
|
||||
return { success: false, error: 'workspaceId is required' }
|
||||
}
|
||||
const conditions = [
|
||||
eq(customTools.workspaceId, workspaceId),
|
||||
and(eq(customTools.userId, context.userId), isNull(customTools.workspaceId)),
|
||||
]
|
||||
const toolsRows = await db
|
||||
.select()
|
||||
.from(customTools)
|
||||
.where(or(...conditions))
|
||||
.orderBy(desc(customTools.createdAt))
|
||||
|
||||
const customToolsData = toolsRows.map((tool) => {
|
||||
const schema = tool.schema as Record<string, unknown> | null
|
||||
const fn = (schema?.function ?? {}) as Record<string, unknown>
|
||||
return {
|
||||
id: String(tool.id || ''),
|
||||
title: String(tool.title || ''),
|
||||
functionName: String(fn.name || ''),
|
||||
description: String(fn.description || ''),
|
||||
parameters: fn.parameters,
|
||||
}
|
||||
})
|
||||
|
||||
return { success: true, output: { customTools: customToolsData } }
|
||||
}
|
||||
|
||||
if (dataType === 'mcp_tools') {
|
||||
if (!workspaceId) {
|
||||
return { success: false, error: 'workspaceId is required' }
|
||||
}
|
||||
const tools = await mcpService.discoverTools(context.userId, workspaceId, false)
|
||||
const mcpTools = tools.map((tool) => ({
|
||||
name: String(tool.name || ''),
|
||||
serverId: String(tool.serverId || ''),
|
||||
serverName: String(tool.serverName || ''),
|
||||
description: String(tool.description || ''),
|
||||
inputSchema: tool.inputSchema,
|
||||
}))
|
||||
return { success: true, output: { mcpTools } }
|
||||
}
|
||||
|
||||
if (dataType === 'files') {
|
||||
if (!workspaceId) {
|
||||
return { success: false, error: 'workspaceId is required' }
|
||||
}
|
||||
const files = await listWorkspaceFiles(workspaceId)
|
||||
const fileResults = files.map((file) => ({
|
||||
id: String(file.id || ''),
|
||||
name: String(file.name || ''),
|
||||
key: String(file.key || ''),
|
||||
path: String(file.path || ''),
|
||||
size: Number(file.size || 0),
|
||||
type: String(file.type || ''),
|
||||
uploadedAt: String(file.uploadedAt || ''),
|
||||
}))
|
||||
return { success: true, output: { files: fileResults } }
|
||||
}
|
||||
|
||||
return { success: false, error: `Unknown data_type: ${dataType}` }
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeGetBlockOutputs(
|
||||
params: GetBlockOutputsParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workflowId = params.workflowId || context.workflowId
|
||||
if (!workflowId) {
|
||||
return { success: false, error: 'workflowId is required' }
|
||||
}
|
||||
await ensureWorkflowAccess(workflowId, context.userId)
|
||||
|
||||
const normalized = await loadWorkflowFromNormalizedTables(workflowId)
|
||||
if (!normalized) {
|
||||
return { success: false, error: 'Workflow has no normalized data' }
|
||||
}
|
||||
|
||||
const blocks = normalized.blocks || {}
|
||||
const loops = normalized.loops || {}
|
||||
const parallels = normalized.parallels || {}
|
||||
const blockIds =
|
||||
Array.isArray(params.blockIds) && params.blockIds.length > 0
|
||||
? params.blockIds
|
||||
: Object.keys(blocks)
|
||||
|
||||
const results: Array<{
|
||||
blockId: string
|
||||
blockName: string
|
||||
blockType: string
|
||||
outputs: string[]
|
||||
insideSubflowOutputs?: string[]
|
||||
outsideSubflowOutputs?: string[]
|
||||
triggerMode?: boolean
|
||||
}> = []
|
||||
|
||||
for (const blockId of blockIds) {
|
||||
const block = blocks[blockId]
|
||||
if (!block?.type) continue
|
||||
const blockName = block.name || block.type
|
||||
|
||||
if (block.type === 'loop' || block.type === 'parallel') {
|
||||
const insidePaths = getSubflowInsidePaths(block.type, blockId, loops, parallels)
|
||||
results.push({
|
||||
blockId,
|
||||
blockName,
|
||||
blockType: block.type,
|
||||
outputs: [],
|
||||
insideSubflowOutputs: formatOutputsWithPrefix(insidePaths, blockName),
|
||||
outsideSubflowOutputs: formatOutputsWithPrefix(['results'], blockName),
|
||||
triggerMode: block.triggerMode,
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
const outputs = getBlockOutputPaths(block.type, block.subBlocks, block.triggerMode)
|
||||
results.push({
|
||||
blockId,
|
||||
blockName,
|
||||
blockType: block.type,
|
||||
outputs: formatOutputsWithPrefix(outputs, blockName),
|
||||
triggerMode: block.triggerMode,
|
||||
})
|
||||
}
|
||||
|
||||
const variables = await getWorkflowVariablesForTool(workflowId)
|
||||
|
||||
const payload = { blocks: results, variables }
|
||||
return { success: true, output: payload }
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeGetBlockUpstreamReferences(
|
||||
params: GetBlockUpstreamReferencesParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workflowId = params.workflowId || context.workflowId
|
||||
if (!workflowId) {
|
||||
return { success: false, error: 'workflowId is required' }
|
||||
}
|
||||
if (!Array.isArray(params.blockIds) || params.blockIds.length === 0) {
|
||||
return { success: false, error: 'blockIds array is required' }
|
||||
}
|
||||
await ensureWorkflowAccess(workflowId, context.userId)
|
||||
|
||||
const normalized = await loadWorkflowFromNormalizedTables(workflowId)
|
||||
if (!normalized) {
|
||||
return { success: false, error: 'Workflow has no normalized data' }
|
||||
}
|
||||
|
||||
const blocks = normalized.blocks || {}
|
||||
const edges = normalized.edges || []
|
||||
const loops = normalized.loops || {}
|
||||
const parallels = normalized.parallels || {}
|
||||
|
||||
const graphEdges = edges.map((edge) => ({ source: edge.source, target: edge.target }))
|
||||
const variableOutputs = await getWorkflowVariablesForTool(workflowId)
|
||||
|
||||
interface AccessibleBlockEntry {
|
||||
blockId: string
|
||||
blockName: string
|
||||
blockType: string
|
||||
outputs: string[]
|
||||
triggerMode?: boolean
|
||||
accessContext?: 'inside' | 'outside'
|
||||
}
|
||||
|
||||
interface UpstreamReferenceResult {
|
||||
blockId: string
|
||||
blockName: string
|
||||
blockType: string
|
||||
accessibleBlocks: AccessibleBlockEntry[]
|
||||
insideSubflows: Array<{ blockId: string; blockName: string; blockType: string }>
|
||||
variables: Array<{ id: string; name: string; type: string; tag: string }>
|
||||
}
|
||||
|
||||
const results: UpstreamReferenceResult[] = []
|
||||
|
||||
for (const blockId of params.blockIds) {
|
||||
const targetBlock = blocks[blockId]
|
||||
if (!targetBlock) continue
|
||||
|
||||
const insideSubflows: Array<{ blockId: string; blockName: string; blockType: string }> = []
|
||||
const containingLoopIds = new Set<string>()
|
||||
const containingParallelIds = new Set<string>()
|
||||
|
||||
Object.values(loops).forEach((loop) => {
|
||||
if (loop?.nodes?.includes(blockId)) {
|
||||
containingLoopIds.add(loop.id)
|
||||
const loopBlock = blocks[loop.id]
|
||||
if (loopBlock) {
|
||||
insideSubflows.push({
|
||||
blockId: loop.id,
|
||||
blockName: loopBlock.name || loopBlock.type,
|
||||
blockType: 'loop',
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
Object.values(parallels).forEach((parallel) => {
|
||||
if (parallel?.nodes?.includes(blockId)) {
|
||||
containingParallelIds.add(parallel.id)
|
||||
const parallelBlock = blocks[parallel.id]
|
||||
if (parallelBlock) {
|
||||
insideSubflows.push({
|
||||
blockId: parallel.id,
|
||||
blockName: parallelBlock.name || parallelBlock.type,
|
||||
blockType: 'parallel',
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
const ancestorIds = BlockPathCalculator.findAllPathNodes(graphEdges, blockId)
|
||||
const accessibleIds = new Set<string>(ancestorIds)
|
||||
accessibleIds.add(blockId)
|
||||
|
||||
const starterBlock = Object.values(blocks).find((b) => isInputDefinitionTrigger(b.type))
|
||||
if (starterBlock && ancestorIds.includes(starterBlock.id)) {
|
||||
accessibleIds.add(starterBlock.id)
|
||||
}
|
||||
|
||||
containingLoopIds.forEach((loopId) => {
|
||||
accessibleIds.add(loopId)
|
||||
loops[loopId]?.nodes?.forEach((nodeId: string) => accessibleIds.add(nodeId))
|
||||
})
|
||||
|
||||
containingParallelIds.forEach((parallelId) => {
|
||||
accessibleIds.add(parallelId)
|
||||
parallels[parallelId]?.nodes?.forEach((nodeId: string) => accessibleIds.add(nodeId))
|
||||
})
|
||||
|
||||
const accessibleBlocks: AccessibleBlockEntry[] = []
|
||||
|
||||
for (const accessibleBlockId of accessibleIds) {
|
||||
const block = blocks[accessibleBlockId]
|
||||
if (!block?.type) continue
|
||||
const canSelfReference = block.type === 'approval' || block.type === 'human_in_the_loop'
|
||||
if (accessibleBlockId === blockId && !canSelfReference) continue
|
||||
|
||||
const blockName = block.name || block.type
|
||||
let accessContext: 'inside' | 'outside' | undefined
|
||||
let outputPaths: string[]
|
||||
|
||||
if (block.type === 'loop' || block.type === 'parallel') {
|
||||
const isInside =
|
||||
(block.type === 'loop' && containingLoopIds.has(accessibleBlockId)) ||
|
||||
(block.type === 'parallel' && containingParallelIds.has(accessibleBlockId))
|
||||
accessContext = isInside ? 'inside' : 'outside'
|
||||
outputPaths = isInside
|
||||
? getSubflowInsidePaths(block.type, accessibleBlockId, loops, parallels)
|
||||
: ['results']
|
||||
} else {
|
||||
outputPaths = getBlockOutputPaths(block.type, block.subBlocks, block.triggerMode)
|
||||
}
|
||||
|
||||
const formattedOutputs = formatOutputsWithPrefix(outputPaths, blockName)
|
||||
const entry: AccessibleBlockEntry = {
|
||||
blockId: accessibleBlockId,
|
||||
blockName,
|
||||
blockType: block.type,
|
||||
outputs: formattedOutputs,
|
||||
...(block.triggerMode ? { triggerMode: true } : {}),
|
||||
...(accessContext ? { accessContext } : {}),
|
||||
}
|
||||
accessibleBlocks.push(entry)
|
||||
}
|
||||
|
||||
results.push({
|
||||
blockId,
|
||||
blockName: targetBlock.name || targetBlock.type,
|
||||
blockType: targetBlock.type,
|
||||
accessibleBlocks,
|
||||
insideSubflows,
|
||||
variables: variableOutputs,
|
||||
})
|
||||
}
|
||||
|
||||
const payload = { results }
|
||||
return { success: true, output: payload }
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
|
||||
async function getWorkflowVariablesForTool(
|
||||
workflowId: string
|
||||
): Promise<Array<{ id: string; name: string; type: string; tag: string }>> {
|
||||
const [workflowRecord] = await db
|
||||
.select({ variables: workflow.variables })
|
||||
.from(workflow)
|
||||
.where(eq(workflow.id, workflowId))
|
||||
.limit(1)
|
||||
|
||||
const variablesRecord = (workflowRecord?.variables as Record<string, unknown>) || {}
|
||||
return Object.values(variablesRecord)
|
||||
.filter((v): v is Record<string, unknown> => {
|
||||
if (!v || typeof v !== 'object') return false
|
||||
const variable = v as Record<string, unknown>
|
||||
return !!variable.name && String(variable.name).trim() !== ''
|
||||
})
|
||||
.map((v) => ({
|
||||
id: String(v.id || ''),
|
||||
name: String(v.name || ''),
|
||||
type: String(v.type || 'plain'),
|
||||
tag: `variable.${normalizeName(String(v.name || ''))}`,
|
||||
}))
|
||||
}
|
||||
|
||||
function getSubflowInsidePaths(
|
||||
blockType: 'loop' | 'parallel',
|
||||
blockId: string,
|
||||
loops: Record<string, Loop>,
|
||||
parallels: Record<string, Parallel>
|
||||
): string[] {
|
||||
const paths = ['index']
|
||||
if (blockType === 'loop') {
|
||||
const loopType = loops[blockId]?.loopType || 'for'
|
||||
if (loopType === 'forEach') {
|
||||
paths.push('currentItem', 'items')
|
||||
}
|
||||
} else {
|
||||
const parallelType = parallels[blockId]?.parallelType || 'count'
|
||||
if (parallelType === 'collection') {
|
||||
paths.push('currentItem', 'items')
|
||||
}
|
||||
}
|
||||
return paths
|
||||
}
|
||||
|
||||
function formatOutputsWithPrefix(paths: string[], blockName: string): string[] {
|
||||
const normalizedName = normalizeName(blockName)
|
||||
return paths.map((path) => `${normalizedName}.${path}`)
|
||||
}
|
||||
|
||||
export async function executeGetDeployedWorkflowState(
|
||||
params: GetDeployedWorkflowStateParams,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workflowId = params.workflowId || context.workflowId
|
||||
if (!workflowId) {
|
||||
return { success: false, error: 'workflowId is required' }
|
||||
}
|
||||
|
||||
const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId)
|
||||
|
||||
try {
|
||||
const deployedState = await loadDeployedWorkflowState(workflowId)
|
||||
const formatted = formatNormalizedWorkflowForCopilot({
|
||||
blocks: deployedState.blocks,
|
||||
edges: deployedState.edges,
|
||||
loops: deployedState.loops as Record<string, Loop>,
|
||||
parallels: deployedState.parallels as Record<string, Parallel>,
|
||||
})
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
workflowId,
|
||||
workflowName: workflowRecord.name || '',
|
||||
isDeployed: true,
|
||||
deploymentVersionId: deployedState.deploymentVersionId,
|
||||
deployedState: formatted,
|
||||
},
|
||||
}
|
||||
} catch {
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
workflowId,
|
||||
workflowName: workflowRecord.name || '',
|
||||
isDeployed: false,
|
||||
message: 'Workflow has not been deployed yet.',
|
||||
},
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
150
apps/sim/lib/copilot/orchestrator/types.ts
Normal file
150
apps/sim/lib/copilot/orchestrator/types.ts
Normal file
@@ -0,0 +1,150 @@
|
||||
import type { CopilotProviderConfig } from '@/lib/copilot/types'
|
||||
|
||||
export type SSEEventType =
|
||||
| 'chat_id'
|
||||
| 'title_updated'
|
||||
| 'content'
|
||||
| 'reasoning'
|
||||
| 'tool_call'
|
||||
| 'tool_generating'
|
||||
| 'tool_result'
|
||||
| 'tool_error'
|
||||
| 'subagent_start'
|
||||
| 'subagent_end'
|
||||
| 'structured_result'
|
||||
| 'subagent_result'
|
||||
| 'done'
|
||||
| 'error'
|
||||
| 'start'
|
||||
|
||||
export interface SSEEvent {
|
||||
type: SSEEventType
|
||||
data?: Record<string, unknown>
|
||||
subagent?: string
|
||||
toolCallId?: string
|
||||
toolName?: string
|
||||
success?: boolean
|
||||
result?: unknown
|
||||
/** Set on chat_id events */
|
||||
chatId?: string
|
||||
/** Set on title_updated events */
|
||||
title?: string
|
||||
/** Set on error events */
|
||||
error?: string
|
||||
/** Set on content/reasoning events */
|
||||
content?: string
|
||||
/** Set on reasoning events */
|
||||
phase?: string
|
||||
/** Set on tool_result events */
|
||||
failedDependency?: boolean
|
||||
}
|
||||
|
||||
export type ToolCallStatus = 'pending' | 'executing' | 'success' | 'error' | 'skipped' | 'rejected'
|
||||
|
||||
export interface ToolCallState {
|
||||
id: string
|
||||
name: string
|
||||
status: ToolCallStatus
|
||||
params?: Record<string, unknown>
|
||||
result?: ToolCallResult
|
||||
error?: string
|
||||
startTime?: number
|
||||
endTime?: number
|
||||
}
|
||||
|
||||
export interface ToolCallResult<T = unknown> {
|
||||
success: boolean
|
||||
output?: T
|
||||
error?: string
|
||||
}
|
||||
|
||||
export type ContentBlockType = 'text' | 'thinking' | 'tool_call' | 'subagent_text'
|
||||
|
||||
export interface ContentBlock {
|
||||
type: ContentBlockType
|
||||
content?: string
|
||||
toolCall?: ToolCallState
|
||||
timestamp: number
|
||||
}
|
||||
|
||||
export interface StreamingContext {
|
||||
chatId?: string
|
||||
conversationId?: string
|
||||
messageId: string
|
||||
accumulatedContent: string
|
||||
contentBlocks: ContentBlock[]
|
||||
toolCalls: Map<string, ToolCallState>
|
||||
currentThinkingBlock: ContentBlock | null
|
||||
isInThinkingBlock: boolean
|
||||
subAgentParentToolCallId?: string
|
||||
subAgentContent: Record<string, string>
|
||||
subAgentToolCalls: Record<string, ToolCallState[]>
|
||||
pendingContent: string
|
||||
streamComplete: boolean
|
||||
wasAborted: boolean
|
||||
errors: string[]
|
||||
}
|
||||
|
||||
export interface FileAttachment {
|
||||
id: string
|
||||
key: string
|
||||
name: string
|
||||
mimeType: string
|
||||
size: number
|
||||
}
|
||||
|
||||
export interface OrchestratorRequest {
|
||||
message: string
|
||||
workflowId: string
|
||||
userId: string
|
||||
chatId?: string
|
||||
mode?: 'agent' | 'ask' | 'plan'
|
||||
model?: string
|
||||
conversationId?: string
|
||||
contexts?: Array<{ type: string; content: string }>
|
||||
fileAttachments?: FileAttachment[]
|
||||
commands?: string[]
|
||||
provider?: CopilotProviderConfig
|
||||
streamToolCalls?: boolean
|
||||
version?: string
|
||||
prefetch?: boolean
|
||||
userName?: string
|
||||
}
|
||||
|
||||
export interface OrchestratorOptions {
|
||||
autoExecuteTools?: boolean
|
||||
timeout?: number
|
||||
onEvent?: (event: SSEEvent) => void | Promise<void>
|
||||
onComplete?: (result: OrchestratorResult) => void | Promise<void>
|
||||
onError?: (error: Error) => void | Promise<void>
|
||||
abortSignal?: AbortSignal
|
||||
interactive?: boolean
|
||||
}
|
||||
|
||||
export interface OrchestratorResult {
|
||||
success: boolean
|
||||
content: string
|
||||
contentBlocks: ContentBlock[]
|
||||
toolCalls: ToolCallSummary[]
|
||||
chatId?: string
|
||||
conversationId?: string
|
||||
error?: string
|
||||
errors?: string[]
|
||||
}
|
||||
|
||||
export interface ToolCallSummary {
|
||||
id: string
|
||||
name: string
|
||||
status: ToolCallStatus
|
||||
params?: Record<string, unknown>
|
||||
result?: unknown
|
||||
error?: string
|
||||
durationMs?: number
|
||||
}
|
||||
|
||||
export interface ExecutionContext {
|
||||
userId: string
|
||||
workflowId: string
|
||||
workspaceId?: string
|
||||
decryptedEnvVars?: Record<string, string>
|
||||
}
|
||||
@@ -44,29 +44,20 @@ export async function processContexts(
|
||||
ctx.kind
|
||||
)
|
||||
}
|
||||
if (ctx.kind === 'knowledge' && (ctx as any).knowledgeId) {
|
||||
return await processKnowledgeFromDb(
|
||||
(ctx as any).knowledgeId,
|
||||
ctx.label ? `@${ctx.label}` : '@'
|
||||
)
|
||||
if (ctx.kind === 'knowledge' && ctx.knowledgeId) {
|
||||
return await processKnowledgeFromDb(ctx.knowledgeId, ctx.label ? `@${ctx.label}` : '@')
|
||||
}
|
||||
if (ctx.kind === 'blocks' && (ctx as any).blockId) {
|
||||
return await processBlockMetadata((ctx as any).blockId, ctx.label ? `@${ctx.label}` : '@')
|
||||
if (ctx.kind === 'blocks' && ctx.blockIds?.length > 0) {
|
||||
return await processBlockMetadata(ctx.blockIds[0], ctx.label ? `@${ctx.label}` : '@')
|
||||
}
|
||||
if (ctx.kind === 'templates' && (ctx as any).templateId) {
|
||||
return await processTemplateFromDb(
|
||||
(ctx as any).templateId,
|
||||
ctx.label ? `@${ctx.label}` : '@'
|
||||
)
|
||||
if (ctx.kind === 'templates' && ctx.templateId) {
|
||||
return await processTemplateFromDb(ctx.templateId, ctx.label ? `@${ctx.label}` : '@')
|
||||
}
|
||||
if (ctx.kind === 'logs' && (ctx as any).executionId) {
|
||||
return await processExecutionLogFromDb(
|
||||
(ctx as any).executionId,
|
||||
ctx.label ? `@${ctx.label}` : '@'
|
||||
)
|
||||
if (ctx.kind === 'logs' && ctx.executionId) {
|
||||
return await processExecutionLogFromDb(ctx.executionId, ctx.label ? `@${ctx.label}` : '@')
|
||||
}
|
||||
if (ctx.kind === 'workflow_block' && ctx.workflowId && (ctx as any).blockId) {
|
||||
return await processWorkflowBlockFromDb(ctx.workflowId, (ctx as any).blockId, ctx.label)
|
||||
if (ctx.kind === 'workflow_block' && ctx.workflowId && ctx.blockId) {
|
||||
return await processWorkflowBlockFromDb(ctx.workflowId, ctx.blockId, ctx.label)
|
||||
}
|
||||
// Other kinds can be added here: workflow, blocks, logs, knowledge, templates, docs
|
||||
return null
|
||||
@@ -99,33 +90,24 @@ export async function processContextsServer(
|
||||
ctx.kind
|
||||
)
|
||||
}
|
||||
if (ctx.kind === 'knowledge' && (ctx as any).knowledgeId) {
|
||||
return await processKnowledgeFromDb(
|
||||
(ctx as any).knowledgeId,
|
||||
ctx.label ? `@${ctx.label}` : '@'
|
||||
)
|
||||
if (ctx.kind === 'knowledge' && ctx.knowledgeId) {
|
||||
return await processKnowledgeFromDb(ctx.knowledgeId, ctx.label ? `@${ctx.label}` : '@')
|
||||
}
|
||||
if (ctx.kind === 'blocks' && (ctx as any).blockId) {
|
||||
if (ctx.kind === 'blocks' && ctx.blockIds?.length > 0) {
|
||||
return await processBlockMetadata(
|
||||
(ctx as any).blockId,
|
||||
ctx.blockIds[0],
|
||||
ctx.label ? `@${ctx.label}` : '@',
|
||||
userId
|
||||
)
|
||||
}
|
||||
if (ctx.kind === 'templates' && (ctx as any).templateId) {
|
||||
return await processTemplateFromDb(
|
||||
(ctx as any).templateId,
|
||||
ctx.label ? `@${ctx.label}` : '@'
|
||||
)
|
||||
if (ctx.kind === 'templates' && ctx.templateId) {
|
||||
return await processTemplateFromDb(ctx.templateId, ctx.label ? `@${ctx.label}` : '@')
|
||||
}
|
||||
if (ctx.kind === 'logs' && (ctx as any).executionId) {
|
||||
return await processExecutionLogFromDb(
|
||||
(ctx as any).executionId,
|
||||
ctx.label ? `@${ctx.label}` : '@'
|
||||
)
|
||||
if (ctx.kind === 'logs' && ctx.executionId) {
|
||||
return await processExecutionLogFromDb(ctx.executionId, ctx.label ? `@${ctx.label}` : '@')
|
||||
}
|
||||
if (ctx.kind === 'workflow_block' && ctx.workflowId && (ctx as any).blockId) {
|
||||
return await processWorkflowBlockFromDb(ctx.workflowId, (ctx as any).blockId, ctx.label)
|
||||
if (ctx.kind === 'workflow_block' && ctx.workflowId && ctx.blockId) {
|
||||
return await processWorkflowBlockFromDb(ctx.workflowId, ctx.blockId, ctx.label)
|
||||
}
|
||||
if (ctx.kind === 'docs') {
|
||||
try {
|
||||
|
||||
201
apps/sim/lib/copilot/store-utils.ts
Normal file
201
apps/sim/lib/copilot/store-utils.ts
Normal file
@@ -0,0 +1,201 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { Loader2 } from 'lucide-react'
|
||||
import {
|
||||
ClientToolCallState,
|
||||
type ClientToolDisplay,
|
||||
TOOL_DISPLAY_REGISTRY,
|
||||
} from '@/lib/copilot/tools/client/tool-display-registry'
|
||||
import type { CopilotStore } from '@/stores/panel/copilot/types'
|
||||
|
||||
// Module-scoped logger for the copilot store helper utilities.
const logger = createLogger('CopilotStoreUtils')

// Zustand-style setter accepted by the helpers below: either a partial
// state object, or an updater deriving a partial from the current state.
type StoreSet = (
  partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)
) => void

/** Respond tools are internal to copilot subagents and should never be shown in the UI */
const HIDDEN_TOOL_SUFFIX = '_respond'
|
||||
|
||||
export function resolveToolDisplay(
|
||||
toolName: string | undefined,
|
||||
state: ClientToolCallState,
|
||||
_toolCallId?: string,
|
||||
params?: Record<string, any>
|
||||
): ClientToolDisplay | undefined {
|
||||
if (!toolName) return undefined
|
||||
if (toolName.endsWith(HIDDEN_TOOL_SUFFIX)) return undefined
|
||||
const entry = TOOL_DISPLAY_REGISTRY[toolName]
|
||||
if (!entry) return humanizedFallback(toolName, state)
|
||||
|
||||
if (entry.uiConfig?.dynamicText && params) {
|
||||
const dynamicText = entry.uiConfig.dynamicText(params, state)
|
||||
const stateDisplay = entry.displayNames[state]
|
||||
if (dynamicText && stateDisplay?.icon) {
|
||||
return { text: dynamicText, icon: stateDisplay.icon }
|
||||
}
|
||||
}
|
||||
|
||||
const display = entry.displayNames[state]
|
||||
if (display?.text || display?.icon) return display
|
||||
|
||||
const fallbackOrder = [
|
||||
ClientToolCallState.generating,
|
||||
ClientToolCallState.executing,
|
||||
ClientToolCallState.success,
|
||||
]
|
||||
for (const fallbackState of fallbackOrder) {
|
||||
const fallback = entry.displayNames[fallbackState]
|
||||
if (fallback?.text || fallback?.icon) return fallback
|
||||
}
|
||||
|
||||
return humanizedFallback(toolName, state)
|
||||
}
|
||||
|
||||
export function humanizedFallback(
|
||||
toolName: string,
|
||||
state: ClientToolCallState
|
||||
): ClientToolDisplay | undefined {
|
||||
const formattedName = toolName.replace(/_/g, ' ').replace(/\b\w/g, (c) => c.toUpperCase())
|
||||
const stateVerb =
|
||||
state === ClientToolCallState.success
|
||||
? 'Executed'
|
||||
: state === ClientToolCallState.error
|
||||
? 'Failed'
|
||||
: state === ClientToolCallState.rejected || state === ClientToolCallState.aborted
|
||||
? 'Skipped'
|
||||
: 'Executing'
|
||||
return { text: `${stateVerb} ${formattedName}`, icon: Loader2 }
|
||||
}
|
||||
|
||||
export function isRejectedState(state: string): boolean {
|
||||
return state === 'rejected'
|
||||
}
|
||||
|
||||
export function isReviewState(state: string): boolean {
|
||||
return state === 'review'
|
||||
}
|
||||
|
||||
export function isBackgroundState(state: string): boolean {
|
||||
return state === 'background'
|
||||
}
|
||||
|
||||
export function isTerminalState(state: string): boolean {
|
||||
return (
|
||||
state === ClientToolCallState.success ||
|
||||
state === ClientToolCallState.error ||
|
||||
state === ClientToolCallState.rejected ||
|
||||
state === ClientToolCallState.aborted ||
|
||||
isReviewState(state) ||
|
||||
isBackgroundState(state)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolves the appropriate terminal state for a non-terminal tool call.
|
||||
* 'executing' → 'success': the server was running it, assume it completed.
|
||||
* Everything else → 'aborted': never reached execution.
|
||||
*/
|
||||
function resolveAbortState(currentState: string): ClientToolCallState {
|
||||
return currentState === ClientToolCallState.executing
|
||||
? ClientToolCallState.success
|
||||
: ClientToolCallState.aborted
|
||||
}
|
||||
|
||||
export function abortAllInProgressTools(set: StoreSet, get: () => CopilotStore) {
|
||||
try {
|
||||
const { toolCallsById, messages } = get()
|
||||
const updatedMap = { ...toolCallsById }
|
||||
const resolvedIds = new Map<string, ClientToolCallState>()
|
||||
let hasUpdates = false
|
||||
for (const [id, tc] of Object.entries(toolCallsById)) {
|
||||
const st = tc.state
|
||||
const isTerminal =
|
||||
st === ClientToolCallState.success ||
|
||||
st === ClientToolCallState.error ||
|
||||
st === ClientToolCallState.rejected ||
|
||||
st === ClientToolCallState.aborted
|
||||
if (!isTerminal || isReviewState(st)) {
|
||||
const resolved = resolveAbortState(st)
|
||||
resolvedIds.set(id, resolved)
|
||||
updatedMap[id] = {
|
||||
...tc,
|
||||
state: resolved,
|
||||
subAgentStreaming: false,
|
||||
display: resolveToolDisplay(tc.name, resolved, id, tc.params),
|
||||
}
|
||||
hasUpdates = true
|
||||
} else if (tc.subAgentStreaming) {
|
||||
updatedMap[id] = {
|
||||
...tc,
|
||||
subAgentStreaming: false,
|
||||
}
|
||||
hasUpdates = true
|
||||
}
|
||||
}
|
||||
if (resolvedIds.size > 0 || hasUpdates) {
|
||||
set({ toolCallsById: updatedMap })
|
||||
set((s: CopilotStore) => {
|
||||
const msgs = [...s.messages]
|
||||
for (let mi = msgs.length - 1; mi >= 0; mi--) {
|
||||
const m = msgs[mi]
|
||||
if (m.role !== 'assistant' || !Array.isArray(m.contentBlocks)) continue
|
||||
let changed = false
|
||||
const blocks = m.contentBlocks.map((b: any) => {
|
||||
if (b?.type === 'tool_call' && b.toolCall?.id && resolvedIds.has(b.toolCall.id)) {
|
||||
changed = true
|
||||
const prev = b.toolCall
|
||||
const resolved = resolvedIds.get(b.toolCall.id)!
|
||||
return {
|
||||
...b,
|
||||
toolCall: {
|
||||
...prev,
|
||||
state: resolved,
|
||||
display: resolveToolDisplay(prev?.name, resolved, prev?.id, prev?.params),
|
||||
},
|
||||
}
|
||||
}
|
||||
return b
|
||||
})
|
||||
if (changed) {
|
||||
msgs[mi] = { ...m, contentBlocks: blocks }
|
||||
break
|
||||
}
|
||||
}
|
||||
return { messages: msgs }
|
||||
})
|
||||
}
|
||||
} catch (error) {
|
||||
logger.warn('Failed to abort in-progress tools', {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
export function cleanupActiveState(
|
||||
set: (partial: Record<string, unknown>) => void,
|
||||
get: () => Record<string, unknown>
|
||||
): void {
|
||||
abortAllInProgressTools(set as unknown as StoreSet, get as unknown as () => CopilotStore)
|
||||
try {
|
||||
const { useWorkflowDiffStore } = require('@/stores/workflow-diff/store') as {
|
||||
useWorkflowDiffStore: {
|
||||
getState: () => { clearDiff: (options?: { restoreBaseline?: boolean }) => void }
|
||||
}
|
||||
}
|
||||
useWorkflowDiffStore.getState().clearDiff({ restoreBaseline: false })
|
||||
} catch (error) {
|
||||
logger.warn('Failed to clear diff during cleanup', {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
export function stripTodoTags(text: string): string {
|
||||
if (!text) return text
|
||||
return text
|
||||
.replace(/<marktodo>[\s\S]*?<\/marktodo>/g, '')
|
||||
.replace(/<checkofftodo>[\s\S]*?<\/checkofftodo>/g, '')
|
||||
.replace(/<design_workflow>[\s\S]*?<\/design_workflow>/g, '')
|
||||
.replace(/[ \t]+\n/g, '\n')
|
||||
.replace(/\n{2,}/g, '\n')
|
||||
}
|
||||
@@ -1,120 +0,0 @@
|
||||
/**
|
||||
* Base class for subagent tools.
|
||||
*
|
||||
* Subagent tools spawn a server-side subagent that does the actual work.
|
||||
* The tool auto-executes and the subagent's output is streamed back
|
||||
* as nested content under the tool call.
|
||||
*
|
||||
* Examples: edit, plan, debug, evaluate, research, etc.
|
||||
*/
|
||||
import type { LucideIcon } from 'lucide-react'
|
||||
import { BaseClientTool, type BaseClientToolMetadata, ClientToolCallState } from './base-tool'
|
||||
import type { SubagentConfig, ToolUIConfig } from './ui-config'
|
||||
import { registerToolUIConfig } from './ui-config'
|
||||
|
||||
/**
|
||||
* Configuration for creating a subagent tool
|
||||
*/
|
||||
/**
 * Configuration for creating a subagent tool.
 * Consumed by createSubagentMetadata / BaseSubagentTool / createSubagentToolClass.
 */
export interface SubagentToolConfig {
  /** Unique tool ID (also used to derive skipped/aborted display labels) */
  id: string
  /** Display names per state; streaming covers generating/pending/executing */
  displayNames: {
    streaming: { text: string; icon: LucideIcon }
    success: { text: string; icon: LucideIcon }
    error: { text: string; icon: LucideIcon }
  }
  /** Subagent UI configuration */
  subagent: SubagentConfig
  /**
   * Optional: Whether this is a "special" tool (gets gradient styling).
   * Default: false
   */
  isSpecial?: boolean
}
|
||||
|
||||
/**
|
||||
* Create metadata for a subagent tool from config
|
||||
*/
|
||||
function createSubagentMetadata(config: SubagentToolConfig): BaseClientToolMetadata {
|
||||
const { displayNames, subagent, isSpecial } = config
|
||||
const { streaming, success, error } = displayNames
|
||||
|
||||
const uiConfig: ToolUIConfig = {
|
||||
isSpecial: isSpecial ?? false,
|
||||
subagent,
|
||||
}
|
||||
|
||||
return {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: streaming,
|
||||
[ClientToolCallState.pending]: streaming,
|
||||
[ClientToolCallState.executing]: streaming,
|
||||
[ClientToolCallState.success]: success,
|
||||
[ClientToolCallState.error]: error,
|
||||
[ClientToolCallState.rejected]: {
|
||||
text: `${config.id.charAt(0).toUpperCase() + config.id.slice(1)} skipped`,
|
||||
icon: error.icon,
|
||||
},
|
||||
[ClientToolCallState.aborted]: {
|
||||
text: `${config.id.charAt(0).toUpperCase() + config.id.slice(1)} aborted`,
|
||||
icon: error.icon,
|
||||
},
|
||||
},
|
||||
uiConfig,
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Base class for subagent tools.
 * Extends BaseClientTool with subagent-specific behavior: the tool
 * auto-executes and the subagent's output streams back from the server.
 */
export abstract class BaseSubagentTool extends BaseClientTool {
  /**
   * Subagent configuration.
   * Override in subclasses to customize behavior.
   */
  static readonly subagentConfig: SubagentToolConfig

  constructor(toolCallId: string, config: SubagentToolConfig) {
    super(toolCallId, config.id, createSubagentMetadata(config))
    // Register UI config for this tool so the tool-call UI can render it
    // declaratively (per-instance here; createSubagentToolClass registers
    // at class-creation time instead).
    registerToolUIConfig(config.id, this.metadata.uiConfig!)
  }

  /**
   * Execute the subagent tool.
   * Immediately transitions to executing state - the actual work
   * is done server-side by the subagent.
   */
  async execute(_args?: Record<string, any>): Promise<void> {
    this.setState(ClientToolCallState.executing)
    // The tool result will come from the server via tool_result event
    // when the subagent completes its work
  }
}
|
||||
|
||||
/**
|
||||
* Factory function to create a subagent tool class.
|
||||
* Use this for simple subagent tools that don't need custom behavior.
|
||||
*/
|
||||
export function createSubagentToolClass(config: SubagentToolConfig) {
|
||||
// Register UI config at class creation time
|
||||
const uiConfig: ToolUIConfig = {
|
||||
isSpecial: config.isSpecial ?? false,
|
||||
subagent: config.subagent,
|
||||
}
|
||||
registerToolUIConfig(config.id, uiConfig)
|
||||
|
||||
return class extends BaseClientTool {
|
||||
static readonly id = config.id
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, config.id, createSubagentMetadata(config))
|
||||
}
|
||||
|
||||
async execute(_args?: Record<string, any>): Promise<void> {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,15 +1,5 @@
|
||||
// Lazy require in setState to avoid circular init issues
|
||||
import { createLogger } from '@sim/logger'
|
||||
import type { LucideIcon } from 'lucide-react'
|
||||
import type { ToolUIConfig } from './ui-config'
|
||||
|
||||
const baseToolLogger = createLogger('BaseClientTool')
|
||||
|
||||
const DEFAULT_TOOL_TIMEOUT_MS = 5 * 60 * 1000
|
||||
|
||||
export const WORKFLOW_EXECUTION_TIMEOUT_MS = 10 * 60 * 1000
|
||||
|
||||
// Client tool call states used by the new runtime
|
||||
export enum ClientToolCallState {
|
||||
generating = 'generating',
|
||||
pending = 'pending',
|
||||
@@ -22,252 +12,32 @@ export enum ClientToolCallState {
|
||||
background = 'background',
|
||||
}
|
||||
|
||||
// Display configuration for a given state: the label text and the
// lucide icon shown next to it in the tool-call UI.
export interface ClientToolDisplay {
  text: string
  icon: LucideIcon
}
|
||||
|
||||
/**
|
||||
* Function to generate dynamic display text based on tool parameters and state
|
||||
* @param params - The tool call parameters
|
||||
* @param state - The current tool call state
|
||||
* @returns The dynamic text to display, or undefined to use the default text
|
||||
*/
|
||||
export interface BaseClientToolMetadata {
|
||||
displayNames: Partial<Record<ClientToolCallState, ClientToolDisplay>>
|
||||
uiConfig?: Record<string, unknown>
|
||||
getDynamicText?: (
|
||||
params: Record<string, unknown>,
|
||||
state: ClientToolCallState
|
||||
) => string | undefined
|
||||
}
|
||||
|
||||
export type DynamicTextFormatter = (
|
||||
params: Record<string, any>,
|
||||
params: Record<string, unknown>,
|
||||
state: ClientToolCallState
|
||||
) => string | undefined
|
||||
|
||||
export interface BaseClientToolMetadata {
|
||||
displayNames: Partial<Record<ClientToolCallState, ClientToolDisplay>>
|
||||
interrupt?: {
|
||||
accept: ClientToolDisplay
|
||||
reject: ClientToolDisplay
|
||||
}
|
||||
/**
|
||||
* Optional function to generate dynamic display text based on parameters
|
||||
* If provided, this will override the default text in displayNames
|
||||
*/
|
||||
getDynamicText?: DynamicTextFormatter
|
||||
/**
|
||||
* UI configuration for how this tool renders in the tool-call component.
|
||||
* This replaces hardcoded logic in tool-call.tsx with declarative config.
|
||||
*/
|
||||
uiConfig?: ToolUIConfig
|
||||
}
|
||||
|
||||
/**
 * Base class for all copilot client tools.
 * Tracks one tool call's lifecycle state and syncs transitions to the
 * copilot store and the server.
 */
export class BaseClientTool {
  // Server-assigned id for this specific invocation.
  readonly toolCallId: string
  // Tool name (e.g. 'get_block_config').
  readonly name: string
  // Current lifecycle state; starts as `generating`.
  protected state: ClientToolCallState
  // Display names / interrupt / UI configuration.
  protected metadata: BaseClientToolMetadata
  // Set once markToolComplete has run; guards double-completion.
  protected isMarkedComplete = false
  // Completion timeout, overridable via setTimeoutMs.
  protected timeoutMs: number = DEFAULT_TOOL_TIMEOUT_MS

  constructor(toolCallId: string, name: string, metadata: BaseClientToolMetadata) {
    this.toolCallId = toolCallId
    this.name = name
    this.metadata = metadata
    this.state = ClientToolCallState.generating
  }
|
||||
|
||||
/**
 * Set a custom timeout for this tool (in milliseconds).
 * Used by executeWithTimeout when no explicit timeout is passed.
 */
setTimeoutMs(ms: number): void {
  this.timeoutMs = ms
}

/**
 * Check if this tool has been marked complete on the server.
 */
hasBeenMarkedComplete(): boolean {
  return this.isMarkedComplete
}
|
||||
|
||||
/**
 * Ensure the tool is marked complete. If not already marked, marks it with error.
 * This should be called in finally blocks to prevent leaked tool calls.
 *
 * @param fallbackMessage - Error message reported when the tool never completed.
 */
async ensureMarkedComplete(
  fallbackMessage = 'Tool execution did not complete properly'
): Promise<void> {
  if (!this.isMarkedComplete) {
    baseToolLogger.warn('Tool was not marked complete, marking with error', {
      toolCallId: this.toolCallId,
      toolName: this.name,
      state: this.state,
    })
    // 500: treated as an execution failure on the server side.
    await this.markToolComplete(500, fallbackMessage)
    this.setState(ClientToolCallState.error)
  }
}
|
||||
|
||||
/**
 * Execute with timeout protection. Wraps the execution in a timeout and ensures
 * markToolComplete is always called.
 *
 * @param executeFn - The actual tool work.
 * @param timeoutMs - Optional override; defaults to this.timeoutMs.
 */
async executeWithTimeout(executeFn: () => Promise<void>, timeoutMs?: number): Promise<void> {
  const timeout = timeoutMs ?? this.timeoutMs
  let timeoutId: NodeJS.Timeout | null = null

  try {
    // Race the real work against a timer; whichever settles first wins.
    await Promise.race([
      executeFn(),
      new Promise<never>((_, reject) => {
        timeoutId = setTimeout(() => {
          reject(new Error(`Tool execution timed out after ${timeout / 1000} seconds`))
        }, timeout)
      }),
    ])
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error)
    baseToolLogger.error('Tool execution failed or timed out', {
      toolCallId: this.toolCallId,
      toolName: this.name,
      error: message,
    })
    // Only mark complete if not already marked
    if (!this.isMarkedComplete) {
      await this.markToolComplete(500, message)
      this.setState(ClientToolCallState.error)
    }
  } finally {
    // Clear the timer so it cannot fire after completion.
    if (timeoutId) clearTimeout(timeoutId)
    // Ensure tool is always marked complete
    await this.ensureMarkedComplete()
  }
}
|
||||
|
||||
// Intentionally left empty - specific tools can override.
// Default no-op so the base class is instantiable for tools that do
// all their work server-side.
// eslint-disable-next-line @typescript-eslint/no-unused-vars
async execute(_args?: Record<string, any>): Promise<void> {
  return
}
|
||||
|
||||
/**
 * Mark a tool as complete on the server (proxies to server-side route).
 * Once called, the tool is considered complete and won't be marked again.
 *
 * @param status - HTTP-style status for the outcome (200 success, 500 failure).
 * @param message - Optional human/agent-readable outcome message.
 * @param data - Optional structured result payload.
 * @returns true when the server acknowledged the completion.
 */
async markToolComplete(status: number, message?: any, data?: any): Promise<boolean> {
  // Prevent double-marking
  if (this.isMarkedComplete) {
    baseToolLogger.warn('markToolComplete called but tool already marked complete', {
      toolCallId: this.toolCallId,
      toolName: this.name,
      existingState: this.state,
      attemptedStatus: status,
    })
    return true
  }

  // Mark locally BEFORE the network call so a failed request cannot
  // lead to a second attempt.
  this.isMarkedComplete = true

  try {
    baseToolLogger.info('markToolComplete called', {
      toolCallId: this.toolCallId,
      toolName: this.name,
      state: this.state,
      status,
      hasMessage: message !== undefined,
      hasData: data !== undefined,
    })
  } catch {}

  try {
    const res = await fetch('/api/copilot/tools/mark-complete', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        id: this.toolCallId,
        name: this.name,
        status,
        message,
        data,
      }),
    })

    if (!res.ok) {
      // Try to surface server error
      let errorText = `Failed to mark tool complete (status ${res.status})`
      try {
        const { error } = await res.json()
        if (error) errorText = String(error)
      } catch {}
      throw new Error(errorText)
    }

    const json = (await res.json()) as { success?: boolean }
    return json?.success === true
  } catch (e) {
    // Default failure path - but tool is still marked complete locally
    baseToolLogger.error('Failed to mark tool complete on server', {
      toolCallId: this.toolCallId,
      error: e instanceof Error ? e.message : String(e),
    })
    return false
  }
}
|
||||
|
||||
// Accept (continue) for interrupt flows: move pending -> executing
async handleAccept(): Promise<void> {
  this.setState(ClientToolCallState.executing)
}

// Reject (skip) for interrupt flows: mark complete with a standard skip message.
// Status 200: a user skip is a normal outcome, not an error.
async handleReject(): Promise<void> {
  await this.markToolComplete(200, 'Tool execution was skipped by the user')
  this.setState(ClientToolCallState.rejected)
}
|
||||
|
||||
// Return the display configuration (label + icon) for the current state,
// or undefined when the metadata has no entry for it.
getDisplayState(): ClientToolDisplay | undefined {
  return this.metadata.displayNames[this.state]
}

// Return interrupt display config (labels/icons) if defined
getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined {
  return this.metadata.interrupt
}
|
||||
|
||||
// Transition to a new state (also sync to Copilot store)
// @param next - Target lifecycle state.
// @param options.result - Optional result payload forwarded to the store.
setState(next: ClientToolCallState, options?: { result?: any }): void {
  const prev = this.state
  this.state = next

  // Notify store via manager to avoid import cycles
  try {
    const { syncToolState } = require('@/lib/copilot/tools/client/manager')
    syncToolState(this.toolCallId, next, options)
  } catch {}

  // Log transition after syncing
  try {
    baseToolLogger.info('setState transition', {
      toolCallId: this.toolCallId,
      toolName: this.name,
      prev,
      next,
      hasResult: options?.result !== undefined,
    })
  } catch {}
}
|
||||
|
||||
// Expose current state
getState(): ClientToolCallState {
  return this.state
}

// Whether this tool pauses for user accept/reject before executing.
hasInterrupt(): boolean {
  return !!this.metadata.interrupt
}
|
||||
|
||||
/**
 * Get UI configuration for this tool.
 * Used by tool-call component to determine rendering behavior.
 */
getUIConfig(): ToolUIConfig | undefined {
  return this.metadata.uiConfig
}
|
||||
export const WORKFLOW_EXECUTION_TIMEOUT_MS = 10 * 60 * 1000
|
||||
|
||||
/** Event detail for OAuth connect events dispatched by the copilot. */
export interface OAuthConnectEventDetail {
  /** Human-readable provider name shown in the connect UI */
  providerName: string
  /** Service identifier within the provider */
  serviceId: string
  /** Provider identifier */
  providerId: string
  /** Scopes the connection must be granted */
  requiredScopes: string[]
  /** Additional scopes being requested beyond an existing grant, if any */
  newScopes?: string[]
}
|
||||
|
||||
@@ -1,100 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { FileCode, Loader2, MinusCircle, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import {
|
||||
ExecuteResponseSuccessSchema,
|
||||
GetBlockConfigInput,
|
||||
GetBlockConfigResult,
|
||||
} from '@/lib/copilot/tools/shared/schemas'
|
||||
import { getLatestBlock } from '@/blocks/registry'
|
||||
|
||||
// Arguments for GetBlockConfigClientTool.execute (validated by GetBlockConfigInput).
interface GetBlockConfigArgs {
  blockType: string
  operation?: string
  trigger?: boolean
}
|
||||
|
||||
export class GetBlockConfigClientTool extends BaseClientTool {
|
||||
static readonly id = 'get_block_config'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, GetBlockConfigClientTool.id, GetBlockConfigClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Getting block config', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Getting block config', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Getting block config', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Retrieved block config', icon: FileCode },
|
||||
[ClientToolCallState.error]: { text: 'Failed to get block config', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted getting block config', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: {
|
||||
text: 'Skipped getting block config',
|
||||
icon: MinusCircle,
|
||||
},
|
||||
},
|
||||
getDynamicText: (params, state) => {
|
||||
if (params?.blockType && typeof params.blockType === 'string') {
|
||||
const blockConfig = getLatestBlock(params.blockType)
|
||||
const blockName = (blockConfig?.name ?? params.blockType.replace(/_/g, ' ')).toLowerCase()
|
||||
const opSuffix = params.operation ? ` (${params.operation})` : ''
|
||||
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `Retrieved ${blockName}${opSuffix} config`
|
||||
case ClientToolCallState.executing:
|
||||
case ClientToolCallState.generating:
|
||||
case ClientToolCallState.pending:
|
||||
return `Retrieving ${blockName}${opSuffix} config`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to retrieve ${blockName}${opSuffix} config`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted retrieving ${blockName}${opSuffix} config`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped retrieving ${blockName}${opSuffix} config`
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async execute(args?: GetBlockConfigArgs): Promise<void> {
|
||||
const logger = createLogger('GetBlockConfigClientTool')
|
||||
try {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
|
||||
const { blockType, operation, trigger } = GetBlockConfigInput.parse(args || {})
|
||||
|
||||
const res = await fetch('/api/copilot/execute-copilot-server-tool', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
toolName: 'get_block_config',
|
||||
payload: { blockType, operation, trigger },
|
||||
}),
|
||||
})
|
||||
if (!res.ok) {
|
||||
const errorText = await res.text().catch(() => '')
|
||||
throw new Error(errorText || `Server error (${res.status})`)
|
||||
}
|
||||
const json = await res.json()
|
||||
const parsed = ExecuteResponseSuccessSchema.parse(json)
|
||||
const result = GetBlockConfigResult.parse(parsed.result)
|
||||
|
||||
const inputCount = Object.keys(result.inputs).length
|
||||
const outputCount = Object.keys(result.outputs).length
|
||||
await this.markToolComplete(200, { inputs: inputCount, outputs: outputCount }, result)
|
||||
this.setState(ClientToolCallState.success)
|
||||
} catch (error: any) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
logger.error('Execute failed', { message })
|
||||
await this.markToolComplete(500, message)
|
||||
this.setState(ClientToolCallState.error)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,110 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { ListFilter, Loader2, MinusCircle, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import {
|
||||
ExecuteResponseSuccessSchema,
|
||||
GetBlockOptionsInput,
|
||||
GetBlockOptionsResult,
|
||||
} from '@/lib/copilot/tools/shared/schemas'
|
||||
import { getLatestBlock } from '@/blocks/registry'
|
||||
|
||||
// Arguments for GetBlockOptionsClientTool.execute; snake_case and
// blockType aliases are normalized inside execute().
interface GetBlockOptionsArgs {
  blockId: string
}
|
||||
|
||||
export class GetBlockOptionsClientTool extends BaseClientTool {
|
||||
static readonly id = 'get_block_options'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, GetBlockOptionsClientTool.id, GetBlockOptionsClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Getting block operations', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Getting block operations', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Getting block operations', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Retrieved block operations', icon: ListFilter },
|
||||
[ClientToolCallState.error]: { text: 'Failed to get block operations', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted getting block operations', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: {
|
||||
text: 'Skipped getting block operations',
|
||||
icon: MinusCircle,
|
||||
},
|
||||
},
|
||||
getDynamicText: (params, state) => {
|
||||
const blockId =
|
||||
(params as any)?.blockId ||
|
||||
(params as any)?.blockType ||
|
||||
(params as any)?.block_id ||
|
||||
(params as any)?.block_type
|
||||
if (typeof blockId === 'string') {
|
||||
const blockConfig = getLatestBlock(blockId)
|
||||
const blockName = (blockConfig?.name ?? blockId.replace(/_/g, ' ')).toLowerCase()
|
||||
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `Retrieved ${blockName} operations`
|
||||
case ClientToolCallState.executing:
|
||||
case ClientToolCallState.generating:
|
||||
case ClientToolCallState.pending:
|
||||
return `Retrieving ${blockName} operations`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to retrieve ${blockName} operations`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted retrieving ${blockName} operations`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped retrieving ${blockName} operations`
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async execute(args?: GetBlockOptionsArgs): Promise<void> {
|
||||
const logger = createLogger('GetBlockOptionsClientTool')
|
||||
try {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
|
||||
// Handle both camelCase and snake_case parameter names, plus blockType as an alias
|
||||
const normalizedArgs = args
|
||||
? {
|
||||
blockId:
|
||||
args.blockId ||
|
||||
(args as any).block_id ||
|
||||
(args as any).blockType ||
|
||||
(args as any).block_type,
|
||||
}
|
||||
: {}
|
||||
|
||||
logger.info('execute called', { originalArgs: args, normalizedArgs })
|
||||
|
||||
const { blockId } = GetBlockOptionsInput.parse(normalizedArgs)
|
||||
|
||||
const res = await fetch('/api/copilot/execute-copilot-server-tool', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ toolName: 'get_block_options', payload: { blockId } }),
|
||||
})
|
||||
if (!res.ok) {
|
||||
const errorText = await res.text().catch(() => '')
|
||||
throw new Error(errorText || `Server error (${res.status})`)
|
||||
}
|
||||
const json = await res.json()
|
||||
const parsed = ExecuteResponseSuccessSchema.parse(json)
|
||||
const result = GetBlockOptionsResult.parse(parsed.result)
|
||||
|
||||
await this.markToolComplete(200, { operations: result.operations.length }, result)
|
||||
this.setState(ClientToolCallState.success)
|
||||
} catch (error: any) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
logger.error('Execute failed', { message })
|
||||
await this.markToolComplete(500, message)
|
||||
this.setState(ClientToolCallState.error)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,59 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { Blocks, Loader2, MinusCircle, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import {
|
||||
ExecuteResponseSuccessSchema,
|
||||
GetBlocksAndToolsResult,
|
||||
} from '@/lib/copilot/tools/shared/schemas'
|
||||
|
||||
export class GetBlocksAndToolsClientTool extends BaseClientTool {
|
||||
static readonly id = 'get_blocks_and_tools'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, GetBlocksAndToolsClientTool.id, GetBlocksAndToolsClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Exploring available options', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Exploring available options', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Exploring available options', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Explored available options', icon: Blocks },
|
||||
[ClientToolCallState.error]: { text: 'Failed to explore options', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted exploring options', icon: MinusCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped exploring options', icon: MinusCircle },
|
||||
},
|
||||
interrupt: undefined,
|
||||
}
|
||||
|
||||
async execute(): Promise<void> {
|
||||
const logger = createLogger('GetBlocksAndToolsClientTool')
|
||||
try {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
|
||||
const res = await fetch('/api/copilot/execute-copilot-server-tool', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ toolName: 'get_blocks_and_tools', payload: {} }),
|
||||
})
|
||||
if (!res.ok) {
|
||||
const errorText = await res.text().catch(() => '')
|
||||
throw new Error(errorText || `Server error (${res.status})`)
|
||||
}
|
||||
const json = await res.json()
|
||||
const parsed = ExecuteResponseSuccessSchema.parse(json)
|
||||
const result = GetBlocksAndToolsResult.parse(parsed.result)
|
||||
|
||||
await this.markToolComplete(200, 'Successfully retrieved blocks and tools', result)
|
||||
this.setState(ClientToolCallState.success)
|
||||
} catch (error: any) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
await this.markToolComplete(500, message)
|
||||
this.setState(ClientToolCallState.error)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,95 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { ListFilter, Loader2, MinusCircle, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import {
|
||||
ExecuteResponseSuccessSchema,
|
||||
GetBlocksMetadataInput,
|
||||
GetBlocksMetadataResult,
|
||||
} from '@/lib/copilot/tools/shared/schemas'
|
||||
|
||||
// Arguments for GetBlocksMetadataClientTool.execute (validated by GetBlocksMetadataInput).
interface GetBlocksMetadataArgs {
  blockIds: string[]
}
|
||||
|
||||
export class GetBlocksMetadataClientTool extends BaseClientTool {
|
||||
static readonly id = 'get_blocks_metadata'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, GetBlocksMetadataClientTool.id, GetBlocksMetadataClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Searching block choices', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Searching block choices', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Searching block choices', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Searched block choices', icon: ListFilter },
|
||||
[ClientToolCallState.error]: { text: 'Failed to search block choices', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted searching block choices', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: {
|
||||
text: 'Skipped searching block choices',
|
||||
icon: MinusCircle,
|
||||
},
|
||||
},
|
||||
getDynamicText: (params, state) => {
|
||||
if (params?.blockIds && Array.isArray(params.blockIds) && params.blockIds.length > 0) {
|
||||
const blockList = params.blockIds
|
||||
.slice(0, 3)
|
||||
.map((blockId) => blockId.replace(/_/g, ' '))
|
||||
.join(', ')
|
||||
const more = params.blockIds.length > 3 ? '...' : ''
|
||||
const blocks = `${blockList}${more}`
|
||||
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `Searched ${blocks}`
|
||||
case ClientToolCallState.executing:
|
||||
case ClientToolCallState.generating:
|
||||
case ClientToolCallState.pending:
|
||||
return `Searching ${blocks}`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to search ${blocks}`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted searching ${blocks}`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped searching ${blocks}`
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async execute(args?: GetBlocksMetadataArgs): Promise<void> {
|
||||
const logger = createLogger('GetBlocksMetadataClientTool')
|
||||
try {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
|
||||
const { blockIds } = GetBlocksMetadataInput.parse(args || {})
|
||||
|
||||
const res = await fetch('/api/copilot/execute-copilot-server-tool', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ toolName: 'get_blocks_metadata', payload: { blockIds } }),
|
||||
})
|
||||
if (!res.ok) {
|
||||
const errorText = await res.text().catch(() => '')
|
||||
throw new Error(errorText || `Server error (${res.status})`)
|
||||
}
|
||||
const json = await res.json()
|
||||
const parsed = ExecuteResponseSuccessSchema.parse(json)
|
||||
const result = GetBlocksMetadataResult.parse(parsed.result)
|
||||
|
||||
await this.markToolComplete(200, { retrieved: Object.keys(result.metadata).length }, result)
|
||||
this.setState(ClientToolCallState.success)
|
||||
} catch (error: any) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
logger.error('Execute failed', { message })
|
||||
await this.markToolComplete(500, message)
|
||||
this.setState(ClientToolCallState.error)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,64 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { ListFilter, Loader2, MinusCircle, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import {
|
||||
ExecuteResponseSuccessSchema,
|
||||
GetTriggerBlocksResult,
|
||||
} from '@/lib/copilot/tools/shared/schemas'
|
||||
|
||||
export class GetTriggerBlocksClientTool extends BaseClientTool {
|
||||
static readonly id = 'get_trigger_blocks'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, GetTriggerBlocksClientTool.id, GetTriggerBlocksClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Finding trigger blocks', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Finding trigger blocks', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Finding trigger blocks', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Found trigger blocks', icon: ListFilter },
|
||||
[ClientToolCallState.error]: { text: 'Failed to find trigger blocks', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted finding trigger blocks', icon: MinusCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped finding trigger blocks', icon: MinusCircle },
|
||||
},
|
||||
interrupt: undefined,
|
||||
}
|
||||
|
||||
async execute(): Promise<void> {
|
||||
const logger = createLogger('GetTriggerBlocksClientTool')
|
||||
try {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
|
||||
const res = await fetch('/api/copilot/execute-copilot-server-tool', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ toolName: 'get_trigger_blocks', payload: {} }),
|
||||
})
|
||||
if (!res.ok) {
|
||||
const errorText = await res.text().catch(() => '')
|
||||
try {
|
||||
const errorJson = JSON.parse(errorText)
|
||||
throw new Error(errorJson.error || errorText || `Server error (${res.status})`)
|
||||
} catch {
|
||||
throw new Error(errorText || `Server error (${res.status})`)
|
||||
}
|
||||
}
|
||||
const json = await res.json()
|
||||
const parsed = ExecuteResponseSuccessSchema.parse(json)
|
||||
const result = GetTriggerBlocksResult.parse(parsed.result)
|
||||
|
||||
await this.markToolComplete(200, 'Successfully retrieved trigger blocks', result)
|
||||
this.setState(ClientToolCallState.success)
|
||||
} catch (error: any) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
await this.markToolComplete(500, message)
|
||||
this.setState(ClientToolCallState.error)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
import { Loader2, MinusCircle, Search, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
|
||||
export class GetExamplesRagClientTool extends BaseClientTool {
|
||||
static readonly id = 'get_examples_rag'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, GetExamplesRagClientTool.id, GetExamplesRagClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Fetching examples', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Fetching examples', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Fetching examples', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Fetched examples', icon: Search },
|
||||
[ClientToolCallState.error]: { text: 'Failed to fetch examples', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted getting examples', icon: MinusCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped getting examples', icon: MinusCircle },
|
||||
},
|
||||
interrupt: undefined,
|
||||
getDynamicText: (params, state) => {
|
||||
if (params?.query && typeof params.query === 'string') {
|
||||
const query = params.query
|
||||
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `Found examples for ${query}`
|
||||
case ClientToolCallState.executing:
|
||||
case ClientToolCallState.generating:
|
||||
case ClientToolCallState.pending:
|
||||
return `Searching examples for ${query}`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to find examples for ${query}`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted searching examples for ${query}`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped searching examples for ${query}`
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async execute(): Promise<void> {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
import { Loader2, MinusCircle, XCircle, Zap } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
|
||||
export class GetOperationsExamplesClientTool extends BaseClientTool {
|
||||
static readonly id = 'get_operations_examples'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, GetOperationsExamplesClientTool.id, GetOperationsExamplesClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Designing workflow component', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Designing workflow component', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Designing workflow component', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Designed workflow component', icon: Zap },
|
||||
[ClientToolCallState.error]: { text: 'Failed to design workflow component', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: {
|
||||
text: 'Aborted designing workflow component',
|
||||
icon: MinusCircle,
|
||||
},
|
||||
[ClientToolCallState.rejected]: {
|
||||
text: 'Skipped designing workflow component',
|
||||
icon: MinusCircle,
|
||||
},
|
||||
},
|
||||
interrupt: undefined,
|
||||
getDynamicText: (params, state) => {
|
||||
if (params?.query && typeof params.query === 'string') {
|
||||
const query = params.query
|
||||
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `Designed ${query}`
|
||||
case ClientToolCallState.executing:
|
||||
case ClientToolCallState.generating:
|
||||
case ClientToolCallState.pending:
|
||||
return `Designing ${query}`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to design ${query}`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted designing ${query}`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped designing ${query}`
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async execute(): Promise<void> {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
import { Loader2, MinusCircle, XCircle, Zap } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
|
||||
export class GetTriggerExamplesClientTool extends BaseClientTool {
|
||||
static readonly id = 'get_trigger_examples'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, GetTriggerExamplesClientTool.id, GetTriggerExamplesClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Selecting a trigger', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Selecting a trigger', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Selecting a trigger', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Selected a trigger', icon: Zap },
|
||||
[ClientToolCallState.error]: { text: 'Failed to select a trigger', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted selecting a trigger', icon: MinusCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped selecting a trigger', icon: MinusCircle },
|
||||
},
|
||||
interrupt: undefined,
|
||||
}
|
||||
|
||||
async execute(): Promise<void> {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,37 +0,0 @@
|
||||
import { Loader2, MinusCircle, PencilLine, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
|
||||
export class SummarizeClientTool extends BaseClientTool {
|
||||
static readonly id = 'summarize_conversation'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, SummarizeClientTool.id, SummarizeClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Summarizing conversation', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Summarizing conversation', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Summarizing conversation', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Summarized conversation', icon: PencilLine },
|
||||
[ClientToolCallState.error]: { text: 'Failed to summarize conversation', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: {
|
||||
text: 'Aborted summarizing conversation',
|
||||
icon: MinusCircle,
|
||||
},
|
||||
[ClientToolCallState.rejected]: {
|
||||
text: 'Skipped summarizing conversation',
|
||||
icon: MinusCircle,
|
||||
},
|
||||
},
|
||||
interrupt: undefined,
|
||||
}
|
||||
|
||||
async execute(): Promise<void> {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
/**
|
||||
* Initialize all tool UI configurations.
|
||||
*
|
||||
* This module imports all client tools to trigger their UI config registration.
|
||||
* Import this module early in the app to ensure all tool configs are available.
|
||||
*/
|
||||
|
||||
// Other tools (subagents)
|
||||
import './other/auth'
|
||||
import './other/custom-tool'
|
||||
import './other/debug'
|
||||
import './other/deploy'
|
||||
import './other/edit'
|
||||
import './other/evaluate'
|
||||
import './other/info'
|
||||
import './other/knowledge'
|
||||
import './other/make-api-request'
|
||||
import './other/plan'
|
||||
import './other/research'
|
||||
import './other/sleep'
|
||||
import './other/superagent'
|
||||
import './other/test'
|
||||
import './other/tour'
|
||||
import './other/workflow'
|
||||
|
||||
// Workflow tools
|
||||
import './workflow/deploy-api'
|
||||
import './workflow/deploy-chat'
|
||||
import './workflow/deploy-mcp'
|
||||
import './workflow/edit-workflow'
|
||||
import './workflow/redeploy'
|
||||
import './workflow/run-workflow'
|
||||
import './workflow/set-global-workflow-variables'
|
||||
|
||||
// User tools
|
||||
import './user/set-environment-variables'
|
||||
@@ -1,143 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { Database, Loader2, MinusCircle, PlusCircle, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import {
|
||||
ExecuteResponseSuccessSchema,
|
||||
type KnowledgeBaseArgs,
|
||||
} from '@/lib/copilot/tools/shared/schemas'
|
||||
import { useCopilotStore } from '@/stores/panel/copilot/store'
|
||||
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
|
||||
|
||||
/**
|
||||
* Client tool for knowledge base operations
|
||||
*/
|
||||
export class KnowledgeBaseClientTool extends BaseClientTool {
|
||||
static readonly id = 'knowledge_base'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, KnowledgeBaseClientTool.id, KnowledgeBaseClientTool.metadata)
|
||||
}
|
||||
|
||||
/**
|
||||
* Only show interrupt for create operation
|
||||
*/
|
||||
getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined {
|
||||
const toolCallsById = useCopilotStore.getState().toolCallsById
|
||||
const toolCall = toolCallsById[this.toolCallId]
|
||||
const params = toolCall?.params as KnowledgeBaseArgs | undefined
|
||||
|
||||
// Only require confirmation for create operation
|
||||
if (params?.operation === 'create') {
|
||||
const name = params?.args?.name || 'new knowledge base'
|
||||
return {
|
||||
accept: { text: `Create "${name}"`, icon: PlusCircle },
|
||||
reject: { text: 'Skip', icon: XCircle },
|
||||
}
|
||||
}
|
||||
|
||||
// No interrupt for list, get, query - auto-execute
|
||||
return undefined
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Accessing knowledge base', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Accessing knowledge base', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Accessing knowledge base', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Accessed knowledge base', icon: Database },
|
||||
[ClientToolCallState.error]: { text: 'Failed to access knowledge base', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted knowledge base access', icon: MinusCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped knowledge base access', icon: MinusCircle },
|
||||
},
|
||||
getDynamicText: (params: Record<string, any>, state: ClientToolCallState) => {
|
||||
const operation = params?.operation as string | undefined
|
||||
const name = params?.args?.name as string | undefined
|
||||
|
||||
const opVerbs: Record<string, { active: string; past: string; pending?: string }> = {
|
||||
create: {
|
||||
active: 'Creating knowledge base',
|
||||
past: 'Created knowledge base',
|
||||
pending: name ? `Create knowledge base "${name}"?` : 'Create knowledge base?',
|
||||
},
|
||||
list: { active: 'Listing knowledge bases', past: 'Listed knowledge bases' },
|
||||
get: { active: 'Getting knowledge base', past: 'Retrieved knowledge base' },
|
||||
query: { active: 'Querying knowledge base', past: 'Queried knowledge base' },
|
||||
}
|
||||
const defaultVerb: { active: string; past: string; pending?: string } = {
|
||||
active: 'Accessing knowledge base',
|
||||
past: 'Accessed knowledge base',
|
||||
}
|
||||
const verb = operation ? opVerbs[operation] || defaultVerb : defaultVerb
|
||||
|
||||
if (state === ClientToolCallState.success) {
|
||||
return verb.past
|
||||
}
|
||||
if (state === ClientToolCallState.pending && verb.pending) {
|
||||
return verb.pending
|
||||
}
|
||||
if (
|
||||
state === ClientToolCallState.generating ||
|
||||
state === ClientToolCallState.pending ||
|
||||
state === ClientToolCallState.executing
|
||||
) {
|
||||
return verb.active
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async handleReject(): Promise<void> {
|
||||
await super.handleReject()
|
||||
this.setState(ClientToolCallState.rejected)
|
||||
}
|
||||
|
||||
async handleAccept(args?: KnowledgeBaseArgs): Promise<void> {
|
||||
await this.execute(args)
|
||||
}
|
||||
|
||||
async execute(args?: KnowledgeBaseArgs): Promise<void> {
|
||||
const logger = createLogger('KnowledgeBaseClientTool')
|
||||
try {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
|
||||
// Get the workspace ID from the workflow registry hydration state
|
||||
const { hydration } = useWorkflowRegistry.getState()
|
||||
const workspaceId = hydration.workspaceId
|
||||
|
||||
// Build payload with workspace ID included in args
|
||||
const payload: KnowledgeBaseArgs = {
|
||||
...(args || { operation: 'list' }),
|
||||
args: {
|
||||
...(args?.args || {}),
|
||||
workspaceId: workspaceId || undefined,
|
||||
},
|
||||
}
|
||||
|
||||
const res = await fetch('/api/copilot/execute-copilot-server-tool', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ toolName: 'knowledge_base', payload }),
|
||||
})
|
||||
|
||||
if (!res.ok) {
|
||||
const txt = await res.text().catch(() => '')
|
||||
throw new Error(txt || `Server error (${res.status})`)
|
||||
}
|
||||
|
||||
const json = await res.json()
|
||||
const parsed = ExecuteResponseSuccessSchema.parse(json)
|
||||
|
||||
this.setState(ClientToolCallState.success)
|
||||
await this.markToolComplete(200, 'Knowledge base operation completed', parsed.result)
|
||||
this.setState(ClientToolCallState.success)
|
||||
} catch (e: any) {
|
||||
logger.error('execute failed', { message: e?.message })
|
||||
this.setState(ClientToolCallState.error)
|
||||
await this.markToolComplete(500, e?.message || 'Failed to access knowledge base')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
const instances: Record<string, any> = {}
|
||||
|
||||
let syncStateFn: ((toolCallId: string, nextState: any, options?: { result?: any }) => void) | null =
|
||||
null
|
||||
|
||||
export function registerClientTool(toolCallId: string, instance: any) {
|
||||
instances[toolCallId] = instance
|
||||
}
|
||||
|
||||
export function getClientTool(toolCallId: string): any | undefined {
|
||||
return instances[toolCallId]
|
||||
}
|
||||
|
||||
export function registerToolStateSync(
|
||||
fn: (toolCallId: string, nextState: any, options?: { result?: any }) => void
|
||||
) {
|
||||
syncStateFn = fn
|
||||
}
|
||||
|
||||
export function syncToolState(toolCallId: string, nextState: any, options?: { result?: any }) {
|
||||
try {
|
||||
syncStateFn?.(toolCallId, nextState, options)
|
||||
} catch {}
|
||||
}
|
||||
@@ -1,241 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { Loader2, Navigation, X, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { useCopilotStore } from '@/stores/panel/copilot/store'
|
||||
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
|
||||
|
||||
type NavigationDestination = 'workflow' | 'logs' | 'templates' | 'vector_db' | 'settings'
|
||||
|
||||
interface NavigateUIArgs {
|
||||
destination: NavigationDestination
|
||||
workflowName?: string
|
||||
}
|
||||
|
||||
export class NavigateUIClientTool extends BaseClientTool {
|
||||
static readonly id = 'navigate_ui'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, NavigateUIClientTool.id, NavigateUIClientTool.metadata)
|
||||
}
|
||||
|
||||
/**
|
||||
* Override to provide dynamic button text based on destination
|
||||
*/
|
||||
getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined {
|
||||
const toolCallsById = useCopilotStore.getState().toolCallsById
|
||||
const toolCall = toolCallsById[this.toolCallId]
|
||||
const params = toolCall?.params as NavigateUIArgs | undefined
|
||||
|
||||
const destination = params?.destination
|
||||
const workflowName = params?.workflowName
|
||||
|
||||
let buttonText = 'Navigate'
|
||||
|
||||
if (destination === 'workflow' && workflowName) {
|
||||
buttonText = 'Open workflow'
|
||||
} else if (destination === 'logs') {
|
||||
buttonText = 'Open logs'
|
||||
} else if (destination === 'templates') {
|
||||
buttonText = 'Open templates'
|
||||
} else if (destination === 'vector_db') {
|
||||
buttonText = 'Open vector DB'
|
||||
} else if (destination === 'settings') {
|
||||
buttonText = 'Open settings'
|
||||
}
|
||||
|
||||
return {
|
||||
accept: { text: buttonText, icon: Navigation },
|
||||
reject: { text: 'Skip', icon: XCircle },
|
||||
}
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: {
|
||||
text: 'Preparing to open',
|
||||
icon: Loader2,
|
||||
},
|
||||
[ClientToolCallState.pending]: { text: 'Open?', icon: Navigation },
|
||||
[ClientToolCallState.executing]: { text: 'Opening', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Opened', icon: Navigation },
|
||||
[ClientToolCallState.error]: { text: 'Failed to open', icon: X },
|
||||
[ClientToolCallState.aborted]: {
|
||||
text: 'Aborted opening',
|
||||
icon: XCircle,
|
||||
},
|
||||
[ClientToolCallState.rejected]: {
|
||||
text: 'Skipped opening',
|
||||
icon: XCircle,
|
||||
},
|
||||
},
|
||||
interrupt: {
|
||||
accept: { text: 'Open', icon: Navigation },
|
||||
reject: { text: 'Skip', icon: XCircle },
|
||||
},
|
||||
getDynamicText: (params, state) => {
|
||||
const destination = params?.destination as NavigationDestination | undefined
|
||||
const workflowName = params?.workflowName
|
||||
|
||||
const action = 'open'
|
||||
const actionCapitalized = 'Open'
|
||||
const actionPast = 'opened'
|
||||
const actionIng = 'opening'
|
||||
let target = ''
|
||||
|
||||
if (destination === 'workflow' && workflowName) {
|
||||
target = ` workflow "${workflowName}"`
|
||||
} else if (destination === 'workflow') {
|
||||
target = ' workflows'
|
||||
} else if (destination === 'logs') {
|
||||
target = ' logs'
|
||||
} else if (destination === 'templates') {
|
||||
target = ' templates'
|
||||
} else if (destination === 'vector_db') {
|
||||
target = ' vector database'
|
||||
} else if (destination === 'settings') {
|
||||
target = ' settings'
|
||||
}
|
||||
|
||||
const fullAction = `${action}${target}`
|
||||
const fullActionCapitalized = `${actionCapitalized}${target}`
|
||||
const fullActionPast = `${actionPast}${target}`
|
||||
const fullActionIng = `${actionIng}${target}`
|
||||
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return fullActionPast.charAt(0).toUpperCase() + fullActionPast.slice(1)
|
||||
case ClientToolCallState.executing:
|
||||
return fullActionIng.charAt(0).toUpperCase() + fullActionIng.slice(1)
|
||||
case ClientToolCallState.generating:
|
||||
return `Preparing to ${fullAction}`
|
||||
case ClientToolCallState.pending:
|
||||
return `${fullActionCapitalized}?`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to ${fullAction}`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted ${fullAction}`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped ${fullAction}`
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async handleReject(): Promise<void> {
|
||||
await super.handleReject()
|
||||
this.setState(ClientToolCallState.rejected)
|
||||
}
|
||||
|
||||
async handleAccept(args?: NavigateUIArgs): Promise<void> {
|
||||
const logger = createLogger('NavigateUIClientTool')
|
||||
try {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
|
||||
// Get params from copilot store if not provided directly
|
||||
let destination = args?.destination
|
||||
let workflowName = args?.workflowName
|
||||
|
||||
if (!destination) {
|
||||
const toolCallsById = useCopilotStore.getState().toolCallsById
|
||||
const toolCall = toolCallsById[this.toolCallId]
|
||||
const params = toolCall?.params as NavigateUIArgs | undefined
|
||||
destination = params?.destination
|
||||
workflowName = params?.workflowName
|
||||
}
|
||||
|
||||
if (!destination) {
|
||||
throw new Error('No destination provided')
|
||||
}
|
||||
|
||||
let navigationUrl = ''
|
||||
let successMessage = ''
|
||||
|
||||
// Get current workspace ID from URL
|
||||
const workspaceId = window.location.pathname.split('/')[2]
|
||||
|
||||
switch (destination) {
|
||||
case 'workflow':
|
||||
if (workflowName) {
|
||||
// Find workflow by name
|
||||
const { workflows } = useWorkflowRegistry.getState()
|
||||
const workflow = Object.values(workflows).find(
|
||||
(w) => w.name.toLowerCase() === workflowName.toLowerCase()
|
||||
)
|
||||
|
||||
if (!workflow) {
|
||||
throw new Error(`Workflow "${workflowName}" not found`)
|
||||
}
|
||||
|
||||
navigationUrl = `/workspace/${workspaceId}/w/${workflow.id}`
|
||||
successMessage = `Navigated to workflow "${workflowName}"`
|
||||
} else {
|
||||
navigationUrl = `/workspace/${workspaceId}/w`
|
||||
successMessage = 'Navigated to workflows'
|
||||
}
|
||||
break
|
||||
|
||||
case 'logs':
|
||||
navigationUrl = `/workspace/${workspaceId}/logs`
|
||||
successMessage = 'Navigated to logs'
|
||||
break
|
||||
|
||||
case 'templates':
|
||||
navigationUrl = `/workspace/${workspaceId}/templates`
|
||||
successMessage = 'Navigated to templates'
|
||||
break
|
||||
|
||||
case 'vector_db':
|
||||
navigationUrl = `/workspace/${workspaceId}/vector-db`
|
||||
successMessage = 'Navigated to vector database'
|
||||
break
|
||||
|
||||
case 'settings':
|
||||
window.dispatchEvent(new CustomEvent('open-settings', { detail: { tab: 'general' } }))
|
||||
successMessage = 'Opened settings'
|
||||
break
|
||||
|
||||
default:
|
||||
throw new Error(`Unknown destination: ${destination}`)
|
||||
}
|
||||
|
||||
// Navigate if URL was set
|
||||
if (navigationUrl) {
|
||||
window.location.href = navigationUrl
|
||||
}
|
||||
|
||||
this.setState(ClientToolCallState.success)
|
||||
await this.markToolComplete(200, successMessage, {
|
||||
destination,
|
||||
workflowName,
|
||||
navigated: true,
|
||||
})
|
||||
} catch (e: any) {
|
||||
logger.error('Navigation failed', { message: e?.message })
|
||||
this.setState(ClientToolCallState.error)
|
||||
|
||||
// Get destination info for better error message
|
||||
const toolCallsById = useCopilotStore.getState().toolCallsById
|
||||
const toolCall = toolCallsById[this.toolCallId]
|
||||
const params = toolCall?.params as NavigateUIArgs | undefined
|
||||
const dest = params?.destination
|
||||
const wfName = params?.workflowName
|
||||
|
||||
let errorMessage = e?.message || 'Failed to navigate'
|
||||
if (dest === 'workflow' && wfName) {
|
||||
errorMessage = `Failed to navigate to workflow "${wfName}": ${e?.message || 'Unknown error'}`
|
||||
} else if (dest) {
|
||||
errorMessage = `Failed to navigate to ${dest}: ${e?.message || 'Unknown error'}`
|
||||
}
|
||||
|
||||
await this.markToolComplete(500, errorMessage)
|
||||
}
|
||||
}
|
||||
|
||||
async execute(args?: NavigateUIArgs): Promise<void> {
|
||||
await this.handleAccept(args)
|
||||
}
|
||||
}
|
||||
@@ -1,56 +0,0 @@
|
||||
import { KeyRound, Loader2, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
|
||||
interface AuthArgs {
|
||||
instruction: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Auth tool that spawns a subagent to handle authentication setup.
|
||||
* This tool auto-executes and the actual work is done by the auth subagent.
|
||||
* The subagent's output is streamed as nested content under this tool call.
|
||||
*/
|
||||
export class AuthClientTool extends BaseClientTool {
|
||||
static readonly id = 'auth'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, AuthClientTool.id, AuthClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Authenticating', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Authenticating', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Authenticating', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Authenticated', icon: KeyRound },
|
||||
[ClientToolCallState.error]: { text: 'Failed to authenticate', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped auth', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted auth', icon: XCircle },
|
||||
},
|
||||
uiConfig: {
|
||||
subagent: {
|
||||
streamingLabel: 'Authenticating',
|
||||
completedLabel: 'Authenticated',
|
||||
shouldCollapse: true,
|
||||
outputArtifacts: [],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the auth tool.
|
||||
* This just marks the tool as executing - the actual auth work is done server-side
|
||||
* by the auth subagent, and its output is streamed as subagent events.
|
||||
*/
|
||||
async execute(_args?: AuthArgs): Promise<void> {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(AuthClientTool.id, AuthClientTool.metadata.uiConfig!)
|
||||
@@ -1,61 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { Check, Loader2, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
|
||||
interface CheckoffTodoArgs {
|
||||
id?: string
|
||||
todoId?: string
|
||||
}
|
||||
|
||||
export class CheckoffTodoClientTool extends BaseClientTool {
|
||||
static readonly id = 'checkoff_todo'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, CheckoffTodoClientTool.id, CheckoffTodoClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Marking todo', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Marking todo', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Marked todo complete', icon: Check },
|
||||
[ClientToolCallState.error]: { text: 'Failed to mark todo', icon: XCircle },
|
||||
},
|
||||
}
|
||||
|
||||
async execute(args?: CheckoffTodoArgs): Promise<void> {
|
||||
const logger = createLogger('CheckoffTodoClientTool')
|
||||
try {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
|
||||
const todoId = args?.id || args?.todoId
|
||||
if (!todoId) {
|
||||
this.setState(ClientToolCallState.error)
|
||||
await this.markToolComplete(400, 'Missing todo id')
|
||||
return
|
||||
}
|
||||
|
||||
try {
|
||||
const { useCopilotStore } = await import('@/stores/panel/copilot/store')
|
||||
const store = useCopilotStore.getState()
|
||||
if (store.updatePlanTodoStatus) {
|
||||
store.updatePlanTodoStatus(todoId, 'completed')
|
||||
}
|
||||
} catch (e) {
|
||||
logger.warn('Failed to update todo status in store', { message: (e as any)?.message })
|
||||
}
|
||||
|
||||
this.setState(ClientToolCallState.success)
|
||||
await this.markToolComplete(200, 'Todo checked off', { todoId })
|
||||
this.setState(ClientToolCallState.success)
|
||||
} catch (e: any) {
|
||||
logger.error('execute failed', { message: e?.message })
|
||||
this.setState(ClientToolCallState.error)
|
||||
await this.markToolComplete(500, e?.message || 'Failed to check off todo')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
import { Globe, Loader2, MinusCircle, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
|
||||
export class CrawlWebsiteClientTool extends BaseClientTool {
|
||||
static readonly id = 'crawl_website'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, CrawlWebsiteClientTool.id, CrawlWebsiteClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Crawling website', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Crawling website', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Crawling website', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Crawled website', icon: Globe },
|
||||
[ClientToolCallState.error]: { text: 'Failed to crawl website', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted crawling website', icon: MinusCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped crawling website', icon: MinusCircle },
|
||||
},
|
||||
interrupt: undefined,
|
||||
getDynamicText: (params, state) => {
|
||||
if (params?.url && typeof params.url === 'string') {
|
||||
const url = params.url
|
||||
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `Crawled ${url}`
|
||||
case ClientToolCallState.executing:
|
||||
case ClientToolCallState.generating:
|
||||
case ClientToolCallState.pending:
|
||||
return `Crawling ${url}`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to crawl ${url}`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted crawling ${url}`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped crawling ${url}`
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async execute(): Promise<void> {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,56 +0,0 @@
|
||||
import { Loader2, Wrench, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
|
||||
interface CustomToolArgs {
|
||||
instruction: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Custom tool that spawns a subagent to manage custom tools.
|
||||
* This tool auto-executes and the actual work is done by the custom_tool subagent.
|
||||
* The subagent's output is streamed as nested content under this tool call.
|
||||
*/
|
||||
export class CustomToolClientTool extends BaseClientTool {
|
||||
static readonly id = 'custom_tool'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, CustomToolClientTool.id, CustomToolClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Managing custom tool', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Managing custom tool', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Managing custom tool', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Managed custom tool', icon: Wrench },
|
||||
[ClientToolCallState.error]: { text: 'Failed custom tool', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped custom tool', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted custom tool', icon: XCircle },
|
||||
},
|
||||
uiConfig: {
|
||||
subagent: {
|
||||
streamingLabel: 'Managing custom tool',
|
||||
completedLabel: 'Custom tool managed',
|
||||
shouldCollapse: true,
|
||||
outputArtifacts: [],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the custom_tool tool.
|
||||
* This just marks the tool as executing - the actual custom tool work is done server-side
|
||||
* by the custom_tool subagent, and its output is streamed as subagent events.
|
||||
*/
|
||||
async execute(_args?: CustomToolArgs): Promise<void> {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(CustomToolClientTool.id, CustomToolClientTool.metadata.uiConfig!)
|
||||
@@ -1,60 +0,0 @@
|
||||
import { Bug, Loader2, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
|
||||
interface DebugArgs {
|
||||
error_description: string
|
||||
context?: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Debug tool that spawns a subagent to diagnose workflow issues.
|
||||
* This tool auto-executes and the actual work is done by the debug subagent.
|
||||
* The subagent's output is streamed as nested content under this tool call.
|
||||
*/
|
||||
export class DebugClientTool extends BaseClientTool {
|
||||
static readonly id = 'debug'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, DebugClientTool.id, DebugClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Debugging', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Debugging', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Debugging', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Debugged', icon: Bug },
|
||||
[ClientToolCallState.error]: { text: 'Failed to debug', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped debug', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted debug', icon: XCircle },
|
||||
},
|
||||
uiConfig: {
|
||||
subagent: {
|
||||
streamingLabel: 'Debugging',
|
||||
completedLabel: 'Debugged',
|
||||
shouldCollapse: true,
|
||||
outputArtifacts: [],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the debug tool.
|
||||
* This just marks the tool as executing - the actual debug work is done server-side
|
||||
* by the debug subagent, and its output is streamed as subagent events.
|
||||
*/
|
||||
async execute(_args?: DebugArgs): Promise<void> {
|
||||
// Immediately transition to executing state - no user confirmation needed
|
||||
this.setState(ClientToolCallState.executing)
|
||||
// The tool result will come from the server via tool_result event
|
||||
// when the debug subagent completes its work
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(DebugClientTool.id, DebugClientTool.metadata.uiConfig!)
|
||||
@@ -1,56 +0,0 @@
|
||||
import { Loader2, Rocket, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
|
||||
interface DeployArgs {
|
||||
instruction: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Deploy tool that spawns a subagent to handle deployment.
|
||||
* This tool auto-executes and the actual work is done by the deploy subagent.
|
||||
* The subagent's output is streamed as nested content under this tool call.
|
||||
*/
|
||||
export class DeployClientTool extends BaseClientTool {
|
||||
static readonly id = 'deploy'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, DeployClientTool.id, DeployClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Deploying', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Deploying', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Deploying', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Deployed', icon: Rocket },
|
||||
[ClientToolCallState.error]: { text: 'Failed to deploy', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped deploy', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted deploy', icon: XCircle },
|
||||
},
|
||||
uiConfig: {
|
||||
subagent: {
|
||||
streamingLabel: 'Deploying',
|
||||
completedLabel: 'Deployed',
|
||||
shouldCollapse: true,
|
||||
outputArtifacts: [],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the deploy tool.
|
||||
* This just marks the tool as executing - the actual deploy work is done server-side
|
||||
* by the deploy subagent, and its output is streamed as subagent events.
|
||||
*/
|
||||
async execute(_args?: DeployArgs): Promise<void> {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(DeployClientTool.id, DeployClientTool.metadata.uiConfig!)
|
||||
@@ -1,61 +0,0 @@
|
||||
import { Loader2, Pencil, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
|
||||
interface EditArgs {
|
||||
instruction: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Edit tool that spawns a subagent to apply code/workflow edits.
|
||||
* This tool auto-executes and the actual work is done by the edit subagent.
|
||||
* The subagent's output is streamed as nested content under this tool call.
|
||||
*/
|
||||
export class EditClientTool extends BaseClientTool {
|
||||
static readonly id = 'edit'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, EditClientTool.id, EditClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Editing', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Editing', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Editing', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Edited', icon: Pencil },
|
||||
[ClientToolCallState.error]: { text: 'Failed to apply edit', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped edit', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted edit', icon: XCircle },
|
||||
},
|
||||
uiConfig: {
|
||||
isSpecial: true,
|
||||
subagent: {
|
||||
streamingLabel: 'Editing',
|
||||
completedLabel: 'Edited',
|
||||
shouldCollapse: false, // Edit subagent stays expanded
|
||||
outputArtifacts: ['edit_summary'],
|
||||
hideThinkingText: true, // We show WorkflowEditSummary instead
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the edit tool.
|
||||
* This just marks the tool as executing - the actual edit work is done server-side
|
||||
* by the edit subagent, and its output is streamed as subagent events.
|
||||
*/
|
||||
async execute(_args?: EditArgs): Promise<void> {
|
||||
// Immediately transition to executing state - no user confirmation needed
|
||||
this.setState(ClientToolCallState.executing)
|
||||
// The tool result will come from the server via tool_result event
|
||||
// when the edit subagent completes its work
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(EditClientTool.id, EditClientTool.metadata.uiConfig!)
|
||||
@@ -1,56 +0,0 @@
|
||||
import { ClipboardCheck, Loader2, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
|
||||
interface EvaluateArgs {
|
||||
instruction: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Evaluate tool that spawns a subagent to evaluate workflows or outputs.
|
||||
* This tool auto-executes and the actual work is done by the evaluate subagent.
|
||||
* The subagent's output is streamed as nested content under this tool call.
|
||||
*/
|
||||
export class EvaluateClientTool extends BaseClientTool {
|
||||
static readonly id = 'evaluate'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, EvaluateClientTool.id, EvaluateClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Evaluating', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Evaluating', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Evaluating', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Evaluated', icon: ClipboardCheck },
|
||||
[ClientToolCallState.error]: { text: 'Failed to evaluate', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped evaluation', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted evaluation', icon: XCircle },
|
||||
},
|
||||
uiConfig: {
|
||||
subagent: {
|
||||
streamingLabel: 'Evaluating',
|
||||
completedLabel: 'Evaluated',
|
||||
shouldCollapse: true,
|
||||
outputArtifacts: [],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the evaluate tool.
|
||||
* This just marks the tool as executing - the actual evaluation work is done server-side
|
||||
* by the evaluate subagent, and its output is streamed as subagent events.
|
||||
*/
|
||||
async execute(_args?: EvaluateArgs): Promise<void> {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(EvaluateClientTool.id, EvaluateClientTool.metadata.uiConfig!)
|
||||
@@ -1,53 +0,0 @@
|
||||
import { FileText, Loader2, MinusCircle, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
|
||||
export class GetPageContentsClientTool extends BaseClientTool {
|
||||
static readonly id = 'get_page_contents'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, GetPageContentsClientTool.id, GetPageContentsClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Getting page contents', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Getting page contents', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Getting page contents', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Retrieved page contents', icon: FileText },
|
||||
[ClientToolCallState.error]: { text: 'Failed to get page contents', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted getting page contents', icon: MinusCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped getting page contents', icon: MinusCircle },
|
||||
},
|
||||
interrupt: undefined,
|
||||
getDynamicText: (params, state) => {
|
||||
if (params?.urls && Array.isArray(params.urls) && params.urls.length > 0) {
|
||||
const firstUrl = String(params.urls[0])
|
||||
const count = params.urls.length
|
||||
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return count > 1 ? `Retrieved ${count} pages` : `Retrieved ${firstUrl}`
|
||||
case ClientToolCallState.executing:
|
||||
case ClientToolCallState.generating:
|
||||
case ClientToolCallState.pending:
|
||||
return count > 1 ? `Getting ${count} pages` : `Getting ${firstUrl}`
|
||||
case ClientToolCallState.error:
|
||||
return count > 1 ? `Failed to get ${count} pages` : `Failed to get ${firstUrl}`
|
||||
case ClientToolCallState.aborted:
|
||||
return count > 1 ? `Aborted getting ${count} pages` : `Aborted getting ${firstUrl}`
|
||||
case ClientToolCallState.rejected:
|
||||
return count > 1 ? `Skipped getting ${count} pages` : `Skipped getting ${firstUrl}`
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async execute(): Promise<void> {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,56 +0,0 @@
|
||||
import { Info, Loader2, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
|
||||
interface InfoArgs {
|
||||
instruction: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Info tool that spawns a subagent to retrieve information.
|
||||
* This tool auto-executes and the actual work is done by the info subagent.
|
||||
* The subagent's output is streamed as nested content under this tool call.
|
||||
*/
|
||||
export class InfoClientTool extends BaseClientTool {
|
||||
static readonly id = 'info'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, InfoClientTool.id, InfoClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Getting info', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Getting info', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Getting info', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Retrieved info', icon: Info },
|
||||
[ClientToolCallState.error]: { text: 'Failed to get info', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped info', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted info', icon: XCircle },
|
||||
},
|
||||
uiConfig: {
|
||||
subagent: {
|
||||
streamingLabel: 'Getting info',
|
||||
completedLabel: 'Info retrieved',
|
||||
shouldCollapse: true,
|
||||
outputArtifacts: [],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the info tool.
|
||||
* This just marks the tool as executing - the actual info work is done server-side
|
||||
* by the info subagent, and its output is streamed as subagent events.
|
||||
*/
|
||||
async execute(_args?: InfoArgs): Promise<void> {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(InfoClientTool.id, InfoClientTool.metadata.uiConfig!)
|
||||
@@ -1,56 +0,0 @@
|
||||
import { BookOpen, Loader2, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
|
||||
interface KnowledgeArgs {
|
||||
instruction: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Knowledge tool that spawns a subagent to manage knowledge bases.
|
||||
* This tool auto-executes and the actual work is done by the knowledge subagent.
|
||||
* The subagent's output is streamed as nested content under this tool call.
|
||||
*/
|
||||
export class KnowledgeClientTool extends BaseClientTool {
|
||||
static readonly id = 'knowledge'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, KnowledgeClientTool.id, KnowledgeClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Managing knowledge', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Managing knowledge', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Managing knowledge', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Managed knowledge', icon: BookOpen },
|
||||
[ClientToolCallState.error]: { text: 'Failed to manage knowledge', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped knowledge', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted knowledge', icon: XCircle },
|
||||
},
|
||||
uiConfig: {
|
||||
subagent: {
|
||||
streamingLabel: 'Managing knowledge',
|
||||
completedLabel: 'Knowledge managed',
|
||||
shouldCollapse: true,
|
||||
outputArtifacts: [],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the knowledge tool.
|
||||
* This just marks the tool as executing - the actual knowledge search work is done server-side
|
||||
* by the knowledge subagent, and its output is streamed as subagent events.
|
||||
*/
|
||||
async execute(_args?: KnowledgeArgs): Promise<void> {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(KnowledgeClientTool.id, KnowledgeClientTool.metadata.uiConfig!)
|
||||
@@ -1,127 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { Globe2, Loader2, MinusCircle, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas'
|
||||
|
||||
interface MakeApiRequestArgs {
|
||||
url: string
|
||||
method: 'GET' | 'POST' | 'PUT'
|
||||
queryParams?: Record<string, string | number | boolean>
|
||||
headers?: Record<string, string>
|
||||
body?: any
|
||||
}
|
||||
|
||||
export class MakeApiRequestClientTool extends BaseClientTool {
|
||||
static readonly id = 'make_api_request'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, MakeApiRequestClientTool.id, MakeApiRequestClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Preparing API request', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Review API request', icon: Globe2 },
|
||||
[ClientToolCallState.executing]: { text: 'Executing API request', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Completed API request', icon: Globe2 },
|
||||
[ClientToolCallState.error]: { text: 'Failed to execute API request', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped API request', icon: MinusCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted API request', icon: XCircle },
|
||||
},
|
||||
interrupt: {
|
||||
accept: { text: 'Execute', icon: Globe2 },
|
||||
reject: { text: 'Skip', icon: MinusCircle },
|
||||
},
|
||||
uiConfig: {
|
||||
interrupt: {
|
||||
accept: { text: 'Execute', icon: Globe2 },
|
||||
reject: { text: 'Skip', icon: MinusCircle },
|
||||
showAllowOnce: true,
|
||||
showAllowAlways: true,
|
||||
},
|
||||
paramsTable: {
|
||||
columns: [
|
||||
{ key: 'method', label: 'Method', width: '26%', editable: true, mono: true },
|
||||
{ key: 'url', label: 'Endpoint', width: '74%', editable: true, mono: true },
|
||||
],
|
||||
extractRows: (params) => {
|
||||
return [['request', (params.method || 'GET').toUpperCase(), params.url || '']]
|
||||
},
|
||||
},
|
||||
},
|
||||
getDynamicText: (params, state) => {
|
||||
if (params?.url && typeof params.url === 'string') {
|
||||
const method = params.method || 'GET'
|
||||
let url = params.url
|
||||
|
||||
// Extract domain from URL for cleaner display
|
||||
try {
|
||||
const urlObj = new URL(url)
|
||||
url = urlObj.hostname + urlObj.pathname
|
||||
} catch {
|
||||
// Use URL as-is if parsing fails
|
||||
}
|
||||
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `${method} ${url} complete`
|
||||
case ClientToolCallState.executing:
|
||||
return `${method} ${url}`
|
||||
case ClientToolCallState.generating:
|
||||
return `Preparing ${method} ${url}`
|
||||
case ClientToolCallState.pending:
|
||||
return `Review ${method} ${url}`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed ${method} ${url}`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped ${method} ${url}`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted ${method} ${url}`
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async handleReject(): Promise<void> {
|
||||
await super.handleReject()
|
||||
this.setState(ClientToolCallState.rejected)
|
||||
}
|
||||
|
||||
async handleAccept(args?: MakeApiRequestArgs): Promise<void> {
|
||||
const logger = createLogger('MakeApiRequestClientTool')
|
||||
try {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
const res = await fetch('/api/copilot/execute-copilot-server-tool', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ toolName: 'make_api_request', payload: args || {} }),
|
||||
})
|
||||
if (!res.ok) {
|
||||
const txt = await res.text().catch(() => '')
|
||||
throw new Error(txt || `Server error (${res.status})`)
|
||||
}
|
||||
const json = await res.json()
|
||||
const parsed = ExecuteResponseSuccessSchema.parse(json)
|
||||
this.setState(ClientToolCallState.success)
|
||||
await this.markToolComplete(200, 'API request executed', parsed.result)
|
||||
this.setState(ClientToolCallState.success)
|
||||
} catch (e: any) {
|
||||
logger.error('execute failed', { message: e?.message })
|
||||
this.setState(ClientToolCallState.error)
|
||||
await this.markToolComplete(500, e?.message || 'API request failed')
|
||||
}
|
||||
}
|
||||
|
||||
async execute(args?: MakeApiRequestArgs): Promise<void> {
|
||||
await this.handleAccept(args)
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(MakeApiRequestClientTool.id, MakeApiRequestClientTool.metadata.uiConfig!)
|
||||
@@ -1,64 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { Loader2, MinusCircle, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
|
||||
interface MarkTodoInProgressArgs {
|
||||
id?: string
|
||||
todoId?: string
|
||||
}
|
||||
|
||||
export class MarkTodoInProgressClientTool extends BaseClientTool {
|
||||
static readonly id = 'mark_todo_in_progress'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, MarkTodoInProgressClientTool.id, MarkTodoInProgressClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Marking todo in progress', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Marking todo in progress', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Marking todo in progress', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Marked todo in progress', icon: Loader2 },
|
||||
[ClientToolCallState.error]: { text: 'Failed to mark in progress', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted marking in progress', icon: MinusCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped marking in progress', icon: MinusCircle },
|
||||
},
|
||||
}
|
||||
|
||||
async execute(args?: MarkTodoInProgressArgs): Promise<void> {
|
||||
const logger = createLogger('MarkTodoInProgressClientTool')
|
||||
try {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
|
||||
const todoId = args?.id || args?.todoId
|
||||
if (!todoId) {
|
||||
this.setState(ClientToolCallState.error)
|
||||
await this.markToolComplete(400, 'Missing todo id')
|
||||
return
|
||||
}
|
||||
|
||||
try {
|
||||
const { useCopilotStore } = await import('@/stores/panel/copilot/store')
|
||||
const store = useCopilotStore.getState()
|
||||
if (store.updatePlanTodoStatus) {
|
||||
store.updatePlanTodoStatus(todoId, 'executing')
|
||||
}
|
||||
} catch (e) {
|
||||
logger.warn('Failed to update todo status in store', { message: (e as any)?.message })
|
||||
}
|
||||
|
||||
this.setState(ClientToolCallState.success)
|
||||
await this.markToolComplete(200, 'Todo marked in progress', { todoId })
|
||||
this.setState(ClientToolCallState.success)
|
||||
} catch (e: any) {
|
||||
logger.error('execute failed', { message: e?.message })
|
||||
this.setState(ClientToolCallState.error)
|
||||
await this.markToolComplete(500, e?.message || 'Failed to mark todo in progress')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,174 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { CheckCircle, Loader2, MinusCircle, PlugZap, X, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { OAUTH_PROVIDERS, type OAuthServiceConfig } from '@/lib/oauth'
|
||||
|
||||
const logger = createLogger('OAuthRequestAccessClientTool')
|
||||
|
||||
interface OAuthRequestAccessArgs {
|
||||
providerName?: string
|
||||
}
|
||||
|
||||
interface ResolvedServiceInfo {
|
||||
serviceId: string
|
||||
providerId: string
|
||||
service: OAuthServiceConfig
|
||||
}
|
||||
|
||||
/**
|
||||
* Finds the service configuration from a provider name.
|
||||
* The providerName should match the exact `name` field returned by get_credentials tool's notConnected services.
|
||||
*/
|
||||
function findServiceByName(providerName: string): ResolvedServiceInfo | null {
|
||||
const normalizedName = providerName.toLowerCase().trim()
|
||||
|
||||
// First pass: exact match (case-insensitive)
|
||||
for (const [, providerConfig] of Object.entries(OAUTH_PROVIDERS)) {
|
||||
for (const [serviceId, service] of Object.entries(providerConfig.services)) {
|
||||
if (service.name.toLowerCase() === normalizedName) {
|
||||
return { serviceId, providerId: service.providerId, service }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Second pass: partial match as fallback for flexibility
|
||||
for (const [, providerConfig] of Object.entries(OAUTH_PROVIDERS)) {
|
||||
for (const [serviceId, service] of Object.entries(providerConfig.services)) {
|
||||
if (
|
||||
service.name.toLowerCase().includes(normalizedName) ||
|
||||
normalizedName.includes(service.name.toLowerCase())
|
||||
) {
|
||||
return { serviceId, providerId: service.providerId, service }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
export interface OAuthConnectEventDetail {
|
||||
providerName: string
|
||||
serviceId: string
|
||||
providerId: string
|
||||
requiredScopes: string[]
|
||||
newScopes?: string[]
|
||||
}
|
||||
|
||||
export class OAuthRequestAccessClientTool extends BaseClientTool {
|
||||
static readonly id = 'oauth_request_access'
|
||||
|
||||
private providerName?: string
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, OAuthRequestAccessClientTool.id, OAuthRequestAccessClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Requesting integration access', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Requesting integration access', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Requesting integration access', icon: Loader2 },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped integration access', icon: MinusCircle },
|
||||
[ClientToolCallState.success]: { text: 'Requested integration access', icon: CheckCircle },
|
||||
[ClientToolCallState.error]: { text: 'Failed to request integration access', icon: X },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted integration access request', icon: XCircle },
|
||||
},
|
||||
interrupt: {
|
||||
accept: { text: 'Connect', icon: PlugZap },
|
||||
reject: { text: 'Skip', icon: MinusCircle },
|
||||
},
|
||||
getDynamicText: (params, state) => {
|
||||
if (params.providerName) {
|
||||
const name = params.providerName
|
||||
switch (state) {
|
||||
case ClientToolCallState.generating:
|
||||
case ClientToolCallState.pending:
|
||||
case ClientToolCallState.executing:
|
||||
return `Requesting ${name} access`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped ${name} access`
|
||||
case ClientToolCallState.success:
|
||||
return `Requested ${name} access`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to request ${name} access`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted ${name} access request`
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async handleAccept(args?: OAuthRequestAccessArgs): Promise<void> {
|
||||
try {
|
||||
if (args?.providerName) {
|
||||
this.providerName = args.providerName
|
||||
}
|
||||
|
||||
if (!this.providerName) {
|
||||
logger.error('No provider name provided')
|
||||
this.setState(ClientToolCallState.error)
|
||||
await this.markToolComplete(400, 'No provider name specified')
|
||||
return
|
||||
}
|
||||
|
||||
// Find the service by name
|
||||
const serviceInfo = findServiceByName(this.providerName)
|
||||
if (!serviceInfo) {
|
||||
logger.error('Could not find OAuth service for provider', {
|
||||
providerName: this.providerName,
|
||||
})
|
||||
this.setState(ClientToolCallState.error)
|
||||
await this.markToolComplete(400, `Unknown provider: ${this.providerName}`)
|
||||
return
|
||||
}
|
||||
|
||||
const { serviceId, providerId, service } = serviceInfo
|
||||
|
||||
logger.info('Opening OAuth connect modal', {
|
||||
providerName: this.providerName,
|
||||
serviceId,
|
||||
providerId,
|
||||
})
|
||||
|
||||
// Move to executing state
|
||||
this.setState(ClientToolCallState.executing)
|
||||
|
||||
// Dispatch event to open the OAuth modal (same pattern as open-settings)
|
||||
window.dispatchEvent(
|
||||
new CustomEvent<OAuthConnectEventDetail>('open-oauth-connect', {
|
||||
detail: {
|
||||
providerName: this.providerName,
|
||||
serviceId,
|
||||
providerId,
|
||||
requiredScopes: service.scopes || [],
|
||||
},
|
||||
})
|
||||
)
|
||||
|
||||
// Mark as success - the user opened the prompt, but connection is not guaranteed
|
||||
this.setState(ClientToolCallState.success)
|
||||
await this.markToolComplete(
|
||||
200,
|
||||
`The user opened the ${this.providerName} connection prompt and may have connected. Check the connected integrations to verify the connection status.`
|
||||
)
|
||||
} catch (e) {
|
||||
logger.error('Failed to open OAuth connect modal', { error: e })
|
||||
this.setState(ClientToolCallState.error)
|
||||
await this.markToolComplete(500, 'Failed to open OAuth connection dialog')
|
||||
}
|
||||
}
|
||||
|
||||
async handleReject(): Promise<void> {
|
||||
await super.handleReject()
|
||||
this.setState(ClientToolCallState.rejected)
|
||||
}
|
||||
|
||||
async execute(args?: OAuthRequestAccessArgs): Promise<void> {
|
||||
await this.handleAccept(args)
|
||||
}
|
||||
}
|
||||
@@ -1,59 +0,0 @@
|
||||
import { ListTodo, Loader2, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
|
||||
interface PlanArgs {
|
||||
request: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Plan tool that spawns a subagent to plan an approach.
|
||||
* This tool auto-executes and the actual work is done by the plan subagent.
|
||||
* The subagent's output is streamed as nested content under this tool call.
|
||||
*/
|
||||
export class PlanClientTool extends BaseClientTool {
|
||||
static readonly id = 'plan'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, PlanClientTool.id, PlanClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Planning', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Planning', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Planning', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Planned', icon: ListTodo },
|
||||
[ClientToolCallState.error]: { text: 'Failed to plan', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped plan', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted plan', icon: XCircle },
|
||||
},
|
||||
uiConfig: {
|
||||
subagent: {
|
||||
streamingLabel: 'Planning',
|
||||
completedLabel: 'Planned',
|
||||
shouldCollapse: true,
|
||||
outputArtifacts: ['plan'],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the plan tool.
|
||||
* This just marks the tool as executing - the actual planning work is done server-side
|
||||
* by the plan subagent, and its output is streamed as subagent events.
|
||||
*/
|
||||
async execute(_args?: PlanArgs): Promise<void> {
|
||||
// Immediately transition to executing state - no user confirmation needed
|
||||
this.setState(ClientToolCallState.executing)
|
||||
// The tool result will come from the server via tool_result event
|
||||
// when the plan subagent completes its work
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(PlanClientTool.id, PlanClientTool.metadata.uiConfig!)
|
||||
@@ -1,76 +0,0 @@
|
||||
import { CheckCircle2, Loader2, MinusCircle, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
|
||||
export class RememberDebugClientTool extends BaseClientTool {
|
||||
static readonly id = 'remember_debug'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, RememberDebugClientTool.id, RememberDebugClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Validating fix', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Validating fix', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Validating fix', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Validated fix', icon: CheckCircle2 },
|
||||
[ClientToolCallState.error]: { text: 'Failed to validate', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted validation', icon: MinusCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped validation', icon: MinusCircle },
|
||||
},
|
||||
interrupt: undefined,
|
||||
getDynamicText: (params, state) => {
|
||||
const operation = params?.operation
|
||||
|
||||
if (operation === 'add' || operation === 'edit') {
|
||||
// For add/edit, show from problem or solution
|
||||
const text = params?.problem || params?.solution
|
||||
if (text && typeof text === 'string') {
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `Validated fix ${text}`
|
||||
case ClientToolCallState.executing:
|
||||
case ClientToolCallState.generating:
|
||||
case ClientToolCallState.pending:
|
||||
return `Validating fix ${text}`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to validate fix ${text}`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted validating fix ${text}`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped validating fix ${text}`
|
||||
}
|
||||
}
|
||||
} else if (operation === 'delete') {
|
||||
// For delete, show from problem or solution (or id as fallback)
|
||||
const text = params?.problem || params?.solution || params?.id
|
||||
if (text && typeof text === 'string') {
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `Adjusted fix ${text}`
|
||||
case ClientToolCallState.executing:
|
||||
case ClientToolCallState.generating:
|
||||
case ClientToolCallState.pending:
|
||||
return `Adjusting fix ${text}`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to adjust fix ${text}`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted adjusting fix ${text}`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped adjusting fix ${text}`
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async execute(): Promise<void> {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,56 +0,0 @@
|
||||
import { Loader2, Search, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
|
||||
interface ResearchArgs {
|
||||
instruction: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Research tool that spawns a subagent to research information.
|
||||
* This tool auto-executes and the actual work is done by the research subagent.
|
||||
* The subagent's output is streamed as nested content under this tool call.
|
||||
*/
|
||||
export class ResearchClientTool extends BaseClientTool {
|
||||
static readonly id = 'research'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, ResearchClientTool.id, ResearchClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Researching', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Researching', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Researching', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Researched', icon: Search },
|
||||
[ClientToolCallState.error]: { text: 'Failed to research', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped research', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted research', icon: XCircle },
|
||||
},
|
||||
uiConfig: {
|
||||
subagent: {
|
||||
streamingLabel: 'Researching',
|
||||
completedLabel: 'Researched',
|
||||
shouldCollapse: true,
|
||||
outputArtifacts: [],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the research tool.
|
||||
* This just marks the tool as executing - the actual research work is done server-side
|
||||
* by the research subagent, and its output is streamed as subagent events.
|
||||
*/
|
||||
async execute(_args?: ResearchArgs): Promise<void> {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(ResearchClientTool.id, ResearchClientTool.metadata.uiConfig!)
|
||||
@@ -1,52 +0,0 @@
|
||||
import { Globe, Loader2, MinusCircle, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
|
||||
export class ScrapePageClientTool extends BaseClientTool {
|
||||
static readonly id = 'scrape_page'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, ScrapePageClientTool.id, ScrapePageClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Scraping page', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Scraping page', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Scraping page', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Scraped page', icon: Globe },
|
||||
[ClientToolCallState.error]: { text: 'Failed to scrape page', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted scraping page', icon: MinusCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped scraping page', icon: MinusCircle },
|
||||
},
|
||||
interrupt: undefined,
|
||||
getDynamicText: (params, state) => {
|
||||
if (params?.url && typeof params.url === 'string') {
|
||||
const url = params.url
|
||||
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `Scraped ${url}`
|
||||
case ClientToolCallState.executing:
|
||||
case ClientToolCallState.generating:
|
||||
case ClientToolCallState.pending:
|
||||
return `Scraping ${url}`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to scrape ${url}`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted scraping ${url}`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped scraping ${url}`
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async execute(): Promise<void> {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,80 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { BookOpen, Loader2, MinusCircle, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas'
|
||||
|
||||
interface SearchDocumentationArgs {
|
||||
query: string
|
||||
topK?: number
|
||||
threshold?: number
|
||||
}
|
||||
|
||||
export class SearchDocumentationClientTool extends BaseClientTool {
|
||||
static readonly id = 'search_documentation'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, SearchDocumentationClientTool.id, SearchDocumentationClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Searching documentation', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Searching documentation', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Searching documentation', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Completed documentation search', icon: BookOpen },
|
||||
[ClientToolCallState.error]: { text: 'Failed to search docs', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted documentation search', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped documentation search', icon: MinusCircle },
|
||||
},
|
||||
getDynamicText: (params, state) => {
|
||||
if (params?.query && typeof params.query === 'string') {
|
||||
const query = params.query
|
||||
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `Searched docs for ${query}`
|
||||
case ClientToolCallState.executing:
|
||||
case ClientToolCallState.generating:
|
||||
case ClientToolCallState.pending:
|
||||
return `Searching docs for ${query}`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to search docs for ${query}`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted searching docs for ${query}`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped searching docs for ${query}`
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async execute(args?: SearchDocumentationArgs): Promise<void> {
|
||||
const logger = createLogger('SearchDocumentationClientTool')
|
||||
try {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
const res = await fetch('/api/copilot/execute-copilot-server-tool', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ toolName: 'search_documentation', payload: args || {} }),
|
||||
})
|
||||
if (!res.ok) {
|
||||
const txt = await res.text().catch(() => '')
|
||||
throw new Error(txt || `Server error (${res.status})`)
|
||||
}
|
||||
const json = await res.json()
|
||||
const parsed = ExecuteResponseSuccessSchema.parse(json)
|
||||
this.setState(ClientToolCallState.success)
|
||||
await this.markToolComplete(200, 'Documentation search complete', parsed.result)
|
||||
this.setState(ClientToolCallState.success)
|
||||
} catch (e: any) {
|
||||
logger.error('execute failed', { message: e?.message })
|
||||
this.setState(ClientToolCallState.error)
|
||||
await this.markToolComplete(500, e?.message || 'Documentation search failed')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
import { Bug, Loader2, MinusCircle, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
|
||||
export class SearchErrorsClientTool extends BaseClientTool {
|
||||
static readonly id = 'search_errors'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, SearchErrorsClientTool.id, SearchErrorsClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Debugging', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Debugging', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Debugging', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Debugged', icon: Bug },
|
||||
[ClientToolCallState.error]: { text: 'Failed to debug', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted debugging', icon: MinusCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped debugging', icon: MinusCircle },
|
||||
},
|
||||
interrupt: undefined,
|
||||
getDynamicText: (params, state) => {
|
||||
if (params?.query && typeof params.query === 'string') {
|
||||
const query = params.query
|
||||
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `Debugged ${query}`
|
||||
case ClientToolCallState.executing:
|
||||
case ClientToolCallState.generating:
|
||||
case ClientToolCallState.pending:
|
||||
return `Debugging ${query}`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to debug ${query}`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted debugging ${query}`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped debugging ${query}`
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async execute(): Promise<void> {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,50 +0,0 @@
|
||||
import { BookOpen, Loader2, MinusCircle, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
|
||||
export class SearchLibraryDocsClientTool extends BaseClientTool {
|
||||
static readonly id = 'search_library_docs'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, SearchLibraryDocsClientTool.id, SearchLibraryDocsClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Reading docs', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Reading docs', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Reading docs', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Read docs', icon: BookOpen },
|
||||
[ClientToolCallState.error]: { text: 'Failed to read docs', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted reading docs', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped reading docs', icon: MinusCircle },
|
||||
},
|
||||
getDynamicText: (params, state) => {
|
||||
const libraryName = params?.library_name
|
||||
if (libraryName && typeof libraryName === 'string') {
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `Read ${libraryName} docs`
|
||||
case ClientToolCallState.executing:
|
||||
case ClientToolCallState.generating:
|
||||
case ClientToolCallState.pending:
|
||||
return `Reading ${libraryName} docs`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to read ${libraryName} docs`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted reading ${libraryName} docs`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped reading ${libraryName} docs`
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async execute(): Promise<void> {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
import { Globe, Loader2, MinusCircle, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
|
||||
export class SearchOnlineClientTool extends BaseClientTool {
|
||||
static readonly id = 'search_online'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, SearchOnlineClientTool.id, SearchOnlineClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Searching online', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Searching online', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Searching online', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Completed online search', icon: Globe },
|
||||
[ClientToolCallState.error]: { text: 'Failed to search online', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped online search', icon: MinusCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted online search', icon: XCircle },
|
||||
},
|
||||
interrupt: undefined,
|
||||
getDynamicText: (params, state) => {
|
||||
if (params?.query && typeof params.query === 'string') {
|
||||
const query = params.query
|
||||
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `Searched online for ${query}`
|
||||
case ClientToolCallState.executing:
|
||||
case ClientToolCallState.generating:
|
||||
case ClientToolCallState.pending:
|
||||
return `Searching online for ${query}`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to search online for ${query}`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted searching online for ${query}`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped searching online for ${query}`
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async execute(): Promise<void> {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
import { Loader2, MinusCircle, Search, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
|
||||
export class SearchPatternsClientTool extends BaseClientTool {
|
||||
static readonly id = 'search_patterns'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, SearchPatternsClientTool.id, SearchPatternsClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Searching workflow patterns', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Searching workflow patterns', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Searching workflow patterns', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Found workflow patterns', icon: Search },
|
||||
[ClientToolCallState.error]: { text: 'Failed to search patterns', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted pattern search', icon: MinusCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped pattern search', icon: MinusCircle },
|
||||
},
|
||||
interrupt: undefined,
|
||||
getDynamicText: (params, state) => {
|
||||
if (params?.queries && Array.isArray(params.queries) && params.queries.length > 0) {
|
||||
const firstQuery = String(params.queries[0])
|
||||
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `Searched ${firstQuery}`
|
||||
case ClientToolCallState.executing:
|
||||
case ClientToolCallState.generating:
|
||||
case ClientToolCallState.pending:
|
||||
return `Searching ${firstQuery}`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to search ${firstQuery}`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted searching ${firstQuery}`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped searching ${firstQuery}`
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async execute(): Promise<void> {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,157 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { Loader2, MinusCircle, Moon, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
|
||||
/** Maximum sleep duration in seconds (3 minutes) */
|
||||
const MAX_SLEEP_SECONDS = 180
|
||||
|
||||
/** Track sleep start times for calculating elapsed time on wake */
|
||||
const sleepStartTimes: Record<string, number> = {}
|
||||
|
||||
interface SleepArgs {
|
||||
seconds?: number
|
||||
}
|
||||
|
||||
/**
|
||||
* Format seconds into a human-readable duration string
|
||||
*/
|
||||
function formatDuration(seconds: number): string {
|
||||
if (seconds >= 60) {
|
||||
return `${Math.round(seconds / 60)} minute${seconds >= 120 ? 's' : ''}`
|
||||
}
|
||||
return `${seconds} second${seconds !== 1 ? 's' : ''}`
|
||||
}
|
||||
|
||||
export class SleepClientTool extends BaseClientTool {
|
||||
static readonly id = 'sleep'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, SleepClientTool.id, SleepClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Preparing to sleep', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Sleeping', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Sleeping', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Finished sleeping', icon: Moon },
|
||||
[ClientToolCallState.error]: { text: 'Interrupted sleep', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped sleep', icon: MinusCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted sleep', icon: MinusCircle },
|
||||
[ClientToolCallState.background]: { text: 'Resumed', icon: Moon },
|
||||
},
|
||||
uiConfig: {
|
||||
secondaryAction: {
|
||||
text: 'Wake',
|
||||
title: 'Wake',
|
||||
variant: 'tertiary',
|
||||
showInStates: [ClientToolCallState.executing],
|
||||
targetState: ClientToolCallState.background,
|
||||
},
|
||||
},
|
||||
// No interrupt - auto-execute immediately
|
||||
getDynamicText: (params, state) => {
|
||||
const seconds = params?.seconds
|
||||
if (typeof seconds === 'number' && seconds > 0) {
|
||||
const displayTime = formatDuration(seconds)
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `Slept for ${displayTime}`
|
||||
case ClientToolCallState.executing:
|
||||
case ClientToolCallState.pending:
|
||||
return `Sleeping for ${displayTime}`
|
||||
case ClientToolCallState.generating:
|
||||
return `Preparing to sleep for ${displayTime}`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to sleep for ${displayTime}`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped sleeping for ${displayTime}`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted sleeping for ${displayTime}`
|
||||
case ClientToolCallState.background: {
|
||||
// Calculate elapsed time from when sleep started
|
||||
const elapsedSeconds = params?._elapsedSeconds
|
||||
if (typeof elapsedSeconds === 'number' && elapsedSeconds > 0) {
|
||||
return `Resumed after ${formatDuration(Math.round(elapsedSeconds))}`
|
||||
}
|
||||
return 'Resumed early'
|
||||
}
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Get elapsed seconds since sleep started
|
||||
*/
|
||||
getElapsedSeconds(): number {
|
||||
const startTime = sleepStartTimes[this.toolCallId]
|
||||
if (!startTime) return 0
|
||||
return (Date.now() - startTime) / 1000
|
||||
}
|
||||
|
||||
async handleReject(): Promise<void> {
|
||||
await super.handleReject()
|
||||
this.setState(ClientToolCallState.rejected)
|
||||
}
|
||||
|
||||
async handleAccept(args?: SleepArgs): Promise<void> {
|
||||
const logger = createLogger('SleepClientTool')
|
||||
|
||||
// Use a timeout slightly longer than max sleep (3 minutes + buffer)
|
||||
const timeoutMs = (MAX_SLEEP_SECONDS + 30) * 1000
|
||||
|
||||
await this.executeWithTimeout(async () => {
|
||||
const params = args || {}
|
||||
logger.debug('handleAccept() called', {
|
||||
toolCallId: this.toolCallId,
|
||||
state: this.getState(),
|
||||
hasArgs: !!args,
|
||||
seconds: params.seconds,
|
||||
})
|
||||
|
||||
// Validate and clamp seconds
|
||||
let seconds = typeof params.seconds === 'number' ? params.seconds : 0
|
||||
if (seconds < 0) seconds = 0
|
||||
if (seconds > MAX_SLEEP_SECONDS) seconds = MAX_SLEEP_SECONDS
|
||||
|
||||
logger.debug('Starting sleep', { seconds })
|
||||
|
||||
// Track start time for elapsed calculation
|
||||
sleepStartTimes[this.toolCallId] = Date.now()
|
||||
|
||||
this.setState(ClientToolCallState.executing)
|
||||
|
||||
try {
|
||||
// Sleep for the specified duration
|
||||
await new Promise((resolve) => setTimeout(resolve, seconds * 1000))
|
||||
|
||||
logger.debug('Sleep completed successfully')
|
||||
this.setState(ClientToolCallState.success)
|
||||
await this.markToolComplete(200, `Slept for ${seconds} seconds`)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
logger.error('Sleep failed', { error: message })
|
||||
this.setState(ClientToolCallState.error)
|
||||
await this.markToolComplete(500, message)
|
||||
} finally {
|
||||
// Clean up start time tracking
|
||||
delete sleepStartTimes[this.toolCallId]
|
||||
}
|
||||
}, timeoutMs)
|
||||
}
|
||||
|
||||
async execute(args?: SleepArgs): Promise<void> {
|
||||
// Auto-execute without confirmation - go straight to executing
|
||||
await this.handleAccept(args)
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(SleepClientTool.id, SleepClientTool.metadata.uiConfig!)
|
||||
@@ -1,56 +0,0 @@
|
||||
import { Loader2, Sparkles, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
|
||||
interface SuperagentArgs {
|
||||
instruction: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Superagent tool that spawns a powerful subagent for complex tasks.
|
||||
* This tool auto-executes and the actual work is done by the superagent.
|
||||
* The subagent's output is streamed as nested content under this tool call.
|
||||
*/
|
||||
export class SuperagentClientTool extends BaseClientTool {
|
||||
static readonly id = 'superagent'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, SuperagentClientTool.id, SuperagentClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Superagent working', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Superagent working', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Superagent working', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Superagent completed', icon: Sparkles },
|
||||
[ClientToolCallState.error]: { text: 'Superagent failed', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Superagent skipped', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Superagent aborted', icon: XCircle },
|
||||
},
|
||||
uiConfig: {
|
||||
subagent: {
|
||||
streamingLabel: 'Superagent working',
|
||||
completedLabel: 'Superagent completed',
|
||||
shouldCollapse: true,
|
||||
outputArtifacts: [],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the superagent tool.
|
||||
* This just marks the tool as executing - the actual work is done server-side
|
||||
* by the superagent, and its output is streamed as subagent events.
|
||||
*/
|
||||
async execute(_args?: SuperagentArgs): Promise<void> {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(SuperagentClientTool.id, SuperagentClientTool.metadata.uiConfig!)
|
||||
@@ -1,56 +0,0 @@
|
||||
import { FlaskConical, Loader2, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
|
||||
interface TestArgs {
|
||||
instruction: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Test tool that spawns a subagent to run tests.
|
||||
* This tool auto-executes and the actual work is done by the test subagent.
|
||||
* The subagent's output is streamed as nested content under this tool call.
|
||||
*/
|
||||
export class TestClientTool extends BaseClientTool {
|
||||
static readonly id = 'test'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, TestClientTool.id, TestClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Testing', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Testing', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Testing', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Tested', icon: FlaskConical },
|
||||
[ClientToolCallState.error]: { text: 'Failed to test', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped test', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted test', icon: XCircle },
|
||||
},
|
||||
uiConfig: {
|
||||
subagent: {
|
||||
streamingLabel: 'Testing',
|
||||
completedLabel: 'Tested',
|
||||
shouldCollapse: true,
|
||||
outputArtifacts: [],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the test tool.
|
||||
* This just marks the tool as executing - the actual test work is done server-side
|
||||
* by the test subagent, and its output is streamed as subagent events.
|
||||
*/
|
||||
async execute(_args?: TestArgs): Promise<void> {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(TestClientTool.id, TestClientTool.metadata.uiConfig!)
|
||||
@@ -1,56 +0,0 @@
|
||||
import { Compass, Loader2, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
|
||||
interface TourArgs {
|
||||
instruction: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Tour tool that spawns a subagent to guide the user.
|
||||
* This tool auto-executes and the actual work is done by the tour subagent.
|
||||
* The subagent's output is streamed as nested content under this tool call.
|
||||
*/
|
||||
export class TourClientTool extends BaseClientTool {
|
||||
static readonly id = 'tour'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, TourClientTool.id, TourClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Touring', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Touring', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Touring', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Completed tour', icon: Compass },
|
||||
[ClientToolCallState.error]: { text: 'Failed tour', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped tour', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted tour', icon: XCircle },
|
||||
},
|
||||
uiConfig: {
|
||||
subagent: {
|
||||
streamingLabel: 'Touring',
|
||||
completedLabel: 'Tour complete',
|
||||
shouldCollapse: true,
|
||||
outputArtifacts: [],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the tour tool.
|
||||
* This just marks the tool as executing - the actual tour work is done server-side
|
||||
* by the tour subagent, and its output is streamed as subagent events.
|
||||
*/
|
||||
async execute(_args?: TourArgs): Promise<void> {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(TourClientTool.id, TourClientTool.metadata.uiConfig!)
|
||||
@@ -1,56 +0,0 @@
|
||||
import { GitBranch, Loader2, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
|
||||
interface WorkflowArgs {
|
||||
instruction: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Workflow tool that spawns a subagent to manage workflows.
|
||||
* This tool auto-executes and the actual work is done by the workflow subagent.
|
||||
* The subagent's output is streamed as nested content under this tool call.
|
||||
*/
|
||||
export class WorkflowClientTool extends BaseClientTool {
|
||||
static readonly id = 'workflow'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, WorkflowClientTool.id, WorkflowClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Managing workflow', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Managing workflow', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Managing workflow', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Managed workflow', icon: GitBranch },
|
||||
[ClientToolCallState.error]: { text: 'Failed to manage workflow', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped workflow', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted workflow', icon: XCircle },
|
||||
},
|
||||
uiConfig: {
|
||||
subagent: {
|
||||
streamingLabel: 'Managing workflow',
|
||||
completedLabel: 'Workflow managed',
|
||||
shouldCollapse: true,
|
||||
outputArtifacts: [],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the workflow tool.
|
||||
* This just marks the tool as executing - the actual workflow work is done server-side
|
||||
* by the workflow subagent, and its output is streamed as subagent events.
|
||||
*/
|
||||
async execute(_args?: WorkflowArgs): Promise<void> {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(WorkflowClientTool.id, WorkflowClientTool.metadata.uiConfig!)
|
||||
@@ -1,34 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import type { ClientToolDefinition, ToolExecutionContext } from '@/lib/copilot/tools/client/types'
|
||||
|
||||
const logger = createLogger('ClientToolRegistry')
|
||||
|
||||
const tools: Record<string, ClientToolDefinition<any>> = {}
|
||||
|
||||
export function registerTool(def: ClientToolDefinition<any>) {
|
||||
tools[def.name] = def
|
||||
}
|
||||
|
||||
export function getTool(name: string): ClientToolDefinition<any> | undefined {
|
||||
return tools[name]
|
||||
}
|
||||
|
||||
export function createExecutionContext(params: {
|
||||
toolCallId: string
|
||||
toolName: string
|
||||
}): ToolExecutionContext {
|
||||
const { toolCallId, toolName } = params
|
||||
return {
|
||||
toolCallId,
|
||||
toolName,
|
||||
log: (level, message, extra) => {
|
||||
try {
|
||||
logger[level](message, { toolCallId, toolName, ...(extra || {}) })
|
||||
} catch {}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
export function getRegisteredTools(): Record<string, ClientToolDefinition<any>> {
|
||||
return { ...tools }
|
||||
}
|
||||
2604
apps/sim/lib/copilot/tools/client/tool-display-registry.ts
Normal file
2604
apps/sim/lib/copilot/tools/client/tool-display-registry.ts
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,33 +0,0 @@
|
||||
import type { BaseClientToolMetadata } from '@/lib/copilot/tools/client/base-tool'
|
||||
import { ClientToolCallState } from '@/lib/copilot/tools/client/base-tool'
|
||||
|
||||
export interface ToolExecutionContext {
|
||||
toolCallId: string
|
||||
toolName: string
|
||||
// Logging only; tools must not mutate store state directly
|
||||
log: (
|
||||
level: 'debug' | 'info' | 'warn' | 'error',
|
||||
message: string,
|
||||
extra?: Record<string, any>
|
||||
) => void
|
||||
}
|
||||
|
||||
export interface ToolRunResult {
|
||||
status: number
|
||||
message?: any
|
||||
data?: any
|
||||
}
|
||||
|
||||
export interface ClientToolDefinition<Args = any> {
|
||||
name: string
|
||||
metadata?: BaseClientToolMetadata
|
||||
// Return true if this tool requires user confirmation before execution
|
||||
hasInterrupt?: boolean | ((args?: Args) => boolean)
|
||||
// Main execution entry point. Returns a result for the store to handle.
|
||||
execute: (ctx: ToolExecutionContext, args?: Args) => Promise<ToolRunResult | undefined>
|
||||
// Optional accept/reject handlers for interrupt flows
|
||||
accept?: (ctx: ToolExecutionContext, args?: Args) => Promise<ToolRunResult | undefined>
|
||||
reject?: (ctx: ToolExecutionContext, args?: Args) => Promise<ToolRunResult | undefined>
|
||||
}
|
||||
|
||||
export { ClientToolCallState }
|
||||
@@ -1,238 +0,0 @@
|
||||
/**
|
||||
* UI Configuration Types for Copilot Tools
|
||||
*
|
||||
* This module defines the configuration interfaces that control how tools
|
||||
* are rendered in the tool-call component. All UI behavior should be defined
|
||||
* here rather than hardcoded in the rendering component.
|
||||
*/
|
||||
import type { LucideIcon } from 'lucide-react'
|
||||
import type { ClientToolCallState } from './base-tool'
|
||||
|
||||
/**
|
||||
* Configuration for a params table column
|
||||
*/
|
||||
export interface ParamsTableColumn {
|
||||
/** Key to extract from params */
|
||||
key: string
|
||||
/** Display label for the column header */
|
||||
label: string
|
||||
/** Width as percentage or CSS value */
|
||||
width?: string
|
||||
/** Whether values in this column are editable */
|
||||
editable?: boolean
|
||||
/** Whether to use monospace font */
|
||||
mono?: boolean
|
||||
/** Whether to mask the value (for passwords) */
|
||||
masked?: boolean
|
||||
}
|
||||
|
||||
/**
|
||||
* Configuration for params table rendering
|
||||
*/
|
||||
export interface ParamsTableConfig {
|
||||
/** Column definitions */
|
||||
columns: ParamsTableColumn[]
|
||||
/**
|
||||
* Extract rows from tool params.
|
||||
* Returns array of [key, ...cellValues] for each row.
|
||||
*/
|
||||
extractRows: (params: Record<string, any>) => Array<[string, ...any[]]>
|
||||
/**
|
||||
* Optional: Update params when a cell is edited.
|
||||
* Returns the updated params object.
|
||||
*/
|
||||
updateCell?: (
|
||||
params: Record<string, any>,
|
||||
rowKey: string,
|
||||
columnKey: string,
|
||||
newValue: any
|
||||
) => Record<string, any>
|
||||
}
|
||||
|
||||
/**
|
||||
* Configuration for secondary action button (like "Move to Background")
|
||||
*/
|
||||
export interface SecondaryActionConfig {
|
||||
/** Button text */
|
||||
text: string
|
||||
/** Button title/tooltip */
|
||||
title?: string
|
||||
/** Button variant */
|
||||
variant?: 'tertiary' | 'default' | 'outline'
|
||||
/** States in which to show this button */
|
||||
showInStates: ClientToolCallState[]
|
||||
/**
|
||||
* Message to send when the action is triggered.
|
||||
* Used by markToolComplete.
|
||||
*/
|
||||
completionMessage?: string
|
||||
/**
|
||||
* Target state after action.
|
||||
* If not provided, defaults to 'background'.
|
||||
*/
|
||||
targetState?: ClientToolCallState
|
||||
}
|
||||
|
||||
/**
|
||||
* Configuration for subagent tools (tools that spawn subagents)
|
||||
*/
|
||||
export interface SubagentConfig {
|
||||
/** Label shown while streaming (e.g., "Planning", "Editing") */
|
||||
streamingLabel: string
|
||||
/** Label shown when complete (e.g., "Planned", "Edited") */
|
||||
completedLabel: string
|
||||
/**
|
||||
* Whether the content should collapse when streaming ends.
|
||||
* Default: true
|
||||
*/
|
||||
shouldCollapse?: boolean
|
||||
/**
|
||||
* Output artifacts that should NOT be collapsed.
|
||||
* These are rendered outside the collapsible content.
|
||||
* Examples: 'plan' for PlanSteps, 'options' for OptionsSelector
|
||||
*/
|
||||
outputArtifacts?: Array<'plan' | 'options' | 'edit_summary'>
|
||||
/**
|
||||
* Whether this subagent renders its own specialized content
|
||||
* and the thinking text should be minimal or hidden.
|
||||
* Used for tools like 'edit' where we show WorkflowEditSummary instead.
|
||||
*/
|
||||
hideThinkingText?: boolean
|
||||
}
|
||||
|
||||
/**
|
||||
* Interrupt button configuration
|
||||
*/
|
||||
export interface InterruptButtonConfig {
|
||||
text: string
|
||||
icon: LucideIcon
|
||||
}
|
||||
|
||||
/**
|
||||
* Configuration for interrupt behavior (Run/Skip buttons)
|
||||
*/
|
||||
export interface InterruptConfig {
|
||||
/** Accept button config */
|
||||
accept: InterruptButtonConfig
|
||||
/** Reject button config */
|
||||
reject: InterruptButtonConfig
|
||||
/**
|
||||
* Whether to show "Allow Once" button (default accept behavior).
|
||||
* Default: true
|
||||
*/
|
||||
showAllowOnce?: boolean
|
||||
/**
|
||||
* Whether to show "Allow Always" button (auto-approve this tool in future).
|
||||
* Default: true for most tools
|
||||
*/
|
||||
showAllowAlways?: boolean
|
||||
}
|
||||
|
||||
/**
|
||||
* Complete UI configuration for a tool
|
||||
*/
|
||||
export interface ToolUIConfig {
|
||||
/**
|
||||
* Whether this is a "special" tool that gets gradient styling.
|
||||
* Used for workflow operation tools like edit_workflow, build_workflow, etc.
|
||||
*/
|
||||
isSpecial?: boolean
|
||||
|
||||
/**
|
||||
* Interrupt configuration for tools that require user confirmation.
|
||||
* If not provided, tool auto-executes.
|
||||
*/
|
||||
interrupt?: InterruptConfig
|
||||
|
||||
/**
|
||||
* Secondary action button (like "Move to Background" for run_workflow)
|
||||
*/
|
||||
secondaryAction?: SecondaryActionConfig
|
||||
|
||||
/**
|
||||
* Configuration for rendering params as a table.
|
||||
* If provided, tool will show an expandable/inline table.
|
||||
*/
|
||||
paramsTable?: ParamsTableConfig
|
||||
|
||||
/**
|
||||
* Subagent configuration for tools that spawn subagents.
|
||||
* If provided, tool is treated as a subagent tool.
|
||||
*/
|
||||
subagent?: SubagentConfig
|
||||
|
||||
/**
|
||||
* Whether this tool should always show params expanded (not collapsible).
|
||||
* Used for tools like set_environment_variables that always show their table.
|
||||
*/
|
||||
alwaysExpanded?: boolean
|
||||
|
||||
/**
|
||||
* Custom component type for special rendering.
|
||||
* The tool-call component will use this to render specialized content.
|
||||
*/
|
||||
customRenderer?: 'code' | 'edit_summary' | 'none'
|
||||
}
|
||||
|
||||
/**
|
||||
* Registry of tool UI configurations.
|
||||
* Tools can register their UI config here for the tool-call component to use.
|
||||
*/
|
||||
const toolUIConfigs: Record<string, ToolUIConfig> = {}
|
||||
|
||||
/**
|
||||
* Register a tool's UI configuration
|
||||
*/
|
||||
export function registerToolUIConfig(toolName: string, config: ToolUIConfig): void {
|
||||
toolUIConfigs[toolName] = config
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a tool's UI configuration
|
||||
*/
|
||||
export function getToolUIConfig(toolName: string): ToolUIConfig | undefined {
|
||||
return toolUIConfigs[toolName]
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a tool is a subagent tool
|
||||
*/
|
||||
export function isSubagentTool(toolName: string): boolean {
|
||||
return !!toolUIConfigs[toolName]?.subagent
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a tool is a "special" tool (gets gradient styling)
|
||||
*/
|
||||
export function isSpecialTool(toolName: string): boolean {
|
||||
return !!toolUIConfigs[toolName]?.isSpecial
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a tool has interrupt (requires user confirmation)
|
||||
*/
|
||||
export function hasInterrupt(toolName: string): boolean {
|
||||
return !!toolUIConfigs[toolName]?.interrupt
|
||||
}
|
||||
|
||||
/**
|
||||
* Get subagent labels for a tool
|
||||
*/
|
||||
export function getSubagentLabels(
|
||||
toolName: string,
|
||||
isStreaming: boolean
|
||||
): { streaming: string; completed: string } | undefined {
|
||||
const config = toolUIConfigs[toolName]?.subagent
|
||||
if (!config) return undefined
|
||||
return {
|
||||
streaming: config.streamingLabel,
|
||||
completed: config.completedLabel,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all registered tool UI configs (for debugging)
|
||||
*/
|
||||
export function getAllToolUIConfigs(): Record<string, ToolUIConfig> {
|
||||
return { ...toolUIConfigs }
|
||||
}
|
||||
@@ -1,73 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { Key, Loader2, MinusCircle, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas'
|
||||
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
|
||||
|
||||
interface GetCredentialsArgs {
|
||||
userId?: string
|
||||
workflowId?: string
|
||||
}
|
||||
|
||||
export class GetCredentialsClientTool extends BaseClientTool {
|
||||
static readonly id = 'get_credentials'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, GetCredentialsClientTool.id, GetCredentialsClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: { text: 'Fetching connected integrations', icon: Loader2 },
|
||||
[ClientToolCallState.pending]: { text: 'Fetching connected integrations', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Fetching connected integrations', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Fetched connected integrations', icon: Key },
|
||||
[ClientToolCallState.error]: {
|
||||
text: 'Failed to fetch connected integrations',
|
||||
icon: XCircle,
|
||||
},
|
||||
[ClientToolCallState.aborted]: {
|
||||
text: 'Aborted fetching connected integrations',
|
||||
icon: MinusCircle,
|
||||
},
|
||||
[ClientToolCallState.rejected]: {
|
||||
text: 'Skipped fetching connected integrations',
|
||||
icon: MinusCircle,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
async execute(args?: GetCredentialsArgs): Promise<void> {
|
||||
const logger = createLogger('GetCredentialsClientTool')
|
||||
try {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
const payload: GetCredentialsArgs = { ...(args || {}) }
|
||||
if (!payload.workflowId && !payload.userId) {
|
||||
const { activeWorkflowId } = useWorkflowRegistry.getState()
|
||||
if (activeWorkflowId) payload.workflowId = activeWorkflowId
|
||||
}
|
||||
const res = await fetch('/api/copilot/execute-copilot-server-tool', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ toolName: 'get_credentials', payload }),
|
||||
})
|
||||
if (!res.ok) {
|
||||
const txt = await res.text().catch(() => '')
|
||||
throw new Error(txt || `Server error (${res.status})`)
|
||||
}
|
||||
const json = await res.json()
|
||||
const parsed = ExecuteResponseSuccessSchema.parse(json)
|
||||
this.setState(ClientToolCallState.success)
|
||||
await this.markToolComplete(200, 'Connected integrations fetched', parsed.result)
|
||||
this.setState(ClientToolCallState.success)
|
||||
} catch (e: any) {
|
||||
logger.error('execute failed', { message: e?.message })
|
||||
this.setState(ClientToolCallState.error)
|
||||
await this.markToolComplete(500, e?.message || 'Failed to fetch connected integrations')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,157 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { Loader2, Settings2, X, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas'
|
||||
import { useEnvironmentStore } from '@/stores/settings/environment'
|
||||
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
|
||||
|
||||
interface SetEnvArgs {
|
||||
variables: Record<string, string>
|
||||
workflowId?: string
|
||||
}
|
||||
|
||||
export class SetEnvironmentVariablesClientTool extends BaseClientTool {
|
||||
static readonly id = 'set_environment_variables'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(
|
||||
toolCallId,
|
||||
SetEnvironmentVariablesClientTool.id,
|
||||
SetEnvironmentVariablesClientTool.metadata
|
||||
)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: {
|
||||
text: 'Preparing to set environment variables',
|
||||
icon: Loader2,
|
||||
},
|
||||
[ClientToolCallState.pending]: { text: 'Set environment variables?', icon: Settings2 },
|
||||
[ClientToolCallState.executing]: { text: 'Setting environment variables', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Set environment variables', icon: Settings2 },
|
||||
[ClientToolCallState.error]: { text: 'Failed to set environment variables', icon: X },
|
||||
[ClientToolCallState.aborted]: {
|
||||
text: 'Aborted setting environment variables',
|
||||
icon: XCircle,
|
||||
},
|
||||
[ClientToolCallState.rejected]: {
|
||||
text: 'Skipped setting environment variables',
|
||||
icon: XCircle,
|
||||
},
|
||||
},
|
||||
interrupt: {
|
||||
accept: { text: 'Apply', icon: Settings2 },
|
||||
reject: { text: 'Skip', icon: XCircle },
|
||||
},
|
||||
uiConfig: {
|
||||
alwaysExpanded: true,
|
||||
interrupt: {
|
||||
accept: { text: 'Apply', icon: Settings2 },
|
||||
reject: { text: 'Skip', icon: XCircle },
|
||||
showAllowOnce: true,
|
||||
showAllowAlways: true,
|
||||
},
|
||||
paramsTable: {
|
||||
columns: [
|
||||
{ key: 'name', label: 'Variable', width: '36%', editable: true },
|
||||
{ key: 'value', label: 'Value', width: '64%', editable: true, mono: true },
|
||||
],
|
||||
extractRows: (params) => {
|
||||
const variables = params.variables || {}
|
||||
const entries = Array.isArray(variables)
|
||||
? variables.map((v: any, i: number) => [String(i), v.name || `var_${i}`, v.value || ''])
|
||||
: Object.entries(variables).map(([key, val]) => {
|
||||
if (typeof val === 'object' && val !== null && 'value' in (val as any)) {
|
||||
return [key, key, (val as any).value]
|
||||
}
|
||||
return [key, key, val]
|
||||
})
|
||||
return entries as Array<[string, ...any[]]>
|
||||
},
|
||||
},
|
||||
},
|
||||
getDynamicText: (params, state) => {
|
||||
if (params?.variables && typeof params.variables === 'object') {
|
||||
const count = Object.keys(params.variables).length
|
||||
const varText = count === 1 ? 'variable' : 'variables'
|
||||
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `Set ${count} ${varText}`
|
||||
case ClientToolCallState.executing:
|
||||
return `Setting ${count} ${varText}`
|
||||
case ClientToolCallState.generating:
|
||||
return `Preparing to set ${count} ${varText}`
|
||||
case ClientToolCallState.pending:
|
||||
return `Set ${count} ${varText}?`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to set ${count} ${varText}`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted setting ${count} ${varText}`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped setting ${count} ${varText}`
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async handleReject(): Promise<void> {
|
||||
await super.handleReject()
|
||||
this.setState(ClientToolCallState.rejected)
|
||||
}
|
||||
|
||||
async handleAccept(args?: SetEnvArgs): Promise<void> {
|
||||
const logger = createLogger('SetEnvironmentVariablesClientTool')
|
||||
try {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
const payload: SetEnvArgs = { ...(args || { variables: {} }) }
|
||||
if (!payload.workflowId) {
|
||||
const { activeWorkflowId } = useWorkflowRegistry.getState()
|
||||
if (activeWorkflowId) payload.workflowId = activeWorkflowId
|
||||
}
|
||||
const res = await fetch('/api/copilot/execute-copilot-server-tool', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ toolName: 'set_environment_variables', payload }),
|
||||
})
|
||||
if (!res.ok) {
|
||||
const txt = await res.text().catch(() => '')
|
||||
throw new Error(txt || `Server error (${res.status})`)
|
||||
}
|
||||
const json = await res.json()
|
||||
const parsed = ExecuteResponseSuccessSchema.parse(json)
|
||||
this.setState(ClientToolCallState.success)
|
||||
await this.markToolComplete(200, 'Environment variables updated', parsed.result)
|
||||
this.setState(ClientToolCallState.success)
|
||||
|
||||
// Refresh the environment store so the UI reflects the new variables
|
||||
try {
|
||||
await useEnvironmentStore.getState().loadEnvironmentVariables()
|
||||
logger.info('Environment store refreshed after setting variables')
|
||||
} catch (error) {
|
||||
logger.warn('Failed to refresh environment store:', error)
|
||||
}
|
||||
} catch (e: any) {
|
||||
logger.error('execute failed', { message: e?.message })
|
||||
this.setState(ClientToolCallState.error)
|
||||
await this.markToolComplete(500, e?.message || 'Failed to set environment variables')
|
||||
}
|
||||
}
|
||||
|
||||
async execute(args?: SetEnvArgs): Promise<void> {
|
||||
await this.handleAccept(args)
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(
|
||||
SetEnvironmentVariablesClientTool.id,
|
||||
SetEnvironmentVariablesClientTool.metadata.uiConfig!
|
||||
)
|
||||
@@ -1,142 +0,0 @@
|
||||
import {
|
||||
extractFieldsFromSchema,
|
||||
parseResponseFormatSafely,
|
||||
} from '@/lib/core/utils/response-format'
|
||||
import { getBlockOutputPaths } from '@/lib/workflows/blocks/block-outputs'
|
||||
import { getBlock } from '@/blocks'
|
||||
import { normalizeName } from '@/executor/constants'
|
||||
import { useVariablesStore } from '@/stores/panel/variables/store'
|
||||
import type { Variable } from '@/stores/panel/variables/types'
|
||||
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
|
||||
import type { BlockState, Loop, Parallel } from '@/stores/workflows/workflow/types'
|
||||
|
||||
export interface WorkflowContext {
|
||||
workflowId: string
|
||||
blocks: Record<string, BlockState>
|
||||
loops: Record<string, Loop>
|
||||
parallels: Record<string, Parallel>
|
||||
subBlockValues: Record<string, Record<string, any>>
|
||||
}
|
||||
|
||||
export interface VariableOutput {
|
||||
id: string
|
||||
name: string
|
||||
type: string
|
||||
tag: string
|
||||
}
|
||||
|
||||
export function getWorkflowSubBlockValues(workflowId: string): Record<string, Record<string, any>> {
|
||||
const subBlockStore = useSubBlockStore.getState()
|
||||
return subBlockStore.workflowValues[workflowId] ?? {}
|
||||
}
|
||||
|
||||
export function getMergedSubBlocks(
|
||||
blocks: Record<string, BlockState>,
|
||||
subBlockValues: Record<string, Record<string, any>>,
|
||||
targetBlockId: string
|
||||
): Record<string, any> {
|
||||
const base = blocks[targetBlockId]?.subBlocks || {}
|
||||
const live = subBlockValues?.[targetBlockId] || {}
|
||||
const merged: Record<string, any> = { ...base }
|
||||
for (const [subId, liveVal] of Object.entries(live)) {
|
||||
merged[subId] = { ...(base[subId] || {}), value: liveVal }
|
||||
}
|
||||
return merged
|
||||
}
|
||||
|
||||
export function getSubBlockValue(
|
||||
blocks: Record<string, BlockState>,
|
||||
subBlockValues: Record<string, Record<string, any>>,
|
||||
targetBlockId: string,
|
||||
subBlockId: string
|
||||
): any {
|
||||
const live = subBlockValues?.[targetBlockId]?.[subBlockId]
|
||||
if (live !== undefined) return live
|
||||
return blocks[targetBlockId]?.subBlocks?.[subBlockId]?.value
|
||||
}
|
||||
|
||||
export function getWorkflowVariables(workflowId: string): VariableOutput[] {
|
||||
const getVariablesByWorkflowId = useVariablesStore.getState().getVariablesByWorkflowId
|
||||
const workflowVariables = getVariablesByWorkflowId(workflowId)
|
||||
const validVariables = workflowVariables.filter(
|
||||
(variable: Variable) => variable.name.trim() !== ''
|
||||
)
|
||||
return validVariables.map((variable: Variable) => ({
|
||||
id: variable.id,
|
||||
name: variable.name,
|
||||
type: variable.type,
|
||||
tag: `variable.${normalizeName(variable.name)}`,
|
||||
}))
|
||||
}
|
||||
|
||||
export function getSubflowInsidePaths(
|
||||
blockType: 'loop' | 'parallel',
|
||||
blockId: string,
|
||||
loops: Record<string, Loop>,
|
||||
parallels: Record<string, Parallel>
|
||||
): string[] {
|
||||
const paths = ['index']
|
||||
if (blockType === 'loop') {
|
||||
const loopType = loops[blockId]?.loopType || 'for'
|
||||
if (loopType === 'forEach') {
|
||||
paths.push('currentItem', 'items')
|
||||
}
|
||||
} else {
|
||||
const parallelType = parallels[blockId]?.parallelType || 'count'
|
||||
if (parallelType === 'collection') {
|
||||
paths.push('currentItem', 'items')
|
||||
}
|
||||
}
|
||||
return paths
|
||||
}
|
||||
|
||||
export function computeBlockOutputPaths(block: BlockState, ctx: WorkflowContext): string[] {
|
||||
const { blocks, loops, parallels, subBlockValues } = ctx
|
||||
const blockConfig = getBlock(block.type)
|
||||
const mergedSubBlocks = getMergedSubBlocks(blocks, subBlockValues, block.id)
|
||||
|
||||
if (block.type === 'loop' || block.type === 'parallel') {
|
||||
const insidePaths = getSubflowInsidePaths(block.type, block.id, loops, parallels)
|
||||
return ['results', ...insidePaths]
|
||||
}
|
||||
|
||||
if (block.type === 'evaluator') {
|
||||
const metricsValue = getSubBlockValue(blocks, subBlockValues, block.id, 'metrics')
|
||||
if (metricsValue && Array.isArray(metricsValue) && metricsValue.length > 0) {
|
||||
const validMetrics = metricsValue.filter((metric: { name?: string }) => metric?.name)
|
||||
return validMetrics.map((metric: { name: string }) => metric.name.toLowerCase())
|
||||
}
|
||||
return getBlockOutputPaths(block.type, mergedSubBlocks)
|
||||
}
|
||||
|
||||
if (block.type === 'variables') {
|
||||
const variablesValue = getSubBlockValue(blocks, subBlockValues, block.id, 'variables')
|
||||
if (variablesValue && Array.isArray(variablesValue) && variablesValue.length > 0) {
|
||||
const validAssignments = variablesValue.filter((assignment: { variableName?: string }) =>
|
||||
assignment?.variableName?.trim()
|
||||
)
|
||||
return validAssignments.map((assignment: { variableName: string }) =>
|
||||
assignment.variableName.trim()
|
||||
)
|
||||
}
|
||||
return []
|
||||
}
|
||||
|
||||
if (blockConfig) {
|
||||
const responseFormatValue = mergedSubBlocks?.responseFormat?.value
|
||||
const responseFormat = parseResponseFormatSafely(responseFormatValue, block.id)
|
||||
if (responseFormat) {
|
||||
const schemaFields = extractFieldsFromSchema(responseFormat)
|
||||
if (schemaFields.length > 0) {
|
||||
return schemaFields.map((field) => field.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return getBlockOutputPaths(block.type, mergedSubBlocks, block.triggerMode)
|
||||
}
|
||||
|
||||
export function formatOutputsWithPrefix(paths: string[], blockName: string): string[] {
|
||||
const normalizedName = normalizeName(blockName)
|
||||
return paths.map((path) => `${normalizedName}.${path}`)
|
||||
}
|
||||
@@ -1,215 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { Loader2, Rocket, X, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
|
||||
|
||||
interface CheckDeploymentStatusArgs {
|
||||
workflowId?: string
|
||||
}
|
||||
|
||||
interface ApiDeploymentDetails {
|
||||
isDeployed: boolean
|
||||
deployedAt: string | null
|
||||
endpoint: string | null
|
||||
apiKey: string | null
|
||||
needsRedeployment: boolean
|
||||
}
|
||||
|
||||
interface ChatDeploymentDetails {
|
||||
isDeployed: boolean
|
||||
chatId: string | null
|
||||
identifier: string | null
|
||||
chatUrl: string | null
|
||||
title: string | null
|
||||
description: string | null
|
||||
authType: string | null
|
||||
allowedEmails: string[] | null
|
||||
outputConfigs: Array<{ blockId: string; path: string }> | null
|
||||
welcomeMessage: string | null
|
||||
primaryColor: string | null
|
||||
hasPassword: boolean
|
||||
}
|
||||
|
||||
interface McpDeploymentDetails {
|
||||
isDeployed: boolean
|
||||
servers: Array<{
|
||||
serverId: string
|
||||
serverName: string
|
||||
toolName: string
|
||||
toolDescription: string | null
|
||||
parameterSchema?: Record<string, unknown> | null
|
||||
toolId?: string | null
|
||||
}>
|
||||
}
|
||||
|
||||
export class CheckDeploymentStatusClientTool extends BaseClientTool {
|
||||
static readonly id = 'check_deployment_status'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, CheckDeploymentStatusClientTool.id, CheckDeploymentStatusClientTool.metadata)
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: {
|
||||
text: 'Checking deployment status',
|
||||
icon: Loader2,
|
||||
},
|
||||
[ClientToolCallState.pending]: { text: 'Checking deployment status', icon: Loader2 },
|
||||
[ClientToolCallState.executing]: { text: 'Checking deployment status', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Checked deployment status', icon: Rocket },
|
||||
[ClientToolCallState.error]: { text: 'Failed to check deployment status', icon: X },
|
||||
[ClientToolCallState.aborted]: {
|
||||
text: 'Aborted checking deployment status',
|
||||
icon: XCircle,
|
||||
},
|
||||
[ClientToolCallState.rejected]: {
|
||||
text: 'Skipped checking deployment status',
|
||||
icon: XCircle,
|
||||
},
|
||||
},
|
||||
interrupt: undefined,
|
||||
}
|
||||
|
||||
async execute(args?: CheckDeploymentStatusArgs): Promise<void> {
|
||||
const logger = createLogger('CheckDeploymentStatusClientTool')
|
||||
try {
|
||||
this.setState(ClientToolCallState.executing)
|
||||
|
||||
const { activeWorkflowId, workflows } = useWorkflowRegistry.getState()
|
||||
const workflowId = args?.workflowId || activeWorkflowId
|
||||
|
||||
if (!workflowId) {
|
||||
throw new Error('No workflow ID provided')
|
||||
}
|
||||
|
||||
const workflow = workflows[workflowId]
|
||||
const workspaceId = workflow?.workspaceId
|
||||
|
||||
// Fetch deployment status from all sources
|
||||
const [apiDeployRes, chatDeployRes, mcpServersRes] = await Promise.all([
|
||||
fetch(`/api/workflows/${workflowId}/deploy`),
|
||||
fetch(`/api/workflows/${workflowId}/chat/status`),
|
||||
workspaceId ? fetch(`/api/mcp/workflow-servers?workspaceId=${workspaceId}`) : null,
|
||||
])
|
||||
|
||||
const apiDeploy = apiDeployRes.ok ? await apiDeployRes.json() : null
|
||||
const chatDeploy = chatDeployRes.ok ? await chatDeployRes.json() : null
|
||||
const mcpServers = mcpServersRes?.ok ? await mcpServersRes.json() : null
|
||||
|
||||
// API deployment details
|
||||
const isApiDeployed = apiDeploy?.isDeployed || false
|
||||
const appUrl = typeof window !== 'undefined' ? window.location.origin : ''
|
||||
const apiDetails: ApiDeploymentDetails = {
|
||||
isDeployed: isApiDeployed,
|
||||
deployedAt: apiDeploy?.deployedAt || null,
|
||||
endpoint: isApiDeployed ? `${appUrl}/api/workflows/${workflowId}/execute` : null,
|
||||
apiKey: apiDeploy?.apiKey || null,
|
||||
needsRedeployment: apiDeploy?.needsRedeployment === true,
|
||||
}
|
||||
|
||||
// Chat deployment details
|
||||
const isChatDeployed = !!(chatDeploy?.isDeployed && chatDeploy?.deployment)
|
||||
const chatDetails: ChatDeploymentDetails = {
|
||||
isDeployed: isChatDeployed,
|
||||
chatId: chatDeploy?.deployment?.id || null,
|
||||
identifier: chatDeploy?.deployment?.identifier || null,
|
||||
chatUrl: isChatDeployed ? `${appUrl}/chat/${chatDeploy?.deployment?.identifier}` : null,
|
||||
title: chatDeploy?.deployment?.title || null,
|
||||
description: chatDeploy?.deployment?.description || null,
|
||||
authType: chatDeploy?.deployment?.authType || null,
|
||||
allowedEmails: Array.isArray(chatDeploy?.deployment?.allowedEmails)
|
||||
? chatDeploy?.deployment?.allowedEmails
|
||||
: null,
|
||||
outputConfigs: Array.isArray(chatDeploy?.deployment?.outputConfigs)
|
||||
? chatDeploy?.deployment?.outputConfigs
|
||||
: null,
|
||||
welcomeMessage: chatDeploy?.deployment?.customizations?.welcomeMessage || null,
|
||||
primaryColor: chatDeploy?.deployment?.customizations?.primaryColor || null,
|
||||
hasPassword: chatDeploy?.deployment?.hasPassword === true,
|
||||
}
|
||||
|
||||
// MCP deployment details - find servers that have this workflow as a tool
|
||||
const mcpServerList = mcpServers?.data?.servers || []
|
||||
const mcpToolDeployments: McpDeploymentDetails['servers'] = []
|
||||
|
||||
for (const server of mcpServerList) {
|
||||
// Check if this workflow is deployed as a tool on this server
|
||||
if (server.toolNames && Array.isArray(server.toolNames)) {
|
||||
// We need to fetch the actual tools to check if this workflow is there
|
||||
try {
|
||||
const toolsRes = await fetch(
|
||||
`/api/mcp/workflow-servers/${server.id}/tools?workspaceId=${workspaceId}`
|
||||
)
|
||||
if (toolsRes.ok) {
|
||||
const toolsData = await toolsRes.json()
|
||||
const tools = toolsData.data?.tools || []
|
||||
for (const tool of tools) {
|
||||
if (tool.workflowId === workflowId) {
|
||||
mcpToolDeployments.push({
|
||||
serverId: server.id,
|
||||
serverName: server.name,
|
||||
toolName: tool.toolName,
|
||||
toolDescription: tool.toolDescription,
|
||||
parameterSchema: tool.parameterSchema ?? null,
|
||||
toolId: tool.id ?? null,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Skip this server if we can't fetch tools
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const isMcpDeployed = mcpToolDeployments.length > 0
|
||||
const mcpDetails: McpDeploymentDetails = {
|
||||
isDeployed: isMcpDeployed,
|
||||
servers: mcpToolDeployments,
|
||||
}
|
||||
|
||||
// Build deployment types list
|
||||
const deploymentTypes: string[] = []
|
||||
if (isApiDeployed) deploymentTypes.push('api')
|
||||
if (isChatDeployed) deploymentTypes.push('chat')
|
||||
if (isMcpDeployed) deploymentTypes.push('mcp')
|
||||
|
||||
const isDeployed = isApiDeployed || isChatDeployed || isMcpDeployed
|
||||
|
||||
// Build summary message
|
||||
let message = ''
|
||||
if (!isDeployed) {
|
||||
message = 'Workflow is not deployed'
|
||||
} else {
|
||||
const parts: string[] = []
|
||||
if (isApiDeployed) parts.push('API')
|
||||
if (isChatDeployed) parts.push(`Chat (${chatDetails.identifier})`)
|
||||
if (isMcpDeployed) {
|
||||
const serverNames = mcpToolDeployments.map((d) => d.serverName).join(', ')
|
||||
parts.push(`MCP (${serverNames})`)
|
||||
}
|
||||
message = `Workflow is deployed as: ${parts.join(', ')}`
|
||||
}
|
||||
|
||||
this.setState(ClientToolCallState.success)
|
||||
await this.markToolComplete(200, message, {
|
||||
isDeployed,
|
||||
deploymentTypes,
|
||||
api: apiDetails,
|
||||
chat: chatDetails,
|
||||
mcp: mcpDetails,
|
||||
})
|
||||
|
||||
logger.info('Checked deployment status', { isDeployed, deploymentTypes })
|
||||
} catch (e: any) {
|
||||
logger.error('Check deployment status failed', { message: e?.message })
|
||||
this.setState(ClientToolCallState.error)
|
||||
await this.markToolComplete(500, e?.message || 'Failed to check deployment status')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,155 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { Loader2, Plus, Server, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { useCopilotStore } from '@/stores/panel/copilot/store'
|
||||
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
|
||||
|
||||
export interface CreateWorkspaceMcpServerArgs {
|
||||
/** Name of the MCP server */
|
||||
name: string
|
||||
/** Optional description */
|
||||
description?: string
|
||||
workspaceId?: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Create workspace MCP server tool.
|
||||
* Creates a new MCP server in the workspace that workflows can be deployed to as tools.
|
||||
*/
|
||||
export class CreateWorkspaceMcpServerClientTool extends BaseClientTool {
|
||||
static readonly id = 'create_workspace_mcp_server'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(
|
||||
toolCallId,
|
||||
CreateWorkspaceMcpServerClientTool.id,
|
||||
CreateWorkspaceMcpServerClientTool.metadata
|
||||
)
|
||||
}
|
||||
|
||||
getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined {
|
||||
const toolCallsById = useCopilotStore.getState().toolCallsById
|
||||
const toolCall = toolCallsById[this.toolCallId]
|
||||
const params = toolCall?.params as CreateWorkspaceMcpServerArgs | undefined
|
||||
|
||||
const serverName = params?.name || 'MCP Server'
|
||||
|
||||
return {
|
||||
accept: { text: `Create "${serverName}"`, icon: Plus },
|
||||
reject: { text: 'Skip', icon: XCircle },
|
||||
}
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: {
|
||||
text: 'Preparing to create MCP server',
|
||||
icon: Loader2,
|
||||
},
|
||||
[ClientToolCallState.pending]: { text: 'Create MCP server?', icon: Server },
|
||||
[ClientToolCallState.executing]: { text: 'Creating MCP server', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Created MCP server', icon: Server },
|
||||
[ClientToolCallState.error]: { text: 'Failed to create MCP server', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: { text: 'Aborted creating MCP server', icon: XCircle },
|
||||
[ClientToolCallState.rejected]: { text: 'Skipped creating MCP server', icon: XCircle },
|
||||
},
|
||||
interrupt: {
|
||||
accept: { text: 'Create', icon: Plus },
|
||||
reject: { text: 'Skip', icon: XCircle },
|
||||
},
|
||||
getDynamicText: (params, state) => {
|
||||
const name = params?.name || 'MCP server'
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `Created MCP server "${name}"`
|
||||
case ClientToolCallState.executing:
|
||||
return `Creating MCP server "${name}"`
|
||||
case ClientToolCallState.generating:
|
||||
return `Preparing to create "${name}"`
|
||||
case ClientToolCallState.pending:
|
||||
return `Create MCP server "${name}"?`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to create "${name}"`
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
async handleReject(): Promise<void> {
|
||||
await super.handleReject()
|
||||
this.setState(ClientToolCallState.rejected)
|
||||
}
|
||||
|
||||
async handleAccept(args?: CreateWorkspaceMcpServerArgs): Promise<void> {
|
||||
const logger = createLogger('CreateWorkspaceMcpServerClientTool')
|
||||
try {
|
||||
if (!args?.name) {
|
||||
throw new Error('Server name is required')
|
||||
}
|
||||
|
||||
// Get workspace ID from active workflow if not provided
|
||||
const { activeWorkflowId, workflows } = useWorkflowRegistry.getState()
|
||||
let workspaceId = args?.workspaceId
|
||||
|
||||
if (!workspaceId && activeWorkflowId) {
|
||||
workspaceId = workflows[activeWorkflowId]?.workspaceId
|
||||
}
|
||||
|
||||
if (!workspaceId) {
|
||||
throw new Error('No workspace ID available')
|
||||
}
|
||||
|
||||
this.setState(ClientToolCallState.executing)
|
||||
|
||||
const res = await fetch('/api/mcp/workflow-servers', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
workspaceId,
|
||||
name: args.name.trim(),
|
||||
description: args.description?.trim() || null,
|
||||
}),
|
||||
})
|
||||
|
||||
const data = await res.json()
|
||||
|
||||
if (!res.ok) {
|
||||
throw new Error(data.error || `Failed to create MCP server (${res.status})`)
|
||||
}
|
||||
|
||||
const server = data.data?.server
|
||||
if (!server) {
|
||||
throw new Error('Server creation response missing server data')
|
||||
}
|
||||
|
||||
this.setState(ClientToolCallState.success)
|
||||
await this.markToolComplete(
|
||||
200,
|
||||
`MCP server "${args.name}" created successfully. You can now deploy workflows to it using deploy_mcp.`,
|
||||
{
|
||||
success: true,
|
||||
serverId: server.id,
|
||||
serverName: server.name,
|
||||
description: server.description,
|
||||
}
|
||||
)
|
||||
|
||||
logger.info(`Created MCP server: ${server.name} (${server.id})`)
|
||||
} catch (e: any) {
|
||||
logger.error('Failed to create MCP server', { message: e?.message })
|
||||
this.setState(ClientToolCallState.error)
|
||||
await this.markToolComplete(500, e?.message || 'Failed to create MCP server', {
|
||||
success: false,
|
||||
error: e?.message,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
async execute(args?: CreateWorkspaceMcpServerArgs): Promise<void> {
|
||||
await this.handleAccept(args)
|
||||
}
|
||||
}
|
||||
@@ -1,286 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { Loader2, Rocket, XCircle } from 'lucide-react'
|
||||
import {
|
||||
BaseClientTool,
|
||||
type BaseClientToolMetadata,
|
||||
ClientToolCallState,
|
||||
} from '@/lib/copilot/tools/client/base-tool'
|
||||
import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { getInputFormatExample } from '@/lib/workflows/operations/deployment-utils'
|
||||
import { useCopilotStore } from '@/stores/panel/copilot/store'
|
||||
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
|
||||
|
||||
interface DeployApiArgs {
|
||||
action: 'deploy' | 'undeploy'
|
||||
workflowId?: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Deploy API tool for deploying workflows as REST APIs.
|
||||
* This tool handles both deploying and undeploying workflows via the API endpoint.
|
||||
*/
|
||||
export class DeployApiClientTool extends BaseClientTool {
|
||||
static readonly id = 'deploy_api'
|
||||
|
||||
constructor(toolCallId: string) {
|
||||
super(toolCallId, DeployApiClientTool.id, DeployApiClientTool.metadata)
|
||||
}
|
||||
|
||||
/**
|
||||
* Override to provide dynamic button text based on action
|
||||
*/
|
||||
getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined {
|
||||
const toolCallsById = useCopilotStore.getState().toolCallsById
|
||||
const toolCall = toolCallsById[this.toolCallId]
|
||||
const params = toolCall?.params as DeployApiArgs | undefined
|
||||
|
||||
const action = params?.action || 'deploy'
|
||||
|
||||
const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId
|
||||
const isAlreadyDeployed = workflowId
|
||||
? useWorkflowRegistry.getState().getWorkflowDeploymentStatus(workflowId)?.isDeployed
|
||||
: false
|
||||
|
||||
let buttonText = action === 'undeploy' ? 'Undeploy' : 'Deploy'
|
||||
|
||||
if (action === 'deploy' && isAlreadyDeployed) {
|
||||
buttonText = 'Redeploy'
|
||||
}
|
||||
|
||||
return {
|
||||
accept: { text: buttonText, icon: Rocket },
|
||||
reject: { text: 'Skip', icon: XCircle },
|
||||
}
|
||||
}
|
||||
|
||||
static readonly metadata: BaseClientToolMetadata = {
|
||||
displayNames: {
|
||||
[ClientToolCallState.generating]: {
|
||||
text: 'Preparing to deploy API',
|
||||
icon: Loader2,
|
||||
},
|
||||
[ClientToolCallState.pending]: { text: 'Deploy as API?', icon: Rocket },
|
||||
[ClientToolCallState.executing]: { text: 'Deploying API', icon: Loader2 },
|
||||
[ClientToolCallState.success]: { text: 'Deployed API', icon: Rocket },
|
||||
[ClientToolCallState.error]: { text: 'Failed to deploy API', icon: XCircle },
|
||||
[ClientToolCallState.aborted]: {
|
||||
text: 'Aborted deploying API',
|
||||
icon: XCircle,
|
||||
},
|
||||
[ClientToolCallState.rejected]: {
|
||||
text: 'Skipped deploying API',
|
||||
icon: XCircle,
|
||||
},
|
||||
},
|
||||
interrupt: {
|
||||
accept: { text: 'Deploy', icon: Rocket },
|
||||
reject: { text: 'Skip', icon: XCircle },
|
||||
},
|
||||
uiConfig: {
|
||||
isSpecial: true,
|
||||
interrupt: {
|
||||
accept: { text: 'Deploy', icon: Rocket },
|
||||
reject: { text: 'Skip', icon: XCircle },
|
||||
showAllowOnce: true,
|
||||
showAllowAlways: true,
|
||||
},
|
||||
},
|
||||
getDynamicText: (params, state) => {
|
||||
const action = params?.action === 'undeploy' ? 'undeploy' : 'deploy'
|
||||
|
||||
const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId
|
||||
const isAlreadyDeployed = workflowId
|
||||
? useWorkflowRegistry.getState().getWorkflowDeploymentStatus(workflowId)?.isDeployed
|
||||
: false
|
||||
|
||||
let actionText = action
|
||||
let actionTextIng = action === 'undeploy' ? 'undeploying' : 'deploying'
|
||||
const actionTextPast = action === 'undeploy' ? 'undeployed' : 'deployed'
|
||||
|
||||
if (action === 'deploy' && isAlreadyDeployed) {
|
||||
actionText = 'redeploy'
|
||||
actionTextIng = 'redeploying'
|
||||
}
|
||||
|
||||
const actionCapitalized = actionText.charAt(0).toUpperCase() + actionText.slice(1)
|
||||
|
||||
switch (state) {
|
||||
case ClientToolCallState.success:
|
||||
return `API ${actionTextPast}`
|
||||
case ClientToolCallState.executing:
|
||||
return `${actionCapitalized}ing API`
|
||||
case ClientToolCallState.generating:
|
||||
return `Preparing to ${actionText} API`
|
||||
case ClientToolCallState.pending:
|
||||
return `${actionCapitalized} API?`
|
||||
case ClientToolCallState.error:
|
||||
return `Failed to ${actionText} API`
|
||||
case ClientToolCallState.aborted:
|
||||
return `Aborted ${actionTextIng} API`
|
||||
case ClientToolCallState.rejected:
|
||||
return `Skipped ${actionTextIng} API`
|
||||
}
|
||||
return undefined
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if the user has any API keys (workspace or personal)
|
||||
*/
|
||||
private async hasApiKeys(workspaceId: string): Promise<boolean> {
|
||||
try {
|
||||
const [workspaceRes, personalRes] = await Promise.all([
|
||||
fetch(`/api/workspaces/${workspaceId}/api-keys`),
|
||||
fetch('/api/users/me/api-keys'),
|
||||
])
|
||||
|
||||
if (!workspaceRes.ok || !personalRes.ok) {
|
||||
return false
|
||||
}
|
||||
|
||||
const workspaceData = await workspaceRes.json()
|
||||
const personalData = await personalRes.json()
|
||||
|
||||
const workspaceKeys = (workspaceData?.keys || []) as Array<any>
|
||||
const personalKeys = (personalData?.keys || []) as Array<any>
|
||||
|
||||
return workspaceKeys.length > 0 || personalKeys.length > 0
|
||||
} catch (error) {
|
||||
const logger = createLogger('DeployApiClientTool')
|
||||
logger.warn('Failed to check API keys:', error)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Opens the settings modal to the API keys tab
|
||||
*/
|
||||
private openApiKeysModal(): void {
|
||||
window.dispatchEvent(new CustomEvent('open-settings', { detail: { tab: 'apikeys' } }))
|
||||
}
|
||||
|
||||
async handleReject(): Promise<void> {
|
||||
await super.handleReject()
|
||||
this.setState(ClientToolCallState.rejected)
|
||||
}
|
||||
|
||||
async handleAccept(args?: DeployApiArgs): Promise<void> {
|
||||
const logger = createLogger('DeployApiClientTool')
|
||||
try {
|
||||
const action = args?.action || 'deploy'
|
||||
const { activeWorkflowId, workflows } = useWorkflowRegistry.getState()
|
||||
const workflowId = args?.workflowId || activeWorkflowId
|
||||
|
||||
if (!workflowId) {
|
||||
throw new Error('No workflow ID provided')
|
||||
}
|
||||
|
||||
const workflow = workflows[workflowId]
|
||||
const workspaceId = workflow?.workspaceId
|
||||
|
||||
// For deploy action, check if user has API keys first
|
||||
if (action === 'deploy') {
|
||||
if (!workspaceId) {
|
||||
throw new Error('Workflow workspace not found')
|
||||
}
|
||||
|
||||
const hasKeys = await this.hasApiKeys(workspaceId)
|
||||
|
||||
if (!hasKeys) {
|
||||
this.setState(ClientToolCallState.rejected)
|
||||
this.openApiKeysModal()
|
||||
|
||||
await this.markToolComplete(
|
||||
200,
|
||||
'Cannot deploy without an API key. Opened API key settings so you can create one. Once you have an API key, try deploying again.',
|
||||
{
|
||||
needsApiKey: true,
|
||||
message:
|
||||
'You need to create an API key before you can deploy your workflow. The API key settings have been opened for you. After creating an API key, you can deploy your workflow.',
|
||||
}
|
||||
)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
this.setState(ClientToolCallState.executing)
|
||||
|
||||
const endpoint = `/api/workflows/${workflowId}/deploy`
|
||||
const method = action === 'deploy' ? 'POST' : 'DELETE'
|
||||
|
||||
const res = await fetch(endpoint, {
|
||||
method,
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: action === 'deploy' ? JSON.stringify({ deployChatEnabled: false }) : undefined,
|
||||
})
|
||||
|
||||
if (!res.ok) {
|
||||
const txt = await res.text().catch(() => '')
|
||||
throw new Error(txt || `Server error (${res.status})`)
|
||||
}
|
||||
|
||||
const json = await res.json()
|
||||
|
||||
let successMessage = ''
|
||||
let resultData: any = {
|
||||
action,
|
||||
isDeployed: action === 'deploy',
|
||||
deployedAt: json.deployedAt,
|
||||
}
|
||||
|
||||
if (action === 'deploy') {
|
||||
const appUrl = getBaseUrl()
|
||||
const apiEndpoint = `${appUrl}/api/workflows/${workflowId}/execute`
|
||||
const apiKeyPlaceholder = '$SIM_API_KEY'
|
||||
|
||||
const inputExample = getInputFormatExample(false)
|
||||
const curlCommand = `curl -X POST -H "X-API-Key: ${apiKeyPlaceholder}" -H "Content-Type: application/json"${inputExample} ${apiEndpoint}`
|
||||
|
||||
successMessage = 'Workflow deployed successfully as API. You can now call it via REST.'
|
||||
|
||||
resultData = {
|
||||
...resultData,
|
||||
endpoint: apiEndpoint,
|
||||
curlCommand,
|
||||
apiKeyPlaceholder,
|
||||
}
|
||||
} else {
|
||||
successMessage = 'Workflow undeployed successfully.'
|
||||
}
|
||||
|
||||
this.setState(ClientToolCallState.success)
|
||||
await this.markToolComplete(200, successMessage, resultData)
|
||||
|
||||
// Refresh the workflow registry to update deployment status
|
||||
try {
|
||||
const setDeploymentStatus = useWorkflowRegistry.getState().setDeploymentStatus
|
||||
if (action === 'deploy') {
|
||||
setDeploymentStatus(
|
||||
workflowId,
|
||||
true,
|
||||
json.deployedAt ? new Date(json.deployedAt) : undefined,
|
||||
json.apiKey || ''
|
||||
)
|
||||
} else {
|
||||
setDeploymentStatus(workflowId, false, undefined, '')
|
||||
}
|
||||
const actionPast = action === 'undeploy' ? 'undeployed' : 'deployed'
|
||||
logger.info(`Workflow ${actionPast} as API and registry updated`)
|
||||
} catch (error) {
|
||||
logger.warn('Failed to update workflow registry:', error)
|
||||
}
|
||||
} catch (e: any) {
|
||||
logger.error('Deploy API failed', { message: e?.message })
|
||||
this.setState(ClientToolCallState.error)
|
||||
await this.markToolComplete(500, e?.message || 'Failed to deploy API')
|
||||
}
|
||||
}
|
||||
|
||||
async execute(args?: DeployApiArgs): Promise<void> {
|
||||
await this.handleAccept(args)
|
||||
}
|
||||
}
|
||||
|
||||
// Register UI config at module load
|
||||
registerToolUIConfig(DeployApiClientTool.id, DeployApiClientTool.metadata.uiConfig!)
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user