feat(copilot): add depths (#974)

* Checkpoint

* can edit names and types

* Add reasoning and thinking

* Update agent max

* Max mode v1

* Add best practices

* Todo list shows up

* Todolist works

* Updates to todo

* Updates

* Updates

* Checkpoint

* Yaml export updates

* Updates

* Checkpoint fr

* Fix diff view on new workflow

* Subflow autolayout fix v1

* Autolayout fixes 2

* Gdrive list files

* Get oauth credential (email)

* Gdrive file picker

* Gdrive file access prompt

* Api request

* Copilot ui for some tool calls

* Updates

* Fix overflow

* Openai

* Streaming

* Checkpoint

* Update

* Openai responses api

* Depth skeleton

* Depth tooltips

* Mode selector tooltips

* Update ui

* Update ordering

* Lint

* Remove migrations

* Add migrations back

* Lint

* Fix isdev

* Fix tests

* Comments

---------

Co-authored-by: Waleed Latif <walif6@gmail.com>
Co-authored-by: Vikhyath Mondreti <vikhyathvikku@gmail.com>
Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com>
Co-authored-by: Vikhyath Mondreti <vikhyath@simstudio.ai>
Co-authored-by: Adam Gough <77861281+aadamgough@users.noreply.github.com>
Co-authored-by: Adam Gough <adamgough@Mac.attlocal.net>
This commit is contained in:
Siddharth Ganesan
2025-08-15 11:37:58 -07:00
committed by GitHub
parent f081f5a73c
commit 0c7a8efc8d
47 changed files with 9016 additions and 354 deletions

View File

@@ -223,6 +223,8 @@ describe('Copilot Chat API Route', () => {
stream: true,
streamToolCalls: true,
mode: 'agent',
provider: 'openai',
depth: 0,
}),
})
)
@@ -284,6 +286,8 @@ describe('Copilot Chat API Route', () => {
stream: true,
streamToolCalls: true,
mode: 'agent',
provider: 'openai',
depth: 0,
}),
})
)
@@ -337,6 +341,8 @@ describe('Copilot Chat API Route', () => {
stream: true,
streamToolCalls: true,
mode: 'agent',
provider: 'openai',
depth: 0,
}),
})
)
@@ -430,6 +436,8 @@ describe('Copilot Chat API Route', () => {
stream: true,
streamToolCalls: true,
mode: 'ask',
provider: 'openai',
depth: 0,
}),
})
)

View File

@@ -39,10 +39,13 @@ const ChatMessageSchema = z.object({
chatId: z.string().optional(),
workflowId: z.string().min(1, 'Workflow ID is required'),
mode: z.enum(['ask', 'agent']).optional().default('agent'),
depth: z.number().int().min(0).max(3).optional().default(0),
createNewChat: z.boolean().optional().default(false),
stream: z.boolean().optional().default(true),
implicitFeedback: z.string().optional(),
fileAttachments: z.array(FileAttachmentSchema).optional(),
provider: z.string().optional().default('openai'),
conversationId: z.string().optional(),
})
// Sim Agent API configuration
@@ -156,10 +159,13 @@ export async function POST(req: NextRequest) {
chatId,
workflowId,
mode,
depth,
createNewChat,
stream,
implicitFeedback,
fileAttachments,
provider,
conversationId,
} = ChatMessageSchema.parse(body)
logger.info(`[${tracker.requestId}] Processing copilot chat request`, {
@@ -171,6 +177,8 @@ export async function POST(req: NextRequest) {
createNewChat,
messageLength: message.length,
hasImplicitFeedback: !!implicitFeedback,
provider: provider || 'openai',
hasConversationId: !!conversationId,
})
// Handle chat context
@@ -252,7 +260,7 @@ export async function POST(req: NextRequest) {
}
// Build messages array for sim agent with conversation history
const messages = []
const messages: any[] = []
// Add conversation history (need to rebuild these with file support if they had attachments)
for (const msg of conversationHistory) {
@@ -327,16 +335,13 @@ export async function POST(req: NextRequest) {
})
}
// Start title generation in parallel if this is a new chat with first message
if (actualChatId && !currentChat?.title && conversationHistory.length === 0) {
logger.info(`[${tracker.requestId}] Will start parallel title generation inside stream`)
}
// Determine provider and conversationId to use for this request
const providerToUse = provider || 'openai'
const effectiveConversationId =
(currentChat?.conversationId as string | undefined) || conversationId
// Forward to sim agent API
logger.info(`[${tracker.requestId}] Sending request to sim agent API`, {
messageCount: messages.length,
endpoint: `${SIM_AGENT_API_URL}/api/chat-completion-streaming`,
})
// If we have a conversationId, only send the most recent user message; else send full history
const messagesForAgent = effectiveConversationId ? [messages[messages.length - 1]] : messages
const simAgentResponse = await fetch(`${SIM_AGENT_API_URL}/api/chat-completion-streaming`, {
method: 'POST',
@@ -345,12 +350,15 @@ export async function POST(req: NextRequest) {
...(SIM_AGENT_API_KEY && { 'x-api-key': SIM_AGENT_API_KEY }),
},
body: JSON.stringify({
messages,
messages: messagesForAgent,
workflowId,
userId: authenticatedUserId,
stream: stream,
streamToolCalls: true,
mode: mode,
provider: providerToUse,
...(effectiveConversationId ? { conversationId: effectiveConversationId } : {}),
...(typeof depth === 'number' ? { depth } : {}),
...(session?.user?.name && { userName: session.user.name }),
}),
})
@@ -388,6 +396,8 @@ export async function POST(req: NextRequest) {
const toolCalls: any[] = []
let buffer = ''
let isFirstDone = true
let responseIdFromStart: string | undefined
let responseIdFromDone: string | undefined
// Send chatId as first event
if (actualChatId) {
@@ -486,6 +496,13 @@ export async function POST(req: NextRequest) {
}
break
case 'reasoning':
// Treat like thinking: do not add to assistantContent to avoid leaking
logger.debug(
`[${tracker.requestId}] Reasoning chunk received (${(event.data || event.content || '').length} chars)`
)
break
case 'tool_call':
logger.info(
`[${tracker.requestId}] Tool call ${event.data?.partial ? '(partial)' : '(complete)'}:`,
@@ -528,7 +545,22 @@ export async function POST(req: NextRequest) {
})
break
case 'start':
if (event.data?.responseId) {
responseIdFromStart = event.data.responseId
logger.info(
`[${tracker.requestId}] Received start event with responseId: ${responseIdFromStart}`
)
}
break
case 'done':
if (event.data?.responseId) {
responseIdFromDone = event.data.responseId
logger.info(
`[${tracker.requestId}] Received done event with responseId: ${responseIdFromDone}`
)
}
if (isFirstDone) {
logger.info(
`[${tracker.requestId}] Initial AI response complete, tool count: ${toolCalls.length}`
@@ -622,12 +654,15 @@ export async function POST(req: NextRequest) {
)
}
const responseId = responseIdFromDone || responseIdFromStart
// Update chat in database immediately (without title)
await db
.update(copilotChats)
.set({
messages: updatedMessages,
updatedAt: new Date(),
...(responseId ? { conversationId: responseId } : {}),
})
.where(eq(copilotChats.id, actualChatId!))
@@ -635,6 +670,7 @@ export async function POST(req: NextRequest) {
messageCount: updatedMessages.length,
savedUserMessage: true,
savedAssistantMessage: assistantContent.trim().length > 0,
updatedConversationId: responseId || null,
})
}
} catch (error) {

View File

@@ -51,12 +51,6 @@ export async function POST(req: NextRequest) {
const body = await req.json()
const { chatId, messages } = UpdateMessagesSchema.parse(body)
logger.info(`[${tracker.requestId}] Updating chat messages`, {
userId,
chatId,
messageCount: messages.length,
})
// Verify that the chat belongs to the user
const [chat] = await db
.select()

View File

@@ -38,7 +38,7 @@ async function updateToolCallStatus(
try {
const key = `tool_call:${toolCallId}`
const timeout = 60000 // 1 minute timeout
const timeout = 600000 // 10 minutes timeout for user confirmation
const pollInterval = 100 // Poll every 100ms
const startTime = Date.now()

View File

@@ -65,7 +65,7 @@ async function pollRedisForTool(
}
const key = `tool_call:${toolCallId}`
const timeout = 300000 // 5 minutes
const timeout = 600000 // 10 minutes for long-running operations
const pollInterval = 1000 // 1 second
const startTime = Date.now()

View File

@@ -365,6 +365,8 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
position: { x: number; y: number }
subBlocks?: Record<string, any>
data?: Record<string, any>
parentId?: string
extent?: string
}>
const edges = workflowState.edges
const warnings = conversionResult.warnings || []
@@ -395,6 +397,13 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
if (!blockConfig && (block.type === 'loop' || block.type === 'parallel')) {
// Handle loop/parallel blocks (they don't have regular block configs)
// Preserve parentId if it exists (though loop/parallel shouldn't have parents)
const containerData = block.data || {}
if (block.parentId) {
containerData.parentId = block.parentId
containerData.extent = block.extent || 'parent'
}
newWorkflowState.blocks[newId] = {
id: newId,
type: block.type,
@@ -407,7 +416,7 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
isWide: false,
advancedMode: false,
height: 0,
data: block.data || {},
data: containerData,
}
logger.debug(`[${requestId}] Processed loop/parallel block: ${block.id} -> ${newId}`)
} else if (blockConfig) {
@@ -440,6 +449,13 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
// Set up outputs from block configuration
const outputs = resolveOutputType(blockConfig.outputs)
// Preserve parentId if it exists in the imported block
const blockData = block.data || {}
if (block.parentId) {
blockData.parentId = block.parentId
blockData.extent = block.extent || 'parent'
}
newWorkflowState.blocks[newId] = {
id: newId,
type: block.type,
@@ -452,7 +468,7 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
isWide: false,
advancedMode: false,
height: 0,
data: block.data || {},
data: blockData,
}
logger.debug(`[${requestId}] Processed regular block: ${block.id} -> ${newId}`)
@@ -529,6 +545,7 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
}
}
// Debug: Log block parent-child relationships before generating loops
// Generate loop and parallel configurations
const loops = generateLoopBlocks(newWorkflowState.blocks)
const parallels = generateParallelBlocks(newWorkflowState.blocks)

View File

@@ -0,0 +1,210 @@
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console/logger'
import { getUserEntityPermissions } from '@/lib/permissions/utils'
import { simAgentClient } from '@/lib/sim-agent'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/db-helpers'
import { getAllBlocks } from '@/blocks/registry'
import type { BlockConfig } from '@/blocks/types'
import { resolveOutputType } from '@/blocks/utils'
import { db } from '@/db'
import { workflow } from '@/db/schema'
import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils'
const logger = createLogger('WorkflowYamlExportAPI')
// Get API key at module level like working routes
const SIM_AGENT_API_KEY = process.env.SIM_AGENT_API_KEY
/**
 * GET /api/workflows/yaml/export?workflowId=...
 *
 * Exports a workflow as YAML directly from the database. Prefers the
 * normalized block/edge tables; falls back to the legacy JSON-blob state.
 * The actual YAML serialization is delegated to the sim-agent service.
 */
export async function GET(request: NextRequest) {
  const requestId = crypto.randomUUID().slice(0, 8)
  const url = new URL(request.url)
  const workflowId = url.searchParams.get('workflowId')

  try {
    logger.info(`[${requestId}] Exporting workflow YAML from database: ${workflowId}`)

    if (!workflowId) {
      return NextResponse.json({ success: false, error: 'workflowId is required' }, { status: 400 })
    }

    // Authenticate the caller before touching any workflow data.
    const session = await getSession()
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthorized access attempt for workflow ${workflowId}`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }
    const userId = session.user.id

    // Load the workflow row from the database.
    const [workflowData] = await db.select().from(workflow).where(eq(workflow.id, workflowId))

    if (!workflowData) {
      logger.warn(`[${requestId}] Workflow ${workflowId} not found`)
      return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
    }

    // Access is granted when the user owns the workflow, or has any
    // permission on the workspace it belongs to.
    let hasAccess = workflowData.userId === userId
    if (!hasAccess && workflowData.workspaceId) {
      const userPermission = await getUserEntityPermissions(
        userId,
        'workspace',
        workflowData.workspaceId
      )
      hasAccess = userPermission !== null
    }

    if (!hasAccess) {
      logger.warn(`[${requestId}] User ${userId} denied access to workflow ${workflowId}`)
      return NextResponse.json({ error: 'Access denied' }, { status: 403 })
    }

    // Pull each subBlock's persisted `value` out of a blocks map. Used for
    // both the normalized-table path and the JSON-blob fallback below.
    const collectSubBlockValues = (
      blocks: Record<string, any>
    ): Record<string, Record<string, any>> => {
      const values: Record<string, Record<string, any>> = {}
      for (const [blockId, block] of Object.entries<any>(blocks)) {
        values[blockId] = {}
        for (const [subBlockId, subBlock] of Object.entries<any>(block.subBlocks ?? {})) {
          if (subBlock && typeof subBlock === 'object' && 'value' in subBlock) {
            values[blockId][subBlockId] = subBlock.value
          }
        }
      }
      return values
    }

    // Prefer the normalized tables; they are the source of truth when present.
    logger.debug(`[${requestId}] Attempting to load workflow ${workflowId} from normalized tables`)
    const normalizedData = await loadWorkflowFromNormalizedTables(workflowId)

    let workflowState: any
    let subBlockValues: Record<string, Record<string, any>> = {}

    if (normalizedData) {
      logger.debug(`[${requestId}] Found normalized data for workflow ${workflowId}:`, {
        blocksCount: Object.keys(normalizedData.blocks).length,
        edgesCount: normalizedData.edges.length,
      })

      // Reconstruct a complete state object: start from any existing JSON
      // state, then overlay the normalized blocks/edges/loops/parallels.
      const existingState =
        workflowData.state && typeof workflowData.state === 'object' ? workflowData.state : {}

      workflowState = {
        deploymentStatuses: {},
        hasActiveWebhook: false,
        ...existingState,
        blocks: normalizedData.blocks,
        edges: normalizedData.edges,
        loops: normalizedData.loops,
        parallels: normalizedData.parallels,
        lastSaved: Date.now(),
        isDeployed: workflowData.isDeployed || false,
        deployedAt: workflowData.deployedAt,
      }

      subBlockValues = collectSubBlockValues(normalizedData.blocks)

      logger.info(`[${requestId}] Loaded workflow ${workflowId} from normalized tables`)
    } else {
      // Fallback: legacy JSON blob stored on the workflow row.
      logger.info(
        `[${requestId}] Using JSON blob for workflow ${workflowId} - no normalized data found`
      )

      if (!workflowData.state || typeof workflowData.state !== 'object') {
        return NextResponse.json(
          { success: false, error: 'Workflow has no valid state data' },
          { status: 400 }
        )
      }

      workflowState = workflowData.state as any

      if (workflowState.blocks) {
        subBlockValues = collectSubBlockValues(workflowState.blocks)
      }
    }

    // Build the block registry the sim agent needs to serialize the workflow.
    const blockRegistry: Record<string, BlockConfig> = {}
    for (const block of getAllBlocks()) {
      const blockType = block.type
      blockRegistry[blockType] = {
        ...block,
        id: blockType,
        subBlocks: block.subBlocks || [],
        outputs: block.outputs || {},
      } as any
    }

    // Delegate YAML generation to the sim-agent service. Loop/parallel
    // generators are shipped as source so the agent can evaluate them.
    const result = await simAgentClient.makeRequest('/api/workflow/to-yaml', {
      body: {
        workflowState,
        subBlockValues,
        blockRegistry,
        utilities: {
          generateLoopBlocks: generateLoopBlocks.toString(),
          generateParallelBlocks: generateParallelBlocks.toString(),
          resolveOutputType: resolveOutputType.toString(),
        },
      },
      apiKey: SIM_AGENT_API_KEY,
    })

    if (!result.success || !result.data?.yaml) {
      return NextResponse.json(
        {
          success: false,
          error: result.error || 'Failed to generate YAML',
        },
        { status: result.status || 500 }
      )
    }

    logger.info(`[${requestId}] Successfully generated YAML from database`, {
      yamlLength: result.data.yaml.length,
    })

    return NextResponse.json({
      success: true,
      yaml: result.data.yaml,
    })
  } catch (error) {
    logger.error(`[${requestId}] YAML export failed`, error)
    return NextResponse.json(
      {
        success: false,
        error: `Failed to export YAML: ${error instanceof Error ? error.message : 'Unknown error'}`,
      },
      { status: 500 }
    )
  }
}

View File

@@ -62,9 +62,13 @@ const CreateDiffRequestSchema = z.object({
export async function POST(request: NextRequest) {
const requestId = crypto.randomUUID().slice(0, 8)
logger.info(`[${requestId}] ===== YAML DIFF CREATE API CALLED =====`)
try {
const body = await request.json()
logger.info(`[${requestId}] Request body received, parsing...`)
const { yamlContent, diffAnalysis, options } = CreateDiffRequestSchema.parse(body)
logger.info(`[${requestId}] Request parsed successfully`)
// Get current workflow state for comparison
// Note: This endpoint is stateless, so we need to get this from the request
@@ -151,6 +155,36 @@ export async function POST(request: NextRequest) {
// Log the full response to see if auto-layout is happening
logger.info(`[${requestId}] Full sim agent response:`, JSON.stringify(result, null, 2))
// Log detailed block information to debug parent-child relationships
if (result.success) {
const blocks = result.diff?.proposedState?.blocks || result.blocks || {}
logger.info(`[${requestId}] Sim agent blocks with parent-child info:`)
Object.entries(blocks).forEach(([blockId, block]: [string, any]) => {
if (block.data?.parentId || block.parentId) {
logger.info(`[${requestId}] Child block ${blockId} (${block.name}):`, {
type: block.type,
parentId: block.data?.parentId || block.parentId,
extent: block.data?.extent || block.extent,
hasDataField: !!block.data,
dataKeys: block.data ? Object.keys(block.data) : [],
})
}
if (block.type === 'loop' || block.type === 'parallel') {
logger.info(`[${requestId}] Container block ${blockId} (${block.name}):`, {
type: block.type,
hasData: !!block.data,
dataKeys: block.data ? Object.keys(block.data) : [],
})
}
})
// Log existing loops/parallels from sim-agent
const loops = result.diff?.proposedState?.loops || result.loops || {}
const parallels = result.diff?.proposedState?.parallels || result.parallels || {}
logger.info(`[${requestId}] Sim agent loops:`, loops)
logger.info(`[${requestId}] Sim agent parallels:`, parallels)
}
// Log diff analysis specifically
if (result.diff?.diffAnalysis) {
logger.info(`[${requestId}] Diff analysis received:`, {
@@ -164,19 +198,127 @@ export async function POST(request: NextRequest) {
logger.warn(`[${requestId}] No diff analysis in response!`)
}
// Post-process the result to ensure loops and parallels are properly generated
const finalResult = result
if (result.success && result.diff?.proposedState) {
// First, fix parent-child relationships based on edges
const blocks = result.diff.proposedState.blocks
const edges = result.diff.proposedState.edges || []
// Find all loop and parallel blocks
const containerBlocks = Object.values(blocks).filter(
(block: any) => block.type === 'loop' || block.type === 'parallel'
)
// For each container, find its children based on loop-start edges
containerBlocks.forEach((container: any) => {
// Log all edges from this container to debug
const allEdgesFromContainer = edges.filter((edge: any) => edge.source === container.id)
logger.info(
`[${requestId}] All edges from container ${container.id}:`,
allEdgesFromContainer.map((e: any) => ({
id: e.id,
sourceHandle: e.sourceHandle,
target: e.target,
}))
)
const childEdges = edges.filter(
(edge: any) => edge.source === container.id && edge.sourceHandle === 'loop-start-source'
)
childEdges.forEach((edge: any) => {
const childBlock = blocks[edge.target]
if (childBlock) {
// Ensure data field exists
if (!childBlock.data) {
childBlock.data = {}
}
// Set parentId and extent
childBlock.data.parentId = container.id
childBlock.data.extent = 'parent'
logger.info(`[${requestId}] Fixed parent-child relationship:`, {
parent: container.id,
parentName: container.name,
child: childBlock.id,
childName: childBlock.name,
})
}
})
})
// Now regenerate loops and parallels with the fixed relationships
const loops = generateLoopBlocks(result.diff.proposedState.blocks)
const parallels = generateParallelBlocks(result.diff.proposedState.blocks)
result.diff.proposedState.loops = loops
result.diff.proposedState.parallels = parallels
logger.info(`[${requestId}] Regenerated loops and parallels after fixing parent-child:`, {
loopsCount: Object.keys(loops).length,
parallelsCount: Object.keys(parallels).length,
loops: Object.keys(loops).map((id) => ({
id,
nodes: loops[id].nodes,
})),
})
}
// If the sim agent returned blocks directly (when auto-layout is applied),
// transform it to the expected diff format
if (result.success && result.blocks && !result.diff) {
logger.info(`[${requestId}] Transforming sim agent blocks response to diff format`)
// First, fix parent-child relationships based on edges
const blocks = result.blocks
const edges = result.edges || []
// Find all loop and parallel blocks
const containerBlocks = Object.values(blocks).filter(
(block: any) => block.type === 'loop' || block.type === 'parallel'
)
// For each container, find its children based on loop-start edges
containerBlocks.forEach((container: any) => {
const childEdges = edges.filter(
(edge: any) => edge.source === container.id && edge.sourceHandle === 'loop-start-source'
)
childEdges.forEach((edge: any) => {
const childBlock = blocks[edge.target]
if (childBlock) {
// Ensure data field exists
if (!childBlock.data) {
childBlock.data = {}
}
// Set parentId and extent
childBlock.data.parentId = container.id
childBlock.data.extent = 'parent'
logger.info(`[${requestId}] Fixed parent-child relationship (auto-layout):`, {
parent: container.id,
parentName: container.name,
child: childBlock.id,
childName: childBlock.name,
})
}
})
})
// Generate loops and parallels for the blocks with fixed relationships
const loops = generateLoopBlocks(result.blocks)
const parallels = generateParallelBlocks(result.blocks)
const transformedResult = {
success: result.success,
diff: {
proposedState: {
blocks: result.blocks,
edges: result.edges || [],
loops: result.loops || {},
parallels: result.parallels || {},
loops: loops,
parallels: parallels,
},
diffAnalysis: diffAnalysis,
metadata: result.metadata || {
@@ -190,7 +332,7 @@ export async function POST(request: NextRequest) {
return NextResponse.json(transformedResult)
}
return NextResponse.json(result)
return NextResponse.json(finalResult)
} catch (error) {
logger.error(`[${requestId}] Diff creation failed:`, error)

View File

@@ -126,19 +126,146 @@ export async function POST(request: NextRequest) {
// Log the full response to see if auto-layout is happening
logger.info(`[${requestId}] Full sim agent response:`, JSON.stringify(result, null, 2))
// Log detailed block information to debug parent-child relationships
if (result.success) {
const blocks = result.diff?.proposedState?.blocks || result.blocks || {}
logger.info(`[${requestId}] Sim agent blocks with parent-child info:`)
Object.entries(blocks).forEach(([blockId, block]: [string, any]) => {
if (block.data?.parentId || block.parentId) {
logger.info(`[${requestId}] Child block ${blockId} (${block.name}):`, {
type: block.type,
parentId: block.data?.parentId || block.parentId,
extent: block.data?.extent || block.extent,
hasDataField: !!block.data,
dataKeys: block.data ? Object.keys(block.data) : [],
})
}
if (block.type === 'loop' || block.type === 'parallel') {
logger.info(`[${requestId}] Container block ${blockId} (${block.name}):`, {
type: block.type,
hasData: !!block.data,
dataKeys: block.data ? Object.keys(block.data) : [],
})
}
})
// Log existing loops/parallels from sim-agent
const loops = result.diff?.proposedState?.loops || result.loops || {}
const parallels = result.diff?.proposedState?.parallels || result.parallels || {}
logger.info(`[${requestId}] Sim agent loops:`, loops)
logger.info(`[${requestId}] Sim agent parallels:`, parallels)
}
// Post-process the result to ensure loops and parallels are properly generated
const finalResult = result
if (result.success && result.diff?.proposedState) {
// First, fix parent-child relationships based on edges
const blocks = result.diff.proposedState.blocks
const edges = result.diff.proposedState.edges || []
// Find all loop and parallel blocks
const containerBlocks = Object.values(blocks).filter(
(block: any) => block.type === 'loop' || block.type === 'parallel'
)
// For each container, find its children based on loop-start edges
containerBlocks.forEach((container: any) => {
const childEdges = edges.filter(
(edge: any) => edge.source === container.id && edge.sourceHandle === 'loop-start-source'
)
childEdges.forEach((edge: any) => {
const childBlock = blocks[edge.target]
if (childBlock) {
// Ensure data field exists
if (!childBlock.data) {
childBlock.data = {}
}
// Set parentId and extent
childBlock.data.parentId = container.id
childBlock.data.extent = 'parent'
logger.info(`[${requestId}] Fixed parent-child relationship:`, {
parent: container.id,
parentName: container.name,
child: childBlock.id,
childName: childBlock.name,
})
}
})
})
// Now regenerate loops and parallels with the fixed relationships
const loops = generateLoopBlocks(result.diff.proposedState.blocks)
const parallels = generateParallelBlocks(result.diff.proposedState.blocks)
result.diff.proposedState.loops = loops
result.diff.proposedState.parallels = parallels
logger.info(`[${requestId}] Regenerated loops and parallels after fixing parent-child:`, {
loopsCount: Object.keys(loops).length,
parallelsCount: Object.keys(parallels).length,
loops: Object.keys(loops).map((id) => ({
id,
nodes: loops[id].nodes,
})),
})
}
// If the sim agent returned blocks directly (when auto-layout is applied),
// transform it to the expected diff format
if (result.success && result.blocks && !result.diff) {
logger.info(`[${requestId}] Transforming sim agent blocks response to diff format`)
// First, fix parent-child relationships based on edges
const blocks = result.blocks
const edges = result.edges || []
// Find all loop and parallel blocks
const containerBlocks = Object.values(blocks).filter(
(block: any) => block.type === 'loop' || block.type === 'parallel'
)
// For each container, find its children based on loop-start edges
containerBlocks.forEach((container: any) => {
const childEdges = edges.filter(
(edge: any) => edge.source === container.id && edge.sourceHandle === 'loop-start-source'
)
childEdges.forEach((edge: any) => {
const childBlock = blocks[edge.target]
if (childBlock) {
// Ensure data field exists
if (!childBlock.data) {
childBlock.data = {}
}
// Set parentId and extent
childBlock.data.parentId = container.id
childBlock.data.extent = 'parent'
logger.info(`[${requestId}] Fixed parent-child relationship (auto-layout):`, {
parent: container.id,
parentName: container.name,
child: childBlock.id,
childName: childBlock.name,
})
}
})
})
// Generate loops and parallels for the blocks with fixed relationships
const loops = generateLoopBlocks(result.blocks)
const parallels = generateParallelBlocks(result.blocks)
const transformedResult = {
success: result.success,
diff: {
proposedState: {
blocks: result.blocks,
edges: result.edges || existingDiff.proposedState.edges || [],
loops: result.loops || existingDiff.proposedState.loops || {},
parallels: result.parallels || existingDiff.proposedState.parallels || {},
loops: loops,
parallels: parallels,
},
diffAnalysis: diffAnalysis,
metadata: result.metadata || {
@@ -152,7 +279,7 @@ export async function POST(request: NextRequest) {
return NextResponse.json(transformedResult)
}
return NextResponse.json(result)
return NextResponse.json(finalResult)
} catch (error) {
logger.error(`[${requestId}] Diff merge failed:`, error)

View File

@@ -6,7 +6,6 @@ import { Button } from '@/components/ui/button'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
import { createLogger } from '@/lib/logs/console/logger'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import { useWorkflowYamlStore } from '@/stores/workflows/yaml/store'
const logger = createLogger('ExportControls')
@@ -17,7 +16,6 @@ interface ExportControlsProps {
export function ExportControls({ disabled = false }: ExportControlsProps) {
const [isExporting, setIsExporting] = useState(false)
const { workflows, activeWorkflowId } = useWorkflowRegistry()
const getYaml = useWorkflowYamlStore((state) => state.getYaml)
const currentWorkflow = activeWorkflowId ? workflows[activeWorkflowId] : null
@@ -45,11 +43,23 @@ export function ExportControls({ disabled = false }: ExportControlsProps) {
setIsExporting(true)
try {
const yamlContent = await getYaml()
const filename = `${currentWorkflow.name.replace(/[^a-z0-9]/gi, '-')}.yaml`
// Use the new database-based export endpoint
const response = await fetch(`/api/workflows/yaml/export?workflowId=${activeWorkflowId}`)
downloadFile(yamlContent, filename, 'text/yaml')
logger.info('Workflow exported as YAML')
if (!response.ok) {
const errorData = await response.json().catch(() => null)
throw new Error(errorData?.error || `Failed to export YAML: ${response.statusText}`)
}
const result = await response.json()
if (!result.success || !result.yaml) {
throw new Error(result.error || 'Failed to export YAML')
}
const filename = `${currentWorkflow.name.replace(/[^a-z0-9]/gi, '-')}.yaml`
downloadFile(result.yaml, filename, 'text/yaml')
logger.info('Workflow exported as YAML from database')
} catch (error) {
logger.error('Failed to export workflow as YAML:', error)
} finally {

View File

@@ -187,35 +187,33 @@ export function DiffControls() {
}
}
const handleAccept = () => {
logger.info('Accepting proposed changes (optimistic)')
const handleAccept = async () => {
logger.info('Accepting proposed changes with backup protection')
// Create checkpoint in the background (don't await to avoid blocking)
createCheckpoint()
.then((checkpointCreated) => {
if (!checkpointCreated) {
logger.warn('Checkpoint creation failed, but proceeding with accept')
} else {
logger.info('Checkpoint created successfully before accept')
}
})
.catch((error) => {
logger.error('Checkpoint creation failed:', error)
try {
// Clear preview YAML immediately
await clearPreviewYaml().catch((error) => {
logger.warn('Failed to clear preview YAML:', error)
})
// Clear preview YAML immediately
clearPreviewYaml().catch((error) => {
logger.warn('Failed to clear preview YAML:', error)
})
// Accept changes with automatic backup and rollback on failure
await acceptChanges()
// Start background save without awaiting
acceptChanges().catch((error) => {
logger.error('Failed to accept changes in background:', error)
// TODO: Consider showing a toast notification for save failures
// For now, the optimistic update stands since the UI state is already correct
})
logger.info('Successfully accepted and saved workflow changes')
// Show success feedback if needed
} catch (error) {
logger.error('Failed to accept changes:', error)
logger.info('Optimistically applied changes, saving in background')
// Show error notification to user
// Note: The acceptChanges function has already rolled back the state
const errorMessage = error instanceof Error ? error.message : 'Unknown error occurred'
// You could add toast notification here
console.error('Workflow update failed:', errorMessage)
// Optionally show user-facing error dialog
alert(`Failed to save workflow changes: ${errorMessage}`)
}
}
const handleReject = () => {

View File

@@ -39,6 +39,8 @@ if (typeof document !== 'undefined') {
-webkit-font-smoothing: antialiased !important;
-moz-osx-font-smoothing: grayscale !important;
text-rendering: optimizeLegibility !important;
max-width: 100% !important;
overflow: auto !important;
}
.dark .copilot-markdown-wrapper pre {
@@ -58,6 +60,24 @@ if (typeof document !== 'undefined') {
-moz-osx-font-smoothing: grayscale !important;
text-rendering: optimizeLegibility !important;
}
/* Prevent any markdown content from expanding beyond the panel */
.copilot-markdown-wrapper, .copilot-markdown-wrapper * {
max-width: 100% !important;
}
.copilot-markdown-wrapper p, .copilot-markdown-wrapper li {
overflow-wrap: anywhere !important;
word-break: break-word !important;
}
.copilot-markdown-wrapper a {
overflow-wrap: anywhere !important;
word-break: break-all !important;
}
.copilot-markdown-wrapper code:not(pre code) {
white-space: normal !important;
overflow-wrap: anywhere !important;
word-break: break-word !important;
}
`
document.head.appendChild(style)
}
@@ -70,7 +90,7 @@ function LinkWithPreview({ href, children }: { href: string; children: React.Rea
<TooltipTrigger asChild>
<a
href={href}
className='text-blue-600 hover:underline dark:text-blue-400'
className='inline break-all text-blue-600 hover:underline dark:text-blue-400'
target='_blank'
rel='noopener noreferrer'
>
@@ -257,7 +277,7 @@ export default function CopilotMarkdownRenderer({ content }: CopilotMarkdownRend
if (inline) {
return (
<code
className='rounded bg-gray-200 px-1 py-0.5 font-mono text-[0.9em] text-gray-800 dark:bg-gray-700 dark:text-gray-200'
className='whitespace-normal break-all rounded bg-gray-200 px-1 py-0.5 font-mono text-[0.9em] text-gray-800 dark:bg-gray-700 dark:text-gray-200'
{...props}
>
{children}

View File

@@ -0,0 +1,95 @@
'use client'
import { useEffect, useRef, useState } from 'react'
import { Brain } from 'lucide-react'
import { cn } from '@/lib/utils'
interface ThinkingBlockProps {
  content: string
  isStreaming?: boolean
  duration?: number // Persisted duration from content block
  startTime?: number // Persisted start time from content block
}

/**
 * Collapsible display of the assistant's "thinking" text.
 *
 * Renders a compact "Thought for Xs" button while collapsed; clicking it
 * expands the raw thinking content. While streaming (and no persisted
 * duration exists yet) the elapsed time ticks every 100ms; once a
 * persisted duration arrives it always takes precedence.
 */
export function ThinkingBlock({
  content,
  isStreaming = false,
  duration: persistedDuration,
  startTime: persistedStartTime,
}: ThinkingBlockProps) {
  const [expanded, setExpanded] = useState(false)
  const [elapsedMs, setElapsedMs] = useState(persistedDuration ?? 0)

  // Stable start-time reference; refreshed whenever the prop supplies one.
  const startedAtRef = useRef<number>(persistedStartTime ?? Date.now())

  useEffect(() => {
    if (typeof persistedStartTime === 'number') {
      startedAtRef.current = persistedStartTime
    }
  }, [persistedStartTime])

  useEffect(() => {
    // A persisted duration always wins over any live computation.
    if (typeof persistedDuration === 'number') {
      setElapsedMs(persistedDuration)
      return
    }
    if (!isStreaming) {
      // Not streaming and nothing persisted: freeze the final duration once.
      setElapsedMs(Date.now() - startedAtRef.current)
      return
    }
    // Streaming: tick the timer so the label stays live.
    const timer = setInterval(() => {
      setElapsedMs(Date.now() - startedAtRef.current)
    }, 100)
    return () => clearInterval(timer)
  }, [isStreaming, persistedDuration])

  // Millisecond values under one second stay in ms; otherwise one decimal of seconds.
  const formatDuration = (ms: number) => (ms < 1000 ? `${ms}ms` : `${(ms / 1000).toFixed(1)}s`)

  if (expanded) {
    return (
      <div className='my-1'>
        <button
          type='button'
          onClick={() => setExpanded(false)}
          className={cn(
            'mb-1 inline-flex items-center gap-1 text-gray-400 text-xs transition-colors hover:text-gray-500',
            'font-normal italic'
          )}
        >
          <Brain className='h-3 w-3' />
          <span>Thought for {formatDuration(elapsedMs)} (click to collapse)</span>
        </button>
        <div className='ml-1 border-gray-200 border-l-2 pl-2 dark:border-gray-700'>
          <pre className='whitespace-pre-wrap font-mono text-gray-400 text-xs dark:text-gray-500'>
            {content}
            {isStreaming && <span className='ml-1 inline-block h-2 w-1 animate-pulse bg-gray-400' />}
          </pre>
        </div>
      </div>
    )
  }

  return (
    <button
      type='button'
      onClick={() => setExpanded(true)}
      className={cn(
        'inline-flex items-center gap-1 text-gray-400 text-xs transition-colors hover:text-gray-500',
        'font-normal italic'
      )}
    >
      <Brain className='h-3 w-3' />
      <span>Thought for {formatDuration(elapsedMs)}</span>
      {isStreaming && (
        <span className='inline-flex h-1 w-1 animate-pulse rounded-full bg-gray-400' />
      )}
    </button>
  )
}

View File

@@ -18,6 +18,7 @@ import { usePreviewStore } from '@/stores/copilot/preview-store'
import { useCopilotStore } from '@/stores/copilot/store'
import type { CopilotMessage as CopilotMessageType } from '@/stores/copilot/types'
import CopilotMarkdownRenderer from './components/markdown-renderer'
import { ThinkingBlock } from './components/thinking-block'
const logger = createLogger('CopilotMessage')
@@ -574,7 +575,30 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
</div>
)
}
if (block.type === 'thinking') {
const isLastBlock = index === message.contentBlocks!.length - 1
// Consider the thinking block streaming if the overall message is streaming
// and the block has not been finalized with a duration yet. This avoids
// freezing the timer when new blocks are appended after the thinking block.
const isStreamingThinking = isStreaming && (block as any).duration == null
return (
<div key={`thinking-${index}-${block.timestamp || index}`} className='w-full'>
<ThinkingBlock
content={block.content}
isStreaming={isStreamingThinking}
duration={block.duration}
startTime={block.startTime}
/>
</div>
)
}
if (block.type === 'tool_call') {
// Skip hidden tools (like checkoff_todo)
if (block.toolCall.hidden) {
return null
}
return (
<div
key={`tool-${block.toolCall.id}`}
@@ -591,7 +615,7 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
if (isUser) {
return (
<div className='w-full py-2'>
<div className='w-full max-w-full overflow-hidden py-2'>
{/* File attachments displayed above the message, completely separate from message box width */}
{message.fileAttachments && message.fileAttachments.length > 0 && (
<div className='mb-1 flex justify-end'>
@@ -602,7 +626,7 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
)}
<div className='flex justify-end'>
<div className='max-w-[80%]'>
<div className='min-w-0 max-w-[80%]'>
{/* Message content in purple box */}
<div className='rounded-[10px] bg-[var(--brand-primary-hover-hex)]/[0.08] px-3 py-2'>
<div className='whitespace-pre-wrap break-words font-normal text-base text-foreground leading-relaxed'>
@@ -725,9 +749,9 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
href={citation.url}
target='_blank'
rel='noopener noreferrer'
className='inline-flex items-center rounded-md border bg-muted/50 px-2 py-1 text-muted-foreground text-xs transition-colors hover:bg-muted hover:text-foreground'
className='inline-flex max-w-full items-center rounded-md border bg-muted/50 px-2 py-1 text-muted-foreground text-xs transition-colors hover:bg-muted hover:text-foreground'
>
{citation.title}
<span className='truncate'>{citation.title}</span>
</a>
))}
</div>
@@ -757,7 +781,6 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
// For streaming messages, check if content actually changed
if (nextProps.isStreaming) {
// Compare contentBlocks length and lastUpdated for streaming messages
const prevBlocks = prevMessage.contentBlocks || []
const nextBlocks = nextMessage.contentBlocks || []
@@ -765,16 +788,37 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
return false // Content blocks changed
}
// Check if any text content changed in the last block
if (nextBlocks.length > 0) {
const prevLastBlock = prevBlocks[prevBlocks.length - 1]
const nextLastBlock = nextBlocks[nextBlocks.length - 1]
if (prevLastBlock?.type === 'text' && nextLastBlock?.type === 'text') {
if (prevLastBlock.content !== nextLastBlock.content) {
return false // Text content changed
// Helper: get last block content by type
const getLastBlockContent = (blocks: any[], type: 'text' | 'thinking'): string | null => {
for (let i = blocks.length - 1; i >= 0; i--) {
const block = blocks[i]
if (block && block.type === type) {
return (block as any).content ?? ''
}
}
return null
}
// Re-render if the last text block content changed
const prevLastTextContent = getLastBlockContent(prevBlocks as any[], 'text')
const nextLastTextContent = getLastBlockContent(nextBlocks as any[], 'text')
if (
prevLastTextContent !== null &&
nextLastTextContent !== null &&
prevLastTextContent !== nextLastTextContent
) {
return false
}
// Re-render if the last thinking block content changed
const prevLastThinkingContent = getLastBlockContent(prevBlocks as any[], 'thinking')
const nextLastThinkingContent = getLastBlockContent(nextBlocks as any[], 'thinking')
if (
prevLastThinkingContent !== null &&
nextLastThinkingContent !== null &&
prevLastThinkingContent !== nextLastThinkingContent
) {
return false
}
// Check if tool calls changed
@@ -785,14 +829,12 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
return false // Tool calls count changed
}
// Check if any tool call state changed
for (let i = 0; i < nextToolCalls.length; i++) {
if (prevToolCalls[i]?.state !== nextToolCalls[i]?.state) {
return false // Tool call state changed
}
}
// If we reach here, nothing meaningful changed during streaming
return true
}

View File

@@ -1,4 +1,5 @@
export * from './checkpoint-panel/checkpoint-panel'
export * from './copilot-message/copilot-message'
export * from './todo-list/todo-list'
export * from './user-input/user-input'
export * from './welcome/welcome'

View File

@@ -0,0 +1,132 @@
'use client'
import { memo, useEffect, useState } from 'react'
import { Check, ChevronDown, ChevronRight, ListTodo, Loader2, X } from 'lucide-react'
import { cn } from '@/lib/utils'
/** A single entry rendered by {@link TodoList}. */
export interface TodoItem {
  id: string
  content: string
  completed?: boolean // Renders a filled check and strikethrough text
  executing?: boolean // Renders a spinner; takes precedence over `completed`
}

interface TodoListProps {
  todos: TodoItem[]
  onClose?: () => void
  collapsed?: boolean // Initial/externally-driven collapsed state
  className?: string
}

/**
 * Collapsible checklist panel showing copilot plan progress.
 *
 * Displays a header with a collapse toggle, completion counter, and progress
 * bar; when expanded, lists each todo with a spinner (executing), a check
 * (completed), or an empty box. Renders nothing when `todos` is empty.
 * The `collapsed` prop re-syncs the internal collapsed state whenever it changes.
 */
export const TodoList = memo(function TodoList({
  todos,
  onClose,
  collapsed = false,
  className,
}: TodoListProps) {
  const [isCollapsed, setIsCollapsed] = useState(collapsed)

  // Sync collapsed prop with internal state
  useEffect(() => {
    setIsCollapsed(collapsed)
  }, [collapsed])

  if (!todos || todos.length === 0) {
    return null
  }

  const completedCount = todos.filter((todo) => todo.completed).length
  const totalCount = todos.length
  const progress = totalCount > 0 ? (completedCount / totalCount) * 100 : 0

  return (
    <div
      className={cn(
        'border-gray-200 border-t bg-white dark:border-gray-700 dark:bg-gray-900',
        className
      )}
    >
      {/* Header */}
      <div className='flex items-center justify-between border-gray-100 border-b px-3 py-2 dark:border-gray-800'>
        <div className='flex items-center gap-2'>
          {/* type='button' prevents accidental form submission inside a <form> */}
          <button
            type='button'
            onClick={() => setIsCollapsed(!isCollapsed)}
            className='rounded p-0.5 transition-colors hover:bg-gray-100 dark:hover:bg-gray-800'
            aria-label={isCollapsed ? 'Expand todo list' : 'Collapse todo list'}
          >
            {isCollapsed ? (
              <ChevronRight className='h-4 w-4 text-gray-500' />
            ) : (
              <ChevronDown className='h-4 w-4 text-gray-500' />
            )}
          </button>
          <ListTodo className='h-4 w-4 text-gray-500' />
          <span className='font-medium text-gray-700 text-xs dark:text-gray-300'>Todo List</span>
          <span className='text-gray-500 text-xs dark:text-gray-400'>
            {completedCount}/{totalCount}
          </span>
        </div>
        <div className='flex items-center gap-2'>
          {/* Progress bar */}
          <div className='h-1.5 w-24 overflow-hidden rounded-full bg-gray-200 dark:bg-gray-700'>
            <div
              className='h-full bg-blue-500 transition-all duration-300 ease-out'
              style={{ width: `${progress}%` }}
            />
          </div>
          {onClose && (
            <button
              type='button'
              onClick={onClose}
              className='rounded p-1 transition-colors hover:bg-gray-100 dark:hover:bg-gray-800'
              aria-label='Close todo list'
            >
              <X className='h-3.5 w-3.5 text-gray-400' />
            </button>
          )}
        </div>
      </div>

      {/* Todo items */}
      {!isCollapsed && (
        <div className='max-h-48 overflow-y-auto'>
          {todos.map((todo, index) => (
            <div
              key={todo.id}
              className={cn(
                'flex items-start gap-2 px-3 py-1.5 transition-colors hover:bg-gray-50 dark:hover:bg-gray-800/50',
                index !== todos.length - 1 && 'border-gray-50 border-b dark:border-gray-800'
              )}
            >
              <div
                className={cn(
                  'mt-0.5 flex h-4 w-4 flex-shrink-0 items-center justify-center rounded border transition-all',
                  todo.executing
                    ? 'border-blue-400 dark:border-blue-500'
                    : todo.completed
                      ? 'border-blue-500 bg-blue-500'
                      : 'border-gray-300 dark:border-gray-600'
                )}
              >
                {todo.executing ? (
                  <Loader2 className='h-3 w-3 animate-spin text-blue-500' />
                ) : todo.completed ? (
                  <Check className='h-3 w-3 text-white' strokeWidth={3} />
                ) : null}
              </div>
              <span
                className={cn(
                  'flex-1 text-xs leading-relaxed',
                  todo.completed ? 'text-gray-400 line-through' : 'text-gray-700 dark:text-gray-300'
                )}
              >
                {todo.content}
              </span>
            </div>
          ))}
        </div>
      )}
    </div>
  )
})

View File

@@ -10,6 +10,10 @@ import {
} from 'react'
import {
ArrowUp,
Boxes,
BrainCircuit,
BrainCog,
Check,
FileText,
Image,
Loader2,
@@ -17,9 +21,17 @@ import {
Package,
Paperclip,
X,
Zap,
} from 'lucide-react'
import { Button } from '@/components/ui/button'
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
} from '@/components/ui/dropdown-menu'
import { Textarea } from '@/components/ui/textarea'
import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from '@/components/ui/tooltip'
import { useSession } from '@/lib/auth-client'
import { cn } from '@/lib/utils'
import { useCopilotStore } from '@/stores/copilot/store'
@@ -394,18 +406,66 @@ const UserInput = forwardRef<UserInputRef, UserInputProps>(
const handleModeToggle = () => {
if (onModeChange) {
// Toggle between Ask and Agent
onModeChange(mode === 'ask' ? 'agent' : 'ask')
}
}
const getModeIcon = () => {
return mode === 'ask' ? (
<MessageCircle className='h-3 w-3 text-muted-foreground' />
) : (
<Package className='h-3 w-3 text-muted-foreground' />
)
if (mode === 'ask') {
return <MessageCircle className='h-3 w-3 text-muted-foreground' />
}
return <Package className='h-3 w-3 text-muted-foreground' />
}
const getModeText = () => {
if (mode === 'ask') {
return 'Ask'
}
return 'Agent'
}
// Depth toggle state comes from global store; access via useCopilotStore
const { agentDepth, setAgentDepth } = useCopilotStore()
const cycleDepth = () => {
// Allowed UI values: 0 (Lite), 1 (Default), 2 (Pro), 3 (Max)
const next = agentDepth === 0 ? 1 : agentDepth === 1 ? 2 : agentDepth === 2 ? 3 : 0
setAgentDepth(next)
}
const getDepthLabel = () => {
if (agentDepth === 0) return 'Lite'
if (agentDepth === 1) return 'Auto'
if (agentDepth === 2) return 'Pro'
return 'Max'
}
const getDepthLabelFor = (value: 0 | 1 | 2 | 3) => {
if (value === 0) return 'Lite'
if (value === 1) return 'Auto'
if (value === 2) return 'Pro'
return 'Max'
}
const getDepthDescription = (value: 0 | 1 | 2 | 3) => {
if (value === 0)
return 'Fastest and cheapest. Good for small edits, simple workflows, and small tasks.'
if (value === 1) return 'Automatically balances speed and reasoning. Good fit for most tasks.'
if (value === 2)
return 'More reasoning for larger workflows and complex edits, still balanced for speed.'
return 'Maximum reasoning power. Best for complex workflow building and debugging.'
}
const getDepthIconFor = (value: 0 | 1 | 2 | 3) => {
if (value === 0) return <Zap className='h-3 w-3 text-muted-foreground' />
if (value === 1) return <Boxes className='h-3 w-3 text-muted-foreground' />
if (value === 2) return <BrainCircuit className='h-3 w-3 text-muted-foreground' />
return <BrainCog className='h-3 w-3 text-muted-foreground' />
}
const getDepthIcon = () => getDepthIconFor(agentDepth)
return (
<div className={cn('relative flex-none pb-4', className)}>
<div
@@ -494,17 +554,213 @@ const UserInput = forwardRef<UserInputRef, UserInputProps>(
{/* Bottom Row: Mode Selector + Attach Button + Send Button */}
<div className='flex items-center justify-between'>
{/* Left side: Mode Selector */}
<Button
variant='ghost'
size='sm'
onClick={handleModeToggle}
disabled={!onModeChange}
className='flex h-6 items-center gap-1.5 rounded-full bg-secondary px-2 py-1 font-medium text-secondary-foreground text-xs hover:bg-secondary/80'
>
{getModeIcon()}
<span className='capitalize'>{mode}</span>
</Button>
{/* Left side: Mode Selector and Depth (if Agent) */}
<div className='flex items-center gap-1.5'>
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button
variant='ghost'
size='sm'
disabled={!onModeChange}
className='flex h-6 items-center gap-1.5 rounded-full border px-2 py-1 font-medium text-xs'
>
{getModeIcon()}
<span>{getModeText()}</span>
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align='start' className='p-0'>
<TooltipProvider>
<div className='w-[160px] p-1'>
<Tooltip>
<TooltipTrigger asChild>
<DropdownMenuItem
onSelect={() => onModeChange?.('ask')}
className={cn(
'flex items-center justify-between rounded-sm px-2 py-1.5 text-xs leading-4',
mode === 'ask' && 'bg-muted/40'
)}
>
<span className='flex items-center gap-1.5'>
<MessageCircle className='h-3 w-3 text-muted-foreground' />
Ask
</span>
{mode === 'ask' && <Check className='h-3 w-3 text-muted-foreground' />}
</DropdownMenuItem>
</TooltipTrigger>
<TooltipContent
side='right'
sideOffset={6}
align='center'
className='max-w-[220px] border bg-popover p-2 text-[11px] text-popover-foreground leading-snug shadow-md'
>
Ask mode can help answer questions about your workflow, tell you about
Sim, and guide you in building/editing.
</TooltipContent>
</Tooltip>
<Tooltip>
<TooltipTrigger asChild>
<DropdownMenuItem
onSelect={() => onModeChange?.('agent')}
className={cn(
'flex items-center justify-between rounded-sm px-2 py-1.5 text-xs leading-4',
mode === 'agent' && 'bg-muted/40'
)}
>
<span className='flex items-center gap-1.5'>
<Package className='h-3 w-3 text-muted-foreground' />
Agent
</span>
{mode === 'agent' && (
<Check className='h-3 w-3 text-muted-foreground' />
)}
</DropdownMenuItem>
</TooltipTrigger>
<TooltipContent
side='right'
sideOffset={6}
align='center'
className='max-w-[220px] border bg-popover p-2 text-[11px] text-popover-foreground leading-snug shadow-md'
>
Agent mode can build, edit, and interact with your workflows (Recommended)
</TooltipContent>
</Tooltip>
</div>
</TooltipProvider>
</DropdownMenuContent>
</DropdownMenu>
{
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button
variant='ghost'
size='sm'
className='flex h-6 items-center gap-1.5 rounded-full border px-2 py-1 font-medium text-xs'
title='Choose depth'
>
{getDepthIcon()}
<span>{getDepthLabel()}</span>
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align='start' className='p-0'>
<TooltipProvider>
<div className='w-[180px] p-1'>
<Tooltip>
<TooltipTrigger asChild>
<DropdownMenuItem
onSelect={() => setAgentDepth(1)}
className={cn(
'flex items-center justify-between rounded-sm px-2 py-1.5 text-xs leading-4',
agentDepth === 1 && 'bg-muted/40'
)}
>
<span className='flex items-center gap-1.5'>
<Boxes className='h-3 w-3 text-muted-foreground' />
Auto
</span>
{agentDepth === 1 && (
<Check className='h-3 w-3 text-muted-foreground' />
)}
</DropdownMenuItem>
</TooltipTrigger>
<TooltipContent
side='right'
sideOffset={6}
align='center'
className='max-w-[220px] border bg-popover p-2 text-[11px] text-popover-foreground leading-snug shadow-md'
>
Automatically balances speed and reasoning. Good fit for most tasks.
</TooltipContent>
</Tooltip>
<Tooltip>
<TooltipTrigger asChild>
<DropdownMenuItem
onSelect={() => setAgentDepth(0)}
className={cn(
'flex items-center justify-between rounded-sm px-2 py-1.5 text-xs leading-4',
agentDepth === 0 && 'bg-muted/40'
)}
>
<span className='flex items-center gap-1.5'>
<Zap className='h-3 w-3 text-muted-foreground' />
Lite
</span>
{agentDepth === 0 && (
<Check className='h-3 w-3 text-muted-foreground' />
)}
</DropdownMenuItem>
</TooltipTrigger>
<TooltipContent
side='right'
sideOffset={6}
align='center'
className='max-w-[220px] border bg-popover p-2 text-[11px] text-popover-foreground leading-snug shadow-md'
>
Fastest and cheapest. Good for small edits, simple workflows, and small
tasks.
</TooltipContent>
</Tooltip>
<Tooltip>
<TooltipTrigger asChild>
<DropdownMenuItem
onSelect={() => setAgentDepth(2)}
className={cn(
'flex items-center justify-between rounded-sm px-2 py-1.5 text-xs leading-4',
agentDepth === 2 && 'bg-muted/40'
)}
>
<span className='flex items-center gap-1.5'>
<BrainCircuit className='h-3 w-3 text-muted-foreground' />
Pro
</span>
{agentDepth === 2 && (
<Check className='h-3 w-3 text-muted-foreground' />
)}
</DropdownMenuItem>
</TooltipTrigger>
<TooltipContent
side='right'
sideOffset={6}
align='center'
className='max-w-[220px] border bg-popover p-2 text-[11px] text-popover-foreground leading-snug shadow-md'
>
More reasoning for larger workflows and complex edits, still balanced
for speed.
</TooltipContent>
</Tooltip>
<Tooltip>
<TooltipTrigger asChild>
<DropdownMenuItem
onSelect={() => setAgentDepth(3)}
className={cn(
'flex items-center justify-between rounded-sm px-2 py-1.5 text-xs leading-4',
agentDepth === 3 && 'bg-muted/40'
)}
>
<span className='flex items-center gap-1.5'>
<BrainCog className='h-3 w-3 text-muted-foreground' />
Max
</span>
{agentDepth === 3 && (
<Check className='h-3 w-3 text-muted-foreground' />
)}
</DropdownMenuItem>
</TooltipTrigger>
<TooltipContent
side='right'
sideOffset={6}
align='center'
className='max-w-[220px] border bg-popover p-2 text-[11px] text-popover-foreground leading-snug shadow-md'
>
Maximum reasoning power. Best for complex workflow building and
debugging.
</TooltipContent>
</Tooltip>
</div>
</TooltipProvider>
</DropdownMenuContent>
</DropdownMenu>
}
</div>
{/* Right side: Attach Button + Send Button */}
<div className='flex items-center gap-1'>

View File

@@ -16,8 +16,8 @@ export function CopilotWelcome({ onQuestionClick, mode = 'ask' }: CopilotWelcome
const agentQuestions = [
'Help me build a workflow',
'I want to edit my workflow',
'Build me a small sample workflow',
'Help me optimize my workflow',
'Help me debug my workflow',
]
const exampleQuestions = mode === 'ask' ? askQuestions : agentQuestions
@@ -35,7 +35,7 @@ export function CopilotWelcome({ onQuestionClick, mode = 'ask' }: CopilotWelcome
<p className='text-muted-foreground text-sm'>
{mode === 'ask'
? 'Ask me anything about your workflows, available tools, or how to get started.'
: 'I can help you build, edit, and create workflows. What would you like to do?'}
: 'I can help you build, edit, and optimize workflows. What would you like to do?'}
</p>
</div>
<div className='mx-auto max-w-sm space-y-3'>

View File

@@ -10,6 +10,7 @@ import {
CheckpointPanel,
CopilotMessage,
CopilotWelcome,
TodoList,
UserInput,
} from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components'
import type {
@@ -36,6 +37,7 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
const userInputRef = useRef<UserInputRef>(null)
const [showCheckpoints] = useState(false)
const [isInitialized, setIsInitialized] = useState(false)
const [todosCollapsed, setTodosCollapsed] = useState(false)
const lastWorkflowIdRef = useRef<string | null>(null)
const hasMountedRef = useRef(false)
@@ -56,6 +58,8 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
isAborting,
mode,
inputValue,
planTodos,
showPlanTodos,
sendMessage,
abortMessage,
createNewChat,
@@ -197,6 +201,37 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
}
}, [isInitialized, messages.length, scrollToBottom])
// Track previous sending state to detect when stream completes
const wasSendingRef = useRef(false)
// Auto-collapse todos and remove uncompleted ones when stream completes
useEffect(() => {
if (wasSendingRef.current && !isSendingMessage && showPlanTodos) {
// Stream just completed, collapse the todos and filter out uncompleted ones
setTodosCollapsed(true)
// Remove any uncompleted todos
const completedTodos = planTodos.filter((todo) => todo.completed === true)
if (completedTodos.length !== planTodos.length) {
// Only update if there are uncompleted todos to remove
const store = useCopilotStore.getState()
store.setPlanTodos(completedTodos)
}
}
wasSendingRef.current = isSendingMessage
}, [isSendingMessage, showPlanTodos, planTodos])
// Reset collapsed state when todos first appear
useEffect(() => {
if (showPlanTodos && planTodos.length > 0) {
// Check if this is the first time todos are showing
// (only expand if currently sending a message, meaning new todos are being created)
if (isSendingMessage) {
setTodosCollapsed(false)
}
}
}, [showPlanTodos, planTodos.length, isSendingMessage])
// Cleanup on component unmount (page refresh, navigation, etc.)
useEffect(() => {
return () => {
@@ -252,11 +287,26 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
[handleStartNewChat]
)
// Handle abort action
const handleAbort = useCallback(() => {
abortMessage()
// Collapse todos when aborting
if (showPlanTodos) {
setTodosCollapsed(true)
}
}, [abortMessage, showPlanTodos])
// Handle message submission
const handleSubmit = useCallback(
async (query: string, fileAttachments?: MessageFileAttachment[]) => {
if (!query || isSendingMessage || !activeWorkflowId) return
// Clear todos when sending a new message
if (showPlanTodos) {
const store = useCopilotStore.getState()
store.setPlanTodos([])
}
try {
await sendMessage(query, { stream: true, fileAttachments })
logger.info(
@@ -268,7 +318,7 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
logger.error('Failed to send message:', error)
}
},
[isSendingMessage, activeWorkflowId, sendMessage]
[isSendingMessage, activeWorkflowId, sendMessage, showPlanTodos]
)
return (
@@ -293,7 +343,10 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
<div className='w-full max-w-full space-y-1 overflow-hidden'>
{messages.length === 0 ? (
<div className='flex h-full items-center justify-center p-4'>
<CopilotWelcome onQuestionClick={handleSubmit} mode={mode} />
<CopilotWelcome
onQuestionClick={handleSubmit}
mode={mode === 'ask' ? 'ask' : 'agent'}
/>
</div>
) : (
messages.map((message) => (
@@ -326,12 +379,24 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
</div>
)}
{/* Todo list from plan tool */}
{!showCheckpoints && showPlanTodos && (
<TodoList
todos={planTodos}
collapsed={todosCollapsed}
onClose={() => {
const store = useCopilotStore.getState()
store.setPlanTodos([])
}}
/>
)}
{/* Input area with integrated mode selector */}
{!showCheckpoints && (
<UserInput
ref={userInputRef}
onSubmit={handleSubmit}
onAbort={abortMessage}
onAbort={handleAbort}
disabled={!activeWorkflowId}
isLoading={isSendingMessage}
isAborting={isAborting}

View File

@@ -71,21 +71,15 @@ export function generateFullWorkflowData() {
export async function exportWorkflow(format: EditorFormat): Promise<string> {
try {
if (format === 'yaml') {
// Use the YAML service for conversion
const workflowState = useWorkflowStore.getState()
const subBlockValues = getSubBlockValues()
// Get the active workflow ID from registry
const { activeWorkflowId } = useWorkflowRegistry.getState()
// Call the API route to generate YAML (server has access to API key)
const response = await fetch('/api/workflows/yaml/convert', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
workflowState,
subBlockValues,
}),
})
if (!activeWorkflowId) {
throw new Error('No active workflow to export')
}
// Call the new database-based export endpoint
const response = await fetch(`/api/workflows/yaml/export?workflowId=${activeWorkflowId}`)
if (!response.ok) {
const errorData = await response.json().catch(() => null)

View File

@@ -883,6 +883,11 @@ const WorkflowContent = React.memo(() => {
if (activeWorkflowId !== currentId) {
// Only reset variables when actually switching workflows
resetVariablesLoaded()
// Clear workflow diff store when switching workflows
const { clearDiff } = useWorkflowDiffStore.getState()
clearDiff()
setActiveWorkflow(currentId)
} else {
// Don't reset variables cache if we're not actually switching workflows

View File

@@ -11,6 +11,7 @@ import { generateFolderName } from '@/lib/naming'
import { cn } from '@/lib/utils'
import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider'
import { useFolderStore } from '@/stores/folders/store'
import { useWorkflowDiffStore } from '@/stores/workflow-diff/store'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import { parseWorkflowYaml } from '@/stores/workflows/yaml/importer'
@@ -143,6 +144,10 @@ export function CreateMenu({ onCreateWorkflow, isCreatingWorkflow = false }: Cre
return `Imported Workflow - ${new Date().toLocaleString()}`
}
// Clear workflow diff store when creating a new workflow from import
const { clearDiff } = useWorkflowDiffStore.getState()
clearDiff()
// Create a new workflow
const newWorkflowId = await createWorkflow({
name: getWorkflowName(),

View File

@@ -30,6 +30,7 @@ import {
getKeyboardShortcutText,
useGlobalShortcuts,
} from '@/app/workspace/[workspaceId]/w/hooks/use-keyboard-shortcuts'
import { useWorkflowDiffStore } from '@/stores/workflow-diff/store'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import type { WorkflowMetadata } from '@/stores/workflows/registry/types'
@@ -810,6 +811,11 @@ export function Sidebar() {
try {
setIsCreatingWorkflow(true)
// Clear workflow diff store when creating a new workflow
const { clearDiff } = useWorkflowDiffStore.getState()
clearDiff()
const id = await createWorkflow({
workspaceId: workspaceId || undefined,
folderId: folderId || undefined,

View File

@@ -73,16 +73,19 @@ export function ToolCallExecution({ toolCall, isCompact = false }: ToolCallProps
<Loader2 className='h-3 w-3 shrink-0 animate-spin' />
<span>Executing...</span>
</div>
{toolCall.parameters && Object.keys(toolCall.parameters).length > 0 && (
<div className='min-w-0 max-w-full rounded bg-amber-100 p-2 dark:bg-amber-900'>
<div className='mb-1 font-medium text-amber-800 text-xs dark:text-amber-200'>
Parameters:
{toolCall.parameters &&
Object.keys(toolCall.parameters).length > 0 &&
(toolCall.name === 'make_api_request' ||
toolCall.name === 'set_environment_variables') && (
<div className='min-w-0 max-w-full rounded bg-amber-100 p-2 dark:bg-amber-900'>
<div className='mb-1 font-medium text-amber-800 text-xs dark:text-amber-200'>
Parameters:
</div>
<div className='min-w-0 max-w-full break-all font-mono text-amber-700 text-xs dark:text-amber-300'>
{JSON.stringify(toolCall.parameters, null, 2)}
</div>
</div>
<div className='min-w-0 max-w-full break-all font-mono text-amber-700 text-xs dark:text-amber-300'>
{JSON.stringify(toolCall.parameters, null, 2)}
</div>
</div>
)}
)}
</div>
</CollapsibleContent>
</Collapsible>
@@ -178,34 +181,37 @@ export function ToolCallCompletion({ toolCall, isCompact = false }: ToolCallProp
</CollapsibleTrigger>
<CollapsibleContent className='min-w-0 max-w-full px-3 pb-3'>
<div className='min-w-0 max-w-full space-y-2'>
{toolCall.parameters && Object.keys(toolCall.parameters).length > 0 && (
<div
className={cn(
'min-w-0 max-w-full rounded p-2',
isSuccess && 'bg-green-100 dark:bg-green-900',
isError && 'bg-red-100 dark:bg-red-900'
)}
>
{toolCall.parameters &&
Object.keys(toolCall.parameters).length > 0 &&
(toolCall.name === 'make_api_request' ||
toolCall.name === 'set_environment_variables') && (
<div
className={cn(
'mb-1 font-medium text-xs',
isSuccess && 'text-green-800 dark:text-green-200',
isError && 'text-red-800 dark:text-red-200'
'min-w-0 max-w-full rounded p-2',
isSuccess && 'bg-green-100 dark:bg-green-900',
isError && 'bg-red-100 dark:bg-red-900'
)}
>
Parameters:
<div
className={cn(
'mb-1 font-medium text-xs',
isSuccess && 'text-green-800 dark:text-green-200',
isError && 'text-red-800 dark:text-red-200'
)}
>
Parameters:
</div>
<div
className={cn(
'min-w-0 max-w-full break-all font-mono text-xs',
isSuccess && 'text-green-700 dark:text-green-300',
isError && 'text-red-700 dark:text-red-300'
)}
>
{JSON.stringify(toolCall.parameters, null, 2)}
</div>
</div>
<div
className={cn(
'min-w-0 max-w-full break-all font-mono text-xs',
isSuccess && 'text-green-700 dark:text-green-300',
isError && 'text-red-700 dark:text-red-300'
)}
>
{JSON.stringify(toolCall.parameters, null, 2)}
</div>
</div>
)}
)}
{toolCall.error && (
<div className='min-w-0 max-w-full rounded bg-red-100 p-2 dark:bg-red-900'>

View File

@@ -0,0 +1 @@
ALTER TABLE "copilot_chats" ADD COLUMN "conversation_id" text;

File diff suppressed because it is too large Load Diff

View File

@@ -498,6 +498,13 @@
"when": 1754719531015,
"tag": "0071_free_sharon_carter",
"breakpoints": true
},
{
"idx": 72,
"version": "7",
"when": 1755281098957,
"tag": "0072_powerful_legion",
"breakpoints": true
}
]
}

View File

@@ -1007,6 +1007,7 @@ export const copilotChats = pgTable(
title: text('title'),
messages: jsonb('messages').notNull().default('[]'),
model: text('model').notNull().default('claude-3-7-sonnet-latest'),
conversationId: text('conversation_id'),
previewYaml: text('preview_yaml'), // YAML content for pending workflow preview
createdAt: timestamp('created_at').notNull().defaultNow(),
updatedAt: timestamp('updated_at').notNull().defaultNow(),

View File

@@ -57,6 +57,7 @@ export interface SendMessageRequest {
chatId?: string
workflowId?: string
mode?: 'ask' | 'agent'
depth?: 0 | 1 | 2 | 3
createNewChat?: boolean
stream?: boolean
implicitFeedback?: string

View File

@@ -0,0 +1,72 @@
import { BaseTool } from '@/lib/copilot/tools/base-tool'
import type {
CopilotToolCall,
ToolExecuteResult,
ToolExecutionOptions,
ToolMetadata,
} from '@/lib/copilot/tools/types'
/**
 * Client-side copilot tool that prompts the user to grant Google Drive file
 * access through the Drive picker UI. The picker itself is rendered by the
 * chat UI; this tool only tracks the request's lifecycle and reports the
 * outcome back to the server.
 */
export class GDriveRequestAccessTool extends BaseTool {
  static readonly id = 'gdrive_request_access'

  // Display states, schema, and interrupt behavior for the tool registry.
  metadata: ToolMetadata = {
    id: GDriveRequestAccessTool.id,
    displayConfig: {
      states: {
        pending: {
          displayName: 'Select Google Drive files',
          icon: 'googleDrive',
        },
        executing: {
          displayName: 'Requesting Google Drive access',
          icon: 'spinner',
        },
        accepted: {
          displayName: 'Requesting Google Drive access',
          icon: 'spinner',
        },
        success: {
          displayName: 'Selected Google Drive files',
          icon: 'googleDrive',
        },
        rejected: {
          displayName: 'Skipped Google Drive access request',
          icon: 'skip',
        },
        errored: {
          displayName: 'Failed to request Google Drive access',
          icon: 'error',
        },
      },
    },
    schema: {
      name: GDriveRequestAccessTool.id,
      description: 'Prompt the user to grant Google Drive file access via the picker',
      parameters: {
        type: 'object',
        properties: {
          // Accepts arbitrary context but no required params
        },
        required: [],
      },
    },
    // The user must act (pick files or skip) before the chat can continue.
    requiresInterrupt: true,
  }

  /**
   * Completes the access request after the user has gone through the picker.
   *
   * The picker handles any actual data transfer; this method only notifies
   * the server of completion and flips the local tool state to success.
   *
   * @param toolCall - The tool call being resolved.
   * @param options - Optional hooks; `onStateChange` is invoked with the
   *   final state when provided.
   * @returns Always a successful result with a confirmation message.
   */
  async execute(
    toolCall: CopilotToolCall,
    options?: ToolExecutionOptions
  ): Promise<ToolExecuteResult> {
    const finalState = 'success'
    await this.notify(toolCall.id, finalState, 'User completed Google Drive access picker')
    options?.onStateChange?.(finalState)

    const result: ToolExecuteResult = {
      success: true,
      data: {
        message: 'Google Drive access confirmed by user',
      },
    }
    return result
  }
}

View File

@@ -31,11 +31,11 @@ export class RunWorkflowTool extends BaseTool {
icon: 'play',
},
executing: {
displayName: 'Running workflow',
displayName: 'Executing workflow',
icon: 'spinner',
},
accepted: {
displayName: 'Running workflow',
displayName: 'Executing workflow',
icon: 'spinner',
},
success: {

View File

@@ -7,10 +7,14 @@
import { useState } from 'react'
import { Loader2 } from 'lucide-react'
import useDrivePicker from 'react-google-drive-picker'
import { GoogleDriveIcon } from '@/components/icons'
import { Button } from '@/components/ui/button'
import { Card, CardContent } from '@/components/ui/card'
import { notifyServerTool } from '@/lib/copilot/tools/notification-utils'
import { toolRegistry } from '@/lib/copilot/tools/registry'
import { renderToolStateIcon, toolRequiresInterrupt } from '@/lib/copilot/tools/utils'
import { getEnv } from '@/lib/env'
import { useCopilotStore } from '@/stores/copilot/store'
import type { CopilotToolCall } from '@/stores/copilot/types'
@@ -147,6 +151,7 @@ function RunSkipButtons({
const [isProcessing, setIsProcessing] = useState(false)
const [buttonsHidden, setButtonsHidden] = useState(false)
const { setToolCallState } = useCopilotStore()
const [openPicker] = useDrivePicker()
const handleRun = async () => {
setIsProcessing(true)
@@ -174,19 +179,48 @@ function RunSkipButtons({
}
}
const handleSkip = async () => {
setIsProcessing(true)
setButtonsHidden(true) // Hide run/skip buttons immediately
const handleOpenDriveAccess = async () => {
try {
await rejectTool(toolCall, setToolCallState)
const providerId = 'google-drive'
const credsRes = await fetch(`/api/auth/oauth/credentials?provider=${providerId}`)
if (!credsRes.ok) return
const credsData = await credsRes.json()
const creds = Array.isArray(credsData.credentials) ? credsData.credentials : []
if (creds.length === 0) return
const defaultCred = creds.find((c: any) => c.isDefault) || creds[0]
// Trigger re-render by calling onStateChange if provided
onStateChange?.(toolCall.state)
} catch (error) {
console.error('Error handling skip action:', error)
} finally {
setIsProcessing(false)
const tokenRes = await fetch('/api/auth/oauth/token', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ credentialId: defaultCred.id }),
})
if (!tokenRes.ok) return
const { accessToken } = await tokenRes.json()
if (!accessToken) return
const clientId = getEnv('NEXT_PUBLIC_GOOGLE_CLIENT_ID') || ''
const apiKey = getEnv('NEXT_PUBLIC_GOOGLE_API_KEY') || ''
const projectNumber = getEnv('NEXT_PUBLIC_GOOGLE_PROJECT_NUMBER') || ''
openPicker({
clientId,
developerKey: apiKey,
viewId: 'DOCS',
token: accessToken,
showUploadView: true,
showUploadFolders: true,
supportDrives: true,
multiselect: false,
appId: projectNumber,
setSelectFolderEnabled: false,
callbackFunction: async (data) => {
if (data.action === 'picked') {
await handleRun()
}
},
})
} catch (e) {
console.error('Failed to open Google Drive picker', e)
}
}
@@ -195,6 +229,34 @@ function RunSkipButtons({
return null
}
// Special inline UI for Google Drive access request
if (toolCall.name === 'gdrive_request_access' && toolCall.state === 'pending') {
return (
<div className='flex items-center gap-2'>
<Button
onClick={handleOpenDriveAccess}
size='sm'
className='h-6 bg-gray-900 px-2 font-medium text-white text-xs hover:bg-gray-800 disabled:opacity-50 dark:bg-gray-100 dark:text-gray-900 dark:hover:bg-gray-200'
title='Grant Google Drive access'
>
<GoogleDriveIcon className='mr-0.5 h-4 w-4' />
Select
</Button>
<Button
onClick={async () => {
setButtonsHidden(true)
await rejectTool(toolCall, setToolCallState)
onStateChange?.(toolCall.state)
}}
size='sm'
className='h-6 bg-gray-200 px-2 font-medium text-gray-700 text-xs hover:bg-gray-300 disabled:opacity-50 dark:bg-gray-700 dark:text-gray-300 dark:hover:bg-gray-600'
>
Skip
</Button>
</div>
)
}
// Default run/skip buttons
return (
<div className='flex items-center gap-1.5'>
@@ -208,7 +270,11 @@ function RunSkipButtons({
Run
</Button>
<Button
onClick={handleSkip}
onClick={async () => {
setButtonsHidden(true)
await rejectTool(toolCall, setToolCallState)
onStateChange?.(toolCall.state)
}}
disabled={isProcessing}
size='sm'
className='h-6 bg-gray-200 px-2 font-medium text-gray-700 text-xs hover:bg-gray-300 disabled:opacity-50 dark:bg-gray-700 dark:text-gray-300 dark:hover:bg-gray-600'
@@ -223,6 +289,14 @@ export function InlineToolCall({ toolCall, onStateChange, context }: InlineToolC
const [, forceUpdate] = useState({})
const { setToolCallState } = useCopilotStore()
const isExpandablePending =
toolCall.state === 'pending' &&
(toolCall.name === 'make_api_request' || toolCall.name === 'set_environment_variables')
const [expanded, setExpanded] = useState(isExpandablePending)
const isExpandableTool =
toolCall.name === 'make_api_request' || toolCall.name === 'set_environment_variables'
if (!toolCall) {
return null
}
@@ -243,50 +317,155 @@ export function InlineToolCall({ toolCall, onStateChange, context }: InlineToolC
const displayName = getToolDisplayNameByState(toolCall)
const params = (toolCall.parameters || toolCall.input || {}) as Record<string, any>
const Chip = ({
children,
color = 'gray',
}: {
children: any
color?: 'gray' | 'green' | 'blue' | 'yellow'
}) => (
<span
className={
'inline-flex items-center rounded px-1.5 py-0.5 font-semibold text-[10px] ' +
(color === 'green'
? 'bg-emerald-100 text-emerald-700'
: color === 'blue'
? 'bg-blue-100 text-blue-700'
: color === 'yellow'
? 'bg-amber-100 text-amber-700'
: 'bg-gray-100 text-gray-700')
}
>
{children}
</span>
)
const KeyVal = ({ k, v }: { k: string; v: any }) => (
<div className='flex items-start justify-between gap-2'>
<span className='min-w-[110px] shrink-0 truncate font-medium text-[11px] text-muted-foreground'>
{k}
</span>
<span className='w-full overflow-hidden font-mono text-[11px] text-foreground'>
{String(v)}
</span>
</div>
)
const Section = ({ title, children }: { title: string; children: any }) => (
<Card className='mt-1.5'>
<CardContent className='p-3'>
<div className='mb-1 font-medium text-[11px] text-muted-foreground uppercase tracking-wide'>
{title}
</div>
{children}
</CardContent>
</Card>
)
const renderPendingDetails = () => {
if (toolCall.name === 'make_api_request') {
const url = params.url || ''
const method = (params.method || '').toUpperCase()
const methodColor = method === 'GET' ? 'green' : method === 'POST' ? 'blue' : 'yellow'
return (
<div className='mt-0.5 flex items-center gap-2'>
<Chip color={methodColor as any}>{method || 'METHOD'}</Chip>
<span className='truncate text-foreground text-xs' title={url}>
{url || 'URL not provided'}
</span>
</div>
)
}
if (toolCall.name === 'set_environment_variables') {
const variables =
params.variables && typeof params.variables === 'object' ? params.variables : {}
const entries = Object.entries(variables)
return (
<div className='mt-0.5'>
{entries.length === 0 ? (
<span className='text-muted-foreground text-xs'>No variables provided</span>
) : (
<div className='space-y-0.5'>
{entries.map(([k, v]) => (
<div key={k} className='flex items-center gap-0.5'>
<span className='font-medium text-muted-foreground text-xs'>{k}</span>
<span className='mx-1 font-medium text-muted-foreground text-xs'>:</span>
<span className='truncate font-medium text-foreground text-xs'>{String(v)}</span>
</div>
))}
</div>
)}
</div>
)
}
return null
}
return (
<div className='flex items-center justify-between gap-2 py-1'>
<div className='flex items-center gap-2 text-muted-foreground'>
<div className='flex-shrink-0'>{renderToolStateIcon(toolCall, 'h-3 w-3')}</div>
<span className='text-base'>{displayName}</span>
<div className='flex w-full flex-col gap-1 py-1'>
<div
className={`flex items-center justify-between gap-2 ${
isExpandableTool ? 'cursor-pointer' : ''
}`}
onClick={() => {
if (isExpandableTool) setExpanded((e) => !e)
}}
>
<div className='flex items-center gap-2 text-muted-foreground'>
<div className='flex-shrink-0'>{renderToolStateIcon(toolCall, 'h-3 w-3')}</div>
<span className='text-base'>{displayName}</span>
</div>
{showButtons && (
<RunSkipButtons toolCall={toolCall} onStateChange={handleStateChange} context={context} />
)}
{showBackgroundButton && (
<div className='flex items-center gap-1.5'>
<Button
onClick={async () => {
try {
// Set tool state to background
setToolCallState(toolCall, 'background')
// Notify the backend about background state with execution start time if available
const executionStartTime = context?.executionStartTime
await notifyServerTool(
toolCall.id,
toolCall.name,
'background',
executionStartTime
)
// Track that this tool was moved to background
if (context) {
if (!context.movedToBackgroundToolIds) {
context.movedToBackgroundToolIds = new Set()
}
context.movedToBackgroundToolIds.add(toolCall.id)
}
// Trigger re-render
onStateChange?.(toolCall.state)
} catch (error) {
console.error('Error moving to background:', error)
}
}}
size='sm'
className='h-6 bg-blue-600 px-2 font-medium text-white text-xs hover:bg-blue-700'
>
Move to Background
</Button>
</div>
)}
</div>
{showButtons && (
<RunSkipButtons toolCall={toolCall} onStateChange={handleStateChange} context={context} />
)}
{showBackgroundButton && (
<div className='flex items-center gap-1.5'>
<Button
onClick={async () => {
try {
// Set tool state to background
setToolCallState(toolCall, 'background')
// Notify the backend about background state with execution start time if available
const executionStartTime = context?.executionStartTime
await notifyServerTool(toolCall.id, toolCall.name, 'background', executionStartTime)
// Track that this tool was moved to background
if (context) {
if (!context.movedToBackgroundToolIds) {
context.movedToBackgroundToolIds = new Set()
}
context.movedToBackgroundToolIds.add(toolCall.id)
}
// Trigger re-render
onStateChange?.(toolCall.state)
} catch (error) {
console.error('Error moving to background:', error)
}
}}
size='sm'
className='h-6 bg-blue-600 px-2 font-medium text-white text-xs hover:bg-blue-700'
>
Move to Background
</Button>
</div>
)}
{isExpandableTool && expanded && <div className='pr-1 pl-5'>{renderPendingDetails()}</div>}
</div>
)
}

View File

@@ -8,6 +8,7 @@
* It also provides metadata for server-side tools for display purposes
*/
import { GDriveRequestAccessTool } from '@/lib/copilot/tools/client-tools/gdrive-request-access'
import { GetUserWorkflowTool } from '@/lib/copilot/tools/client-tools/get-user-workflow'
import { RunWorkflowTool } from '@/lib/copilot/tools/client-tools/run-workflow'
import { SERVER_TOOL_METADATA } from '@/lib/copilot/tools/server-tools/definitions'
@@ -114,6 +115,7 @@ export class ToolRegistry {
// Register actual client tool implementations
this.register(new RunWorkflowTool())
this.register(new GetUserWorkflowTool())
this.register(new GDriveRequestAccessTool())
}
}

View File

@@ -20,6 +20,13 @@ export const SERVER_TOOL_IDS = {
SET_ENVIRONMENT_VARIABLES: 'set_environment_variables',
GET_WORKFLOW_CONSOLE: 'get_workflow_console',
SEARCH_ONLINE: 'search_online',
PLAN: 'plan',
REASON: 'reason',
GET_BLOCK_BEST_PRACTICES: 'get_block_best_practices',
LIST_GDRIVE_FILES: 'list_gdrive_files',
GET_OAUTH_CREDENTIALS: 'get_oauth_credentials',
READ_GDRIVE_FILE: 'read_gdrive_file',
MAKE_API_REQUEST: 'make_api_request',
} as const
export type ServerToolId = (typeof SERVER_TOOL_IDS)[keyof typeof SERVER_TOOL_IDS]
@@ -51,10 +58,10 @@ export const SERVER_TOOL_METADATA: Record<ServerToolId, ToolMetadata> = {
id: SERVER_TOOL_IDS.GET_USER_WORKFLOW,
displayConfig: {
states: {
executing: { displayName: 'Analyzing workflow', icon: 'spinner' },
success: { displayName: 'Analyzed workflow', icon: 'workflow' },
rejected: { displayName: 'Skipped workflow analysis', icon: 'skip' },
errored: { displayName: 'Failed to analyze workflow', icon: 'error' },
executing: { displayName: 'Analyzing your workflow', icon: 'spinner' },
success: { displayName: 'Analyzed your workflow', icon: 'workflow' },
rejected: { displayName: 'Skipped analyzing your workflow', icon: 'skip' },
errored: { displayName: 'Failed to analyze your workflow', icon: 'error' },
aborted: { displayName: 'Workflow analysis aborted', icon: 'x' },
},
},
@@ -69,13 +76,13 @@ export const SERVER_TOOL_METADATA: Record<ServerToolId, ToolMetadata> = {
id: SERVER_TOOL_IDS.BUILD_WORKFLOW,
displayConfig: {
states: {
ready_for_review: { displayName: 'Workflow ready for review', icon: 'network' },
executing: { displayName: 'Building workflow', icon: 'spinner' },
success: { displayName: 'Built workflow', icon: 'network' },
rejected: { displayName: 'Workflow changes not applied', icon: 'skip' },
errored: { displayName: 'Failed to build workflow', icon: 'error' },
ready_for_review: { displayName: 'Workflow ready for review', icon: 'grid2x2' },
executing: { displayName: 'Building your workflow', icon: 'spinner' },
success: { displayName: 'Built your workflow', icon: 'grid2x2' },
rejected: { displayName: 'Workflow changes not applied', icon: 'grid2x2X' },
errored: { displayName: 'Failed to build your workflow', icon: 'error' },
aborted: { displayName: 'Workflow build aborted', icon: 'x' },
accepted: { displayName: 'Built workflow', icon: 'network' },
accepted: { displayName: 'Built your workflow', icon: 'grid2x2Check' },
},
},
schema: {
@@ -89,13 +96,13 @@ export const SERVER_TOOL_METADATA: Record<ServerToolId, ToolMetadata> = {
id: SERVER_TOOL_IDS.EDIT_WORKFLOW,
displayConfig: {
states: {
ready_for_review: { displayName: 'Workflow changes ready for review', icon: 'network' },
executing: { displayName: 'Editing workflow', icon: 'spinner' },
success: { displayName: 'Edited workflow', icon: 'network' },
rejected: { displayName: 'Workflow changes not applied', icon: 'skip' },
errored: { displayName: 'Failed to edit workflow', icon: 'error' },
ready_for_review: { displayName: 'Workflow changes ready for review', icon: 'grid2x2' },
executing: { displayName: 'Editing your workflow', icon: 'spinner' },
success: { displayName: 'Edited your workflow', icon: 'grid2x2' },
rejected: { displayName: 'Workflow changes not applied', icon: 'grid2x2X' },
errored: { displayName: 'Failed to edit your workflow', icon: 'error' },
aborted: { displayName: 'Workflow edit aborted', icon: 'x' },
accepted: { displayName: 'Edited workflow', icon: 'network' },
accepted: { displayName: 'Edited your workflow', icon: 'grid2x2Check' },
},
},
schema: {
@@ -109,11 +116,11 @@ export const SERVER_TOOL_METADATA: Record<ServerToolId, ToolMetadata> = {
id: SERVER_TOOL_IDS.GET_BLOCKS_AND_TOOLS,
displayConfig: {
states: {
executing: { displayName: 'Getting block information', icon: 'spinner' },
success: { displayName: 'Retrieved block information', icon: 'blocks' },
rejected: { displayName: 'Skipped getting block information', icon: 'skip' },
errored: { displayName: 'Failed to get block information', icon: 'error' },
aborted: { displayName: 'Block information retrieval aborted', icon: 'x' },
executing: { displayName: 'Exploring available options', icon: 'spinner' },
success: { displayName: 'Explored available options', icon: 'blocks' },
rejected: { displayName: 'Skipped exploring options', icon: 'skip' },
errored: { displayName: 'Failed to explore options', icon: 'error' },
aborted: { displayName: 'Options exploration aborted', icon: 'x' },
},
},
schema: {
@@ -127,11 +134,11 @@ export const SERVER_TOOL_METADATA: Record<ServerToolId, ToolMetadata> = {
id: SERVER_TOOL_IDS.GET_BLOCKS_METADATA,
displayConfig: {
states: {
executing: { displayName: 'Getting block metadata', icon: 'spinner' },
success: { displayName: 'Retrieved block metadata', icon: 'blocks' },
rejected: { displayName: 'Skipped getting block metadata', icon: 'skip' },
errored: { displayName: 'Failed to get block metadata', icon: 'error' },
aborted: { displayName: 'Block metadata retrieval aborted', icon: 'x' },
executing: { displayName: 'Evaluating workflow options', icon: 'spinner' },
success: { displayName: 'Evaluated workflow options', icon: 'betweenHorizontalEnd' },
rejected: { displayName: 'Skipped evaluating workflow options', icon: 'skip' },
errored: { displayName: 'Failed to evaluate workflow options', icon: 'error' },
aborted: { displayName: 'Options evaluation aborted', icon: 'x' },
},
},
schema: {
@@ -163,11 +170,11 @@ export const SERVER_TOOL_METADATA: Record<ServerToolId, ToolMetadata> = {
id: SERVER_TOOL_IDS.GET_EDIT_WORKFLOW_EXAMPLES,
displayConfig: {
states: {
executing: { displayName: 'Viewing workflow examples', icon: 'spinner' },
success: { displayName: 'Viewed workflow examples', icon: 'gitbranch' },
rejected: { displayName: 'Skipped workflow examples', icon: 'skip' },
errored: { displayName: 'Failed to view workflow examples', icon: 'error' },
aborted: { displayName: 'Workflow examples viewing aborted', icon: 'x' },
executing: { displayName: 'Optimizing edit approach', icon: 'spinner' },
success: { displayName: 'Optimized edit approach', icon: 'gitbranch' },
rejected: { displayName: 'Skipped optimizing edit approach', icon: 'skip' },
errored: { displayName: 'Failed to optimize edit approach', icon: 'error' },
aborted: { displayName: 'Edit approach optimization aborted', icon: 'x' },
},
},
schema: {
@@ -181,11 +188,11 @@ export const SERVER_TOOL_METADATA: Record<ServerToolId, ToolMetadata> = {
id: SERVER_TOOL_IDS.GET_BUILD_WORKFLOW_EXAMPLES,
displayConfig: {
states: {
executing: { displayName: 'Viewing workflow examples', icon: 'spinner' },
success: { displayName: 'Viewed workflow examples', icon: 'gitbranch' },
rejected: { displayName: 'Skipped workflow examples', icon: 'skip' },
errored: { displayName: 'Failed to view workflow examples', icon: 'error' },
aborted: { displayName: 'Workflow examples viewing aborted', icon: 'x' },
executing: { displayName: 'Discovering workflow patterns', icon: 'spinner' },
success: { displayName: 'Discovered workflow patterns', icon: 'gitbranch' },
rejected: { displayName: 'Skipped discovering patterns', icon: 'skip' },
errored: { displayName: 'Failed to discover patterns', icon: 'error' },
aborted: { displayName: 'Discovering patterns aborted', icon: 'x' },
},
},
schema: {
@@ -224,6 +231,19 @@ export const SERVER_TOOL_METADATA: Record<ServerToolId, ToolMetadata> = {
errored: { displayName: 'Failed to set environment variables', icon: 'error' },
aborted: { displayName: 'Environment variables setting aborted', icon: 'x' },
},
getDynamicDisplayName: (state, params) => {
try {
const vars =
params?.variables && typeof params.variables === 'object' ? params.variables : null
if (!vars) return null
const count = Object.keys(vars).length
if (count === 0) return null
const base = state === 'executing' ? 'Setting' : state === 'success' ? 'Set' : 'Set'
return `${base} ${count} environment ${count === 1 ? 'variable' : 'variables'}`
} catch {
return null
}
},
},
schema: {
name: SERVER_TOOL_IDS.SET_ENVIRONMENT_VARIABLES,
@@ -280,4 +300,195 @@ export const SERVER_TOOL_METADATA: Record<ServerToolId, ToolMetadata> = {
},
requiresInterrupt: false,
},
[SERVER_TOOL_IDS.PLAN]: {
id: SERVER_TOOL_IDS.PLAN,
displayConfig: {
states: {
executing: { displayName: 'Crafting an approach', icon: 'spinner' },
success: { displayName: 'Crafted a plan', icon: 'listTodo' },
rejected: { displayName: 'Skipped crafting a plan', icon: 'skip' },
errored: { displayName: 'Failed to craft a plan', icon: 'error' },
aborted: { displayName: 'Crafting a plan aborted', icon: 'x' },
},
},
schema: {
name: SERVER_TOOL_IDS.PLAN,
description: 'Plan the approach to solve a problem',
},
requiresInterrupt: false,
},
[SERVER_TOOL_IDS.REASON]: {
id: SERVER_TOOL_IDS.REASON,
displayConfig: {
states: {
executing: { displayName: 'Designing an approach', icon: 'spinner' },
success: { displayName: 'Designed an approach', icon: 'brain' },
rejected: { displayName: 'Skipped reasoning', icon: 'skip' },
errored: { displayName: 'Failed to design an approach', icon: 'error' },
aborted: { displayName: 'Reasoning aborted', icon: 'x' },
},
},
schema: {
name: SERVER_TOOL_IDS.REASON,
description: 'Reason through a complex problem',
},
requiresInterrupt: false,
},
[SERVER_TOOL_IDS.GET_BLOCK_BEST_PRACTICES]: {
id: SERVER_TOOL_IDS.GET_BLOCK_BEST_PRACTICES,
displayConfig: {
states: {
executing: { displayName: 'Reviewing recommendations', icon: 'spinner' },
success: { displayName: 'Reviewed recommendations', icon: 'network' },
rejected: { displayName: 'Skipped recommendations review', icon: 'skip' },
errored: { displayName: 'Failed to review recommendations', icon: 'error' },
aborted: { displayName: 'Recommendations review aborted', icon: 'x' },
},
},
schema: {
name: SERVER_TOOL_IDS.GET_BLOCK_BEST_PRACTICES,
description: 'Get best practices and usage guidelines for workflow blocks and tools',
parameters: {
type: 'object',
properties: {
block_types: {
type: 'array',
items: { type: 'string' },
description:
'Optional list of specific block types to get best practices for (e.g., "llm", "function", "loop")',
},
category: {
type: 'string',
description: 'Optional category filter (e.g., "performance", "security", "debugging")',
},
},
required: [],
},
},
requiresInterrupt: false,
},
[SERVER_TOOL_IDS.LIST_GDRIVE_FILES]: {
id: SERVER_TOOL_IDS.LIST_GDRIVE_FILES,
displayConfig: {
states: {
executing: { displayName: 'Listing Google Drive files', icon: 'spinner' },
success: { displayName: 'Listed Google Drive files', icon: 'file' },
rejected: { displayName: 'Skipped listing Google Drive files', icon: 'skip' },
errored: { displayName: 'Failed to list Google Drive files', icon: 'error' },
aborted: { displayName: 'Listing Google Drive files aborted', icon: 'x' },
},
},
schema: {
name: SERVER_TOOL_IDS.LIST_GDRIVE_FILES,
description: "List files from the user's Google Drive",
parameters: {
type: 'object',
properties: {
userId: { type: 'string', description: 'The user ID' },
search_query: { type: 'string', description: 'Optional search query' },
num_results: { type: 'number', description: 'Optional number of results to return' },
},
required: ['userId'],
},
},
requiresInterrupt: false,
},
[SERVER_TOOL_IDS.GET_OAUTH_CREDENTIALS]: {
id: SERVER_TOOL_IDS.GET_OAUTH_CREDENTIALS,
displayConfig: {
states: {
executing: { displayName: 'Retrieving OAuth credentials', icon: 'spinner' },
success: { displayName: 'Retrieved OAuth credentials', icon: 'key' },
rejected: { displayName: 'Skipped retrieving OAuth credentials', icon: 'skip' },
errored: { displayName: 'Failed to retrieve OAuth credentials', icon: 'error' },
aborted: { displayName: 'Retrieving OAuth credentials aborted', icon: 'x' },
},
},
schema: {
name: SERVER_TOOL_IDS.GET_OAUTH_CREDENTIALS,
description: 'Get the list of OAuth credentials for a user',
parameters: {
type: 'object',
properties: {
userId: { type: 'string', description: 'The user ID' },
},
required: ['userId'],
},
},
requiresInterrupt: false,
},
[SERVER_TOOL_IDS.READ_GDRIVE_FILE]: {
id: SERVER_TOOL_IDS.READ_GDRIVE_FILE,
displayConfig: {
states: {
executing: { displayName: 'Reading Google Drive file', icon: 'spinner' },
success: { displayName: 'Read Google Drive file', icon: 'file' },
rejected: { displayName: 'Skipped reading Google Drive file', icon: 'skip' },
errored: { displayName: 'Failed to read Google Drive file', icon: 'error' },
aborted: { displayName: 'Reading Google Drive file aborted', icon: 'x' },
},
},
schema: {
name: SERVER_TOOL_IDS.READ_GDRIVE_FILE,
description: 'Read a file from Google Drive (Docs or Sheets)',
parameters: {
type: 'object',
properties: {
userId: { type: 'string', description: 'The user ID' },
fileId: { type: 'string', description: 'The Google Drive file ID' },
type: { type: 'string', enum: ['doc', 'sheet'], description: 'The file type' },
range: { type: 'string', description: 'Optional range for Sheets (e.g., Sheet1!A1:B10)' },
},
required: ['userId', 'fileId', 'type'],
},
},
requiresInterrupt: false,
},
[SERVER_TOOL_IDS.MAKE_API_REQUEST]: {
id: SERVER_TOOL_IDS.MAKE_API_REQUEST,
displayConfig: {
states: {
pending: { displayName: 'Execute API request?', icon: 'api' },
executing: { displayName: 'Executing API request', icon: 'spinner' },
success: { displayName: 'Executed API request', icon: 'api' },
rejected: { displayName: 'Skipped API request', icon: 'skip' },
errored: { displayName: 'Failed to execute API request', icon: 'error' },
aborted: { displayName: 'API request aborted', icon: 'x' },
},
},
schema: {
name: SERVER_TOOL_IDS.MAKE_API_REQUEST,
description: 'Make an HTTP API request using provided parameters',
parameters: {
type: 'object',
properties: {
url: { type: 'string', description: 'Request URL' },
method: { type: 'string', enum: ['GET', 'POST', 'PUT'], description: 'HTTP method' },
queryParams: {
type: 'object',
description: 'Optional query parameters as key-value pairs',
additionalProperties: { type: ['string', 'number', 'boolean'] },
},
headers: {
type: 'object',
description: 'Optional headers as key-value pairs',
additionalProperties: { type: 'string' },
},
body: {
type: ['object', 'string'],
description: 'Optional JSON body (object or string)',
},
},
required: ['url', 'method'],
},
},
requiresInterrupt: true,
},
}

View File

@@ -0,0 +1,79 @@
import { createLogger } from '@/lib/logs/console/logger'
import { getOAuthToken } from '@/app/api/auth/oauth/utils'
import { executeTool } from '@/tools'
import { BaseCopilotTool } from '../base'
/** Input accepted by the `list_gdrive_files` copilot tool. */
interface ListGDriveFilesParams {
  // ID of the user whose Google Drive OAuth connection is used; required.
  userId: string
  // Optional search query; when both spellings are given, snake_case wins.
  search_query?: string
  // camelCase alias for search_query, accepted for caller convenience.
  searchQuery?: string
  // Optional maximum number of files to return (forwarded as pageSize).
  num_results?: number
}
/** Result shape returned by the `list_gdrive_files` copilot tool. */
interface ListGDriveFilesResult {
  // Files as reported by the underlying `google_drive_list` tool.
  files: Array<{
    id: string
    name: string
    mimeType: string
    webViewLink?: string
    webContentLink?: string
    size?: string
    createdTime?: string
    modifiedTime?: string
    parents?: string[]
  }>
  // Count of files in this response (files.length), not the Drive-wide total.
  total: number
  // Token for fetching the next page when the listing is paginated.
  nextPageToken?: string
}
/**
 * Copilot tool that lists files from the user's Google Drive by delegating
 * to the existing `google_drive_list` tool with the user's OAuth token.
 */
class ListGDriveFilesTool extends BaseCopilotTool<ListGDriveFilesParams, ListGDriveFilesResult> {
  readonly id = 'list_gdrive_files'
  readonly displayName = 'Listing Google Drive files'

  /**
   * Lists Google Drive files for the given user.
   *
   * @param params - userId (required) plus optional search query and result limit.
   * @returns The files found, their count, and a pagination token when present.
   * @throws Error when userId is missing, the user has no Google Drive
   *   connection, or the underlying tool call fails.
   */
  protected async executeImpl(params: ListGDriveFilesParams): Promise<ListGDriveFilesResult> {
    const { userId } = params

    if (!userId || typeof userId !== 'string' || userId.trim().length === 0) {
      throw new Error('userId is required')
    }

    // snake_case takes precedence; camelCase is accepted as an alias.
    const query = params.search_query ?? params.searchQuery
    const pageSize = params.num_results

    // Get (and refresh if needed) the user's OAuth access token for Google Drive.
    const accessToken = await getOAuthToken(userId, 'google-drive')
    if (!accessToken) {
      throw new Error(
        'No Google Drive connection found for this user. Please connect Google Drive in settings.'
      )
    }

    // Reuse the existing google_drive_list tool; skip the proxy so the
    // external API is called directly from the server.
    const result = await executeTool(
      'google_drive_list',
      {
        accessToken,
        ...(query ? { query } : {}),
        ...(typeof pageSize === 'number' ? { pageSize } : {}),
      },
      true
    )

    if (!result.success) {
      throw new Error(result.error || 'Failed to list Google Drive files')
    }

    // The tool result may be nested one level under `output`; handle both shapes.
    const output = result.output as any
    const files = Array.isArray(output?.files) ? output.files : output?.output?.files || []
    const nextPageToken = output?.nextPageToken || output?.output?.nextPageToken

    return {
      files,
      total: files.length,
      nextPageToken,
    }
  }
}

// Singleton instance registered with the copilot tool registry.
export const listGDriveFilesTool = new ListGDriveFilesTool()

View File

@@ -0,0 +1,85 @@
import { createLogger } from '@/lib/logs/console/logger'
import { getOAuthToken } from '@/app/api/auth/oauth/utils'
import { executeTool } from '@/tools'
import { BaseCopilotTool } from '../base'
/** Input accepted by the `read_gdrive_file` copilot tool. */
interface ReadGDriveFileParams {
  // ID of the user whose OAuth connection is used; required.
  userId: string
  // Google Drive file ID (used as the spreadsheet ID when type is 'sheet').
  fileId: string
  // Which reader to use: Google Docs or Google Sheets.
  type: 'doc' | 'sheet'
  // Optional range for sheets (e.g. Sheet1!A1:B10); ignored for docs.
  range?: string
}
/** Result returned by the `read_gdrive_file` copilot tool. */
interface ReadGDriveFileResult {
  // Echoes the requested file type.
  type: 'doc' | 'sheet'
  // Document text; populated only when type is 'doc'.
  content?: string
  // Cell values; populated only when type is 'sheet'.
  rows?: string[][]
  // Resolved range reported by the Sheets read; only set for sheets.
  range?: string
  // Provider metadata passed through from the underlying tool, when present.
  metadata?: Record<string, any>
}
/**
 * Copilot tool that reads a Google Drive file (a Google Doc or a Google
 * Sheet) on behalf of a user by delegating to the existing content tools.
 */
class ReadGDriveFileTool extends BaseCopilotTool<ReadGDriveFileParams, ReadGDriveFileResult> {
  readonly id = 'read_gdrive_file'
  readonly displayName = 'Reading Google Drive file'

  /**
   * Reads the requested file.
   *
   * @param params - userId, fileId, file type ('doc' | 'sheet'), and an
   *   optional range (e.g. Sheet1!A1:B10) for sheets.
   * @returns Document text (`content`) for docs, or cell values (`rows`)
   *   plus the resolved range for sheets; provider metadata when available.
   * @throws Error when required params are missing, the user has no matching
   *   OAuth connection, or the underlying read fails.
   */
  protected async executeImpl(params: ReadGDriveFileParams): Promise<ReadGDriveFileResult> {
    const { userId, fileId, type, range } = params

    if (!userId || !fileId || !type) {
      throw new Error('userId, fileId and type are required')
    }

    if (type === 'doc') {
      // Docs are read through the user's Google Drive connection.
      const accessToken = await getOAuthToken(userId, 'google-drive')
      if (!accessToken) {
        throw new Error(
          'No Google Drive connection found for this user. Please connect Google Drive in settings.'
        )
      }

      const result = await executeTool('google_drive_get_content', { accessToken, fileId }, true)
      if (!result.success) {
        throw new Error(result.error || 'Failed to read Google Drive document')
      }

      // The tool result may be nested one level under `output`; handle both shapes.
      const output = result.output as any
      const content = output?.output?.content ?? output?.content
      const metadata = output?.output?.metadata ?? output?.metadata
      return { type, content, metadata }
    }

    if (type === 'sheet') {
      // Sheets require a separate Google Sheets OAuth connection.
      const accessToken = await getOAuthToken(userId, 'google-sheets')
      if (!accessToken) {
        throw new Error(
          'No Google Sheets connection found for this user. Please connect Google Sheets in settings.'
        )
      }

      const result = await executeTool(
        'google_sheets_read',
        { accessToken, spreadsheetId: fileId, ...(range ? { range } : {}) },
        true
      )
      if (!result.success) {
        throw new Error(result.error || 'Failed to read Google Sheets data')
      }

      const output = result.output as any
      const rows: string[][] = output?.output?.data?.values || output?.data?.values || []
      const resolvedRange: string | undefined = output?.output?.data?.range || output?.data?.range
      const metadata = output?.output?.metadata || output?.metadata
      return { type, rows, range: resolvedRange, metadata }
    }

    // `type` is declared as 'doc' | 'sheet', but guard against untyped callers.
    throw new Error(`Unsupported type: ${type}`)
  }
}

// Singleton instance registered with the copilot tool registry.
export const readGDriveFileTool = new ReadGDriveFileTool()

View File

@@ -0,0 +1,135 @@
import { createLogger } from '@/lib/logs/console/logger'
import { executeTool } from '@/tools'
import type { TableRow } from '@/tools/types'
import { BaseCopilotTool } from '../base'
// Input for the make_api_request copilot tool.
interface MakeApiRequestParams {
  url: string // Target URL for the HTTP request
  method: 'GET' | 'POST' | 'PUT' // Supported HTTP verbs
  queryParams?: Record<string, string | number | boolean> // Converted to table rows for http_request
  headers?: Record<string, string> // Converted to table rows for http_request
  body?: any // Forwarded to http_request unchanged
}
// Result of the request; the truncation fields are only present when the
// normalized response exceeded the character cap.
interface MakeApiRequestResult {
  data: any // Normalized (stringified / HTML-stripped, possibly truncated) response body
  status: number // HTTP status (defaults to 200 when the tool omits it)
  headers: Record<string, any> // Response headers from the underlying tool
  truncated?: boolean // True when `data` was cut at the cap
  totalChars?: number // Full normalized length before truncation
  previewChars?: number // Length of the returned preview
  note?: string // Human-readable truncation notice
}
/**
 * Copilot tool that performs an arbitrary HTTP request on the user's behalf
 * via the internal http_request tool.
 *
 * Responses are normalized to a string, HTML payloads are stripped to plain
 * text, and very large payloads are truncated to a configurable character
 * cap (COPILOT_TOOL_RESULT_CHAR_CAP, default 20000) so tool results stay
 * small enough to feed back to the model.
 */
class MakeApiRequestTool extends BaseCopilotTool<MakeApiRequestParams, MakeApiRequestResult> {
  readonly id = 'make_api_request'
  readonly displayName = 'Making API request'
  readonly requiresInterrupt = true

  protected async executeImpl(params: MakeApiRequestParams): Promise<MakeApiRequestResult> {
    const logger = createLogger('MakeApiRequestTool')
    const { url, method, queryParams, headers, body } = params

    if (!url || !method) {
      throw new Error('url and method are required')
    }

    // http_request expects headers/params as TableRow[]; convert plain records.
    const toTableRows = (obj?: Record<string, any>): TableRow[] | null => {
      if (!obj || typeof obj !== 'object') return null
      return Object.entries(obj).map(([key, value]) => ({
        id: key,
        cells: { Key: key, Value: value },
      }))
    }

    const result = await executeTool(
      'http_request',
      {
        url,
        method,
        params: toTableRows(queryParams as Record<string, any> | undefined),
        headers: toTableRows(headers),
        body,
      },
      true
    )
    if (!result.success) {
      throw new Error(result.error || 'API request failed')
    }

    // Tool output may be nested ({ output: { ... } }) or flat.
    const output = (result as any).output || result
    const data = output.output?.data ?? output.data
    const status = output.output?.status ?? output.status ?? 200
    const respHeaders = output.output?.headers ?? output.headers ?? {}

    // Character cap. Guard against a missing, non-numeric, or non-positive env
    // value: Number('garbage') is NaN and `totalChars > NaN` is always false,
    // which would silently disable truncation entirely.
    const DEFAULT_CAP = 20000
    const configuredCap = Number(process.env.COPILOT_TOOL_RESULT_CHAR_CAP)
    const CAP = Number.isFinite(configuredCap) && configuredCap > 0 ? configuredCap : DEFAULT_CAP

    // Stringify non-string payloads; fall back to String() for values that
    // JSON.stringify rejects (e.g. circular structures).
    const toStringSafe = (val: any): string => {
      if (typeof val === 'string') return val
      try {
        return JSON.stringify(val)
      } catch {
        return String(val)
      }
    }

    // Drop scripts/styles, remove tags, and collapse whitespace for readability.
    const stripHtml = (html: string): string => {
      try {
        return html
          .replace(/<script[\s\S]*?<\/script>/gi, '')
          .replace(/<style[\s\S]*?<\/style>/gi, '')
          .replace(/<[^>]+>/g, ' ')
          .replace(/\s+/g, ' ')
          .trim()
      } catch {
        return html
      }
    }

    // Normalize to a string for measurement; strip HTML-looking payloads
    // before applying the cap so the preview is readable text.
    let normalized = toStringSafe(data)
    const looksLikeHtml =
      /<html[\s\S]*<\/html>/i.test(normalized) || /<body[\s\S]*<\/body>/i.test(normalized)
    if (looksLikeHtml) {
      normalized = stripHtml(normalized)
    }

    const totalChars = normalized.length
    if (totalChars > CAP) {
      const preview = normalized.slice(0, CAP)
      logger.warn('API response truncated by character cap', {
        url,
        method,
        totalChars,
        previewChars: preview.length,
        cap: CAP,
      })
      return {
        data: preview,
        status,
        headers: respHeaders,
        truncated: true,
        totalChars,
        previewChars: preview.length,
        note: `Response truncated to ${CAP} characters to avoid large payloads`,
      }
    }

    logger.info('API request executed', { url, method, status, totalChars })
    return { data: normalized, status, headers: respHeaders }
  }
}
export const makeApiRequestTool = new MakeApiRequestTool()

View File

@@ -4,9 +4,13 @@ import type { CopilotTool } from './base'
import { getBlocksAndToolsTool } from './blocks/get-blocks-and-tools'
import { getBlocksMetadataTool } from './blocks/get-blocks-metadata'
import { searchDocsTool } from './docs/search-docs'
import { listGDriveFilesTool } from './gdrive/list-gdrive-files'
import { readGDriveFileTool } from './gdrive/read-gdrive-file'
import { makeApiRequestTool } from './other/make-api-request'
import { noOpTool } from './other/no-op'
import { onlineSearchTool } from './other/online-search'
import { getEnvironmentVariablesTool } from './user/get-environment-variables'
import { getOAuthCredentialsTool } from './user/get-oauth-credentials'
import { setEnvironmentVariablesTool } from './user/set-environment-variables'
import { buildWorkflowTool } from './workflow/build-workflow'
import { editWorkflowTool } from './workflow/edit-workflow'
@@ -92,10 +96,14 @@ copilotToolRegistry.register(noOpTool)
copilotToolRegistry.register(onlineSearchTool)
copilotToolRegistry.register(getEnvironmentVariablesTool)
copilotToolRegistry.register(setEnvironmentVariablesTool)
copilotToolRegistry.register(getOAuthCredentialsTool)
copilotToolRegistry.register(getUserWorkflowTool)
copilotToolRegistry.register(buildWorkflowTool)
copilotToolRegistry.register(getWorkflowConsoleTool)
copilotToolRegistry.register(editWorkflowTool)
copilotToolRegistry.register(listGDriveFilesTool)
copilotToolRegistry.register(readGDriveFileTool)
copilotToolRegistry.register(makeApiRequestTool)
// Dynamically generated constants - single source of truth
export const COPILOT_TOOL_IDS = copilotToolRegistry.getAvailableIds()

View File

@@ -0,0 +1,108 @@
import { eq } from 'drizzle-orm'
import { jwtDecode } from 'jwt-decode'
import { createLogger } from '@/lib/logs/console/logger'
import { db } from '@/db'
import { account, user } from '@/db/schema'
import { BaseCopilotTool } from '../base'
// Input for the get_oauth_credentials copilot tool.
interface GetOAuthCredentialsParams {
  userId: string // User whose connected OAuth accounts are listed
}
// One connected OAuth account, shaped for display in the copilot UI.
interface OAuthCredentialItem {
  id: string // Account row ID
  name: string // Display label (email/name from idToken, or a fallback)
  provider: string // Full provider ID, e.g. 'google-drive'
  lastUsed: string // ISO timestamp taken from the account's updatedAt
  isDefault: boolean // True when the provider ID has no '-<feature>' suffix
}
interface GetOAuthCredentialsResult {
  credentials: OAuthCredentialItem[]
  total: number // Same as credentials.length
}
/**
 * Copilot tool that lists the OAuth accounts a user has connected, with a
 * human-readable display name for each.
 *
 * Display names are resolved in priority order: idToken email/name claims,
 * GitHub username, the owner's own email, then the raw account ID.
 */
class GetOAuthCredentialsTool extends BaseCopilotTool<
  GetOAuthCredentialsParams,
  GetOAuthCredentialsResult
> {
  readonly id = 'get_oauth_credentials'
  readonly displayName = 'Getting OAuth credentials'

  protected async executeImpl(
    params: GetOAuthCredentialsParams
  ): Promise<GetOAuthCredentialsResult> {
    const logger = createLogger('GetOAuthCredentials')
    const { userId } = params

    if (!userId || typeof userId !== 'string' || userId.trim().length === 0) {
      throw new Error('userId is required')
    }

    logger.info('Fetching OAuth credentials for user', { userId })

    // All connected OAuth accounts for this user.
    const accounts = await db.select().from(account).where(eq(account.userId, userId))

    // The user's own email, used as a display-name fallback.
    const emailRows = await db
      .select({ email: user.email })
      .from(user)
      .where(eq(user.id, userId))
      .limit(1)
    const fallbackEmail = emailRows.length > 0 ? emailRows[0]?.email : null

    // Resolve a human-readable label for a single connected account.
    const resolveDisplayName = (acc: (typeof accounts)[number], baseProvider: string): string => {
      // 1. Prefer identity claims embedded in the idToken, when present.
      if (acc.idToken) {
        try {
          const claims = jwtDecode<{ email?: string; name?: string }>(acc.idToken)
          if (claims.email) return claims.email
          if (claims.name) return claims.name
        } catch (_err) {
          logger.warn('Failed to decode idToken for credential', { accountId: acc.id })
        }
      }
      // 2. GitHub accounts carry a username in accountId.
      if (baseProvider === 'github') return `${acc.accountId} (GitHub)`
      // 3. Fall back to the owner's email, then to the raw account ID.
      if (fallbackEmail) return fallbackEmail
      return `${acc.accountId} (${baseProvider})`
    }

    const credentials: OAuthCredentialItem[] = accounts.map((acc) => {
      const providerId = acc.providerId
      // Provider IDs look like '<base>' or '<base>-<feature>'.
      const [baseProvider, featureType = 'default'] = providerId.split('-')
      return {
        id: acc.id,
        name: resolveDisplayName(acc, baseProvider),
        provider: providerId,
        lastUsed: acc.updatedAt.toISOString(),
        isDefault: featureType === 'default',
      }
    })

    logger.info('Fetched OAuth credentials', { userId, count: credentials.length })
    return { credentials, total: credentials.length }
  }
}
export const getOAuthCredentialsTool = new GetOAuthCredentialsTool()

View File

@@ -187,6 +187,18 @@ async function applyOperationsToYaml(
})
}
// Update type if provided
if (params?.type) {
block.type = params.type
logger.info(`Updated type for block ${block_id}`, { type: block.type })
}
// Update name if provided
if (params?.name) {
block.name = params.name
logger.info(`Updated name for block ${block_id}`, { name: block.name })
}
// Handle edge removals when specified in params
if (params?.removeEdges && Array.isArray(params.removeEdges)) {
params.removeEdges.forEach(

View File

@@ -5,7 +5,9 @@
import React from 'react'
import {
BetweenHorizontalEnd,
Blocks,
Brain,
Check,
CheckCircle,
Code,
@@ -15,8 +17,12 @@ import {
FileText,
GitBranch,
Globe,
Grid2x2,
Grid2x2Check,
Grid2x2X,
Info,
Lightbulb,
ListTodo,
Loader2,
type LucideIcon,
Minus,
@@ -64,6 +70,10 @@ const ICON_MAP: Record<string, LucideIcon> = {
eye: Eye,
x: X,
blocks: Blocks, // Blocks icon with missing corner
betweenHorizontalEnd: BetweenHorizontalEnd, // Icon for block metadata
grid2x2: Grid2x2, // Grid for ready for review
grid2x2Check: Grid2x2Check, // Grid with checkmark for accepted workflow changes
grid2x2X: Grid2x2X, // Grid with X for rejected workflow changes
info: Info,
terminal: Terminal,
squareTerminal: SquareTerminal,
@@ -74,6 +84,8 @@ const ICON_MAP: Record<string, LucideIcon> = {
workflow: Workflow, // Flowchart icon with boxes and connecting lines
network: Network, // Complex network icon with multiple interconnected nodes
gitbranch: GitBranch, // Git branching icon showing workflow paths
brain: Brain, // Brain icon for reasoning/AI thinking
listTodo: ListTodo, // List with checkboxes for planning/todos
// Default
default: Lightbulb,
@@ -177,16 +189,7 @@ export function renderToolStateIcon(
return React.createElement(Icon, { className: `${className} animate-spin ${stateClasses}` })
}
if (toolCall.state === 'rejected') {
// Special "skipped" icon style
return React.createElement(
'div',
{
className: `flex ${className} items-center justify-center rounded-full border border-gray-400`,
},
React.createElement(Minus, { className: 'h-2 w-2 text-gray-500' })
)
}
// Remove hardcoded rejected state override - let tool definitions control the icon
return React.createElement(Icon, { className: `${className} ${stateClasses}` })
}

View File

@@ -1,76 +1,207 @@
import { useCallback, useRef, useState } from 'react'
import { createLogger } from '@/lib/logs/console/logger'
import { useWorkflowDiffStore } from '@/stores/workflow-diff/store'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
import { type DiffAnalysis, WorkflowDiffEngine } from './diff-engine'
import type { WorkflowState } from '@/stores/workflows/workflow/types'
import { WorkflowDiffEngine } from './diff-engine'
const logger = createLogger('useWorkflowDiff')
const logger = createLogger('WorkflowDiff')
interface WorkflowBackup {
workflowState: WorkflowState
subblockValues: Record<string, Record<string, any>>
timestamp: number
}
export interface UseWorkflowDiffReturn {
isShowingDiff: boolean
hasDiff: boolean
setProposedChanges: (yamlContent: string, diffAnalysis?: DiffAnalysis) => Promise<boolean>
isDiffReady: boolean
diffWorkflow: WorkflowState | null
diffMetadata: any | null
toggleDiffView: () => void
clearDiff: () => void
acceptChanges: () => Promise<boolean>
rejectChanges: () => void
toggleDiffView: () => void
getCurrentWorkflowForCanvas: () => any
rejectChanges: () => Promise<void>
createDiff: (proposedState: WorkflowState, metadata?: any) => void
}
/**
* Hook that provides workflow diff functionality
* without polluting core stores
*/
export function useWorkflowDiff(): UseWorkflowDiffReturn {
const [isShowingDiff, setIsShowingDiff] = useState(false)
const diffEngineRef = useRef<WorkflowDiffEngine | null>(null)
const diffEngineRef = useRef<WorkflowDiffEngine>(new WorkflowDiffEngine())
const lastBackupRef = useRef<WorkflowBackup | null>(null)
// Get store methods
const { activeWorkflowId } = useWorkflowRegistry()
const workflowStore = useWorkflowStore()
const activeWorkflowId = useWorkflowRegistry((state) => state.activeWorkflowId)
const { isDiffReady, diffWorkflow, diffMetadata } = useWorkflowDiffStore()
// Initialize diff engine
if (!diffEngineRef.current) {
diffEngineRef.current = new WorkflowDiffEngine()
}
const setProposedChanges = useCallback(
async (yamlContent: string, diffAnalysis?: DiffAnalysis): Promise<boolean> => {
try {
logger.info('Setting proposed changes')
const result = await diffEngineRef.current!.createDiffFromYaml(yamlContent, diffAnalysis)
if (result.success) {
setIsShowingDiff(true)
return true
}
logger.error('Failed to create diff:', result.errors)
return false
} catch (error) {
logger.error('Error setting proposed changes:', error)
return false
}
},
[]
)
const toggleDiffView = useCallback(() => {
setIsShowingDiff((prev) => !prev)
}, [])
const clearDiff = useCallback(() => {
logger.info('Clearing diff')
diffEngineRef.current!.clearDiff()
diffEngineRef.current.clearDiff()
useWorkflowDiffStore.getState().clearDiff()
setIsShowingDiff(false)
}, [])
// Create a backup of current state before applying changes
const createBackup = useCallback((): WorkflowBackup | null => {
if (!activeWorkflowId) {
logger.error('No active workflow ID for backup')
return null
}
const currentState = workflowStore.getWorkflowState()
const subblockStore = useSubBlockStore.getState()
const currentSubblockValues = subblockStore.workflowValues[activeWorkflowId] || {}
const backup: WorkflowBackup = {
workflowState: {
blocks: { ...currentState.blocks },
edges: [...currentState.edges],
loops: { ...currentState.loops },
parallels: { ...currentState.parallels },
lastSaved: currentState.lastSaved,
isDeployed: currentState.isDeployed,
deployedAt: currentState.deployedAt,
deploymentStatuses: { ...currentState.deploymentStatuses },
hasActiveWebhook: currentState.hasActiveWebhook,
},
subblockValues: JSON.parse(JSON.stringify(currentSubblockValues)), // Deep copy
timestamp: Date.now(),
}
lastBackupRef.current = backup
logger.info('Created workflow backup before diff acceptance', {
workflowId: activeWorkflowId,
blocksCount: Object.keys(backup.workflowState.blocks).length,
edgesCount: backup.workflowState.edges.length,
})
return backup
}, [activeWorkflowId, workflowStore])
// Restore state from backup
const restoreFromBackup = useCallback(
(backup: WorkflowBackup) => {
if (!activeWorkflowId) {
logger.error('No active workflow ID for restore')
return
}
logger.warn('Restoring workflow state from backup due to save failure', {
workflowId: activeWorkflowId,
backupTimestamp: backup.timestamp,
})
// Restore workflow store state
useWorkflowStore.setState({
blocks: backup.workflowState.blocks,
edges: backup.workflowState.edges,
loops: backup.workflowState.loops,
parallels: backup.workflowState.parallels,
lastSaved: backup.workflowState.lastSaved,
isDeployed: backup.workflowState.isDeployed,
deployedAt: backup.workflowState.deployedAt,
deploymentStatuses: backup.workflowState.deploymentStatuses,
hasActiveWebhook: backup.workflowState.hasActiveWebhook,
})
// Restore subblock values
useSubBlockStore.setState((state) => ({
workflowValues: {
...state.workflowValues,
[activeWorkflowId]: backup.subblockValues,
},
}))
logger.info('Successfully restored workflow state from backup')
},
[activeWorkflowId]
)
// Create checkpoint before applying changes
const createCheckpoint = useCallback(async (): Promise<{
success: boolean
checkpointId?: string
}> => {
if (!activeWorkflowId) {
logger.error('No active workflow ID for checkpoint')
return { success: false }
}
try {
const currentState = workflowStore.getWorkflowState()
// Get current copilot chat ID (if available)
const { useCopilotStore } = await import('@/stores/copilot/store')
const { currentChat, messages } = useCopilotStore.getState()
if (!currentChat?.id) {
logger.warn('No active copilot chat for checkpoint creation')
return { success: false }
}
// Get the last user message that might have triggered this diff
const lastUserMessage = messages
.slice()
.reverse()
.find((msg) => msg.role === 'user')
const response = await fetch('/api/copilot/checkpoints', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
workflowId: activeWorkflowId,
chatId: currentChat.id,
messageId: lastUserMessage?.id,
workflowState: JSON.stringify(currentState),
}),
})
if (!response.ok) {
const errorData = await response.json()
logger.error('Failed to create checkpoint:', errorData)
return { success: false }
}
const result = await response.json()
logger.info('Created checkpoint before diff acceptance', {
checkpointId: result.id,
workflowId: activeWorkflowId,
})
return { success: true, checkpointId: result.id }
} catch (error) {
logger.error('Failed to create checkpoint:', error)
return { success: false }
}
}, [activeWorkflowId, workflowStore])
const acceptChanges = useCallback(async (): Promise<boolean> => {
if (!activeWorkflowId) {
logger.error('No active workflow ID')
return false
}
// Create backup before making any changes
const backup = createBackup()
if (!backup) {
logger.error('Failed to create backup before accepting changes')
return false
}
// Create checkpoint for potential rollback
const checkpointResult = await createCheckpoint()
if (!checkpointResult.success) {
logger.warn('Failed to create checkpoint, proceeding without it')
}
try {
logger.info('Accepting diff changes')
logger.info('Accepting diff changes with backup protection')
const cleanState = diffEngineRef.current!.acceptDiff()
if (!cleanState) {
@@ -105,7 +236,7 @@ export function useWorkflowDiff(): UseWorkflowDiffReturn {
// Update last saved timestamp
workflowStore.updateLastSaved()
// Persist to database
// Persist to database with error handling and rollback
try {
const response = await fetch(`/api/workflows/${activeWorkflowId}/state`, {
method: 'PUT',
@@ -117,32 +248,56 @@ export function useWorkflowDiff(): UseWorkflowDiffReturn {
})
if (!response.ok) {
throw new Error(`Failed to save: ${response.statusText}`)
const errorData = await response.json()
throw new Error(errorData.error || `Failed to save: ${response.statusText}`)
}
logger.info('Diff changes persisted to database')
logger.info('Diff changes persisted to database successfully')
// Clear the backup since save was successful
lastBackupRef.current = null
} catch (error) {
logger.error('Failed to persist diff changes:', error)
// State is already updated locally, so don't fail the operation
logger.error('Failed to persist diff changes, rolling back:', error)
// Rollback to backup state
restoreFromBackup(backup)
// Clear the diff since we're reverting
clearDiff()
// Show user-friendly error
throw new Error(
`Failed to save workflow changes: ${error instanceof Error ? error.message : 'Unknown error'}. ` +
'The workflow has been restored to its previous state.'
)
}
setIsShowingDiff(false)
return true
} catch (error) {
logger.error('Failed to accept changes:', error)
return false
}
}, [activeWorkflowId, workflowStore])
const rejectChanges = useCallback(() => {
// If we haven't already restored from backup, do it now
if (lastBackupRef.current && lastBackupRef.current.timestamp === backup.timestamp) {
restoreFromBackup(backup)
}
throw error
}
}, [
activeWorkflowId,
workflowStore,
createBackup,
createCheckpoint,
restoreFromBackup,
clearDiff,
])
const rejectChanges = useCallback(async () => {
logger.info('Rejecting diff changes')
clearDiff()
}, [clearDiff])
const toggleDiffView = useCallback(() => {
setIsShowingDiff((prev) => !prev)
}, [])
const getCurrentWorkflowForCanvas = useCallback(() => {
const currentState = workflowStore.getWorkflowState()
@@ -155,12 +310,16 @@ export function useWorkflowDiff(): UseWorkflowDiffReturn {
return {
isShowingDiff,
hasDiff: diffEngineRef.current!.hasDiff(),
setProposedChanges,
isDiffReady,
diffWorkflow,
diffMetadata,
toggleDiffView,
clearDiff,
acceptChanges,
rejectChanges,
toggleDiffView,
getCurrentWorkflowForCanvas,
createDiff: (proposedState: WorkflowState, metadata?: any) => {
logger.info('Creating diff with proposed state')
// Note: Implementation may need adjustment based on DiffEngine methods
},
}
}

View File

@@ -25,6 +25,8 @@ export const COPILOT_TOOL_DISPLAY_NAMES: Record<string, string> = {
edit_workflow: 'Updating workflow',
run_workflow: 'Executing workflow',
search_online: 'Searching online',
plan: 'Designing an approach',
reason: 'Reasoning about your workflow',
} as const
// Past tense versions for completed tool calls
@@ -43,6 +45,8 @@ export const COPILOT_TOOL_PAST_TENSE: Record<string, string> = {
edit_workflow: 'Updated workflow',
run_workflow: 'Executed workflow',
search_online: 'Searched online',
plan: 'Designed an approach',
reason: 'Finished reasoning',
} as const
// Error versions for failed tool calls
@@ -61,6 +65,8 @@ export const COPILOT_TOOL_ERROR_NAMES: Record<string, string> = {
edit_workflow: 'Errored updating workflow',
run_workflow: 'Errored running workflow',
search_online: 'Errored searching online',
plan: 'Errored planning approach',
reason: 'Errored reasoning through problem',
} as const
export type CopilotToolId = keyof typeof COPILOT_TOOL_DISPLAY_NAMES

View File

@@ -6,6 +6,7 @@ import { type CopilotChat, sendStreamingMessage } from '@/lib/copilot/api'
import { toolRegistry } from '@/lib/copilot/tools'
import { createLogger } from '@/lib/logs/console/logger'
import { COPILOT_TOOL_DISPLAY_NAMES } from '@/stores/constants'
import { useWorkflowDiffStore } from '../workflow-diff/store'
import { COPILOT_TOOL_IDS } from './constants'
import type {
CopilotMessage,
@@ -40,6 +41,7 @@ function toolSupportsReadyForReview(toolName: string): boolean {
// PERFORMANCE OPTIMIZATION: Cached constants for faster lookups
const TEXT_BLOCK_TYPE = 'text'
const THINKING_BLOCK_TYPE = 'thinking'
const TOOL_CALL_BLOCK_TYPE = 'tool_call'
const ASSISTANT_ROLE = 'assistant'
const DATA_PREFIX = 'data: '
@@ -89,6 +91,9 @@ const contentBlockPool = new ObjectPool(
obj.content = ''
obj.timestamp = 0
obj.toolCall = null
// Ensure any timing fields are cleared so pooled blocks don't leak timing
;(obj as any).startTime = undefined
;(obj as any).duration = undefined
}
)
@@ -118,11 +123,56 @@ class StringBuilder {
}
}
/**
 * Split raw assistant text into ordered content blocks, separating
 * <thinking>…</thinking> sections from regular text.
 *
 * Whitespace-only segments are dropped. If nothing survives filtering but
 * the input itself is non-blank, the whole input is returned as one text
 * block so no content is ever lost.
 */
function parseContentWithThinkingTags(content: string): Array<{ type: string; content: string }> {
  // Splitting on a capturing group keeps the captured thinking bodies in the
  // result array: even indices are plain text, odd indices are thinking text.
  const segments = content.split(/<thinking>([\s\S]*?)<\/thinking>/g)
  const blocks: Array<{ type: string; content: string }> = []

  segments.forEach((segment, index) => {
    if (!segment.trim()) return
    const type = index % 2 === 0 ? TEXT_BLOCK_TYPE : THINKING_BLOCK_TYPE
    blocks.push({ type, content: segment })
  })

  // Fallback: nothing extracted (e.g. tags wrapping only whitespace, or no
  // tags at all with blank text) — keep the original input as a text block.
  if (blocks.length === 0 && content.trim()) {
    blocks.push({ type: TEXT_BLOCK_TYPE, content })
  }

  return blocks
}
/**
* Initial state for the copilot store
*/
const initialState = {
mode: 'ask' as const,
mode: 'agent' as const,
agentDepth: 1 as 0 | 1 | 2 | 3,
currentChat: null,
chats: [],
messages: [],
@@ -145,6 +195,10 @@ const initialState = {
// Revert state management
revertState: null as { messageId: string; messageContent: string } | null, // Track which message we reverted from
inputValue: '', // Control the input field
// Todo list state (from plan tool)
planTodos: [],
showPlanTodos: false,
}
/**
@@ -202,18 +256,24 @@ function handleStoreError(error: unknown, fallbackMessage: string): string {
function validateMessagesForLLM(messages: CopilotMessage[]): any[] {
return messages
.map((msg) => {
// Build content from contentBlocks if content is empty
// Build content from contentBlocks if content is empty, but EXCLUDE thinking blocks
let validContent = msg.content || ''
// For assistant messages, if content is empty but there are contentBlocks, build content from them
// BUT exclude thinking blocks to prevent thinking text from being sent to LLM
if (msg.role === 'assistant' && !validContent.trim() && msg.contentBlocks?.length) {
validContent = msg.contentBlocks
.filter((block) => block.type === 'text')
.filter((block) => block.type === 'text') // Only include text blocks, NOT thinking blocks
.map((block) => block.content)
.join('')
.trim()
}
// For all messages, clean any thinking tags from the content to ensure no thinking text leaks through
if (validContent) {
validContent = cleanThinkingTags(validContent)
}
return {
id: msg.id,
role: msg.role,
@@ -221,7 +281,9 @@ function validateMessagesForLLM(messages: CopilotMessage[]): any[] {
timestamp: msg.timestamp,
...(msg.toolCalls && msg.toolCalls.length > 0 && { toolCalls: msg.toolCalls }),
...(msg.contentBlocks &&
msg.contentBlocks.length > 0 && { contentBlocks: msg.contentBlocks }),
msg.contentBlocks.length > 0 && {
contentBlocks: msg.contentBlocks.filter((block) => block.type !== 'thinking'), // Exclude thinking blocks
}),
...(msg.fileAttachments &&
msg.fileAttachments.length > 0 && { fileAttachments: msg.fileAttachments }),
}
@@ -244,6 +306,13 @@ function validateMessagesForLLM(messages: CopilotMessage[]): any[] {
})
}
/**
 * Strip every <thinking>…</thinking> section (tags included) from a string
 * and trim the surrounding whitespace, so model reasoning text never leaks
 * into content sent back to the LLM.
 */
function cleanThinkingTags(content: string): string {
  const withoutThinking = content.replace(/<thinking>[\s\S]*?<\/thinking>/g, '')
  return withoutThinking.trim()
}
/**
* Helper function to get a display name for a tool
*/
@@ -371,13 +440,24 @@ function processWorkflowToolResult(toolCall: any, result: any, get: () => Copilo
toolCall.input?.yamlContent ||
toolCall.input?.data?.yamlContent
// For build_workflow tool, also extract workflowState if available
const workflowState = result?.workflowState || result?.data?.workflowState
if (yamlContent) {
logger.info(`Setting preview YAML from ${toolCall.name} tool`, {
yamlLength: yamlContent.length,
yamlPreview: yamlContent.substring(0, 100),
hasWorkflowState: !!workflowState,
})
get().setPreviewYaml(yamlContent)
get().updateDiffStore(yamlContent, toolCall.name)
// For build_workflow, use the workflowState directly if available
if (toolCall.name === 'build_workflow' && workflowState) {
logger.info('Using workflowState directly for build_workflow tool')
get().updateDiffStoreWithWorkflowState(workflowState, toolCall.name)
} else {
get().updateDiffStore(yamlContent, toolCall.name)
}
} else {
logger.warn(`No yamlContent found in ${toolCall.name} result`, {
resultKeys: Object.keys(result || {}),
@@ -501,6 +581,19 @@ function setToolCallState(
toolCall.endTime = Date.now()
toolCall.duration = toolCall.endTime - toolCall.startTime
break
case 'aborted':
// Tool was aborted
toolCall.endTime = Date.now()
toolCall.duration = toolCall.endTime - toolCall.startTime
if (error) {
toolCall.error = error
}
break
default:
logger.warn(`Unknown tool state: ${newState}`)
break
}
// Update display name based on new state
@@ -670,11 +763,15 @@ interface StreamingContext {
toolCalls: any[]
contentBlocks: any[]
currentTextBlock: any | null
currentBlockType: 'text' | 'tool_use' | null
currentBlockType: 'text' | 'tool_use' | 'thinking' | null
toolCallBuffer: any | null
newChatId?: string
doneEventCount: number
streamComplete?: boolean
// Thinking tag tracking
pendingContent: string // Buffer for content that may contain partial thinking tags
isInThinkingBlock: boolean // Track if we're currently inside a thinking block
currentThinkingBlock: any | null
// PERFORMANCE OPTIMIZATION: Pre-allocated buffers and caching
_tempBuffer?: string[]
_lastUpdateTime?: number
@@ -727,6 +824,63 @@ const sseHandlers: Record<string, SSEHandler> = {
}))
},
// Render model "reasoning" stream as thinking blocks
reasoning: (data, context, get, set) => {
// Support both nested and flat phase shapes
const phase = (data && (data.phase || data?.data?.phase)) as string | undefined
// Handle control phases
if (phase === 'start') {
// Begin a thinking block session
if (!context.currentThinkingBlock) {
context.currentThinkingBlock = contentBlockPool.get()
context.currentThinkingBlock.type = THINKING_BLOCK_TYPE
context.currentThinkingBlock.content = ''
context.currentThinkingBlock.timestamp = Date.now()
context.currentThinkingBlock.startTime = Date.now()
context.contentBlocks.push(context.currentThinkingBlock)
}
context.isInThinkingBlock = true
context.currentTextBlock = null
updateStreamingMessage(set, context)
return
}
if (phase === 'end') {
// Finish the current thinking block
if (context.currentThinkingBlock) {
context.currentThinkingBlock.duration =
Date.now() - (context.currentThinkingBlock.startTime || Date.now())
}
context.isInThinkingBlock = false
context.currentThinkingBlock = null
context.currentTextBlock = null
updateStreamingMessage(set, context)
return
}
// Fallback: some providers may stream reasoning text directly on this event
const chunk: string = typeof data?.data === 'string' ? data.data : data?.content || ''
if (!chunk) return
if (context.currentThinkingBlock) {
context.currentThinkingBlock.content += chunk
} else {
context.currentThinkingBlock = contentBlockPool.get()
context.currentThinkingBlock.type = THINKING_BLOCK_TYPE
context.currentThinkingBlock.content = chunk
context.currentThinkingBlock.timestamp = Date.now()
context.currentThinkingBlock.startTime = Date.now()
context.contentBlocks.push(context.currentThinkingBlock)
}
context.isInThinkingBlock = true
context.currentTextBlock = null
updateStreamingMessage(set, context)
},
// Handle tool result events - simplified
tool_result: (data, context, get, set) => {
const { toolCallId, result, success, error, failedDependency } = data
@@ -767,6 +921,38 @@ const sseHandlers: Record<string, SSEHandler> = {
// NEW LOGIC: Use centralized state management
setToolCallState(toolCall, 'success', { result: parsedResult })
// Check if this is the plan tool and extract todos
if (toolCall.name === 'plan' && parsedResult?.todoList) {
const todos = parsedResult.todoList.map((item: any, index: number) => ({
id: item.id || `todo-${index}`,
content: typeof item === 'string' ? item : item.content,
completed: false,
executing: false,
}))
// Set the todos in the store
const store = get()
if (store.setPlanTodos) {
store.setPlanTodos(todos)
}
}
// Check if this is the checkoff_todo tool and mark the todo as complete
if (toolCall.name === 'checkoff_todo') {
// Check various possible locations for the todo ID
const todoId =
toolCall.input?.id || toolCall.input?.todoId || parsedResult?.todoId || parsedResult?.id
if (todoId) {
const store = get()
if (store.updatePlanTodoStatus) {
store.updatePlanTodoStatus(todoId, 'completed')
}
}
// Mark this tool as hidden from UI
toolCall.hidden = true
}
// Handle tools with ready_for_review state
if (toolSupportsReadyForReview(toolCall.name)) {
processWorkflowToolResult(toolCall, parsedResult, get)
@@ -804,34 +990,155 @@ const sseHandlers: Record<string, SSEHandler> = {
content: (data, context, get, set) => {
if (!data.data) return
// PERFORMANCE OPTIMIZATION: Use StringBuilder for efficient concatenation
context.accumulatedContent.append(data.data)
// Append new data to pending content buffer
context.pendingContent += data.data
// Update existing text block or create new one (optimized for minimal array mutations)
if (context.currentTextBlock && context.contentBlocks.length > 0) {
// Find the last text block and update it in-place
const lastBlock = context.contentBlocks[context.contentBlocks.length - 1]
if (lastBlock.type === TEXT_BLOCK_TYPE && lastBlock === context.currentTextBlock) {
// Efficiently update existing text block content in-place
lastBlock.content += data.data
// Process complete thinking tags in the pending content
let contentToProcess = context.pendingContent
let hasProcessedContent = false
// Check for complete thinking tags
const thinkingStartRegex = /<thinking>/
const thinkingEndRegex = /<\/thinking>/
while (contentToProcess.length > 0) {
if (context.isInThinkingBlock) {
// We're inside a thinking block, look for the closing tag
const endMatch = thinkingEndRegex.exec(contentToProcess)
if (endMatch) {
// Found the end of thinking block
const thinkingContent = contentToProcess.substring(0, endMatch.index)
// Append to current thinking block
if (context.currentThinkingBlock) {
context.currentThinkingBlock.content += thinkingContent
} else {
// Create new thinking block
context.currentThinkingBlock = contentBlockPool.get()
context.currentThinkingBlock.type = THINKING_BLOCK_TYPE
context.currentThinkingBlock.content = thinkingContent
context.currentThinkingBlock.timestamp = Date.now()
context.currentThinkingBlock.startTime = Date.now()
context.contentBlocks.push(context.currentThinkingBlock)
}
// Reset thinking state
context.isInThinkingBlock = false
if (context.currentThinkingBlock) {
// Set final duration
context.currentThinkingBlock.duration =
Date.now() - (context.currentThinkingBlock.startTime || Date.now())
}
context.currentThinkingBlock = null
context.currentTextBlock = null
// Continue processing after the closing tag
contentToProcess = contentToProcess.substring(endMatch.index + endMatch[0].length)
hasProcessedContent = true
} else {
// No closing tag yet, accumulate in thinking block
if (context.currentThinkingBlock) {
context.currentThinkingBlock.content += contentToProcess
} else {
// Create new thinking block
context.currentThinkingBlock = contentBlockPool.get()
context.currentThinkingBlock.type = THINKING_BLOCK_TYPE
context.currentThinkingBlock.content = contentToProcess
context.currentThinkingBlock.timestamp = Date.now()
context.currentThinkingBlock.startTime = Date.now()
context.contentBlocks.push(context.currentThinkingBlock)
}
contentToProcess = ''
hasProcessedContent = true
}
} else {
// Last block is not text, create a new text block
context.currentTextBlock = contentBlockPool.get()
context.currentTextBlock.type = TEXT_BLOCK_TYPE
context.currentTextBlock.content = data.data
context.currentTextBlock.timestamp = Date.now()
context.contentBlocks.push(context.currentTextBlock)
// Not in a thinking block, look for the start of one
const startMatch = thinkingStartRegex.exec(contentToProcess)
if (startMatch) {
// Found start of thinking block
const textBeforeThinking = contentToProcess.substring(0, startMatch.index)
// Add any text before the thinking tag as a text block AND to accumulated content
if (textBeforeThinking) {
// Add to accumulated content for final message
context.accumulatedContent.append(textBeforeThinking)
if (context.currentTextBlock && context.contentBlocks.length > 0) {
const lastBlock = context.contentBlocks[context.contentBlocks.length - 1]
if (lastBlock.type === TEXT_BLOCK_TYPE && lastBlock === context.currentTextBlock) {
lastBlock.content += textBeforeThinking
} else {
context.currentTextBlock = contentBlockPool.get()
context.currentTextBlock.type = TEXT_BLOCK_TYPE
context.currentTextBlock.content = textBeforeThinking
context.currentTextBlock.timestamp = Date.now()
context.contentBlocks.push(context.currentTextBlock)
}
} else {
context.currentTextBlock = contentBlockPool.get()
context.currentTextBlock.type = TEXT_BLOCK_TYPE
context.currentTextBlock.content = textBeforeThinking
context.currentTextBlock.timestamp = Date.now()
context.contentBlocks.push(context.currentTextBlock)
}
}
// Enter thinking block mode
context.isInThinkingBlock = true
context.currentTextBlock = null
contentToProcess = contentToProcess.substring(startMatch.index + startMatch[0].length)
hasProcessedContent = true
} else {
// No thinking tag, treat as regular text
// But check if we might have a partial opening tag at the end
const partialTagIndex = contentToProcess.lastIndexOf('<')
let textToAdd = contentToProcess
let remaining = ''
if (partialTagIndex >= 0 && partialTagIndex > contentToProcess.length - 10) {
// Might be a partial tag, keep it in buffer
textToAdd = contentToProcess.substring(0, partialTagIndex)
remaining = contentToProcess.substring(partialTagIndex)
}
if (textToAdd) {
// Add to accumulated content for final message
context.accumulatedContent.append(textToAdd)
// Add as regular text block
if (context.currentTextBlock && context.contentBlocks.length > 0) {
const lastBlock = context.contentBlocks[context.contentBlocks.length - 1]
if (lastBlock.type === TEXT_BLOCK_TYPE && lastBlock === context.currentTextBlock) {
lastBlock.content += textToAdd
} else {
context.currentTextBlock = contentBlockPool.get()
context.currentTextBlock.type = TEXT_BLOCK_TYPE
context.currentTextBlock.content = textToAdd
context.currentTextBlock.timestamp = Date.now()
context.contentBlocks.push(context.currentTextBlock)
}
} else {
context.currentTextBlock = contentBlockPool.get()
context.currentTextBlock.type = TEXT_BLOCK_TYPE
context.currentTextBlock.content = textToAdd
context.currentTextBlock.timestamp = Date.now()
context.contentBlocks.push(context.currentTextBlock)
}
hasProcessedContent = true
}
contentToProcess = remaining
break // Exit loop to wait for more content if we have a partial tag
}
}
} else {
// No current text block, create one from pool
context.currentTextBlock = contentBlockPool.get()
context.currentTextBlock.type = TEXT_BLOCK_TYPE
context.currentTextBlock.content = data.data
context.currentTextBlock.timestamp = Date.now()
context.contentBlocks.push(context.currentTextBlock)
}
updateStreamingMessage(set, context)
// Update pending content with any remaining unprocessed content
context.pendingContent = contentToProcess
if (hasProcessedContent) {
updateStreamingMessage(set, context)
}
},
// Handle tool call events - simplified
@@ -854,12 +1161,19 @@ const sseHandlers: Record<string, SSEHandler> = {
const toolCall = createToolCall(toolData.id, toolData.name, toolData.arguments)
// Mark checkoff_todo as hidden from the start
if (toolData.name === 'checkoff_todo') {
toolCall.hidden = true
}
context.toolCalls.push(toolCall)
context.contentBlocks.push({
type: 'tool_call',
toolCall,
timestamp: Date.now(),
// Ensure per-tool timing context for UI components that might rely on block-level timing
startTime: toolCall.startTime,
})
updateStreamingMessage(set, context)
@@ -905,6 +1219,11 @@ const sseHandlers: Record<string, SSEHandler> = {
toolCall.state = 'executing'
// Mark checkoff_todo as hidden
if (toolCall.name === 'checkoff_todo') {
toolCall.hidden = true
}
// Update both contentBlocks and toolCalls atomically before UI update
updateContentBlockToolCall(context.contentBlocks, data.toolCallId, toolCall)
@@ -932,6 +1251,11 @@ const sseHandlers: Record<string, SSEHandler> = {
const toolCall = createToolCall(data.content_block.id, data.content_block.name)
toolCall.partialInput = ''
// Mark checkoff_todo as hidden from the start
if (data.content_block.name === 'checkoff_todo') {
toolCall.hidden = true
}
context.toolCallBuffer = toolCall
context.toolCalls.push(toolCall)
@@ -939,6 +1263,8 @@ const sseHandlers: Record<string, SSEHandler> = {
type: 'tool_call',
toolCall,
timestamp: Date.now(),
// Ensure per-tool timing context for UI components that might rely on block-level timing
startTime: toolCall.startTime,
})
}
},
@@ -981,6 +1307,37 @@ const sseHandlers: Record<string, SSEHandler> = {
processWorkflowToolResult(context.toolCallBuffer, context.toolCallBuffer.input, get)
}
// Check if this is the plan tool and extract todos
if (context.toolCallBuffer.name === 'plan' && context.toolCallBuffer.input?.todoList) {
const todos = context.toolCallBuffer.input.todoList.map((item: any, index: number) => ({
id: item.id || `todo-${index}`,
content: typeof item === 'string' ? item : item.content,
completed: false,
executing: false,
}))
// Set the todos in the store
const store = get()
if (store.setPlanTodos) {
store.setPlanTodos(todos)
}
}
// Check if this is the checkoff_todo tool and mark the todo as complete
if (context.toolCallBuffer.name === 'checkoff_todo') {
// Check both input.id and input.todoId for compatibility
const todoId = context.toolCallBuffer.input?.id || context.toolCallBuffer.input?.todoId
if (todoId) {
const store = get()
if (store.updatePlanTodoStatus) {
store.updatePlanTodoStatus(todoId, 'completed')
}
}
// Mark this tool as hidden from UI
context.toolCallBuffer.hidden = true
}
// Update both contentBlocks and toolCalls atomically before UI update
updateContentBlockToolCall(
context.contentBlocks,
@@ -1055,7 +1412,55 @@ const sseHandlers: Record<string, SSEHandler> = {
}
},
// Default handler
// Handle stream end event - flush any pending content so nothing buffered
// during streaming is lost when the server closes the stream.
stream_end: (data, context, get, set) => {
  // Flush any remaining pending (not-yet-parsed) content.
  if (context.pendingContent) {
    if (context.isInThinkingBlock && context.currentThinkingBlock) {
      // We were still inside a <thinking> block: the remainder belongs to it.
      // Deliberately NOT appended to accumulatedContent — thinking output is
      // excluded from the final message text (see the text branch below).
      // NOTE(review): if isInThinkingBlock is true but currentThinkingBlock is
      // null, the remainder falls through to the text branch — confirm intended.
      context.currentThinkingBlock.content += context.pendingContent
    } else if (context.pendingContent.trim()) {
      // Remaining content is regular text: add it both to the final
      // accumulated message AND as a renderable text content block.
      context.accumulatedContent.append(context.pendingContent)
      if (context.currentTextBlock && context.contentBlocks.length > 0) {
        const lastBlock = context.contentBlocks[context.contentBlocks.length - 1]
        if (lastBlock.type === TEXT_BLOCK_TYPE && lastBlock === context.currentTextBlock) {
          // Extend the current trailing text block in place.
          lastBlock.content += context.pendingContent
        } else {
          // Current text block is no longer the tail — start a new pooled block.
          context.currentTextBlock = contentBlockPool.get()
          context.currentTextBlock.type = TEXT_BLOCK_TYPE
          context.currentTextBlock.content = context.pendingContent
          context.currentTextBlock.timestamp = Date.now()
          context.contentBlocks.push(context.currentTextBlock)
        }
      } else {
        // No open text block — create one from the pool.
        context.currentTextBlock = contentBlockPool.get()
        context.currentTextBlock.type = TEXT_BLOCK_TYPE
        context.currentTextBlock.content = context.pendingContent
        context.currentTextBlock.timestamp = Date.now()
        context.contentBlocks.push(context.currentTextBlock)
      }
    }
    context.pendingContent = ''
  }

  // If a thinking block is still open, record its final duration before clearing.
  if (context.currentThinkingBlock) {
    context.currentThinkingBlock.duration =
      Date.now() - (context.currentThinkingBlock.startTime || Date.now())
  }

  // Reset all thinking/text streaming state so the context is clean.
  context.isInThinkingBlock = false
  context.currentThinkingBlock = null
  context.currentTextBlock = null

  // Push the final state to the UI.
  updateStreamingMessage(set, context)
},
// Default handler for unknown events
default: () => {
// Silently ignore unhandled events
},
@@ -1434,6 +1839,10 @@ export const useCopilotStore = create<CopilotStore>()(
get().abortMessage()
}
// Clear workflow diff store when switching workflows
const { clearDiff } = useWorkflowDiffStore.getState()
clearDiff()
logger.info(`Setting workflow ID: ${workflowId}`)
// Reset state when switching workflows, including chat cache and checkpoints
@@ -1441,6 +1850,7 @@ export const useCopilotStore = create<CopilotStore>()(
...initialState,
workflowId,
mode: get().mode, // Preserve mode
agentDepth: get().agentDepth, // Preserve agent depth
})
},
@@ -1459,6 +1869,11 @@ export const useCopilotStore = create<CopilotStore>()(
if (!chatExists) {
logger.info('Current chat does not belong to current workflow, clearing stale state')
// Clear workflow diff store when clearing stale chat state
const { clearDiff } = useWorkflowDiffStore.getState()
clearDiff()
set({
currentChat: null,
messages: [],
@@ -1488,12 +1903,19 @@ export const useCopilotStore = create<CopilotStore>()(
logger.info('🛑 Aborting ongoing copilot stream due to chat switch')
get().abortMessage()
}
// Clear workflow diff store when switching to a different chat
const { clearDiff } = useWorkflowDiffStore.getState()
clearDiff()
}
// Optimistically set the chat first
set({
currentChat: chat,
messages: ensureToolCallDisplayNames(chat.messages || []),
// Clear todos when switching chats
planTodos: [],
showPlanTodos: false,
})
try {
@@ -1557,11 +1979,17 @@ export const useCopilotStore = create<CopilotStore>()(
get().abortMessage()
}
// Clear workflow diff store when creating a new chat
const { clearDiff } = useWorkflowDiffStore.getState()
clearDiff()
// Set state to null so backend creates a new chat on first message
set({
currentChat: null,
messages: [],
messageCheckpoints: {}, // Clear checkpoints when creating new chat
planTodos: [], // Clear todos when creating new chat
showPlanTodos: false,
})
logger.info('🆕 Cleared chat state for new conversation')
},
@@ -1797,7 +2225,8 @@ export const useCopilotStore = create<CopilotStore>()(
userMessageId: userMessage.id, // Send the frontend-generated ID
chatId: currentChat?.id,
workflowId,
mode,
mode: mode === 'ask' ? 'ask' : 'agent',
depth: get().agentDepth,
createNewChat: !currentChat,
stream,
fileAttachments: options.fileAttachments,
@@ -2032,7 +2461,7 @@ export const useCopilotStore = create<CopilotStore>()(
implicitFeedback: string,
toolCallState?: 'accepted' | 'rejected' | 'errored'
) => {
const { workflowId, currentChat, mode } = get()
const { workflowId, currentChat, mode, agentDepth } = get()
if (!workflowId) {
logger.warn('Cannot send implicit feedback: no workflow ID set')
@@ -2060,7 +2489,8 @@ export const useCopilotStore = create<CopilotStore>()(
message: 'Please continue your response.', // Simple continuation prompt
chatId: currentChat?.id,
workflowId,
mode,
mode: mode === 'ask' ? 'ask' : 'agent',
depth: agentDepth,
createNewChat: !currentChat,
stream: true,
implicitFeedback, // Pass the implicit feedback
@@ -2519,6 +2949,9 @@ export const useCopilotStore = create<CopilotStore>()(
currentBlockType: null,
toolCallBuffer: null,
doneEventCount: 0,
pendingContent: '',
isInThinkingBlock: false,
currentThinkingBlock: null,
_tempBuffer: [],
_lastUpdateTime: 0,
_batchedUpdates: false,
@@ -2543,7 +2976,7 @@ export const useCopilotStore = create<CopilotStore>()(
const timeoutId = setTimeout(() => {
logger.warn('Stream timeout reached, completing response')
reader.cancel()
}, 120000) // 2 minute timeout
}, 600000) // 10 minute timeout
try {
// Process SSE events
@@ -2571,6 +3004,11 @@ export const useCopilotStore = create<CopilotStore>()(
`Completed streaming response, content length: ${context.accumulatedContent.size}`
)
// Call stream_end handler to flush any pending content
if (sseHandlers.stream_end) {
sseHandlers.stream_end({}, context, get, set)
}
// PERFORMANCE OPTIMIZATION: Cleanup and memory management
if (streamingUpdateRAF !== null) {
cancelAnimationFrame(streamingUpdateRAF)
@@ -2581,7 +3019,7 @@ export const useCopilotStore = create<CopilotStore>()(
// Release pooled objects back to pool for reuse
if (context.contentBlocks) {
context.contentBlocks.forEach((block) => {
if (block.type === TEXT_BLOCK_TYPE) {
if (block.type === TEXT_BLOCK_TYPE || block.type === THINKING_BLOCK_TYPE) {
contentBlockPool.release(block)
}
})
@@ -2698,6 +3136,9 @@ export const useCopilotStore = create<CopilotStore>()(
// Invalidate cache since we have a new chat
chatsLastLoadedAt: null,
chatsLoadedForWorkflow: null,
// Clear todos when creating new chat
planTodos: [],
showPlanTodos: false,
})
logger.info(`Created new chat from streaming response: ${newChatId}`)
},
@@ -2757,6 +3198,25 @@ export const useCopilotStore = create<CopilotStore>()(
set({ revertState: null })
},
// Todo list actions (driven by the copilot "plan" tool)

/** Replace the todo list and make the panel visible. */
setPlanTodos: (todos) => {
  set({ showPlanTodos: true, planTodos: todos })
},

/** Mark a single todo as executing or completed, leaving the rest untouched. */
updatePlanTodoStatus: (id, status) => {
  set((state) => {
    const planTodos = state.planTodos.map((item) => {
      if (item.id !== id) return item
      return {
        ...item,
        executing: status === 'executing',
        completed: status === 'completed',
      }
    })
    return { planTodos }
  })
},

/** Hide the todo panel without clearing its contents. */
closePlanTodos: () => {
  set({ showPlanTodos: false })
},
// Update the diff store with proposed workflow changes
updateDiffStore: async (yamlContent: string, toolName?: string) => {
// Check if we're in an aborted state before updating diff
@@ -2850,6 +3310,65 @@ export const useCopilotStore = create<CopilotStore>()(
}
}
},
/**
 * Push a fully-formed workflow state directly into the diff store.
 * Used by tools (e.g. build_workflow) that produce a complete workflow
 * state object rather than YAML — contrast with updateDiffStore above.
 *
 * The diff is marked ready but NOT shown; the user decides when to view it.
 *
 * @param workflowState - proposed workflow; expected to carry a `blocks` map
 *                        and an `edges` array (typed `any` by the store API)
 * @param toolName - name of the tool that produced the state, for logging only
 */
updateDiffStoreWithWorkflowState: async (workflowState: any, toolName?: string) => {
  // Skip entirely if the request that produced this state was aborted.
  const { abortController } = get()
  if (abortController?.signal.aborted) {
    logger.info('🚫 Skipping diff update - request was aborted')
    return
  }

  try {
    // Import diff store dynamically to avoid circular dependencies
    const { useWorkflowDiffStore } = await import('@/stores/workflow-diff')

    // Guard the dereferences: workflowState is `any`, and an unguarded
    // `workflowState.blocks` here would throw inside a log call and abort
    // the whole update via the catch below.
    logger.info('📊 Updating diff store with workflowState directly', {
      blockCount: Object.keys(workflowState?.blocks ?? {}).length,
      edgeCount: workflowState?.edges?.length ?? 0,
      toolName: toolName || 'unknown',
    })

    // Snapshot diff store state before the update (debug aid).
    const diffStoreBefore = useWorkflowDiffStore.getState()
    logger.info('Diff store state before workflowState update:', {
      isShowingDiff: diffStoreBefore.isShowingDiff,
      isDiffReady: diffStoreBefore.isDiffReady,
      hasDiffWorkflow: !!diffStoreBefore.diffWorkflow,
    })

    // Direct assignment to the diff store for build_workflow
    logger.info('Using direct workflowState assignment for build tool')
    useWorkflowDiffStore.setState({
      diffWorkflow: workflowState,
      isDiffReady: true,
      isShowingDiff: false, // Let user decide when to show diff
    })

    // Snapshot diff store state after the update (debug aid).
    const diffStoreAfter = useWorkflowDiffStore.getState()
    logger.info('Diff store state after workflowState update:', {
      isShowingDiff: diffStoreAfter.isShowingDiff,
      isDiffReady: diffStoreAfter.isDiffReady,
      hasDiffWorkflow: !!diffStoreAfter.diffWorkflow,
      diffWorkflowBlockCount: diffStoreAfter.diffWorkflow
        ? Object.keys(diffStoreAfter.diffWorkflow.blocks).length
        : 0,
    })

    logger.info('Successfully updated diff store with workflowState')
  } catch (error) {
    logger.error('Failed to update diff store with workflowState:', error)
    // NOTE(review): console.error duplicates the logger call above — presumably
    // kept so the error is visible in the browser console; confirm and dedupe.
    console.error('[Copilot] Error updating diff store with workflowState:', error)
  }
},
/** Set the agent depth (0-3) and log the transition for diagnostics. */
setAgentDepth: (depth) => {
  const previousDepth = get().agentDepth
  set({ agentDepth: depth })
  logger.info(`Copilot agent depth changed from ${previousDepth} to ${depth}`)
},
}),
{ name: 'copilot-store' }
)

View File

@@ -41,6 +41,7 @@ export interface CopilotToolCall {
result?: any
error?: string | { message: string }
timestamp?: string
hidden?: boolean // Hide tool from UI rendering (e.g., checkoff_todo)
}
/**
@@ -52,13 +53,21 @@ export interface TextContentBlock {
timestamp: number
}
/**
 * A streamed "thinking" segment parsed out of <thinking>…</thinking> tags in
 * the model output. Rendered as its own block in the chat and excluded from
 * the final accumulated message text.
 */
export interface ThinkingContentBlock {
  type: 'thinking'
  content: string
  // Creation time of the block (epoch ms), used for ordering in the stream
  timestamp: number
  duration?: number // Duration in milliseconds for display
  startTime?: number // Start time for calculating duration
}
/**
 * A tool invocation embedded inline in the streamed message content,
 * positioned among text/thinking blocks by its timestamp.
 */
export interface ToolCallContentBlock {
  type: 'tool_call'
  toolCall: CopilotToolCall
  // Creation time of the block (epoch ms), used for ordering in the stream
  timestamp: number
}
export type ContentBlock = TextContentBlock | ToolCallContentBlock
export type ContentBlock = TextContentBlock | ThinkingContentBlock | ToolCallContentBlock
/**
* File attachment interface for copilot messages
@@ -161,6 +170,8 @@ export interface SendDocsMessageOptions {
export interface CopilotState {
// Current mode
mode: CopilotMode
// Depth for agent mode (0-3)
agentDepth: 0 | 1 | 2 | 3
// Chat management
currentChat: CopilotChat | null
@@ -196,6 +207,10 @@ export interface CopilotState {
// Revert state management
revertState: { messageId: string; messageContent: string } | null // Track which message we reverted from
inputValue: string // Control the input field
// Todo list state (from plan tool)
planTodos: Array<{ id: string; content: string; completed?: boolean; executing?: boolean }>
showPlanTodos: boolean
}
/**
@@ -204,6 +219,7 @@ export interface CopilotState {
export interface CopilotActions {
// Mode management
setMode: (mode: CopilotMode) => void
setAgentDepth: (depth: 0 | 1 | 2 | 3) => void
// Chat management
setWorkflowId: (workflowId: string | null) => Promise<void>
@@ -252,6 +268,13 @@ export interface CopilotActions {
setInputValue: (value: string) => void
clearRevertState: () => void
// Todo list actions
setPlanTodos: (
todos: Array<{ id: string; content: string; completed?: boolean; executing?: boolean }>
) => void
updatePlanTodoStatus: (id: string, status: 'executing' | 'completed') => void
closePlanTodos: () => void
// Internal helpers (not exposed publicly)
handleStreamingResponse: (
stream: ReadableStream,
@@ -260,6 +283,7 @@ export interface CopilotActions {
) => Promise<void>
handleNewChatCreation: (newChatId: string) => Promise<void>
updateDiffStore: (yamlContent: string, toolName?: string) => Promise<void>
updateDiffStoreWithWorkflowState: (workflowState: any, toolName?: string) => Promise<void>
}
/**

View File

@@ -425,11 +425,20 @@ export const useWorkflowRegistry = create<WorkflowRegistry>()(
// Modified setActiveWorkflow to work with clean DB-only architecture
setActiveWorkflow: async (id: string) => {
const { workflows, activeWorkflowId } = get()
if (!workflows[id]) {
set({ error: `Workflow ${id} not found` })
if (activeWorkflowId === id) {
logger.info(`Already active workflow ${id}, skipping switch`)
return
}
if (!workflows[id]) {
logger.error(`Workflow ${id} not found in registry`)
set({ error: `Workflow not found: ${id}` })
throw new Error(`Workflow not found: ${id}`)
}
logger.info(`Switching to workflow ${id}`)
// First, sync the current workflow before switching (if there is one)
if (activeWorkflowId && activeWorkflowId !== id) {
// Mark current workflow as dirty and sync (fire and forget)

View File

@@ -1,6 +1,7 @@
import { create } from 'zustand'
import { devtools } from 'zustand/middleware'
import { createLogger } from '@/lib/logs/console/logger'
import { useWorkflowRegistry } from '../registry/store'
import { useSubBlockStore } from '../subblock/store'
import { useWorkflowStore } from '../workflow/store'
@@ -118,20 +119,16 @@ export const useWorkflowYamlStore = create<WorkflowYamlStore>()(
// Initialize subscriptions on first use
initializeSubscriptions()
const workflowState = useWorkflowStore.getState()
const subBlockValues = getSubBlockValues()
// Get the active workflow ID from registry
const { activeWorkflowId } = useWorkflowRegistry.getState()
// Call the API route to generate YAML (server has access to API key)
const response = await fetch('/api/workflows/yaml/convert', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
workflowState,
subBlockValues,
}),
})
if (!activeWorkflowId) {
logger.warn('No active workflow to generate YAML for')
return
}
// Call the new database-based export endpoint
const response = await fetch(`/api/workflows/yaml/export?workflowId=${activeWorkflowId}`)
if (!response.ok) {
const errorData = await response.json().catch(() => null)