From 17513d77ea3f9572135a430609faa1d73643c56f Mon Sep 17 00:00:00 2001
From: Siddharth Ganesan
Date: Tue, 8 Jul 2025 16:38:22 -0700
Subject: [PATCH] Initial chatbot ui
---
apps/sim/app/api/docs/ask/route.ts | 352 +++++++++++++
.../panel/components/copilot/copilot.tsx | 479 ++++++++++++++++++
.../w/[workflowId]/components/panel/panel.tsx | 32 +-
apps/sim/lib/documents/docs-chunker.ts | 4 +-
apps/sim/scripts/process-docs-embeddings.ts | 3 +-
apps/sim/stores/panel/types.ts | 2 +-
6 files changed, 862 insertions(+), 10 deletions(-)
create mode 100644 apps/sim/app/api/docs/ask/route.ts
create mode 100644 apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/copilot.tsx
diff --git a/apps/sim/app/api/docs/ask/route.ts b/apps/sim/app/api/docs/ask/route.ts
new file mode 100644
index 000000000..489e355a8
--- /dev/null
+++ b/apps/sim/app/api/docs/ask/route.ts
@@ -0,0 +1,352 @@
+import { NextRequest, NextResponse } from 'next/server'
+import { z } from 'zod'
+import { db } from '@/db'
+import { docsEmbeddings } from '@/db/schema'
+import { generateEmbeddings } from '@/app/api/knowledge/utils'
+import { createLogger } from '@/lib/logs/console-logger'
+import { sql } from 'drizzle-orm'
+import { env } from '@/lib/env'
+import { executeProviderRequest } from '@/providers'
+import { getProviderDefaultModel } from '@/providers/models'
+import { getRotatingApiKey } from '@/lib/utils'
+
+const logger = createLogger('DocsRAG')
+
+// Configuration for docs RAG
+const DOCS_RAG_CONFIG = {
+ // Default provider for docs RAG - change this constant to switch providers
+ defaultProvider: 'anthropic', // Options: 'openai', 'anthropic', 'deepseek', 'google', 'xai', etc.
+ // Default model for docs RAG - will use provider's default if not specified
+ defaultModel: 'claude-3-7-sonnet-latest', // e.g., 'gpt-4o-mini', 'claude-3-5-sonnet-latest', 'deepseek-chat'
+ // Temperature for response generation
+ temperature: 0.1,
+ // Max tokens for response
+ maxTokens: 1000,
+} as const
+
+const DocsQuerySchema = z.object({
+ query: z.string().min(1, 'Query is required'),
+ topK: z.number().min(1).max(20).default(10),
+ provider: z.string().optional(), // Allow override of provider per request
+ model: z.string().optional(), // Allow override of model per request
+ stream: z.boolean().optional().default(false), // Enable streaming responses
+})
+
+/**
+ * Generate embedding for search query
+ */
+async function generateSearchEmbedding(query: string): Promise {
+ try {
+ const embeddings = await generateEmbeddings([query])
+ return embeddings[0] || []
+ } catch (error) {
+ logger.error('Failed to generate search embedding:', error)
+ throw new Error('Failed to generate search embedding')
+ }
+}
+
+/**
+ * Search docs embeddings using vector similarity
+ */
+async function searchDocs(queryEmbedding: number[], topK: number) {
+ try {
+ const results = await db
+ .select({
+ chunkId: docsEmbeddings.chunkId,
+ chunkText: docsEmbeddings.chunkText,
+ sourceDocument: docsEmbeddings.sourceDocument,
+ sourceLink: docsEmbeddings.sourceLink,
+ headerText: docsEmbeddings.headerText,
+ headerLevel: docsEmbeddings.headerLevel,
+ similarity: sql`1 - (${docsEmbeddings.embedding} <=> ${JSON.stringify(queryEmbedding)}::vector)`,
+ })
+ .from(docsEmbeddings)
+ .orderBy(sql`${docsEmbeddings.embedding} <=> ${JSON.stringify(queryEmbedding)}::vector`)
+ .limit(topK)
+
+ return results
+ } catch (error) {
+ logger.error('Failed to search docs:', error)
+ throw new Error('Failed to search docs')
+ }
+}
+
+/**
+ * Generate response using LLM with retrieved context
+ */
+async function generateResponse(query: string, chunks: any[], provider?: string, model?: string, stream: boolean = false): Promise {
+ // Determine which provider and model to use
+ const selectedProvider = provider || DOCS_RAG_CONFIG.defaultProvider
+ const selectedModel = model || DOCS_RAG_CONFIG.defaultModel || getProviderDefaultModel(selectedProvider)
+
+ // Get API key for the selected provider
+ let apiKey: string
+ try {
+ if (selectedProvider === 'openai' || selectedProvider === 'azure-openai') {
+ apiKey = getRotatingApiKey('openai')
+ } else if (selectedProvider === 'anthropic') {
+ apiKey = getRotatingApiKey('anthropic')
+ } else {
+ // For other providers, try to get from environment
+ const envKey = `${selectedProvider.toUpperCase().replace('-', '_')}_API_KEY`
+ apiKey = process.env[envKey] || ''
+ if (!apiKey) {
+ throw new Error(`API key not configured for provider: ${selectedProvider}`)
+ }
+ }
+ } catch (error) {
+ logger.error(`Failed to get API key for provider ${selectedProvider}:`, error)
+ throw new Error(`API key not configured for provider: ${selectedProvider}`)
+ }
+
+ // Format chunks as context with numbered sources
+ const context = chunks
+ .map((chunk, index) => {
+ // Ensure all chunk properties are strings to avoid object serialization
+ const headerText = typeof chunk.headerText === 'string' ? chunk.headerText : String(chunk.headerText || 'Untitled Section')
+ const sourceDocument = typeof chunk.sourceDocument === 'string' ? chunk.sourceDocument : String(chunk.sourceDocument || 'Unknown Document')
+ const sourceLink = typeof chunk.sourceLink === 'string' ? chunk.sourceLink : String(chunk.sourceLink || '#')
+ const chunkText = typeof chunk.chunkText === 'string' ? chunk.chunkText : String(chunk.chunkText || '')
+
+ return `[${index + 1}] ${headerText}
+Document: ${sourceDocument}
+URL: ${sourceLink}
+Content: ${chunkText}`
+ })
+ .join('\n\n')
+
+ const systemPrompt = `You are a helpful assistant that answers questions about Sim Studio documentation.
+
+IMPORTANT: Use inline citations throughout your response. When referencing information from the sources, include the citation number in square brackets like [1], [2], etc.
+
+Guidelines:
+- Answer the user's question accurately using the provided documentation
+- Include inline citations [1], [2], etc. when referencing specific information
+- Use multiple citations for comprehensive answers
+- Format your response in clean, readable markdown
+- Use bullet points, code blocks, and headers where appropriate
+- If information spans multiple sources, cite all relevant ones
+- If the question cannot be answered from the context, say so clearly
+- Be conversational but precise
+- NEVER include object representations like "[object Object]" - always use proper text
+- When mentioning tool names, use their actual names from the documentation
+
+The sources are numbered [1] through [${chunks.length}] in the context below.`
+
+ const userPrompt = `Question: ${query}
+
+Documentation Context:
+${context}`
+
+ try {
+ logger.info(`Generating response using provider: ${selectedProvider}, model: ${selectedModel}`)
+
+ const providerRequest = {
+ model: selectedModel,
+ systemPrompt,
+ context: userPrompt,
+ temperature: DOCS_RAG_CONFIG.temperature,
+ maxTokens: DOCS_RAG_CONFIG.maxTokens,
+ apiKey,
+ stream,
+ // Azure OpenAI specific parameters if needed
+ ...(selectedProvider === 'azure-openai' && {
+ azureEndpoint: env.AZURE_OPENAI_ENDPOINT,
+ azureApiVersion: env.AZURE_OPENAI_API_VERSION,
+ }),
+ }
+
+ const response = await executeProviderRequest(selectedProvider, providerRequest)
+
+ // Handle different response types
+ if (response instanceof ReadableStream) {
+ if (stream) {
+ return response // Return the stream directly for streaming requests
+ } else {
+ throw new Error('Unexpected streaming response when non-streaming was requested')
+ }
+ }
+
+ if ('stream' in response && 'execution' in response) {
+ // Handle StreamingExecution for providers like Anthropic
+ if (stream) {
+ return response.stream // Return the stream from StreamingExecution
+ } else {
+ throw new Error('Unexpected streaming execution response when non-streaming was requested')
+ }
+ }
+
+ // At this point, we have a ProviderResponse
+ const content = response.content || 'Sorry, I could not generate a response.'
+
+ // Clean up any object serialization artifacts
+ const cleanedContent = content
+ .replace(/\[object Object\],?/g, '') // Remove [object Object] artifacts
+ .replace(/\s+/g, ' ') // Normalize whitespace
+ .trim()
+
+ return cleanedContent
+ } catch (error) {
+ logger.error('Failed to generate LLM response:', error)
+ throw new Error(`Failed to generate response using ${selectedProvider}: ${error instanceof Error ? error.message : 'Unknown error'}`)
+ }
+}
+
+/**
+ * POST /api/docs/ask
+ * Ask questions about Sim Studio documentation using RAG
+ */
+export async function POST(req: NextRequest) {
+ const requestId = crypto.randomUUID()
+
+ try {
+ const body = await req.json()
+ const { query, topK, provider, model, stream } = DocsQuerySchema.parse(body)
+
+ logger.info(`[${requestId}] Docs RAG query: "${query}"`, {
+ provider: provider || DOCS_RAG_CONFIG.defaultProvider,
+ model: model || DOCS_RAG_CONFIG.defaultModel || getProviderDefaultModel(provider || DOCS_RAG_CONFIG.defaultProvider),
+ topK,
+ })
+
+ // Step 1: Generate embedding for the query
+ logger.info(`[${requestId}] Generating query embedding...`)
+ const queryEmbedding = await generateSearchEmbedding(query)
+
+ if (queryEmbedding.length === 0) {
+ return NextResponse.json(
+ { error: 'Failed to generate query embedding' },
+ { status: 500 }
+ )
+ }
+
+ // Step 2: Search for relevant docs chunks
+ logger.info(`[${requestId}] Searching docs for top ${topK} chunks...`)
+ const chunks = await searchDocs(queryEmbedding, topK)
+
+ if (chunks.length === 0) {
+ return NextResponse.json({
+ success: true,
+ response: "I couldn't find any relevant documentation for your question. Please try rephrasing your query or check if you're asking about a feature that exists in Sim Studio.",
+ sources: [],
+ metadata: {
+ requestId,
+ chunksFound: 0,
+ query,
+ provider: provider || DOCS_RAG_CONFIG.defaultProvider,
+ model: model || DOCS_RAG_CONFIG.defaultModel || getProviderDefaultModel(provider || DOCS_RAG_CONFIG.defaultProvider),
+ },
+ })
+ }
+
+ // Step 3: Generate response using LLM
+ logger.info(`[${requestId}] Generating LLM response with ${chunks.length} chunks...`)
+ const response = await generateResponse(query, chunks, provider, model, stream)
+
+ // Step 4: Format sources for response
+ const sources = chunks.map((chunk) => ({
+ title: chunk.headerText,
+ document: chunk.sourceDocument,
+ link: chunk.sourceLink,
+ similarity: Math.round(chunk.similarity * 100) / 100,
+ }))
+
+ // Handle streaming response
+ if (response instanceof ReadableStream) {
+ logger.info(`[${requestId}] Returning streaming response`)
+
+ // Create a new stream that includes metadata
+ const encoder = new TextEncoder()
+ const decoder = new TextDecoder()
+
+ return new Response(
+ new ReadableStream({
+ async start(controller) {
+ const reader = response.getReader()
+
+ // Send initial metadata
+ const metadata = {
+ type: 'metadata',
+ sources,
+ metadata: {
+ requestId,
+ chunksFound: chunks.length,
+ query,
+ topSimilarity: sources[0]?.similarity,
+ provider: provider || DOCS_RAG_CONFIG.defaultProvider,
+ model: model || DOCS_RAG_CONFIG.defaultModel || getProviderDefaultModel(provider || DOCS_RAG_CONFIG.defaultProvider),
+ },
+ }
+ controller.enqueue(encoder.encode(`data: ${JSON.stringify(metadata)}\n\n`))
+
+ try {
+ while (true) {
+ const { done, value } = await reader.read()
+ if (done) break
+
+ // Forward the chunk with content type
+ const chunkText = decoder.decode(value)
+ // Clean up any object serialization artifacts in streaming content
+ const cleanedChunk = chunkText.replace(/\[object Object\],?/g, '')
+ const contentChunk = {
+ type: 'content',
+ content: cleanedChunk,
+ }
+ controller.enqueue(encoder.encode(`data: ${JSON.stringify(contentChunk)}\n\n`))
+ }
+
+ // Send end marker
+ controller.enqueue(encoder.encode(`data: {"type":"done"}\n\n`))
+ } catch (error) {
+ logger.error(`[${requestId}] Streaming error:`, error)
+ const errorChunk = {
+ type: 'error',
+ error: 'Streaming failed',
+ }
+ controller.enqueue(encoder.encode(`data: ${JSON.stringify(errorChunk)}\n\n`))
+ } finally {
+ controller.close()
+ }
+ },
+ }),
+ {
+ headers: {
+ 'Content-Type': 'text/event-stream',
+ 'Cache-Control': 'no-cache',
+ 'Connection': 'keep-alive',
+ },
+ }
+ )
+ }
+
+ logger.info(`[${requestId}] RAG response generated successfully`)
+
+ return NextResponse.json({
+ success: true,
+ response,
+ sources,
+ metadata: {
+ requestId,
+ chunksFound: chunks.length,
+ query,
+ topSimilarity: sources[0]?.similarity,
+ provider: provider || DOCS_RAG_CONFIG.defaultProvider,
+ model: model || DOCS_RAG_CONFIG.defaultModel || getProviderDefaultModel(provider || DOCS_RAG_CONFIG.defaultProvider),
+ },
+ })
+
+ } catch (error) {
+ if (error instanceof z.ZodError) {
+ return NextResponse.json(
+ { error: 'Invalid request data', details: error.errors },
+ { status: 400 }
+ )
+ }
+
+ logger.error(`[${requestId}] RAG error:`, error)
+ return NextResponse.json(
+ { error: 'Internal server error' },
+ { status: 500 }
+ )
+ }
+}
\ No newline at end of file
diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/copilot.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/copilot.tsx
new file mode 100644
index 000000000..9281ff34d
--- /dev/null
+++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/copilot.tsx
@@ -0,0 +1,479 @@
+'use client'
+
+import { useState, useRef, useEffect, useImperativeHandle, forwardRef, useCallback, useMemo } from 'react'
+import { Send, Bot, User, ExternalLink, Loader2, Expand, X } from 'lucide-react'
+import { Button } from '@/components/ui/button'
+import { Input } from '@/components/ui/input'
+import { ScrollArea } from '@/components/ui/scroll-area'
+import { Dialog, DialogContent } from '@/components/ui/dialog'
+import { createLogger } from '@/lib/logs/console-logger'
+import ReactMarkdown from 'react-markdown'
+
+const logger = createLogger('Copilot')
+
// Props for the Copilot panel component.
interface CopilotProps {
  panelWidth: number
}

// Imperative handle exposed to the parent panel via forwardRef.
interface CopilotRef {
  clearMessages: () => void
}

// One entry in the chat transcript.
interface Message {
  id: string
  role: 'user' | 'assistant'
  content: string
  timestamp: Date
  // Docs citations attached to assistant messages (from the RAG metadata event).
  sources?: {
    title: string
    document: string
    link: string
    similarity: number
  }[]
  isLoading?: boolean
  // True while SSE tokens are still arriving for this message.
  isStreaming?: boolean
}
+
+export const Copilot = forwardRef(({ panelWidth }, ref) => {
+ const [messages, setMessages] = useState([])
+ const [input, setInput] = useState('')
+ const [isLoading, setIsLoading] = useState(false)
+ const [isFullscreen, setIsFullscreen] = useState(false)
+ const scrollAreaRef = useRef(null)
+ const inputRef = useRef(null)
+
+ // Expose clear function to parent
+ useImperativeHandle(ref, () => ({
+ clearMessages: () => {
+ setMessages([])
+ logger.info('Copilot messages cleared')
+ }
+ }), [])
+
+ // Auto-scroll to bottom when new messages are added
+ useEffect(() => {
+ if (scrollAreaRef.current) {
+ const scrollContainer = scrollAreaRef.current.querySelector('[data-radix-scroll-area-viewport]')
+ if (scrollContainer) {
+ scrollContainer.scrollTop = scrollContainer.scrollHeight
+ }
+ }
+ }, [messages])
+
+
+
+ const handleSubmit = useCallback(async (e: React.FormEvent) => {
+ e.preventDefault()
+ if (!input.trim() || isLoading) return
+
+ const userMessage: Message = {
+ id: crypto.randomUUID(),
+ role: 'user',
+ content: input.trim(),
+ timestamp: new Date(),
+ }
+
+ const streamingMessage: Message = {
+ id: crypto.randomUUID(),
+ role: 'assistant',
+ content: '',
+ timestamp: new Date(),
+ isStreaming: true,
+ }
+
+ setMessages(prev => [...prev, userMessage, streamingMessage])
+ const query = input.trim()
+ setInput('')
+ setIsLoading(true)
+
+ try {
+ logger.info('Sending docs RAG query:', { query })
+
+ const response = await fetch('/api/docs/ask', {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({
+ query,
+ topK: 5,
+ stream: true,
+ }),
+ })
+
+ if (!response.ok) {
+ throw new Error(`HTTP ${response.status}: ${await response.text()}`)
+ }
+
+ // Handle streaming response
+ if (response.headers.get('content-type')?.includes('text/event-stream')) {
+ const reader = response.body?.getReader()
+ const decoder = new TextDecoder()
+ let accumulatedContent = ''
+ let sources: any[] = []
+
+ if (!reader) {
+ throw new Error('Failed to get response reader')
+ }
+
+ while (true) {
+ const { done, value } = await reader.read()
+ if (done) break
+
+ const chunk = decoder.decode(value, { stream: true })
+ const lines = chunk.split('\n')
+
+ for (const line of lines) {
+ if (line.startsWith('data: ')) {
+ try {
+ const data = JSON.parse(line.slice(6))
+
+ if (data.type === 'metadata') {
+ sources = data.sources || []
+ } else if (data.type === 'content') {
+ accumulatedContent += data.content
+
+ // Update the streaming message with accumulated content
+ setMessages(prev => prev.map(msg =>
+ msg.id === streamingMessage.id
+ ? { ...msg, content: accumulatedContent, sources }
+ : msg
+ ))
+ } else if (data.type === 'done') {
+ // Finish streaming
+ setMessages(prev => prev.map(msg =>
+ msg.id === streamingMessage.id
+ ? { ...msg, isStreaming: false, sources }
+ : msg
+ ))
+ } else if (data.type === 'error') {
+ throw new Error(data.error || 'Streaming error')
+ }
+ } catch (parseError) {
+ logger.warn('Failed to parse SSE data:', parseError)
+ }
+ }
+ }
+ }
+
+ logger.info('Received docs RAG response:', {
+ contentLength: accumulatedContent.length,
+ sourcesCount: sources.length,
+ })
+
+ } else {
+ // Fallback to non-streaming response
+ const data = await response.json()
+
+ const assistantMessage: Message = {
+ id: streamingMessage.id,
+ role: 'assistant',
+ content: data.response || 'Sorry, I could not generate a response.',
+ timestamp: new Date(),
+ sources: data.sources || [],
+ isStreaming: false,
+ }
+
+ setMessages(prev => prev.slice(0, -1).concat(assistantMessage))
+ }
+
+ } catch (error) {
+ logger.error('Docs RAG error:', error)
+
+ const errorMessage: Message = {
+ id: streamingMessage.id,
+ role: 'assistant',
+ content: 'Sorry, I encountered an error while searching the documentation. Please try again.',
+ timestamp: new Date(),
+ isStreaming: false,
+ }
+
+ setMessages(prev => prev.slice(0, -1).concat(errorMessage))
+ } finally {
+ setIsLoading(false)
+ }
+ }, [input, isLoading])
+
+ const formatTimestamp = (date: Date) => {
+ return date.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })
+ }
+
+ // Function to render content with inline hyperlinked citations and basic markdown
+ const renderContentWithCitations = (content: string, sources: Message['sources'] = []) => {
+ if (!content) return content
+
+ let processedContent = content
+
+ // Replace [1], [2], etc. with hyperlinked citations
+ processedContent = processedContent.replace(/\[(\d+)\]/g, (match, num) => {
+ const sourceIndex = parseInt(num) - 1
+ const source = sources[sourceIndex]
+
+ if (source) {
+ return `${match}`
+ }
+
+ return match
+ })
+
+ // Basic markdown processing for better formatting
+ processedContent = processedContent
+ // Handle code blocks
+ .replace(/```(\w+)?\n([\s\S]*?)```/g, '$2
')
+ // Handle inline code
+ .replace(/`([^`]+)`/g, '$1')
+ // Handle bold text
+ .replace(/\*\*(.*?)\*\*/g, '$1')
+ // Handle italic text
+ .replace(/\*(.*?)\*/g, '$1')
+ // Handle headers
+ .replace(/^### (.*$)/gm, '$1
')
+ .replace(/^## (.*$)/gm, '$1
')
+ .replace(/^# (.*$)/gm, '$1
')
+ // Handle unordered lists
+ .replace(/^\* (.*$)/gm, '• $1')
+ .replace(/^- (.*$)/gm, '• $1')
+ // Handle line breaks
+ .replace(/\n\n/g, '
')
+ .replace(/\n/g, '
')
+
+ // Wrap in paragraph tags if not already wrapped
+ if (!processedContent.includes('
') && !processedContent.includes('
') && !processedContent.includes('') && !processedContent.includes('')) {
+ processedContent = `
${processedContent}
`
+ }
+
+ return processedContent
+ }
+
+ const renderMessage = (message: Message) => {
+ if (message.isStreaming && !message.content) {
+ return (
+
+
+
+
+
+
+ Copilot
+ {formatTimestamp(message.timestamp)}
+
+
+
+ Searching documentation...
+
+
+
+ )
+ }
+
+ return (
+
+
+ {message.role === 'user' ? (
+
+ ) : (
+
+ )}
+
+
+
+
+ {message.role === 'user' ? 'You' : 'Copilot'}
+
+
{formatTimestamp(message.timestamp)}
+ {message.isStreaming && (
+
+
+ Responding...
+
+ )}
+
+
+ {/* Enhanced content rendering with inline citations */}
+
+
+ {/* Streaming cursor */}
+ {message.isStreaming && message.content && (
+
+ )}
+
+
+ )
+ }
+
+ return (
+ <>
+ {/* Main Panel Content */}
+
+ {/* Header */}
+
+
+
+
+
+
Documentation Copilot
+
Ask questions about Sim Studio
+
+
+
+
+
+
+
+
+ {/* Messages */}
+
+ {messages.length === 0 ? (
+
+
+
Welcome to Documentation Copilot
+
+ Ask me anything about Sim Studio features, workflows, tools, or how to get started.
+
+
+
Try asking:
+
+
"How do I create a workflow?"
+
"What tools are available?"
+
"How do I deploy my workflow?"
+
+
+
+ ) : (
+
+ {messages.map(renderMessage)}
+
+ )}
+
+
+ {/* Input */}
+
+
+
+
+
+ {/* Fullscreen Modal */}
+ {isFullscreen && (
+
+ )}
+ >
+ )
+})
+
+Copilot.displayName = 'Copilot'
\ No newline at end of file
diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/panel.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/panel.tsx
index 27494712d..17dbfee99 100644
--- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/panel.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/panel.tsx
@@ -1,6 +1,6 @@
'use client'
-import { useEffect, useState } from 'react'
+import { useEffect, useState, useRef } from 'react'
import { Expand, PanelRight } from 'lucide-react'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
import { useChatStore } from '@/stores/panel/chat/store'
@@ -11,12 +11,14 @@ import { Chat } from './components/chat/chat'
import { ChatModal } from './components/chat/components/chat-modal/chat-modal'
import { Console } from './components/console/console'
import { Variables } from './components/variables/variables'
+import { Copilot } from './components/copilot/copilot'
export function Panel() {
const [width, setWidth] = useState(336) // 84 * 4 = 336px (default width)
const [isDragging, setIsDragging] = useState(false)
const [chatMessage, setChatMessage] = useState('')
const [isChatModalOpen, setIsChatModalOpen] = useState(false)
+ const copilotRef = useRef<{ clearMessages: () => void }>(null)
const isOpen = usePanelStore((state) => state.isOpen)
const togglePanel = usePanelStore((state) => state.togglePanel)
@@ -116,15 +118,29 @@ export function Panel() {
>
Variables
+
- {(activeTab === 'console' || activeTab === 'chat') && (
+ {(activeTab === 'console' || activeTab === 'chat' || activeTab === 'copilot') && (