feat(copilot): enterprise configuration (#3184)

* Copilot enterprise models

* Fix azure anthropic

* Fix

* Consolidation

* Cleanup

* Clean up code

* Fix lint

* cleanup

* Fix greptile
This commit is contained in:
Siddharth Ganesan
2026-02-10 16:37:30 -08:00
committed by GitHub
parent 20b230d1aa
commit c5dd90e79d
21 changed files with 410 additions and 743 deletions

View File

@@ -8,9 +8,8 @@ import { getSession } from '@/lib/auth'
import { buildConversationHistory } from '@/lib/copilot/chat-context'
import { resolveOrCreateChat } from '@/lib/copilot/chat-lifecycle'
import { buildCopilotRequestPayload } from '@/lib/copilot/chat-payload'
import { generateChatTitle } from '@/lib/copilot/chat-title'
import { getCopilotModel } from '@/lib/copilot/config'
import { COPILOT_MODEL_IDS, COPILOT_REQUEST_MODES } from '@/lib/copilot/models'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import { COPILOT_REQUEST_MODES } from '@/lib/copilot/models'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import {
createStreamEventWriter,
@@ -29,6 +28,49 @@ import { resolveWorkflowIdForUser } from '@/lib/workflows/utils'
const logger = createLogger('CopilotChatAPI')
/**
 * Ask the copilot backend to generate a short title for a chat.
 *
 * Returns the trimmed title, or null when the inputs are missing, the
 * backend replies with a non-2xx status, or the request itself throws.
 */
async function requestChatTitleFromCopilot(params: {
  message: string
  model: string
  provider?: string
}): Promise<string | null> {
  const { message, model, provider } = params
  if (!message || !model) return null

  const headers: Record<string, string> = { 'Content-Type': 'application/json' }
  if (env.COPILOT_API_KEY) {
    headers['x-api-key'] = env.COPILOT_API_KEY
  }

  // Only include `provider` in the payload when the caller supplied one.
  const requestBody: Record<string, unknown> = { message, model }
  if (provider) {
    requestBody.provider = provider
  }

  try {
    const response = await fetch(`${SIM_AGENT_API_URL}/api/generate-chat-title`, {
      method: 'POST',
      headers,
      body: JSON.stringify(requestBody),
    })
    // Tolerate non-JSON error bodies from the backend.
    const payload = await response.json().catch(() => ({}))

    if (!response.ok) {
      logger.warn('Failed to generate chat title via copilot backend', {
        status: response.status,
        error: payload,
      })
      return null
    }

    const title = typeof payload?.title === 'string' ? payload.title.trim() : ''
    return title === '' ? null : title
  } catch (error) {
    logger.error('Error generating chat title:', error)
    return null
  }
}
const FileAttachmentSchema = z.object({
id: z.string(),
key: z.string(),
@@ -43,14 +85,14 @@ const ChatMessageSchema = z.object({
chatId: z.string().optional(),
workflowId: z.string().optional(),
workflowName: z.string().optional(),
model: z.enum(COPILOT_MODEL_IDS).optional().default('claude-4.6-opus'),
model: z.string().optional().default('claude-opus-4-6'),
mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'),
prefetch: z.boolean().optional(),
createNewChat: z.boolean().optional().default(false),
stream: z.boolean().optional().default(true),
implicitFeedback: z.string().optional(),
fileAttachments: z.array(FileAttachmentSchema).optional(),
provider: z.string().optional().default('openai'),
provider: z.string().optional(),
conversationId: z.string().optional(),
contexts: z
.array(
@@ -173,14 +215,14 @@ export async function POST(req: NextRequest) {
let currentChat: any = null
let conversationHistory: any[] = []
let actualChatId = chatId
const selectedModel = model || 'claude-opus-4-6'
if (chatId || createNewChat) {
const defaultsForChatRow = getCopilotModel('chat')
const chatResult = await resolveOrCreateChat({
chatId,
userId: authenticatedUserId,
workflowId,
model: defaultsForChatRow.model,
model: selectedModel,
})
currentChat = chatResult.chat
actualChatId = chatResult.chatId || chatId
@@ -191,8 +233,6 @@ export async function POST(req: NextRequest) {
conversationHistory = history.history
}
const defaults = getCopilotModel('chat')
const selectedModel = model || defaults.model
const effectiveMode = mode === 'agent' ? 'build' : mode
const effectiveConversationId =
(currentChat?.conversationId as string | undefined) || conversationId
@@ -205,6 +245,7 @@ export async function POST(req: NextRequest) {
userMessageId: userMessageIdToUse,
mode,
model: selectedModel,
provider,
conversationHistory,
contexts: agentContexts,
fileAttachments,
@@ -283,7 +324,7 @@ export async function POST(req: NextRequest) {
}
if (actualChatId && !currentChat?.title && conversationHistory.length === 0) {
generateChatTitle(message)
requestChatTitleFromCopilot({ message, model: selectedModel, provider })
.then(async (title) => {
if (title) {
await db
@@ -372,10 +413,7 @@ export async function POST(req: NextRequest) {
content: nonStreamingResult.content,
toolCalls: nonStreamingResult.toolCalls,
model: selectedModel,
provider:
(requestPayload?.provider as Record<string, unknown>)?.provider ||
env.COPILOT_PROVIDER ||
'openai',
provider: typeof requestPayload?.provider === 'string' ? requestPayload.provider : undefined,
}
logger.info(`[${tracker.requestId}] Non-streaming response from orchestrator:`, {
@@ -413,7 +451,7 @@ export async function POST(req: NextRequest) {
// Start title generation in parallel if this is first message (non-streaming)
if (actualChatId && !currentChat.title && conversationHistory.length === 0) {
logger.info(`[${tracker.requestId}] Starting title generation for non-streaming response`)
generateChatTitle(message)
requestChatTitleFromCopilot({ message, model: selectedModel, provider })
.then(async (title) => {
if (title) {
await db

View File

@@ -0,0 +1,84 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import type { AvailableModel } from '@/lib/copilot/types'
import { env } from '@/lib/core/config/env'
const logger = createLogger('CopilotModelsAPI')
/** Model entry shape as returned by the copilot backend's available-models endpoint. */
interface RawAvailableModel {
  id: string
  friendlyName?: string
  displayName?: string
  provider?: string
}

/**
 * Type guard: accepts any non-null object carrying a string `id`,
 * which is the minimum contract required of a RawAvailableModel.
 */
function isRawAvailableModel(item: unknown): item is RawAvailableModel {
  if (item === null || typeof item !== 'object') return false
  return typeof (item as { id?: unknown }).id === 'string'
}
/**
 * GET handler: proxies the copilot backend's available-models endpoint
 * and normalises each entry into the client-facing AvailableModel shape.
 * Requires an authenticated session; forwards COPILOT_API_KEY when set.
 */
export async function GET(_req: NextRequest) {
  // Session-only auth: reject unauthenticated callers before any upstream call.
  const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly()
  if (!isAuthenticated || !userId) {
    return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
  }
  const headers: Record<string, string> = {
    'Content-Type': 'application/json',
  }
  if (env.COPILOT_API_KEY) {
    headers['x-api-key'] = env.COPILOT_API_KEY
  }
  try {
    // no-store: model availability can change server-side, so never cache.
    const response = await fetch(`${SIM_AGENT_API_URL}/api/get-available-models`, {
      method: 'GET',
      headers,
      cache: 'no-store',
    })
    // Tolerate non-JSON (or empty) error bodies from the backend.
    const payload = await response.json().catch(() => ({}))
    if (!response.ok) {
      logger.warn('Failed to fetch available models from copilot backend', {
        status: response.status,
      })
      // Propagate the upstream status; clients get an empty model list.
      return NextResponse.json(
        {
          success: false,
          error: payload?.error || 'Failed to fetch available models',
          models: [],
        },
        { status: response.status }
      )
    }
    // Drop malformed entries, then map to the client shape with fallbacks:
    // friendlyName -> displayName -> id, and provider -> 'unknown'.
    const rawModels = Array.isArray(payload?.models) ? payload.models : []
    const models: AvailableModel[] = rawModels
      .filter((item: unknown): item is RawAvailableModel => isRawAvailableModel(item))
      .map((item: RawAvailableModel) => ({
        id: item.id,
        friendlyName: item.friendlyName || item.displayName || item.id,
        provider: item.provider || 'unknown',
      }))
    return NextResponse.json({ success: true, models })
  } catch (error) {
    logger.error('Error fetching available models', {
      error: error instanceof Error ? error.message : String(error),
    })
    return NextResponse.json(
      {
        success: false,
        error: 'Failed to fetch available models',
        models: [],
      },
      { status: 500 }
    )
  }
}

View File

@@ -1,139 +0,0 @@
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import type { CopilotModelId } from '@/lib/copilot/models'
import { db } from '@/../../packages/db'
import { settings } from '@/../../packages/db/schema'
const logger = createLogger('CopilotUserModelsAPI')
// Default per-model enablement flags. Used to seed a new settings row and
// merged under a user's stored map so models added after the row was
// created still appear. Keys must cover every CopilotModelId.
const DEFAULT_ENABLED_MODELS: Record<CopilotModelId, boolean> = {
  'gpt-4o': false,
  'gpt-4.1': false,
  'gpt-5-fast': false,
  'gpt-5': true,
  'gpt-5-medium': false,
  'gpt-5-high': false,
  'gpt-5.1-fast': false,
  'gpt-5.1': false,
  'gpt-5.1-medium': false,
  'gpt-5.1-high': false,
  'gpt-5-codex': false,
  'gpt-5.1-codex': false,
  'gpt-5.2': false,
  'gpt-5.2-codex': true,
  'gpt-5.2-pro': true,
  o3: true,
  'claude-4-sonnet': false,
  'claude-4.5-haiku': true,
  'claude-4.5-sonnet': true,
  'claude-4.6-opus': true,
  'claude-4.5-opus': true,
  'claude-4.1-opus': false,
  'gemini-3-pro': true,
}
// GET - Fetch user's enabled models
/**
 * Returns the caller's per-model enablement map, merged over the defaults.
 * Creates a settings row seeded with DEFAULT_ENABLED_MODELS on first read,
 * and persists the merged map when new default models appear.
 */
export async function GET(request: NextRequest) {
  try {
    const session = await getSession()
    if (!session?.user?.id) {
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }
    const userId = session.user.id
    const [userSettings] = await db
      .select()
      .from(settings)
      .where(eq(settings.userId, userId))
      .limit(1)
    if (userSettings) {
      // Overlay the stored flags onto the defaults, ignoring any stored
      // keys that are no longer valid model ids.
      const userModelsMap = (userSettings.copilotEnabledModels as Record<string, boolean>) || {}
      const mergedModels = { ...DEFAULT_ENABLED_MODELS }
      for (const [modelId, enabled] of Object.entries(userModelsMap)) {
        if (modelId in mergedModels) {
          mergedModels[modelId as CopilotModelId] = enabled
        }
      }
      // If the defaults introduced models the stored row doesn't know about,
      // write the merged map back so future reads include them.
      const hasNewModels = Object.keys(DEFAULT_ENABLED_MODELS).some(
        (key) => !(key in userModelsMap)
      )
      if (hasNewModels) {
        await db
          .update(settings)
          .set({
            copilotEnabledModels: mergedModels,
            updatedAt: new Date(),
          })
          .where(eq(settings.userId, userId))
      }
      return NextResponse.json({
        enabledModels: mergedModels,
      })
    }
    // First read for this user: create the settings row seeded with defaults.
    await db.insert(settings).values({
      id: userId,
      userId,
      copilotEnabledModels: DEFAULT_ENABLED_MODELS,
    })
    logger.info('Created new settings record with default models', { userId })
    return NextResponse.json({
      enabledModels: DEFAULT_ENABLED_MODELS,
    })
  } catch (error) {
    logger.error('Failed to fetch user models', { error })
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}
// PUT - Update user's enabled models
/**
 * Replaces the caller's copilotEnabledModels map in the settings table.
 * Upserts: updates the existing settings row or inserts one keyed by userId.
 * Expects a JSON body of the form { enabledModels: Record<string, boolean> }.
 */
export async function PUT(request: NextRequest) {
  try {
    const session = await getSession()
    const userId = session?.user?.id
    if (!userId) {
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    const body = await request.json()
    const { enabledModels } = body
    if (!enabledModels || typeof enabledModels !== 'object') {
      return NextResponse.json({ error: 'enabledModels must be an object' }, { status: 400 })
    }

    const existingRows = await db
      .select()
      .from(settings)
      .where(eq(settings.userId, userId))
      .limit(1)

    if (existingRows.length > 0) {
      await db
        .update(settings)
        .set({ copilotEnabledModels: enabledModels, updatedAt: new Date() })
        .where(eq(settings.userId, userId))
    } else {
      await db.insert(settings).values({
        id: userId,
        userId,
        copilotEnabledModels: enabledModels,
      })
    }

    return NextResponse.json({ success: true })
  } catch (error) {
    logger.error('Failed to update user models', { error })
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}

View File

@@ -17,7 +17,6 @@ import { createLogger } from '@sim/logger'
import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { getCopilotModel } from '@/lib/copilot/config'
import {
ORCHESTRATION_TIMEOUT_MS,
SIM_AGENT_API_URL,
@@ -39,6 +38,7 @@ import {
const logger = createLogger('CopilotMcpAPI')
const mcpRateLimiter = new RateLimiter()
const DEFAULT_COPILOT_MODEL = 'claude-opus-4-6'
export const dynamic = 'force-dynamic'
export const runtime = 'nodejs'
@@ -627,7 +627,6 @@ async function handleBuildToolCall(
): Promise<CallToolResult> {
try {
const requestText = (args.request as string) || JSON.stringify(args)
const { model } = getCopilotModel('chat')
const workflowId = args.workflowId as string | undefined
const resolved = workflowId
@@ -666,7 +665,7 @@ async function handleBuildToolCall(
message: requestText,
workflowId: resolved.workflowId,
userId,
model,
model: DEFAULT_COPILOT_MODEL,
mode: 'agent',
commands: ['fast'],
messageId: randomUUID(),
@@ -733,8 +732,6 @@ async function handleSubagentToolCall(
context.plan = args.plan
}
const { model } = getCopilotModel('chat')
const result = await orchestrateSubagentStream(
toolDef.agentId,
{
@@ -742,7 +739,7 @@ async function handleSubagentToolCall(
workflowId: args.workflowId,
workspaceId: args.workspaceId,
context,
model,
model: DEFAULT_COPILOT_MODEL,
headless: true,
source: 'mcp',
},

View File

@@ -1,7 +1,6 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getCopilotModel } from '@/lib/copilot/config'
import { SIM_AGENT_VERSION } from '@/lib/copilot/constants'
import { COPILOT_REQUEST_MODES } from '@/lib/copilot/models'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
@@ -9,6 +8,7 @@ import { resolveWorkflowIdForUser } from '@/lib/workflows/utils'
import { authenticateV1Request } from '@/app/api/v1/auth'
const logger = createLogger('CopilotHeadlessAPI')
const DEFAULT_COPILOT_MODEL = 'claude-opus-4-6'
const RequestSchema = z.object({
message: z.string().min(1, 'message is required'),
@@ -42,8 +42,7 @@ export async function POST(req: NextRequest) {
try {
const body = await req.json()
const parsed = RequestSchema.parse(body)
const defaults = getCopilotModel('chat')
const selectedModel = parsed.model || defaults.model
const selectedModel = parsed.model || DEFAULT_COPILOT_MODEL
// Resolve workflow ID
const resolved = await resolveWorkflowIdForUser(

View File

@@ -1,6 +1,6 @@
'use client'
import { useEffect, useRef, useState } from 'react'
import { useEffect, useMemo, useRef, useState } from 'react'
import {
Badge,
Popover,
@@ -9,8 +9,8 @@ import {
PopoverItem,
PopoverScrollArea,
} from '@/components/emcn'
import { getProviderIcon } from '@/providers/utils'
import { MODEL_OPTIONS } from '../../constants'
import { AnthropicIcon, AzureIcon, BedrockIcon, GeminiIcon, OpenAIIcon } from '@/components/icons'
import { useCopilotStore } from '@/stores/panel'
interface ModelSelectorProps {
/** Currently selected model */
@@ -22,14 +22,22 @@ interface ModelSelectorProps {
}
/**
* Gets the appropriate icon component for a model
* Map a provider string (from the available-models API) to its icon component.
* Falls back to null when the provider is unrecognised.
*/
function getModelIconComponent(modelValue: string) {
const IconComponent = getProviderIcon(modelValue)
if (!IconComponent) {
return null
}
return <IconComponent className='h-3.5 w-3.5' />
// Static lookup from provider id (as reported by the available-models API)
// to its icon component. Azure variants share one icon; google/gemini alias
// to the same Gemini icon.
const PROVIDER_ICON_MAP: Record<string, React.ComponentType<{ className?: string }>> = {
  anthropic: AnthropicIcon,
  openai: OpenAIIcon,
  gemini: GeminiIcon,
  google: GeminiIcon,
  bedrock: BedrockIcon,
  azure: AzureIcon,
  'azure-openai': AzureIcon,
  'azure-anthropic': AzureIcon,
}

// Resolve a provider id to its icon component; null when unrecognised.
function getIconForProvider(provider: string): React.ComponentType<{ className?: string }> | null {
  return PROVIDER_ICON_MAP[provider] ?? null
}
/**
@@ -43,17 +51,45 @@ export function ModelSelector({ selectedModel, isNearTop, onModelSelect }: Model
const [open, setOpen] = useState(false)
const triggerRef = useRef<HTMLDivElement>(null)
const popoverRef = useRef<HTMLDivElement>(null)
const availableModels = useCopilotStore((state) => state.availableModels)
const modelOptions = useMemo(() => {
return availableModels.map((model) => ({
value: model.id,
label: model.friendlyName || model.id,
provider: model.provider,
}))
}, [availableModels])
/**
* Extract the provider from a composite model key (e.g. "bedrock/claude-opus-4-6" → "bedrock").
* This mirrors the agent block pattern where model IDs are provider-prefixed.
*/
const getProviderForModel = (compositeKey: string): string | undefined => {
const slashIdx = compositeKey.indexOf('/')
if (slashIdx !== -1) return compositeKey.slice(0, slashIdx)
// Legacy migration path: allow old raw IDs (without provider prefix)
// by resolving against current available model options.
const exact = modelOptions.find((m) => m.value === compositeKey)
if (exact?.provider) return exact.provider
const byRawSuffix = modelOptions.find((m) => m.value.endsWith(`/${compositeKey}`))
return byRawSuffix?.provider
}
const getCollapsedModeLabel = () => {
const model = MODEL_OPTIONS.find((m) => m.value === selectedModel)
return model ? model.label : 'claude-4.5-sonnet'
const model =
modelOptions.find((m) => m.value === selectedModel) ??
modelOptions.find((m) => m.value.endsWith(`/${selectedModel}`))
return model?.label || selectedModel || 'No models available'
}
const getModelIcon = () => {
const IconComponent = getProviderIcon(selectedModel)
if (!IconComponent) {
return null
}
const provider = getProviderForModel(selectedModel)
if (!provider) return null
const IconComponent = getIconForProvider(provider)
if (!IconComponent) return null
return (
<span className='flex-shrink-0'>
<IconComponent className='h-3 w-3' />
@@ -61,6 +97,14 @@ export function ModelSelector({ selectedModel, isNearTop, onModelSelect }: Model
)
}
const getModelIconComponent = (modelValue: string) => {
const provider = getProviderForModel(modelValue)
if (!provider) return null
const IconComponent = getIconForProvider(provider)
if (!IconComponent) return null
return <IconComponent className='h-3.5 w-3.5' />
}
const handleSelect = (modelValue: string) => {
onModelSelect(modelValue)
setOpen(false)
@@ -124,16 +168,20 @@ export function ModelSelector({ selectedModel, isNearTop, onModelSelect }: Model
onCloseAutoFocus={(e) => e.preventDefault()}
>
<PopoverScrollArea className='space-y-[2px]'>
{MODEL_OPTIONS.map((option) => (
<PopoverItem
key={option.value}
active={selectedModel === option.value}
onClick={() => handleSelect(option.value)}
>
{getModelIconComponent(option.value)}
<span>{option.label}</span>
</PopoverItem>
))}
{modelOptions.length > 0 ? (
modelOptions.map((option) => (
<PopoverItem
key={option.value}
active={selectedModel === option.value}
onClick={() => handleSelect(option.value)}
>
{getModelIconComponent(option.value)}
<span>{option.label}</span>
</PopoverItem>
))
) : (
<div className='px-2 py-2 text-[var(--text-muted)] text-xs'>No models available</div>
)}
</PopoverScrollArea>
</PopoverContent>
</Popover>

View File

@@ -242,19 +242,6 @@ export function getCommandDisplayLabel(commandId: string): string {
return command?.label || commandId.charAt(0).toUpperCase() + commandId.slice(1)
}
/**
* Model configuration options
*/
export const MODEL_OPTIONS = [
{ value: 'claude-4.6-opus', label: 'Claude 4.6 Opus' },
{ value: 'claude-4.5-opus', label: 'Claude 4.5 Opus' },
{ value: 'claude-4.5-sonnet', label: 'Claude 4.5 Sonnet' },
{ value: 'claude-4.5-haiku', label: 'Claude 4.5 Haiku' },
{ value: 'gpt-5.2-codex', label: 'GPT 5.2 Codex' },
{ value: 'gpt-5.2-pro', label: 'GPT 5.2 Pro' },
{ value: 'gemini-3-pro', label: 'Gemini 3 Pro' },
] as const
/**
* Threshold for considering input "near top" of viewport (in pixels)
*/

View File

@@ -112,6 +112,7 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
closePlanTodos,
clearPlanArtifact,
savePlanArtifact,
loadAvailableModels,
loadAutoAllowedTools,
resumeActiveStream,
} = useCopilotStore()
@@ -123,6 +124,7 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
chatsLoadedForWorkflow,
setCopilotWorkflowId,
loadChats,
loadAvailableModels,
loadAutoAllowedTools,
currentChat,
isSendingMessage,

View File

@@ -11,6 +11,7 @@ interface UseCopilotInitializationProps {
chatsLoadedForWorkflow: string | null
setCopilotWorkflowId: (workflowId: string | null) => Promise<void>
loadChats: (forceRefresh?: boolean) => Promise<void>
loadAvailableModels: () => Promise<void>
loadAutoAllowedTools: () => Promise<void>
currentChat: any
isSendingMessage: boolean
@@ -30,6 +31,7 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) {
chatsLoadedForWorkflow,
setCopilotWorkflowId,
loadChats,
loadAvailableModels,
loadAutoAllowedTools,
currentChat,
isSendingMessage,
@@ -129,6 +131,17 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) {
}
}, [loadAutoAllowedTools])
/** Load available models once on mount */
const hasLoadedModelsRef = useRef(false)
useEffect(() => {
if (!hasLoadedModelsRef.current) {
hasLoadedModelsRef.current = true
loadAvailableModels().catch((err) => {
logger.warn('[Copilot] Failed to load available models', err)
})
}
}, [loadAvailableModels])
return {
isInitialized,
}

View File

@@ -69,6 +69,7 @@ export interface SendMessageRequest {
workflowId?: string
mode?: CopilotMode | CopilotTransportMode
model?: CopilotModelId
provider?: string
prefetch?: boolean
createNewChat?: boolean
stream?: boolean

View File

@@ -1,10 +1,7 @@
import { createLogger } from '@sim/logger'
import { processFileAttachments } from '@/lib/copilot/chat-context'
import { getCopilotModel } from '@/lib/copilot/config'
import { SIM_AGENT_VERSION } from '@/lib/copilot/constants'
import { getCredentialsServerTool } from '@/lib/copilot/tools/server/user/get-credentials'
import type { CopilotProviderConfig } from '@/lib/copilot/types'
import { env } from '@/lib/core/config/env'
import { tools } from '@/tools/registry'
import { getLatestVersionTools, stripVersionSuffix } from '@/tools/utils'
@@ -17,6 +14,7 @@ export interface BuildPayloadParams {
userMessageId: string
mode: string
model: string
provider?: string
conversationHistory?: unknown[]
contexts?: Array<{ type: string; content: string }>
fileAttachments?: Array<{ id: string; key: string; size: number; [key: string]: unknown }>
@@ -46,57 +44,12 @@ interface CredentialsPayload {
}
}
function buildProviderConfig(selectedModel: string): CopilotProviderConfig | undefined {
const defaults = getCopilotModel('chat')
const envModel = env.COPILOT_MODEL || defaults.model
const providerEnv = env.COPILOT_PROVIDER
if (!providerEnv) return undefined
if (providerEnv === 'azure-openai') {
return {
provider: 'azure-openai',
model: envModel,
apiKey: env.AZURE_OPENAI_API_KEY,
apiVersion: 'preview',
endpoint: env.AZURE_OPENAI_ENDPOINT,
}
}
if (providerEnv === 'azure-anthropic') {
return {
provider: 'azure-anthropic',
model: envModel,
apiKey: env.AZURE_ANTHROPIC_API_KEY,
apiVersion: env.AZURE_ANTHROPIC_API_VERSION,
endpoint: env.AZURE_ANTHROPIC_ENDPOINT,
}
}
if (providerEnv === 'vertex') {
return {
provider: 'vertex',
model: envModel,
apiKey: env.COPILOT_API_KEY,
vertexProject: env.VERTEX_PROJECT,
vertexLocation: env.VERTEX_LOCATION,
}
}
return {
provider: providerEnv as Exclude<string, 'azure-openai' | 'vertex'>,
model: selectedModel,
apiKey: env.COPILOT_API_KEY,
} as CopilotProviderConfig
}
/**
* Build the request payload for the copilot backend.
*/
export async function buildCopilotRequestPayload(
params: BuildPayloadParams,
options: {
providerConfig?: CopilotProviderConfig
selectedModel: string
}
): Promise<Record<string, unknown>> {
@@ -106,6 +59,7 @@ export async function buildCopilotRequestPayload(
userId,
userMessageId,
mode,
provider,
contexts,
fileAttachments,
commands,
@@ -113,7 +67,6 @@ export async function buildCopilotRequestPayload(
} = params
const selectedModel = options.selectedModel
const providerConfig = options.providerConfig ?? buildProviderConfig(selectedModel)
const effectiveMode = mode === 'agent' ? 'build' : mode
const transportMode = effectiveMode === 'build' ? 'agent' : effectiveMode
@@ -195,10 +148,10 @@ export async function buildCopilotRequestPayload(
workflowId,
userId,
model: selectedModel,
...(provider ? { provider } : {}),
mode: transportMode,
messageId: userMessageId,
version: SIM_AGENT_VERSION,
...(providerConfig ? { provider: providerConfig } : {}),
...(contexts && contexts.length > 0 ? { context: contexts } : {}),
...(chatId ? { chatId } : {}),
...(processedFileContents.length > 0 ? { fileAttachments: processedFileContents } : {}),

View File

@@ -1,79 +0,0 @@
import { createLogger } from '@sim/logger'
import { env } from '@/lib/core/config/env'
import { extractResponseText } from '@/providers/openai/utils'
const logger = createLogger('SimAgentUtils')
const azureApiKey = env.AZURE_OPENAI_API_KEY
const azureEndpoint = env.AZURE_OPENAI_ENDPOINT
const azureApiVersion = env.AZURE_OPENAI_API_VERSION
const chatTitleModelName = env.WAND_OPENAI_MODEL_NAME || 'gpt-4o'
const openaiApiKey = env.OPENAI_API_KEY
const useChatTitleAzure = azureApiKey && azureEndpoint && azureApiVersion
/**
* Generates a short title for a chat based on the first message
* @param message First user message in the chat
* @returns A short title or null if API key is not available
*/
export async function generateChatTitle(message: string): Promise<string | null> {
  // Without Azure (fully configured) or an OpenAI key there is nothing to call.
  if (!useChatTitleAzure && !openaiApiKey) {
    return null
  }
  try {
    // Azure path: the endpoint's Responses route (trailing slash stripped);
    // otherwise OpenAI's hosted Responses API.
    const apiUrl = useChatTitleAzure
      ? `${azureEndpoint?.replace(/\/$/, '')}/openai/v1/responses?api-version=${azureApiVersion}`
      : 'https://api.openai.com/v1/responses'
    const headers: Record<string, string> = {
      'Content-Type': 'application/json',
      'OpenAI-Beta': 'responses=v1',
    }
    if (useChatTitleAzure) {
      // Non-null assertion is safe: useChatTitleAzure implies azureApiKey is set.
      headers['api-key'] = azureApiKey!
    } else {
      headers.Authorization = `Bearer ${openaiApiKey}`
    }
    const response = await fetch(apiUrl, {
      method: 'POST',
      headers,
      body: JSON.stringify({
        model: useChatTitleAzure ? chatTitleModelName : 'gpt-4o',
        input: [
          {
            role: 'system',
            content:
              'Generate a very short title (3-5 words max) for a chat that starts with this message. The title should be concise and descriptive. Do not wrap the title in quotes.',
          },
          {
            role: 'user',
            content: message,
          },
        ],
        // Titles are tiny: cap output tokens and keep temperature low.
        max_output_tokens: 20,
        temperature: 0.2,
      }),
    })
    if (!response.ok) {
      const errorText = await response.text()
      logger.error('Error generating chat title:', {
        status: response.status,
        statusText: response.statusText,
        error: errorText,
      })
      return null
    }
    const data = await response.json()
    // extractResponseText pulls the text content out of the Responses output array;
    // an empty/whitespace-only result becomes null.
    const title = extractResponseText(data.output)?.trim() || null
    return title
  } catch (error) {
    logger.error('Error generating chat title:', error)
    return null
  }
}

View File

@@ -1,337 +0,0 @@
import { createLogger } from '@sim/logger'
import { AGENT_MODE_SYSTEM_PROMPT } from '@/lib/copilot/prompts'
import { getProviderDefaultModel } from '@/providers/models'
import type { ProviderId } from '@/providers/types'
const logger = createLogger('CopilotConfig')
/**
* Valid provider IDs for validation
*/
const VALID_PROVIDER_IDS: readonly ProviderId[] = [
'openai',
'azure-openai',
'anthropic',
'azure-anthropic',
'google',
'deepseek',
'xai',
'cerebras',
'mistral',
'groq',
'ollama',
] as const
/**
* Configuration validation constraints
*/
const VALIDATION_CONSTRAINTS = {
temperature: { min: 0, max: 2 },
maxTokens: { min: 1, max: 100000 },
maxSources: { min: 1, max: 20 },
similarityThreshold: { min: 0, max: 1 },
maxConversationHistory: { min: 1, max: 50 },
} as const
/**
* Copilot model types
*/
export type CopilotModelType = 'chat' | 'rag' | 'title'
/**
* Configuration validation result
*/
export interface ValidationResult {
isValid: boolean
errors: string[]
}
/**
* Copilot configuration interface
*/
export interface CopilotConfig {
// Chat LLM configuration
chat: {
defaultProvider: ProviderId
defaultModel: string
temperature: number
maxTokens: number
systemPrompt: string
}
// RAG (documentation search) LLM configuration
rag: {
defaultProvider: ProviderId
defaultModel: string
temperature: number
maxTokens: number
embeddingModel: string
maxSources: number
similarityThreshold: number
}
// General configuration
general: {
streamingEnabled: boolean
maxConversationHistory: number
titleGenerationModel: string
}
}
/** Narrow an env-supplied string to a known ProviderId; null when unset or unknown. */
function validateProviderId(value: string | undefined): ProviderId | null {
  if (!value) return null
  const candidate = value as ProviderId
  return VALID_PROVIDER_IDS.includes(candidate) ? candidate : null
}
/**
 * Parse an environment variable as a float.
 * Returns null for unset/empty values, and null (after logging a warning)
 * for values that do not parse as a number.
 */
function parseFloatEnv(value: string | undefined, name: string): number | null {
  if (!value) return null
  const parsed = Number.parseFloat(value)
  if (!Number.isNaN(parsed)) return parsed
  logger.warn(`Invalid ${name}: ${value}. Expected a valid number.`)
  return null
}
/**
 * Parse an environment variable as a base-10 integer.
 * Returns null for unset/empty values, and null (after logging a warning)
 * for values that do not parse as an integer.
 */
function parseIntEnv(value: string | undefined, name: string): number | null {
  if (!value) return null
  const parsed = Number.parseInt(value, 10)
  if (!Number.isNaN(parsed)) return parsed
  logger.warn(`Invalid ${name}: ${value}. Expected a valid integer.`)
  return null
}
/**
 * Parse an environment variable as a boolean.
 * Unset/empty -> null; case-insensitive 'true' -> true; anything else -> false.
 */
function parseBooleanEnv(value: string | undefined): boolean | null {
  return value ? value.toLowerCase() === 'true' : null
}
// Baseline configuration. getCopilotConfig() deep-clones this object and
// layers environment-variable overrides on top, so it is never mutated.
export const DEFAULT_COPILOT_CONFIG: CopilotConfig = {
  chat: {
    defaultProvider: 'anthropic',
    defaultModel: 'claude-4.6-opus',
    temperature: 0.1,
    maxTokens: 8192,
    systemPrompt: AGENT_MODE_SYSTEM_PROMPT,
  },
  rag: {
    defaultProvider: 'anthropic',
    defaultModel: 'claude-4.6-opus',
    temperature: 0.1,
    maxTokens: 2000,
    embeddingModel: 'text-embedding-3-small',
    maxSources: 10,
    similarityThreshold: 0.3,
  },
  general: {
    streamingEnabled: true,
    maxConversationHistory: 10,
    titleGenerationModel: 'claude-3-haiku-20240307',
  },
}
/**
 * Mutates `config` in place with any COPILOT_* environment overrides.
 * Each setting is applied independently: an invalid value for one field
 * is warned about and skipped without blocking the others.
 */
function applyEnvironmentOverrides(config: CopilotConfig): void {
  // --- chat overrides ---
  const chatProvider = validateProviderId(process.env.COPILOT_CHAT_PROVIDER)
  if (chatProvider) {
    config.chat.defaultProvider = chatProvider
  } else if (process.env.COPILOT_CHAT_PROVIDER) {
    // Set but unrecognised: warn and keep the default.
    logger.warn(
      `Invalid COPILOT_CHAT_PROVIDER: ${process.env.COPILOT_CHAT_PROVIDER}. Valid providers: ${VALID_PROVIDER_IDS.join(', ')}`
    )
  }
  if (process.env.COPILOT_CHAT_MODEL) {
    config.chat.defaultModel = process.env.COPILOT_CHAT_MODEL
  }
  const chatTemperature = parseFloatEnv(
    process.env.COPILOT_CHAT_TEMPERATURE,
    'COPILOT_CHAT_TEMPERATURE'
  )
  if (chatTemperature !== null) {
    config.chat.temperature = chatTemperature
  }
  const chatMaxTokens = parseIntEnv(process.env.COPILOT_CHAT_MAX_TOKENS, 'COPILOT_CHAT_MAX_TOKENS')
  if (chatMaxTokens !== null) {
    config.chat.maxTokens = chatMaxTokens
  }
  // --- rag overrides ---
  const ragProvider = validateProviderId(process.env.COPILOT_RAG_PROVIDER)
  if (ragProvider) {
    config.rag.defaultProvider = ragProvider
  } else if (process.env.COPILOT_RAG_PROVIDER) {
    logger.warn(
      `Invalid COPILOT_RAG_PROVIDER: ${process.env.COPILOT_RAG_PROVIDER}. Valid providers: ${VALID_PROVIDER_IDS.join(', ')}`
    )
  }
  if (process.env.COPILOT_RAG_MODEL) {
    config.rag.defaultModel = process.env.COPILOT_RAG_MODEL
  }
  const ragTemperature = parseFloatEnv(
    process.env.COPILOT_RAG_TEMPERATURE,
    'COPILOT_RAG_TEMPERATURE'
  )
  if (ragTemperature !== null) {
    config.rag.temperature = ragTemperature
  }
  const ragMaxTokens = parseIntEnv(process.env.COPILOT_RAG_MAX_TOKENS, 'COPILOT_RAG_MAX_TOKENS')
  if (ragMaxTokens !== null) {
    config.rag.maxTokens = ragMaxTokens
  }
  const ragMaxSources = parseIntEnv(process.env.COPILOT_RAG_MAX_SOURCES, 'COPILOT_RAG_MAX_SOURCES')
  if (ragMaxSources !== null) {
    config.rag.maxSources = ragMaxSources
  }
  const ragSimilarityThreshold = parseFloatEnv(
    process.env.COPILOT_RAG_SIMILARITY_THRESHOLD,
    'COPILOT_RAG_SIMILARITY_THRESHOLD'
  )
  if (ragSimilarityThreshold !== null) {
    config.rag.similarityThreshold = ragSimilarityThreshold
  }
  // --- general overrides ---
  const streamingEnabled = parseBooleanEnv(process.env.COPILOT_STREAMING_ENABLED)
  if (streamingEnabled !== null) {
    config.general.streamingEnabled = streamingEnabled
  }
  const maxConversationHistory = parseIntEnv(
    process.env.COPILOT_MAX_CONVERSATION_HISTORY,
    'COPILOT_MAX_CONVERSATION_HISTORY'
  )
  if (maxConversationHistory !== null) {
    config.general.maxConversationHistory = maxConversationHistory
  }
  if (process.env.COPILOT_TITLE_GENERATION_MODEL) {
    config.general.titleGenerationModel = process.env.COPILOT_TITLE_GENERATION_MODEL
  }
}
/**
 * Build the effective copilot configuration: a deep clone of the defaults
 * with environment-variable overrides layered on top. If applying overrides
 * throws, the (possibly partially overridden) clone is returned as-is.
 */
export function getCopilotConfig(): CopilotConfig {
  const effective = structuredClone(DEFAULT_COPILOT_CONFIG)
  try {
    applyEnvironmentOverrides(effective)
    return effective
  } catch (error) {
    logger.warn('Error applying environment variable overrides, using defaults', { error })
    return effective
  }
}
/**
 * Resolve the provider/model pair for a copilot subsystem.
 * 'chat' and 'rag' use their own provider + model; 'title' pairs the chat
 * provider with the dedicated title-generation model.
 * @throws Error for an unknown model type.
 */
export function getCopilotModel(type: CopilotModelType): {
  provider: ProviderId
  model: string
} {
  const config = getCopilotConfig()
  if (type === 'chat') {
    return { provider: config.chat.defaultProvider, model: config.chat.defaultModel }
  }
  if (type === 'rag') {
    return { provider: config.rag.defaultProvider, model: config.rag.defaultModel }
  }
  if (type === 'title') {
    return { provider: config.chat.defaultProvider, model: config.general.titleGenerationModel }
  }
  throw new Error(`Unknown copilot model type: ${type}`)
}
/**
 * Check a numeric value against an inclusive [min, max] constraint.
 *
 * @returns null when the value is in range, otherwise a human-readable error message.
 */
function validateNumericValue(
  value: number,
  constraint: { min: number; max: number },
  name: string
): string | null {
  const withinRange = value >= constraint.min && value <= constraint.max
  return withinRange ? null : `${name} must be between ${constraint.min} and ${constraint.max}`
}
export function validateCopilotConfig(config: CopilotConfig): ValidationResult {
const errors: string[] = []
try {
const chatDefaultModel = getProviderDefaultModel(config.chat.defaultProvider)
if (!chatDefaultModel) {
errors.push(`Chat provider '${config.chat.defaultProvider}' not found`)
}
} catch (error) {
errors.push(`Invalid chat provider: ${config.chat.defaultProvider}`)
}
try {
const ragDefaultModel = getProviderDefaultModel(config.rag.defaultProvider)
if (!ragDefaultModel) {
errors.push(`RAG provider '${config.rag.defaultProvider}' not found`)
}
} catch (error) {
errors.push(`Invalid RAG provider: ${config.rag.defaultProvider}`)
}
const validationChecks = [
{
value: config.chat.temperature,
constraint: VALIDATION_CONSTRAINTS.temperature,
name: 'Chat temperature',
},
{
value: config.rag.temperature,
constraint: VALIDATION_CONSTRAINTS.temperature,
name: 'RAG temperature',
},
{
value: config.chat.maxTokens,
constraint: VALIDATION_CONSTRAINTS.maxTokens,
name: 'Chat maxTokens',
},
{
value: config.rag.maxTokens,
constraint: VALIDATION_CONSTRAINTS.maxTokens,
name: 'RAG maxTokens',
},
{
value: config.rag.maxSources,
constraint: VALIDATION_CONSTRAINTS.maxSources,
name: 'RAG maxSources',
},
{
value: config.rag.similarityThreshold,
constraint: VALIDATION_CONSTRAINTS.similarityThreshold,
name: 'RAG similarityThreshold',
},
{
value: config.general.maxConversationHistory,
constraint: VALIDATION_CONSTRAINTS.maxConversationHistory,
name: 'General maxConversationHistory',
},
]
for (const check of validationChecks) {
const error = validateNumericValue(check.value, check.constraint, check.name)
if (error) {
errors.push(error)
}
}
return {
isValid: errors.length === 0,
errors,
}
}

View File

@@ -104,6 +104,9 @@ export const COPILOT_CHECKPOINTS_REVERT_API_PATH = '/api/copilot/checkpoints/rev
/** GET/POST/DELETE — manage auto-allowed tools. */
export const COPILOT_AUTO_ALLOWED_TOOLS_API_PATH = '/api/copilot/auto-allowed-tools'
/** GET — fetch dynamically available copilot models. */
export const COPILOT_MODELS_API_PATH = '/api/copilot/models'
/** GET — fetch user credentials for masking. */
export const COPILOT_CREDENTIALS_API_PATH = '/api/copilot/credentials'

View File

@@ -1,30 +1,4 @@
export const COPILOT_MODEL_IDS = [
'gpt-5-fast',
'gpt-5',
'gpt-5-medium',
'gpt-5-high',
'gpt-5.1-fast',
'gpt-5.1',
'gpt-5.1-medium',
'gpt-5.1-high',
'gpt-5-codex',
'gpt-5.1-codex',
'gpt-5.2',
'gpt-5.2-codex',
'gpt-5.2-pro',
'gpt-4o',
'gpt-4.1',
'o3',
'claude-4-sonnet',
'claude-4.5-haiku',
'claude-4.5-sonnet',
'claude-4.6-opus',
'claude-4.5-opus',
'claude-4.1-opus',
'gemini-3-pro',
] as const
export type CopilotModelId = (typeof COPILOT_MODEL_IDS)[number]
export type CopilotModelId = string
export const COPILOT_MODES = ['ask', 'build', 'plan'] as const
export type CopilotMode = (typeof COPILOT_MODES)[number]

View File

@@ -1,5 +1,3 @@
import type { CopilotProviderConfig } from '@/lib/copilot/types'
export type SSEEventType =
| 'chat_id'
| 'title_updated'
@@ -104,7 +102,7 @@ export interface OrchestratorRequest {
contexts?: Array<{ type: string; content: string }>
fileAttachments?: FileAttachment[]
commands?: string[]
provider?: CopilotProviderConfig
provider?: string
streamToolCalls?: boolean
version?: string
prefetch?: boolean

View File

@@ -10,6 +10,8 @@ interface DocsSearchParams {
threshold?: number
}
const DEFAULT_DOCS_SIMILARITY_THRESHOLD = 0.3
export const searchDocumentationServerTool: BaseServerTool<DocsSearchParams, any> = {
name: 'search_documentation',
async execute(params: DocsSearchParams): Promise<any> {
@@ -19,9 +21,7 @@ export const searchDocumentationServerTool: BaseServerTool<DocsSearchParams, any
logger.info('Executing docs search', { query, topK })
const { getCopilotConfig } = await import('@/lib/copilot/config')
const config = getCopilotConfig()
const similarityThreshold = threshold ?? config.rag.similarityThreshold
const similarityThreshold = threshold ?? DEFAULT_DOCS_SIMILARITY_THRESHOLD
const { generateSearchEmbedding } = await import('@/lib/knowledge/embeddings')
const queryEmbedding = await generateSearchEmbedding(query)

View File

@@ -1,4 +1,3 @@
import type { ProviderId } from '@/providers/types'
import type { CopilotToolCall, ToolState } from '@/stores/panel'
export type NotificationStatus =
@@ -11,32 +10,8 @@ export type NotificationStatus =
export type { CopilotToolCall, ToolState }
// Provider configuration for Sim Agent requests.
// This type is only for the `provider` field in requests sent to the Sim Agent.
export type CopilotProviderConfig =
| {
provider: 'azure-openai'
model: string
apiKey?: string
apiVersion?: string
endpoint?: string
}
| {
provider: 'azure-anthropic'
model: string
apiKey?: string
apiVersion?: string
endpoint?: string
}
| {
provider: 'vertex'
model: string
apiKey?: string
vertexProject?: string
vertexLocation?: string
}
| {
provider: Exclude<ProviderId, 'azure-openai' | 'azure-anthropic' | 'vertex'>
model?: string
apiKey?: string
}
export interface AvailableModel {
id: string
friendlyName: string
provider: string
}

View File

@@ -29,8 +29,6 @@ export const env = createEnv({
INTERNAL_API_SECRET: z.string().min(32), // Secret for internal API authentication
// Copilot
COPILOT_PROVIDER: z.string().optional(), // Provider for copilot API calls
COPILOT_MODEL: z.string().optional(), // Model for copilot API calls
COPILOT_API_KEY: z.string().min(1).optional(), // Secret for internal sim agent API authentication
SIM_AGENT_API_URL: z.string().url().optional(), // URL for internal sim agent API
AGENT_INDEXER_URL: z.string().url().optional(), // URL for agent training data indexer

View File

@@ -26,6 +26,7 @@ import {
COPILOT_CONFIRM_API_PATH,
COPILOT_CREDENTIALS_API_PATH,
COPILOT_DELETE_CHAT_API_PATH,
COPILOT_MODELS_API_PATH,
MAX_RESUME_ATTEMPTS,
OPTIMISTIC_TITLE_MAX_LENGTH,
QUEUE_PROCESS_DELAY_MS,
@@ -50,6 +51,7 @@ import {
stripTodoTags,
} from '@/lib/copilot/store-utils'
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
import type { AvailableModel } from '@/lib/copilot/types'
import { getQueryClient } from '@/app/_shell/providers/query-provider'
import { subscriptionKeys } from '@/hooks/queries/subscription'
import type {
@@ -297,6 +299,75 @@ type InitiateStreamResult =
| { kind: 'success'; result: Awaited<ReturnType<typeof sendStreamingMessage>> }
| { kind: 'error'; error: unknown }
/**
* Parse a composite model key (e.g. "bedrock/claude-opus-4-6") into provider and raw model ID.
* This mirrors the agent block pattern in providers/models.ts where model IDs are prefixed
* with the provider (e.g. "azure-anthropic/claude-sonnet-4-5", "bedrock/claude-opus-4-6").
*/
function parseModelKey(compositeKey: string): { provider: string; modelId: string } {
const slashIdx = compositeKey.indexOf('/')
if (slashIdx === -1) return { provider: '', modelId: compositeKey }
return { provider: compositeKey.slice(0, slashIdx), modelId: compositeKey.slice(slashIdx + 1) }
}
// Preference order used when the same model ID is offered by several providers.
// Earlier entries win; 'unknown' is the fallback bucket for unrecognized providers.
const MODEL_PROVIDER_PRIORITY = [
  'anthropic',
  'bedrock',
  'azure-anthropic',
  'openai',
  'azure-openai',
  'gemini',
  'google',
  'azure',
  'unknown',
] as const

// Set form of the priority list for O(1) membership checks.
const KNOWN_COPILOT_PROVIDERS = new Set<string>(MODEL_PROVIDER_PRIORITY)

/** True when `modelId` already has the shape "<known-provider>/<non-empty-model>". */
function isCompositeModelId(modelId: string): boolean {
  const separator = modelId.indexOf('/')
  // Both the prefix and the remainder around the first slash must be non-empty.
  if (separator <= 0 || separator === modelId.length - 1) {
    return false
  }
  return KNOWN_COPILOT_PROVIDERS.has(modelId.slice(0, separator))
}
/** Prefix `modelId` with `provider` unless it is already a composite key (or empty). */
function toCompositeModelId(modelId: string, provider: string): string {
  if (!modelId || isCompositeModelId(modelId)) {
    return modelId
  }
  return `${provider}/${modelId}`
}
/**
 * Choose the match whose provider ranks highest in MODEL_PROVIDER_PRIORITY,
 * falling back to the first match (or undefined for an empty list).
 */
function pickPreferredProviderModel(matches: AvailableModel[]): AvailableModel | undefined {
  for (const preferred of MODEL_PROVIDER_PRIORITY) {
    for (const candidate of matches) {
      if (candidate.provider === preferred) {
        return candidate
      }
    }
  }
  return matches[0]
}
/**
 * Map a stored (possibly legacy, non-composite) selected-model key onto an
 * entry in `models`. Returns the original key unchanged when nothing matches,
 * the key is empty, or the model list is empty.
 */
function normalizeSelectedModelKey(selectedModel: string, models: AvailableModel[]): string {
  if (!selectedModel || models.length === 0) {
    return selectedModel
  }
  const alreadyPresent = models.some((m) => m.id === selectedModel)
  if (alreadyPresent) {
    return selectedModel
  }
  const { provider, modelId } = parseModelKey(selectedModel)
  const bareModelId = modelId || selectedModel
  const candidates = models.filter((m) => m.id.endsWith(`/${bareModelId}`))
  if (candidates.length === 0) {
    return selectedModel
  }
  // Prefer a candidate from the same provider the stored key named, if any.
  if (provider) {
    const sameProvider = candidates.find((m) => m.provider === provider)
    if (sameProvider) {
      return sameProvider.id
    }
  }
  return (pickPreferredProviderModel(candidates) ?? candidates[0]).id
}
/** Look up the provider for the currently selected model from the composite key. */
function getSelectedProvider(get: CopilotGet): string | undefined {
  const parsed = parseModelKey(get().selectedModel)
  return parsed.provider ? parsed.provider : undefined
}
function prepareSendContext(
get: CopilotGet,
set: CopilotSet,
@@ -480,13 +551,17 @@ async function initiateStream(
}) as string[] | undefined
const filteredContexts = contexts?.filter((c) => c.kind !== 'slash_command')
const { provider: selectedProvider, modelId: selectedModelId } = parseModelKey(
get().selectedModel
)
const result = await sendStreamingMessage({
message: messageToSend,
userMessageId: prepared.userMessage.id,
chatId: prepared.currentChat?.id,
workflowId: prepared.workflowId || undefined,
mode: apiMode,
model: get().selectedModel,
model: selectedModelId,
provider: selectedProvider || undefined,
prefetch: get().agentPrefetch,
createNewChat: !prepared.currentChat,
stream: prepared.stream,
@@ -554,7 +629,7 @@ async function finalizeStream(
errorType = 'usage_limit'
} else if (result.status === 403) {
errorContent =
'_Provider config not allowed for non-enterprise users. Please remove the provider config and try again_'
'_Access denied by the Copilot backend. Please verify your API key and server configuration._'
errorType = 'forbidden'
} else if (result.status === 426) {
errorContent =
@@ -857,13 +932,15 @@ async function resumeFromLiveStream(
assistantMessageId: resume.nextStream.assistantMessageId,
chatId: resume.nextStream.chatId,
})
const { provider: resumeProvider, modelId: resumeModelId } = parseModelKey(get().selectedModel)
const result = await sendStreamingMessage({
message: resume.nextStream.userMessageContent || '',
userMessageId: resume.nextStream.userMessageId,
workflowId: resume.nextStream.workflowId,
chatId: resume.nextStream.chatId || get().currentChat?.id || undefined,
mode: get().mode === 'ask' ? 'ask' : get().mode === 'plan' ? 'plan' : 'agent',
model: get().selectedModel,
model: resumeModelId,
provider: resumeProvider || undefined,
prefetch: get().agentPrefetch,
stream: true,
resumeFromEventId: resume.resumeFromEventId,
@@ -910,9 +987,10 @@ const cachedAutoAllowedTools = readAutoAllowedToolsFromStorage()
// Initial state (subset required for UI/streaming)
const initialState = {
mode: 'build' as const,
selectedModel: 'claude-4.6-opus' as CopilotStore['selectedModel'],
selectedModel: 'anthropic/claude-opus-4-6' as CopilotStore['selectedModel'],
agentPrefetch: false,
enabledModels: null as string[] | null, // Null means not loaded yet, empty array means all disabled
availableModels: [] as AvailableModel[],
isLoadingModels: false,
isCollapsed: false,
currentChat: null as CopilotChat | null,
chats: [] as CopilotChat[],
@@ -978,7 +1056,8 @@ export const useCopilotStore = create<CopilotStore>()(
mode: get().mode,
selectedModel: get().selectedModel,
agentPrefetch: get().agentPrefetch,
enabledModels: get().enabledModels,
availableModels: get().availableModels,
isLoadingModels: get().isLoadingModels,
autoAllowedTools: get().autoAllowedTools,
autoAllowedToolsLoaded: get().autoAllowedToolsLoaded,
})
@@ -1425,12 +1504,14 @@ export const useCopilotStore = create<CopilotStore>()(
try {
const apiMode: 'ask' | 'agent' | 'plan' =
mode === 'ask' ? 'ask' : mode === 'plan' ? 'plan' : 'agent'
const { provider: fbProvider, modelId: fbModelId } = parseModelKey(selectedModel)
const result = await sendStreamingMessage({
message: 'Please continue your response.',
chatId: currentChat?.id,
workflowId,
mode: apiMode,
model: selectedModel,
model: fbModelId,
provider: fbProvider || undefined,
prefetch: get().agentPrefetch,
createNewChat: !currentChat,
stream: true,
@@ -2190,7 +2271,76 @@ export const useCopilotStore = create<CopilotStore>()(
set({ selectedModel: model })
},
setAgentPrefetch: (prefetch) => set({ agentPrefetch: prefetch }),
setEnabledModels: (models) => set({ enabledModels: models }),
// Fetch the list of models the copilot backend currently offers, normalize them
// into stable composite "provider/model-id" keys, and reconcile the currently
// selected model against the refreshed list. Failures are logged and leave the
// existing model list untouched.
loadAvailableModels: async () => {
  set({ isLoadingModels: true })
  try {
    const response = await fetch(COPILOT_MODELS_API_PATH, { method: 'GET' })
    if (!response.ok) {
      throw new Error(`Failed to fetch available models: ${response.status}`)
    }
    const data = await response.json()
    // Defensive parsing: the payload is external, so only entries with a string id survive.
    const models: unknown[] = Array.isArray(data?.models) ? data.models : []
    const seenModelIds = new Set<string>()
    const normalizedModels: AvailableModel[] = models
      .filter((model: unknown): model is AvailableModel => {
        return (
          typeof model === 'object' &&
          model !== null &&
          'id' in model &&
          typeof (model as { id: unknown }).id === 'string'
        )
      })
      .map((model: AvailableModel) => {
        // Derive the provider from the id prefix when the payload omits it.
        const idProvider = isCompositeModelId(model.id) ? parseModelKey(model.id).provider : ''
        const provider = model.provider || idProvider || 'unknown'
        // Use stable composite provider/modelId keys so same model IDs from different
        // providers remain uniquely addressable.
        const compositeId = toCompositeModelId(model.id, provider)
        return {
          id: compositeId,
          friendlyName: model.friendlyName || model.id,
          provider,
        }
      })
      .filter((model) => {
        // Drop duplicate composite ids, keeping the first occurrence.
        if (seenModelIds.has(model.id)) return false
        seenModelIds.add(model.id)
        return true
      })
    const { selectedModel } = get()
    const normalizedSelectedModel = normalizeSelectedModelKey(selectedModel, normalizedModels)
    const selectedModelExists = normalizedModels.some(
      (model) => model.id === normalizedSelectedModel
    )
    // Pick the best default: prefer claude-opus-4-6 with provider priority:
    // direct anthropic > bedrock > azure-anthropic > any other.
    let nextSelectedModel = normalizedSelectedModel
    if (!selectedModelExists && normalizedModels.length > 0) {
      let opus46: AvailableModel | undefined
      for (const prov of MODEL_PROVIDER_PRIORITY) {
        opus46 = normalizedModels.find((m) => m.id === `${prov}/claude-opus-4-6`)
        if (opus46) break
      }
      // Fall back to any provider's opus-4-6, then to the first model overall.
      if (!opus46) opus46 = normalizedModels.find((m) => m.id.endsWith('/claude-opus-4-6'))
      nextSelectedModel = opus46 ? opus46.id : normalizedModels[0].id
    }
    set({
      availableModels: normalizedModels,
      selectedModel: nextSelectedModel as CopilotStore['selectedModel'],
      isLoadingModels: false,
    })
  } catch (error) {
    logger.warn('[Copilot] Failed to load available models', {
      error: error instanceof Error ? error.message : String(error),
    })
    set({ isLoadingModels: false })
  }
},
loadAutoAllowedTools: async () => {
try {

View File

@@ -1,4 +1,5 @@
import type { CopilotMode, CopilotModelId } from '@/lib/copilot/models'
import type { AvailableModel } from '@/lib/copilot/types'
export type { CopilotMode, CopilotModelId } from '@/lib/copilot/models'
@@ -115,7 +116,8 @@ export interface CopilotState {
mode: CopilotMode
selectedModel: CopilotModelId
agentPrefetch: boolean
enabledModels: string[] | null // Null means not loaded yet, array of model IDs when loaded
availableModels: AvailableModel[]
isLoadingModels: boolean
isCollapsed: boolean
currentChat: CopilotChat | null
@@ -183,7 +185,7 @@ export interface CopilotActions {
setMode: (mode: CopilotMode) => void
setSelectedModel: (model: CopilotStore['selectedModel']) => Promise<void>
setAgentPrefetch: (prefetch: boolean) => void
setEnabledModels: (models: string[] | null) => void
loadAvailableModels: () => Promise<void>
setWorkflowId: (workflowId: string | null) => Promise<void>
validateCurrentChat: () => boolean