Compare commits

...

4 Commits

Author              SHA1         Message                  Date
Siddharth Ganesan   aca7f43e35   Codex                    2026-01-14 20:41:55 -08:00
Siddharth Ganesan   2423255944   Clean up model options   2026-01-14 19:12:52 -08:00
Siddharth Ganesan   3aee702340   Fix lint                 2026-01-14 15:15:33 -08:00
Siddharth Ganesan   7c8ca1c14a   Ux                       2026-01-14 15:15:17 -08:00
10 changed files with 88 additions and 107 deletions

View File

@@ -52,6 +52,9 @@ const ChatMessageSchema = z.object({
   'gpt-5.1-high',
   'gpt-5-codex',
   'gpt-5.1-codex',
+  'gpt-5.2',
+  'gpt-5.2-codex',
+  'gpt-5.2-pro',
   'gpt-4o',
   'gpt-4.1',
   'o3',

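The hunk above adds the three GPT 5.2 variants to the accepted model identifiers in the chat message schema. A minimal sketch of the pattern, assuming the list feeds a `z.enum` on a `model` field (the field names here are illustrative, not the repo's actual schema):

```ts
import { z } from 'zod'

// Illustrative reduction of the schema touched above: model is a closed
// string enum, so the API rejects identifiers that are not listed.
const ChatMessageSchema = z.object({
  content: z.string(),
  model: z.enum(['gpt-5.1-codex', 'gpt-5.2', 'gpt-5.2-codex', 'gpt-5.2-pro', 'gpt-4o', 'o3']),
})

ChatMessageSchema.parse({ content: 'hi', model: 'gpt-5.2-codex' }) // passes
ChatMessageSchema.parse({ content: 'hi', model: 'gpt-6' }) // throws ZodError
```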
View File

@@ -15,11 +15,14 @@ const DEFAULT_ENABLED_MODELS: Record<string, boolean> = {
   'gpt-5-medium': false,
   'gpt-5-high': false,
   'gpt-5.1-fast': false,
-  'gpt-5.1': true,
-  'gpt-5.1-medium': true,
+  'gpt-5.1': false,
+  'gpt-5.1-medium': false,
   'gpt-5.1-high': false,
   'gpt-5-codex': false,
-  'gpt-5.1-codex': true,
+  'gpt-5.1-codex': false,
+  'gpt-5.2': false,
+  'gpt-5.2-codex': true,
+  'gpt-5.2-pro': true,
   o3: true,
   'claude-4-sonnet': false,
   'claude-4.5-haiku': true,

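This map flips the 5.1 family off by default and, among the new entries, enables only `gpt-5.2-codex` and `gpt-5.2-pro`. A sketch of how such a record typically gates the selectable options; the `getEnabledOptions` helper is hypothetical and not part of this diff:

```ts
// Hypothetical helper: keep only options whose value is explicitly enabled.
// Unknown or unlisted models stay hidden.
function getEnabledOptions<T extends { value: string }>(
  options: readonly T[],
  enabled: Record<string, boolean>
): T[] {
  return options.filter((option) => enabled[option.value] === true)
}
```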
View File

@@ -2,29 +2,9 @@ import { memo, useEffect, useRef, useState } from 'react'
 import CopilotMarkdownRenderer from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components/markdown-renderer'
 /**
- * Minimum delay between characters (fast catch-up mode)
+ * Character animation delay in milliseconds
  */
-const MIN_DELAY = 1
-/**
- * Maximum delay between characters (when waiting for content)
- */
-const MAX_DELAY = 12
-/**
- * Default delay when streaming normally
- */
-const DEFAULT_DELAY = 4
-/**
- * How far behind (in characters) before we speed up
- */
-const CATCH_UP_THRESHOLD = 20
-/**
- * How close to content before we slow down
- */
-const SLOW_DOWN_THRESHOLD = 5
+const CHARACTER_DELAY = 3
 /**
  * StreamingIndicator shows animated dots during message streaming
@@ -54,50 +34,21 @@ interface SmoothStreamingTextProps {
   isStreaming: boolean
 }
-/**
- * Calculates adaptive delay based on how far behind animation is from actual content
- *
- * @param displayedLength - Current displayed content length
- * @param totalLength - Total available content length
- * @returns Delay in milliseconds
- */
-function calculateAdaptiveDelay(displayedLength: number, totalLength: number): number {
-  const charsRemaining = totalLength - displayedLength
-  if (charsRemaining > CATCH_UP_THRESHOLD) {
-    // Far behind - speed up to catch up
-    // Scale from MIN_DELAY to DEFAULT_DELAY based on how far behind
-    const catchUpFactor = Math.min(1, (charsRemaining - CATCH_UP_THRESHOLD) / 50)
-    return MIN_DELAY + (DEFAULT_DELAY - MIN_DELAY) * (1 - catchUpFactor)
-  }
-  if (charsRemaining <= SLOW_DOWN_THRESHOLD) {
-    // Close to content edge - slow down to feel natural
-    // The closer we are, the slower we go (up to MAX_DELAY)
-    const slowFactor = 1 - charsRemaining / SLOW_DOWN_THRESHOLD
-    return DEFAULT_DELAY + (MAX_DELAY - DEFAULT_DELAY) * slowFactor
-  }
-  // Normal streaming speed
-  return DEFAULT_DELAY
-}
 /**
  * SmoothStreamingText component displays text with character-by-character animation
- * Creates a smooth streaming effect for AI responses with adaptive speed
- *
- * Uses adaptive pacing: speeds up when catching up, slows down near content edge
+ * Creates a smooth streaming effect for AI responses
  *
  * @param props - Component props
  * @returns Streaming text with smooth animation
  */
 export const SmoothStreamingText = memo(
   ({ content, isStreaming }: SmoothStreamingTextProps) => {
-    const [displayedContent, setDisplayedContent] = useState('')
+    // Initialize with full content when not streaming to avoid flash on page load
+    const [displayedContent, setDisplayedContent] = useState(() => (isStreaming ? '' : content))
     const contentRef = useRef(content)
-    const rafRef = useRef<number | null>(null)
-    const indexRef = useRef(0)
-    const lastFrameTimeRef = useRef<number>(0)
+    const timeoutRef = useRef<NodeJS.Timeout | null>(null)
+    // Initialize index based on streaming state
+    const indexRef = useRef(isStreaming ? 0 : content.length)
     const isAnimatingRef = useRef(false)
     useEffect(() => {
@@ -110,42 +61,33 @@ export const SmoothStreamingText = memo(
       }
       if (isStreaming) {
-        if (indexRef.current < content.length && !isAnimatingRef.current) {
-          isAnimatingRef.current = true
-          lastFrameTimeRef.current = performance.now()
-          const animateText = (timestamp: number) => {
+        if (indexRef.current < content.length) {
+          const animateText = () => {
             const currentContent = contentRef.current
             const currentIndex = indexRef.current
-            const elapsed = timestamp - lastFrameTimeRef.current
-            // Calculate adaptive delay based on how far behind we are
-            const delay = calculateAdaptiveDelay(currentIndex, currentContent.length)
-            if (elapsed >= delay) {
-              if (currentIndex < currentContent.length) {
-                const newDisplayed = currentContent.slice(0, currentIndex + 1)
-                setDisplayedContent(newDisplayed)
-                indexRef.current = currentIndex + 1
-                lastFrameTimeRef.current = timestamp
-              }
-            }
-            if (indexRef.current < currentContent.length) {
-              rafRef.current = requestAnimationFrame(animateText)
+            if (currentIndex < currentContent.length) {
+              const newDisplayed = currentContent.slice(0, currentIndex + 1)
+              setDisplayedContent(newDisplayed)
+              indexRef.current = currentIndex + 1
+              timeoutRef.current = setTimeout(animateText, CHARACTER_DELAY)
             } else {
               isAnimatingRef.current = false
             }
           }
-          rafRef.current = requestAnimationFrame(animateText)
-        } else if (indexRef.current < content.length && isAnimatingRef.current) {
-          // Animation already running, it will pick up new content automatically
+          if (!isAnimatingRef.current) {
+            if (timeoutRef.current) {
+              clearTimeout(timeoutRef.current)
+            }
+            isAnimatingRef.current = true
+            animateText()
+          }
         }
       } else {
         // Streaming ended - show full content immediately
-        if (rafRef.current) {
-          cancelAnimationFrame(rafRef.current)
+        if (timeoutRef.current) {
+          clearTimeout(timeoutRef.current)
         }
         setDisplayedContent(content)
         indexRef.current = content.length
@@ -153,8 +95,8 @@ export const SmoothStreamingText = memo(
       }
       return () => {
-        if (rafRef.current) {
-          cancelAnimationFrame(rafRef.current)
+        if (timeoutRef.current) {
+          clearTimeout(timeoutRef.current)
         }
         isAnimatingRef.current = false
       }

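The rewrite above replaces the adaptive requestAnimationFrame loop (1-12 ms delays scaled by backlog) with a self-rescheduling setTimeout chain at a flat 3 ms per character. A condensed, self-contained sketch of the resulting pattern as a hook; the `useTypewriter` name is mine, and the ref bookkeeping is simplified relative to the component in the diff:

```ts
import { useEffect, useRef, useState } from 'react'

const CHARACTER_DELAY = 3 // ms between characters, as in the diff

// Minimal sketch of the post-change animation loop: a self-rescheduling
// setTimeout that reads the latest content through a ref, so newly streamed
// characters are picked up without restarting the timer chain.
function useTypewriter(content: string, isStreaming: boolean): string {
  const [displayed, setDisplayed] = useState(() => (isStreaming ? '' : content))
  const contentRef = useRef(content)
  const indexRef = useRef(isStreaming ? 0 : content.length)
  const timeoutRef = useRef<ReturnType<typeof setTimeout> | null>(null)

  useEffect(() => {
    contentRef.current = content
    if (!isStreaming) {
      // Streaming ended: flush everything immediately.
      if (timeoutRef.current) clearTimeout(timeoutRef.current)
      setDisplayed(content)
      indexRef.current = content.length
      return
    }
    const tick = () => {
      const current = contentRef.current
      if (indexRef.current < current.length) {
        indexRef.current += 1
        setDisplayed(current.slice(0, indexRef.current))
        timeoutRef.current = setTimeout(tick, CHARACTER_DELAY)
      }
    }
    tick()
    return () => {
      if (timeoutRef.current) clearTimeout(timeoutRef.current)
    }
  }, [content, isStreaming])

  return displayed
}
```

One practical note on the swap: requestAnimationFrame fires at most once per display frame (roughly 16 ms at 60 Hz), so the old code could never actually reach its 1-4 ms target delays, whereas a setTimeout chain can emit several characters per frame (browsers clamp deeply nested timeouts to about 4 ms, which still beats a frame).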
View File

@@ -46,12 +46,14 @@ interface SmoothThinkingTextProps {
  */
 const SmoothThinkingText = memo(
   ({ content, isStreaming }: SmoothThinkingTextProps) => {
-    const [displayedContent, setDisplayedContent] = useState('')
+    // Initialize with full content when not streaming to avoid flash on page load
+    const [displayedContent, setDisplayedContent] = useState(() => (isStreaming ? '' : content))
     const [showGradient, setShowGradient] = useState(false)
     const contentRef = useRef(content)
     const textRef = useRef<HTMLDivElement>(null)
     const rafRef = useRef<number | null>(null)
-    const indexRef = useRef(0)
+    // Initialize index based on streaming state
+    const indexRef = useRef(isStreaming ? 0 : content.length)
     const lastFrameTimeRef = useRef<number>(0)
     const isAnimatingRef = useRef(false)

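SmoothThinkingText gets the same two fixes as SmoothStreamingText: a lazy `useState` initializer and a pre-seeded index. The lazy form matters because the initializer runs only on mount; the same two lines from the diff, shown in isolation:

```ts
// Runs once on mount: a message rehydrated from the DB paints its full text
// immediately instead of flashing empty and re-animating from index 0.
const [displayedContent, setDisplayedContent] = useState(() =>
  isStreaming ? '' : content
)
// The index ref is seeded to match, so the animation loop has nothing to do
// for already-complete messages.
const indexRef = useRef(isStreaming ? 0 : content.length)
```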
View File

@@ -1952,7 +1952,12 @@ export function ToolCall({ toolCall: toolCallProp, toolCallId, onStateChange }:
   }, [params])
   // Skip rendering some internal tools
-  if (toolCall.name === 'checkoff_todo' || toolCall.name === 'mark_todo_in_progress') return null
+  if (
+    toolCall.name === 'checkoff_todo' ||
+    toolCall.name === 'mark_todo_in_progress' ||
+    toolCall.name === 'tool_search_tool_regex'
+  )
+    return null
   // Special rendering for subagent tools - show as thinking text with tool calls at top level
   const SUBAGENT_TOOLS = [

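The skip list for internal tools has grown to three names chained with `||`. A possible tidy-up (not what this commit does) is a `Set`, which keeps membership checks O(1) and the list declarative as it grows:

```ts
// Hypothetical refactor of the condition above.
const HIDDEN_INTERNAL_TOOLS = new Set([
  'checkoff_todo',
  'mark_todo_in_progress',
  'tool_search_tool_regex',
])

if (HIDDEN_INTERNAL_TOOLS.has(toolCall.name)) return null
```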
View File

@@ -32,13 +32,6 @@ function getModelIconComponent(modelValue: string) {
   return <IconComponent className='h-3.5 w-3.5' />
 }
-/**
- * Checks if a model should display the MAX badge
- */
-function isMaxModel(modelValue: string): boolean {
-  return modelValue === 'claude-4.5-sonnet' || modelValue === 'claude-4.5-opus'
-}
 /**
  * Model selector dropdown for choosing AI model.
  * Displays model icon and label.
@@ -139,11 +132,6 @@ export function ModelSelector({ selectedModel, isNearTop, onModelSelect }: Model
             >
               {getModelIconComponent(option.value)}
               <span>{option.label}</span>
-              {isMaxModel(option.value) && (
-                <Badge size='sm' className='ml-auto'>
-                  MAX
-                </Badge>
-              )}
             </PopoverItem>
           ))}
         </PopoverScrollArea>

View File

@@ -238,8 +238,8 @@ export const MODEL_OPTIONS = [
   { value: 'claude-4.5-opus', label: 'Claude 4.5 Opus' },
   { value: 'claude-4.5-sonnet', label: 'Claude 4.5 Sonnet' },
   { value: 'claude-4.5-haiku', label: 'Claude 4.5 Haiku' },
-  { value: 'gpt-5.1-codex', label: 'GPT 5.1 Codex' },
-  { value: 'gpt-5.1-medium', label: 'GPT 5.1 Medium' },
+  { value: 'gpt-5.2-codex', label: 'GPT 5.2 Codex' },
+  { value: 'gpt-5.2-pro', label: 'GPT 5.2 Pro' },
   { value: 'gemini-3-pro', label: 'Gemini 3 Pro' },
 ] as const

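Note that this changeset edits the same model list in four places: this options array, the zod schema, and the two string-literal unions below. Since `MODEL_OPTIONS` is declared `as const`, the union could instead be derived from it; a sketch of that consolidation (not applied in the repo):

```ts
// Derives the value union from the tuple, so adding an option here
// automatically widens the type used elsewhere.
type CopilotModel = (typeof MODEL_OPTIONS)[number]['value']
// 'claude-4.5-opus' | 'claude-4.5-sonnet' | ... | 'gemini-3-pro'
```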
View File

@@ -77,6 +77,9 @@ export interface SendMessageRequest {
     | 'gpt-5.1-high'
     | 'gpt-5-codex'
     | 'gpt-5.1-codex'
+    | 'gpt-5.2'
+    | 'gpt-5.2-codex'
+    | 'gpt-5.2-pro'
     | 'gpt-4o'
     | 'gpt-4.1'
     | 'o3'

View File

@@ -422,7 +422,8 @@ function abortAllInProgressTools(set: any, get: () => CopilotStore) {
  * Loads messages from DB for UI rendering.
  * Messages are stored exactly as they render, so we just need to:
  * 1. Register client tool instances for any tool calls
- * 2. Return the messages as-is
+ * 2. Clear any streaming flags (messages loaded from DB are never actively streaming)
+ * 3. Return the messages
  */
 function normalizeMessagesForUI(messages: CopilotMessage[]): CopilotMessage[] {
   try {
@@ -438,23 +439,54 @@ function normalizeMessagesForUI(messages: CopilotMessage[]): CopilotMessage[] {
       }
     }
-    // Register client tool instances for all tool calls so they can be looked up
+    // Register client tool instances and clear streaming flags for all tool calls
     for (const message of messages) {
       if (message.contentBlocks) {
         for (const block of message.contentBlocks as any[]) {
           if (block?.type === 'tool_call' && block.toolCall) {
             registerToolCallInstances(block.toolCall)
+            clearStreamingFlags(block.toolCall)
           }
         }
       }
+      // Also clear from toolCalls array (legacy format)
+      if (message.toolCalls) {
+        for (const toolCall of message.toolCalls) {
+          clearStreamingFlags(toolCall)
+        }
+      }
     }
-    // Return messages as-is - they're already in the correct format for rendering
     return messages
   } catch {
     return messages
   }
 }
+/**
+ * Recursively clears streaming flags from a tool call and its nested subagent tool calls.
+ * This ensures messages loaded from DB don't appear to be streaming.
+ */
+function clearStreamingFlags(toolCall: any): void {
+  if (!toolCall) return
+  // Always set subAgentStreaming to false - messages loaded from DB are never streaming
+  toolCall.subAgentStreaming = false
+  // Clear nested subagent tool calls
+  if (Array.isArray(toolCall.subAgentBlocks)) {
+    for (const block of toolCall.subAgentBlocks) {
+      if (block?.type === 'subagent_tool_call' && block.toolCall) {
+        clearStreamingFlags(block.toolCall)
+      }
+    }
+  }
+  if (Array.isArray(toolCall.subAgentToolCalls)) {
+    for (const subTc of toolCall.subAgentToolCalls) {
+      clearStreamingFlags(subTc)
+    }
+  }
+}
 /**
  * Recursively registers client tool instances for a tool call and its nested subagent tool calls.
  */

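`clearStreamingFlags` has to walk both nesting shapes a tool call can carry: `subAgentBlocks` entries that wrap a tool call, and a flat `subAgentToolCalls` array. A small illustrative check with made-up data (field names follow the diff above):

```ts
// Made-up nested input: a subagent tool call two levels deep, all flagged as streaming.
const toolCall: any = {
  subAgentStreaming: true,
  subAgentBlocks: [
    {
      type: 'subagent_tool_call',
      toolCall: { subAgentStreaming: true, subAgentToolCalls: [{ subAgentStreaming: true }] },
    },
  ],
}

clearStreamingFlags(toolCall)

// Every level is now cleared, so nothing rehydrated from the DB shows a spinner.
console.assert(toolCall.subAgentStreaming === false)
console.assert(toolCall.subAgentBlocks[0].toolCall.subAgentStreaming === false)
console.assert(toolCall.subAgentBlocks[0].toolCall.subAgentToolCalls[0].subAgentStreaming === false)
```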
View File

@@ -106,6 +106,9 @@ export interface CopilotState {
     | 'gpt-5.1-high'
     | 'gpt-5-codex'
     | 'gpt-5.1-codex'
+    | 'gpt-5.2'
+    | 'gpt-5.2-codex'
+    | 'gpt-5.2-pro'
     | 'gpt-4o'
     | 'gpt-4.1'
     | 'o3'