Mirror of https://github.com/simstudioai/sim.git (synced 2026-01-08 14:43:54 -05:00)
feat(voice): speech to speech mode for deployed chat (#467)
* finished barebones, optimized speech to speech chat deploy
* better visualization, interruption still not good
* fixed some turn detection, still not ideal. have to press mute + unmute to process successive queries
* improvements
* removed MediaSource in favor of blob, simplified echo cancellation and overall logic
* simplified
* cleanups
* ack PR comments
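
Of the notes above, the MediaSource-to-Blob change is the most architectural. A minimal sketch of the Blob approach (illustrative only, not the committed implementation; the helper name is invented) looks like this: buffer the streamed MP3 chunks from the TTS proxy below, then play them through a Blob URL instead of appending to a MediaSource buffer.

```ts
// Sketch only: accumulate streamed audio chunks and play them as a single Blob.
// Assumes an audio/mpeg stream such as the one the TTS proxy route returns.
async function playStreamedAudio(stream: ReadableStream<Uint8Array>): Promise<void> {
  const reader = stream.getReader()
  const chunks: BlobPart[] = []
  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    if (value) chunks.push(value)
  }
  // A Blob avoids MediaSource's codec/append bookkeeping, at the cost of
  // waiting for the full (or batched) payload before playback starts.
  const blob = new Blob(chunks, { type: 'audio/mpeg' })
  const url = URL.createObjectURL(blob)
  const audio = new Audio(url)
  audio.onended = () => URL.revokeObjectURL(url) // release memory when done
  await audio.play()
}
```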
apps/sim/app/api/proxy/tts/stream/route.ts (new file, 118 lines)
@@ -0,0 +1,118 @@
import type { NextRequest } from 'next/server'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console-logger'

const logger = createLogger('ProxyTTSStreamAPI')

export async function POST(request: NextRequest) {
  try {
    const body = await request.json()
    const { text, voiceId, modelId = 'eleven_turbo_v2_5' } = body

    if (!text || !voiceId) {
      return new Response('Missing required parameters', { status: 400 })
    }

    const apiKey = env.ELEVENLABS_API_KEY
    if (!apiKey) {
      logger.error('ELEVENLABS_API_KEY not configured on server')
      return new Response('ElevenLabs service not configured', { status: 503 })
    }

    const endpoint = `https://api.elevenlabs.io/v1/text-to-speech/${voiceId}/stream`

    const response = await fetch(endpoint, {
      method: 'POST',
      headers: {
        Accept: 'audio/mpeg',
        'Content-Type': 'application/json',
        'xi-api-key': apiKey,
      },
      body: JSON.stringify({
        text,
        model_id: modelId,
        // Maximum performance settings
        optimize_streaming_latency: 4,
        output_format: 'mp3_22050_32', // Fastest format
        voice_settings: {
          stability: 0.5,
          similarity_boost: 0.8,
          style: 0.0,
          use_speaker_boost: false,
        },
        enable_ssml_parsing: false,
        apply_text_normalization: 'off',
        // Use auto mode for fastest possible streaming
        // Note: This may sacrifice some quality for speed
        use_pvc_as_ivc: false, // Use fastest voice processing
      }),
    })

    if (!response.ok) {
      logger.error(`Failed to generate Stream TTS: ${response.status} ${response.statusText}`)
      return new Response(`Failed to generate TTS: ${response.status} ${response.statusText}`, {
        status: response.status,
      })
    }

    if (!response.body) {
      logger.error('No response body received from ElevenLabs')
      return new Response('No audio stream received', { status: 422 })
    }

    // Create optimized streaming response
    const { readable, writable } = new TransformStream({
      transform(chunk, controller) {
        // Pass through chunks immediately without buffering
        controller.enqueue(chunk)
      },
      flush(controller) {
        // Ensure all data is flushed immediately
        controller.terminate()
      },
    })

    const writer = writable.getWriter()
    const reader = response.body.getReader()

    ;(async () => {
      try {
        while (true) {
          const { done, value } = await reader.read()
          if (done) {
            await writer.close()
            break
          }
          // Write immediately without waiting
          writer.write(value).catch(logger.error)
        }
      } catch (error) {
        logger.error('Error during Stream streaming:', error)
        await writer.abort(error)
      }
    })()

    return new Response(readable, {
      headers: {
        'Content-Type': 'audio/mpeg',
        'Transfer-Encoding': 'chunked',
        'Cache-Control': 'no-cache, no-store, must-revalidate',
        Pragma: 'no-cache',
        Expires: '0',
        'X-Content-Type-Options': 'nosniff',
        'Access-Control-Allow-Origin': '*',
        Connection: 'keep-alive',
        // Stream headers for better streaming
        'X-Accel-Buffering': 'no', // Disable nginx buffering
        'X-Stream-Type': 'real-time',
      },
    })
  } catch (error) {
    logger.error('Error in Stream TTS:', error)

    return new Response(
      `Internal Server Error: ${error instanceof Error ? error.message : 'Unknown error'}`,
      { status: 500 }
    )
  }
}
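
A hypothetical client-side caller for the route above might look like the following (a sketch; the request body shape mirrors the handler, but the helper name is invented):

```ts
// Hypothetical client call against the proxy route defined above.
async function fetchTtsStream(text: string, voiceId: string): Promise<ReadableStream<Uint8Array>> {
  const res = await fetch('/api/proxy/tts/stream', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ text, voiceId }), // modelId falls back to 'eleven_turbo_v2_5'
  })
  if (!res.ok || !res.body) {
    throw new Error(`TTS proxy failed: ${res.status} ${res.statusText}`)
  }
  return res.body // audio/mpeg byte stream
}
```

Paired with the Blob playback sketch near the top, this gives an end-to-end path: `playStreamedAudio(await fetchTtsStream(text, voiceId))`.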
@@ -2,6 +2,8 @@
 import { type RefObject, useCallback, useEffect, useRef, useState } from 'react'
 import { v4 as uuidv4 } from 'uuid'
+import { createLogger } from '@/lib/logs/console-logger'
+import { noop } from '@/lib/utils'
 import { getFormattedGitHubStars } from '@/app/(landing)/actions/github'
 import EmailAuth from './components/auth/email/email-auth'
 import PasswordAuth from './components/auth/password/password-auth'
@@ -11,8 +13,12 @@ import { ChatInput } from './components/input/input'
 import { ChatLoadingState } from './components/loading-state/loading-state'
 import type { ChatMessage } from './components/message/message'
 import { ChatMessageContainer } from './components/message-container/message-container'
+import { VoiceInterface } from './components/voice-interface/voice-interface'
+import { useAudioStreaming } from './hooks/use-audio-streaming'
 import { useChatStreaming } from './hooks/use-chat-streaming'

+const logger = createLogger('ChatClient')
+
 interface ChatConfig {
   id: string
   title: string
@@ -26,6 +32,39 @@ interface ChatConfig {
   authType?: 'public' | 'password' | 'email'
 }

+interface AudioStreamingOptions {
+  voiceId: string
+  onError: (error: Error) => void
+}
+
+const DEFAULT_VOICE_SETTINGS = {
+  voiceId: 'EXAVITQu4vr4xnSDxMaL', // Default ElevenLabs voice (Bella)
+}
+
+/**
+ * Creates an audio stream handler for text-to-speech conversion
+ * @param streamTextToAudio - Function to stream text to audio
+ * @param voiceId - The voice ID to use for TTS
+ * @returns Audio stream handler function
+ */
+function createAudioStreamHandler(
+  streamTextToAudio: (text: string, options: AudioStreamingOptions) => Promise<void>,
+  voiceId: string
+) {
+  return async (text: string) => {
+    try {
+      await streamTextToAudio(text, {
+        voiceId,
+        onError: (error: Error) => {
+          logger.error('Audio streaming error:', error)
+        },
+      })
+    } catch (error) {
+      logger.error('TTS error:', error)
+    }
+  }
+}
+
 function throttle<T extends (...args: any[]) => any>(func: T, delay: number): T {
   let timeoutId: NodeJS.Timeout | null = null
   let lastExecTime = 0
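
The hunk boundary cuts `throttle` off after its first two locals; the remainder is not shown in this view. For orientation, a conventional leading-plus-trailing-edge throttle with that signature looks roughly like the following (an illustrative sketch, not the repository's code):

```ts
// Sketch of a conventional throttle matching the truncated signature above.
// The actual body is elided by the hunk boundary, so details are assumptions.
function throttleSketch<T extends (...args: any[]) => any>(func: T, delay: number): T {
  let timeoutId: ReturnType<typeof setTimeout> | null = null
  let lastExecTime = 0
  return ((...args: any[]) => {
    const now = Date.now()
    const invoke = () => {
      lastExecTime = Date.now()
      func(...args)
    }
    if (now - lastExecTime >= delay) {
      invoke() // leading edge: run immediately once enough time has passed
    } else if (!timeoutId) {
      // trailing edge: schedule exactly one run for the remaining wait
      timeoutId = setTimeout(() => {
        timeoutId = null
        invoke()
      }, delay - (now - lastExecTime))
    }
  }) as T
}
```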
@@ -60,19 +99,17 @@ export default function ChatClient({ subdomain }: { subdomain: string }) {
   const [starCount, setStarCount] = useState('3.4k')
   const [conversationId, setConversationId] = useState('')

   // Simple state for showing scroll button
   const [showScrollButton, setShowScrollButton] = useState(false)

   // Track if user has manually scrolled during response
   const [userHasScrolled, setUserHasScrolled] = useState(false)
   const isUserScrollingRef = useRef(false)

   // Authentication state
   const [authRequired, setAuthRequired] = useState<'password' | 'email' | null>(null)

   // Use the custom streaming hook
+  const [isVoiceFirstMode, setIsVoiceFirstMode] = useState(false)
   const { isStreamingResponse, abortControllerRef, stopStreaming, handleStreamedResponse } =
     useChatStreaming()
+  const audioContextRef = useRef<AudioContext | null>(null)
+  const { isPlayingAudio, streamTextToAudio, stopAudio } = useAudioStreaming(audioContextRef)

   const scrollToBottom = useCallback(() => {
     if (messagesEndRef.current) {
@@ -193,7 +230,7 @@ export default function ChatClient({ subdomain }: { subdomain: string }) {
         ])
       }
     } catch (error) {
-      console.error('Error fetching chat config:', error)
+      logger.error('Error fetching chat config:', error)
       setError('This chat is currently unavailable. Please try again later.')
     }
   }
@@ -208,7 +245,7 @@ export default function ChatClient({ subdomain }: { subdomain: string }) {
         setStarCount(formattedStars)
       })
       .catch((err) => {
-        console.error('Failed to fetch GitHub stars:', err)
+        logger.error('Failed to fetch GitHub stars:', err)
       })
   }, [subdomain])
@@ -224,7 +261,7 @@ export default function ChatClient({ subdomain }: { subdomain: string }) {
   }

   // Handle sending a message
-  const handleSendMessage = async (messageParam?: string) => {
+  const handleSendMessage = async (messageParam?: string, isVoiceInput = false) => {
     const messageToSend = messageParam ?? inputValue
     if (!messageToSend.trim() || isLoading) return
@@ -278,18 +315,32 @@ export default function ChatClient({ subdomain }: { subdomain: string }) {
       const contentType = response.headers.get('Content-Type') || ''

       if (contentType.includes('text/plain')) {
-        // Handle streaming response - pass the current userHasScrolled value
+        const shouldPlayAudio = isVoiceInput || isVoiceFirstMode
+
+        const audioStreamHandler = shouldPlayAudio
+          ? createAudioStreamHandler(streamTextToAudio, DEFAULT_VOICE_SETTINGS.voiceId)
+          : undefined
+
+        // Handle streaming response with audio support
         await handleStreamedResponse(
           response,
           setMessages,
           setIsLoading,
           scrollToBottom,
-          userHasScrolled
+          userHasScrolled,
+          {
+            voiceSettings: {
+              isVoiceEnabled: true,
+              voiceId: DEFAULT_VOICE_SETTINGS.voiceId,
+              autoPlayResponses: isVoiceInput || isVoiceFirstMode,
+            },
+            audioStreamHandler,
+          }
         )
       } else {
         // Fallback to JSON response handling
         const responseData = await response.json()
-        console.log('Message response:', responseData)
+        logger.info('Message response:', responseData)

         // Handle different response formats from API
         if (
@@ -321,6 +372,23 @@ export default function ChatClient({ subdomain }: { subdomain: string }) {

         // Add all messages at once
         setMessages((prev) => [...prev, ...assistantMessages])
+
+        // Play audio for the full response if voice mode is enabled
+        if (isVoiceInput || isVoiceFirstMode) {
+          const fullContent = assistantMessages.map((m: ChatMessage) => m.content).join(' ')
+          if (fullContent.trim()) {
+            try {
+              await streamTextToAudio(fullContent, {
+                voiceId: DEFAULT_VOICE_SETTINGS.voiceId,
+                onError: (error) => {
+                  logger.error('Audio playback error:', error)
+                },
+              })
+            } catch (error) {
+              logger.error('TTS error:', error)
+            }
+          }
+        }
       } else {
         // Handle single output as before
         let messageContent = responseData.output
@@ -349,10 +417,29 @@ export default function ChatClient({ subdomain }: { subdomain: string }) {
         }

         setMessages((prev) => [...prev, assistantMessage])
+
+        // Play audio for the response if voice mode is enabled
+        if ((isVoiceInput || isVoiceFirstMode) && assistantMessage.content) {
+          const contentString =
+            typeof assistantMessage.content === 'string'
+              ? assistantMessage.content
+              : JSON.stringify(assistantMessage.content)
+
+          try {
+            await streamTextToAudio(contentString, {
+              voiceId: DEFAULT_VOICE_SETTINGS.voiceId,
+              onError: (error) => {
+                logger.error('Audio playback error:', error)
+              },
+            })
+          } catch (error) {
+            logger.error('TTS error:', error)
+          }
+        }
       }
     }
   } catch (error) {
-    console.error('Error sending message:', error)
+    logger.error('Error sending message:', error)

     const errorMessage: ChatMessage = {
       id: crypto.randomUUID(),
@@ -367,6 +454,45 @@ export default function ChatClient({ subdomain }: { subdomain: string }) {
     }
   }

+  // Stop audio when component unmounts or when streaming is stopped
+  useEffect(() => {
+    return () => {
+      stopAudio()
+      if (audioContextRef.current && audioContextRef.current.state !== 'closed') {
+        audioContextRef.current.close()
+      }
+    }
+  }, [stopAudio])
+
+  // Voice interruption - stop audio when user starts speaking
+  const handleVoiceInterruption = useCallback(() => {
+    stopAudio()
+
+    // Stop any ongoing streaming response
+    if (isStreamingResponse) {
+      stopStreaming(setMessages)
+    }
+  }, [isStreamingResponse, stopStreaming, setMessages, stopAudio])
+
+  // Handle voice mode activation
+  const handleVoiceStart = useCallback(() => {
+    setIsVoiceFirstMode(true)
+  }, [])
+
+  // Handle exiting voice mode
+  const handleExitVoiceMode = useCallback(() => {
+    setIsVoiceFirstMode(false)
+    stopAudio() // Stop any playing audio when exiting
+  }, [stopAudio])
+
+  // Handle voice transcript from voice-first interface
+  const handleVoiceTranscript = useCallback(
+    (transcript: string) => {
+      handleSendMessage(transcript, true)
+    },
+    [handleSendMessage]
+  )
+
   // If error, show error message using the extracted component
   if (error) {
     return <ChatErrorState error={error} starCount={starCount} />
@@ -405,6 +531,27 @@ export default function ChatClient({ subdomain }: { subdomain: string }) {
     return <ChatLoadingState />
   }

+  // Voice-first mode interface
+  if (isVoiceFirstMode) {
+    return (
+      <VoiceInterface
+        onCallEnd={handleExitVoiceMode}
+        onVoiceTranscript={handleVoiceTranscript}
+        onVoiceStart={noop}
+        onVoiceEnd={noop}
+        onInterrupt={handleVoiceInterruption}
+        isStreaming={isStreamingResponse}
+        isPlayingAudio={isPlayingAudio}
+        audioContextRef={audioContextRef}
+        messages={messages.map((msg) => ({
+          content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content),
+          type: msg.type,
+        }))}
+      />
+    )
+  }
+
+  // Standard text-based chat interface
   return (
     <div className='fixed inset-0 z-[100] flex flex-col bg-background'>
       {/* Header component */}
@@ -426,11 +573,12 @@ export default function ChatClient({ subdomain }: { subdomain: string }) {
       <div className='relative p-4 pb-6'>
         <div className='relative mx-auto max-w-3xl'>
           <ChatInput
-            onSubmit={(value) => {
-              void handleSendMessage(value)
+            onSubmit={(value, isVoiceInput) => {
+              void handleSendMessage(value, isVoiceInput)
             }}
             isStreaming={isStreamingResponse}
            onStopStreaming={() => stopStreaming(setMessages)}
+            onVoiceStart={handleVoiceStart}
           />
         </div>
       </div>
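
The diff above threads a new options object into `handleStreamedResponse`. The hook's internals are not part of this commit view, so the following is purely an illustrative sketch of how such a handler could be consumed (names and shapes are assumptions, not the hook's actual code): flushing text to TTS at sentence boundaries lets audio start before the stream finishes.

```ts
// Illustrative only: how a streaming hook might hand completed sentences to
// an audio handler. The real use-chat-streaming implementation is not shown
// in this diff, so every name here is an assumption.
interface StreamAudioOptions {
  voiceSettings: { isVoiceEnabled: boolean; voiceId: string; autoPlayResponses: boolean }
  audioStreamHandler?: (text: string) => Promise<void>
}

function maybeSpeak(buffer: string, options: StreamAudioOptions): string {
  // Flush on a sentence boundary so TTS can begin before the stream ends.
  const match = buffer.match(/^(.*?[.!?])\s+(.*)$/s)
  if (match && options.voiceSettings.autoPlayResponses && options.audioStreamHandler) {
    void options.audioStreamHandler(match[1]) // fire and forget; errors handled inside
    return match[2] // keep the unfinished remainder buffered
  }
  return buffer
}
```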
@@ -1,967 +0,0 @@
'use client'

import { type KeyboardEvent, useEffect, useMemo, useRef, useState } from 'react'
import { ArrowUp, Loader2, Lock, Mail } from 'lucide-react'
import { v4 as uuidv4 } from 'uuid'
import { Button } from '@/components/ui/button'
import { Input } from '@/components/ui/input'
import { OTPInputForm } from '@/components/ui/input-otp-form'
import { getFormattedGitHubStars } from '@/app/(landing)/actions/github'
import HeaderLinks from './components/header-links/header-links'
import MarkdownRenderer from './components/markdown-renderer/markdown-renderer'

// Define message type
interface ChatMessage {
  id: string
  content: string
  type: 'user' | 'assistant'
  timestamp: Date
}

// Define chat config type
interface ChatConfig {
  id: string
  title: string
  description: string
  customizations: {
    primaryColor?: string
    logoUrl?: string
    welcomeMessage?: string
    headerText?: string
  }
  authType?: 'public' | 'password' | 'email'
}

// ChatGPT-style message component
function ClientChatMessage({ message }: { message: ChatMessage }) {
  // Check if content is a JSON object
  const isJsonObject = useMemo(() => {
    return typeof message.content === 'object' && message.content !== null
  }, [message.content])

  // For user messages (on the right)
  if (message.type === 'user') {
    return (
      <div className='px-4 py-5'>
        <div className='mx-auto max-w-3xl'>
          <div className='flex justify-end'>
            <div className='max-w-[80%] rounded-3xl bg-[#F4F4F4] px-4 py-3 dark:bg-gray-600'>
              <div className='whitespace-pre-wrap break-words text-[#0D0D0D] text-base leading-relaxed'>
                {isJsonObject ? (
                  <pre>{JSON.stringify(message.content, null, 2)}</pre>
                ) : (
                  <span>{message.content}</span>
                )}
              </div>
            </div>
          </div>
        </div>
      </div>
    )
  }

  // For assistant messages (on the left)
  return (
    <div className='px-4 py-5'>
      <div className='mx-auto max-w-3xl'>
        <div className='flex'>
          <div className='max-w-[80%]'>
            <div className='whitespace-pre-wrap break-words text-base leading-relaxed'>
              {isJsonObject ? (
                <pre>{JSON.stringify(message.content, null, 2)}</pre>
              ) : (
                <MarkdownRenderer content={message.content as string} />
              )}
            </div>
          </div>
        </div>
      </div>
    </div>
  )
}

export default function ChatClient({ subdomain }: { subdomain: string }) {
  const [messages, setMessages] = useState<ChatMessage[]>([])
  const [inputValue, setInputValue] = useState('')
  const [isLoading, setIsLoading] = useState(false)
  const [chatConfig, setChatConfig] = useState<ChatConfig | null>(null)
  const [error, setError] = useState<string | null>(null)
  const messagesEndRef = useRef<HTMLDivElement>(null)
  const messagesContainerRef = useRef<HTMLDivElement>(null)
  const inputRef = useRef<HTMLInputElement>(null)
  const [starCount, setStarCount] = useState('3.4k')
  const [conversationId, setConversationId] = useState('')

  // Authentication state
  const [authRequired, setAuthRequired] = useState<'password' | 'email' | null>(null)
  const [password, setPassword] = useState('')
  const [email, setEmail] = useState('')
  const [authError, setAuthError] = useState<string | null>(null)
  const [isAuthenticating, setIsAuthenticating] = useState(false)

  // OTP verification state
  const [showOtpVerification, setShowOtpVerification] = useState(false)
  const [otpValue, setOtpValue] = useState('')
  const [isSendingOtp, setIsSendingOtp] = useState(false)
  const [isVerifyingOtp, setIsVerifyingOtp] = useState(false)

  // Fetch chat config function
  const fetchChatConfig = async () => {
    try {
      // Use relative URL instead of absolute URL with env.NEXT_PUBLIC_APP_URL
      const response = await fetch(`/api/chat/${subdomain}`, {
        credentials: 'same-origin',
        headers: {
          'X-Requested-With': 'XMLHttpRequest',
        },
      })

      if (!response.ok) {
        // Check if auth is required
        if (response.status === 401) {
          const errorData = await response.json()

          if (errorData.error === 'auth_required_password') {
            setAuthRequired('password')
            return
          }
          if (errorData.error === 'auth_required_email') {
            setAuthRequired('email')
            return
          }
        }

        throw new Error(`Failed to load chat configuration: ${response.status}`)
      }

      const data = await response.json()

      // The API returns the data directly without a wrapper
      setChatConfig(data)

      // Add welcome message if configured
      if (data?.customizations?.welcomeMessage) {
        setMessages([
          {
            id: 'welcome',
            content: data.customizations.welcomeMessage,
            type: 'assistant',
            timestamp: new Date(),
          },
        ])
      }
    } catch (error) {
      console.error('Error fetching chat config:', error)
      setError('This chat is currently unavailable. Please try again later.')
    }
  }

  // Fetch chat config on mount and generate new conversation ID
  useEffect(() => {
    fetchChatConfig()
    // Generate a new conversation ID whenever the page/chat is refreshed
    setConversationId(uuidv4())

    // Fetch GitHub stars
    getFormattedGitHubStars()
      .then((formattedStars) => {
        setStarCount(formattedStars)
      })
      .catch((err) => {
        console.error('Failed to fetch GitHub stars:', err)
      })
  }, [subdomain])

  // Handle keyboard input for message sending
  const handleKeyDown = (e: KeyboardEvent<HTMLInputElement>) => {
    if (e.key === 'Enter' && !e.shiftKey) {
      e.preventDefault()
      handleSendMessage()
    }
  }

  // Handle keyboard input for auth forms
  const _handleAuthKeyDown = (e: KeyboardEvent<HTMLInputElement>) => {
    if (e.key === 'Enter') {
      e.preventDefault()
      handleAuthenticate()
    }
  }

  // Handle authentication
  const handleAuthenticate = async () => {
    if (authRequired === 'password') {
      // Password auth remains the same
      setAuthError(null)
      setIsAuthenticating(true)

      try {
        const payload = { password }

        const response = await fetch(`/api/chat/${subdomain}`, {
          method: 'POST',
          credentials: 'same-origin',
          headers: {
            'Content-Type': 'application/json',
            'X-Requested-With': 'XMLHttpRequest',
          },
          body: JSON.stringify(payload),
        })

        if (!response.ok) {
          const errorData = await response.json()
          setAuthError(errorData.error || 'Authentication failed')
          return
        }

        await response.json()

        // Authentication successful, fetch config again
        await fetchChatConfig()

        // Reset auth state
        setAuthRequired(null)
        setPassword('')
      } catch (error) {
        console.error('Authentication error:', error)
        setAuthError('An error occurred during authentication')
      } finally {
        setIsAuthenticating(false)
      }
    } else if (authRequired === 'email') {
      // For email auth, we now send an OTP first
      if (!showOtpVerification) {
        // Step 1: User has entered email, send OTP
        setAuthError(null)
        setIsSendingOtp(true)

        try {
          const response = await fetch(`/api/chat/${subdomain}/otp`, {
            method: 'POST',
            headers: {
              'Content-Type': 'application/json',
              'X-Requested-With': 'XMLHttpRequest',
            },
            body: JSON.stringify({ email }),
          })

          if (!response.ok) {
            const errorData = await response.json()
            setAuthError(errorData.error || 'Failed to send verification code')
            return
          }

          // OTP sent successfully, show OTP input
          setShowOtpVerification(true)
        } catch (error) {
          console.error('Error sending OTP:', error)
          setAuthError('An error occurred while sending the verification code')
        } finally {
          setIsSendingOtp(false)
        }
      } else {
        // Step 2: User has entered OTP, verify it
        setAuthError(null)
        setIsVerifyingOtp(true)

        try {
          const response = await fetch(`/api/chat/${subdomain}/otp`, {
            method: 'PUT',
            headers: {
              'Content-Type': 'application/json',
              'X-Requested-With': 'XMLHttpRequest',
            },
            body: JSON.stringify({ email, otp: otpValue }),
          })

          if (!response.ok) {
            const errorData = await response.json()
            setAuthError(errorData.error || 'Invalid verification code')
            return
          }

          await response.json()

          // OTP verified successfully, fetch config again
          await fetchChatConfig()

          // Reset auth state
          setAuthRequired(null)
          setEmail('')
          setOtpValue('')
          setShowOtpVerification(false)
        } catch (error) {
          console.error('Error verifying OTP:', error)
          setAuthError('An error occurred during verification')
        } finally {
          setIsVerifyingOtp(false)
        }
      }
    }
  }

  // Add this function to handle resending OTP
  const handleResendOtp = async () => {
    setAuthError(null)
    setIsSendingOtp(true)

    try {
      const response = await fetch(`/api/chat/${subdomain}/otp`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'X-Requested-With': 'XMLHttpRequest',
        },
        body: JSON.stringify({ email }),
      })

      if (!response.ok) {
        const errorData = await response.json()
        setAuthError(errorData.error || 'Failed to resend verification code')
        return
      }

      // Show a message that OTP was sent
      setAuthError('Verification code sent. Please check your email.')
    } catch (error) {
      console.error('Error resending OTP:', error)
      setAuthError('An error occurred while resending the verification code')
    } finally {
      setIsSendingOtp(false)
    }
  }

  // Add a function to handle email input key down
  const handleEmailKeyDown = (e: KeyboardEvent<HTMLInputElement>) => {
    if (e.key === 'Enter') {
      e.preventDefault()
      handleAuthenticate()
    }
  }

  // Add a function to handle OTP input key down
  const _handleOtpKeyDown = (e: KeyboardEvent<HTMLInputElement>) => {
    if (e.key === 'Enter') {
      e.preventDefault()
      handleAuthenticate()
    }
  }

  // Scroll to bottom of messages
  useEffect(() => {
    if (messagesEndRef.current) {
      messagesEndRef.current.scrollIntoView({ behavior: 'smooth' })
    }
  }, [messages])

  // Handle sending a message
  const handleSendMessage = async () => {
    if (!inputValue.trim() || isLoading) return

    const userMessage: ChatMessage = {
      id: crypto.randomUUID(),
      content: inputValue,
      type: 'user',
      timestamp: new Date(),
    }

    setMessages((prev) => [...prev, userMessage])
    setInputValue('')
    setIsLoading(true)

    // Ensure focus remains on input field
    if (inputRef.current) {
      inputRef.current.focus()
    }

    try {
      // Send structured payload to maintain chat context
      const payload = {
        message: userMessage.content,
        conversationId,
      }

      // Use relative URL with credentials
      const response = await fetch(`/api/chat/${subdomain}`, {
        method: 'POST',
        credentials: 'same-origin',
        headers: {
          'Content-Type': 'application/json',
          'X-Requested-With': 'XMLHttpRequest',
        },
        body: JSON.stringify(payload),
      })

      if (!response.ok) {
        throw new Error('Failed to get response')
      }

      // Detect streaming response via content-type (text/plain) or absence of JSON content-type
      const contentType = response.headers.get('Content-Type') || ''

      if (contentType.includes('text/plain')) {
        // Handle streaming response
        const messageId = crypto.randomUUID()

        // Add placeholder message
        setMessages((prev) => [
          ...prev,
          {
            id: messageId,
            content: '',
            type: 'assistant',
            timestamp: new Date(),
          },
        ])

        // Stop showing loading indicator once streaming begins
        setIsLoading(false)

        // Ensure the response body exists and is a ReadableStream
        const reader = response.body?.getReader()
        if (reader) {
          const decoder = new TextDecoder()
          let done = false
          while (!done) {
            const { value, done: readerDone } = await reader.read()
            if (value) {
              const chunk = decoder.decode(value, { stream: true })
              if (chunk) {
                setMessages((prev) =>
                  prev.map((msg) =>
                    msg.id === messageId ? { ...msg, content: msg.content + chunk } : msg
                  )
                )
              }
            }
            done = readerDone
          }
        }
      } else {
        // Fallback to JSON response handling
        const responseData = await response.json()
        console.log('Message response:', responseData)

        // Handle different response formats from API
        if (
          responseData.multipleOutputs &&
          responseData.contents &&
          Array.isArray(responseData.contents)
        ) {
          // For multiple outputs, create separate assistant messages for each
          const assistantMessages = responseData.contents.map((content: any) => {
            // Format the content appropriately
            let formattedContent = content

            // Convert objects to strings for display
            if (typeof formattedContent === 'object' && formattedContent !== null) {
              try {
                formattedContent = JSON.stringify(formattedContent)
              } catch (_e) {
                formattedContent = 'Received structured data response'
              }
            }

            return {
              id: crypto.randomUUID(),
              content: formattedContent || 'No content found',
              type: 'assistant' as const,
              timestamp: new Date(),
            }
          })

          // Add all messages at once
          setMessages((prev) => [...prev, ...assistantMessages])
        } else {
          // Handle single output as before
          let messageContent = responseData.output

          if (!messageContent && responseData.content) {
            if (typeof responseData.content === 'object') {
              if (responseData.content.text) {
                messageContent = responseData.content.text
              } else {
                try {
                  messageContent = JSON.stringify(responseData.content)
                } catch (_e) {
                  messageContent = 'Received structured data response'
                }
              }
            } else {
              messageContent = responseData.content
            }
          }

          const assistantMessage: ChatMessage = {
            id: crypto.randomUUID(),
            content: messageContent || "Sorry, I couldn't process your request.",
            type: 'assistant',
            timestamp: new Date(),
          }

          setMessages((prev) => [...prev, assistantMessage])
        }
      }
    } catch (error) {
      console.error('Error sending message:', error)

      const errorMessage: ChatMessage = {
        id: crypto.randomUUID(),
        content: 'Sorry, there was an error processing your message. Please try again.',
        type: 'assistant',
        timestamp: new Date(),
      }

      setMessages((prev) => [...prev, errorMessage])
    } finally {
      setIsLoading(false)
      // Ensure focus remains on input field even after the response
      if (inputRef.current) {
        inputRef.current.focus()
      }
    }
  }

  // If error, show error message
  if (error) {
    return (
      <div className='flex min-h-screen items-center justify-center bg-gray-50'>
        <div className='mx-auto max-w-md rounded-xl bg-white p-6 shadow-md'>
          <div className='mb-2 flex items-center justify-between'>
            <a href='https://simstudio.ai' target='_blank' rel='noopener noreferrer'>
              <svg
                width='32'
                height='32'
                viewBox='0 0 50 50'
                fill='none'
                xmlns='http://www.w3.org/2000/svg'
                className='rounded-[6px]'
              >
                <rect width='50' height='50' fill='#701FFC' />
                <path
                  d='M34.1455 20.0728H16.0364C12.7026 20.0728 10 22.7753 10 26.1091V35.1637C10 38.4975 12.7026 41.2 16.0364 41.2H34.1455C37.4792 41.2 40.1818 38.4975 40.1818 35.1637V26.1091C40.1818 22.7753 37.4792 20.0728 34.1455 20.0728Z'
                  fill='#701FFC'
                  stroke='white'
                  strokeWidth='3.5'
                  strokeLinecap='round'
                  strokeLinejoin='round'
                />
                <path
                  d='M25.0919 14.0364C26.7588 14.0364 28.1101 12.6851 28.1101 11.0182C28.1101 9.35129 26.7588 8 25.0919 8C23.425 8 22.0737 9.35129 22.0737 11.0182C22.0737 12.6851 23.425 14.0364 25.0919 14.0364Z'
                  fill='#701FFC'
                  stroke='white'
                  strokeWidth='4'
                  strokeLinecap='round'
                  strokeLinejoin='round'
                />
                <path
                  d='M25.0915 14.856V19.0277V14.856ZM20.5645 32.1398V29.1216V32.1398ZM29.619 29.1216V32.1398V29.1216Z'
                  fill='#701FFC'
                />
                <path
                  d='M25.0915 14.856V19.0277M20.5645 32.1398V29.1216M29.619 29.1216V32.1398'
                  stroke='white'
                  strokeWidth='4'
                  strokeLinecap='round'
                  strokeLinejoin='round'
                />
                <circle cx='25' cy='11' r='2' fill='#701FFC' />
              </svg>
            </a>
            <HeaderLinks stars={starCount} />
          </div>
          <h2 className='mb-2 font-bold text-red-500 text-xl'>Error</h2>
          <p className='text-gray-700'>{error}</p>
        </div>
      </div>
    )
  }

  // If authentication is required, show auth form
  if (authRequired) {
    // Get title and description from the URL params or use defaults
    const title = new URLSearchParams(window.location.search).get('title') || 'chat'
    const _primaryColor = new URLSearchParams(window.location.search).get('color') || '#802FFF'

    return (
      <div className='flex min-h-screen items-center justify-center bg-gray-50'>
        <div className='mx-auto w-full max-w-md rounded-xl bg-white p-6 shadow-md'>
          <div className='mb-4 flex w-full items-center justify-between'>
            <a href='https://simstudio.ai' target='_blank' rel='noopener noreferrer'>
              <svg
                width='32'
                height='32'
                viewBox='0 0 50 50'
                fill='none'
                xmlns='http://www.w3.org/2000/svg'
                className='rounded-[6px]'
              >
                <rect width='50' height='50' fill='#701FFC' />
                <path
                  d='M34.1455 20.0728H16.0364C12.7026 20.0728 10 22.7753 10 26.1091V35.1637C10 38.4975 12.7026 41.2 16.0364 41.2H34.1455C37.4792 41.2 40.1818 38.4975 40.1818 35.1637V26.1091C40.1818 22.7753 37.4792 20.0728 34.1455 20.0728Z'
                  fill='#701FFC'
                  stroke='white'
                  strokeWidth='3.5'
                  strokeLinecap='round'
                  strokeLinejoin='round'
                />
                <path
                  d='M25.0919 14.0364C26.7588 14.0364 28.1101 12.6851 28.1101 11.0182C28.1101 9.35129 26.7588 8 25.0919 8C23.425 8 22.0737 9.35129 22.0737 11.0182C22.0737 12.6851 23.425 14.0364 25.0919 14.0364Z'
                  fill='#701FFC'
                  stroke='white'
                  strokeWidth='4'
                  strokeLinecap='round'
                  strokeLinejoin='round'
                />
                <path
                  d='M25.0915 14.856V19.0277V14.856ZM20.5645 32.1398V29.1216V32.1398ZM29.619 29.1216V32.1398V29.1216Z'
                  fill='#701FFC'
                />
                <path
                  d='M25.0915 14.856V19.0277M20.5645 32.1398V29.1216M29.619 29.1216V32.1398'
                  stroke='white'
                  strokeWidth='4'
                  strokeLinecap='round'
                  strokeLinejoin='round'
                />
                <circle cx='25' cy='11' r='2' fill='#701FFC' />
              </svg>
            </a>
            <HeaderLinks stars={starCount} />
          </div>
          <div className='mb-6 text-center'>
            <h2 className='mb-2 font-bold text-xl'>{title}</h2>
            <p className='text-gray-600'>
              {authRequired === 'password'
                ? 'This chat is password-protected. Please enter the password to continue.'
                : 'This chat requires email verification. Please enter your email to continue.'}
            </p>
          </div>

          {authError && (
            <div className='mb-4 rounded-md border border-red-200 bg-red-50 p-3 text-red-600'>
              {authError}
            </div>
          )}

          <div className='space-y-4'>
            {authRequired === 'password' ? (
              <div className='mx-auto w-full max-w-sm'>
                <div className='space-y-4 rounded-lg border border-neutral-200 bg-white p-6 shadow-sm dark:border-neutral-800 dark:bg-black/10'>
                  <div className='flex items-center justify-center'>
                    <div className='rounded-full bg-primary/10 p-2 text-primary'>
                      <Lock className='h-5 w-5' />
                    </div>
                  </div>

                  <h2 className='text-center font-medium text-lg'>Password Required</h2>
                  <p className='text-center text-neutral-500 text-sm dark:text-neutral-400'>
                    Enter the password to access this chat
                  </p>

                  <form
                    onSubmit={(e) => {
                      e.preventDefault()
                      handleAuthenticate()
                    }}
                  >
                    <div className='space-y-3'>
                      <div className='space-y-1'>
                        <label htmlFor='password' className='sr-only font-medium text-sm'>
                          Password
                        </label>
                        <Input
                          id='password'
                          type='password'
                          value={password}
                          onChange={(e) => setPassword(e.target.value)}
                          placeholder='Enter password'
                          disabled={isAuthenticating}
                          className='w-full'
                        />
                      </div>

                      {authError && (
                        <div className='text-red-600 text-sm dark:text-red-500'>{authError}</div>
                      )}

                      <Button
                        type='submit'
                        disabled={!password || isAuthenticating}
                        className='w-full'
                        style={{
                          backgroundColor: chatConfig?.customizations?.primaryColor || '#802FFF',
                        }}
                      >
                        {isAuthenticating ? (
                          <div className='flex items-center justify-center'>
                            <Loader2 className='mr-2 h-4 w-4 animate-spin' />
                            Authenticating...
                          </div>
                        ) : (
                          'Continue'
                        )}
                      </Button>
                    </div>
                  </form>
                </div>
              </div>
            ) : (
              <div className='mx-auto w-full max-w-sm'>
                <div className='space-y-4 rounded-lg border border-neutral-200 bg-white p-6 shadow-md dark:border-neutral-800 dark:bg-black/10'>
                  <div className='flex items-center justify-center'>
                    <div className='rounded-full bg-primary/10 p-2 text-primary'>
                      <Mail className='h-5 w-5' />
                    </div>
                  </div>

                  <h2 className='text-center font-medium text-lg'>Email Verification</h2>

                  {!showOtpVerification ? (
                    // Step 1: Email Input
                    <>
                      <p className='text-center text-neutral-500 text-sm dark:text-neutral-400'>
                        Enter your email address to access this chat
                      </p>

                      <div className='space-y-3'>
                        <div className='space-y-1'>
                          <label htmlFor='email' className='sr-only font-medium text-sm'>
                            Email
                          </label>
                          <Input
                            id='email'
                            type='email'
                            placeholder='Email address'
                            value={email}
                            onChange={(e) => setEmail(e.target.value)}
                            onKeyDown={handleEmailKeyDown}
                            disabled={isSendingOtp || isAuthenticating}
                            className='w-full'
                          />
                        </div>

                        {authError && (
                          <div className='text-red-600 text-sm dark:text-red-500'>{authError}</div>
                        )}

                        <Button
                          onClick={handleAuthenticate}
                          disabled={!email || isSendingOtp || isAuthenticating}
                          className='w-full'
                          style={{
                            backgroundColor: chatConfig?.customizations?.primaryColor || '#802FFF',
                          }}
                        >
                          {isSendingOtp ? (
                            <div className='flex items-center justify-center'>
                              <Loader2 className='mr-2 h-4 w-4 animate-spin' />
                              Sending Code...
                            </div>
                          ) : (
                            'Continue'
                          )}
                        </Button>
                      </div>
                    </>
                  ) : (
                    // Step 2: OTP Verification with OTPInputForm
                    <>
                      <p className='text-center text-neutral-500 text-sm dark:text-neutral-400'>
                        Enter the verification code sent to
                      </p>
                      <p className='mb-3 break-all text-center font-medium text-sm'>{email}</p>

                      <OTPInputForm
                        onSubmit={(value) => {
                          setOtpValue(value)
                          handleAuthenticate()
                        }}
                        isLoading={isVerifyingOtp}
                        error={authError}
                      />

                      <div className='flex items-center justify-center pt-3'>
                        <button
                          type='button'
                          onClick={() => handleResendOtp()}
                          disabled={isSendingOtp}
                          className='text-primary text-sm hover:underline disabled:opacity-50'
                        >
                          {isSendingOtp ? 'Sending...' : 'Resend code'}
                        </button>
                        <span className='mx-2 text-neutral-300 dark:text-neutral-600'>•</span>
                        <button
                          type='button'
                          onClick={() => {
                            setShowOtpVerification(false)
                            setOtpValue('')
                            setAuthError(null)
                          }}
                          className='text-primary text-sm hover:underline'
                        >
                          Change email
                        </button>
                      </div>
                    </>
                  )}
                </div>
              </div>
            )}
          </div>
        </div>
      </div>
    )
  }

  // Loading state while fetching config
  if (!chatConfig) {
    return (
      <div className='flex min-h-screen items-center justify-center bg-gray-50'>
        <div className='animate-pulse text-center'>
          <div className='mx-auto mb-4 h-8 w-48 rounded bg-gray-200' />
          <div className='mx-auto h-4 w-64 rounded bg-gray-200' />
        </div>
      </div>
    )
  }

  return (
    <div className='fixed inset-0 z-[100] flex flex-col bg-background'>
      <style jsx>{`
        @keyframes growShrink {
          0%,
          100% {
            transform: scale(0.9);
          }
          50% {
            transform: scale(1.1);
          }
        }
        .loading-dot {
          animation: growShrink 1.5s infinite ease-in-out;
        }
      `}</style>

      {/* Header with title and links */}
      <div className='flex items-center justify-between px-6 py-4'>
        <div className='flex items-center gap-2'>
          {chatConfig?.customizations?.logoUrl && (
            <img
              src={chatConfig.customizations.logoUrl}
              alt={`${chatConfig?.title || 'Chat'} logo`}
              className='h-6 w-6 object-contain'
            />
          )}
          <h2 className='font-medium text-lg'>
            {chatConfig?.customizations?.headerText || chatConfig?.title || 'Chat'}
          </h2>
        </div>
        <div className='flex items-center gap-3'>
          <HeaderLinks stars={starCount} />
          {!chatConfig?.customizations?.logoUrl && (
            <a href='https://simstudio.ai' target='_blank' rel='noopener noreferrer'>
              <svg
                width='32'
                height='32'
                viewBox='0 0 50 50'
                fill='none'
                xmlns='http://www.w3.org/2000/svg'
                className='rounded-[6px]'
              >
                <rect width='50' height='50' fill='#701FFC' />
                <path
                  d='M34.1455 20.0728H16.0364C12.7026 20.0728 10 22.7753 10 26.1091V35.1637C10 38.4975 12.7026 41.2 16.0364 41.2H34.1455C37.4792 41.2 40.1818 38.4975 40.1818 35.1637V26.1091C40.1818 22.7753 37.4792 20.0728 34.1455 20.0728Z'
                  fill='#701FFC'
                  stroke='white'
                  strokeWidth='3.5'
                  strokeLinecap='round'
                  strokeLinejoin='round'
                />
                <path
                  d='M25.0919 14.0364C26.7588 14.0364 28.1101 12.6851 28.1101 11.0182C28.1101 9.35129 26.7588 8 25.0919 8C23.425 8 22.0737 9.35129 22.0737 11.0182C22.0737 12.6851 23.425 14.0364 25.0919 14.0364Z'
                  fill='#701FFC'
                  stroke='white'
                  strokeWidth='4'
                  strokeLinecap='round'
                  strokeLinejoin='round'
                />
                <path
                  d='M25.0915 14.856V19.0277V14.856ZM20.5645 32.1398V29.1216V32.1398ZM29.619 29.1216V32.1398V29.1216Z'
                  fill='#701FFC'
                />
                <path
                  d='M25.0915 14.856V19.0277M20.5645 32.1398V29.1216M29.619 29.1216V32.1398'
                  stroke='white'
                  strokeWidth='4'
                  strokeLinecap='round'
                  strokeLinejoin='round'
                />
                <circle cx='25' cy='11' r='2' fill='#701FFC' />
              </svg>
            </a>
          )}
        </div>
      </div>

      {/* Messages container */}
      <div ref={messagesContainerRef} className='flex-1 overflow-y-auto'>
        <div className='mx-auto max-w-3xl'>
          {messages.length === 0 ? (
            <div className='flex h-full flex-col items-center justify-center px-4 py-10'>
              <div className='space-y-2 text-center'>
                <h3 className='font-medium text-lg'>How can I help you today?</h3>
                <p className='text-muted-foreground text-sm'>
                  {chatConfig.description || 'Ask me anything.'}
                </p>
              </div>
            </div>
          ) : (
            messages.map((message) => <ClientChatMessage key={message.id} message={message} />)
          )}

          {/* Loading indicator (shows only when executing) */}
          {isLoading && (
            <div className='px-4 py-5'>
              <div className='mx-auto max-w-3xl'>
                <div className='flex'>
                  <div className='max-w-[80%]'>
                    <div className='flex h-6 items-center'>
                      <div className='loading-dot h-3 w-3 rounded-full bg-black dark:bg-black' />
                    </div>
                  </div>
                </div>
              </div>
            </div>
          )}

          <div ref={messagesEndRef} className='h-1' />
        </div>
      </div>

      {/* Input area (fixed at bottom) */}
      <div className='bg-background p-6'>
        <div className='mx-auto max-w-3xl'>
          <div className='relative rounded-2xl border bg-background shadow-sm'>
            <Input
              ref={inputRef}
              value={inputValue}
              onChange={(e) => setInputValue(e.target.value)}
              onKeyDown={handleKeyDown}
              placeholder='Message...'
              className='min-h-[50px] flex-1 rounded-2xl border-0 bg-transparent py-7 pr-16 pl-6 text-base focus-visible:ring-0 focus-visible:ring-offset-0'
            />
            <Button
              onClick={handleSendMessage}
              size='icon'
              disabled={!inputValue.trim() || isLoading}
              className='-translate-y-1/2 absolute top-1/2 right-3 h-10 w-10 rounded-xl bg-black p-0 text-white hover:bg-gray-800'
            >
              <ArrowUp className='h-4 w-4' />
            </Button>
          </div>
        </div>
      </div>
    </div>
  )
}
@@ -4,8 +4,10 @@ import type React from 'react'
 import { useEffect, useRef, useState } from 'react'
 import { motion } from 'framer-motion'
 import { Send, Square } from 'lucide-react'
+import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from '@/components/ui/tooltip'
+import { VoiceInput } from './voice-input'

-const PLACEHOLDER = 'Enter a message'
+const PLACEHOLDER = 'Enter a message or click the mic to speak'
 const MAX_TEXTAREA_HEIGHT = 160 // Max height in pixels (e.g., for about 4-5 lines)

 const containerVariants = {
@@ -20,15 +22,21 @@ const containerVariants = {
 } as const

 export const ChatInput: React.FC<{
-  onSubmit?: (value: string) => void
+  onSubmit?: (value: string, isVoiceInput?: boolean) => void
   isStreaming?: boolean
   onStopStreaming?: () => void
-}> = ({ onSubmit, isStreaming = false, onStopStreaming }) => {
+  onVoiceStart?: () => void
+  voiceOnly?: boolean
+}> = ({ onSubmit, isStreaming = false, onStopStreaming, onVoiceStart, voiceOnly = false }) => {
   const wrapperRef = useRef<HTMLDivElement>(null)
   const textareaRef = useRef<HTMLTextAreaElement>(null) // Ref for the textarea
   const [isActive, setIsActive] = useState(false)
   const [inputValue, setInputValue] = useState('')

+  // Check if speech-to-text is available in the browser
+  const isSttAvailable =
+    typeof window !== 'undefined' && !!(window.SpeechRecognition || window.webkitSpeechRecognition)

   // Function to adjust textarea height
   const adjustTextareaHeight = () => {
     if (textareaRef.current) {
@@ -84,7 +92,7 @@ export const ChatInput: React.FC<{

   const handleSubmit = () => {
     if (!inputValue.trim()) return
-    onSubmit?.(inputValue.trim())
+    onSubmit?.(inputValue.trim(), false) // false = not voice input
     setInputValue('')
     if (textareaRef.current) {
       textareaRef.current.style.height = 'auto' // Reset height after submit
@@ -97,114 +105,143 @@ export const ChatInput: React.FC<{
     setInputValue(e.target.value)
   }

-  return (
-    <div className='fixed right-0 bottom-0 left-0 flex w-full items-center justify-center bg-gradient-to-t from-white to-transparent pb-4 text-black'>
-      <motion.div
-        ref={wrapperRef}
-        className='w-full max-w-3xl px-4'
-        variants={containerVariants}
-        animate={'expanded'}
-        initial='collapsed'
-        style={{
-          overflow: 'hidden',
-          borderRadius: 32,
-          background: '#fff',
-          border: '1px solid rgba(0,0,0,0.1)',
-          marginLeft: 'auto',
-          marginRight: 'auto',
-        }}
-        onClick={handleActivate}
-      >
-        <div className='flex h-full w-full items-center rounded-full p-2'>
-          {/* Text Input & Placeholder */}
-          <div className='relative mx-2 flex-1'>
-            <textarea
-              ref={textareaRef}
-              rows={1}
-              value={inputValue}
-              onChange={handleInputChange}
-              onKeyDown={(e: React.KeyboardEvent<HTMLTextAreaElement>) => {
-                // Submit on Enter without Shift
-                if (e.key === 'Enter' && !e.shiftKey) {
-                  e.preventDefault()
-                  handleSubmit()
-                }
-                // Submit on Cmd/Ctrl + Enter for consistency with other chat apps
-                else if (e.key === 'Enter' && (e.metaKey || e.ctrlKey)) {
-                  e.preventDefault()
-                  handleSubmit()
-                }
-                // Allow Enter with Shift for newline by not preventing default
-              }}
-              className='w-full flex-1 resize-none rounded-md border-0 bg-transparent py-3 font-normal text-base outline-0 transition-height duration-100 ease-out'
-              style={{
-                position: 'relative',
-                zIndex: 1,
-                lineHeight: '1.5',
-                minHeight: '44px', // Set a fixed min-height for consistent text alignment
-                verticalAlign: 'middle',
-                paddingLeft: '12px', // Add left padding to move cursor to the right
-              }}
-              onFocus={handleActivate}
-              onBlur={() => {
-                if (!inputValue) {
-                  setIsActive(false)
-                  if (textareaRef.current) {
-                    textareaRef.current.style.height = 'auto'
-                    textareaRef.current.style.overflowY = 'hidden'
-                  }
-                }
-              }}
-              placeholder=' ' /* keep native placeholder empty – we draw ours */
-            />
-            <div className='pointer-events-none absolute top-0 left-0 flex h-full w-full items-center'>
-              {!isActive && !inputValue && (
-                <div
-                  className='-translate-y-1/2 absolute top-1/2 left-3 select-none text-gray-400'
-                  style={{
-                    whiteSpace: 'nowrap',
-                    zIndex: 0,
-                    background:
-                      'linear-gradient(90deg, rgba(150,150,150,0.2) 0%, rgba(150,150,150,0.8) 50%, rgba(150,150,150,0.2) 100%)',
-                    backgroundSize: '200% 100%',
-                    WebkitBackgroundClip: 'text',
-                    WebkitTextFillColor: 'transparent',
-                    animation: 'shimmer 10s infinite linear',
-                  }}
-                >
-                  {PLACEHOLDER}
-                  <style jsx global>{`
-                    @keyframes shimmer {
-                      0% {
-                        background-position: 200% 0;
-                      }
-                      100% {
-                        background-position: -200% 0;
-                      }
-                    }
-                  `}</style>
-                </div>
-              )}
-            </div>
-          </div>
-
-          <button
-            className='flex items-center justify-center rounded-full bg-black p-3 text-white hover:bg-zinc-700'
-            title={isStreaming ? 'Stop' : 'Send'}
-            type='button'
-            onClick={(e) => {
-              e.stopPropagation()
-              if (isStreaming) {
-                onStopStreaming?.()
-              } else {
-                handleSubmit()
-              }
-            }}
-          >
-            {isStreaming ? <Square size={18} /> : <Send size={18} />}
-          </button>
-        </div>
-      </motion.div>
-    </div>
+  // Handle voice start with smooth transition to voice-first mode
+  const handleVoiceStart = () => {
+    onVoiceStart?.() // This will trigger the voice-first mode transition
+  }
+
+  // Voice-only mode interface (for voice-first UI)
+  if (voiceOnly) {
+    return (
+      <div className='flex items-center justify-center'>
+        {/* Voice Input Only */}
+        {isSttAvailable && (
+          <TooltipProvider>
+            <Tooltip>
+              <TooltipTrigger asChild>
+                <div>
+                  <VoiceInput onVoiceStart={handleVoiceStart} disabled={isStreaming} large={true} />
+                </div>
+              </TooltipTrigger>
+              <TooltipContent side='top' className='border border-gray-200 bg-white text-gray-900'>
+                <p>Start voice conversation</p>
+              </TooltipContent>
+            </Tooltip>
+          </TooltipProvider>
+        )}
+      </div>
+    )
+  }
+
+  return (
+    <>
+      <div className='fixed right-0 bottom-0 left-0 flex w-full items-center justify-center bg-gradient-to-t from-white to-transparent pb-4 text-black'>
+        <motion.div
+          ref={wrapperRef}
+          className='w-full max-w-3xl px-4'
+          variants={containerVariants}
+          animate={'expanded'}
+          initial='collapsed'
+          style={{
+            overflow: 'hidden',
+            borderRadius: 32,
+            background: '#fff',
+            border: '1px solid rgba(0,0,0,0.1)',
+            marginLeft: 'auto',
+            marginRight: 'auto',
+          }}
+          onClick={handleActivate}
+        >
+          <div className='flex h-full w-full items-center rounded-full p-2'>
+            {/* Voice Input with Tooltip */}
+            {isSttAvailable && (
+              <div className='mr-2'>
+                <TooltipProvider>
+                  <Tooltip>
+                    <TooltipTrigger asChild>
+                      <div>
+                        <VoiceInput onVoiceStart={handleVoiceStart} disabled={isStreaming} />
+                      </div>
+                    </TooltipTrigger>
+                    <TooltipContent side='top'>
+                      <p>Start voice conversation</p>
+                      <span className='text-gray-500 text-xs'>Click to enter voice mode</span>
+                    </TooltipContent>
+                  </Tooltip>
+                </TooltipProvider>
+              </div>
+            )}
+
+            {/* Text Input & Placeholder */}
+            <div className='relative min-h-[40px] flex-1'>
+              <textarea
+                ref={textareaRef}
+                value={inputValue}
+                onChange={handleInputChange}
+                className='w-full resize-none overflow-hidden bg-transparent px-3 py-3 text-base outline-none placeholder:text-gray-400'
+                placeholder={isActive ? '' : ''}
+                rows={1}
+                style={{
+                  minHeight: '40px',
+                  lineHeight: '1.4',
+                }}
+                onKeyDown={(e) => {
+                  if (e.key === 'Enter' && !e.shiftKey) {
+                    e.preventDefault()
+                    handleSubmit()
+                  }
+                }}
+              />
+
+              <div className='pointer-events-none absolute top-0 left-0 flex h-full w-full items-center'>
+                {!isActive && !inputValue && (
+                  <div
+                    className='-translate-y-1/2 absolute top-1/2 left-3 select-none text-gray-400'
+                    style={{
+                      whiteSpace: 'nowrap',
+                      zIndex: 0,
+                      background:
+                        'linear-gradient(90deg, rgba(150,150,150,0.2) 0%, rgba(150,150,150,0.8) 50%, rgba(150,150,150,0.2) 100%)',
+                      backgroundSize: '200% 100%',
+                      WebkitBackgroundClip: 'text',
+                      WebkitTextFillColor: 'transparent',
+                      animation: 'shimmer 10s infinite linear',
+                    }}
+                  >
+                    {PLACEHOLDER}
+                    <style jsx global>{`
+                      @keyframes shimmer {
+                        0% {
+                          background-position: 200% 0;
+                        }
+                        100% {
+                          background-position: -200% 0;
+                        }
+                      }
+                    `}</style>
+                  </div>
+                )}
+              </div>
+            </div>
+
+            <button
+              className='flex items-center justify-center rounded-full bg-black p-3 text-white hover:bg-zinc-700'
+              title={isStreaming ? 'Stop' : 'Send'}
+              type='button'
+              onClick={(e) => {
+                e.stopPropagation()
+                if (isStreaming) {
+                  onStopStreaming?.()
+                } else {
+                  handleSubmit()
+                }
+              }}
+            >
+              {isStreaming ? <Square size={18} /> : <Send size={18} />}
+            </button>
+          </div>
+        </motion.div>
+      </div>
+    </>
+  )
+}
apps/sim/app/chat/[subdomain]/components/input/voice-input.tsx (new file, 114 lines)
@@ -0,0 +1,114 @@
'use client'

import { useCallback, useEffect, useState } from 'react'
import { motion } from 'framer-motion'
import { Mic } from 'lucide-react'

interface SpeechRecognitionEvent extends Event {
  resultIndex: number
  results: SpeechRecognitionResultList
}

interface SpeechRecognitionErrorEvent extends Event {
  error: string
  message?: string
}

interface SpeechRecognition extends EventTarget {
  continuous: boolean
  interimResults: boolean
  lang: string
  start(): void
  stop(): void
  abort(): void
  onstart: ((this: SpeechRecognition, ev: Event) => any) | null
  onend: ((this: SpeechRecognition, ev: Event) => any) | null
  onresult: ((this: SpeechRecognition, ev: SpeechRecognitionEvent) => any) | null
  onerror: ((this: SpeechRecognition, ev: SpeechRecognitionErrorEvent) => any) | null
}

interface SpeechRecognitionStatic {
  new (): SpeechRecognition
}

declare global {
  interface Window {
    SpeechRecognition?: SpeechRecognitionStatic
    webkitSpeechRecognition?: SpeechRecognitionStatic
  }
}

interface VoiceInputProps {
  onVoiceStart: () => void
  isListening?: boolean
  disabled?: boolean
  large?: boolean
}

export function VoiceInput({
  onVoiceStart,
  isListening = false,
  disabled = false,
  large = false,
}: VoiceInputProps) {
  const [isSupported, setIsSupported] = useState(false)

  // Check if speech recognition is supported
  useEffect(() => {
    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition
    setIsSupported(!!SpeechRecognition)
  }, [])

  const handleVoiceClick = useCallback(() => {
    if (disabled) return
    onVoiceStart()
  }, [disabled, onVoiceStart])

  if (!isSupported) {
    return null
  }

  if (large) {
    return (
      <div className='flex flex-col items-center'>
        {/* Large Voice Button */}
        <motion.button
          type='button'
          onClick={handleVoiceClick}
          disabled={disabled}
          className={`flex items-center justify-center rounded-full border-2 p-6 transition-all duration-200 ${
            isListening
              ? 'border-red-400 bg-red-500/20 text-red-600 hover:bg-red-500/30'
              : 'border-blue-300 bg-blue-500/10 text-blue-600 hover:bg-blue-500/20'
          } ${disabled ? 'cursor-not-allowed opacity-50' : 'cursor-pointer'}`}
          whileHover={{ scale: 1.05 }}
          whileTap={{ scale: 0.95 }}
          title='Start voice conversation'
        >
          <Mic size={32} />
        </motion.button>
      </div>
    )
  }

  return (
    <div className='flex items-center'>
      {/* Voice Button */}
      <motion.button
        type='button'
        onClick={handleVoiceClick}
        disabled={disabled}
        className={`flex items-center justify-center rounded-full p-2 transition-all duration-200 ${
          isListening
            ? 'bg-red-500 text-white hover:bg-red-600'
            : 'bg-gray-100 text-gray-600 hover:bg-gray-200'
        } ${disabled ? 'cursor-not-allowed opacity-50' : 'cursor-pointer'}`}
        whileHover={{ scale: 1.05 }}
        whileTap={{ scale: 0.95 }}
        title='Start voice conversation'
      >
        <Mic size={16} />
      </motion.button>
    </div>
  )
}
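
Usage sketch (not part of this commit): the two rendering variants of VoiceInput. Only the VoiceInputProps contract above comes from this file; the parent component and its state are hypothetical.

// Hypothetical parent: a compact trigger for an input bar, a large one for full-screen voice mode.
function VoiceTriggers({ enterVoiceMode, busy }: { enterVoiceMode: () => void; busy: boolean }) {
  return (
    <>
      <VoiceInput onVoiceStart={enterVoiceMode} disabled={busy} />
      <VoiceInput onVoiceStart={enterVoiceMode} large />
    </>
  )
}
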
@@ -26,6 +26,7 @@ function EnhancedMarkdownRenderer({ content }: { content: string }) {
export const ClientChatMessage = memo(
  function ClientChatMessage({ message }: { message: ChatMessage }) {
    const [isCopied, setIsCopied] = useState(false)

    const isJsonObject = useMemo(() => {
      return typeof message.content === 'object' && message.content !== null
    }, [message.content])
@@ -67,11 +68,10 @@ export const ClientChatMessage = memo(
            )}
          </div>
        </div>
        {message.type === 'assistant' &&
          !isJsonObject &&
          !message.isInitialMessage &&
          !message.isStreaming && (
            <div className='flex justify-start'>
        {message.type === 'assistant' && !isJsonObject && !message.isInitialMessage && (
          <div className='flex items-center justify-start space-x-2'>
            {/* Copy Button - Only show when not streaming */}
            {!message.isStreaming && (
              <TooltipProvider>
                <Tooltip delayDuration={300}>
                  <TooltipTrigger asChild>
@@ -101,8 +101,9 @@ export const ClientChatMessage = memo(
                  </TooltipContent>
                </Tooltip>
              </TooltipProvider>
            </div>
          )}
        )}
      </div>
    )}
  </div>
</div>
</div>

@@ -0,0 +1,503 @@
'use client'

import { useCallback, useEffect, useRef } from 'react'
import * as THREE from 'three'
import { createLogger } from '@/lib/logs/console-logger'

const logger = createLogger('Particles')

interface ShaderUniforms {
  u_time: { type: string; value: number }
  u_frequency: { type: string; value: number }
  u_red: { type: string; value: number }
  u_green: { type: string; value: number }
  u_blue: { type: string; value: number }
}

interface ParticlesProps {
  audioLevels: number[]
  isListening: boolean
  isPlayingAudio: boolean
  isStreaming: boolean
  isMuted: boolean
  isProcessingInterruption?: boolean
  className?: string
}

class SimpleBloomComposer {
  private renderer: THREE.WebGLRenderer
  private scene: THREE.Scene
  private camera: THREE.Camera
  private bloomScene: THREE.Scene
  private bloomMaterial: THREE.ShaderMaterial
  private renderTarget: THREE.WebGLRenderTarget
  private quad: THREE.Mesh

  constructor(renderer: THREE.WebGLRenderer, scene: THREE.Scene, camera: THREE.Camera) {
    this.renderer = renderer
    this.scene = scene
    this.camera = camera

    this.bloomScene = new THREE.Scene()

    this.renderTarget = new THREE.WebGLRenderTarget(
      renderer.domElement.width,
      renderer.domElement.height,
      {
        minFilter: THREE.LinearFilter,
        magFilter: THREE.LinearFilter,
        format: THREE.RGBAFormat,
      }
    )

    this.bloomMaterial = new THREE.ShaderMaterial({
      uniforms: {
        tDiffuse: { value: null },
        strength: { value: 1.5 },
        threshold: { value: 0.3 },
        radius: { value: 0.8 },
      },
      vertexShader: `
        varying vec2 vUv;
        void main() {
          vUv = uv;
          gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
        }
      `,
      fragmentShader: `
        uniform sampler2D tDiffuse;
        uniform float strength;
        uniform float threshold;
        uniform float radius;
        varying vec2 vUv;

        void main() {
          vec4 color = texture2D(tDiffuse, vUv);

          // Simple bloom effect
          float brightness = dot(color.rgb, vec3(0.299, 0.587, 0.114));
          if (brightness > threshold) {
            color.rgb *= strength;
          }

          gl_FragColor = color;
        }
      `,
    })

    const geometry = new THREE.PlaneGeometry(2, 2)
    this.quad = new THREE.Mesh(geometry, this.bloomMaterial)
    this.bloomScene.add(this.quad)
  }

  render() {
    this.renderer.setRenderTarget(this.renderTarget)
    this.renderer.render(this.scene, this.camera)

    this.bloomMaterial.uniforms.tDiffuse.value = this.renderTarget.texture
    this.renderer.setRenderTarget(null)
    this.renderer.render(this.bloomScene, new THREE.OrthographicCamera(-1, 1, 1, -1, 0, 1))
  }

  setSize(width: number, height: number) {
    this.renderTarget.setSize(width, height)
  }

  dispose() {
    this.renderTarget.dispose()
    this.bloomMaterial.dispose()
  }
}

const vertexShader = `
  vec3 mod289(vec3 x)
  {
    return x - floor(x * (1.0 / 289.0)) * 289.0;
  }

  vec4 mod289(vec4 x)
  {
    return x - floor(x * (1.0 / 289.0)) * 289.0;
  }

  vec4 permute(vec4 x)
  {
    return mod289(((x*34.0)+10.0)*x);
  }

  vec4 taylorInvSqrt(vec4 r)
  {
    return 1.79284291400159 - 0.85373472095314 * r;
  }

  vec3 fade(vec3 t) {
    return t*t*t*(t*(t*6.0-15.0)+10.0);
  }

  float pnoise(vec3 P, vec3 rep)
  {
    vec3 Pi0 = mod(floor(P), rep); // Integer part, modulo period
    vec3 Pi1 = mod(Pi0 + vec3(1.0), rep); // Integer part + 1, mod period
    Pi0 = mod289(Pi0);
    Pi1 = mod289(Pi1);
    vec3 Pf0 = fract(P); // Fractional part for interpolation
    vec3 Pf1 = Pf0 - vec3(1.0); // Fractional part - 1.0
    vec4 ix = vec4(Pi0.x, Pi1.x, Pi0.x, Pi1.x);
    vec4 iy = vec4(Pi0.yy, Pi1.yy);
    vec4 iz0 = Pi0.zzzz;
    vec4 iz1 = Pi1.zzzz;

    vec4 ixy = permute(permute(ix) + iy);
    vec4 ixy0 = permute(ixy + iz0);
    vec4 ixy1 = permute(ixy + iz1);

    vec4 gx0 = ixy0 * (1.0 / 7.0);
    vec4 gy0 = fract(floor(gx0) * (1.0 / 7.0)) - 0.5;
    gx0 = fract(gx0);
    vec4 gz0 = vec4(0.5) - abs(gx0) - abs(gy0);
    vec4 sz0 = step(gz0, vec4(0.0));
    gx0 -= sz0 * (step(0.0, gx0) - 0.5);
    gy0 -= sz0 * (step(0.0, gy0) - 0.5);

    vec4 gx1 = ixy1 * (1.0 / 7.0);
    vec4 gy1 = fract(floor(gx1) * (1.0 / 7.0)) - 0.5;
    gx1 = fract(gx1);
    vec4 gz1 = vec4(0.5) - abs(gx1) - abs(gy1);
    vec4 sz1 = step(gz1, vec4(0.0));
    gx1 -= sz1 * (step(0.0, gx1) - 0.5);
    gy1 -= sz1 * (step(0.0, gy1) - 0.5);

    vec3 g000 = vec3(gx0.x,gy0.x,gz0.x);
    vec3 g100 = vec3(gx0.y,gy0.y,gz0.y);
    vec3 g010 = vec3(gx0.z,gy0.z,gz0.z);
    vec3 g110 = vec3(gx0.w,gy0.w,gz0.w);
    vec3 g001 = vec3(gx1.x,gy1.x,gz1.x);
    vec3 g101 = vec3(gx1.y,gy1.y,gz1.y);
    vec3 g011 = vec3(gx1.z,gy1.z,gz1.z);
    vec3 g111 = vec3(gx1.w,gy1.w,gz1.w);

    vec4 norm0 = taylorInvSqrt(vec4(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110)));
    g000 *= norm0.x;
    g010 *= norm0.y;
    g100 *= norm0.z;
    g110 *= norm0.w;
    vec4 norm1 = taylorInvSqrt(vec4(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111)));
    g001 *= norm1.x;
    g011 *= norm1.y;
    g101 *= norm1.z;
    g111 *= norm1.w;

    float n000 = dot(g000, Pf0);
    float n100 = dot(g100, vec3(Pf1.x, Pf0.yz));
    float n010 = dot(g010, vec3(Pf0.x, Pf1.y, Pf0.z));
    float n110 = dot(g110, vec3(Pf1.xy, Pf0.z));
    float n001 = dot(g001, vec3(Pf0.xy, Pf1.z));
    float n101 = dot(g101, vec3(Pf1.x, Pf0.y, Pf1.z));
    float n011 = dot(g011, vec3(Pf0.x, Pf1.yz));
    float n111 = dot(g111, Pf1);

    vec3 fade_xyz = fade(Pf0);
    vec4 n_z = mix(vec4(n000, n100, n010, n110), vec4(n001, n101, n011, n111), fade_xyz.z);
    vec2 n_yz = mix(n_z.xy, n_z.zw, fade_xyz.y);
    float n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x);
    return 2.2 * n_xyz;
  }

  uniform float u_time;
  uniform float u_frequency;

  void main() {
    float noise = 5. * pnoise(position + u_time, vec3(10.));

    float displacement = (u_frequency / 30.) * (noise / 10.);

    vec3 newPosition = position + normal * displacement;
    gl_Position = projectionMatrix * modelViewMatrix * vec4(newPosition, 1.0);
  }
`

const fragmentShader = `
  uniform float u_red;
  uniform float u_blue;
  uniform float u_green;

  void main() {
    gl_FragColor = vec4(vec3(u_red, u_green, u_blue), 1.0);
  }
`

export function ParticlesVisualization({
  audioLevels,
  isListening,
  isPlayingAudio,
  isStreaming,
  isMuted,
  isProcessingInterruption,
  className,
}: ParticlesProps) {
  const containerRef = useRef<HTMLDivElement>(null)
  const rendererRef = useRef<THREE.WebGLRenderer | null>(null)
  const sceneRef = useRef<THREE.Scene | null>(null)
  const cameraRef = useRef<THREE.PerspectiveCamera | null>(null)
  const meshRef = useRef<THREE.Mesh | null>(null)
  const uniformsRef = useRef<ShaderUniforms | null>(null)
  const clockRef = useRef<THREE.Clock | null>(null)
  const bloomComposerRef = useRef<SimpleBloomComposer | null>(null)
  const animationFrameRef = useRef<number>(0)
  const mouseRef = useRef({ x: 0, y: 0 })
  const isInitializedRef = useRef(false)

  const cleanup = useCallback(() => {
    if (animationFrameRef.current) {
      cancelAnimationFrame(animationFrameRef.current)
      animationFrameRef.current = 0
    }

    if (bloomComposerRef.current) {
      bloomComposerRef.current.dispose()
      bloomComposerRef.current = null
    }

    if (rendererRef.current) {
      if (rendererRef.current.domElement?.parentNode) {
        rendererRef.current.domElement.parentNode.removeChild(rendererRef.current.domElement)
      }
      rendererRef.current.dispose()
      rendererRef.current = null
    }

    sceneRef.current = null
    cameraRef.current = null
    meshRef.current = null
    uniformsRef.current = null
    clockRef.current = null
    isInitializedRef.current = false
  }, [])

  useEffect(() => {
    if (!containerRef.current || isInitializedRef.current) return

    const container = containerRef.current
    const containerWidth = 400
    const containerHeight = 400

    isInitializedRef.current = true

    while (container.firstChild) {
      container.removeChild(container.firstChild)
    }

    const renderer = new THREE.WebGLRenderer({ antialias: true, alpha: true })
    renderer.setSize(containerWidth, containerHeight)
    renderer.setClearColor(0x000000, 0)
    renderer.outputColorSpace = THREE.SRGBColorSpace
    container.appendChild(renderer.domElement)
    rendererRef.current = renderer

    const scene = new THREE.Scene()
    sceneRef.current = scene

    const camera = new THREE.PerspectiveCamera(45, containerWidth / containerHeight, 0.1, 1000)
    camera.position.set(0, -2, 14)
    camera.lookAt(0, 0, 0)
    cameraRef.current = camera

    const uniforms = {
      u_time: { type: 'f', value: 0.0 },
      u_frequency: { type: 'f', value: 0.0 },
      u_red: { type: 'f', value: 0.8 },
      u_green: { type: 'f', value: 0.6 },
      u_blue: { type: 'f', value: 1.0 },
    }
    uniformsRef.current = uniforms

    let mat: THREE.Material
    try {
      mat = new THREE.ShaderMaterial({
        uniforms,
        vertexShader,
        fragmentShader,
      })
    } catch (error) {
      logger.error('❌ Shader compilation error, using fallback material:', error)
      mat = new THREE.MeshBasicMaterial({
        color: 0xb794f6, // Light purple color
        wireframe: true,
      })
    }

    const geo = new THREE.IcosahedronGeometry(4, 30) // Match tutorial: radius 4, subdivisions 30
    const mesh = new THREE.Mesh(geo, mat)

    if (mat instanceof THREE.ShaderMaterial || mat instanceof THREE.MeshBasicMaterial) {
      mat.wireframe = true
    }

    scene.add(mesh)
    meshRef.current = mesh

    const bloomComposer = new SimpleBloomComposer(renderer, scene, camera)
    bloomComposerRef.current = bloomComposer

    const clock = new THREE.Clock()
    clockRef.current = clock

    const handleMouseMove = (e: MouseEvent) => {
      const rect = container.getBoundingClientRect()
      const windowHalfX = containerWidth / 2
      const windowHalfY = containerHeight / 2
      mouseRef.current.x = (e.clientX - rect.left - windowHalfX) / 100
      mouseRef.current.y = (e.clientY - rect.top - windowHalfY) / 100
    }

    container.addEventListener('mousemove', handleMouseMove)

    const updateCameraPosition = () => {
      if (!camera || !scene) return
      camera.position.x += (mouseRef.current.x - camera.position.x) * 0.05
      camera.position.y += (-mouseRef.current.y - camera.position.y) * 0.5
      camera.lookAt(scene.position)
    }

    const calculateAudioIntensity = (elapsedTime: number, avgLevel: number) => {
      const baselineIntensity = 8 + Math.sin(elapsedTime * 0.5) * 3
      let audioIntensity = baselineIntensity

      if (isMuted) {
        // When muted, only show minimal baseline animation
        audioIntensity = baselineIntensity * 0.2
      } else if (isProcessingInterruption) {
        // Special pulsing effect during interruption processing
        audioIntensity = 35 + Math.sin(elapsedTime * 4) * 10
      } else if (isPlayingAudio) {
        // Strong animation when AI is speaking - use simulated levels + enhancement
        const aiIntensity = 60 + Math.sin(elapsedTime * 3) * 20
        audioIntensity = Math.max(avgLevel * 0.8, aiIntensity)
      } else if (isStreaming) {
        // Pulsing animation when AI is thinking/streaming
        audioIntensity = 40 + Math.sin(elapsedTime * 2) * 15
      } else if (isListening && avgLevel > 0) {
        // Scale user input more dramatically for better visual feedback
        const userVoiceIntensity = avgLevel * 2.5 // Amplify user voice significantly
        audioIntensity = Math.max(userVoiceIntensity, baselineIntensity * 1.5)

        // Add some dynamic variation based on audio levels
        const variationFactor = Math.min(avgLevel / 20, 1) // Cap at reasonable level
        audioIntensity += Math.sin(elapsedTime * 8) * (10 * variationFactor)
      } else {
        // Idle state - subtle breathing animation
        audioIntensity = baselineIntensity
      }

      // Clamp to reasonable range
      audioIntensity = Math.max(audioIntensity, 3) // Never completely still
      audioIntensity = Math.min(audioIntensity, 120) // Prevent excessive animation

      return audioIntensity
    }

    const updateShaderColors = (
      uniforms: ShaderUniforms,
      elapsedTime: number,
      avgLevel: number
    ) => {
      if (isMuted) {
        // Muted: dim gray-blue
        uniforms.u_red.value = 0.4
        uniforms.u_green.value = 0.4
        uniforms.u_blue.value = 0.6
      } else if (isProcessingInterruption) {
        // Interruption: bright orange/yellow
        uniforms.u_red.value = 1.0
        uniforms.u_green.value = 0.7
        uniforms.u_blue.value = 0.2
      } else if (isPlayingAudio) {
        // AI speaking: bright blue-purple
        uniforms.u_red.value = 0.6
        uniforms.u_green.value = 0.4
        uniforms.u_blue.value = 1.0
      } else if (isListening && avgLevel > 10) {
        // User speaking: bright green-blue with intensity-based variation
        const intensity = Math.min(avgLevel / 50, 1)
        uniforms.u_red.value = 0.2 + intensity * 0.3
        uniforms.u_green.value = 0.8 + intensity * 0.2
        uniforms.u_blue.value = 0.6 + intensity * 0.4
      } else if (isStreaming) {
        // AI thinking: pulsing purple
        const pulse = (Math.sin(elapsedTime * 2) + 1) / 2
        uniforms.u_red.value = 0.7 + pulse * 0.3
        uniforms.u_green.value = 0.3
        uniforms.u_blue.value = 0.9 + pulse * 0.1
      } else {
        // Default idle: soft blue-purple
        uniforms.u_red.value = 0.8
        uniforms.u_green.value = 0.6
        uniforms.u_blue.value = 1.0
      }
    }

    const animate = () => {
      if (!camera || !clock || !scene || !bloomComposer || !isInitializedRef.current) return

      updateCameraPosition()

      if (uniforms) {
        const elapsedTime = clock.getElapsedTime()
        const avgLevel = audioLevels.reduce((sum, level) => sum + level, 0) / audioLevels.length

        uniforms.u_time.value = elapsedTime

        const audioIntensity = calculateAudioIntensity(elapsedTime, avgLevel)
        updateShaderColors(uniforms, elapsedTime, avgLevel)

        uniforms.u_frequency.value = audioIntensity
      }

      bloomComposer.render()
      animationFrameRef.current = requestAnimationFrame(animate)
    }

    animate()

    return () => {
      container.removeEventListener('mousemove', handleMouseMove)
      cleanup()
    }
  }, [])

  useEffect(() => {
    const handleResize = () => {
      if (
        rendererRef.current &&
        cameraRef.current &&
        bloomComposerRef.current &&
        containerRef.current
      ) {
        const containerWidth = 400
        const containerHeight = 400

        cameraRef.current.aspect = containerWidth / containerHeight
        cameraRef.current.updateProjectionMatrix()
        rendererRef.current.setSize(containerWidth, containerHeight)
        bloomComposerRef.current.setSize(containerWidth, containerHeight)
      }
    }

    window.addEventListener('resize', handleResize)
    return () => window.removeEventListener('resize', handleResize)
  }, [])

  return (
    <div
      ref={containerRef}
      className={className}
      style={{
        width: '400px',
        height: '400px',
        display: 'flex',
        justifyContent: 'center',
        alignItems: 'center',
      }}
    />
  )
}
@@ -0,0 +1,510 @@
'use client'

import { type RefObject, useCallback, useEffect, useRef, useState } from 'react'
import { Mic, MicOff, Phone, X } from 'lucide-react'
import { Button } from '@/components/ui/button'
import { createLogger } from '@/lib/logs/console-logger'
import { cn } from '@/lib/utils'
import { ParticlesVisualization } from './components/particles'

const logger = createLogger('VoiceInterface')

interface SpeechRecognitionEvent extends Event {
  resultIndex: number
  results: SpeechRecognitionResultList
}

interface SpeechRecognitionErrorEvent extends Event {
  error: string
  message?: string
}

interface SpeechRecognition extends EventTarget {
  continuous: boolean
  interimResults: boolean
  lang: string
  start(): void
  stop(): void
  abort(): void
  onstart: ((this: SpeechRecognition, ev: Event) => any) | null
  onend: ((this: SpeechRecognition, ev: Event) => any) | null
  onresult: ((this: SpeechRecognition, ev: SpeechRecognitionEvent) => any) | null
  onerror: ((this: SpeechRecognition, ev: SpeechRecognitionErrorEvent) => any) | null
}

interface SpeechRecognitionStatic {
  new (): SpeechRecognition
}

declare global {
  interface Window {
    SpeechRecognition?: SpeechRecognitionStatic
    webkitSpeechRecognition?: SpeechRecognitionStatic
  }
}

interface VoiceInterfaceProps {
  onCallEnd?: () => void
  onVoiceTranscript?: (transcript: string) => void
  onVoiceStart?: () => void
  onVoiceEnd?: () => void
  onInterrupt?: () => void
  isStreaming?: boolean
  isPlayingAudio?: boolean
  audioContextRef?: RefObject<AudioContext | null>
  messages?: Array<{ content: string; type: 'user' | 'assistant' }>
  className?: string
}

export function VoiceInterface({
  onCallEnd,
  onVoiceTranscript,
  onVoiceStart,
  onVoiceEnd,
  onInterrupt,
  isStreaming = false,
  isPlayingAudio = false,
  audioContextRef: sharedAudioContextRef,
  messages = [],
  className,
}: VoiceInterfaceProps) {
  const [isListening, setIsListening] = useState(false)
  const [isMuted, setIsMuted] = useState(false)
  const [audioLevels, setAudioLevels] = useState<number[]>(new Array(200).fill(0))
  const [permissionStatus, setPermissionStatus] = useState<'granted' | 'denied' | 'prompt'>(
    'prompt'
  )
  const [isInitialized, setIsInitialized] = useState(false)

  const recognitionRef = useRef<SpeechRecognition | null>(null)
  const localAudioContextRef = useRef<AudioContext | null>(null)
  const audioContextRef = sharedAudioContextRef || localAudioContextRef
  const analyserRef = useRef<AnalyserNode | null>(null)
  const mediaStreamRef = useRef<MediaStream | null>(null)
  const animationFrameRef = useRef<number | null>(null)
  const isStartingRef = useRef(false)
  const isMutedRef = useRef(false)
  const compressorRef = useRef<DynamicsCompressorNode | null>(null)
  const gainNodeRef = useRef<GainNode | null>(null)

  const isSupported =
    typeof window !== 'undefined' && !!(window.SpeechRecognition || window.webkitSpeechRecognition)

  useEffect(() => {
    isMutedRef.current = isMuted
  }, [isMuted])

  const cleanup = useCallback(() => {
    if (animationFrameRef.current) {
      cancelAnimationFrame(animationFrameRef.current)
      animationFrameRef.current = null
    }

    if (mediaStreamRef.current) {
      mediaStreamRef.current.getTracks().forEach((track) => track.stop())
      mediaStreamRef.current = null
    }

    if (audioContextRef.current && audioContextRef.current.state !== 'closed') {
      audioContextRef.current.close()
      audioContextRef.current = null
    }

    if (recognitionRef.current) {
      try {
        recognitionRef.current.stop()
      } catch (e) {
        // Ignore errors during cleanup
      }
      recognitionRef.current = null
    }

    analyserRef.current = null
    setAudioLevels(new Array(200).fill(0))
    setIsListening(false)
  }, [])

  const setupAudioVisualization = useCallback(async () => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({
        audio: {
          echoCancellation: true,
          noiseSuppression: true,
          autoGainControl: true,
          sampleRate: 44100,
          channelCount: 1,
          // Enhanced echo cancellation settings to prevent picking up speaker output
          suppressLocalAudioPlayback: true, // Modern browsers
          googEchoCancellation: true, // Chrome-specific
          googAutoGainControl: true,
          googNoiseSuppression: true,
          googHighpassFilter: true,
          googTypingNoiseDetection: true,
        } as any, // Type assertion for experimental properties
      })

      setPermissionStatus('granted')
      mediaStreamRef.current = stream

      if (!audioContextRef.current) {
        const AudioContextConstructor = window.AudioContext || window.webkitAudioContext
        if (!AudioContextConstructor) {
          throw new Error('AudioContext is not supported in this browser')
        }
        audioContextRef.current = new AudioContextConstructor()
      }
      const audioContext = audioContextRef.current

      if (audioContext.state === 'suspended') {
        await audioContext.resume()
      }

      const source = audioContext.createMediaStreamSource(stream)

      const gainNode = audioContext.createGain()
      gainNode.gain.setValueAtTime(1, audioContext.currentTime)

      const compressor = audioContext.createDynamicsCompressor()
      compressor.threshold.setValueAtTime(-50, audioContext.currentTime)
      compressor.knee.setValueAtTime(40, audioContext.currentTime)
      compressor.ratio.setValueAtTime(12, audioContext.currentTime)
      compressor.attack.setValueAtTime(0, audioContext.currentTime)
      compressor.release.setValueAtTime(0.25, audioContext.currentTime)

      const analyser = audioContext.createAnalyser()
      analyser.fftSize = 256
      analyser.smoothingTimeConstant = 0.5

      source.connect(gainNode)
      gainNode.connect(compressor)
      compressor.connect(analyser)

      audioContextRef.current = audioContext
      analyserRef.current = analyser
      compressorRef.current = compressor
      gainNodeRef.current = gainNode

      // Start visualization loop
      const updateVisualization = () => {
        if (!analyserRef.current) return

        if (isMutedRef.current) {
          setAudioLevels(new Array(200).fill(0))
          animationFrameRef.current = requestAnimationFrame(updateVisualization)
          return
        }

        const bufferLength = analyserRef.current.frequencyBinCount
        const dataArray = new Uint8Array(bufferLength)
        analyserRef.current.getByteFrequencyData(dataArray)

        const levels = []
        for (let i = 0; i < 200; i++) {
          const dataIndex = Math.floor((i / 200) * bufferLength)
          const value = dataArray[dataIndex] || 0
          levels.push((value / 255) * 100)
        }

        setAudioLevels(levels)
        animationFrameRef.current = requestAnimationFrame(updateVisualization)
      }

      updateVisualization()
      return true
    } catch (error) {
      logger.error('Error setting up audio:', error)
      setPermissionStatus('denied')
      return false
    }
  }, [isMuted])

  // Start listening
  const startListening = useCallback(async () => {
    if (
      !isSupported ||
      !recognitionRef.current ||
      isListening ||
      isMuted ||
      isStartingRef.current
    ) {
      return
    }

    try {
      isStartingRef.current = true

      if (!mediaStreamRef.current) {
        await setupAudioVisualization()
      }

      recognitionRef.current.start()
    } catch (error) {
      isStartingRef.current = false
      logger.error('Error starting voice input:', error)
      setIsListening(false)
    }
  }, [isSupported, isListening, setupAudioVisualization, isMuted])

  const initializeSpeechRecognition = useCallback(() => {
    if (!isSupported || recognitionRef.current) return

    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition
    if (!SpeechRecognition) return

    const recognition = new SpeechRecognition()
    recognition.continuous = true
    recognition.interimResults = true
    recognition.lang = 'en-US'

    recognition.onstart = () => {
      isStartingRef.current = false
      setIsListening(true)
      onVoiceStart?.()
    }

    recognition.onresult = (event: SpeechRecognitionEvent) => {
      // Don't process results if muted
      if (isMutedRef.current) {
        return
      }

      let finalTranscript = ''

      for (let i = event.resultIndex; i < event.results.length; i++) {
        const result = event.results[i]
        if (result.isFinal) {
          finalTranscript += result[0].transcript
        }
      }

      if (finalTranscript) {
        if (isPlayingAudio) {
          const cleanTranscript = finalTranscript.trim().toLowerCase()
          const isSubstantialSpeech = cleanTranscript.length >= 10
          const hasMultipleWords = cleanTranscript.split(/\s+/).length >= 3

          if (isSubstantialSpeech && hasMultipleWords) {
            onInterrupt?.()
            onVoiceTranscript?.(finalTranscript)
          }
        } else {
          onVoiceTranscript?.(finalTranscript)
        }
      }
    }

    recognition.onerror = (event: SpeechRecognitionErrorEvent) => {
      isStartingRef.current = false
      logger.error('Speech recognition error:', event.error)

      if (event.error === 'not-allowed') {
        setPermissionStatus('denied')
        setIsListening(false)
        onVoiceEnd?.()
        return
      }

      if (!isMutedRef.current && !isStartingRef.current) {
        setTimeout(() => {
          if (recognitionRef.current && !isMutedRef.current && !isStartingRef.current) {
            startListening()
          }
        }, 500)
      }
    }

    recognition.onend = () => {
      isStartingRef.current = false
      setIsListening(false)
      onVoiceEnd?.()

      if (!isMutedRef.current && !isStartingRef.current) {
        setTimeout(() => {
          if (recognitionRef.current && !isMutedRef.current && !isStartingRef.current) {
            startListening()
          }
        }, 200)
      }
    }

    recognitionRef.current = recognition
    setIsInitialized(true)
  }, [
    isSupported,
    isPlayingAudio,
    isMuted,
    onVoiceStart,
    onVoiceEnd,
    onVoiceTranscript,
    onInterrupt,
    startListening,
  ])

  const toggleMute = useCallback(() => {
    const newMutedState = !isMuted

    if (newMutedState) {
      isStartingRef.current = false

      if (recognitionRef.current) {
        try {
          recognitionRef.current.stop()
        } catch (e) {
          // Ignore errors
        }
      }

      if (mediaStreamRef.current) {
        mediaStreamRef.current.getAudioTracks().forEach((track) => {
          track.enabled = false
        })
      }

      setIsListening(false)
    } else {
      if (mediaStreamRef.current) {
        mediaStreamRef.current.getAudioTracks().forEach((track) => {
          track.enabled = true
        })
      }
      setTimeout(() => {
        if (!isMutedRef.current) {
          startListening()
        }
      }, 200)
    }

    setIsMuted(newMutedState)
  }, [isMuted, isListening, startListening])

  const handleEndCall = useCallback(() => {
    cleanup()
    onCallEnd?.()
  }, [cleanup, onCallEnd])

  const getStatusText = () => {
    if (isStreaming) return 'Thinking...'
    if (isPlayingAudio) return 'Speaking...'
    if (isListening) return 'Listening...'
    return 'Ready'
  }

  useEffect(() => {
    if (isSupported) {
      initializeSpeechRecognition()
    }
  }, [isSupported, initializeSpeechRecognition])

  useEffect(() => {
    if (isInitialized && !isMuted && !isListening) {
      const startAudio = async () => {
        try {
          if (!mediaStreamRef.current) {
            const success = await setupAudioVisualization()
            if (!success) {
              logger.error('Failed to setup audio visualization')
              return
            }
          }

          setTimeout(() => {
            if (!isListening && !isMuted && !isStartingRef.current) {
              startListening()
            }
          }, 300)
        } catch (error) {
          logger.error('Error setting up audio:', error)
        }
      }

      startAudio()
    }
  }, [isInitialized, isMuted, isListening, setupAudioVisualization, startListening])

  // Gain ducking during audio playback
  useEffect(() => {
    if (gainNodeRef.current && audioContextRef.current) {
      const gainNode = gainNodeRef.current
      const audioContext = audioContextRef.current

      if (isPlayingAudio) {
        gainNode.gain.setTargetAtTime(0.1, audioContext.currentTime, 0.1)
      } else {
        gainNode.gain.setTargetAtTime(1, audioContext.currentTime, 0.2)
      }
    }
  }, [isPlayingAudio])

  useEffect(() => {
    return () => {
      cleanup()
    }
  }, [cleanup])

  return (
    <div className={cn('fixed inset-0 z-[100] flex flex-col bg-white text-gray-900', className)}>
      {/* Header with close button */}
      <div className='flex justify-end p-4'>
        <Button
          variant='ghost'
          size='icon'
          onClick={handleEndCall}
          className='h-10 w-10 rounded-full hover:bg-gray-100'
        >
          <X className='h-5 w-5' />
        </Button>
      </div>

      {/* Main content area */}
      <div className='flex flex-1 flex-col items-center justify-center px-8'>
        {/* Voice visualization */}
        <div className='relative mb-16'>
          <ParticlesVisualization
            audioLevels={audioLevels}
            isListening={isListening}
            isPlayingAudio={isPlayingAudio}
            isStreaming={isStreaming}
            isMuted={isMuted}
            isProcessingInterruption={false}
          />
        </div>

        {/* Status text */}
        <div className='mb-8 text-center'>
          <p className='font-light text-gray-600 text-lg'>
            {getStatusText()}
            {isMuted && <span className='ml-2 text-gray-400 text-sm'>(Muted)</span>}
          </p>
        </div>
      </div>

      {/* Bottom controls */}
      <div className='px-8 pb-12'>
        <div className='flex items-center justify-center space-x-12'>
          {/* End call button */}
          <Button
            onClick={handleEndCall}
            variant='outline'
            size='icon'
            className='h-14 w-14 rounded-full border-gray-300 hover:bg-gray-50'
          >
            <Phone className='h-6 w-6 rotate-[135deg]' />
          </Button>

          {/* Mute/unmute button */}
          <Button
            onClick={toggleMute}
            variant='outline'
            size='icon'
            className={cn(
              'h-14 w-14 rounded-full border-gray-300 bg-transparent text-gray-600 hover:bg-gray-50',
              isMuted && 'text-gray-400'
            )}
          >
            {isMuted ? <MicOff className='h-6 w-6' /> : <Mic className='h-6 w-6' />}
          </Button>
        </div>
      </div>
    </div>
  )
}
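
For orientation (not part of this commit): a sketch of how a chat page could mount VoiceInterface and bridge its callbacks into the chat loop. Only the VoiceInterfaceProps contract above is from this diff; sendMessage, setVoiceMode, and isVoiceMode are hypothetical, while stopAudio and stopStreaming come from the hooks added later in this commit.

// Hypothetical parent wiring; prop names follow VoiceInterfaceProps above.
{isVoiceMode && (
  <VoiceInterface
    isStreaming={isStreamingResponse}
    isPlayingAudio={isPlayingAudio}
    audioContextRef={audioContextRef}
    // Forward each final transcript into the normal chat send path
    onVoiceTranscript={(transcript) => sendMessage(transcript)}
    // On barge-in: cut TTS playback and abort the in-flight response stream
    onInterrupt={() => {
      stopAudio()
      stopStreaming(setMessages)
    }}
    onCallEnd={() => setVoiceMode(false)}
  />
)}
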
159
apps/sim/app/chat/[subdomain]/hooks/use-audio-streaming.ts
Normal file
@@ -0,0 +1,159 @@
'use client'

import { type RefObject, useCallback, useRef, useState } from 'react'
import { createLogger } from '@/lib/logs/console-logger'

const logger = createLogger('UseAudioStreaming')

declare global {
  interface Window {
    webkitAudioContext?: typeof AudioContext
  }
}

interface AudioStreamingOptions {
  voiceId: string
  modelId?: string
  onAudioStart?: () => void
  onAudioEnd?: () => void
  onError?: (error: Error) => void
}

interface AudioQueueItem {
  text: string
  options: AudioStreamingOptions
}

export function useAudioStreaming(sharedAudioContextRef?: RefObject<AudioContext | null>) {
  const [isPlayingAudio, setIsPlayingAudio] = useState(false)
  const localAudioContextRef = useRef<AudioContext | null>(null)
  const audioContextRef = sharedAudioContextRef || localAudioContextRef
  const currentSourceRef = useRef<AudioBufferSourceNode | null>(null)
  const abortControllerRef = useRef<AbortController | null>(null)
  const audioQueueRef = useRef<AudioQueueItem[]>([])
  const isProcessingQueueRef = useRef(false)

  const getAudioContext = useCallback(() => {
    if (!audioContextRef.current) {
      const AudioContextConstructor = window.AudioContext || window.webkitAudioContext
      if (!AudioContextConstructor) {
        throw new Error('AudioContext is not supported in this browser')
      }
      audioContextRef.current = new AudioContextConstructor()
    }
    return audioContextRef.current
  }, [])

  const stopAudio = useCallback(() => {
    abortControllerRef.current?.abort()

    if (currentSourceRef.current) {
      try {
        currentSourceRef.current.stop()
      } catch (e) {
        // Already stopped
      }
      currentSourceRef.current = null
    }

    audioQueueRef.current = []
    isProcessingQueueRef.current = false

    setIsPlayingAudio(false)
  }, [])

  const processAudioQueue = useCallback(async () => {
    if (isProcessingQueueRef.current || audioQueueRef.current.length === 0) {
      return
    }

    isProcessingQueueRef.current = true
    const item = audioQueueRef.current.shift()

    if (!item) {
      isProcessingQueueRef.current = false
      return
    }

    const { text, options } = item
    const { voiceId, modelId = 'eleven_turbo_v2_5', onAudioStart, onAudioEnd, onError } = options

    try {
      const audioContext = getAudioContext()

      if (audioContext.state === 'suspended') {
        await audioContext.resume()
      }
      const response = await fetch('/api/proxy/tts/stream', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          text,
          voiceId,
          modelId,
        }),
        signal: abortControllerRef.current?.signal,
      })

      if (!response.ok) {
        throw new Error(`TTS request failed: ${response.statusText}`)
      }

      const arrayBuffer = await response.arrayBuffer()
      const audioBuffer = await audioContext.decodeAudioData(arrayBuffer)

      const source = audioContext.createBufferSource()
      source.buffer = audioBuffer
      source.connect(audioContext.destination)
      source.onended = () => {
        currentSourceRef.current = null
        onAudioEnd?.()

        isProcessingQueueRef.current = false

        if (audioQueueRef.current.length === 0) {
          setIsPlayingAudio(false)
        }

        setTimeout(() => processAudioQueue(), 0)
      }

      currentSourceRef.current = source
      source.start(0)
      setIsPlayingAudio(true)
      onAudioStart?.()
    } catch (error) {
      if (error instanceof Error && error.name !== 'AbortError') {
        logger.error('Audio streaming error:', error)
        onError?.(error)
      }

      isProcessingQueueRef.current = false
      setTimeout(() => processAudioQueue(), 0)
    }
  }, [getAudioContext])

  const streamTextToAudio = useCallback(
    async (text: string, options: AudioStreamingOptions) => {
      if (!text.trim()) {
        return
      }

      if (!abortControllerRef.current || abortControllerRef.current.signal.aborted) {
        abortControllerRef.current = new AbortController()
      }

      audioQueueRef.current.push({ text, options })
      processAudioQueue()
    },
    [processAudioQueue]
  )

  return {
    isPlayingAudio,
    streamTextToAudio,
    stopAudio,
  }
}
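
How this hook is meant to compose with the chat-streaming hook in the hunks below (a sketch; the voiceId value and the surrounding component are assumptions, but the handler signature matches StreamingOptions.audioStreamHandler):

// Sketch: queue each completed sentence from the LLM stream for TTS playback.
const { isPlayingAudio, streamTextToAudio, stopAudio } = useAudioStreaming(audioContextRef)

const voiceId = '<elevenlabs-voice-id>' // placeholder, configured per deployment

const streamingOptions: StreamingOptions = {
  voiceSettings: {
    isVoiceEnabled: true,
    voiceId,
    autoPlayResponses: true,
    voiceFirstMode: true,
    textStreamingInVoiceMode: 'synced',
  },
  // Invoked once per complete sentence as the text stream arrives.
  audioStreamHandler: (text) => streamTextToAudio(text, { voiceId }),
}
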
@@ -1,11 +1,34 @@
'use client'

import { useRef, useState } from 'react'
import { createLogger } from '@/lib/logs/console-logger'
import type { ChatMessage } from '../components/message/message'

const logger = createLogger('UseChatStreaming')

export interface VoiceSettings {
  isVoiceEnabled: boolean
  voiceId: string
  autoPlayResponses: boolean
  voiceFirstMode?: boolean
  textStreamingInVoiceMode?: 'hidden' | 'synced' | 'normal'
  conversationMode?: boolean
}

export interface StreamingOptions {
  voiceSettings?: VoiceSettings
  onAudioStart?: () => void
  onAudioEnd?: () => void
  audioStreamHandler?: (text: string) => Promise<void>
}

export function useChatStreaming() {
  const [isStreamingResponse, setIsStreamingResponse] = useState(false)
  const abortControllerRef = useRef<AbortController | null>(null)
  const accumulatedTextRef = useRef<string>('')
  const lastStreamedPositionRef = useRef<number>(0)
  const audioStreamingActiveRef = useRef<boolean>(false)
  const lastDisplayedPositionRef = useRef<number>(0) // Track displayed text in synced mode

  const stopStreaming = (setMessages: React.Dispatch<React.SetStateAction<ChatMessage[]>>) => {
    if (abortControllerRef.current) {
@@ -35,8 +58,12 @@ export function useChatStreaming() {
        return prev
      })

      // Reset streaming state
      // Reset streaming state immediately
      setIsStreamingResponse(false)
      accumulatedTextRef.current = ''
      lastStreamedPositionRef.current = 0
      lastDisplayedPositionRef.current = 0
      audioStreamingActiveRef.current = false
    }
  }

@@ -45,33 +72,90 @@ export function useChatStreaming() {
    setMessages: React.Dispatch<React.SetStateAction<ChatMessage[]>>,
    setIsLoading: React.Dispatch<React.SetStateAction<boolean>>,
    scrollToBottom: () => void,
    userHasScrolled?: boolean
    userHasScrolled?: boolean,
    streamingOptions?: StreamingOptions
  ) => {
    const messageId = crypto.randomUUID()

    // Set streaming state before adding the assistant message
    setIsStreamingResponse(true)

    // Reset refs
    accumulatedTextRef.current = ''
    lastStreamedPositionRef.current = 0
    lastDisplayedPositionRef.current = 0
    audioStreamingActiveRef.current = false

    // Check if we should stream audio
    const shouldStreamAudio =
      streamingOptions?.voiceSettings?.isVoiceEnabled &&
      streamingOptions?.voiceSettings?.autoPlayResponses &&
      streamingOptions?.audioStreamHandler

    // Get voice-first mode settings
    const voiceFirstMode = streamingOptions?.voiceSettings?.voiceFirstMode
    const textStreamingMode = streamingOptions?.voiceSettings?.textStreamingInVoiceMode || 'normal'
    const conversationMode = streamingOptions?.voiceSettings?.conversationMode

    // In voice-first mode with hidden text, don't show text at all
    const shouldShowText = !voiceFirstMode || textStreamingMode !== 'hidden'

    // Add placeholder message
    setMessages((prev) => [
      ...prev,
      {
        id: messageId,
        content: '',
        content: shouldShowText ? '' : '🎵 Generating audio response...',
        type: 'assistant',
        timestamp: new Date(),
        isStreaming: true,
        isVoiceOnly: voiceFirstMode && textStreamingMode === 'hidden',
      },
    ])

    // Stop showing loading indicator once streaming begins
    setIsLoading(false)

    // Start audio if in voice mode
    if (shouldStreamAudio) {
      streamingOptions.onAudioStart?.()
      audioStreamingActiveRef.current = true
    }

    // Helper function to update displayed text based on mode
    const updateDisplayedText = (fullText: string, audioPosition?: number) => {
      let displayText = fullText

      if (voiceFirstMode && textStreamingMode === 'synced') {
        // Only show text up to where audio has been streamed
        displayText = fullText.substring(0, audioPosition || lastStreamedPositionRef.current)
      } else if (voiceFirstMode && textStreamingMode === 'hidden') {
        // Don't update text content, keep voice indicator
        return
      }

      setMessages((prev) =>
        prev.map((msg) => {
          if (msg.id === messageId) {
            return {
              ...msg,
              content: displayText,
            }
          }
          return msg
        })
      )
    }

    // Helper function to clean up after streaming ends (success or error)
    const cleanupStreaming = (messageContent?: string, appendContent = false) => {
      // Reset streaming state and controller
      setIsStreamingResponse(false)
      abortControllerRef.current = null
      accumulatedTextRef.current = ''
      lastStreamedPositionRef.current = 0
      lastDisplayedPositionRef.current = 0
      audioStreamingActiveRef.current = false

      // Update message content and remove isStreaming flag
      setMessages((prev) =>
@@ -83,6 +167,7 @@ export function useChatStreaming() {
              ? msg.content + (messageContent || '')
              : messageContent || msg.content,
            isStreaming: false,
            isVoiceOnly: false,
          }
        }
        return msg
@@ -95,6 +180,11 @@ export function useChatStreaming() {
        scrollToBottom()
      }, 300)
    }

    // End audio streaming
    if (shouldStreamAudio) {
      streamingOptions.onAudioEnd?.()
    }
  }

  // Check if response body exists and is a ReadableStream
@@ -112,7 +202,6 @@ export function useChatStreaming() {
    while (!done) {
      // Check if aborted before awaiting reader.read()
      if (abortControllerRef.current === null) {
        console.log('Stream reading aborted')
        break
      }

@@ -122,11 +211,107 @@ export function useChatStreaming() {
      if (value) {
        const chunk = decoder.decode(value, { stream: true })
        if (chunk) {
          setMessages((prev) =>
            prev.map((msg) =>
              msg.id === messageId ? { ...msg, content: msg.content + chunk } : msg
          // Accumulate text
          accumulatedTextRef.current += chunk

          // Update the message with the accumulated text based on mode
          if (shouldShowText) {
            updateDisplayedText(accumulatedTextRef.current)
          }

          // Stream audio in real-time for meaningful sentences
          if (
            shouldStreamAudio &&
            streamingOptions.audioStreamHandler &&
            audioStreamingActiveRef.current
          ) {
            const newText = accumulatedTextRef.current.substring(
              lastStreamedPositionRef.current
            )
            )

            // Use sentence-based streaming for natural audio flow
            const sentenceEndings = ['. ', '! ', '? ', '.\n', '!\n', '?\n', '.', '!', '?']
            let sentenceEnd = -1

            // Find the first complete sentence
            for (const ending of sentenceEndings) {
              const index = newText.indexOf(ending)
              if (index > 0) {
                // Make sure we include the punctuation
                sentenceEnd = index + ending.length
                break
              }
            }

            // If we found a complete sentence, stream it
            if (sentenceEnd > 0) {
              const sentence = newText.substring(0, sentenceEnd).trim()
              if (sentence && sentence.length >= 3) {
                // Only send meaningful sentences
                try {
                  // Stream this sentence to audio
                  await streamingOptions.audioStreamHandler(sentence)
                  lastStreamedPositionRef.current += sentenceEnd

                  // Update displayed text in synced mode
                  if (voiceFirstMode && textStreamingMode === 'synced') {
                    updateDisplayedText(
                      accumulatedTextRef.current,
                      lastStreamedPositionRef.current
                    )
                  }
                } catch (error) {
                  logger.error('Error streaming audio sentence:', error)
                  // Don't stop on individual sentence errors, but log them
                  if (error instanceof Error && error.message.includes('401')) {
                    logger.warn('TTS authentication error, stopping audio streaming')
                    audioStreamingActiveRef.current = false
                  }
                }
              }
            } else if (newText.length > 200 && done) {
              // If streaming has ended and we have a long incomplete sentence, stream it anyway
              const incompleteSentence = newText.trim()
              if (incompleteSentence && incompleteSentence.length >= 10) {
                try {
                  await streamingOptions.audioStreamHandler(incompleteSentence)
                  lastStreamedPositionRef.current += newText.length

                  if (voiceFirstMode && textStreamingMode === 'synced') {
                    updateDisplayedText(
                      accumulatedTextRef.current,
                      lastStreamedPositionRef.current
                    )
                  }
                } catch (error) {
                  logger.error('Error streaming incomplete sentence:', error)
                }
              }
            }
          }
        }
      }
    }

    // Handle any remaining text for audio streaming when streaming completes
    if (
      shouldStreamAudio &&
      streamingOptions.audioStreamHandler &&
      audioStreamingActiveRef.current
    ) {
      const remainingText = accumulatedTextRef.current
        .substring(lastStreamedPositionRef.current)
        .trim()
      if (remainingText && remainingText.length >= 3) {
        try {
          await streamingOptions.audioStreamHandler(remainingText)

          // Final update for synced mode
          if (voiceFirstMode && textStreamingMode === 'synced') {
            updateDisplayedText(accumulatedTextRef.current, accumulatedTextRef.current.length)
          }
        } catch (error) {
          logger.error('Error streaming final remaining text:', error)
        }
      }
    }
@@ -134,7 +319,7 @@ export function useChatStreaming() {
      // Show error to user in the message
      const errorMessage =
        error instanceof Error ? error.message : 'Unknown error during streaming'
      console.error('Error reading stream:', error)
      logger.error('Error reading stream:', error)
      cleanupStreaming(`\n\n_Error: ${errorMessage}_`, true)
      return // Skip the finally block's cleanupStreaming call
    } finally {

@@ -73,6 +73,7 @@ export const env = createEnv({
    NODE_ENV: z.string().optional(),
    GITHUB_TOKEN: z.string().optional(),
    CHONKIE_API_KEY: z.string().min(1).optional(),
    ELEVENLABS_API_KEY: z.string().min(1).optional(),

    // OAuth blocks (all optional)
    GOOGLE_CLIENT_ID: z.string().optional(),

@@ -378,3 +378,8 @@ export function getInvalidCharacters(name: string): string[] {
  const invalidChars = name.match(/[^a-zA-Z0-9_\s]/g)
  return invalidChars ? [...new Set(invalidChars)] : []
}

/**
 * No-operation function for use as default callback
 */
export const noop = () => {}

@@ -62,6 +62,7 @@
    "@radix-ui/react-tooltip": "^1.1.6",
    "@react-email/components": "^0.0.34",
    "@sentry/nextjs": "^9.15.0",
    "@types/three": "0.177.0",
    "@vercel/og": "^0.6.5",
    "@vercel/speed-insights": "^1.2.0",
    "ai": "^4.3.2",
@@ -74,6 +75,7 @@
    "croner": "^9.0.0",
    "csv-parse": "^5.6.0",
    "csv-parser": "^3.2.0",
    "dat.gui": "0.7.9",
    "date-fns": "4.1.0",
    "drizzle-orm": "^0.41.0",
    "framer-motion": "^12.5.0",
@@ -105,6 +107,7 @@
    "stripe": "^17.7.0",
    "tailwind-merge": "^2.6.0",
    "tailwindcss-animate": "^1.0.7",
    "three": "0.177.0",
    "uuid": "^11.1.0",
    "zod": "^3.24.2"
  },

25
bun.lock
@@ -93,6 +93,7 @@
|
||||
"@radix-ui/react-tooltip": "^1.1.6",
|
||||
"@react-email/components": "^0.0.34",
|
||||
"@sentry/nextjs": "^9.15.0",
|
||||
"@types/three": "0.177.0",
|
||||
"@vercel/og": "^0.6.5",
|
||||
"@vercel/speed-insights": "^1.2.0",
|
||||
"ai": "^4.3.2",
|
||||
@@ -105,6 +106,7 @@
|
||||
"croner": "^9.0.0",
|
||||
"csv-parse": "^5.6.0",
|
||||
"csv-parser": "^3.2.0",
|
||||
"dat.gui": "0.7.9",
|
||||
"date-fns": "4.1.0",
|
||||
"drizzle-orm": "^0.41.0",
|
||||
"framer-motion": "^12.5.0",
|
||||
@@ -136,6 +138,7 @@
|
||||
"stripe": "^17.7.0",
|
||||
"tailwind-merge": "^2.6.0",
|
||||
"tailwindcss-animate": "^1.0.7",
|
||||
"three": "0.177.0",
|
||||
"uuid": "^11.1.0",
|
||||
"zod": "^3.24.2",
|
||||
},
|
||||
@@ -412,6 +415,8 @@
|
||||
|
||||
"@csstools/css-tokenizer": ["@csstools/css-tokenizer@3.0.4", "", {}, "sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw=="],
|
||||
|
||||
"@dimforge/rapier3d-compat": ["@dimforge/rapier3d-compat@0.12.0", "", {}, "sha512-uekIGetywIgopfD97oDL5PfeezkFpNhwlzlaEYNOA0N6ghdsOvh/HYjSMek5Q2O1PYvRSDFcqFVJl4r4ZBwOow=="],
|
||||
|
||||
"@drizzle-team/brocli": ["@drizzle-team/brocli@0.10.2", "", {}, "sha512-z33Il7l5dKjUgGULTqBsQBQwckHh5AbIuxhdsIxDDiZAzBOrZO6q9ogcWC65kU382AfynTfgNumVcNIjuIua6w=="],
|
||||
|
||||
"@emnapi/runtime": ["@emnapi/runtime@1.4.3", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-pBPWdu6MLKROBX05wSNKcNb++m5Er+KQ9QkB+WVM+pW2Kx9hoSrVTnu3BdkI5eBLZoKu/J6mW/B6i6bJB2ytXQ=="],
|
||||
@@ -1186,6 +1191,8 @@

"@trivago/prettier-plugin-sort-imports": ["@trivago/prettier-plugin-sort-imports@5.2.2", "", { "dependencies": { "@babel/generator": "^7.26.5", "@babel/parser": "^7.26.7", "@babel/traverse": "^7.26.7", "@babel/types": "^7.26.7", "javascript-natural-sort": "^0.7.1", "lodash": "^4.17.21" }, "peerDependencies": { "@vue/compiler-sfc": "3.x", "prettier": "2.x - 3.x", "prettier-plugin-svelte": "3.x", "svelte": "4.x || 5.x" }, "optionalPeers": ["@vue/compiler-sfc", "prettier-plugin-svelte", "svelte"] }, "sha512-fYDQA9e6yTNmA13TLVSA+WMQRc5Bn/c0EUBditUHNfMMxN7M82c38b1kEggVE3pLpZ0FwkwJkUEKMiOi52JXFA=="],

"@tweenjs/tween.js": ["@tweenjs/tween.js@23.1.3", "", {}, "sha512-vJmvvwFxYuGnF2axRtPYocag6Clbb5YS7kLL+SO/TeVFzHqDIWrNKYtcsPMibjDx9O+bu+psAy9NKfWklassUA=="],

"@types/aria-query": ["@types/aria-query@5.0.4", "", {}, "sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw=="],

"@types/babel__core": ["@types/babel__core@7.20.5", "", { "dependencies": { "@babel/parser": "^7.20.7", "@babel/types": "^7.20.7", "@types/babel__generator": "*", "@types/babel__template": "*", "@types/babel__traverse": "*" } }, "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA=="],

@@ -1324,8 +1331,12 @@

"@types/shimmer": ["@types/shimmer@1.2.0", "", {}, "sha512-UE7oxhQLLd9gub6JKIAhDq06T0F6FnztwMNRvYgjeQSBeMc1ZG/tA47EwfduvkuQS8apbkM/lpLpWsaCeYsXVg=="],

"@types/stats.js": ["@types/stats.js@0.17.4", "", {}, "sha512-jIBvWWShCvlBqBNIZt0KAshWpvSjhkwkEu4ZUcASoAvhmrgAUI2t1dXrjSL4xXVLB4FznPrIsX3nKXFl/Dt4vA=="],

"@types/tedious": ["@types/tedious@4.0.14", "", { "dependencies": { "@types/node": "*" } }, "sha512-KHPsfX/FoVbUGbyYvk1q9MMQHLPeRZhRJZdO45Q4YjvFkv4hMNghCWTvy7rdKessBsmtz4euWCWAB6/tVpI1Iw=="],

"@types/three": ["@types/three@0.177.0", "", { "dependencies": { "@dimforge/rapier3d-compat": "~0.12.0", "@tweenjs/tween.js": "~23.1.3", "@types/stats.js": "*", "@types/webxr": "*", "@webgpu/types": "*", "fflate": "~0.8.2", "meshoptimizer": "~0.18.1" } }, "sha512-/ZAkn4OLUijKQySNci47lFO+4JLE1TihEjsGWPUT+4jWqxtwOPPEwJV1C3k5MEx0mcBPCdkFjzRzDOnHEI1R+A=="],

"@types/through": ["@types/through@0.0.33", "", { "dependencies": { "@types/node": "*" } }, "sha512-HsJ+z3QuETzP3cswwtzt2vEIiHBk/dCcHGhbmG5X3ecnwFD/lPrMpliGXxSCg03L9AhrdwA4Oz/qfspkDW+xGQ=="],

"@types/tough-cookie": ["@types/tough-cookie@4.0.5", "", {}, "sha512-/Ad8+nIOV7Rl++6f1BdKxFSMgmoqEoYbHRpPcx3JEfv8VRsQe9Z4mCXeJBzxs7mbHY/XOZZuXlRNfhpVPbs6ZA=="],

@@ -1334,6 +1345,8 @@

"@types/uuid": ["@types/uuid@9.0.8", "", {}, "sha512-jg+97EGIcY9AGHJJRaaPVgetKDsrTgbRjQ5Msgjh/DQKEFl0DtyRr/VCOyD1T2R1MNeWPK/u7JoGhlDZnKBAfA=="],

"@types/webxr": ["@types/webxr@0.5.22", "", {}, "sha512-Vr6Stjv5jPRqH690f5I5GLjVk8GSsoQSYJ2FVd/3jJF7KaqfwPi3ehfBS96mlQ2kPCwZaX6U0rG2+NGHBKkA/A=="],

"@types/yargs": ["@types/yargs@15.0.19", "", { "dependencies": { "@types/yargs-parser": "*" } }, "sha512-2XUaGVmyQjgyAZldf0D0c14vvo/yv0MhQBSTJcejMMaitsn3nxCB6TmH4G0ZQf+uxROOa9mpanoSm8h6SG/1ZA=="],

"@types/yargs-parser": ["@types/yargs-parser@21.0.3", "", {}, "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ=="],

@@ -1394,6 +1407,8 @@

"@webassemblyjs/wast-printer": ["@webassemblyjs/wast-printer@1.14.1", "", { "dependencies": { "@webassemblyjs/ast": "1.14.1", "@xtuc/long": "4.2.2" } }, "sha512-kPSSXE6De1XOR820C90RIo2ogvZG+c3KiHzqUoO/F34Y2shGzesfqv7o57xrxovZJH/MetF5UjroJ/R/3isoiw=="],

"@webgpu/types": ["@webgpu/types@0.1.61", "", {}, "sha512-w2HbBvH+qO19SB5pJOJFKs533CdZqxl3fcGonqL321VHkW7W/iBo6H8bjDy6pr/+pbMwIu5dnuaAxH7NxBqUrQ=="],

"@xmldom/xmldom": ["@xmldom/xmldom@0.8.10", "", {}, "sha512-2WALfTl4xo2SkGCYRt6rDTFfk9R1czmBvUQy12gK2KuRKIpWEhcbbzy8EZXtz/jkRqHX8bFEc6FC1HjX4TUWYw=="],

"@xtuc/ieee754": ["@xtuc/ieee754@1.2.0", "", {}, "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA=="],

@@ -1674,6 +1689,8 @@

"d3-zoom": ["d3-zoom@3.0.0", "", { "dependencies": { "d3-dispatch": "1 - 3", "d3-drag": "2 - 3", "d3-interpolate": "1 - 3", "d3-selection": "2 - 3", "d3-transition": "2 - 3" } }, "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw=="],

"dat.gui": ["dat.gui@0.7.9", "", {}, "sha512-sCNc1OHobc+Erc1HqiswYgHdVNpSJUlk/Hz8vzOCsER7rl+oF/4+v8GXFUyCgtXpoCX6+bnmg07DedLvBLwYKQ=="],

"data-urls": ["data-urls@5.0.0", "", { "dependencies": { "whatwg-mimetype": "^4.0.0", "whatwg-url": "^14.0.0" } }, "sha512-ZYP5VBHshaDAiVZxjbRVcFJpc+4xGgT0bK3vzy1HLN8jTO975HEbuYzZJcHoQEY5K1a0z8YayJkyVETa08eNTg=="],

"date-fns": ["date-fns@4.1.0", "", {}, "sha512-Ukq0owbQXxa/U3EGtsdVBkR1w7KOQ5gIBqdH2hkvknzZPYvBxb/aa6E8L7tmjFtkwZBu3UXBbjIgPo/Ez4xaNg=="],

@@ -1864,7 +1881,7 @@

"fdir": ["fdir@6.4.5", "", { "peerDependencies": { "picomatch": "^3 || ^4" }, "optionalPeers": ["picomatch"] }, "sha512-4BG7puHpVsIYxZUbiUE3RqGloLaSSwzYie5jvasC4LWuBWzZawynvYouhjbQKw2JuIGYdm0DzIxl8iVidKlUEw=="],

"fflate": ["fflate@0.7.4", "", {}, "sha512-5u2V/CDW15QM1XbbgS+0DfPxVB+jUKhWEKuuFuHncbk3tEEqzmoXL+2KyOFuKGqOnmdIy0/davWF1CkuwtibCw=="],
"fflate": ["fflate@0.8.2", "", {}, "sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A=="],

"figures": ["figures@3.2.0", "", { "dependencies": { "escape-string-regexp": "^1.0.5" } }, "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg=="],

@@ -2268,6 +2285,8 @@

"merge2": ["merge2@1.4.1", "", {}, "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg=="],

"meshoptimizer": ["meshoptimizer@0.18.1", "", {}, "sha512-ZhoIoL7TNV4s5B6+rx5mC//fw8/POGyNxS/DZyCJeiZ12ScLfVwRE/GfsxwiTkMYYD5DmK2/JXnEVXqL4rF+Sw=="],

"micromark": ["micromark@4.0.2", "", { "dependencies": { "@types/debug": "^4.0.0", "debug": "^4.0.0", "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "micromark-core-commonmark": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-combine-extensions": "^2.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-encode": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", "micromark-util-subtokenize": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA=="],

"micromark-core-commonmark": ["micromark-core-commonmark@2.0.3", "", { "dependencies": { "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "micromark-factory-destination": "^2.0.0", "micromark-factory-label": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-factory-title": "^2.0.0", "micromark-factory-whitespace": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-html-tag-name": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-subtokenize": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg=="],

@@ -2874,6 +2893,8 @@

"thread-stream": ["thread-stream@3.1.0", "", { "dependencies": { "real-require": "^0.2.0" } }, "sha512-OqyPZ9u96VohAyMfJykzmivOrY2wfMSf3C5TtFJVgN+Hm6aj+voFhlK+kZEIv2FBh1X6Xp3DlnCOfEQ3B2J86A=="],

"three": ["three@0.177.0", "", {}, "sha512-EiXv5/qWAaGI+Vz2A+JfavwYCMdGjxVsrn3oBwllUoqYeaBO75J63ZfyaQKoiLrqNHoTlUc6PFgMXnS0kI45zg=="],

"thriftrw": ["thriftrw@3.11.4", "", { "dependencies": { "bufrw": "^1.2.1", "error": "7.0.2", "long": "^2.4.0" }, "bin": { "thrift2json": "thrift2json.js" } }, "sha512-UcuBd3eanB3T10nXWRRMwfwoaC6VMk7qe3/5YIWP2Jtw+EbHqJ0p1/K3x8ixiR5dozKSSfcg1W+0e33G1Di3XA=="],

"throttleit": ["throttleit@2.1.0", "", {}, "sha512-nt6AMGKW1p/70DF/hGBdJB57B8Tspmbp5gfJ8ilhLnt7kkr2ye7hzD6NVG8GGErk2HWF34igrL2CXmNIkzKqKw=="],

@@ -3338,6 +3359,8 @@

"@sentry/webpack-plugin/uuid": ["uuid@9.0.1", "", { "bin": { "uuid": "dist/bin/uuid" } }, "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA=="],

"@shuding/opentype.js/fflate": ["fflate@0.7.4", "", {}, "sha512-5u2V/CDW15QM1XbbgS+0DfPxVB+jUKhWEKuuFuHncbk3tEEqzmoXL+2KyOFuKGqOnmdIy0/davWF1CkuwtibCw=="],

"@smithy/middleware-retry/uuid": ["uuid@9.0.1", "", { "bin": { "uuid": "dist/bin/uuid" } }, "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA=="],

"@tailwindcss/node/jiti": ["jiti@2.4.2", "", { "bin": { "jiti": "lib/jiti-cli.mjs" } }, "sha512-rg9zJN+G4n2nfJl5MW3BMygZX56zKPNVEYYqq7adpmMh4Jn2QNEwhvQlFy6jPVdcod7txZtKHWnyZiA3a0zP7A=="],
