mirror of
https://github.com/simstudioai/sim.git
synced 2026-02-07 05:05:15 -05:00
* fix(azure): add azure-anthropic support to router, evaluator, copilot, and tokenization * added azure anthropic values to env * fix(azure): make anthropic-version configurable for azure-anthropic provider * fix(azure): thread provider credentials through guardrails and fix translate missing bedrockAccessKeyId * updated guardrails * ack'd PR comments * fix(azure): unify credential passing pattern across all LLM handlers - Pass all provider credentials unconditionally in router, evaluator (matching agent pattern) - Remove conditional if-branching on providerId for credential fields - Thread workspaceId through guardrails → hallucination validator for BYOK key resolution - Remove getApiKey() from hallucination validator, let executeProviderRequest handle it - Resolve vertex OAuth credentials in hallucination validator matching agent handler pattern Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> --------- Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
82 lines
2.0 KiB
TypeScript
/**
 * Configuration constants for tokenization functionality
 */
import type { ProviderTokenizationConfig } from '@/lib/tokenization/types'
export const TOKENIZATION_CONFIG = {
|
|
providers: {
|
|
openai: {
|
|
avgCharsPerToken: 4,
|
|
confidence: 'high',
|
|
supportedMethods: ['heuristic', 'fallback'],
|
|
},
|
|
'azure-openai': {
|
|
avgCharsPerToken: 4,
|
|
confidence: 'high',
|
|
supportedMethods: ['heuristic', 'fallback'],
|
|
},
|
|
anthropic: {
|
|
avgCharsPerToken: 4.5,
|
|
confidence: 'high',
|
|
supportedMethods: ['heuristic', 'fallback'],
|
|
},
|
|
'azure-anthropic': {
|
|
avgCharsPerToken: 4.5,
|
|
confidence: 'high',
|
|
supportedMethods: ['heuristic', 'fallback'],
|
|
},
|
|
google: {
|
|
avgCharsPerToken: 5,
|
|
confidence: 'medium',
|
|
supportedMethods: ['heuristic', 'fallback'],
|
|
},
|
|
deepseek: {
|
|
avgCharsPerToken: 4,
|
|
confidence: 'medium',
|
|
supportedMethods: ['heuristic', 'fallback'],
|
|
},
|
|
xai: {
|
|
avgCharsPerToken: 4,
|
|
confidence: 'medium',
|
|
supportedMethods: ['heuristic', 'fallback'],
|
|
},
|
|
cerebras: {
|
|
avgCharsPerToken: 4,
|
|
confidence: 'medium',
|
|
supportedMethods: ['heuristic', 'fallback'],
|
|
},
|
|
mistral: {
|
|
avgCharsPerToken: 4,
|
|
confidence: 'medium',
|
|
supportedMethods: ['heuristic', 'fallback'],
|
|
},
|
|
groq: {
|
|
avgCharsPerToken: 4,
|
|
confidence: 'medium',
|
|
supportedMethods: ['heuristic', 'fallback'],
|
|
},
|
|
ollama: {
|
|
avgCharsPerToken: 4,
|
|
confidence: 'low',
|
|
supportedMethods: ['fallback'],
|
|
},
|
|
} satisfies Record<string, ProviderTokenizationConfig>,
|
|
|
|
fallback: {
|
|
avgCharsPerToken: 4,
|
|
confidence: 'low',
|
|
supportedMethods: ['fallback'],
|
|
} satisfies ProviderTokenizationConfig,
|
|
|
|
defaults: {
|
|
model: 'gpt-4o',
|
|
provider: 'openai',
|
|
},
|
|
} as const
|
|
|
|
export const LLM_BLOCK_TYPES = ['agent', 'router', 'evaluator'] as const
|
|
|
|
export const MIN_TEXT_LENGTH_FOR_ESTIMATION = 1
|
|
export const MAX_PREVIEW_LENGTH = 100
|