Mirror of https://github.com/simstudioai/sim.git, synced 2026-02-08 05:35:14 -05:00.
Commit v0.5.84: model request sanitization.
This commit is contained in:
@@ -8,7 +8,10 @@ import {
|
||||
calculateCost,
|
||||
generateStructuredOutputInstructions,
|
||||
shouldBillModelUsage,
|
||||
supportsReasoningEffort,
|
||||
supportsTemperature,
|
||||
supportsThinking,
|
||||
supportsVerbosity,
|
||||
} from '@/providers/utils'
|
||||
|
||||
const logger = createLogger('Providers')
|
||||
@@ -21,11 +24,24 @@ export const MAX_TOOL_ITERATIONS = 20
|
||||
|
||||
function sanitizeRequest(request: ProviderRequest): ProviderRequest {
|
||||
const sanitizedRequest = { ...request }
|
||||
const model = sanitizedRequest.model
|
||||
|
||||
if (sanitizedRequest.model && !supportsTemperature(sanitizedRequest.model)) {
|
||||
if (model && !supportsTemperature(model)) {
|
||||
sanitizedRequest.temperature = undefined
|
||||
}
|
||||
|
||||
if (model && !supportsReasoningEffort(model)) {
|
||||
sanitizedRequest.reasoningEffort = undefined
|
||||
}
|
||||
|
||||
if (model && !supportsVerbosity(model)) {
|
||||
sanitizedRequest.verbosity = undefined
|
||||
}
|
||||
|
||||
if (model && !supportsThinking(model)) {
|
||||
sanitizedRequest.thinkingLevel = undefined
|
||||
}
|
||||
|
||||
return sanitizedRequest
|
||||
}
|
||||
|
||||
|
||||
@@ -33,8 +33,11 @@ import {
|
||||
prepareToolExecution,
|
||||
prepareToolsWithUsageControl,
|
||||
shouldBillModelUsage,
|
||||
supportsReasoningEffort,
|
||||
supportsTemperature,
|
||||
supportsThinking,
|
||||
supportsToolUsageControl,
|
||||
supportsVerbosity,
|
||||
updateOllamaProviderModels,
|
||||
} from '@/providers/utils'
|
||||
|
||||
@@ -333,6 +336,82 @@ describe('Model Capabilities', () => {
|
||||
)
|
||||
})
|
||||
|
||||
describe('supportsReasoningEffort', () => {
|
||||
it.concurrent('should return true for models with reasoning effort capability', () => {
|
||||
expect(supportsReasoningEffort('gpt-5')).toBe(true)
|
||||
expect(supportsReasoningEffort('gpt-5-mini')).toBe(true)
|
||||
expect(supportsReasoningEffort('gpt-5.1')).toBe(true)
|
||||
expect(supportsReasoningEffort('gpt-5.2')).toBe(true)
|
||||
expect(supportsReasoningEffort('o3')).toBe(true)
|
||||
expect(supportsReasoningEffort('o4-mini')).toBe(true)
|
||||
expect(supportsReasoningEffort('azure/gpt-5')).toBe(true)
|
||||
expect(supportsReasoningEffort('azure/o3')).toBe(true)
|
||||
})
|
||||
|
||||
it.concurrent('should return false for models without reasoning effort capability', () => {
|
||||
expect(supportsReasoningEffort('gpt-4o')).toBe(false)
|
||||
expect(supportsReasoningEffort('gpt-4.1')).toBe(false)
|
||||
expect(supportsReasoningEffort('claude-sonnet-4-5')).toBe(false)
|
||||
expect(supportsReasoningEffort('claude-opus-4-6')).toBe(false)
|
||||
expect(supportsReasoningEffort('gemini-2.5-flash')).toBe(false)
|
||||
expect(supportsReasoningEffort('unknown-model')).toBe(false)
|
||||
})
|
||||
|
||||
it.concurrent('should be case-insensitive', () => {
|
||||
expect(supportsReasoningEffort('GPT-5')).toBe(true)
|
||||
expect(supportsReasoningEffort('O3')).toBe(true)
|
||||
expect(supportsReasoningEffort('GPT-4O')).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe('supportsVerbosity', () => {
|
||||
it.concurrent('should return true for models with verbosity capability', () => {
|
||||
expect(supportsVerbosity('gpt-5')).toBe(true)
|
||||
expect(supportsVerbosity('gpt-5-mini')).toBe(true)
|
||||
expect(supportsVerbosity('gpt-5.1')).toBe(true)
|
||||
expect(supportsVerbosity('gpt-5.2')).toBe(true)
|
||||
expect(supportsVerbosity('azure/gpt-5')).toBe(true)
|
||||
})
|
||||
|
||||
it.concurrent('should return false for models without verbosity capability', () => {
|
||||
expect(supportsVerbosity('gpt-4o')).toBe(false)
|
||||
expect(supportsVerbosity('o3')).toBe(false)
|
||||
expect(supportsVerbosity('o4-mini')).toBe(false)
|
||||
expect(supportsVerbosity('claude-sonnet-4-5')).toBe(false)
|
||||
expect(supportsVerbosity('unknown-model')).toBe(false)
|
||||
})
|
||||
|
||||
it.concurrent('should be case-insensitive', () => {
|
||||
expect(supportsVerbosity('GPT-5')).toBe(true)
|
||||
expect(supportsVerbosity('GPT-4O')).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe('supportsThinking', () => {
|
||||
it.concurrent('should return true for models with thinking capability', () => {
|
||||
expect(supportsThinking('claude-opus-4-6')).toBe(true)
|
||||
expect(supportsThinking('claude-opus-4-5')).toBe(true)
|
||||
expect(supportsThinking('claude-sonnet-4-5')).toBe(true)
|
||||
expect(supportsThinking('claude-sonnet-4-0')).toBe(true)
|
||||
expect(supportsThinking('claude-haiku-4-5')).toBe(true)
|
||||
expect(supportsThinking('gemini-3-pro-preview')).toBe(true)
|
||||
expect(supportsThinking('gemini-3-flash-preview')).toBe(true)
|
||||
})
|
||||
|
||||
it.concurrent('should return false for models without thinking capability', () => {
|
||||
expect(supportsThinking('gpt-4o')).toBe(false)
|
||||
expect(supportsThinking('gpt-5')).toBe(false)
|
||||
expect(supportsThinking('o3')).toBe(false)
|
||||
expect(supportsThinking('deepseek-v3')).toBe(false)
|
||||
expect(supportsThinking('unknown-model')).toBe(false)
|
||||
})
|
||||
|
||||
it.concurrent('should be case-insensitive', () => {
|
||||
expect(supportsThinking('CLAUDE-OPUS-4-6')).toBe(true)
|
||||
expect(supportsThinking('GPT-4O')).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Model Constants', () => {
|
||||
it.concurrent('should have correct models in MODELS_TEMP_RANGE_0_2', () => {
|
||||
expect(MODELS_TEMP_RANGE_0_2).toContain('gpt-4o')
|
||||
|
||||
@@ -959,6 +959,18 @@ export function supportsTemperature(model: string): boolean {
|
||||
return supportsTemperatureFromDefinitions(model)
|
||||
}
|
||||
|
||||
export function supportsReasoningEffort(model: string): boolean {
|
||||
return MODELS_WITH_REASONING_EFFORT.includes(model.toLowerCase())
|
||||
}
|
||||
|
||||
export function supportsVerbosity(model: string): boolean {
|
||||
return MODELS_WITH_VERBOSITY.includes(model.toLowerCase())
|
||||
}
|
||||
|
||||
export function supportsThinking(model: string): boolean {
|
||||
return MODELS_WITH_THINKING.includes(model.toLowerCase())
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the maximum temperature value for a model
|
||||
* @returns Maximum temperature value (1 or 2) or undefined if temperature not supported
|
||||
|
||||
Reference in New Issue
Block a user