improvement(gpt-5): added reasoning level and verbosity to gpt-5 models (#1058)

Waleed Latif
2025-08-20 17:04:39 -07:00
committed by GitHub
parent 6fd6f921dc
commit cea42f5135
7 changed files with 187 additions and 0 deletions

View File

@@ -9,7 +9,9 @@ import {
  getProviderIcon,
  MODELS_TEMP_RANGE_0_1,
  MODELS_TEMP_RANGE_0_2,
  MODELS_WITH_REASONING_EFFORT,
  MODELS_WITH_TEMPERATURE_SUPPORT,
  MODELS_WITH_VERBOSITY,
  providers,
} from '@/providers/utils'
@@ -210,6 +212,36 @@ Create a system prompt appropriately detailed for the request, using clear langu
        },
      },
    },
    {
      id: 'reasoningEffort',
      title: 'Reasoning Effort',
      type: 'combobox',
      layout: 'half',
      placeholder: 'Select reasoning effort...',
      options: () => {
        return [
          { label: 'low', id: 'low' },
          { label: 'medium', id: 'medium' },
          { label: 'high', id: 'high' },
        ]
      },
      condition: {
        field: 'model',
        value: MODELS_WITH_REASONING_EFFORT,
      },
    },
    {
      id: 'verbosity',
      title: 'Verbosity',
      type: 'slider',
      layout: 'half',
      min: 0,
      max: 2,
      condition: {
        field: 'model',
        value: MODELS_WITH_VERBOSITY,
      },
    },
    {
      id: 'apiKey',
      title: 'API Key',
@@ -485,6 +517,8 @@ Example 3 (Array Input):
      },
    },
    temperature: { type: 'number', description: 'Response randomness level' },
    reasoningEffort: { type: 'string', description: 'Reasoning effort level for GPT-5 models' },
    verbosity: { type: 'number', description: 'Verbosity level for GPT-5 models' },
    tools: { type: 'json', description: 'Available tools configuration' },
  },
  outputs: {
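(Not part of the commit — a minimal sketch of how a condition like the ones above gates a sub-block: the control is rendered only when the selected model appears in the referenced list. The isSubBlockVisible helper is hypothetical, for illustration only.)

import { MODELS_WITH_REASONING_EFFORT } from '@/providers/utils'

// Hypothetical: a sub-block with condition { field, value } is visible only when
// the current value of `field` is included in the `value` array.
function isSubBlockVisible(
  condition: { field: string; value: string[] },
  values: Record<string, string>
): boolean {
  return condition.value.includes(values[condition.field])
}

isSubBlockVisible({ field: 'model', value: MODELS_WITH_REASONING_EFFORT }, { model: 'gpt-5' }) // true — Reasoning Effort combobox is shown
isSubBlockVisible({ field: 'model', value: MODELS_WITH_REASONING_EFFORT }, { model: 'gpt-4o' }) // false — hidden for unsupported models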

View File

@@ -144,6 +144,10 @@ export const azureOpenAIProvider: ProviderConfig = {
    if (request.temperature !== undefined) payload.temperature = request.temperature
    if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens

    // Add GPT-5 specific parameters
    if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort
    if (request.verbosity !== undefined) payload.verbosity = request.verbosity

    // Add response format for structured output if specified
    if (request.responseFormat) {
      // Use Azure OpenAI's JSON schema format

View File

@@ -34,6 +34,15 @@ export interface ModelCapabilities {
  }
  toolUsageControl?: boolean
  computerUse?: boolean
  reasoningEffort?: {
    min: string
    max: string
    values: string[]
  }
  verbosity?: {
    min: number
    max: number
  }
}

export interface ModelDefinition {
@@ -87,6 +96,12 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
        },
        capabilities: {
          toolUsageControl: true,
          reasoningEffort: {
            min: 'low',
            max: 'high',
            values: ['low', 'medium', 'high'],
          },
          verbosity: { min: 0, max: 2 },
        },
      },
      {
@@ -99,6 +114,12 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
        },
        capabilities: {
          toolUsageControl: true,
          reasoningEffort: {
            min: 'low',
            max: 'high',
            values: ['low', 'medium', 'high'],
          },
          verbosity: { min: 0, max: 2 },
        },
      },
      {
@@ -111,6 +132,12 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
        },
        capabilities: {
          toolUsageControl: true,
          reasoningEffort: {
            min: 'low',
            max: 'high',
            values: ['low', 'medium', 'high'],
          },
          verbosity: { min: 0, max: 2 },
        },
      },
      {
@@ -233,6 +260,12 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
        },
        capabilities: {
          toolUsageControl: true,
          reasoningEffort: {
            min: 'low',
            max: 'high',
            values: ['low', 'medium', 'high'],
          },
          verbosity: { min: 0, max: 2 },
        },
      },
      {
@@ -245,6 +278,12 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
        },
        capabilities: {
          toolUsageControl: true,
          reasoningEffort: {
            min: 'low',
            max: 'high',
            values: ['low', 'medium', 'high'],
          },
          verbosity: { min: 0, max: 2 },
        },
      },
      {
@@ -257,6 +296,12 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
        },
        capabilities: {
          toolUsageControl: true,
          reasoningEffort: {
            min: 'low',
            max: 'high',
            values: ['low', 'medium', 'high'],
          },
          verbosity: { min: 0, max: 2 },
        },
      },
      {
@@ -844,3 +889,33 @@ export const EMBEDDING_MODEL_PRICING: Record<string, ModelPricing> = {
export function getEmbeddingModelPricing(modelId: string): ModelPricing | null {
  return EMBEDDING_MODEL_PRICING[modelId] || null
}

/**
 * Get all models that support reasoning effort
 */
export function getModelsWithReasoningEffort(): string[] {
  const models: string[] = []

  for (const provider of Object.values(PROVIDER_DEFINITIONS)) {
    for (const model of provider.models) {
      if (model.capabilities.reasoningEffort) {
        models.push(model.id)
      }
    }
  }

  return models
}

/**
 * Get all models that support verbosity
 */
export function getModelsWithVerbosity(): string[] {
  const models: string[] = []

  for (const provider of Object.values(PROVIDER_DEFINITIONS)) {
    for (const model of provider.models) {
      if (model.capabilities.verbosity) {
        models.push(model.id)
      }
    }
  }

  return models
}
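(Not part of the commit — a short usage sketch of the two new helpers; the import path is assumed to be whichever module defines PROVIDER_DEFINITIONS.)

import { getModelsWithReasoningEffort, getModelsWithVerbosity } from '@/providers/models'

// Both lists are derived from the capabilities declared on each model, so adding
// reasoningEffort / verbosity to a model definition updates them automatically.
const reasoningModels = getModelsWithReasoningEffort()
// e.g. ['gpt-5', 'gpt-5-mini', 'gpt-5-nano', 'azure/gpt-5', ...]
const verbosityModels = getModelsWithVerbosity()
// per the tests further down, the two lists currently contain the same models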

View File

@@ -130,6 +130,10 @@ export const openaiProvider: ProviderConfig = {
    if (request.temperature !== undefined) payload.temperature = request.temperature
    if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens

    // Add GPT-5 specific parameters
    if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort
    if (request.verbosity !== undefined) payload.verbosity = request.verbosity

    // Add response format for structured output if specified
    if (request.responseFormat) {
      // Use OpenAI's JSON schema format
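(Not part of the commit — an illustrative payload for a GPT-5 request after these lines run. The reasoning_effort and verbosity keys come from the diff; the model, messages, and values are made up for the example.)

const payload: Record<string, unknown> = {
  model: 'gpt-5',
  messages: [{ role: 'user', content: 'Summarize this document.' }],
  max_tokens: 1024, // from request.maxTokens
  reasoning_effort: 'medium', // from request.reasoningEffort
  verbosity: 1, // from request.verbosity; temperature is omitted for GPT-5 models in this implementation
}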

View File

@@ -156,6 +156,9 @@ export interface ProviderRequest {
  // Azure OpenAI specific parameters
  azureEndpoint?: string
  azureApiVersion?: string
  // GPT-5 specific parameters
  reasoningEffort?: string
  verbosity?: number
}

// Map of provider IDs to their configurations
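(Not part of the commit — a sketch of a request carrying the new fields; only reasoningEffort and verbosity are from this diff, the remaining fields and values are illustrative, hence Partial.)

const request: Partial<ProviderRequest> = {
  model: 'gpt-5',
  maxTokens: 2048,
  reasoningEffort: 'high', // 'low' | 'medium' | 'high', matching the combobox options
  verbosity: 2, // 0–2, matching the Verbosity slider range
}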

View File

@@ -19,7 +19,9 @@ import {
  getProviderModels,
  MODELS_TEMP_RANGE_0_1,
  MODELS_TEMP_RANGE_0_2,
  MODELS_WITH_REASONING_EFFORT,
  MODELS_WITH_TEMPERATURE_SUPPORT,
  MODELS_WITH_VERBOSITY,
  PROVIDERS_WITH_TOOL_USAGE_CONTROL,
  prepareToolsWithUsageControl,
  supportsTemperature,
@@ -144,6 +146,15 @@ describe('Model Capabilities', () => {
        'deepseek-chat',
        'azure/gpt-4.1',
        'azure/model-router',
        // GPT-5 models don't support temperature (removed in our implementation)
        'gpt-5',
        'gpt-5-mini',
        'gpt-5-nano',
        'gpt-5-chat-latest',
        'azure/gpt-5',
        'azure/gpt-5-mini',
        'azure/gpt-5-nano',
        'azure/gpt-5-chat-latest',
      ]

      for (const model of unsupportedModels) {
@@ -198,6 +209,15 @@ describe('Model Capabilities', () => {
      expect(getMaxTemperature('azure/o3')).toBeUndefined()
      expect(getMaxTemperature('azure/o4-mini')).toBeUndefined()
      expect(getMaxTemperature('deepseek-r1')).toBeUndefined()
      // GPT-5 models don't support temperature (removed in our implementation)
      expect(getMaxTemperature('gpt-5')).toBeUndefined()
      expect(getMaxTemperature('gpt-5-mini')).toBeUndefined()
      expect(getMaxTemperature('gpt-5-nano')).toBeUndefined()
      expect(getMaxTemperature('gpt-5-chat-latest')).toBeUndefined()
      expect(getMaxTemperature('azure/gpt-5')).toBeUndefined()
      expect(getMaxTemperature('azure/gpt-5-mini')).toBeUndefined()
      expect(getMaxTemperature('azure/gpt-5-nano')).toBeUndefined()
      expect(getMaxTemperature('azure/gpt-5-chat-latest')).toBeUndefined()
    })

    it.concurrent('should be case insensitive', () => {
@@ -266,6 +286,49 @@ describe('Model Capabilities', () => {
        expect(MODELS_WITH_TEMPERATURE_SUPPORT).toContain('claude-sonnet-4-0') // From 0-1 range
      }
    )

    it.concurrent('should have correct models in MODELS_WITH_REASONING_EFFORT', () => {
      // Should contain GPT-5 models that support reasoning effort
      expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5')
      expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5-mini')
      expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5-nano')
      expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5')
      expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5-mini')
      expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5-nano')

      // Should NOT contain non-reasoning GPT-5 models
      expect(MODELS_WITH_REASONING_EFFORT).not.toContain('gpt-5-chat-latest')
      expect(MODELS_WITH_REASONING_EFFORT).not.toContain('azure/gpt-5-chat-latest')

      // Should NOT contain other models
      expect(MODELS_WITH_REASONING_EFFORT).not.toContain('gpt-4o')
      expect(MODELS_WITH_REASONING_EFFORT).not.toContain('claude-sonnet-4-0')
      expect(MODELS_WITH_REASONING_EFFORT).not.toContain('o1')
    })

    it.concurrent('should have correct models in MODELS_WITH_VERBOSITY', () => {
      // Should contain GPT-5 models that support verbosity
      expect(MODELS_WITH_VERBOSITY).toContain('gpt-5')
      expect(MODELS_WITH_VERBOSITY).toContain('gpt-5-mini')
      expect(MODELS_WITH_VERBOSITY).toContain('gpt-5-nano')
      expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5')
      expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5-mini')
      expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5-nano')

      // Should NOT contain non-reasoning GPT-5 models
      expect(MODELS_WITH_VERBOSITY).not.toContain('gpt-5-chat-latest')
      expect(MODELS_WITH_VERBOSITY).not.toContain('azure/gpt-5-chat-latest')

      // Should NOT contain other models
      expect(MODELS_WITH_VERBOSITY).not.toContain('gpt-4o')
      expect(MODELS_WITH_VERBOSITY).not.toContain('claude-sonnet-4-0')
      expect(MODELS_WITH_VERBOSITY).not.toContain('o1')
    })

    it.concurrent('should have same models in both reasoning effort and verbosity arrays', () => {
      // GPT-5 models that support reasoning effort should also support verbosity and vice versa
      expect(MODELS_WITH_REASONING_EFFORT.sort()).toEqual(MODELS_WITH_VERBOSITY.sort())
    })
  })
})

View File

@@ -12,9 +12,11 @@ import {
  getHostedModels as getHostedModelsFromDefinitions,
  getMaxTemperature as getMaxTempFromDefinitions,
  getModelPricing as getModelPricingFromDefinitions,
  getModelsWithReasoningEffort,
  getModelsWithTemperatureSupport,
  getModelsWithTempRange01,
  getModelsWithTempRange02,
  getModelsWithVerbosity,
  getProviderModels as getProviderModelsFromDefinitions,
  getProvidersWithToolUsageControl,
  PROVIDER_DEFINITIONS,
@@ -878,6 +880,8 @@ export function trackForcedToolUsage(
export const MODELS_TEMP_RANGE_0_2 = getModelsWithTempRange02()
export const MODELS_TEMP_RANGE_0_1 = getModelsWithTempRange01()
export const MODELS_WITH_TEMPERATURE_SUPPORT = getModelsWithTemperatureSupport()
export const MODELS_WITH_REASONING_EFFORT = getModelsWithReasoningEffort()
export const MODELS_WITH_VERBOSITY = getModelsWithVerbosity()
export const PROVIDERS_WITH_TOOL_USAGE_CONTROL = getProvidersWithToolUsageControl()

/**