fix(gpt-5): fixed verbosity and reasoning params (#1069)

* fix(gpt-5): fixed verbosity and reasoning params

* fixed dropdown

* default values for verbosity and reasoning effort

* cleanup

* use default value in dropdown
This commit is contained in:
Waleed Latif
2025-08-20 20:18:02 -07:00
committed by GitHub
parent 692ba69864
commit ff43528d35
11 changed files with 118 additions and 38 deletions

View File

@@ -39,6 +39,8 @@ export async function POST(request: NextRequest) {
stream,
messages,
environmentVariables,
reasoningEffort,
verbosity,
} = body
logger.info(`[${requestId}] Provider request details`, {
@@ -58,6 +60,8 @@ export async function POST(request: NextRequest) {
messageCount: messages?.length || 0,
hasEnvironmentVariables:
!!environmentVariables && Object.keys(environmentVariables).length > 0,
reasoningEffort,
verbosity,
})
let finalApiKey: string
@@ -99,6 +103,8 @@ export async function POST(request: NextRequest) {
stream,
messages,
environmentVariables,
reasoningEffort,
verbosity,
})
const executionTime = Date.now() - startTime

View File

@@ -22,6 +22,7 @@ interface DropdownProps {
previewValue?: string | null
disabled?: boolean
placeholder?: string
config?: import('@/blocks/types').SubBlockConfig
}
export function Dropdown({
@@ -34,6 +35,7 @@ export function Dropdown({
previewValue,
disabled,
placeholder = 'Select an option...',
config,
}: DropdownProps) {
const [storeValue, setStoreValue] = useSubBlockValue<string>(blockId, subBlockId)
const [storeInitialized, setStoreInitialized] = useState(false)
@@ -281,7 +283,7 @@ export function Dropdown({
{/* Dropdown */}
{open && (
<div className='absolute top-full left-0 z-[100] mt-1 w-full min-w-[286px]'>
<div className='absolute top-full left-0 z-[100] mt-1 w-full'>
<div className='allow-scroll fade-in-0 zoom-in-95 animate-in rounded-md border bg-popover text-popover-foreground shadow-lg'>
<div
ref={dropdownRef}

View File

@@ -126,9 +126,12 @@ export function SubBlock({
blockId={blockId}
subBlockId={config.id}
options={config.options as { label: string; id: string }[]}
defaultValue={typeof config.value === 'function' ? config.value({}) : config.value}
placeholder={config.placeholder}
isPreview={isPreview}
previewValue={previewValue}
disabled={isDisabled}
config={config}
/>
</div>
)
@@ -139,6 +142,7 @@ export function SubBlock({
blockId={blockId}
subBlockId={config.id}
options={config.options as { label: string; id: string }[]}
defaultValue={typeof config.value === 'function' ? config.value({}) : config.value}
placeholder={config.placeholder}
isPreview={isPreview}
previewValue={previewValue}

View File

@@ -215,16 +215,16 @@ Create a system prompt appropriately detailed for the request, using clear langu
{
id: 'reasoningEffort',
title: 'Reasoning Effort',
type: 'combobox',
type: 'dropdown',
layout: 'half',
placeholder: 'Select reasoning effort...',
options: () => {
return [
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
]
},
options: [
{ label: 'minimal', id: 'minimal' },
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
],
value: () => 'medium',
condition: {
field: 'model',
value: MODELS_WITH_REASONING_EFFORT,
@@ -233,10 +233,15 @@ Create a system prompt appropriately detailed for the request, using clear langu
{
id: 'verbosity',
title: 'Verbosity',
type: 'slider',
type: 'dropdown',
layout: 'half',
min: 0,
max: 2,
placeholder: 'Select verbosity...',
options: [
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
],
value: () => 'medium',
condition: {
field: 'model',
value: MODELS_WITH_VERBOSITY,
@@ -518,7 +523,7 @@ Example 3 (Array Input):
},
temperature: { type: 'number', description: 'Response randomness level' },
reasoningEffort: { type: 'string', description: 'Reasoning effort level for GPT-5 models' },
verbosity: { type: 'number', description: 'Verbosity level for GPT-5 models' },
verbosity: { type: 'string', description: 'Verbosity level for GPT-5 models' },
tools: { type: 'json', description: 'Available tools configuration' },
},
outputs: {

View File

@@ -1326,5 +1326,59 @@ describe('AgentBlockHandler', () => {
expect(requestBody.model).toBe('azure/gpt-4o')
expect(requestBody.apiKey).toBe('test-azure-api-key')
})
it('should pass GPT-5 specific parameters (reasoningEffort and verbosity) through the request pipeline', async () => {
const inputs = {
model: 'gpt-5',
systemPrompt: 'You are a helpful assistant.',
userPrompt: 'Hello!',
apiKey: 'test-api-key',
reasoningEffort: 'minimal',
verbosity: 'high',
temperature: 0.7,
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
expect(mockFetch).toHaveBeenCalledWith(expect.any(String), expect.any(Object))
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
// Check that GPT-5 parameters are included in the request
expect(requestBody.reasoningEffort).toBe('minimal')
expect(requestBody.verbosity).toBe('high')
expect(requestBody.provider).toBe('openai')
expect(requestBody.model).toBe('gpt-5')
expect(requestBody.apiKey).toBe('test-api-key')
})
it('should handle missing GPT-5 parameters gracefully', async () => {
const inputs = {
model: 'gpt-5',
systemPrompt: 'You are a helpful assistant.',
userPrompt: 'Hello!',
apiKey: 'test-api-key',
temperature: 0.7,
// No reasoningEffort or verbosity provided
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
expect(mockFetch).toHaveBeenCalledWith(expect.any(String), expect.any(Object))
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
// Check that GPT-5 parameters are undefined when not provided
expect(requestBody.reasoningEffort).toBeUndefined()
expect(requestBody.verbosity).toBeUndefined()
expect(requestBody.provider).toBe('openai')
expect(requestBody.model).toBe('gpt-5')
})
})
})

View File

@@ -368,6 +368,8 @@ export class AgentBlockHandler implements BlockHandler {
stream: streaming,
messages,
environmentVariables: context.environmentVariables || {},
reasoningEffort: inputs.reasoningEffort,
verbosity: inputs.verbosity,
}
}

View File

@@ -10,6 +10,8 @@ export interface AgentInputs {
apiKey?: string
azureEndpoint?: string
azureApiVersion?: string
reasoningEffort?: string
verbosity?: string
}
export interface ToolInput {

View File

@@ -145,7 +145,11 @@ export const azureOpenAIProvider: ProviderConfig = {
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
// Add GPT-5 specific parameters
if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort
if (request.reasoningEffort !== undefined) {
payload.reasoning = {
effort: request.reasoningEffort,
}
}
if (request.verbosity !== undefined) payload.verbosity = request.verbosity
// Add response format for structured output if specified

View File

@@ -35,13 +35,10 @@ export interface ModelCapabilities {
toolUsageControl?: boolean
computerUse?: boolean
reasoningEffort?: {
min: string
max: string
values: string[]
}
verbosity?: {
min: number
max: number
values: string[]
}
}
@@ -97,11 +94,11 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
capabilities: {
toolUsageControl: true,
reasoningEffort: {
min: 'low',
max: 'high',
values: ['minimal', 'low', 'medium', 'high'],
},
verbosity: {
values: ['low', 'medium', 'high'],
},
verbosity: { min: 0, max: 2 },
},
},
{
@@ -115,11 +112,11 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
capabilities: {
toolUsageControl: true,
reasoningEffort: {
min: 'low',
max: 'high',
values: ['minimal', 'low', 'medium', 'high'],
},
verbosity: {
values: ['low', 'medium', 'high'],
},
verbosity: { min: 0, max: 2 },
},
},
{
@@ -133,11 +130,11 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
capabilities: {
toolUsageControl: true,
reasoningEffort: {
min: 'low',
max: 'high',
values: ['minimal', 'low', 'medium', 'high'],
},
verbosity: {
values: ['low', 'medium', 'high'],
},
verbosity: { min: 0, max: 2 },
},
},
{
@@ -261,11 +258,11 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
capabilities: {
toolUsageControl: true,
reasoningEffort: {
min: 'low',
max: 'high',
values: ['minimal', 'low', 'medium', 'high'],
},
verbosity: {
values: ['low', 'medium', 'high'],
},
verbosity: { min: 0, max: 2 },
},
},
{
@@ -279,11 +276,11 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
capabilities: {
toolUsageControl: true,
reasoningEffort: {
min: 'low',
max: 'high',
values: ['minimal', 'low', 'medium', 'high'],
},
verbosity: {
values: ['low', 'medium', 'high'],
},
verbosity: { min: 0, max: 2 },
},
},
{
@@ -297,11 +294,11 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
capabilities: {
toolUsageControl: true,
reasoningEffort: {
min: 'low',
max: 'high',
values: ['minimal', 'low', 'medium', 'high'],
},
verbosity: {
values: ['low', 'medium', 'high'],
},
verbosity: { min: 0, max: 2 },
},
},
{

View File

@@ -131,7 +131,11 @@ export const openaiProvider: ProviderConfig = {
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
// Add GPT-5 specific parameters
if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort
if (request.reasoningEffort !== undefined) {
payload.reasoning = {
effort: request.reasoningEffort,
}
}
if (request.verbosity !== undefined) payload.verbosity = request.verbosity
// Add response format for structured output if specified

View File

@@ -158,7 +158,7 @@ export interface ProviderRequest {
azureApiVersion?: string
// GPT-5 specific parameters
reasoningEffort?: string
verbosity?: number
verbosity?: string
}
// Map of provider IDs to their configurations