feat(models): add gpt-5.1 (#2007)

This commit is contained in:
Waleed
2025-11-14 23:23:47 -08:00
committed by GitHub
parent bc8947caa6
commit f8070f9029
4 changed files with 178 additions and 4 deletions

View File

@@ -42,10 +42,10 @@ The user prompt represents the primary input data for inference processing. This
The Agent block supports multiple LLM providers through a unified inference interface. Available models include:
- **OpenAI**: GPT-5, GPT-4o, o1, o3, o4-mini, gpt-4.1
- **Anthropic**: Claude 3.7 Sonnet
- **OpenAI**: GPT-5.1, GPT-5, GPT-4o, o1, o3, o4-mini, gpt-4.1
- **Anthropic**: Claude 4.5 Sonnet, Claude Opus 4.1
- **Google**: Gemini 2.5 Pro, Gemini 2.0 Flash
- **Other Providers**: Groq, Cerebras, xAI, DeepSeek
- **Other Providers**: Groq, Cerebras, xAI, Azure OpenAI, OpenRouter
- **Local Models**: Ollama-compatible models
### Temperature

View File

@@ -210,6 +210,7 @@ Create a system prompt appropriately detailed for the request, using clear langu
type: 'dropdown',
placeholder: 'Select reasoning effort...',
options: [
{ label: 'none', id: 'none' },
{ label: 'minimal', id: 'minimal' },
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },

View File

@@ -101,6 +101,74 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
temperature: { min: 0, max: 2 },
},
},
{
id: 'gpt-5.1',
pricing: {
input: 1.25,
cachedInput: 0.125,
output: 10.0,
updatedAt: '2025-11-14',
},
capabilities: {
reasoningEffort: {
values: ['none', 'low', 'medium', 'high'],
},
verbosity: {
values: ['low', 'medium', 'high'],
},
},
},
{
id: 'gpt-5.1-mini',
pricing: {
input: 0.25,
cachedInput: 0.025,
output: 2.0,
updatedAt: '2025-11-14',
},
capabilities: {
reasoningEffort: {
values: ['none', 'low', 'medium', 'high'],
},
verbosity: {
values: ['low', 'medium', 'high'],
},
},
},
{
id: 'gpt-5.1-nano',
pricing: {
input: 0.05,
cachedInput: 0.005,
output: 0.4,
updatedAt: '2025-11-14',
},
capabilities: {
reasoningEffort: {
values: ['none', 'low', 'medium', 'high'],
},
verbosity: {
values: ['low', 'medium', 'high'],
},
},
},
{
id: 'gpt-5.1-codex',
pricing: {
input: 1.25,
cachedInput: 0.125,
output: 10.0,
updatedAt: '2025-11-14',
},
capabilities: {
reasoningEffort: {
values: ['none', 'medium', 'high'],
},
verbosity: {
values: ['low', 'medium', 'high'],
},
},
},
{
id: 'gpt-5',
pricing: {
@@ -253,6 +321,74 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
temperature: { min: 0, max: 2 },
},
},
{
id: 'azure/gpt-5.1',
pricing: {
input: 1.25,
cachedInput: 0.125,
output: 10.0,
updatedAt: '2025-11-14',
},
capabilities: {
reasoningEffort: {
values: ['none', 'low', 'medium', 'high'],
},
verbosity: {
values: ['low', 'medium', 'high'],
},
},
},
{
id: 'azure/gpt-5.1-mini',
pricing: {
input: 0.25,
cachedInput: 0.025,
output: 2.0,
updatedAt: '2025-11-14',
},
capabilities: {
reasoningEffort: {
values: ['none', 'low', 'medium', 'high'],
},
verbosity: {
values: ['low', 'medium', 'high'],
},
},
},
{
id: 'azure/gpt-5.1-nano',
pricing: {
input: 0.05,
cachedInput: 0.005,
output: 0.4,
updatedAt: '2025-11-14',
},
capabilities: {
reasoningEffort: {
values: ['none', 'low', 'medium', 'high'],
},
verbosity: {
values: ['low', 'medium', 'high'],
},
},
},
{
id: 'azure/gpt-5.1-codex',
pricing: {
input: 1.25,
cachedInput: 0.125,
output: 10.0,
updatedAt: '2025-11-14',
},
capabilities: {
reasoningEffort: {
values: ['none', 'medium', 'high'],
},
verbosity: {
values: ['low', 'medium', 'high'],
},
},
},
{
id: 'azure/gpt-5',
pricing: {

View File

@@ -35,7 +35,6 @@ const mockGetRotatingApiKey = vi.fn().mockReturnValue('rotating-server-key')
const originalRequire = module.require
describe('getApiKey', () => {
// Save original env and reset between tests
const originalEnv = { ...process.env }
beforeEach(() => {
@@ -146,6 +145,15 @@ describe('Model Capabilities', () => {
'deepseek-chat',
'azure/gpt-4.1',
'azure/model-router',
// GPT-5.1 models don't support temperature (removed in our implementation)
'gpt-5.1',
'gpt-5.1-mini',
'gpt-5.1-nano',
'gpt-5.1-codex',
'azure/gpt-5.1',
'azure/gpt-5.1-mini',
'azure/gpt-5.1-nano',
'azure/gpt-5.1-codex',
// GPT-5 models don't support temperature (removed in our implementation)
'gpt-5',
'gpt-5-mini',
@@ -218,6 +226,15 @@ describe('Model Capabilities', () => {
expect(getMaxTemperature('azure/o3')).toBeUndefined()
expect(getMaxTemperature('azure/o4-mini')).toBeUndefined()
expect(getMaxTemperature('deepseek-r1')).toBeUndefined()
// GPT-5.1 models don't support temperature
expect(getMaxTemperature('gpt-5.1')).toBeUndefined()
expect(getMaxTemperature('gpt-5.1-mini')).toBeUndefined()
expect(getMaxTemperature('gpt-5.1-nano')).toBeUndefined()
expect(getMaxTemperature('gpt-5.1-codex')).toBeUndefined()
expect(getMaxTemperature('azure/gpt-5.1')).toBeUndefined()
expect(getMaxTemperature('azure/gpt-5.1-mini')).toBeUndefined()
expect(getMaxTemperature('azure/gpt-5.1-nano')).toBeUndefined()
expect(getMaxTemperature('azure/gpt-5.1-codex')).toBeUndefined()
// GPT-5 models don't support temperature
expect(getMaxTemperature('gpt-5')).toBeUndefined()
expect(getMaxTemperature('gpt-5-mini')).toBeUndefined()
@@ -306,6 +323,16 @@ describe('Model Capabilities', () => {
)
it.concurrent('should have correct models in MODELS_WITH_REASONING_EFFORT', () => {
// Should contain GPT-5.1 models that support reasoning effort
expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5.1')
expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5.1-mini')
expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5.1-nano')
expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5.1-codex')
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5.1')
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5.1-mini')
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5.1-nano')
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5.1-codex')
// Should contain GPT-5 models that support reasoning effort
expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5')
expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5-mini')
@@ -325,6 +352,16 @@ describe('Model Capabilities', () => {
})
it.concurrent('should have correct models in MODELS_WITH_VERBOSITY', () => {
// Should contain GPT-5.1 models that support verbosity
expect(MODELS_WITH_VERBOSITY).toContain('gpt-5.1')
expect(MODELS_WITH_VERBOSITY).toContain('gpt-5.1-mini')
expect(MODELS_WITH_VERBOSITY).toContain('gpt-5.1-nano')
expect(MODELS_WITH_VERBOSITY).toContain('gpt-5.1-codex')
expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5.1')
expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5.1-mini')
expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5.1-nano')
expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5.1-codex')
// Should contain GPT-5 models that support verbosity
expect(MODELS_WITH_VERBOSITY).toContain('gpt-5')
expect(MODELS_WITH_VERBOSITY).toContain('gpt-5-mini')