diff --git a/src/engine/openAi.ts b/src/engine/openAi.ts
index 03148cb..4449877 100644
--- a/src/engine/openAi.ts
+++ b/src/engine/openAi.ts
@@ -45,16 +45,16 @@ export class OpenAiEngine implements AiEngine {
   ): Promise<string | null> => {
     const isReasoningModel = /^(o[1-9]|gpt-5)/.test(this.config.model);
 
-    const params: Record<string, any> = {
+    const params = {
       model: this.config.model,
       messages,
       ...(isReasoningModel
         ? { max_completion_tokens: this.config.maxTokensOutput }
         : {
-          temperature: 0,
-          top_p: 0.1,
-          max_tokens: this.config.maxTokensOutput
-        })
+            temperature: 0,
+            top_p: 0.1,
+            max_tokens: this.config.maxTokensOutput
+          })
     };
 
     try {
@@ -68,7 +68,9 @@ export class OpenAiEngine implements AiEngine {
       )
         throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
 
-      const completion = await this.client.chat.completions.create(params);
+      const completion = await this.client.chat.completions.create(
+        params as OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming
+      );
 
       const message = completion.choices[0].message;
       let content = message?.content;
diff --git a/test/unit/openAi.test.ts b/test/unit/openAi.test.ts
new file mode 100644
index 0000000..c1defd5
--- /dev/null
+++ b/test/unit/openAi.test.ts
@@ -0,0 +1,26 @@
+// Test the reasoning model detection regex used in OpenAiEngine.
+// Integration test with the engine is not possible because mistral.ts
+// uses require() which is unavailable in the ESM test environment.
+const REASONING_MODEL_RE = /^(o[1-9]|gpt-5)/;
+
+describe('OpenAiEngine reasoning model detection', () => {
+  it.each([
+    ['o1', true],
+    ['o1-preview', true],
+    ['o1-mini', true],
+    ['o3', true],
+    ['o3-mini', true],
+    ['o4-mini', true],
+    ['gpt-5', true],
+    ['gpt-5-nano', true],
+    ['gpt-4o', false],
+    ['gpt-4o-mini', false],
+    ['gpt-4', false],
+    ['gpt-3.5-turbo', false]
+  ])(
+    'model "%s" isReasoning=%s',
+    (model, expected) => {
+      expect(REASONING_MODEL_RE.test(model)).toBe(expected);
+    }
+  );
+});