fix: improve type safety for max_completion_tokens params

Remove Record<string, unknown> type annotation to let TypeScript infer
the params object type, preserving type checking on all properties.
Cast to ChatCompletionCreateParamsNonStreaming at the create() call site
to work around the SDK's type definitions not yet including
max_completion_tokens. Add a unit test for the reasoning-model
detection regex.

Signed-off-by: majiayu000 <1835304752@qq.com>
This commit is contained in:
majiayu000
2026-03-21 10:49:27 +08:00
parent dc7f7f6552
commit 6982e76cf5
2 changed files with 34 additions and 6 deletions

View File

@@ -45,16 +45,16 @@ export class OpenAiEngine implements AiEngine {
): Promise<string | null> => {
const isReasoningModel = /^(o[1-9]|gpt-5)/.test(this.config.model);
const params: Record<string, unknown> = {
const params = {
model: this.config.model,
messages,
...(isReasoningModel
? { max_completion_tokens: this.config.maxTokensOutput }
: {
temperature: 0,
top_p: 0.1,
max_tokens: this.config.maxTokensOutput
})
temperature: 0,
top_p: 0.1,
max_tokens: this.config.maxTokensOutput
})
};
try {
@@ -68,7 +68,9 @@ export class OpenAiEngine implements AiEngine {
)
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
const completion = await this.client.chat.completions.create(params);
const completion = await this.client.chat.completions.create(
params as OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming
);
const message = completion.choices[0].message;
let content = message?.content;