mirror of
https://github.com/di-sukharev/opencommit.git
synced 2026-04-20 03:02:51 -04:00
fix: improve type safety for max_completion_tokens params
Remove the `Record<string, unknown>` type annotation to let TypeScript infer the params object type, preserving type checking on all properties. Cast to `ChatCompletionCreateParamsNonStreaming` at the `create()` call site to accommodate the SDK's missing `max_completion_tokens` type. Add a unit test for the reasoning-model detection regex.

Signed-off-by: majiayu000 <1835304752@qq.com>
This commit is contained in:
@@ -45,16 +45,16 @@ export class OpenAiEngine implements AiEngine {
|
||||
): Promise<string | null> => {
|
||||
const isReasoningModel = /^(o[1-9]|gpt-5)/.test(this.config.model);
|
||||
|
||||
const params: Record<string, unknown> = {
|
||||
const params = {
|
||||
model: this.config.model,
|
||||
messages,
|
||||
...(isReasoningModel
|
||||
? { max_completion_tokens: this.config.maxTokensOutput }
|
||||
: {
|
||||
temperature: 0,
|
||||
top_p: 0.1,
|
||||
max_tokens: this.config.maxTokensOutput
|
||||
})
|
||||
temperature: 0,
|
||||
top_p: 0.1,
|
||||
max_tokens: this.config.maxTokensOutput
|
||||
})
|
||||
};
|
||||
|
||||
try {
|
||||
@@ -68,7 +68,9 @@ export class OpenAiEngine implements AiEngine {
|
||||
)
|
||||
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
|
||||
|
||||
const completion = await this.client.chat.completions.create(params);
|
||||
const completion = await this.client.chat.completions.create(
|
||||
params as OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming
|
||||
);
|
||||
|
||||
const message = completion.choices[0].message;
|
||||
let content = message?.content;
|
||||
|
||||
Reference in New Issue
Block a user