Mirror of https://github.com/di-sukharev/opencommit.git (synced 2026-01-20 02:58:05 -05:00)
This update introduces a centralized error-handling mechanism for the various AI engines, improving the consistency and clarity of error messages. The new `normalizeEngineError` function standardizes error responses, enabling better user feedback and recovery suggestions. Specific error classes for insufficient credits, rate limits, and service availability have also been added, along with user-friendly formatting of error messages. The refactor aims to improve the overall experience of interacting with the AI services.
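The engine file below only calls the new helper; `engineErrorHandler` itself is not shown on this page. As a rough sketch of the shape the description implies — the class names, status-code mapping, and message text here are assumptions, not the actual implementation — it could look like this:

// utils/engineErrorHandler.ts — illustrative sketch only; class names and
// the status-code mapping are assumptions based on the commit description.
export class InsufficientCreditsError extends Error {}
export class RateLimitError extends Error {}
export class ServiceUnavailableError extends Error {}

export function normalizeEngineError(
  error: unknown,
  engine: string,
  model?: string
): Error {
  // Many provider SDK errors carry an HTTP status; read it defensively.
  const status = (error as { status?: number })?.status;
  const prefix = `[${engine}${model ? `/${model}` : ''}]`;

  if (status === 402)
    return new InsufficientCreditsError(
      `${prefix} Insufficient credits. Top up your account and retry.`
    );
  if (status === 429)
    return new RateLimitError(
      `${prefix} Rate limit reached. Wait a moment and retry.`
    );
  if (status === 503)
    return new ServiceUnavailableError(
      `${prefix} Service unavailable. Try again later.`
    );

  // Fall back to the original error, wrapping non-Error values.
  return error instanceof Error
    ? error
    : new Error(`${prefix} ${String(error)}`);
}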
68 lines · 2.0 KiB · TypeScript
import { OpenAI } from 'openai';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { parseCustomHeaders } from '../utils/engine';
import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine, AiEngineConfig } from './Engine';

export interface OpenAiConfig extends AiEngineConfig {}

export class OpenAiEngine implements AiEngine {
  config: OpenAiConfig;
  client: OpenAI;

  constructor(config: OpenAiConfig) {
    this.config = config;

    const clientOptions: OpenAI.ClientOptions = {
      apiKey: config.apiKey
    };

    // Allow pointing the client at an OpenAI-compatible endpoint.
    if (config.baseURL) {
      clientOptions.baseURL = config.baseURL;
    }

    // Forward any user-supplied headers (e.g. for proxies or gateways).
    if (config.customHeaders) {
      const headers = parseCustomHeaders(config.customHeaders);
      if (Object.keys(headers).length > 0) {
        clientOptions.defaultHeaders = headers;
      }
    }

    this.client = new OpenAI(clientOptions);
  }

  public generateCommitMessage = async (
    messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
  ): Promise<string | null> => {
    const params = {
      model: this.config.model,
      messages,
      temperature: 0,
      top_p: 0.1,
      max_tokens: this.config.maxTokensOutput
    };

    try {
      // Estimate prompt size up front; the +4 approximates per-message
      // formatting overhead in the chat format.
      const REQUEST_TOKENS = messages
        .map((msg) => tokenCount(msg.content as string) + 4)
        .reduce((a, b) => a + b, 0);

      // Fail fast if the prompt leaves no room for the response.
      if (
        REQUEST_TOKENS >
        this.config.maxTokensInput - this.config.maxTokensOutput
      )
        throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);

      const completion = await this.client.chat.completions.create(params);

      const message = completion.choices[0].message;
      const content = message?.content;

      // Strip <think>...</think> blocks that some reasoning models emit.
      return removeContentTags(content, 'think');
    } catch (error) {
      // Convert provider-specific failures into the shared error shape.
      throw normalizeEngineError(error, 'openai', this.config.model);
    }
  };
}
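For context, a minimal usage sketch. The config values are placeholders and `gitDiff` is a hypothetical variable; the exact fields of `AiEngineConfig` come from ./Engine, which is not shown here:

// Illustrative usage only; run inside an async context.
const engine = new OpenAiEngine({
  apiKey: process.env.OPENAI_API_KEY ?? '',
  model: 'gpt-4o-mini',
  maxTokensInput: 4096,
  maxTokensOutput: 500
} as OpenAiConfig);

const commitMessage = await engine.generateCommitMessage([
  { role: 'system', content: 'Write a conventional commit message for this diff.' },
  { role: 'user', content: gitDiff } // gitDiff: the staged diff as a string
]);

Any failure inside generateCommitMessage surfaces through normalizeEngineError, so callers see the same error shape regardless of which engine produced it.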