mirror of
https://github.com/di-sukharev/opencommit.git
synced 2026-04-20 03:02:51 -04:00
This update introduces a centralized error handling mechanism for various AI engines, improving the consistency and clarity of error messages. The new `normalizeEngineError` function standardizes error responses, allowing for better user feedback and recovery suggestions. Additionally, specific error classes for insufficient credits, rate limits, and service availability have been implemented, along with user-friendly formatting for error messages. This refactor aims to enhance the overall user experience when interacting with the AI services.
70 lines
2.2 KiB
TypeScript
import { OpenAI } from 'openai';
|
|
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
|
|
import { normalizeEngineError } from '../utils/engineErrorHandler';
|
|
import { removeContentTags } from '../utils/removeContentTags';
|
|
import { tokenCount } from '../utils/tokenCount';
|
|
import { AiEngine, AiEngineConfig } from './Engine';
|
|
|
|
// Using any for Mistral types to avoid type declaration issues
// (the SDK's own types are not consumed directly here).
export interface MistralAiConfig extends AiEngineConfig {}

// Chat messages are passed through untyped; callers supply
// OpenAI-shaped messages which the Mistral SDK accepts structurally.
export type MistralCompletionMessageParam = Array<any>;

// Import Mistral dynamically to avoid TS errors
// eslint-disable-next-line @typescript-eslint/no-var-requires
const Mistral = require('@mistralai/mistralai').Mistral;
|
|
|
|
export class MistralAiEngine implements AiEngine {
|
|
config: MistralAiConfig;
|
|
client: any; // Using any type for Mistral client to avoid TS errors
|
|
|
|
constructor(config: MistralAiConfig) {
|
|
this.config = config;
|
|
|
|
if (!config.baseURL) {
|
|
this.client = new Mistral({ apiKey: config.apiKey });
|
|
} else {
|
|
this.client = new Mistral({
|
|
apiKey: config.apiKey,
|
|
serverURL: config.baseURL
|
|
});
|
|
}
|
|
}
|
|
|
|
public generateCommitMessage = async (
|
|
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
|
|
): Promise<string | null> => {
|
|
const params = {
|
|
model: this.config.model,
|
|
messages: messages as MistralCompletionMessageParam,
|
|
topP: 0.1,
|
|
maxTokens: this.config.maxTokensOutput
|
|
};
|
|
|
|
try {
|
|
const REQUEST_TOKENS = messages
|
|
.map((msg) => tokenCount(msg.content as string) + 4)
|
|
.reduce((a, b) => a + b, 0);
|
|
|
|
if (
|
|
REQUEST_TOKENS >
|
|
this.config.maxTokensInput - this.config.maxTokensOutput
|
|
)
|
|
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
|
|
|
|
const completion = await this.client.chat.complete(params);
|
|
|
|
if (!completion.choices) throw Error('No completion choice available.');
|
|
|
|
const message = completion.choices[0].message;
|
|
|
|
if (!message || !message.content)
|
|
throw Error('No completion choice available.');
|
|
|
|
let content = message.content as string;
|
|
return removeContentTags(content, 'think');
|
|
} catch (error) {
|
|
throw normalizeEngineError(error, 'mistral', this.config.model);
|
|
}
|
|
};
|
|
}
|