Mirror of https://github.com/di-sukharev/opencommit.git (synced 2026-01-21 03:27:59 -05:00)
This update introduces a centralized error handling mechanism for various AI engines, improving the consistency and clarity of error messages. The new `normalizeEngineError` function standardizes error responses, allowing for better user feedback and recovery suggestions. Additionally, specific error classes for insufficient credits, rate limits, and service availability have been implemented, along with user-friendly formatting for error messages. This refactor aims to enhance the overall user experience when interacting with the AI services.
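The `engineErrorHandler` module itself isn't shown on this page, but from the description above and the `normalizeEngineError(error, 'anthropic', this.config.model)` call in the engine below, a minimal sketch might look like the following. The class names and the HTTP-status mapping here are illustrative assumptions, not the repository's actual implementation:

// Hypothetical sketch of the centralized handler described above.
// Class names and the status-code mapping are assumptions.
export class EngineError extends Error {
  constructor(
    message: string,
    public readonly provider: string,
    public readonly model: string
  ) {
    super(message);
    this.name = new.target.name;
  }
}

export class InsufficientCreditsError extends EngineError {}
export class RateLimitError extends EngineError {}
export class ServiceUnavailableError extends EngineError {}

// Map a raw SDK error onto one of the shared classes so every engine
// surfaces the same user-facing message and recovery suggestion.
export function normalizeEngineError(
  error: unknown,
  provider: string,
  model: string
): EngineError {
  const status = (error as { status?: number })?.status;
  const message = error instanceof Error ? error.message : String(error);

  switch (status) {
    case 402:
      return new InsufficientCreditsError(
        `Insufficient credits for ${provider}. Top up your account and retry.`,
        provider,
        model
      );
    case 429:
      return new RateLimitError(
        `Rate limit reached on ${provider} (${model}). Wait a moment and retry.`,
        provider,
        model
      );
    case 503:
      return new ServiceUnavailableError(
        `${provider} is temporarily unavailable. Try again shortly.`,
        provider,
        model
      );
    default:
      return new EngineError(message, provider, model);
  }
}

Because every engine throws the same error shape, the CLI layer can format feedback once instead of per provider.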
63 lines · 2.0 KiB · TypeScript
import AnthropicClient from '@anthropic-ai/sdk';
import {
  MessageCreateParamsNonStreaming,
  MessageParam
} from '@anthropic-ai/sdk/resources/messages.mjs';
import { OpenAI } from 'openai';

import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine, AiEngineConfig } from './Engine';

interface AnthropicConfig extends AiEngineConfig {}

export class AnthropicEngine implements AiEngine {
  config: AnthropicConfig;
  client: AnthropicClient;

  constructor(config: AnthropicConfig) {
    this.config = config;
    this.client = new AnthropicClient({ apiKey: this.config.apiKey });
  }

  public generateCommitMessage = async (
    messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
  ): Promise<string | undefined> => {
    // Anthropic takes the system prompt as a dedicated parameter,
    // so split it out from the conversational messages.
    const systemMessage = messages.find((msg) => msg.role === 'system')
      ?.content as string;
    const restMessages = messages.filter(
      (msg) => msg.role !== 'system'
    ) as MessageParam[];

    const params: MessageCreateParamsNonStreaming = {
      model: this.config.model,
      system: systemMessage,
      messages: restMessages,
      temperature: 0,
      top_p: 0.1,
      max_tokens: this.config.maxTokensOutput
    };

    try {
      // Rough request size estimate: per-message token count plus a
      // small constant for message framing overhead.
      const REQUEST_TOKENS = messages
        .map((msg) => tokenCount(msg.content as string) + 4)
        .reduce((a, b) => a + b, 0);

      if (
        REQUEST_TOKENS >
        this.config.maxTokensInput - this.config.maxTokensOutput
      ) {
        throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
      }

      const data = await this.client.messages.create(params);

      const content = data?.content[0].text;

      // Strip any <think>…</think> reasoning tags before returning.
      return removeContentTags(content, 'think');
    } catch (error) {
      // Convert SDK-specific failures into the shared engine error shape.
      throw normalizeEngineError(error, 'anthropic', this.config.model);
    }
  };
}
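For context, a hypothetical usage sketch of the engine above. The object literal includes only the config fields the class actually reads (`apiKey`, `model`, `maxTokensInput`, `maxTokensOutput`); the real `AiEngineConfig` may declare more, and the model name and token limits are placeholders:

// Hypothetical usage; model name and limits are placeholder values.
async function demo() {
  const engine = new AnthropicEngine({
    apiKey: process.env.ANTHROPIC_API_KEY!,
    model: 'claude-3-5-sonnet-latest',
    maxTokensInput: 200_000,
    maxTokensOutput: 1_024
  } as AnthropicConfig);

  const commitMessage = await engine.generateCommitMessage([
    { role: 'system', content: 'Write a conventional commit message for the diff.' },
    { role: 'user', content: '<git diff here>' }
  ]);

  console.log(commitMessage);
}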