update imports

di-sukharev
2024-08-19 13:09:46 +03:00
parent eb3be62a4f
commit 745bb5218f
7 changed files with 151 additions and 126 deletions

View File

@@ -1,12 +1,12 @@
import Anthropic from '@anthropic-ai/sdk';
import {
MessageCreateParamsNonStreaming,
MessageParam
} from '@anthropic-ai/sdk/resources/messages.mjs';
import { intro, outro } from '@clack/prompts';
import axios from 'axios';
import chalk from 'chalk';
import Anthropic from '@anthropic-ai/sdk';
import {ChatCompletionRequestMessage} from 'openai'
import { MessageCreateParamsNonStreaming, MessageParam } from '@anthropic-ai/sdk/resources';
import { intro, outro } from '@clack/prompts';
import { ChatCompletionRequestMessage } from 'openai';
import {
CONFIG_MODES,
DEFAULT_TOKEN_LIMITS,
@@ -15,7 +15,6 @@ import {
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine } from './Engine';
import { MODEL_LIST } from '../commands/config';
const config = getConfig();
@@ -47,14 +46,18 @@ if (
}
const MODEL = config?.OCO_MODEL;
if (provider === 'anthropic' &&
typeof MODEL !== 'string' &&
if (
provider === 'anthropic' &&
typeof MODEL !== 'string' &&
command !== 'config' &&
mode !== CONFIG_MODES.set) {
outro(
`${chalk.red('✖')} Unsupported model ${MODEL}. The model can be any string, but the current configuration is not supported.`
);
process.exit(1);
mode !== CONFIG_MODES.set
) {
outro(
`${chalk.red(
'✖'
)} Unsupported model ${MODEL}. The model can be any string, but the current configuration is not supported.`
);
process.exit(1);
}
export class AnthropicAi implements AiEngine {
@@ -70,9 +73,11 @@ export class AnthropicAi implements AiEngine {
public generateCommitMessage = async (
messages: Array<ChatCompletionRequestMessage>
): Promise<string | undefined> => {
const systemMessage = messages.find(msg => msg.role === 'system')?.content as string;
const restMessages = messages.filter((msg) => msg.role !== 'system') as MessageParam[];
const systemMessage = messages.find((msg) => msg.role === 'system')
?.content as string;
const restMessages = messages.filter(
(msg) => msg.role !== 'system'
) as MessageParam[];
const params: MessageCreateParamsNonStreaming = {
model: MODEL,
@@ -91,7 +96,7 @@ export class AnthropicAi implements AiEngine {
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
}
const data = await this.anthropicAI.messages.create(params);
const data = await this.anthropicAI.messages.create(params);
const message = data?.content[0].text;

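For context, the call pattern this engine is built around — splitting the OpenAI-style message array into Anthropic's dedicated system prompt plus a plain message list, then calling `messages.create` — can be sketched standalone roughly as follows; the API key, model id and token budget are placeholders rather than values taken from this commit:

```ts
import Anthropic from '@anthropic-ai/sdk';
import {
  MessageCreateParamsNonStreaming,
  MessageParam
} from '@anthropic-ai/sdk/resources/messages.mjs';

// Minimal OpenAI-style message shape, standing in for ChatCompletionRequestMessage.
type ChatMessage = { role: 'system' | 'user' | 'assistant'; content: string };

const client = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

async function generateCommitMessage(
  messages: ChatMessage[]
): Promise<string | undefined> {
  // Anthropic takes the system prompt as a dedicated field, not as a chat message.
  const systemMessage = messages.find((m) => m.role === 'system')?.content;
  const restMessages: MessageParam[] = messages
    .filter((m) => m.role !== 'system')
    .map((m) => ({
      role: m.role === 'user' ? 'user' : 'assistant',
      content: m.content
    }));

  const params: MessageCreateParamsNonStreaming = {
    model: 'claude-3-5-sonnet-20240620', // placeholder model id
    system: systemMessage,
    messages: restMessages,
    max_tokens: 500 // placeholder output budget
  };

  const data = await client.messages.create(params);
  const block = data.content[0];
  return block?.type === 'text' ? block.text : undefined;
}
```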
View File

@@ -1,14 +1,8 @@
import { AzureKeyCredential, OpenAIClient } from '@azure/openai';
import { intro, outro } from '@clack/prompts';
import axios from 'axios';
import chalk from 'chalk';
import { execa } from 'execa';
import {
ChatCompletionRequestMessage,
} from 'openai';
import { OpenAIClient, AzureKeyCredential } from '@azure/openai';
import { intro, outro } from '@clack/prompts';
import { ChatCompletionRequestMessage } from 'openai';
import {
CONFIG_MODES,
DEFAULT_TOKEN_LIMITS,
@@ -59,7 +53,10 @@ export class Azure implements AiEngine {
constructor() {
if (provider === 'azure') {
this.openAI = new OpenAIClient(apiEndpoint, new AzureKeyCredential(apiKey));
this.openAI = new OpenAIClient(
apiEndpoint,
new AzureKeyCredential(apiKey)
);
}
}

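This file only reflows the `OpenAIClient` construction; a minimal standalone sketch of that client in use looks like the following, where the endpoint, key and deployment name are placeholder assumptions:

```ts
import { AzureKeyCredential, OpenAIClient } from '@azure/openai';

// Placeholder Azure OpenAI resource details — substitute your own.
const apiEndpoint = 'https://my-resource.openai.azure.com/';
const apiKey = process.env.AZURE_OPENAI_KEY ?? '';
const deployment = 'gpt-35-turbo';

const openAI = new OpenAIClient(apiEndpoint, new AzureKeyCredential(apiKey));

async function generateCommitMessage(diff: string): Promise<string | undefined> {
  const result = await openAI.getChatCompletions(deployment, [
    { role: 'system', content: 'Write a conventional commit message for the diff.' },
    { role: 'user', content: diff }
  ]);
  return result.choices[0]?.message?.content ?? undefined;
}
```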
View File

@@ -1,32 +1,28 @@
import axios, { AxiosError } from 'axios';
import axios from 'axios';
import { ChatCompletionRequestMessage } from 'openai';
import { getConfig } from '../commands/config';
import { AiEngine } from './Engine';
import {
getConfig
} from '../commands/config';
const config = getConfig();
export class FlowiseAi implements AiEngine {
async generateCommitMessage(
messages: Array<ChatCompletionRequestMessage>
): Promise<string | undefined> {
const gitDiff = messages[ messages.length - 1 ]?.content?.replace(/\\/g, '\\\\')
.replace(/"/g, '\\"')
.replace(/\n/g, '\\n')
.replace(/\r/g, '\\r')
.replace(/\t/g, '\\t');
const url = `http://${config?.OCO_FLOWISE_ENDPOINT}/api/v1/prediction/${config?.OCO_FLOWISE_API_KEY}`;
const gitDiff = messages[messages.length - 1]?.content
?.replace(/\\/g, '\\\\')
.replace(/"/g, '\\"')
.replace(/\n/g, '\\n')
.replace(/\r/g, '\\r')
.replace(/\t/g, '\\t');
const url = `http://${config?.OCO_FLOWISE_ENDPOINT}/api/v1/prediction/${config?.OCO_FLOWISE_API_KEY}`;
const payload = {
question : gitDiff,
overrideConfig : {
systemMessagePrompt: messages[0]?.content,
},
history : messages.slice( 1, -1 )
}
question: gitDiff,
overrideConfig: {
systemMessagePrompt: messages[0]?.content
},
history: messages.slice(1, -1)
};
try {
const response = await axios.post(url, payload, {
headers: {

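The hunk is cut off just as the request options begin. A rough standalone sketch of the same Flowise prediction call, with the endpoint, chatflow id, headers and response handling filled in as assumptions, is:

```ts
import axios from 'axios';

// Placeholder Flowise host and chatflow id — opencommit reads these from
// OCO_FLOWISE_ENDPOINT and OCO_FLOWISE_API_KEY.
const endpoint = 'localhost:3000';
const chatflowId = 'my-chatflow-id';

async function predict(gitDiff: string, systemPrompt: string) {
  // Escape characters that would otherwise break the JSON string sent to Flowise.
  const question = gitDiff
    .replace(/\\/g, '\\\\')
    .replace(/"/g, '\\"')
    .replace(/\n/g, '\\n')
    .replace(/\r/g, '\\r')
    .replace(/\t/g, '\\t');

  const url = `http://${endpoint}/api/v1/prediction/${chatflowId}`;
  const response = await axios.post(
    url,
    {
      question,
      overrideConfig: { systemMessagePrompt: systemPrompt },
      history: []
    },
    { headers: { 'Content-Type': 'application/json' } }
  );
  // The exact response shape depends on the chatflow; callers typically read a text field.
  return response.data;
}
```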
View File

@@ -1,19 +1,30 @@
import { ChatCompletionRequestMessage } from 'openai';
import { AiEngine } from './Engine';
import { Content, GenerativeModel, GoogleGenerativeAI, HarmBlockThreshold, HarmCategory, Part } from '@google/generative-ai';
import { CONFIG_MODES, ConfigType, DEFAULT_TOKEN_LIMITS, getConfig, MODEL_LIST } from '../commands/config';
import { intro, outro } from '@clack/prompts';
import chalk from 'chalk';
import {
Content,
GenerativeModel,
GoogleGenerativeAI,
HarmBlockThreshold,
HarmCategory,
Part
} from '@google/generative-ai';
import axios from 'axios';
import chalk from 'chalk';
import { ChatCompletionRequestMessage } from 'openai';
import {
CONFIG_MODES,
ConfigType,
DEFAULT_TOKEN_LIMITS,
getConfig,
MODEL_LIST
} from '../commands/config';
import { AiEngine } from './Engine';
export class Gemini implements AiEngine {
private readonly config: ConfigType;
private readonly googleGenerativeAi: GoogleGenerativeAI;
private ai: GenerativeModel;
// vars
// vars
private maxTokens = {
input: DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT,
output: DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT
@@ -24,57 +35,69 @@ export class Gemini implements AiEngine {
constructor() {
this.config = getConfig() as ConfigType;
this.googleGenerativeAi = new GoogleGenerativeAI(this.config.OCO_GEMINI_API_KEY);
this.googleGenerativeAi = new GoogleGenerativeAI(
this.config.OCO_GEMINI_API_KEY
);
this.warmup();
}
async generateCommitMessage(messages: ChatCompletionRequestMessage[]): Promise<string | undefined> {
const systemInstruction = messages.filter(m => m.role === 'system')
.map(m => m.content)
async generateCommitMessage(
messages: ChatCompletionRequestMessage[]
): Promise<string | undefined> {
const systemInstruction = messages
.filter((m) => m.role === 'system')
.map((m) => m.content)
.join('\n');
this.ai = this.googleGenerativeAi.getGenerativeModel({
model: this.model,
systemInstruction,
systemInstruction
});
const contents = messages.filter(m => m.role !== 'system')
.map(m => ({ parts: [{ text: m.content } as Part], role: m.role == 'user' ? m.role : 'model', } as Content));
const contents = messages
.filter((m) => m.role !== 'system')
.map(
(m) =>
({
parts: [{ text: m.content } as Part],
role: m.role == 'user' ? m.role : 'model'
} as Content)
);
try {
const result = await this.ai.generateContent({
contents,
contents,
safetySettings: [
{
category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
},
{
category: HarmCategory.HARM_CATEGORY_HARASSMENT,
category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
},
{
category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
category: HarmCategory.HARM_CATEGORY_HARASSMENT,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
},
{
category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
},
{
category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
}
],
generationConfig: {
maxOutputTokens: this.maxTokens.output,
temperature: 0,
topP: 0.1,
},
topP: 0.1
}
});
return result.response.text();
} catch (error) {
const err = error as Error;
outro(`${chalk.red('✖')} ${err?.message || err}`);
if (
axios.isAxiosError<{ error?: { message: string } }>(error) &&
error.response?.status === 401
@@ -86,14 +109,16 @@ export class Gemini implements AiEngine {
'For help look into README https://github.com/di-sukharev/opencommit#setup'
);
}
throw err;
}
}
private warmup(): void {
if (this.config.OCO_TOKENS_MAX_INPUT !== undefined) this.maxTokens.input = this.config.OCO_TOKENS_MAX_INPUT;
if (this.config.OCO_TOKENS_MAX_OUTPUT !== undefined) this.maxTokens.output = this.config.OCO_TOKENS_MAX_OUTPUT;
if (this.config.OCO_TOKENS_MAX_INPUT !== undefined)
this.maxTokens.input = this.config.OCO_TOKENS_MAX_INPUT;
if (this.config.OCO_TOKENS_MAX_OUTPUT !== undefined)
this.maxTokens.output = this.config.OCO_TOKENS_MAX_OUTPUT;
this.basePath = this.config.OCO_GEMINI_BASE_PATH;
this.apiKey = this.config.OCO_GEMINI_API_KEY;
@@ -101,11 +126,17 @@ export class Gemini implements AiEngine {
const provider = this.config.OCO_AI_PROVIDER;
if (provider === 'gemini' && !this.apiKey &&
command !== 'config' && mode !== 'set') {
if (
provider === 'gemini' &&
!this.apiKey &&
command !== 'config' &&
mode !== 'set'
) {
intro('opencommit');
outro('OCO_GEMINI_API_KEY is not set, please run `oco config set OCO_GEMINI_API_KEY=<your token> . If you are using GPT, make sure you add payment details, so API works.');
outro(
'OCO_GEMINI_API_KEY is not set, please run `oco config set OCO_GEMINI_API_KEY=<your token> . If you are using GPT, make sure you add payment details, so API works.'
);
outro(
'For help look into README https://github.com/di-sukharev/opencommit#setup'
@@ -113,21 +144,22 @@ export class Gemini implements AiEngine {
process.exit(1);
}
this.model = this.config.OCO_MODEL || MODEL_LIST.gemini[0];
if (provider === 'gemini' &&
if (
provider === 'gemini' &&
!MODEL_LIST.gemini.includes(this.model) &&
command !== 'config' &&
mode !== CONFIG_MODES.set) {
mode !== CONFIG_MODES.set
) {
outro(
`${chalk.red('✖')} Unsupported model ${this.model} for Gemini. Supported models are: ${MODEL_LIST.gemini.join(
', '
)}`
`${chalk.red('✖')} Unsupported model ${
this.model
} for Gemini. Supported models are: ${MODEL_LIST.gemini.join(', ')}`
);
process.exit(1);
}
}
}
}

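A condensed standalone sketch of the Gemini call this file assembles — system instruction attached to the model, non-system messages passed as `contents`, plus safety settings and a generation config — is shown below; the API key and model id are placeholders and only one safety category is listed for brevity:

```ts
import {
  Content,
  GoogleGenerativeAI,
  HarmBlockThreshold,
  HarmCategory
} from '@google/generative-ai';

// Placeholder key and model id — opencommit reads these from OCO_GEMINI_API_KEY / OCO_MODEL.
const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY ?? '');

async function generateCommitMessage(
  systemInstruction: string,
  userText: string
): Promise<string> {
  const model = genAI.getGenerativeModel({
    model: 'gemini-1.5-flash', // placeholder
    systemInstruction
  });

  // Non-system chat messages become Content entries with role 'user' or 'model'.
  const contents: Content[] = [{ role: 'user', parts: [{ text: userText }] }];

  const result = await model.generateContent({
    contents,
    safetySettings: [
      {
        category: HarmCategory.HARM_CATEGORY_HARASSMENT,
        threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
      }
    ],
    generationConfig: { maxOutputTokens: 500, temperature: 0, topP: 0.1 }
  });

  return result.response.text();
}
```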
View File

@@ -1,23 +1,21 @@
import axios, { AxiosError } from 'axios';
import axios from 'axios';
import { ChatCompletionRequestMessage } from 'openai';
import { getConfig } from '../commands/config';
import { AiEngine } from './Engine';
import {
getConfig
} from '../commands/config';
const config = getConfig();
export class OllamaAi implements AiEngine {
private model = "mistral"; // as default model of Ollama
private url = "http://localhost:11434/api/chat"; // default URL of Ollama API
private model = 'mistral'; // as default model of Ollama
private url = 'http://localhost:11434/api/chat'; // default URL of Ollama API
setModel(model: string) {
this.model = model ?? config?.OCO_MODEL ?? 'mistral';
}
setUrl(url: string) {
this.url = url ?? config?.OCO_OLLAMA_API_URL ?? 'http://localhost:11434/api/chat';
this.url =
url ?? config?.OCO_OLLAMA_API_URL ?? 'http://localhost:11434/api/chat';
}
async generateCommitMessage(
messages: Array<ChatCompletionRequestMessage>

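This hunk stops before the request itself; a minimal sketch of a chat call against the default endpoint and model, following the public Ollama `/api/chat` contract (the options the engine actually sends are not visible in this diff), is:

```ts
import axios from 'axios';

// Ollama defaults used above; both are overridable via setModel/setUrl in the engine.
const url = 'http://localhost:11434/api/chat';
const model = 'mistral';

async function generateCommitMessage(
  messages: Array<{ role: string; content: string }>
): Promise<string | undefined> {
  // stream: false asks Ollama for a single JSON body instead of a line-delimited stream.
  const response = await axios.post(url, { model, messages, stream: false });
  return response.data?.message?.content;
}
```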
View File

@@ -1,15 +1,11 @@
import { intro, outro } from '@clack/prompts';
import axios from 'axios';
import chalk from 'chalk';
import { execa } from 'execa';
import {
ChatCompletionRequestMessage,
Configuration as OpenAiApiConfiguration,
OpenAIApi
OpenAIApi,
Configuration as OpenAiApiConfiguration
} from 'openai';
import { intro, outro } from '@clack/prompts';
import {
CONFIG_MODES,
DEFAULT_TOKEN_LIMITS,
@@ -18,7 +14,6 @@ import {
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine } from './Engine';
import { MODEL_LIST } from '../commands/config';
const config = getConfig();
@@ -53,18 +48,21 @@ if (
}
const MODEL = config?.OCO_MODEL || 'gpt-3.5-turbo';
if (provider === 'openai' &&
typeof MODEL !== 'string' &&
command !== 'config' &&
mode !== CONFIG_MODES.set) {
if (
provider === 'openai' &&
typeof MODEL !== 'string' &&
command !== 'config' &&
mode !== CONFIG_MODES.set
) {
outro(
`${chalk.red('✖')} Unsupported model ${MODEL}. The model can be any string, but the current configuration is not supported.`
`${chalk.red(
'✖'
)} Unsupported model ${MODEL}. The model can be any string, but the current configuration is not supported.`
);
process.exit(1);
}
export class OpenAi implements AiEngine {
private openAiApiConfiguration = new OpenAiApiConfiguration({
apiKey: apiKey
});
@@ -122,6 +120,4 @@ export class OpenAi implements AiEngine {
throw err;
}
};
}

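For reference, the pre-v4 `openai` SDK pattern this file keeps (`Configuration` + `OpenAIApi` + `createChatCompletion`) can be sketched standalone as below; the key, model and token budget are placeholders:

```ts
import {
  ChatCompletionRequestMessage,
  Configuration as OpenAiApiConfiguration,
  OpenAIApi
} from 'openai';

// Placeholder key — opencommit reads it from its own config.
const configuration = new OpenAiApiConfiguration({
  apiKey: process.env.OPENAI_API_KEY
});
const openAI = new OpenAIApi(configuration);

async function generateCommitMessage(
  messages: Array<ChatCompletionRequestMessage>
): Promise<string | undefined> {
  // The v3 SDK wraps responses in an Axios response object, hence `data`.
  const { data } = await openAI.createChatCompletion({
    model: 'gpt-3.5-turbo',
    messages,
    temperature: 0,
    max_tokens: 500 // placeholder output budget
  });
  return data.choices[0]?.message?.content;
}
```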
View File

@@ -1,12 +1,12 @@
import { ChatCompletionRequestMessage } from 'openai';
import { AiEngine } from './Engine';
import { getConfig } from '../commands/config';
import { AiEngine } from './Engine';
export const TEST_MOCK_TYPES = [
'commit-message',
'prompt-module-commitlint-config',
] as const
type TestMockType = typeof TEST_MOCK_TYPES[number];
'prompt-module-commitlint-config'
] as const;
type TestMockType = (typeof TEST_MOCK_TYPES)[number];
export class TestAi implements AiEngine {
async generateCommitMessage(
@@ -17,15 +17,16 @@ export class TestAi implements AiEngine {
case 'commit-message':
return 'fix(testAi.ts): test commit message';
case 'prompt-module-commitlint-config':
return `{\n` +
return (
`{\n` +
` "localLanguage": "english",\n` +
` "commitFix": "fix(server): Change 'port' variable to uppercase 'PORT'",\n` +
` "commitFeat": "feat(server): Allow server to listen on a port specified through environment variable",\n` +
` "commitDescription": "Change 'port' variable to uppercase 'PORT'. Allow server to listen on a port specified through environment variable."\n` +
`}`
);
default:
throw Error('unsupported test mock type')
throw Error('unsupported test mock type');
}
}
}
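The `(typeof TEST_MOCK_TYPES)[number]` form derives a string-literal union from the readonly tuple, so new mock types only need to be added to the array; a tiny illustration:

```ts
const TEST_MOCK_TYPES = [
  'commit-message',
  'prompt-module-commitlint-config'
] as const;

// Indexed access over the readonly tuple yields the union of its members:
// 'commit-message' | 'prompt-module-commitlint-config'
type TestMockType = (typeof TEST_MOCK_TYPES)[number];

const ok: TestMockType = 'commit-message';
// const bad: TestMockType = 'something-else'; // ✖ type error
```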