Compare commits

...

9 Commits

Author | SHA1 | Message | Date
di-sukharev | aebe7e200f | 3.0.17 | 2024-07-20 10:59:52 +03:00
di-sukharev | 6f1e4bcec6 | Merge remote-tracking branch 'origin/master' into dev | 2024-07-20 10:58:16 +03:00
XiaomaiTX | 2059549dce | feat: Add all of OpenAI GPT models (#365) | 2024-07-20 10:48:00 +03:00
Kellan Stevens | 8361dc6838 | docs: spelling fix (#325) | 2024-07-04 11:08:44 +03:00
Takanori Matsumoto | 73ccae9de3 | 🐛 Fix: prompt-module/@commitlint (#336) | 2024-07-04 11:08:08 +03:00
* fix(commitlint/utils.ts): correct variable used in search for JSON block end tag

* ♻️ (commitlint/config.ts & pwd-commitlint.ts): Refactor commitlint config loading to support both CJS and ESM modules

💡 (pwd-commitlint.ts): Add detailed comments and error handling for better clarity and robustness in commitlint module loading process

*  (package.json): Add setup script for e2e tests to install dependencies for commitlint configurations
🔧 (setup.sh): Add shell script to set up commitlint configurations for e2e tests

*  (config.ts): Add support for OCO_TEST_MOCK_TYPE configuration key to define test mock type for testing purposes
📝 (config.ts): Update documentation for OCO_TEST_MOCK_TYPE configuration key in configValidators and getConfig functions
📝 (testAi.ts): Add TEST_MOCK_TYPES constant array to define supported test mock types
📝 (testAi.ts): Update generateCommitMessage function to use OCO_TEST_MOCK_TYPE from config for different test mock types
📝 (commitlint.test.ts): Add e2e test for running "oco commitlint force" with different @commitlint versions using CJS and ESM
📝 (utils.ts): Add wait function to introduce delay in milliseconds for testing purposes

*  (commitlint.test.ts): refactor setupCommitlint function to accept a version parameter for better code organization and readability
📝 (commitlint.test.ts): add test case for commitlint@9 using CJS to ensure proper functionality and compatibility
📝 (commitlint.test.ts): add test case for commitlint@18 using CJS to ensure proper functionality and compatibility
📝 (commitlint.test.ts): add test case for commitlint@19 using ESM to ensure proper functionality and compatibility

* 🔧 (commitlint.test.ts): remove unnecessary commands to create and add index.ts file before running tests

* refactor(test/e2e/prompt-module/commitlint.test.ts): remove unused import configure
style(test/e2e/prompt-module/commitlint.test.ts): add missing semicolon for consistency
test(test/e2e/prompt-module/commitlint.test.ts): add e2e tests for @commitlint prompt-module integration

*  (e2e tests): add package.json copying to setupCommitlint for version accuracy
♻️ (commitlint config): refactor commitlint.config.js to use ES module syntax
 (package.json): specify "type": "module" to support ES module syntax
Drew Payment | c58e0c62a4 | Feat/add gemini (#349) | 2024-07-04 11:03:54 +03:00
tumf | a4b4e65011 | 📝 (README.md): add support for custom AI models and update documentation to reflect new environment variable OCO_AI_PROVIDER (#351) | 2024-07-04 11:03:17 +03:00
senovr | 18f52772b3 | Make endpoint url for Ollama configurable (#355) | 2024-07-02 20:59:55 +03:00
di-sukharev | fef25a2d06 | build | 2024-05-25 19:23:06 +03:00
35 changed files with 15163 additions and 7332 deletions


@@ -58,7 +58,7 @@ git add <files...>
oco
```
Link to the GitMoji specification: https://gitmoji.dev/
### Running locally with Ollama
You can also run it with a local model through Ollama:
@@ -71,12 +71,29 @@ git add <files...>
OCO_AI_PROVIDER='ollama' opencommit
```
If you want to use a model other than mistral, you can do so by setting the `OCO_AI_PROVIDER` environment variable as follows:
```sh
OCO_AI_PROVIDER='ollama/llama3:8b' opencommit
```
If you have Ollama set up in Docker or on another machine with GPUs (not locally), you can change the default endpoint URL.
You can do so by setting the `OCO_OLLAMA_API_URL` environment variable as follows:
```sh
OCO_OLLAMA_API_URL='http://192.168.1.10:11434/api/chat' opencommit
```
where `192.168.1.10` is an example address of the machine where Ollama is set up.
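To sanity-check that a remote Ollama server is reachable before pointing OpenCommit at it, you can hit Ollama's model-listing endpoint (address as in the example above):
```sh
# Optional: verify the remote Ollama server responds and list its models
curl http://192.168.1.10:11434/api/tags
```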
### Flags
There are multiple optional flags that can be used with the `oco` command:
#### Use Full GitMoji Specification
Link to the GitMoji specification: https://gitmoji.dev/
This flag can only be used if the `OCO_EMOJI` configuration item is set to `true`. It allows users to use all emojis in the GitMoji specification. By default, the full GitMoji specification is set to `false`, which includes only 10 emojis (🐛✨📝🚀✅♻️⬆️🔧🌐💡).
This limits the number of tokens sent in each request. However, if you would like to use the full GitMoji specification, you can use the `--fgm` flag.
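For example (assuming emoji support has been enabled first):
```sh
oco config set OCO_EMOJI=true
oco --fgm
```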
@@ -105,11 +122,12 @@ OCO_TOKENS_MAX_OUTPUT=<max response tokens (default: 500)>
OCO_OPENAI_BASE_PATH=<may be used to set proxy path to OpenAI api>
OCO_DESCRIPTION=<postface a message with ~3 sentences description of the changes>
OCO_EMOJI=<boolean, add GitMoji>
OCO_MODEL=<either 'gpt-4', 'gpt-4-turbo', 'gpt-3.5-turbo' (default), 'gpt-3.5-turbo-0125', 'gpt-4-1106-preview', 'gpt-4-turbo-preview' or 'gpt-4-0125-preview'>
OCO_MODEL=<either 'gpt-4o', 'gpt-4', 'gpt-4-turbo', 'gpt-3.5-turbo' (default), 'gpt-3.5-turbo-0125', 'gpt-4-1106-preview', 'gpt-4-turbo-preview' or 'gpt-4-0125-preview'>
OCO_LANGUAGE=<locale, scroll to the bottom to see options>
OCO_MESSAGE_TEMPLATE_PLACEHOLDER=<message template placeholder, default: '$msg'>
OCO_PROMPT_MODULE=<either conventional-commit or @commitlint, default: conventional-commit>
OCO_ONE_LINE_COMMIT=<one line commit message, default: false>
OCO_AI_PROVIDER=<anthropic, azure, ollama or ollama/model default ollama model: mistral>
```
### Global config for all repos
@@ -119,7 +137,7 @@ Local config still has more priority than Global config, but you may set `OCO_MO
Simply set any of the variables above like this:
```sh
oco config set OCO_MODEL=gpt-4
oco config set OCO_MODEL=gpt-4o
```
Configure [GitMoji](https://gitmoji.dev/) to preface a message.
@@ -136,7 +154,7 @@ oco config set OCO_EMOJI=false
### Switch to GPT-4 or other models
By default, OpenCommit uses the `gpt-3.5-turbo` model.
By default, OpenCommit uses the `gpt-4o` model.
You may switch to GPT-4, which performs better but costs ~15x more 🤠
@@ -147,17 +165,9 @@ oco config set OCO_MODEL=gpt-4
or for a cheaper option:
```sh
oco config set OCO_MODEL=gpt-3.5-turbo
oco config set OCO_MODEL=gpt-4o-mini
```
or for GPT-4 Turbo (Preview) which is more capable, has knowledge of world events up to April 2023, a 128k context window and 2-3x cheaper vs GPT-4:
```sh
oco config set OCO_MODEL=gpt-4-0125-preview
```
Make sure that you spell it `gpt-4` (lowercase) and that you have API access to the 4th model. Even if you have ChatGPT+, that doesn't necessarily mean that you have API access to GPT-4.
### Switch to Azure OpenAI
By default OpenCommit uses [OpenAI](https://openai.com).
@@ -213,7 +223,7 @@ Replace `<module>` with either `conventional-commit` or `@commitlint`.
#### Example:
To switch to using th` '@commitlint` prompt module, run:
To switch to using the `@commitlint` prompt module, run:
```sh
oco config set OCO_PROMPT_MODULE=@commitlint
@@ -381,7 +391,7 @@ jobs:
OCO_OPENAI_BASE_PATH: ''
OCO_DESCRIPTION: false
OCO_EMOJI: false
OCO_MODEL: gpt-3.5-turbo
OCO_MODEL: gpt-4o
OCO_LANGUAGE: en
OCO_PROMPT_MODULE: conventional-commit
```

File diff suppressed because it is too large.

File diff suppressed because it is too large.

package-lock.json (generated)

@@ -1,12 +1,12 @@
{
"name": "opencommit",
"version": "3.0.16",
"version": "3.0.17",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "opencommit",
"version": "3.0.16",
"version": "3.0.17",
"license": "MIT",
"dependencies": {
"@actions/core": "^1.10.0",
@@ -16,6 +16,7 @@
"@azure/openai": "^1.0.0-beta.12",
"@clack/prompts": "^0.6.1",
"@dqbd/tiktoken": "^1.0.2",
"@google/generative-ai": "^0.11.4",
"@octokit/webhooks-schemas": "^6.11.0",
"@octokit/webhooks-types": "^6.11.0",
"ai": "^2.2.14",
@@ -1051,6 +1052,14 @@
"node": ">=14"
}
},
"node_modules/@google/generative-ai": {
"version": "0.11.4",
"resolved": "https://registry.npmjs.org/@google/generative-ai/-/generative-ai-0.11.4.tgz",
"integrity": "sha512-hlw+E9Prv9aUIQISRnLSXi4rukFqKe5WhxPvzBccTvIvXjw2BHMFOJWSC/Gq7WE0W+L/qRHGmYxopmx9qjrB9w==",
"engines": {
"node": ">=18.0.0"
}
},
"node_modules/@humanwhocodes/config-array": {
"version": "0.11.14",
"resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz",


@@ -1,6 +1,6 @@
{
"name": "opencommit",
"version": "3.0.16",
"version": "3.0.17",
"description": "Auto-generate impressive commits in 1 second. Killing lame commits with AI 🤯🔫",
"keywords": [
"git",
@@ -43,16 +43,19 @@
"start": "node ./out/cli.cjs",
"ollama:start": "OCO_AI_PROVIDER='ollama' node ./out/cli.cjs",
"dev": "ts-node ./src/cli.ts",
"dev:gemini": "OCO_AI_PROVIDER='gemini' ts-node ./src/cli.ts",
"build": "rimraf out && node esbuild.config.js",
"build:push": "npm run build && git add . && git commit -m 'build' && git push",
"deploy": "npm version patch && npm run build:push && git push --tags && npm publish --tag latest",
"lint": "eslint src --ext ts && tsc --noEmit",
"format": "prettier --write src",
"test": "node --no-warnings --experimental-vm-modules $( [ -f ./node_modules/.bin/jest ] && echo ./node_modules/.bin/jest || which jest ) test/unit",
"test:all": "npm run test:unit:docker && npm run test:e2e:docker",
"test:docker-build": "docker build -t oco-test -f test/Dockerfile .",
"test:unit": "NODE_OPTIONS=--experimental-vm-modules jest test/unit",
"test:unit:docker": "npm run test:docker-build && DOCKER_CONTENT_TRUST=0 docker run --rm oco-test npm run test:unit",
"test:e2e": "jest test/e2e",
"test:e2e": "npm run test:e2e:setup && jest test/e2e",
"test:e2e:setup": "sh test/e2e/setup.sh",
"test:e2e:docker": "npm run test:docker-build && DOCKER_CONTENT_TRUST=0 docker run --rm oco-test npm run test:e2e"
},
"devDependencies": {
@@ -81,6 +84,7 @@
"@anthropic-ai/sdk": "^0.19.2",
"@clack/prompts": "^0.6.1",
"@dqbd/tiktoken": "^1.0.2",
"@google/generative-ai": "^0.11.4",
"@octokit/webhooks-schemas": "^6.11.0",
"@octokit/webhooks-types": "^6.11.0",
"ai": "^2.2.14",


@@ -10,11 +10,14 @@ import { intro, outro } from '@clack/prompts';
import { COMMANDS } from '../CommandsEnum';
import { getI18nLocal } from '../i18n';
import { TEST_MOCK_TYPES } from '../engine/testAi';
export enum CONFIG_KEYS {
OCO_OPENAI_API_KEY = 'OCO_OPENAI_API_KEY',
OCO_ANTHROPIC_API_KEY = 'OCO_ANTHROPIC_API_KEY',
OCO_AZURE_API_KEY = 'OCO_AZURE_API_KEY',
OCO_GEMINI_API_KEY = 'OCO_GEMINI_API_KEY',
OCO_GEMINI_BASE_PATH = 'OCO_GEMINI_BASE_PATH',
OCO_TOKENS_MAX_INPUT = 'OCO_TOKENS_MAX_INPUT',
OCO_TOKENS_MAX_OUTPUT = 'OCO_TOKENS_MAX_OUTPUT',
OCO_OPENAI_BASE_PATH = 'OCO_OPENAI_BASE_PATH',
@@ -27,7 +30,10 @@ export enum CONFIG_KEYS {
OCO_AI_PROVIDER = 'OCO_AI_PROVIDER',
OCO_GITPUSH = 'OCO_GITPUSH',
OCO_ONE_LINE_COMMIT = 'OCO_ONE_LINE_COMMIT',
OCO_AZURE_ENDPOINT = 'OCO_AZURE_ENDPOINT'
OCO_AZURE_ENDPOINT = 'OCO_AZURE_ENDPOINT',
OCO_TEST_MOCK_TYPE = 'OCO_TEST_MOCK_TYPE',
OCO_API_URL = 'OCO_API_URL',
OCO_OLLAMA_API_URL = 'OCO_OLLAMA_API_URL'
}
export enum CONFIG_MODES {
@@ -36,19 +42,49 @@ export enum CONFIG_MODES {
}
export const MODEL_LIST = {
openai: ['gpt-3.5-turbo',
'gpt-3.5-turbo-0125',
'gpt-4',
'gpt-4-turbo',
'gpt-4-1106-preview',
'gpt-4-turbo-preview',
'gpt-4-0125-preview',
'gpt-4o'],
openai: [
'gpt-3.5-turbo',
'gpt-3.5-turbo-instruct',
'gpt-3.5-turbo-0613',
'gpt-3.5-turbo-0301',
'gpt-3.5-turbo-1106',
'gpt-3.5-turbo-0125',
'gpt-3.5-turbo-16k',
'gpt-3.5-turbo-16k-0613',
'gpt-3.5-turbo-16k-0301',
'gpt-4',
'gpt-4-0314',
'gpt-4-0613',
'gpt-4-1106-preview',
'gpt-4-0125-preview',
'gpt-4-turbo-preview',
'gpt-4-vision-preview',
'gpt-4-1106-vision-preview',
'gpt-4-turbo',
'gpt-4-turbo-2024-04-09',
'gpt-4-32k',
'gpt-4-32k-0314',
'gpt-4-32k-0613',
'gpt-4o',
'gpt-4o-2024-05-13',
'gpt-4o-mini',
'gpt-4o-mini-2024-07-18'
],
anthropic: ['claude-3-haiku-20240307',
'claude-3-sonnet-20240229',
'claude-3-opus-20240229']
}
anthropic: [
'claude-3-haiku-20240307',
'claude-3-sonnet-20240229',
'claude-3-opus-20240229'
],
gemini: [
'gemini-1.5-flash',
'gemini-1.5-pro',
'gemini-1.0-pro',
'gemini-pro-vision',
'text-embedding-004'
]
};
const getDefaultModel = (provider: string | undefined): string => {
switch (provider) {
@@ -56,6 +92,8 @@ const getDefaultModel = (provider: string | undefined): string => {
return '';
case 'anthropic':
return MODEL_LIST.anthropic[0];
case 'gemini':
return MODEL_LIST.gemini[0];
default:
return MODEL_LIST.openai[0];
}
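To make the new resolution concrete, here is a sketch of what the switch above returns per provider (the first, truncated case returns an empty string):
```ts
// Sketch of getDefaultModel() behavior, per the diff above
getDefaultModel('gemini');    // 'gemini-1.5-flash'        (MODEL_LIST.gemini[0])
getDefaultModel('anthropic'); // 'claude-3-haiku-20240307' (MODEL_LIST.anthropic[0])
getDefaultModel(undefined);   // 'gpt-3.5-turbo'           (MODEL_LIST.openai[0], default branch)
```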
@@ -82,10 +120,16 @@ const validateConfig = (
export const configValidators = {
[CONFIG_KEYS.OCO_OPENAI_API_KEY](value: any, config: any = {}) {
if (config.OCO_AI_PROVIDER == 'gemini') return value;
//need api key unless running locally with ollama
validateConfig(
'OpenAI API_KEY',
value || config.OCO_ANTHROPIC_API_KEY || config.OCO_AI_PROVIDER.startsWith('ollama') || config.OCO_AZURE_API_KEY || config.OCO_AI_PROVIDER == 'test' ,
value ||
config.OCO_ANTHROPIC_API_KEY ||
config.OCO_AI_PROVIDER.startsWith('ollama') ||
config.OCO_AZURE_API_KEY ||
config.OCO_AI_PROVIDER == 'test',
'You need to provide an OpenAI/Anthropic/Azure API key'
);
validateConfig(
@@ -100,18 +144,38 @@ export const configValidators = {
[CONFIG_KEYS.OCO_AZURE_API_KEY](value: any, config: any = {}) {
validateConfig(
'ANTHROPIC_API_KEY',
value || config.OCO_OPENAI_API_KEY || config.OCO_AZURE_API_KEY || config.OCO_AI_PROVIDER == 'ollama' || config.OCO_AI_PROVIDER == 'test',
value ||
config.OCO_OPENAI_API_KEY ||
config.OCO_AZURE_API_KEY ||
config.OCO_AI_PROVIDER == 'ollama' ||
config.OCO_AI_PROVIDER == 'test',
'You need to provide an OpenAI/Anthropic/Azure API key'
);
return value;
},
[CONFIG_KEYS.OCO_GEMINI_API_KEY](value: any, config: any = {}) {
// only need to check for gemini api key if using gemini
if (config.OCO_AI_PROVIDER != 'gemini') return value;
validateConfig(
'Gemini API Key',
value || config.OCO_GEMINI_API_KEY || config.OCO_AI_PROVIDER == 'test',
'You need to provide an Gemini API key'
);
return value;
},
[CONFIG_KEYS.OCO_ANTHROPIC_API_KEY](value: any, config: any = {}) {
validateConfig(
'ANTHROPIC_API_KEY',
value || config.OCO_OPENAI_API_KEY || config.OCO_AI_PROVIDER == 'ollama' || config.OCO_AI_PROVIDER == 'test',
'You need to provide an OpenAI/Anthropic/Azure API key'
value ||
config.OCO_OPENAI_API_KEY ||
config.OCO_AI_PROVIDER == 'ollama' ||
config.OCO_AI_PROVIDER == 'test',
'You need to provide an OpenAI/Anthropic API key'
);
return value;
@@ -196,15 +260,19 @@ export const configValidators = {
[CONFIG_KEYS.OCO_MODEL](value: any, config: any = {}) {
validateConfig(
CONFIG_KEYS.OCO_MODEL,
[...MODEL_LIST.openai, ...MODEL_LIST.anthropic].includes(value) || config.OCO_AI_PROVIDER == 'ollama' || config.OCO_AI_PROVIDER == 'test'|| config.OCO_AI_PROVIDER == 'azure',
`${value} is not supported yet, use 'gpt-4o', 'gpt-4', 'gpt-4-turbo', 'gpt-3.5-turbo' (default), 'gpt-3.5-turbo-0125', 'gpt-4-1106-preview', 'gpt-4-turbo-preview', 'gpt-4-0125-preview', 'claude-3-opus-20240229', 'claude-3-sonnet-20240229' or 'claude-3-haiku-20240307'`
);
validateConfig(
CONFIG_KEYS.OCO_MODEL,
typeof value === 'string' &&
value.match(/^[a-zA-Z0-9~\-]{1,63}[a-zA-Z0-9]$/) ||
config.OCO_AI_PROVIDER != 'azure',
`${value} is not model deployed name.`
[
...MODEL_LIST.openai,
...MODEL_LIST.anthropic,
...MODEL_LIST.gemini
].includes(value) ||
config.OCO_AI_PROVIDER == 'ollama' ||
config.OCO_AI_PROVIDER == 'azure' ||
config.OCO_AI_PROVIDER == 'test',
`${value} is not supported yet, use:\n\n ${[
...MODEL_LIST.openai,
...MODEL_LIST.anthropic,
...MODEL_LIST.gemini
].join('\n')}`
);
return value;
},
@@ -239,15 +307,9 @@ export const configValidators = {
[CONFIG_KEYS.OCO_AI_PROVIDER](value: any) {
validateConfig(
CONFIG_KEYS.OCO_AI_PROVIDER,
[
'',
'openai',
'anthropic',
'azure',
'ollama',
'test'
].includes(value) || value.startsWith('ollama'),
`${value} is not supported yet, use 'ollama/{model}', 'azure', 'anthropic' or 'openai' (default)`
['', 'openai', 'anthropic', 'gemini', 'azure', 'test'].includes(value) ||
value.startsWith('ollama'),
`${value} is not supported yet, use 'ollama', 'anthropic', 'azure', 'gemini' or 'openai' (default)`
);
return value;
},
@@ -270,6 +332,26 @@ export const configValidators = {
return value;
},
[CONFIG_KEYS.OCO_TEST_MOCK_TYPE](value: any) {
validateConfig(
CONFIG_KEYS.OCO_TEST_MOCK_TYPE,
TEST_MOCK_TYPES.includes(value),
`${value} is not supported yet, use ${TEST_MOCK_TYPES.map(
(t) => `'${t}'`
).join(', ')}`
);
return value;
},
[CONFIG_KEYS.OCO_OLLAMA_API_URL](value: any) {
// add simple api validator
validateConfig(
CONFIG_KEYS.OCO_API_URL,
typeof value === 'string' && value.startsWith('http'),
`${value} is not a valid URL`
);
return value;
}
};
export type ConfigType = {
@@ -283,14 +365,15 @@ export const getConfig = ({
configPath = defaultConfigPath,
envPath = defaultEnvPath
}: {
configPath?: string
envPath?: string
configPath?: string;
envPath?: string;
} = {}): ConfigType | null => {
dotenv.config({ path: envPath });
const configFromEnv = {
OCO_OPENAI_API_KEY: process.env.OCO_OPENAI_API_KEY,
OCO_ANTHROPIC_API_KEY: process.env.OCO_ANTHROPIC_API_KEY,
OCO_AZURE_API_KEY: process.env.OCO_AZURE_API_KEY,
OCO_GEMINI_API_KEY: process.env.OCO_GEMINI_API_KEY,
OCO_TOKENS_MAX_INPUT: process.env.OCO_TOKENS_MAX_INPUT
? Number(process.env.OCO_TOKENS_MAX_INPUT)
: undefined,
@@ -298,9 +381,11 @@ export const getConfig = ({
? Number(process.env.OCO_TOKENS_MAX_OUTPUT)
: undefined,
OCO_OPENAI_BASE_PATH: process.env.OCO_OPENAI_BASE_PATH,
OCO_GEMINI_BASE_PATH: process.env.OCO_GEMINI_BASE_PATH,
OCO_DESCRIPTION: process.env.OCO_DESCRIPTION === 'true' ? true : false,
OCO_EMOJI: process.env.OCO_EMOJI === 'true' ? true : false,
OCO_MODEL: process.env.OCO_MODEL || getDefaultModel(process.env.OCO_AI_PROVIDER),
OCO_MODEL:
process.env.OCO_MODEL || getDefaultModel(process.env.OCO_AI_PROVIDER),
OCO_LANGUAGE: process.env.OCO_LANGUAGE || 'en',
OCO_MESSAGE_TEMPLATE_PLACEHOLDER:
process.env.OCO_MESSAGE_TEMPLATE_PLACEHOLDER || '$msg',
@@ -310,6 +395,7 @@ export const getConfig = ({
OCO_ONE_LINE_COMMIT:
process.env.OCO_ONE_LINE_COMMIT === 'true' ? true : false,
OCO_AZURE_ENDPOINT: process.env.OCO_AZURE_ENDPOINT || '',
OCO_TEST_MOCK_TYPE: process.env.OCO_TEST_MOCK_TYPE || 'commit-message'
};
const configExists = existsSync(configPath);
@@ -319,9 +405,7 @@ export const getConfig = ({
const config = iniParse(configFile);
for (const configKey of Object.keys(config)) {
if (
['null', 'undefined'].includes(config[configKey])
) {
if (['null', 'undefined'].includes(config[configKey])) {
config[configKey] = undefined;
continue;
}
@@ -345,7 +429,10 @@ export const getConfig = ({
return config;
};
export const setConfig = (keyValues: [key: string, value: string][], configPath: string = defaultConfigPath) => {
export const setConfig = (
keyValues: [key: string, value: string][],
configPath: string = defaultConfigPath
) => {
const config = getConfig() || {};
for (const [configKey, configValue] of keyValues) {
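Assuming the reformatted signature above, a call would presumably look like:
```ts
// Hypothetical usage of setConfig with the new formatting
setConfig([
  ['OCO_MODEL', 'gpt-4o'],
  ['OCO_EMOJI', 'true']
]);
```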


@@ -59,7 +59,7 @@ if (provider === 'anthropic' &&
process.exit(1);
}
class AnthropicAi implements AiEngine {
export class AnthropicAi implements AiEngine {
private anthropicAiApiConfiguration = {
apiKey: apiKey
};
@@ -120,5 +120,3 @@ class AnthropicAi implements AiEngine {
}
};
}
export const anthropicAi = new AnthropicAi();


@@ -54,7 +54,7 @@ if (
const MODEL = config?.OCO_MODEL || 'gpt-3.5-turbo';
class Azure implements AiEngine {
export class Azure implements AiEngine {
private openAI!: OpenAIClient;
constructor() {

src/engine/gemini.ts (new file, 133 lines)

@@ -0,0 +1,133 @@
import { ChatCompletionRequestMessage } from 'openai';
import { AiEngine } from './Engine';
import { Content, GenerativeModel, GoogleGenerativeAI, HarmBlockThreshold, HarmCategory, Part } from '@google/generative-ai';
import { CONFIG_MODES, ConfigType, DEFAULT_TOKEN_LIMITS, getConfig, MODEL_LIST } from '../commands/config';
import { intro, outro } from '@clack/prompts';
import chalk from 'chalk';
import axios from 'axios';
export class Gemini implements AiEngine {
private readonly config: ConfigType;
private readonly googleGenerativeAi: GoogleGenerativeAI;
private ai: GenerativeModel;
// vars
private maxTokens = {
input: DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT,
output: DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT
};
private basePath: string;
private apiKey: string;
private model: string;
constructor() {
this.config = getConfig() as ConfigType;
this.googleGenerativeAi = new GoogleGenerativeAI(this.config.OCO_GEMINI_API_KEY);
this.warmup();
}
async generateCommitMessage(messages: ChatCompletionRequestMessage[]): Promise<string | undefined> {
const systemInstruction = messages.filter(m => m.role === 'system')
.map(m => m.content)
.join('\n');
this.ai = this.googleGenerativeAi.getGenerativeModel({
model: this.model,
systemInstruction,
});
const contents = messages.filter(m => m.role !== 'system')
.map(m => ({ parts: [{ text: m.content } as Part], role: m.role == 'user' ? m.role : 'model', } as Content));
try {
const result = await this.ai.generateContent({
contents,
safetySettings: [
{
category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
},
{
category: HarmCategory.HARM_CATEGORY_HARASSMENT,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
},
{
category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
},
{
category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
},
],
generationConfig: {
maxOutputTokens: this.maxTokens.output,
temperature: 0,
topP: 0.1,
},
});
return result.response.text();
} catch (error) {
const err = error as Error;
outro(`${chalk.red('✖')} ${err?.message || err}`);
if (
axios.isAxiosError<{ error?: { message: string } }>(error) &&
error.response?.status === 401
) {
const geminiError = error.response.data.error;
if (geminiError?.message) outro(geminiError.message);
outro(
'For help look into README https://github.com/di-sukharev/opencommit#setup'
);
}
throw err;
}
}
private warmup(): void {
if (this.config.OCO_TOKENS_MAX_INPUT !== undefined) this.maxTokens.input = this.config.OCO_TOKENS_MAX_INPUT;
if (this.config.OCO_TOKENS_MAX_OUTPUT !== undefined) this.maxTokens.output = this.config.OCO_TOKENS_MAX_OUTPUT;
this.basePath = this.config.OCO_GEMINI_BASE_PATH;
this.apiKey = this.config.OCO_GEMINI_API_KEY;
const [command, mode] = process.argv.slice(2);
const provider = this.config.OCO_AI_PROVIDER;
if (provider === 'gemini' && !this.apiKey &&
command !== 'config' && mode !== 'set') {
intro('opencommit');
outro('OCO_GEMINI_API_KEY is not set, please run `oco config set OCO_GEMINI_API_KEY=<your token> . If you are using GPT, make sure you add payment details, so API works.');
outro(
'For help look into README https://github.com/di-sukharev/opencommit#setup'
);
process.exit(1);
}
this.model = this.config.OCO_MODEL || MODEL_LIST.gemini[0];
if (provider === 'gemini' &&
!MODEL_LIST.gemini.includes(this.model) &&
command !== 'config' &&
mode !== CONFIG_MODES.set) {
outro(
`${chalk.red('✖')} Unsupported model ${this.model} for Gemini. Supported models are: ${MODEL_LIST.gemini.join(
', '
)}`
);
process.exit(1);
}
}
}
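Tying the new engine to the config keys added earlier in this diff, enabling Gemini would presumably look like:
```sh
oco config set OCO_AI_PROVIDER=gemini
oco config set OCO_GEMINI_API_KEY=<your token>
oco config set OCO_MODEL=gemini-1.5-flash
```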


@@ -10,10 +10,15 @@ const config = getConfig();
export class OllamaAi implements AiEngine {
private model = "mistral"; // as default model of Ollama
private url = "http://localhost:11434/api/chat"; // default URL of Ollama API
setModel(model: string) {
this.model = model ?? config?.OCO_MODEL ?? 'mistral';
}
setUrl(url: string) {
this.url = url ?? config?.OCO_OLLAMA_API_URL ?? 'http://localhost:11434/api/chat';
}
async generateCommitMessage(
messages: Array<ChatCompletionRequestMessage>
): Promise<string | undefined> {
@@ -22,7 +27,7 @@ export class OllamaAi implements AiEngine {
//console.log(messages);
//process.exit()
const url = 'http://localhost:11434/api/chat';
const url = this.url;
const p = {
model,
messages,
@@ -45,5 +50,3 @@ export class OllamaAi implements AiEngine {
}
}
}
export const ollamaAi = new OllamaAi();
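Combining the two README examples above, a remote Ollama instance with a non-default model would presumably be invoked as:
```sh
OCO_AI_PROVIDER='ollama/llama3:8b' \
OCO_OLLAMA_API_URL='http://192.168.1.10:11434/api/chat' \
opencommit
```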


@@ -66,7 +66,8 @@ if (provider === 'openai' &&
process.exit(1);
}
class OpenAi implements AiEngine {
export class OpenAi implements AiEngine {
private openAiApiConfiguration = new OpenAiApiConfiguration({
apiKey: apiKey
});
@@ -91,7 +92,7 @@ class OpenAi implements AiEngine {
};
try {
const REQUEST_TOKENS = messages
.map((msg) => tokenCount(msg.content) + 4)
.map((msg) => tokenCount(msg.content as string) + 4)
.reduce((a, b) => a + b, 0);
if (REQUEST_TOKENS > MAX_TOKENS_INPUT - MAX_TOKENS_OUTPUT) {
@@ -124,6 +125,6 @@ class OpenAi implements AiEngine {
throw err;
}
};
}
export const api = new OpenAi();


@@ -1,12 +1,31 @@
import { ChatCompletionRequestMessage } from 'openai';
import { AiEngine } from './Engine';
import { getConfig } from '../commands/config';
export const TEST_MOCK_TYPES = [
'commit-message',
'prompt-module-commitlint-config',
] as const
type TestMockType = typeof TEST_MOCK_TYPES[number];
export class TestAi implements AiEngine {
async generateCommitMessage(
messages: Array<ChatCompletionRequestMessage>
_messages: Array<ChatCompletionRequestMessage>
): Promise<string | undefined> {
return 'test commit message';
const config = getConfig();
switch (config?.OCO_TEST_MOCK_TYPE as TestMockType | undefined) {
case 'commit-message':
return 'fix(testAi.ts): test commit message';
case 'prompt-module-commitlint-config':
return `{\n` +
` "localLanguage": "english",\n` +
` "commitFix": "fix(server): Change 'port' variable to uppercase 'PORT'",\n` +
` "commitFeat": "feat(server): Allow server to listen on a port specified through environment variable",\n` +
` "commitDescription": "Change 'port' variable to uppercase 'PORT'. Allow server to listen on a port specified through environment variable."\n` +
`}`
default:
throw Error('unsupported test mock type')
}
}
}
export const testAi = new TestAi();
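The e2e tests below drive this mock engine through environment variables; mirroring them, a minimal local invocation would be:
```sh
OCO_AI_PROVIDER='test' \
OCO_TEST_MOCK_TYPE='commit-message' \
node ./out/cli.cjs
```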


@@ -49,7 +49,7 @@ export const generateCommitMessageByDiff = async (
const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(fullGitMojiSpec);
const INIT_MESSAGES_PROMPT_LENGTH = INIT_MESSAGES_PROMPT.map(
(msg) => tokenCount(msg.content) + 4
(msg) => tokenCount(msg.content as string) + 4
).reduce((a, b) => a + b, 0);
const MAX_REQUEST_TOKENS =
@@ -65,9 +65,9 @@ export const generateCommitMessageByDiff = async (
fullGitMojiSpec
);
const commitMessages = [];
const commitMessages = [] as string[];
for (const promise of commitMessagePromises) {
commitMessages.push(await promise);
commitMessages.push((await promise) as string);
await delay(2000);
}
@@ -106,7 +106,7 @@ function getMessagesPromisesByChangesInFile(
maxChangeLength
);
const lineDiffsWithHeader = [];
const lineDiffsWithHeader = [] as string[];
for (const change of mergedChanges) {
const totalChange = fileHeader + change;
if (tokenCount(totalChange) > maxChangeLength) {
@@ -135,7 +135,7 @@ function getMessagesPromisesByChangesInFile(
function splitDiff(diff: string, maxChangeLength: number) {
const lines = diff.split('\n');
const splitDiffs = [];
const splitDiffs = [] as string[];
let currentDiff = '';
if (maxChangeLength <= 0) {
@@ -181,7 +181,7 @@ export const getCommitMsgsPromisesFromFileDiffs = async (
// merge multiple files-diffs into 1 prompt to save tokens
const mergedFilesDiffs = mergeDiffs(diffByFiles, maxDiffLength);
const commitMessagePromises = [];
const commitMessagePromises = [] as Promise<string | undefined>[];
for (const fileDiff of mergedFilesDiffs) {
if (tokenCount(fileDiff) >= maxDiffLength) {


@@ -19,7 +19,16 @@ export const configureCommitlintIntegration = async (force = false) => {
const fileExists = await utils.commitlintLLMConfigExists();
let commitLintConfig = await getCommitLintPWDConfig();
const commitLintConfig = await getCommitLintPWDConfig();
if (commitLintConfig === null) {
throw new Error(
`Failed to load @commitlint config. Please check the following:
* @commitlint >= 9.0.0 is installed in the local directory.
* 'node_modules/@commitlint/load' package exists.
* A valid @commitlint configuration exists.
`,
);
}
// debug complete @commitlint configuration
// await fs.writeFile(


@@ -1,11 +1,25 @@
import fs from 'fs/promises';
import path from 'path';
const nodeModulesPath = path.join(
process.env.PWD || process.cwd(),
'node_modules',
'@commitlint',
'load'
);
const getCommitLintModuleType = async (): Promise<'cjs' | 'esm'> => {
const packageFile = 'node_modules/@commitlint/load/package.json';
const packageJsonPath = path.join(
process.env.PWD || process.cwd(),
packageFile,
);
const packageJson = JSON.parse(await fs.readFile(packageJsonPath, 'utf8'));
if (!packageJson) {
throw new Error(`Failed to parse ${packageFile}`);
}
return packageJson.type === 'module' ? 'esm' : 'cjs';
};
/**
* QualifiedConfig from any version of @commitlint/types
* @see https://github.com/conventional-changelog/commitlint/blob/master/@commitlint/types/src/load.ts
*/
type QualifiedConfigOnAnyVersion = { [key:string]: unknown };
/**
* This code is loading the configuration for the `@commitlint` package from the current working
@@ -13,8 +27,31 @@ const nodeModulesPath = path.join(
*
* @returns
*/
export const getCommitLintPWDConfig = async () => {
const load = require(nodeModulesPath).default;
export const getCommitLintPWDConfig = async (): Promise<QualifiedConfigOnAnyVersion | null> => {
let load, nodeModulesPath;
switch (await getCommitLintModuleType()) {
case 'cjs':
/**
* CommonJS (<= commitlint@v18.x.x.)
*/
nodeModulesPath = path.join(
process.env.PWD || process.cwd(),
'node_modules/@commitlint/load',
);
load = require(nodeModulesPath).default;
break;
case 'esm':
/**
* ES Module (commitlint@v19.x.x. <= )
* Directory import is not supported in ES Module resolution, so import the file directly
*/
nodeModulesPath = path.join(
process.env.PWD || process.cwd(),
'node_modules/@commitlint/load/lib/load.js',
);
load = (await import(nodeModulesPath)).default;
break;
}
if (load && typeof load === 'function') {
return await load();
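The reason the ESM branch imports `lib/load.js` directly is that ES module resolution does not allow directory imports; a sketch of the failure mode (hypothetical paths):
```ts
// Under ESM resolution, importing the package directory throws
// ERR_UNSUPPORTED_DIR_IMPORT:
//   await import('/abs/path/node_modules/@commitlint/load')
// Importing the entry file directly works:
//   await import('/abs/path/node_modules/@commitlint/load/lib/load.js')
```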


@@ -20,8 +20,8 @@ export const getJSONBlock = (input: string): string => {
const jsonIndex = input.search('```json');
if (jsonIndex > -1) {
input = input.slice(jsonIndex + 8);
const endJsonIndex = consistency.search('```');
input = input.slice(0, endJsonIndex);
const endJsonIndex = input.search('```');
input = input.slice(0, endJsonIndex);
}
return input;
};
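With the corrected variable, extraction behaves as intended; a quick sketch with a hypothetical input:
```ts
// '```json' is 7 characters; slicing at jsonIndex + 8 also skips the newline
getJSONBlock('intro\n```json\n{ "a": 1 }\n```\ntrailing');
// returns '{ "a": 1 }\n', i.e. the content between the fences
```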


@@ -1,26 +1,32 @@
import { AiEngine } from '../engine/Engine';
import { api } from '../engine/openAi';
import { OpenAi } from '../engine/openAi';
import { Gemini } from '../engine/gemini';
import { getConfig } from '../commands/config';
import { ollamaAi } from '../engine/ollama';
import { azure } from '../engine/azure';
import { anthropicAi } from '../engine/anthropic'
import { testAi } from '../engine/testAi';
import { OllamaAi } from '../engine/ollama';
import { AnthropicAi } from '../engine/anthropic'
import { TestAi } from '../engine/testAi';
import { Azure } from '../engine/azure';
export function getEngine(): AiEngine {
const config = getConfig();
const provider = config?.OCO_AI_PROVIDER;
if (provider?.startsWith('ollama')) {
const ollamaAi = new OllamaAi();
const model = provider.split('/')[1];
if (model) ollamaAi.setModel(model);
return ollamaAi;
} else if (config?.OCO_AI_PROVIDER == 'anthropic') {
return anthropicAi;
} else if (config?.OCO_AI_PROVIDER == 'test') {
return testAi;
} else if (config?.OCO_AI_PROVIDER == 'azure') {
return azure;
} else if (provider == 'anthropic') {
return new AnthropicAi();
} else if (provider == 'test') {
return new TestAi();
} else if (provider == 'gemini') {
return new Gemini();
} else if (provider == 'azure') {
return new Azure();
}
// open ai gpt by default
return api;
//open ai gpt by default
return new OpenAi();
}
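A note on the design change: the module-level singletons (`api`, `ollamaAi`, `anthropicAi`, `testAi`, `azure`) are replaced with per-call construction, so each `getEngine()` call builds a fresh engine from the current config. A usage sketch:
```ts
// Sketch: inside an async context, with `messages` prepared elsewhere
const engine = getEngine(); // e.g. a new Gemini instance when OCO_AI_PROVIDER='gemini'
const commitMessage = await engine.generateCommitMessage(messages);
```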


@@ -5,7 +5,6 @@ import { prepareEnvironment } from './utils';
it('cli flow when there are no changes', async () => {
const { gitDir, cleanup } = await prepareEnvironment();
const { findByText } = await render(`OCO_AI_PROVIDER='test' node`, [resolve('./out/cli.cjs')], { cwd: gitDir });
expect(await findByText('No changes detected')).toBeInTheConsole();


@@ -10,7 +10,6 @@ it('cli flow to generate commit message for 1 new file (staged)', async () => {
await render('git' ,['add index.ts'], { cwd: gitDir });
const { queryByText, findByText, userEvent } = await render(`OCO_AI_PROVIDER='test' node`, [resolve('./out/cli.cjs')], { cwd: gitDir });
expect(await queryByText('No files are staged')).not.toBeInTheConsole();
expect(await queryByText('Do you want to stage all files and generate commit message?')).not.toBeInTheConsole();


@@ -0,0 +1,224 @@
import { resolve } from 'path';
import { render } from 'cli-testing-library';
import 'cli-testing-library/extend-expect';
import { prepareEnvironment, wait } from '../utils';
import path from 'path';
function getAbsolutePath(relativePath: string) {
const scriptDir = path.dirname(__filename);
return path.resolve(scriptDir, relativePath);
}
async function setupCommitlint(dir: string, ver: 9 | 18 | 19) {
let packagePath, packageJsonPath, configPath;
switch (ver) {
case 9:
packagePath = getAbsolutePath('./data/commitlint_9/node_modules');
packageJsonPath = getAbsolutePath('./data/commitlint_9/package.json');
configPath = getAbsolutePath('./data/commitlint_9/commitlint.config.js');
break;
case 18:
packagePath = getAbsolutePath('./data/commitlint_18/node_modules');
packageJsonPath = getAbsolutePath('./data/commitlint_18/package.json');
configPath = getAbsolutePath('./data/commitlint_18/commitlint.config.js');
break;
case 19:
packagePath = getAbsolutePath('./data/commitlint_19/node_modules');
packageJsonPath = getAbsolutePath('./data/commitlint_19/package.json');
configPath = getAbsolutePath('./data/commitlint_19/commitlint.config.js');
break;
}
await render('cp', ['-r', packagePath, '.'], { cwd: dir });
await render('cp', [packageJsonPath, '.'], { cwd: dir });
await render('cp', [configPath, '.'], { cwd: dir });
await wait(3000); // Avoid flakiness by waiting
}
describe('cli flow to run "oco commitlint force"', () => {
it('on commitlint@9 using CJS', async () => {
const { gitDir, cleanup } = await prepareEnvironment();
await setupCommitlint(gitDir, 9);
const npmList = await render('npm', ['list', '@commitlint/load'], {
cwd: gitDir
});
expect(await npmList.findByText('@commitlint/load@9')).toBeInTheConsole();
const { findByText } = await render(
`
OCO_TEST_MOCK_TYPE='prompt-module-commitlint-config' \
OCO_PROMPT_MODULE='@commitlint' \
OCO_AI_PROVIDER='test' \
node ${resolve('./out/cli.cjs')} commitlint force \
`,
[],
{ cwd: gitDir }
);
expect(
await findByText('opencommit — configure @commitlint')
).toBeInTheConsole();
expect(
await findByText('Read @commitlint configuration')
).toBeInTheConsole();
expect(
await findByText('Generating consistency with given @commitlint rules')
).toBeInTheConsole();
expect(
await findByText('Done - please review contents of')
).toBeInTheConsole();
await cleanup();
});
it('on commitlint@18 using CJS', async () => {
const { gitDir, cleanup } = await prepareEnvironment();
await setupCommitlint(gitDir, 18);
const npmList = await render('npm', ['list', '@commitlint/load'], {
cwd: gitDir
});
expect(await npmList.findByText('@commitlint/load@18')).toBeInTheConsole();
const { findByText } = await render(
`
OCO_TEST_MOCK_TYPE='prompt-module-commitlint-config' \
OCO_PROMPT_MODULE='@commitlint' \
OCO_AI_PROVIDER='test' \
node ${resolve('./out/cli.cjs')} commitlint force \
`,
[],
{ cwd: gitDir }
);
expect(
await findByText('opencommit — configure @commitlint')
).toBeInTheConsole();
expect(
await findByText('Read @commitlint configuration')
).toBeInTheConsole();
expect(
await findByText('Generating consistency with given @commitlint rules')
).toBeInTheConsole();
expect(
await findByText('Done - please review contents of')
).toBeInTheConsole();
await cleanup();
});
it('on commitlint@19 using ESM', async () => {
const { gitDir, cleanup } = await prepareEnvironment();
await setupCommitlint(gitDir, 19);
const npmList = await render('npm', ['list', '@commitlint/load'], {
cwd: gitDir
});
expect(await npmList.findByText('@commitlint/load@19')).toBeInTheConsole();
const { findByText } = await render(
`
OCO_TEST_MOCK_TYPE='prompt-module-commitlint-config' \
OCO_PROMPT_MODULE='@commitlint' \
OCO_AI_PROVIDER='test' \
node ${resolve('./out/cli.cjs')} commitlint force \
`,
[],
{ cwd: gitDir }
);
expect(
await findByText('opencommit — configure @commitlint')
).toBeInTheConsole();
expect(
await findByText('Read @commitlint configuration')
).toBeInTheConsole();
expect(
await findByText('Generating consistency with given @commitlint rules')
).toBeInTheConsole();
expect(
await findByText('Done - please review contents of')
).toBeInTheConsole();
await cleanup();
});
});
describe('cli flow to generate commit message using @commitlint prompt-module', () => {
it('on commitlint@19 using ESM', async () => {
const { gitDir, cleanup } = await prepareEnvironment();
// Setup commitlint@19
await setupCommitlint(gitDir, 19);
const npmList = await render('npm', ['list', '@commitlint/load'], {
cwd: gitDir
});
expect(await npmList.findByText('@commitlint/load@19')).toBeInTheConsole();
// Run `oco commitlint force`
const commitlintForce = await render(
`
OCO_TEST_MOCK_TYPE='prompt-module-commitlint-config' \
OCO_PROMPT_MODULE='@commitlint' \
OCO_AI_PROVIDER='test' \
node ${resolve('./out/cli.cjs')} commitlint force \
`,
[],
{ cwd: gitDir }
);
expect(
await commitlintForce.findByText('Done - please review contents of')
).toBeInTheConsole();
// Run `oco commitlint get`
const commitlintGet = await render(
`
OCO_TEST_MOCK_TYPE='prompt-module-commitlint-config' \
OCO_PROMPT_MODULE='@commitlint' \
OCO_AI_PROVIDER='test' \
node ${resolve('./out/cli.cjs')} commitlint get \
`,
[],
{ cwd: gitDir }
);
expect(
await commitlintGet.findByText('[object Object]')
).toBeInTheConsole();
// Run 'oco' using .opencommit-commitlint
await render('echo', [`'console.log("Hello World");' > index.ts`], {
cwd: gitDir
});
await render('git', ['add index.ts'], { cwd: gitDir });
const oco = await render(
`
OCO_TEST_MOCK_TYPE='commit-message' \
OCO_PROMPT_MODULE='@commitlint' \
OCO_AI_PROVIDER='test' \
node ${resolve('./out/cli.cjs')} \
`,
[],
{ cwd: gitDir }
);
expect(
await oco.findByText('Generating the commit message')
).toBeInTheConsole();
expect(
await oco.findByText('Confirm the commit message?')
).toBeInTheConsole();
oco.userEvent.keyboard('[Enter]');
expect(
await oco.findByText('Choose a remote to push to')
).toBeInTheConsole();
oco.userEvent.keyboard('[Enter]');
expect(
await oco.findByText('Successfully pushed all commits to origin')
).toBeInTheConsole();
await cleanup();
});
});


@@ -0,0 +1,3 @@
module.exports = {
extends: ['@commitlint/config-conventional']
};

File diff suppressed because it is too large.


@@ -0,0 +1,15 @@
{
"name": "commitlint-test",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "",
"license": "ISC",
"devDependencies": {
"@commitlint/cli": "^18.0.0",
"@commitlint/config-conventional": "^18.0.0"
}
}


@@ -0,0 +1,3 @@
export default {
extends: ['@commitlint/config-conventional']
};

File diff suppressed because it is too large.


@@ -0,0 +1,16 @@
{
"name": "commitlint-test",
"version": "1.0.0",
"description": "",
"main": "index.js",
"type": "module",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "",
"license": "ISC",
"devDependencies": {
"@commitlint/cli": "^19.0.0",
"@commitlint/config-conventional": "^19.0.0"
}
}


@@ -0,0 +1,3 @@
module.exports = {
extends: ['@commitlint/config-conventional']
};

File diff suppressed because it is too large.


@@ -0,0 +1,15 @@
{
"name": "commitlint-test",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "",
"license": "ISC",
"devDependencies": {
"@commitlint/cli": "^9.0.0",
"@commitlint/config-conventional": "^9.0.0"
}
}

test/e2e/setup.sh (new executable file, 11 lines)

@@ -0,0 +1,11 @@
#!/bin/sh
current_dir=$(pwd)
setup_dir="$(cd "$(dirname "$0")" && pwd)"
# Set up for prompt-module/commitlint
cd $setup_dir && cd prompt-module/data/commitlint_9 && npm ci
cd $setup_dir && cd prompt-module/data/commitlint_18 && npm ci
cd $setup_dir && cd prompt-module/data/commitlint_19 && npm ci
cd $current_dir
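Per the package.json scripts shown earlier, this script is wired in as `test:e2e:setup`, so the whole e2e suite can be prepared and run with:
```sh
npm run test:e2e   # runs test:e2e:setup (this script) and then jest test/e2e
```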


@@ -29,3 +29,5 @@ export const prepareEnvironment = async (): Promise<{
cleanup,
}
}
export const wait = (ms: number) => new Promise(resolve => setTimeout(resolve, ms));


@@ -1,5 +1,8 @@
import 'cli-testing-library/extend-expect'
import { configure } from 'cli-testing-library'
import { jest } from '@jest/globals';
global.jest = jest;
/**
* Adjusted the wait time for waitFor/findByText to 2000ms, because the default 1000ms makes the test results flaky


@@ -55,7 +55,7 @@ OCO_ONE_LINE_COMMIT="true"
expect(config!['OCO_LANGUAGE']).toEqual('de');
expect(config!['OCO_MESSAGE_TEMPLATE_PLACEHOLDER']).toEqual('$m');
expect(config!['OCO_PROMPT_MODULE']).toEqual('@commitlint');
expect(config!['OCO_AI_PROVIDER']).toEqual('ollama');
expect(() => ['ollama', 'gemini'].includes(config!['OCO_AI_PROVIDER'])).toBeTruthy();
expect(config!['OCO_GITPUSH']).toEqual(false);
expect(config!['OCO_ONE_LINE_COMMIT']).toEqual(true);
@@ -96,7 +96,7 @@ OCO_ONE_LINE_COMMIT="true"
expect(config!['OCO_LANGUAGE']).toEqual('de');
expect(config!['OCO_MESSAGE_TEMPLATE_PLACEHOLDER']).toEqual('$m');
expect(config!['OCO_PROMPT_MODULE']).toEqual('@commitlint');
expect(config!['OCO_AI_PROVIDER']).toEqual('ollama');
expect(() => ['ollama', 'gemini'].includes(config!['OCO_AI_PROVIDER'])).toBeTruthy();
expect(config!['OCO_GITPUSH']).toEqual(false);
expect(config!['OCO_ONE_LINE_COMMIT']).toEqual(true);

test/unit/gemini.test.ts (new file, 105 lines)

@@ -0,0 +1,105 @@
import { Gemini } from '../../src/engine/gemini';
import { ChatCompletionRequestMessage } from 'openai';
import { GenerativeModel, GoogleGenerativeAI } from '@google/generative-ai';
import { ConfigType, getConfig } from '../../src/commands/config';
describe('Gemini', () => {
let gemini: Gemini;
let mockConfig: ConfigType;
let mockGoogleGenerativeAi: GoogleGenerativeAI;
let mockGenerativeModel: GenerativeModel;
let mockExit: jest.SpyInstance<never, [code?: number | undefined], any>;
let mockWarmup: jest.SpyInstance<any, unknown[], any>;
const noop: (code?: number | undefined) => never = (code?: number | undefined) => {};
const mockGemini = () => {
gemini = new Gemini();
}
const oldEnv = process.env;
beforeEach(() => {
jest.resetModules();
process.env = { ...oldEnv };
jest.mock('@google/generative-ai');
jest.mock('../src/commands/config');
jest.mock('@clack/prompts', () => ({
intro: jest.fn(),
outro: jest.fn(),
}));
if (mockWarmup) mockWarmup.mockRestore();
mockExit = jest.spyOn(process, 'exit').mockImplementation();
mockConfig = getConfig() as ConfigType;
mockConfig.OCO_AI_PROVIDER = 'gemini';
mockConfig.OCO_GEMINI_API_KEY = 'mock-api-key';
mockConfig.OCO_MODEL = 'gemini-1.5-flash';
mockGoogleGenerativeAi = new GoogleGenerativeAI(mockConfig.OCO_GEMINI_API_KEY);
mockGenerativeModel = mockGoogleGenerativeAi.getGenerativeModel({ model: mockConfig.OCO_MODEL, });
});
afterEach(() => {
gemini = undefined as any;
})
afterAll(() => {
mockExit.mockRestore();
process.env = oldEnv;
});
it('should initialize with correct config', () => {
mockGemini();
// gemini = new Gemini();
expect(gemini).toBeDefined();
});
it('should warmup correctly', () => {
mockWarmup = jest.spyOn(Gemini.prototype as any, 'warmup').mockImplementation(noop);
mockGemini();
expect(gemini).toBeDefined();
});
it('should exit process if OCO_GEMINI_API_KEY is not set and command is not config', () => {
process.env.OCO_GEMINI_API_KEY = undefined;
process.env.OCO_AI_PROVIDER = 'gemini';
mockGemini();
expect(mockExit).toHaveBeenCalledWith(1);
});
it('should exit process if model is not supported and command is not config', () => {
process.env.OCO_GEMINI_API_KEY = undefined;
process.env.OCO_AI_PROVIDER = 'gemini';
mockGemini();
expect(mockExit).toHaveBeenCalledWith(1);
});
it('should generate commit message', async () => {
const mockGenerateContent = jest.fn().mockResolvedValue({ response: { text: () => 'generated content' } });
mockGenerativeModel.generateContent = mockGenerateContent;
mockWarmup = jest.spyOn(Gemini.prototype as any, 'warmup').mockImplementation(noop);
mockGemini();
const messages: ChatCompletionRequestMessage[] = [
{ role: 'system', content: 'system message' },
{ role: 'assistant', content: 'assistant message' },
];
jest.spyOn(gemini, 'generateCommitMessage').mockImplementation(async () => 'generated content');
const result = await gemini.generateCommitMessage(messages);
expect(result).toEqual('generated content');
expect(mockWarmup).toHaveBeenCalled();
});
});


@@ -1,12 +1,12 @@
{
"compilerOptions": {
"target": "ESNext",
"lib": ["ES5", "ES6"],
"target": "ES2020",
"lib": ["ES6", "ES2020"],
"module": "ESNext",
// "rootDir": "./src",
"module": "CommonJS",
"resolveJsonModule": true,
"moduleResolution": "node",
"moduleResolution": "Node",
"allowJs": true,