Compare commits


33 Commits

Author SHA1 Message Date
di-sukharev
2e1a39fd2f 3.2.5 2024-12-14 20:07:56 +01:00
di-sukharev
30ddd05764 jump to 3.2.4 2024-12-14 20:07:52 +01:00
di-sukharev
5fd84937c5 build 2024-12-14 20:05:17 +01:00
di-sukharev
dc4fe43642 build 2024-12-14 20:03:35 +01:00
di-sukharev
98afbe21ea 3.2.4 2024-12-14 20:03:34 +01:00
di-sukharev
041465a81c 3.2.3 2024-12-14 20:03:11 +01:00
GPT8
40fa275b4f 3.2.3 (#431)
* 378: fix hook env (#402)

* fix(prepare-commit-msg-hook): update error handling to provide clearer instructions for setting API keys and improve user guidance

* Fix: a bug that causes an error when pushing without setting git remote (#396)

* update deploy commands

* feat(cli): add context flag for providing additional commit message input

* Fix [Bug]: `punycode` module is deprecated #426 (#433)

Signed-off-by: Tiger Kaovilai <passawit.kaovilai@gmail.com>

* npm audit fix (#432)

Signed-off-by: Tiger Kaovilai <passawit.kaovilai@gmail.com>

* Feat: Add an option to `Don't push` when there are multiple git remotes (#434)

---------

Co-authored-by: GPT8 <57486732+di-sukharev@users.noreply.github.com>

* feat(engine): add support for MLX AI provider (#437)

---------

Signed-off-by: Emmanuel Ferdman <emmanuelferdman@gmail.com>
Co-authored-by: Emmanuel Ferdman <emmanuelferdman@gmail.com>

* feat(config, engine): add support for Mistral AI provider and engine (#436)

* docs(CONTRIBUTING.md): update `TODO.md` reference (#435)

Signed-off-by: Emmanuel Ferdman <emmanuelferdman@gmail.com>

* feat(config, engine): add support for Mistral AI provider and engine

* feat(package): add mistralai and zod dependencies

* fix: recreate package-lock.json with node20

* fix: recreate package-lock.json with node v20.18.1 based on branch dev

---------

Signed-off-by: Emmanuel Ferdman <emmanuelferdman@gmail.com>
Co-authored-by: Emmanuel Ferdman <emmanuelferdman@gmail.com>
Co-authored-by: pedro-valentim <>

---------

Signed-off-by: Tiger Kaovilai <passawit.kaovilai@gmail.com>
Signed-off-by: Emmanuel Ferdman <emmanuelferdman@gmail.com>
Co-authored-by: Takanori Matsumoto <matscube@gmail.com>
Co-authored-by: BILLY Maxime <ozeliurs@gmail.com>
Co-authored-by: Welington Sampaio <welington.sampaio@icloud.com>
Co-authored-by: Tiger Kaovilai <passawit.kaovilai@gmail.com>
Co-authored-by: albi <sigismondi.alberto@gmail.com>
Co-authored-by: Emmanuel Ferdman <emmanuelferdman@gmail.com>
Co-authored-by: Pedro Valentim Silva Leite <18179935+pedro-valentim@users.noreply.github.com>
2024-12-14 20:02:25 +01:00
Emmanuel Ferdman
6f16191af2 docs(CONTRIBUTING.md): update TODO.md reference (#435)
Signed-off-by: Emmanuel Ferdman <emmanuelferdman@gmail.com>
2024-11-29 21:33:39 +01:00
di-sukharev
25105e4c3a docs(CONTRIBUTING.md): update links to point to the correct repository name for consistency and clarity 2024-09-07 19:11:37 +03:00
GPT8
2769121842 3.2.2 (#413)
* feat(config): add support for groq AI provider, including config validation and engine implementation (#381)

* fix migrations (#414)

---------

Co-authored-by: Takanori Matsumoto <matscube@gmail.com>
Co-authored-by: BILLY Maxime <ozeliurs@gmail.com>
2024-09-07 18:17:17 +03:00
di-sukharev
8ae927e2dc build 2024-09-06 13:59:28 +03:00
di-sukharev
2859d4ebe3 3.2.1 2024-09-06 13:59:25 +03:00
GPT8
306522e796 3.2.0 (#412)
* 378: fix hook env (#402)

* fix(prepare-commit-msg-hook): update error handling to provide clearer instructions for setting API keys and improve user guidance

* Fix: a bug that causes an error when pushing without setting git remote (#396)

* refactoring v2 (#408)

* 3.2.0

* update deploy commands

---------

Co-authored-by: Takanori Matsumoto <matscube@gmail.com>
2024-09-06 13:58:54 +03:00
di-sukharev
69b3c00a52 docs(README): update OCO_AI_PROVIDER and OCO_MODEL instructions for clarity and add valid model name options to OCO_MODEL description 2024-09-02 11:13:02 +03:00
di-sukharev
6f4afbfb52 build 2024-09-02 10:19:33 +03:00
di-sukharev
796de7b07e 3.1.2 2024-09-02 10:19:31 +03:00
unconstructive
9ad281a4ee 🔧 (ollama.ts): update client post request to use getUri method for endpoint URL construction (#404) 2024-09-02 10:18:01 +03:00
di-sukharev
1ce357b023 Merge branch 'dev' of github.com:di-sukharev/opencommit into dev 2024-09-01 18:26:33 +03:00
di-sukharev
45dd07d229 Merge branch 'master' into dev 2024-09-01 18:26:20 +03:00
di-sukharev
fa164377e4 build 2024-09-01 18:23:16 +03:00
di-sukharev
0b89767de0 3.1.1 2024-09-01 18:23:14 +03:00
GPT10
2dded4caa4 v 3.1.0 (#397) 2024-09-01 18:21:56 +03:00
GPT10
670f74ebc7 398: make why configurable (#403)
* feat(config): add OCO_WHY configuration option to enable output of change explanations in commit messages
docs(README): document the new OCO_WHY config option and its usage for outputting reasons for changes
2024-09-01 18:17:25 +03:00
Aloha
89d2aa603b feat: support pnpm (#394)
* feat: support pnpm

* fix(commitlint.ts): format commitlint config output as JSON for better readability

* test(commitlint.test.ts): update expected console output for commit message consistency
2024-09-01 16:11:26 +03:00
GPT10
8702c17758 390 add config set tests (#399)
* fix(commit.ts): improve user confirmation handling by exiting on cancel actions to prevent unintended behavior
refactor(commit.ts): streamline conditional checks for user confirmations to enhance code readability and maintainability

* refactor(commit.ts): rename spinner variables for clarity and consistency in commit message generation process
fix(commit.ts): ensure proper stopping of spinners in case of errors during commit message generation and committing process

* refactor(config.ts): extract default configuration to a constant for better maintainability and readability
refactor(config.ts): improve initGlobalConfig function to accept a configPath parameter for flexibility
feat(config.ts): enhance getConfig function to support separate paths for global and environment configurations
test(config.test.ts): update tests to reflect changes in config handling and ensure proper functionality
style(utils.ts): clean up code formatting for consistency and readability
style(tsconfig.json): adjust formatting in tsconfig.json for better clarity and maintainability

* fix(utils.ts): add existsSync check before removing temp directory to prevent errors if directory does not exist (#401)

---------

Co-authored-by: Takanori Matsumoto <matscube@gmail.com>
2024-09-01 13:28:06 +03:00
di-sukharev
60597d23eb pump v to 3.1.0 2024-08-27 17:31:40 +03:00
di-sukharev
6f04927369 build 2024-08-27 17:31:23 +03:00
di-sukharev
0c0cf9c627 Merge remote-tracking branch 'origin/master' into dev 2024-08-27 17:31:13 +03:00
GPT10
8fe8e614ac refactoring_v1 (#391) 2024-08-27 17:04:36 +03:00
Takanori Matsumoto
68c9ed359c fix(dependencies): update tr46, webidl-conversions, and whatwg-url to latest versions for compatibility and security improvements (#395) 2024-08-26 10:28:34 +03:00
di-sukharev
1b29f3a9fd build 2024-08-19 12:37:38 +03:00
GPT10
596dcd7cea 3.0.20 (#389) 2024-08-19 12:37:17 +03:00
di-sukharev
eb3be62a4f 3.0.20 2024-08-19 12:34:35 +03:00
46 changed files with 86075 additions and 39271 deletions

CONTRIBUTING.md

@@ -18,7 +18,7 @@ To get started, follow these steps:
1. Clone the project repository locally.
2. Install dependencies with `npm install`.
3. Run the project with `npm run dev`.
4. See [issues](https://github.com/di-sukharev/open-commit/issues) or [TODO.md](../TODO.md) to help the project.
4. See [issues](https://github.com/di-sukharev/opencommit/issues) or [TODO.md](TODO.md) to help the project.
## Commit message guidelines
@@ -30,7 +30,7 @@ If you encounter any issues while using the project, please report them on the G
## Contacts
If you have any questions about contributing to the project, please contact by [creating an issue](https://github.com/di-sukharev/open-commit/issues) on the GitHub issue tracker.
If you have any questions about contributing to the project, please contact by [creating an issue](https://github.com/di-sukharev/opencommit/issues) on the GitHub issue tracker.
## License

.gitignore (vendored, 3 changes)

@@ -10,4 +10,5 @@ uncaughtExceptions.log
.vscode
src/*.json
.idea
test.ts
test.ts
notes.md

README.md (104 changes)

@@ -2,7 +2,7 @@
<div>
<img src=".github/logo-grad.svg" alt="OpenCommit logo"/>
<h1 align="center">OpenCommit</h1>
<h4 align="center">Follow the bird <a href="https://twitter.com/_sukharev_"><img src="https://img.shields.io/twitter/follow/_sukharev_?style=flat&label=_sukharev_&logo=twitter&color=0bf&logoColor=fff" align="center"></a>
<h4 align="center">Author <a href="https://twitter.com/_sukharev_"><img src="https://img.shields.io/twitter/follow/_sukharev_?style=flat&label=_sukharev_&logo=twitter&color=0bf&logoColor=fff" align="center"></a>
</div>
<h2>Auto-generate meaningful commits in a second</h2>
<p>Killing lame commits with AI 🤯🔫</p>
@@ -16,7 +16,7 @@
<img src=".github/opencommit-example.png" alt="OpenCommit example"/>
</div>
All the commits in this repo are authored by OpenCommit — look at [the commits](https://github.com/di-sukharev/opencommit/commit/eae7618d575ee8d2e9fff5de56da79d40c4bc5fc) to see how OpenCommit works. Emojis and long commit descriptions are configurable.
All the commits in this repo are authored by OpenCommit — look at [the commits](https://github.com/di-sukharev/opencommit/commit/eae7618d575ee8d2e9fff5de56da79d40c4bc5fc) to see how OpenCommit works. Emojis and long commit descriptions are configurable, basically everything is.
## Setup OpenCommit as a CLI tool
@@ -28,36 +28,27 @@ You can use OpenCommit by simply running it via the CLI like this `oco`. 2 secon
npm install -g opencommit
```
Alternatively, run it via `npx opencommit` or `bunx opencommit`.
MacOS may ask to run the command with `sudo` when installing a package globally.
2. Get your API key from [OpenAI](https://platform.openai.com/account/api-keys). Make sure that you add your payment details, so the API works.
2. Get your API key from [OpenAI](https://platform.openai.com/account/api-keys) or other supported LLM providers (we support them all). Make sure that you add your OpenAI payment details to your account, so the API works.
3. Set the key to OpenCommit config:
```sh
oco config set OCO_OPENAI_API_KEY=<your_api_key>
oco config set OCO_API_KEY=<your_api_key>
```
Your API key is stored locally in the `~/.opencommit` config file.
## Usage
You can call OpenCommit directly to generate a commit message for your staged changes:
```sh
git add <files...>
opencommit
```
You can also use the `oco` shortcut:
You can call OpenCommit with `oco` command to generate a commit message for your staged changes:
```sh
git add <files...>
oco
```
Running `git add` is optional; `oco` will do it for you.
### Running locally with Ollama
You can also run it with a local model through Ollama:
@@ -68,20 +59,17 @@ You can also run it with local model through ollama:
```sh
git add <files...>
OCO_AI_PROVIDER='ollama' opencommit
oco config set OCO_AI_PROVIDER='ollama' OCO_MODEL='llama3:8b'
```
If you want to use a model other than mistral, you can do so by setting the `OCO_AI_PROVIDER` environment variable as follows:
Default model is `mistral`.
If you have Ollama set up in Docker or on another machine with GPUs (not locally), you can change the default endpoint URL.
You can do so by setting the `OCO_API_URL` config variable as follows:
```sh
OCO_AI_PROVIDER='ollama/llama3:8b' opencommit
```
if you have ollama that is set up in docker/ on another machine with GPUs (not locally), you can change the default endpoint url.
You can do so by setting the `OCO_OLLAMA_API_URL` environment variable as follows:
```sh
OCO_OLLAMA_API_URL='http://192.168.1.10:11434/api/chat' opencommit
oco config set OCO_API_URL='http://192.168.1.10:11434/api/chat'
```
where `192.168.1.10` is an example endpoint URL where you have Ollama set up.
@@ -95,6 +83,7 @@ There are multiple optional flags that can be used with the `oco` command:
Link to the GitMoji specification: https://gitmoji.dev/
This flag can only be used if the `OCO_EMOJI` configuration item is set to `true`. This flag allows users to use all emojis in the GitMoji specification. By default, the GitMoji full specification is set to `false`, which only includes 10 emojis (🐛✨📝🚀✅♻️⬆️🔧🌐💡).
This is done to limit the number of tokens sent in each request. However, if you would like to use the full GitMoji specification, you can use the `--fgm` flag.
```
@@ -116,20 +105,23 @@ oco --yes
Create a `.env` file and add OpenCommit config variables there like this:
```env
OCO_OPENAI_API_KEY=<your OpenAI API token>
...
OCO_AI_PROVIDER=<openai (default), anthropic, azure, ollama, gemini, flowise>
OCO_API_KEY=<your OpenAI API token> // or other LLM provider API token
OCO_API_URL=<may be used to set proxy path to OpenAI api>
OCO_TOKENS_MAX_INPUT=<max model token limit (default: 4096)>
OCO_TOKENS_MAX_OUTPUT=<max response tokens (default: 500)>
OCO_OPENAI_BASE_PATH=<may be used to set proxy path to OpenAI api>
OCO_DESCRIPTION=<postface a message with ~3 sentences description of the changes>
OCO_EMOJI=<boolean, add GitMoji>
OCO_MODEL=<either 'gpt-4o', 'gpt-4', 'gpt-4-turbo', 'gpt-3.5-turbo' (default), 'gpt-3.5-turbo-0125', 'gpt-4-1106-preview', 'gpt-4-turbo-preview' or 'gpt-4-0125-preview'>
OCO_MODEL=<either 'gpt-4o', 'gpt-4', 'gpt-4-turbo', 'gpt-3.5-turbo' (default), 'gpt-3.5-turbo-0125', 'gpt-4-1106-preview', 'gpt-4-turbo-preview' or 'gpt-4-0125-preview' or any Anthropic or Ollama model or any string basically, but it should be a valid model name>
OCO_LANGUAGE=<locale, scroll to the bottom to see options>
OCO_MESSAGE_TEMPLATE_PLACEHOLDER=<message template placeholder, default: '$msg'>
OCO_PROMPT_MODULE=<either conventional-commit or @commitlint, default: conventional-commit>
OCO_ONE_LINE_COMMIT=<one line commit message, default: false>
OCO_AI_PROVIDER=<anthropic, azure, ollama or ollama/model default ollama model: mistral>
```
Global configs are the same as local configs, but they are stored in the global `~/.opencommit` config file and set with the `oco config set` command, e.g. `oco config set OCO_MODEL=gpt-4o`.
### Global config for all repos
Local config still takes priority over global config, but you may set `OCO_MODEL` and `OCO_LOCALE` globally and set local configs for `OCO_EMOJI` and `OCO_DESCRIPTION` per repo, which is more convenient.
@@ -137,7 +129,7 @@ Local config still has more priority than Global config, but you may set `OCO_MO
Simply set any of the variables above like this:
```sh
oco config set OCO_MODEL=gpt-4o
oco config set OCO_MODEL=gpt-4o-mini
```
Configure [GitMoji](https://gitmoji.dev/) to preface a message.
@@ -152,42 +144,54 @@ To remove preface emojis:
oco config set OCO_EMOJI=false
```
### Switch to GPT-4 or other models
Other config options behave the same.
By default, OpenCommit uses `gpt-4o` model.
### Output WHY the changes were done (WIP)
You may switch to GPT-4 which performs better, but costs ~x15 times more 🤠
You can set the `OCO_WHY` config to `true` to have OpenCommit output a short description of WHY the changes were done after the commit message. Default is `false`.
To make this accurate, we must store 'what files do' in some kind of index or embedding and perform a lookup (kind of RAG) for an accurate git commit message. If you feel like building this, comment on this ticket https://github.com/di-sukharev/opencommit/issues/398 and let's go from there together.
```sh
oco config set OCO_MODEL=gpt-4
oco config set OCO_WHY=true
```
### Switch to GPT-4 or other models
By default, OpenCommit uses `gpt-4o-mini` model.
You may switch to `gpt-4o`, which performs better but costs more 🤠
```sh
oco config set OCO_MODEL=gpt-4o
```
or, as a cheaper option:
```sh
oco config set OCO_MODEL=gpt-4o-mini
oco config set OCO_MODEL=gpt-3.5-turbo
```
### Switch to Azure OpenAI
### Switch to other LLM providers with a custom URL
By default, OpenCommit uses [OpenAI](https://openai.com).
You could switch to [Azure OpenAI Service](https://learn.microsoft.com/azure/cognitive-services/openai/)🚀
You could switch to [Azure OpenAI Service](https://learn.microsoft.com/azure/cognitive-services/openai/) or Flowise or Ollama.
```sh
opencommit config set OCO_AI_PROVIDER=azure
```
oco config set OCO_AI_PROVIDER=azure OCO_API_KEY=<your_azure_api_key> OCO_API_URL=<your_azure_endpoint>
Of course need to set 'OPENAI_API_KEY'. And also need to set the
'OPENAI_BASE_PATH' for the endpoint and set the deployment name to
'model'.
oco config set OCO_AI_PROVIDER=flowise OCO_API_KEY=<your_flowise_api_key> OCO_API_URL=<your_flowise_endpoint>
oco config set OCO_AI_PROVIDER=ollama OCO_API_KEY=<your_ollama_api_key> OCO_API_URL=<your_ollama_endpoint>
```
### Locale configuration
To globally specify the language used to generate commit messages:
```sh
# de, German ,Deutsch
# de, German, Deutsch
oco config set OCO_LANGUAGE=de
oco config set OCO_LANGUAGE=German
oco config set OCO_LANGUAGE=Deutsch
@@ -201,14 +205,16 @@ oco config set OCO_LANGUAGE=française
The default language setting is **English**
All available languages are currently listed in the [i18n](https://github.com/di-sukharev/opencommit/tree/master/src/i18n) folder
### Push to git
### Push to git (gonna be deprecated)
Pushing to git is on by default but if you would like to turn it off just use:
A prompt for pushing to git is on by default, but if you would like to turn it off, just use:
```sh
oco config set OCO_GITPUSH=false
```
and it will exit right after the commit is confirmed, without asking whether you would like to push to the remote.
### Switch to `@commitlint`
OpenCommit allows you to choose the prompt module used to generate commit messages. By default, OpenCommit uses its conventional-commit message generator. However, you can switch to using the `@commitlint` prompt module if you prefer. This option lets you generate commit messages in accordance with the local config.
@@ -291,7 +297,7 @@ In our codebase, the implementation of this feature can be found in the followin
```javascript
commitMessage = messageTemplate.replace(
config?.OCO_MESSAGE_TEMPLATE_PLACEHOLDER,
config.OCO_MESSAGE_TEMPLATE_PLACEHOLDER,
commitMessage
);
```
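To make the substitution concrete, here is a minimal self-contained sketch of the same replace call; the template `'$msg #205'` and the generated message are invented example values, not project defaults:

```ts
// Sketch of the placeholder substitution above (example values only).
const config = { OCO_MESSAGE_TEMPLATE_PLACEHOLDER: '$msg' };

// A template passed as an extra argument, e.g. `oco '$msg #205'`.
const messageTemplate = '$msg #205';
const generatedMessage = 'feat(cli): add context flag';

// String.prototype.replace with a string pattern swaps the first
// occurrence of the placeholder for the generated commit message.
const commitMessage = messageTemplate.replace(
  config.OCO_MESSAGE_TEMPLATE_PLACEHOLDER,
  generatedMessage
);

console.log(commitMessage); // "feat(cli): add context flag #205"
```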
@@ -348,7 +354,7 @@ Or follow the process of your IDE Source Control feature, when it calls `git com
OpenCommit is now available as a GitHub Action which automatically improves all new commits messages when you push to remote!
This is great if you want to make sure all of the commits in all of your repository branches are meaningful and not lame like `fix1` or `done2`.
This is great if you want to make sure all commits in all of your repository branches are meaningful and not lame like `fix1` or `done2`.
Create a file `.github/workflows/opencommit.yml` with the contents below:
@@ -383,7 +389,7 @@ jobs:
# set openAI api key in repo actions secrets,
# for openAI keys go to: https://platform.openai.com/account/api-keys
# for repo secret go to: <your_repo_url>/settings/secrets/actions
OCO_OPENAI_API_KEY: ${{ secrets.OCO_OPENAI_API_KEY }}
OCO_API_KEY: ${{ secrets.OCO_API_KEY }}
# customization
OCO_TOKENS_MAX_INPUT: 4096

out/cli.cjs (64032 changes)

File diff suppressed because one or more lines are too long

package-lock.json (generated, 735 changes)

File diff suppressed because it is too large

package.json

@@ -1,6 +1,6 @@
{
"name": "opencommit",
"version": "3.0.19",
"version": "3.2.5",
"description": "Auto-generate impressive commits in 1 second. Killing lame commits with AI 🤯🔫",
"keywords": [
"git",
@@ -17,11 +17,11 @@
],
"main": "cli.js",
"bin": {
"opencommit": "./out/cli.cjs",
"oco": "./out/cli.cjs"
"opencommit": "out/cli.cjs",
"oco": "out/cli.cjs"
},
"repository": {
"url": "https://github.com/di-sukharev/opencommit"
"url": "git+https://github.com/di-sukharev/opencommit.git"
},
"type": "module",
"author": "https://github.com/di-sukharev",
@@ -46,8 +46,9 @@
"dev:gemini": "OCO_AI_PROVIDER='gemini' ts-node ./src/cli.ts",
"build": "rimraf out && node esbuild.config.js",
"build:push": "npm run build && git add . && git commit -m 'build' && git push",
"deploy": "npm run build:push && git push --tags && npm publish --tag latest",
"deploy:patch": "npm version patch && npm run deploy",
"deploy": "npm publish --tag latest",
"deploy:build": "npm run build:push && git push --tags && npm run deploy",
"deploy:patch": "npm version patch && npm run deploy:build",
"lint": "eslint src --ext ts && tsc --noEmit",
"format": "prettier --write src",
"test": "node --no-warnings --experimental-vm-modules $( [ -f ./node_modules/.bin/jest ] && echo ./node_modules/.bin/jest || which jest ) test/unit",
@@ -57,7 +58,8 @@
"test:unit:docker": "npm run test:docker-build && DOCKER_CONTENT_TRUST=0 docker run --rm oco-test npm run test:unit",
"test:e2e": "npm run test:e2e:setup && jest test/e2e",
"test:e2e:setup": "sh test/e2e/setup.sh",
"test:e2e:docker": "npm run test:docker-build && DOCKER_CONTENT_TRUST=0 docker run --rm oco-test npm run test:e2e"
"test:e2e:docker": "npm run test:docker-build && DOCKER_CONTENT_TRUST=0 docker run --rm oco-test npm run test:e2e",
"mlx:start": "OCO_AI_PROVIDER='mlx' node ./out/cli.cjs"
},
"devDependencies": {
"@commitlint/types": "^17.4.4",
@@ -81,14 +83,14 @@
"@actions/core": "^1.10.0",
"@actions/exec": "^1.1.1",
"@actions/github": "^5.1.1",
"@azure/openai": "^1.0.0-beta.12",
"@anthropic-ai/sdk": "^0.19.2",
"@azure/openai": "^1.0.0-beta.12",
"@clack/prompts": "^0.6.1",
"@dqbd/tiktoken": "^1.0.2",
"@google/generative-ai": "^0.11.4",
"@mistralai/mistralai": "^1.3.5",
"@octokit/webhooks-schemas": "^6.11.0",
"@octokit/webhooks-types": "^6.11.0",
"ai": "^2.2.14",
"axios": "^1.3.4",
"chalk": "^5.2.0",
"cleye": "^1.3.2",
@@ -97,9 +99,8 @@
"ignore": "^5.2.4",
"ini": "^3.0.1",
"inquirer": "^9.1.4",
"openai": "^3.2.1"
},
"overrides": {
"whatwg-url": "13.0.0"
"openai": "^4.57.0",
"punycode": "^2.3.1",
"zod": "^3.23.8"
}
}

src/cli.ts

@@ -9,6 +9,7 @@ import { configCommand } from './commands/config';
import { hookCommand, isHookCalled } from './commands/githook.js';
import { prepareCommitMessageHook } from './commands/prepare-commit-msg-hook';
import { checkIsLatestVersion } from './utils/checkIsLatestVersion';
import { runMigrations } from './migrations/_run.js';
const extraArgs = process.argv.slice(2);
@@ -19,6 +20,12 @@ cli(
commands: [configCommand, hookCommand, commitlintConfigCommand],
flags: {
fgm: Boolean,
context: {
type: String,
alias: 'c',
description: 'Additional user input context for the commit message',
default: ''
},
yes: {
type: Boolean,
alias: 'y',
@@ -30,12 +37,13 @@ cli(
help: { description: packageJSON.description }
},
async ({ flags }) => {
await runMigrations();
await checkIsLatestVersion();
if (await isHookCalled()) {
prepareCommitMessageHook();
} else {
commit(extraArgs, false, flags.fgm, flags.yes);
commit(extraArgs, flags.context, false, flags.fgm, flags.yes);
}
},
extraArgs
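As a rough sketch (assumed signatures, abbreviated from the diff above; the real `cleye` wiring is omitted), the new `--context`/`-c` flag value simply travels into `commit` as its second argument:

```ts
// Hedged sketch of the new call chain; types are abbreviated assumptions.
type Flags = { context: string; fgm: boolean; yes: boolean };

async function commit(
  extraArgs: string[],
  context: string,
  isStageAllFlag: boolean,
  fullGitMojiSpec: boolean,
  skipCommitConfirmation: boolean
): Promise<void> {
  // Placeholder body: the real command stages files and generates the message.
  console.log({ extraArgs, context, fullGitMojiSpec, skipCommitConfirmation });
}

async function run(extraArgs: string[], flags: Flags) {
  // e.g. invoked as: oco --context "touches the auth flow" --yes
  await commit(extraArgs, flags.context, false, flags.fgm, flags.yes);
}

run([], { context: 'touches the auth flow', fgm: false, yes: true });
```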

src/commands/ENUMS.ts (new file, 5 lines)

@@ -0,0 +1,5 @@
export enum COMMANDS {
config = 'config',
hook = 'hook',
commitlint = 'commitlint'
}

src/commands/commit.ts

@@ -1,6 +1,3 @@
import chalk from 'chalk';
import { execa } from 'execa';
import {
confirm,
intro,
@@ -10,7 +7,8 @@ import {
select,
spinner
} from '@clack/prompts';
import chalk from 'chalk';
import { execa } from 'execa';
import { generateCommitMessageByDiff } from '../generateCommitMessageFromGitDiff';
import {
assertGitRepo,
@@ -32,43 +30,53 @@ const getGitRemotes = async () => {
// Check for the presence of message templates
const checkMessageTemplate = (extraArgs: string[]): string | false => {
for (const key in extraArgs) {
if (extraArgs[key].includes(config?.OCO_MESSAGE_TEMPLATE_PLACEHOLDER))
if (extraArgs[key].includes(config.OCO_MESSAGE_TEMPLATE_PLACEHOLDER))
return extraArgs[key];
}
return false;
};
const generateCommitMessageFromGitDiff = async (
diff: string,
extraArgs: string[],
fullGitMojiSpec: boolean,
skipCommitConfirmation: boolean
): Promise<void> => {
interface GenerateCommitMessageFromGitDiffParams {
diff: string;
extraArgs: string[];
context?: string;
fullGitMojiSpec?: boolean;
skipCommitConfirmation?: boolean;
}
const generateCommitMessageFromGitDiff = async ({
diff,
extraArgs,
context = '',
fullGitMojiSpec = false,
skipCommitConfirmation = false
}: GenerateCommitMessageFromGitDiffParams): Promise<void> => {
await assertGitRepo();
const commitSpinner = spinner();
commitSpinner.start('Generating the commit message');
const commitGenerationSpinner = spinner();
commitGenerationSpinner.start('Generating the commit message');
try {
let commitMessage = await generateCommitMessageByDiff(
diff,
fullGitMojiSpec
fullGitMojiSpec,
context
);
const messageTemplate = checkMessageTemplate(extraArgs);
if (
config?.OCO_MESSAGE_TEMPLATE_PLACEHOLDER &&
config.OCO_MESSAGE_TEMPLATE_PLACEHOLDER &&
typeof messageTemplate === 'string'
) {
const messageTemplateIndex = extraArgs.indexOf(messageTemplate);
extraArgs.splice(messageTemplateIndex, 1);
commitMessage = messageTemplate.replace(
config?.OCO_MESSAGE_TEMPLATE_PLACEHOLDER,
config.OCO_MESSAGE_TEMPLATE_PLACEHOLDER,
commitMessage
);
}
commitSpinner.stop('📝 Commit message generated');
commitGenerationSpinner.stop('📝 Commit message generated');
outro(
`Generated commit message:
@@ -77,27 +85,33 @@ ${commitMessage}
${chalk.grey('——————————————————')}`
);
const isCommitConfirmedByUser = skipCommitConfirmation || await confirm({
message: 'Confirm the commit message?'
});
const isCommitConfirmedByUser =
skipCommitConfirmation ||
(await confirm({
message: 'Confirm the commit message?'
}));
if (isCommitConfirmedByUser && !isCancel(isCommitConfirmedByUser)) {
if (isCancel(isCommitConfirmedByUser)) process.exit(1);
if (isCommitConfirmedByUser) {
const committingChangesSpinner = spinner();
committingChangesSpinner.start('Committing the changes');
const { stdout } = await execa('git', [
'commit',
'-m',
commitMessage,
...extraArgs
]);
outro(`${chalk.green('✔')} Successfully committed`);
committingChangesSpinner.stop(
`${chalk.green('✔')} Successfully committed`
);
outro(stdout);
const remotes = await getGitRemotes();
// user isn't pushing, return early
if (config?.OCO_GITPUSH === false)
return
if (config.OCO_GITPUSH === false) return;
if (!remotes.length) {
const { stdout } = await execa('git', ['push']);
@@ -105,12 +119,14 @@ ${chalk.grey('——————————————————')}`
process.exit(0);
}
if (remotes.length === 1 && config?.OCO_GITPUSH !== true) {
if (remotes.length === 1) {
const isPushConfirmedByUser = await confirm({
message: 'Do you want to run `git push`?'
});
if (isPushConfirmedByUser && !isCancel(isPushConfirmedByUser)) {
if (isCancel(isPushConfirmedByUser)) process.exit(1);
if (isPushConfirmedByUser) {
const pushSpinner = spinner();
pushSpinner.start(`Running 'git push ${remotes[0]}'`);
@@ -122,8 +138,7 @@ ${chalk.grey('——————————————————')}`
]);
pushSpinner.stop(
`${chalk.green('✔')} Successfully pushed all commits to ${
remotes[0]
`${chalk.green('✔')} Successfully pushed all commits to ${remotes[0]
}`
);
@@ -133,42 +148,51 @@ ${chalk.grey('——————————————————')}`
process.exit(0);
}
} else {
const skipOption = `don't push`
const selectedRemote = (await select({
message: 'Choose a remote to push to',
options: remotes.map((remote) => ({ value: remote, label: remote }))
options: [...remotes, skipOption].map((remote) => ({ value: remote, label: remote })),
})) as string;
if (!isCancel(selectedRemote)) {
if (isCancel(selectedRemote)) process.exit(1);
if (selectedRemote !== skipOption) {
const pushSpinner = spinner();
pushSpinner.start(`Running 'git push ${selectedRemote}'`);
const { stdout } = await execa('git', ['push', selectedRemote]);
if (stdout) outro(stdout);
pushSpinner.stop(
`${chalk.green(
'✔'
)} Successfully pushed all commits to ${selectedRemote}`
)} successfully pushed all commits to ${selectedRemote}`
);
if (stdout) outro(stdout);
} else outro(`${chalk.gray('✖')} process cancelled`);
}
}
}
if (!isCommitConfirmedByUser && !isCancel(isCommitConfirmedByUser)) {
} else {
const regenerateMessage = await confirm({
message: 'Do you want to regenerate the message ?'
message: 'Do you want to regenerate the message?'
});
if (regenerateMessage && !isCancel(isCommitConfirmedByUser)) {
await generateCommitMessageFromGitDiff(
if (isCancel(regenerateMessage)) process.exit(1);
if (regenerateMessage) {
await generateCommitMessageFromGitDiff({
diff,
extraArgs,
fullGitMojiSpec
)
});
}
}
} catch (error) {
commitSpinner.stop('📝 Commit message generated');
commitGenerationSpinner.stop(
`${chalk.red('✖')} Failed to generate the commit message`
);
console.log(error);
const err = error as Error;
outro(`${chalk.red('✖')} ${err?.message || err}`);
@@ -178,6 +202,7 @@ ${chalk.grey('——————————————————')}`
export async function commit(
extraArgs: string[] = [],
context: string = '',
isStageAllFlag: Boolean = false,
fullGitMojiSpec: boolean = false,
skipCommitConfirmation: boolean = false
@@ -216,11 +241,10 @@ export async function commit(
message: 'Do you want to stage all files and generate commit message?'
});
if (
isStageAllAndCommitConfirmedByUser &&
!isCancel(isStageAllAndCommitConfirmedByUser)
) {
await commit(extraArgs, true, fullGitMojiSpec);
if (isCancel(isStageAllAndCommitConfirmedByUser)) process.exit(1);
if (isStageAllAndCommitConfirmedByUser) {
await commit(extraArgs, context, true, fullGitMojiSpec);
process.exit(1);
}
@@ -238,7 +262,7 @@ export async function commit(
await gitAdd({ files });
}
await commit(extraArgs, false, fullGitMojiSpec);
await commit(extraArgs, context, false, fullGitMojiSpec);
process.exit(1);
}
@@ -249,12 +273,13 @@ export async function commit(
);
const [, generateCommitError] = await trytm(
generateCommitMessageFromGitDiff(
await getDiff({ files: stagedFiles }),
generateCommitMessageFromGitDiff({
diff: await getDiff({ files: stagedFiles }),
extraArgs,
context,
fullGitMojiSpec,
skipCommitConfirmation
)
})
);
if (generateCommitError) {
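The net effect of the refactor above is that five positional parameters collapse into one options object, so optional arguments stay self-documenting at call sites. A hedged sketch of the new call shape (placeholder body and example values):

```ts
// Sketch only: types mirror the interface in the diff, the body is a stub.
interface GenerateCommitMessageFromGitDiffParams {
  diff: string;
  extraArgs: string[];
  context?: string;
  fullGitMojiSpec?: boolean;
  skipCommitConfirmation?: boolean;
}

async function generateCommitMessageFromGitDiff({
  diff,
  extraArgs,
  context = '',
  fullGitMojiSpec = false,
  skipCommitConfirmation = false
}: GenerateCommitMessageFromGitDiffParams): Promise<void> {
  // The real implementation generates the message from the diff and commits.
  console.log({ diff, extraArgs, context, fullGitMojiSpec, skipCommitConfirmation });
}

// Callers can omit defaults and name exactly what they pass:
generateCommitMessageFromGitDiff({
  diff: 'diff --git a/file.ts b/file.ts',
  extraArgs: [],
  context: 'example user-provided context'
});
```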

src/commands/commitlint.ts

@@ -1,11 +1,9 @@
import { intro, outro } from '@clack/prompts';
import chalk from 'chalk';
import { command } from 'cleye';
import { intro, outro } from '@clack/prompts';
import { COMMANDS } from '../CommandsEnum';
import { configureCommitlintIntegration } from '../modules/commitlint/config';
import { getCommitlintLLMConfig } from '../modules/commitlint/utils';
import { COMMANDS } from './ENUMS';
export enum CONFIG_MODES {
get = 'get',
@@ -25,7 +23,7 @@ export const commitlintConfigCommand = command(
if (mode === CONFIG_MODES.get) {
const commitLintConfig = await getCommitlintLLMConfig();
outro(commitLintConfig.toString());
outro(JSON.stringify(commitLintConfig, null, 2));
return;
}

src/commands/config.ts

@@ -1,3 +1,4 @@
import { intro, outro } from '@clack/prompts';
import chalk from 'chalk';
import { command } from 'cleye';
import * as dotenv from 'dotenv';
@@ -5,37 +6,26 @@ import { existsSync, readFileSync, writeFileSync } from 'fs';
import { parse as iniParse, stringify as iniStringify } from 'ini';
import { homedir } from 'os';
import { join as pathJoin, resolve as pathResolve } from 'path';
import { intro, outro } from '@clack/prompts';
import { COMMANDS } from '../CommandsEnum';
import { getI18nLocal } from '../i18n';
import { COMMANDS } from './ENUMS';
import { TEST_MOCK_TYPES } from '../engine/testAi';
import { getI18nLocal, i18n } from '../i18n';
export enum CONFIG_KEYS {
OCO_OPENAI_API_KEY = 'OCO_OPENAI_API_KEY',
OCO_ANTHROPIC_API_KEY = 'OCO_ANTHROPIC_API_KEY',
OCO_AZURE_API_KEY = 'OCO_AZURE_API_KEY',
OCO_GEMINI_API_KEY = 'OCO_GEMINI_API_KEY',
OCO_GEMINI_BASE_PATH = 'OCO_GEMINI_BASE_PATH',
OCO_API_KEY = 'OCO_API_KEY',
OCO_TOKENS_MAX_INPUT = 'OCO_TOKENS_MAX_INPUT',
OCO_TOKENS_MAX_OUTPUT = 'OCO_TOKENS_MAX_OUTPUT',
OCO_OPENAI_BASE_PATH = 'OCO_OPENAI_BASE_PATH',
OCO_DESCRIPTION = 'OCO_DESCRIPTION',
OCO_EMOJI = 'OCO_EMOJI',
OCO_MODEL = 'OCO_MODEL',
OCO_LANGUAGE = 'OCO_LANGUAGE',
OCO_WHY = 'OCO_WHY',
OCO_MESSAGE_TEMPLATE_PLACEHOLDER = 'OCO_MESSAGE_TEMPLATE_PLACEHOLDER',
OCO_PROMPT_MODULE = 'OCO_PROMPT_MODULE',
OCO_AI_PROVIDER = 'OCO_AI_PROVIDER',
OCO_GITPUSH = 'OCO_GITPUSH',
OCO_ONE_LINE_COMMIT = 'OCO_ONE_LINE_COMMIT',
OCO_AZURE_ENDPOINT = 'OCO_AZURE_ENDPOINT',
OCO_TEST_MOCK_TYPE = 'OCO_TEST_MOCK_TYPE',
OCO_API_URL = 'OCO_API_URL',
OCO_OLLAMA_API_URL = 'OCO_OLLAMA_API_URL',
OCO_FLOWISE_ENDPOINT = 'OCO_FLOWISE_ENDPOINT',
OCO_FLOWISE_API_KEY = 'OCO_FLOWISE_API_KEY'
OCO_GITPUSH = 'OCO_GITPUSH' // todo: deprecate
}
export enum CONFIG_MODES {
@@ -86,6 +76,58 @@ export const MODEL_LIST = {
'gemini-1.0-pro',
'gemini-pro-vision',
'text-embedding-004'
],
groq: [
'llama3-70b-8192', // Meta Llama 3 70B (default one, no daily token limit and 14 400 reqs/day)
'llama3-8b-8192', // Meta Llama 3 8B
'llama-guard-3-8b', // Llama Guard 3 8B
'llama-3.1-8b-instant', // Llama 3.1 8B (Preview)
'llama-3.1-70b-versatile', // Llama 3.1 70B (Preview)
'gemma-7b-it', // Gemma 7B
'gemma2-9b-it' // Gemma 2 9B
],
mistral: [
'ministral-3b-2410',
'ministral-3b-latest',
'ministral-8b-2410',
'ministral-8b-latest',
'open-mistral-7b',
'mistral-tiny',
'mistral-tiny-2312',
'open-mistral-nemo',
'open-mistral-nemo-2407',
'mistral-tiny-2407',
'mistral-tiny-latest',
'open-mixtral-8x7b',
'mistral-small',
'mistral-small-2312',
'open-mixtral-8x22b',
'open-mixtral-8x22b-2404',
'mistral-small-2402',
'mistral-small-2409',
'mistral-small-latest',
'mistral-medium-2312',
'mistral-medium',
'mistral-medium-latest',
'mistral-large-2402',
'mistral-large-2407',
'mistral-large-2411',
'mistral-large-latest',
'pixtral-large-2411',
'pixtral-large-latest',
'codestral-2405',
'codestral-latest',
'codestral-mamba-2407',
'open-codestral-mamba',
'codestral-mamba-latest',
'pixtral-12b-2409',
'pixtral-12b',
'pixtral-12b-latest',
'mistral-embed',
'mistral-moderation-2411',
'mistral-moderation-latest',
]
};
@@ -93,18 +135,24 @@ const getDefaultModel = (provider: string | undefined): string => {
switch (provider) {
case 'ollama':
return '';
case 'mlx':
return '';
case 'anthropic':
return MODEL_LIST.anthropic[0];
case 'gemini':
return MODEL_LIST.gemini[0];
case 'groq':
return MODEL_LIST.groq[0];
case 'mistral':
return MODEL_LIST.mistral[0];
default:
return MODEL_LIST.openai[0];
}
};
export enum DEFAULT_TOKEN_LIMITS {
DEFAULT_MAX_TOKENS_INPUT = 4096,
DEFAULT_MAX_TOKENS_OUTPUT = 500
DEFAULT_MAX_TOKENS_INPUT = 40960,
DEFAULT_MAX_TOKENS_OUTPUT = 4096
}
const validateConfig = (
@@ -113,8 +161,10 @@ const validateConfig = (
validationMessage: string
) => {
if (!condition) {
outro(`${chalk.red('✖')} wrong value for ${key}: ${validationMessage}.`);
outro(
`${chalk.red('✖')} Unsupported config key ${key}: ${validationMessage}`
'For more help refer to docs https://github.com/di-sukharev/opencommit'
);
process.exit(1);
@@ -122,76 +172,19 @@ const validateConfig = (
};
export const configValidators = {
[CONFIG_KEYS.OCO_OPENAI_API_KEY](value: any, config: any = {}) {
if (config.OCO_AI_PROVIDER == 'gemini') return value;
[CONFIG_KEYS.OCO_API_KEY](value: any, config: any = {}) {
if (config.OCO_AI_PROVIDER !== 'openai') return value;
//need api key unless running locally with ollama
validateConfig(
'OpenAI API_KEY',
value ||
config.OCO_ANTHROPIC_API_KEY ||
config.OCO_AI_PROVIDER.startsWith('ollama') ||
config.OCO_AZURE_API_KEY ||
config.OCO_AI_PROVIDER == 'test' ||
config.OCO_AI_PROVIDER == 'flowise',
'You need to provide an OpenAI/Anthropic/Azure or other provider API key via `oco config set OCO_OPENAI_API_KEY=your_key`, for help refer to docs https://github.com/di-sukharev/opencommit'
);
validateConfig(
CONFIG_KEYS.OCO_OPENAI_API_KEY,
value.startsWith('sk-') || config.OCO_AI_PROVIDER != 'openai',
'Must start with "sk-" for openai provider'
'OCO_API_KEY',
typeof value === 'string' && value.length > 0,
'Empty value is not allowed'
);
return value;
},
[CONFIG_KEYS.OCO_AZURE_API_KEY](value: any, config: any = {}) {
validateConfig(
'ANTHROPIC_API_KEY',
value ||
config.OCO_OPENAI_API_KEY ||
config.OCO_AZURE_API_KEY ||
config.OCO_AI_PROVIDER == 'ollama' ||
config.OCO_AI_PROVIDER == 'test' ||
config.OCO_AI_PROVIDER == 'flowise',
'You need to provide an OpenAI/Anthropic/Azure API key'
);
return value;
},
[CONFIG_KEYS.OCO_GEMINI_API_KEY](value: any, config: any = {}) {
// only need to check for gemini api key if using gemini
if (config.OCO_AI_PROVIDER != 'gemini') return value;
validateConfig(
'Gemini API Key',
value || config.OCO_GEMINI_API_KEY || config.OCO_AI_PROVIDER == 'test',
'You need to provide an Gemini API key'
);
return value;
},
[CONFIG_KEYS.OCO_ANTHROPIC_API_KEY](value: any, config: any = {}) {
validateConfig(
'ANTHROPIC_API_KEY',
value ||
config.OCO_OPENAI_API_KEY ||
config.OCO_AI_PROVIDER == 'ollama' ||
config.OCO_AI_PROVIDER == 'test' ||
config.OCO_AI_PROVIDER == 'flowise',
'You need to provide an OpenAI/Anthropic API key'
);
return value;
},
[CONFIG_KEYS.OCO_FLOWISE_API_KEY](value: any, config: any = {}) {
validateConfig(
CONFIG_KEYS.OCO_FLOWISE_API_KEY,
value || config.OCO_AI_PROVIDER != 'flowise',
'You need to provide a flowise API key'
'OCO_API_KEY',
value,
'You need to provide the OCO_API_KEY when OCO_AI_PROVIDER set to "openai" (default) or "ollama" or "mlx" or "azure" or "gemini" or "flowise" or "anthropic". Run `oco config set OCO_API_KEY=your_key OCO_AI_PROVIDER=openai`'
);
return value;
@@ -201,25 +194,17 @@ export const configValidators = {
validateConfig(
CONFIG_KEYS.OCO_DESCRIPTION,
typeof value === 'boolean',
'Must be true or false'
'Must be boolean: true or false'
);
return value;
},
[CONFIG_KEYS.OCO_TOKENS_MAX_INPUT](value: any) {
// If the value is a string, convert it to a number.
if (typeof value === 'string') {
value = parseInt(value);
validateConfig(
CONFIG_KEYS.OCO_TOKENS_MAX_INPUT,
!isNaN(value),
'Must be a number'
);
}
value = parseInt(value);
validateConfig(
CONFIG_KEYS.OCO_TOKENS_MAX_INPUT,
value ? typeof value === 'number' : undefined,
!isNaN(value),
'Must be a number'
);
@@ -227,18 +212,10 @@ export const configValidators = {
},
[CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT](value: any) {
// If the value is a string, convert it to a number.
if (typeof value === 'string') {
value = parseInt(value);
validateConfig(
CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT,
!isNaN(value),
'Must be a number'
);
}
value = parseInt(value);
validateConfig(
CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT,
value ? typeof value === 'number' : undefined,
!isNaN(value),
'Must be a number'
);
@@ -249,26 +226,29 @@ export const configValidators = {
validateConfig(
CONFIG_KEYS.OCO_EMOJI,
typeof value === 'boolean',
'Must be true or false'
'Must be boolean: true or false'
);
return value;
},
[CONFIG_KEYS.OCO_LANGUAGE](value: any) {
const supportedLanguages = Object.keys(i18n);
validateConfig(
CONFIG_KEYS.OCO_LANGUAGE,
getI18nLocal(value),
`${value} is not supported yet`
`${value} is not supported yet. Supported languages: ${supportedLanguages}`
);
return getI18nLocal(value);
},
[CONFIG_KEYS.OCO_OPENAI_BASE_PATH](value: any) {
[CONFIG_KEYS.OCO_API_URL](value: any) {
validateConfig(
CONFIG_KEYS.OCO_OPENAI_BASE_PATH,
CONFIG_KEYS.OCO_API_URL,
typeof value === 'string',
'Must be string'
`${value} is not a valid URL. It should start with 'http://' or 'https://'.`
);
return value;
},
@@ -304,6 +284,7 @@ export const configValidators = {
return value;
},
// todo: deprecate
[CONFIG_KEYS.OCO_GITPUSH](value: any) {
validateConfig(
CONFIG_KEYS.OCO_GITPUSH,
@@ -314,19 +295,23 @@ export const configValidators = {
},
[CONFIG_KEYS.OCO_AI_PROVIDER](value: any) {
if (!value) value = 'openai';
validateConfig(
CONFIG_KEYS.OCO_AI_PROVIDER,
[
'',
'openai',
'mistral',
'anthropic',
'gemini',
'azure',
'test',
'flowise'
'flowise',
'groq'
].includes(value) || value.startsWith('ollama'),
`${value} is not supported yet, use 'ollama', 'anthropic', 'azure', 'gemini', 'flowise' or 'openai' (default)`
`${value} is not supported yet, use 'ollama', 'mlx', 'anthropic', 'azure', 'gemini', 'flowise', 'mistral' or 'openai' (default)`
);
return value;
},
@@ -340,26 +325,6 @@ export const configValidators = {
return value;
},
[CONFIG_KEYS.OCO_AZURE_ENDPOINT](value: any) {
validateConfig(
CONFIG_KEYS.OCO_AZURE_ENDPOINT,
value.includes('openai.azure.com'),
'Must be in format "https://<resource name>.openai.azure.com/"'
);
return value;
},
[CONFIG_KEYS.OCO_FLOWISE_ENDPOINT](value: any) {
validateConfig(
CONFIG_KEYS.OCO_FLOWISE_ENDPOINT,
typeof value === 'string' && value.includes(':'),
'Value must be string and should include both I.P. and port number' // Considering the possibility of DNS lookup or feeding the I.P. explicitely, there is no pattern to verify, except a column for the port number
);
return value;
},
[CONFIG_KEYS.OCO_TEST_MOCK_TYPE](value: any) {
validateConfig(
CONFIG_KEYS.OCO_TEST_MOCK_TYPE,
@@ -371,83 +336,64 @@ export const configValidators = {
return value;
},
[CONFIG_KEYS.OCO_OLLAMA_API_URL](value: any) {
// add simple api validator
[CONFIG_KEYS.OCO_WHY](value: any) {
validateConfig(
CONFIG_KEYS.OCO_API_URL,
typeof value === 'string' && value.startsWith('http'),
`${value} is not a valid URL`
CONFIG_KEYS.OCO_WHY,
typeof value === 'boolean',
'Must be true or false'
);
return value;
}
};
export enum OCO_AI_PROVIDER_ENUM {
OLLAMA = 'ollama',
OPENAI = 'openai',
ANTHROPIC = 'anthropic',
GEMINI = 'gemini',
AZURE = 'azure',
TEST = 'test',
FLOWISE = 'flowise',
GROQ = 'groq',
MISTRAL = 'mistral',
MLX = 'mlx'
}
export type ConfigType = {
[key in CONFIG_KEYS]?: any;
[CONFIG_KEYS.OCO_API_KEY]?: string;
[CONFIG_KEYS.OCO_TOKENS_MAX_INPUT]: number;
[CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT]: number;
[CONFIG_KEYS.OCO_API_URL]?: string;
[CONFIG_KEYS.OCO_DESCRIPTION]: boolean;
[CONFIG_KEYS.OCO_EMOJI]: boolean;
[CONFIG_KEYS.OCO_WHY]: boolean;
[CONFIG_KEYS.OCO_MODEL]: string;
[CONFIG_KEYS.OCO_LANGUAGE]: string;
[CONFIG_KEYS.OCO_MESSAGE_TEMPLATE_PLACEHOLDER]: string;
[CONFIG_KEYS.OCO_PROMPT_MODULE]: OCO_PROMPT_MODULE_ENUM;
[CONFIG_KEYS.OCO_AI_PROVIDER]: OCO_AI_PROVIDER_ENUM;
[CONFIG_KEYS.OCO_GITPUSH]: boolean;
[CONFIG_KEYS.OCO_ONE_LINE_COMMIT]: boolean;
[CONFIG_KEYS.OCO_TEST_MOCK_TYPE]: string;
};
const defaultConfigPath = pathJoin(homedir(), '.opencommit');
const defaultEnvPath = pathResolve(process.cwd(), '.env');
export const getConfig = ({
configPath = defaultConfigPath,
envPath = defaultEnvPath
}: {
configPath?: string;
envPath?: string;
} = {}): ConfigType | null => {
dotenv.config({ path: envPath });
const configFromEnv = {
OCO_OPENAI_API_KEY: process.env.OCO_OPENAI_API_KEY,
OCO_ANTHROPIC_API_KEY: process.env.OCO_ANTHROPIC_API_KEY,
OCO_AZURE_API_KEY: process.env.OCO_AZURE_API_KEY,
OCO_GEMINI_API_KEY: process.env.OCO_GEMINI_API_KEY,
OCO_TOKENS_MAX_INPUT: process.env.OCO_TOKENS_MAX_INPUT
? Number(process.env.OCO_TOKENS_MAX_INPUT)
: undefined,
OCO_TOKENS_MAX_OUTPUT: process.env.OCO_TOKENS_MAX_OUTPUT
? Number(process.env.OCO_TOKENS_MAX_OUTPUT)
: undefined,
OCO_OPENAI_BASE_PATH: process.env.OCO_OPENAI_BASE_PATH,
OCO_GEMINI_BASE_PATH: process.env.OCO_GEMINI_BASE_PATH,
OCO_DESCRIPTION: process.env.OCO_DESCRIPTION === 'true' ? true : false,
OCO_EMOJI: process.env.OCO_EMOJI === 'true' ? true : false,
OCO_MODEL:
process.env.OCO_MODEL || getDefaultModel(process.env.OCO_AI_PROVIDER),
OCO_LANGUAGE: process.env.OCO_LANGUAGE || 'en',
OCO_MESSAGE_TEMPLATE_PLACEHOLDER:
process.env.OCO_MESSAGE_TEMPLATE_PLACEHOLDER || '$msg',
OCO_PROMPT_MODULE: process.env.OCO_PROMPT_MODULE || 'conventional-commit',
OCO_AI_PROVIDER: process.env.OCO_AI_PROVIDER || 'openai',
OCO_GITPUSH: process.env.OCO_GITPUSH === 'false' ? false : true,
OCO_ONE_LINE_COMMIT:
process.env.OCO_ONE_LINE_COMMIT === 'true' ? true : false,
OCO_AZURE_ENDPOINT: process.env.OCO_AZURE_ENDPOINT || undefined,
OCO_TEST_MOCK_TYPE: process.env.OCO_TEST_MOCK_TYPE || 'commit-message',
OCO_FLOWISE_ENDPOINT: process.env.OCO_FLOWISE_ENDPOINT || ':',
OCO_FLOWISE_API_KEY: process.env.OCO_FLOWISE_API_KEY || undefined,
OCO_OLLAMA_API_URL: process.env.OCO_OLLAMA_API_URL || undefined
};
const configExists = existsSync(configPath);
if (!configExists) return configFromEnv;
export const defaultConfigPath = pathJoin(homedir(), '.opencommit');
export const defaultEnvPath = pathResolve(process.cwd(), '.env');
const configFile = readFileSync(configPath, 'utf8');
const config = iniParse(configFile);
const assertConfigsAreValid = (config: Record<string, any>) => {
for (const [key, value] of Object.entries(config)) {
if (!value) continue;
for (const configKey of Object.keys(config)) {
if (['null', 'undefined'].includes(config[configKey])) {
config[configKey] = undefined;
if (typeof value === 'string' && ['null', 'undefined'].includes(value)) {
config[key] = undefined;
continue;
}
try {
const validator = configValidators[configKey as CONFIG_KEYS];
const validValue = validator(
config[configKey] ?? configFromEnv[configKey as CONFIG_KEYS],
config
);
config[configKey] = validValue;
try {
const validate = configValidators[key as CONFIG_KEYS];
validate(value, config);
} catch (error) {
outro(`Unknown '${configKey}' config option or missing validator.`);
outro(`Unknown '${key}' config option or missing validator.`);
outro(
`Manually fix the '.env' file or global '~/.opencommit' config file.`
);
@@ -455,37 +401,187 @@ export const getConfig = ({
process.exit(1);
}
}
};
return config;
enum OCO_PROMPT_MODULE_ENUM {
CONVENTIONAL_COMMIT = 'conventional-commit',
COMMITLINT = '@commitlint'
}
export const DEFAULT_CONFIG = {
OCO_TOKENS_MAX_INPUT: DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT,
OCO_TOKENS_MAX_OUTPUT: DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT,
OCO_DESCRIPTION: false,
OCO_EMOJI: false,
OCO_MODEL: getDefaultModel('openai'),
OCO_LANGUAGE: 'en',
OCO_MESSAGE_TEMPLATE_PLACEHOLDER: '$msg',
OCO_PROMPT_MODULE: OCO_PROMPT_MODULE_ENUM.CONVENTIONAL_COMMIT,
OCO_AI_PROVIDER: OCO_AI_PROVIDER_ENUM.OPENAI,
OCO_ONE_LINE_COMMIT: false,
OCO_TEST_MOCK_TYPE: 'commit-message',
OCO_WHY: false,
OCO_GITPUSH: true // todo: deprecate
};
const initGlobalConfig = (configPath: string = defaultConfigPath) => {
writeFileSync(configPath, iniStringify(DEFAULT_CONFIG), 'utf8');
return DEFAULT_CONFIG;
};
const parseConfigVarValue = (value?: any) => {
try {
return JSON.parse(value);
} catch (error) {
return value;
}
};
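`parseConfigVarValue` leans on `JSON.parse` to coerce env-file strings into booleans and numbers, falling back to the raw value when parsing fails. A small demonstration with example inputs:

```ts
// Demonstration of the JSON.parse-based coercion above (example values only).
const parseConfigVarValue = (value?: any) => {
  try {
    return JSON.parse(value);
  } catch (error) {
    return value;
  }
};

console.log(parseConfigVarValue('true'));    // true (boolean)
console.log(parseConfigVarValue('4096'));    // 4096 (number)
console.log(parseConfigVarValue('gpt-4o'));  // "gpt-4o" (not valid JSON, kept as string)
console.log(parseConfigVarValue(undefined)); // undefined (parse throws, raw value returned)
```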
const getEnvConfig = (envPath: string) => {
dotenv.config({ path: envPath });
return {
OCO_MODEL: process.env.OCO_MODEL,
OCO_API_URL: process.env.OCO_API_URL,
OCO_API_KEY: process.env.OCO_API_KEY,
OCO_AI_PROVIDER: process.env.OCO_AI_PROVIDER as OCO_AI_PROVIDER_ENUM,
OCO_TOKENS_MAX_INPUT: parseConfigVarValue(process.env.OCO_TOKENS_MAX_INPUT),
OCO_TOKENS_MAX_OUTPUT: parseConfigVarValue(
process.env.OCO_TOKENS_MAX_OUTPUT
),
OCO_DESCRIPTION: parseConfigVarValue(process.env.OCO_DESCRIPTION),
OCO_EMOJI: parseConfigVarValue(process.env.OCO_EMOJI),
OCO_LANGUAGE: process.env.OCO_LANGUAGE,
OCO_MESSAGE_TEMPLATE_PLACEHOLDER:
process.env.OCO_MESSAGE_TEMPLATE_PLACEHOLDER,
OCO_PROMPT_MODULE: process.env.OCO_PROMPT_MODULE as OCO_PROMPT_MODULE_ENUM,
OCO_ONE_LINE_COMMIT: parseConfigVarValue(process.env.OCO_ONE_LINE_COMMIT),
OCO_TEST_MOCK_TYPE: process.env.OCO_TEST_MOCK_TYPE,
OCO_GITPUSH: parseConfigVarValue(process.env.OCO_GITPUSH) // todo: deprecate
};
};
export const setGlobalConfig = (
config: ConfigType,
configPath: string = defaultConfigPath
) => {
writeFileSync(configPath, iniStringify(config), 'utf8');
};
export const getIsGlobalConfigFileExist = (
configPath: string = defaultConfigPath
) => {
return existsSync(configPath);
};
export const getGlobalConfig = (configPath: string = defaultConfigPath) => {
let globalConfig: ConfigType;
const isGlobalConfigFileExist = getIsGlobalConfigFileExist(configPath);
if (!isGlobalConfigFileExist) globalConfig = initGlobalConfig(configPath);
else {
const configFile = readFileSync(configPath, 'utf8');
globalConfig = iniParse(configFile) as ConfigType;
}
return globalConfig;
};
/**
* Merges two configs.
* Env config takes precedence over global ~/.opencommit config file
* @param main - env config
* @param fallback - global ~/.opencommit config file
* @returns merged config
*/
const mergeConfigs = (main: Partial<ConfigType>, fallback: ConfigType) => {
const allKeys = new Set([...Object.keys(main), ...Object.keys(fallback)]);
return Array.from(allKeys).reduce((acc, key) => {
acc[key] = parseConfigVarValue(main[key] ?? fallback[key]);
return acc;
}, {} as ConfigType);
};
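So with env values winning over the global file, a merge of two partial configs behaves like this (illustrative values; the inline logic restates `mergeConfigs` to keep the example runnable):

```ts
// Illustrative merge: env keys take precedence, everything else falls
// back to the global ~/.opencommit values. Example data, not real config.
const envConfig: Record<string, any> = { OCO_MODEL: 'gpt-4o', OCO_EMOJI: undefined };
const globalConfig: Record<string, any> = {
  OCO_MODEL: 'gpt-4o-mini',
  OCO_EMOJI: true,
  OCO_LANGUAGE: 'en'
};

const allKeys = new Set([...Object.keys(envConfig), ...Object.keys(globalConfig)]);
const merged = Array.from(allKeys).reduce((acc, key) => {
  acc[key] = envConfig[key] ?? globalConfig[key];
  return acc;
}, {} as Record<string, any>);

console.log(merged);
// { OCO_MODEL: 'gpt-4o', OCO_EMOJI: true, OCO_LANGUAGE: 'en' }
```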
interface GetConfigOptions {
globalPath?: string;
envPath?: string;
setDefaultValues?: boolean;
}
const cleanUndefinedValues = (config: ConfigType) => {
return Object.fromEntries(
Object.entries(config).map(([_, v]) => {
try {
if (typeof v === 'string') {
if (v === 'undefined') return [_, undefined];
if (v === 'null') return [_, null];
const parsedValue = JSON.parse(v);
return [_, parsedValue];
}
return [_, v];
} catch (error) {
return [_, v];
}
})
);
};
export const getConfig = ({
envPath = defaultEnvPath,
globalPath = defaultConfigPath
}: GetConfigOptions = {}): ConfigType => {
const envConfig = getEnvConfig(envPath);
const globalConfig = getGlobalConfig(globalPath);
const config = mergeConfigs(envConfig, globalConfig);
const cleanConfig = cleanUndefinedValues(config);
return cleanConfig as ConfigType;
};
export const setConfig = (
keyValues: [key: string, value: string][],
configPath: string = defaultConfigPath
keyValues: [key: string, value: string | boolean | number | null][],
globalConfigPath: string = defaultConfigPath
) => {
const config = getConfig() || {};
const config = getConfig({
globalPath: globalConfigPath
});
for (const [configKey, configValue] of keyValues) {
if (!configValidators.hasOwnProperty(configKey)) {
throw new Error(`Unsupported config key: ${configKey}`);
const configToSet = {};
for (let [key, value] of keyValues) {
if (!configValidators.hasOwnProperty(key)) {
const supportedKeys = Object.keys(configValidators).join('\n');
throw new Error(
`Unsupported config key: ${key}. Expected keys are:\n\n${supportedKeys}.\n\nFor more help refer to our docs: https://github.com/di-sukharev/opencommit`
);
}
let parsedConfigValue;
try {
parsedConfigValue = JSON.parse(configValue);
if (typeof value === 'string') parsedConfigValue = JSON.parse(value);
else parsedConfigValue = value;
} catch (error) {
parsedConfigValue = configValue;
parsedConfigValue = value;
}
const validValue =
configValidators[configKey as CONFIG_KEYS](parsedConfigValue);
config[configKey as CONFIG_KEYS] = validValue;
const validValue = configValidators[key as CONFIG_KEYS](
parsedConfigValue,
config
);
configToSet[key] = validValue;
}
writeFileSync(configPath, iniStringify(config), 'utf8');
setGlobalConfig(mergeConfigs(configToSet, config), globalConfigPath);
outro(`${chalk.green('✔')} Config successfully set`);
outro(`${chalk.green('✔')} config successfully set`);
};
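Assuming the signature above, programmatic use would look roughly like this; end users normally reach it through `oco config set KEY=value` instead (the import path is an assumption based on the diff):

```ts
// Hypothetical direct call to setConfig; the import path is assumed.
import { setConfig } from './commands/config';

const pairs: [string, string | boolean | number | null][] = [
  ['OCO_MODEL', 'gpt-4o'],
  ['OCO_EMOJI', true], // non-strings skip the JSON.parse branch
  ['OCO_TOKENS_MAX_OUTPUT', '4096'] // numeric strings are parsed to numbers
];

setConfig(pairs);
```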
export const configCommand = command(
@@ -494,9 +590,9 @@ export const configCommand = command(
parameters: ['<mode>', '<key=values...>']
},
async (argv) => {
intro('opencommit — config');
try {
const { mode, keyValues } = argv._;
intro(`COMMAND: config ${mode} ${keyValues}`);
if (mode === CONFIG_MODES.get) {
const config = getConfig() || {};

src/commands/githook.ts

@@ -1,13 +1,11 @@
import { intro, outro } from '@clack/prompts';
import chalk from 'chalk';
import { command } from 'cleye';
import { existsSync } from 'fs';
import fs from 'fs/promises';
import path from 'path';
import { intro, outro } from '@clack/prompts';
import { COMMANDS } from '../CommandsEnum.js';
import { assertGitRepo, getCoreHooksPath } from '../utils/git.js';
import { COMMANDS } from './ENUMS';
const HOOK_NAME = 'prepare-commit-msg';
const DEFAULT_SYMLINK_URL = path.join('.git', 'hooks', HOOK_NAME);
@@ -94,7 +92,7 @@ export const hookCommand = command(
}
throw new Error(
`Unsupported mode: ${mode}. Supported modes are: 'set' or 'unset', do: \`oco hook set\``
`Unsupported mode: ${mode}. Supported modes are: 'set' or 'unset'. Run: \`oco hook set\``
);
} catch (error) {
outro(`${chalk.red('✖')} ${error}`);

View File

@@ -39,10 +39,11 @@ export const prepareCommitMessageHook = async (
const config = getConfig();
if (!config?.OCO_OPENAI_API_KEY && !config?.OCO_ANTHROPIC_API_KEY && !config?.OCO_AZURE_API_KEY) {
throw new Error(
'No OPEN_AI_API or OCO_ANTHROPIC_API_KEY or OCO_AZURE_API_KEY exists. Set your key in ~/.opencommit'
if (!config.OCO_API_KEY) {
outro(
'No OCO_API_KEY is set. Set your key via `oco config set OCO_API_KEY=<value>. For more info see https://github.com/di-sukharev/opencommit'
);
return;
}
const spin = spinner();

src/engine/Engine.ts

@@ -1,7 +1,30 @@
import { ChatCompletionRequestMessage } from 'openai';
import AnthropicClient from '@anthropic-ai/sdk';
import { OpenAIClient as AzureOpenAIClient } from '@azure/openai';
import { GoogleGenerativeAI as GeminiClient } from '@google/generative-ai';
import { AxiosInstance as RawAxiosClient } from 'axios';
import { OpenAI as OpenAIClient } from 'openai';
import { Mistral as MistralClient } from '@mistralai/mistralai';
export interface AiEngineConfig {
apiKey: string;
model: string;
maxTokensOutput: number;
maxTokensInput: number;
baseURL?: string;
}
type Client =
| OpenAIClient
| AzureOpenAIClient
| AnthropicClient
| RawAxiosClient
| GeminiClient
| MistralClient;
export interface AiEngine {
config: AiEngineConfig;
client: Client;
generateCommitMessage(
messages: Array<ChatCompletionRequestMessage>
): Promise<string | undefined>;
messages: Array<OpenAIClient.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | null | undefined>;
}
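Any provider engine only has to satisfy this contract. As a minimal illustration, a hypothetical `EchoEngine` (invented for this sketch, not one of the repo's providers) could look like:

```ts
// Hypothetical engine satisfying the AiEngine contract above.
// EchoEngine is invented for illustration; it is not a repo provider.
import { OpenAI as OpenAIClient } from 'openai';
import { AiEngine, AiEngineConfig } from './Engine';

export class EchoEngine implements AiEngine {
  config: AiEngineConfig;
  client: OpenAIClient;

  constructor(config: AiEngineConfig) {
    this.config = config;
    this.client = new OpenAIClient({ apiKey: config.apiKey });
  }

  async generateCommitMessage(
    messages: Array<OpenAIClient.Chat.Completions.ChatCompletionMessageParam>
  ): Promise<string | null | undefined> {
    // A real engine would call its provider's API here; this one just
    // echoes the last message so the control flow is visible.
    const last = messages[messages.length - 1];
    return typeof last?.content === 'string' ? last.content : null;
  }
}
```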

src/engine/anthropic.ts

@@ -1,104 +1,62 @@
import AnthropicClient from '@anthropic-ai/sdk';
import {
MessageCreateParamsNonStreaming,
MessageParam
} from '@anthropic-ai/sdk/resources';
import AnthropicClient from '@anthropic-ai/sdk';
import { outro } from '@clack/prompts';
import axios from 'axios';
import chalk from 'chalk';
import { OpenAI } from 'openai';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine, AiEngineConfig } from './Engine';

interface AnthropicConfig extends AiEngineConfig {}

export class AnthropicEngine implements AiEngine {
  config: AnthropicConfig;
  client: AnthropicClient;

  constructor(config) {
    this.config = config;
    this.client = new AnthropicClient({ apiKey: this.config.apiKey });
  }

  public generateCommitMessage = async (
    messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
  ): Promise<string | undefined> => {
    const systemMessage = messages.find((msg) => msg.role === 'system')
      ?.content as string;
    const restMessages = messages.filter(
      (msg) => msg.role !== 'system'
    ) as MessageParam[];

    const params: MessageCreateParamsNonStreaming = {
      model: this.config.model,
      system: systemMessage,
      messages: restMessages,
      temperature: 0,
      top_p: 0.1,
      max_tokens: this.config.maxTokensOutput
    };

    try {
      const REQUEST_TOKENS = messages
        .map((msg) => tokenCount(msg.content as string) + 4)
        .reduce((a, b) => a + b, 0);

      if (
        REQUEST_TOKENS >
        this.config.maxTokensInput - this.config.maxTokensOutput
      ) {
        throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
      }

      const data = await this.client.messages.create(params);
      const message = data?.content[0].text;

      return message;
    } catch (error) {
      const err = error as Error;
      outro(`${chalk.red('✖')} ${err?.message || err}`);
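Taken together with the getEngine refactor later in this diff, a minimal usage sketch of the new engine; the config values below are illustrative assumptions, not project defaults:

// Hypothetical usage sketch; in the CLI this object is built by getEngine()
// from OCO_* config values rather than written by hand.
const engine = new AnthropicEngine({
  apiKey: process.env.OCO_API_KEY!, // assumption: key supplied via env
  model: 'claude-3-5-sonnet-20240620', // assumption: any Anthropic model id
  maxTokensInput: 4096,
  maxTokensOutput: 500,
  baseURL: '' // unused by this engine
});

const message = await engine.generateCommitMessage([
  { role: 'system', content: 'You generate commit messages from diffs.' },
  { role: 'user', content: 'diff --git a/index.ts b/index.ts ...' }
]);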

src/engine/azure.ts

@@ -1,81 +1,51 @@
import {
  AzureKeyCredential,
  OpenAIClient as AzureOpenAIClient
} from '@azure/openai';
import { outro } from '@clack/prompts';
import axios from 'axios';
import chalk from 'chalk';
import { OpenAI } from 'openai';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine, AiEngineConfig } from './Engine';

interface AzureAiEngineConfig extends AiEngineConfig {
  baseURL: string;
  apiKey: string;
}

export class AzureEngine implements AiEngine {
  config: AzureAiEngineConfig;
  client: AzureOpenAIClient;

  constructor(config: AzureAiEngineConfig) {
    this.config = config;
    this.client = new AzureOpenAIClient(
      this.config.baseURL,
      new AzureKeyCredential(this.config.apiKey)
    );
  }

  generateCommitMessage = async (
    messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
  ): Promise<string | undefined> => {
    try {
      const REQUEST_TOKENS = messages
        .map((msg) => tokenCount(msg.content as string) + 4)
        .reduce((a, b) => a + b, 0);

      if (
        REQUEST_TOKENS >
        this.config.maxTokensInput - this.config.maxTokensOutput
      ) {
        throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
      }

      const data = await this.client.getChatCompletions(
        this.config.model,
        messages
      );

      const message = data.choices[0].message;
@@ -84,10 +54,10 @@ export class Azure implements AiEngine {
      }

      return message?.content;
    } catch (error) {
      outro(`${chalk.red('✖')} ${this.config.model}`);
      const err = error as Error;
      outro(`${chalk.red('✖')} ${JSON.stringify(error)}`);
      if (
        axios.isAxiosError<{ error?: { message: string } }>(error) &&
@@ -105,5 +75,3 @@ export class Azure implements AiEngine {
    }
  };
}
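A hedged construction sketch: for Azure the baseURL carries the resource endpoint and model the deployment name (all values below are assumptions):

// Hypothetical usage sketch, not part of the diff.
const azure = new AzureEngine({
  apiKey: process.env.OCO_API_KEY!, // assumption
  model: 'my-gpt-4o-deployment', // assumption: an Azure deployment name
  maxTokensInput: 4096,
  maxTokensOutput: 500,
  baseURL: 'https://my-resource.openai.azure.com' // assumption: resource endpoint
});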

src/engine/flowise.ts

@@ -1,38 +1,40 @@
import axios, { AxiosInstance } from 'axios';
import { OpenAI } from 'openai';
import { AiEngine, AiEngineConfig } from './Engine';

interface FlowiseAiConfig extends AiEngineConfig {}

export class FlowiseEngine implements AiEngine {
  config: FlowiseAiConfig;
  client: AxiosInstance;

  constructor(config) {
    this.config = config;
    this.client = axios.create({
      url: `${config.baseURL}/${config.apiKey}`,
      headers: { 'Content-Type': 'application/json' }
    });
  }

  async generateCommitMessage(
    messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
  ): Promise<string | undefined> {
    const gitDiff = (messages[messages.length - 1]?.content as string)
      .replace(/\\/g, '\\\\')
      .replace(/"/g, '\\"')
      .replace(/\n/g, '\\n')
      .replace(/\r/g, '\\r')
      .replace(/\t/g, '\\t');

    const payload = {
      question: gitDiff,
      overrideConfig: {
        systemMessagePrompt: messages[0]?.content
      },
      history: messages.slice(1, -1)
    };

    try {
      const response = await this.client.post('', payload);
      const message = response.data;
      return message?.text;
    } catch (err: any) {

src/engine/gemini.ts

@@ -1,133 +1,88 @@
import { outro } from '@clack/prompts';
import chalk from 'chalk';
import {
  Content,
  GoogleGenerativeAI,
  HarmBlockThreshold,
  HarmCategory,
  Part
} from '@google/generative-ai';
import axios from 'axios';
import { OpenAI } from 'openai';
import { AiEngine, AiEngineConfig } from './Engine';

interface GeminiConfig extends AiEngineConfig {}

export class GeminiEngine implements AiEngine {
  config: GeminiConfig;
  client: GoogleGenerativeAI;

  constructor(config) {
    this.client = new GoogleGenerativeAI(config.apiKey);
    this.config = config;
  }

  async generateCommitMessage(
    messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
  ): Promise<string | undefined> {
    const systemInstruction = messages
      .filter((m) => m.role === 'system')
      .map((m) => m.content)
      .join('\n');

    const gemini = this.client.getGenerativeModel({
      model: this.config.model,
      systemInstruction
    });

    const contents = messages
      .filter((m) => m.role !== 'system')
      .map(
        (m) =>
          ({
            parts: [{ text: m.content } as Part],
            role: m.role === 'user' ? m.role : 'model'
          } as Content)
      );

    try {
      const result = await gemini.generateContent({
        contents,
        safetySettings: [
          {
            category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
            threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
          },
          {
            category: HarmCategory.HARM_CATEGORY_HARASSMENT,
            threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
          },
          {
            category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
            threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
          },
          {
            category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
            threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
          }
        ],
        generationConfig: {
          maxOutputTokens: this.config.maxTokensOutput,
          temperature: 0,
          topP: 0.1
        }
      });

      return result.response.text();
    } catch (error) {
      const err = error as Error;
      outro(`${chalk.red('✖')} ${err?.message || err}`);
      if (
        axios.isAxiosError<{ error?: { message: string } }>(error) &&
        error.response?.status === 401
      ) {
        const geminiError = error.response.data.error;
        if (geminiError?.message) outro(geminiError.message);
        outro(
          'For help look into README https://github.com/di-sukharev/opencommit#setup'
        );
        if (geminiError) throw new Error(geminiError?.message);
      }
      throw err;
    }
  }
}

src/engine/groq.ts (new file)

@@ -0,0 +1,10 @@
import { OpenAiConfig, OpenAiEngine } from './openAi';
interface GroqConfig extends OpenAiConfig {}
export class GroqEngine extends OpenAiEngine {
constructor(config: GroqConfig) {
config.baseURL = 'https://api.groq.com/openai/v1';
super(config);
}
}
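Since Groq exposes an OpenAI-compatible API, the engine only pins the base URL; a hedged usage sketch (model id and key source are assumptions):

// Hypothetical usage sketch: whatever baseURL is passed in is
// overwritten by the constructor before delegating to OpenAiEngine.
const groq = new GroqEngine({
  apiKey: process.env.OCO_API_KEY!, // assumption
  model: 'llama3-70b-8192', // assumption: any Groq-hosted model id
  maxTokensInput: 4096,
  maxTokensOutput: 500,
  baseURL: '' // replaced with https://api.groq.com/openai/v1
});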

src/engine/mistral.ts (new file)

@@ -0,0 +1,82 @@
import axios from 'axios';
import { Mistral } from '@mistralai/mistralai';
import { OpenAI } from 'openai';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine, AiEngineConfig } from './Engine';
import {
AssistantMessage as MistralAssistantMessage,
SystemMessage as MistralSystemMessage,
ToolMessage as MistralToolMessage,
UserMessage as MistralUserMessage
} from '@mistralai/mistralai/models/components';
export interface MistralAiConfig extends AiEngineConfig {}
export type MistralCompletionMessageParam = Array<
| (MistralSystemMessage & { role: "system" })
| (MistralUserMessage & { role: "user" })
| (MistralAssistantMessage & { role: "assistant" })
| (MistralToolMessage & { role: "tool" })
>
export class MistralAiEngine implements AiEngine {
config: MistralAiConfig;
client: Mistral;
constructor(config: MistralAiConfig) {
this.config = config;
if (!config.baseURL) {
this.client = new Mistral({ apiKey: config.apiKey });
} else {
this.client = new Mistral({ apiKey: config.apiKey, serverURL: config.baseURL });
}
}
public generateCommitMessage = async (
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | null> => {
const params = {
model: this.config.model,
messages: messages as MistralCompletionMessageParam,
topP: 0.1,
maxTokens: this.config.maxTokensOutput
};
try {
const REQUEST_TOKENS = messages
.map((msg) => tokenCount(msg.content as string) + 4)
.reduce((a, b) => a + b, 0);
if (
REQUEST_TOKENS >
this.config.maxTokensInput - this.config.maxTokensOutput
)
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
const completion = await this.client.chat.complete(params);
if (!completion.choices)
throw Error('No completion choice available.')
const message = completion.choices[0].message;
if (!message || !message.content)
throw Error('No completion choice available.')
return message.content as string;
} catch (error) {
const err = error as Error;
if (
axios.isAxiosError<{ error?: { message: string } }>(error) &&
error.response?.status === 401
) {
const mistralError = error.response.data.error;
if (mistralError) throw new Error(mistralError.message);
}
throw err;
}
};
}
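A minimal construction sketch (values are assumptions): OpenAI-shaped messages are cast to Mistral's message union, and serverURL is only set when a baseURL is configured.

// Hypothetical usage sketch, not part of the diff.
const mistral = new MistralAiEngine({
  apiKey: process.env.OCO_API_KEY!, // assumption
  model: 'mistral-small-latest', // assumption: any Mistral model id
  maxTokensInput: 4096,
  maxTokensOutput: 500,
  baseURL: '' // falsy -> default Mistral endpoint
});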

src/engine/mlx.ts (new file)

@@ -0,0 +1,47 @@
import axios, { AxiosInstance } from 'axios';
import { OpenAI } from 'openai';
import { AiEngine, AiEngineConfig } from './Engine';
interface MLXConfig extends AiEngineConfig {}
export class MLXEngine implements AiEngine {
config: MLXConfig;
client: AxiosInstance;
constructor(config) {
this.config = config;
this.client = axios.create({
url: config.baseURL
? `${config.baseURL}/${config.apiKey}`
: 'http://localhost:8080/v1/chat/completions',
headers: { 'Content-Type': 'application/json' }
});
}
async generateCommitMessage(
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>):
Promise<string | undefined> {
const params = {
messages,
temperature: 0,
top_p: 0.1,
repetition_penalty: 1.5,
stream: false
};
try {
const response = await this.client.post(
this.client.getUri(this.config),
params
);
const choices = response.data.choices;
const message = choices[0].message;
return message?.content;
} catch (err: any) {
const message = err.response?.data?.error ?? err.message;
throw new Error(`MLX provider error: ${message}`);
}
}
}
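A hedged sketch of pointing this engine at a local MLX server; the port and route come from the default URL above, everything else is an assumption:

// Hypothetical usage sketch: assumes an OpenAI-style chat endpoint is
// already being served locally at http://localhost:8080/v1/chat/completions.
const mlx = new MLXEngine({
  apiKey: '', // unused for a local server
  model: '', // assumption: model selection happens server-side here
  maxTokensInput: 4096,
  maxTokensOutput: 500,
  baseURL: '' // empty -> default localhost:8080 route
});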

src/engine/ollama.ts

@@ -1,52 +1,44 @@
import axios, { AxiosInstance } from 'axios';
import { OpenAI } from 'openai';
import { AiEngine, AiEngineConfig } from './Engine';

interface OllamaConfig extends AiEngineConfig {}

export class OllamaEngine implements AiEngine {
  config: OllamaConfig;
  client: AxiosInstance;

  constructor(config) {
    this.config = config;
    this.client = axios.create({
      url: config.baseURL
        ? `${config.baseURL}/${config.apiKey}`
        : 'http://localhost:11434/api/chat',
      headers: { 'Content-Type': 'application/json' }
    });
  }

  async generateCommitMessage(
    messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
  ): Promise<string | undefined> {
    const params = {
      model: this.config.model ?? 'mistral',
      messages,
      options: { temperature: 0, top_p: 0.1 },
      stream: false
    };

    try {
      const response = await this.client.post(
        this.client.getUri(this.config),
        params
      );

      const message = response.data.message;
      return message?.content;
    } catch (err: any) {
      const message = err.response?.data?.error ?? err.message;
      throw new Error(`Ollama provider error: ${message}`);
    }
  }
}
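For contrast with MLX above, a hedged sketch of the Ollama engine against a local daemon (the model tag is an assumption):

// Hypothetical usage sketch, not part of the diff.
const ollama = new OllamaEngine({
  apiKey: '', // only appended to the URL when baseURL is set
  model: 'llama3', // assumption: any locally pulled model tag
  maxTokensInput: 4096,
  maxTokensOutput: 500,
  baseURL: '' // empty -> http://localhost:11434/api/chat
});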

src/engine/openAi.ts

@@ -1,127 +1,64 @@
import axios from 'axios';
import { OpenAI } from 'openai';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine, AiEngineConfig } from './Engine';

export interface OpenAiConfig extends AiEngineConfig {}

export class OpenAiEngine implements AiEngine {
  config: OpenAiConfig;
  client: OpenAI;

  constructor(config: OpenAiConfig) {
    this.config = config;

    if (!config.baseURL) {
      this.client = new OpenAI({ apiKey: config.apiKey });
    } else {
      this.client = new OpenAI({
        apiKey: config.apiKey,
        baseURL: config.baseURL
      });
    }
  }

  public generateCommitMessage = async (
    messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
  ): Promise<string | null> => {
    const params = {
      model: this.config.model,
      messages,
      temperature: 0,
      top_p: 0.1,
      max_tokens: this.config.maxTokensOutput
    };

    try {
      const REQUEST_TOKENS = messages
        .map((msg) => tokenCount(msg.content as string) + 4)
        .reduce((a, b) => a + b, 0);

      if (
        REQUEST_TOKENS >
        this.config.maxTokensInput - this.config.maxTokensOutput
      )
        throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);

      const completion = await this.client.chat.completions.create(params);

      const message = completion.choices[0].message;

      return message?.content;
    } catch (error) {
      const err = error as Error;
      if (
        axios.isAxiosError<{ error?: { message: string } }>(error) &&
        error.response?.status === 401
      ) {
        const openAiError = error.response.data.error;
        if (openAiError) throw new Error(openAiError.message);
      }
      throw err;
    }
  };
}
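To make the token guard above concrete, a worked sketch: each message is budgeted at tokenCount(content) + 4, and the sum must fit under maxTokensInput minus the space reserved for the reply. The token counts below are illustrative, not real tokenizer output.

// Illustrative arithmetic for the REQUEST_TOKENS check:
// system prompt:  900 tokens ->  904 with the +4 per-message overhead
// user diff:     2800 tokens -> 2804
// REQUEST_TOKENS = 904 + 2804 = 3708
// budget = maxTokensInput - maxTokensOutput = 4096 - 500 = 3596
// 3708 > 3596 -> throws GenerateCommitMessageErrorEnum.tooMuchTokens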

src/engine/testAi.ts

@@ -1,31 +1,47 @@
import { OpenAI } from 'openai';
import { AiEngine } from './Engine';

export const TEST_MOCK_TYPES = [
  'commit-message',
  'prompt-module-commitlint-config'
] as const;

export type TestMockType = (typeof TEST_MOCK_TYPES)[number];

type TestAiEngine = Partial<AiEngine> & {
  mockType: TestMockType;
};

export class TestAi implements TestAiEngine {
  mockType: TestMockType;

  // these are not used by the test engine
  config: any;
  client: any;
  // ---

  constructor(mockType: TestMockType) {
    this.mockType = mockType;
  }

  async generateCommitMessage(
    _messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
  ): Promise<string | undefined> {
    switch (this.mockType) {
      case 'commit-message':
        return 'fix(testAi.ts): test commit message';
      case 'prompt-module-commitlint-config':
        return (
          `{\n` +
          `  "localLanguage": "english",\n` +
          `  "commitFix": "fix(server): Change 'port' variable to uppercase 'PORT'",\n` +
          `  "commitFeat": "feat(server): Allow server to listen on a port specified through environment variable",\n` +
          `  "commitDescription": "Change 'port' variable to uppercase 'PORT'. Allow server to listen on a port specified through environment variable."\n` +
          `}`
        );
      default:
        throw Error('unsupported test mock type');
    }
  }
}
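This mock engine is what lets the e2e tests later in this diff run the CLI offline; a minimal sketch:

// Hypothetical usage sketch; the e2e tests select this engine by running
// the CLI with OCO_AI_PROVIDER='test'.
const testEngine = new TestAi('commit-message');
const msg = await testEngine.generateCommitMessage([]);
// msg === 'fix(testAi.ts): test commit message'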

src/generateCommitMessageFromGitDiff.ts

@@ -1,31 +1,25 @@
import { OpenAI } from 'openai';
import { DEFAULT_TOKEN_LIMITS, getConfig } from './commands/config';
import { getMainCommitPrompt } from './prompts';
import { getEngine } from './utils/engine';
import { mergeDiffs } from './utils/mergeDiffs';
import { tokenCount } from './utils/tokenCount';

const config = getConfig();

const MAX_TOKENS_INPUT = config.OCO_TOKENS_MAX_INPUT;
const MAX_TOKENS_OUTPUT = config.OCO_TOKENS_MAX_OUTPUT;

const generateCommitMessageChatCompletionPrompt = async (
  diff: string,
  fullGitMojiSpec: boolean,
  context: string
): Promise<Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>> => {
  const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(fullGitMojiSpec, context);

  const chatContextAsCompletionRequest = [...INIT_MESSAGES_PROMPT];

  chatContextAsCompletionRequest.push({
    role: 'user',
    content: diff
  });
@@ -43,10 +37,14 @@ const ADJUSTMENT_FACTOR = 20;
export const generateCommitMessageByDiff = async (
  diff: string,
  fullGitMojiSpec: boolean = false,
  context: string = ""
): Promise<string> => {
  try {
    const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(
      fullGitMojiSpec,
      context
    );

    const INIT_MESSAGES_PROMPT_LENGTH = INIT_MESSAGES_PROMPT.map(
      (msg) => tokenCount(msg.content as string) + 4
@@ -76,7 +74,8 @@ export const generateCommitMessageByDiff = async (
    const messages = await generateCommitMessageChatCompletionPrompt(
      diff,
      fullGitMojiSpec,
      context
    );

    const engine = getEngine();
@@ -181,7 +180,7 @@ export const getCommitMsgsPromisesFromFileDiffs = async (
  // merge multiple files-diffs into 1 prompt to save tokens
  const mergedFilesDiffs = mergeDiffs(diffByFiles, maxDiffLength);

  const commitMessagePromises = [] as Promise<string | null | undefined>[];
  for (const fileDiff of mergedFilesDiffs) {
    if (tokenCount(fileDiff) >= maxDiffLength) {

src/github-action.ts

@@ -1,11 +1,9 @@
import { unlinkSync, writeFileSync } from 'fs';
import core from '@actions/core';
import exec from '@actions/exec';
import github from '@actions/github';
import { intro, outro } from '@clack/prompts';
import { PushEvent } from '@octokit/webhooks-types';
import { generateCommitMessageByDiff } from './generateCommitMessageFromGitDiff';
import { randomIntFromInterval } from './utils/randomIntFromInterval';
import { sleep } from './utils/sleep';
@@ -54,7 +52,7 @@ async function improveMessagesInChunks(diffsAndSHAs: DiffAndSHA[]) {
  const chunkSize = diffsAndSHAs!.length % 2 === 0 ? 4 : 3;
  outro(`Improving commit messages in chunks of ${chunkSize}.`);
  const improvePromises = diffsAndSHAs!.map((commit) =>
    generateCommitMessageByDiff(commit.diff, false)
  );

  let improvedMessagesAndSHAs: MsgAndSHA[] = [];

src/migrations/00_use_single_api_key_and_url.ts (new file)

@@ -0,0 +1,45 @@
import {
CONFIG_KEYS,
getConfig,
OCO_AI_PROVIDER_ENUM,
setConfig
} from '../commands/config';
export default function () {
const config = getConfig({ setDefaultValues: false });
const aiProvider = config.OCO_AI_PROVIDER;
let apiKey: string | undefined;
let apiUrl: string | undefined;
if (aiProvider === OCO_AI_PROVIDER_ENUM.OLLAMA) {
apiKey = config['OCO_OLLAMA_API_KEY'];
apiUrl = config['OCO_OLLAMA_API_URL'];
} else if (aiProvider === OCO_AI_PROVIDER_ENUM.ANTHROPIC) {
apiKey = config['OCO_ANTHROPIC_API_KEY'];
apiUrl = config['OCO_ANTHROPIC_BASE_PATH'];
} else if (aiProvider === OCO_AI_PROVIDER_ENUM.OPENAI) {
apiKey = config['OCO_OPENAI_API_KEY'];
apiUrl = config['OCO_OPENAI_BASE_PATH'];
} else if (aiProvider === OCO_AI_PROVIDER_ENUM.AZURE) {
apiKey = config['OCO_AZURE_API_KEY'];
apiUrl = config['OCO_AZURE_ENDPOINT'];
} else if (aiProvider === OCO_AI_PROVIDER_ENUM.GEMINI) {
apiKey = config['OCO_GEMINI_API_KEY'];
apiUrl = config['OCO_GEMINI_BASE_PATH'];
} else if (aiProvider === OCO_AI_PROVIDER_ENUM.FLOWISE) {
apiKey = config['OCO_FLOWISE_API_KEY'];
apiUrl = config['OCO_FLOWISE_ENDPOINT'];
} else {
throw new Error(
`Migration failed, set AI provider first. Run "oco config set OCO_AI_PROVIDER=<provider>", where <provider> is one of: ${Object.values(
OCO_AI_PROVIDER_ENUM
).join(', ')}`
);
}
if (apiKey) setConfig([[CONFIG_KEYS.OCO_API_KEY, apiKey]]);
if (apiUrl) setConfig([[CONFIG_KEYS.OCO_API_URL, apiUrl]]);
}
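A sketch of the effective key mapping this migration performs, assuming OCO_AI_PROVIDER is 'anthropic' (values hypothetical):

// before: OCO_ANTHROPIC_API_KEY='sk-ant-...'  OCO_ANTHROPIC_BASE_PATH='https://...'
// after:  OCO_API_KEY='sk-ant-...'            OCO_API_URL='https://...'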

src/migrations/01_remove_obsolete_config_keys_from_global_file.ts (new file)

@@ -0,0 +1,26 @@
import { getGlobalConfig, setGlobalConfig } from '../commands/config';
export default function () {
const obsoleteKeys = [
'OCO_OLLAMA_API_KEY',
'OCO_OLLAMA_API_URL',
'OCO_ANTHROPIC_API_KEY',
'OCO_ANTHROPIC_BASE_PATH',
'OCO_OPENAI_API_KEY',
'OCO_OPENAI_BASE_PATH',
'OCO_AZURE_API_KEY',
'OCO_AZURE_ENDPOINT',
'OCO_GEMINI_API_KEY',
'OCO_GEMINI_BASE_PATH',
'OCO_FLOWISE_API_KEY',
'OCO_FLOWISE_ENDPOINT'
];
const globalConfig = getGlobalConfig();
const configToOverride = { ...globalConfig };
for (const key of obsoleteKeys) delete configToOverride[key];
setGlobalConfig(configToOverride);
}

src/migrations/02_set_missing_default_values.ts (new file)

@@ -0,0 +1,22 @@
import {
ConfigType,
DEFAULT_CONFIG,
getGlobalConfig,
setConfig
} from '../commands/config';
export default function () {
const setDefaultConfigValues = (config: ConfigType) => {
const entriesToSet: [key: string, value: string | boolean | number][] = [];
for (const entry of Object.entries(DEFAULT_CONFIG)) {
const [key, _value] = entry;
if (config[key] === 'undefined' || config[key] === undefined)
entriesToSet.push(entry);
}
if (entriesToSet.length > 0) setConfig(entriesToSet);
console.log(entriesToSet);
};
setDefaultConfigValues(getGlobalConfig());
}

src/migrations/_migrations.ts (new file)

@@ -0,0 +1,18 @@
import migration00 from './00_use_single_api_key_and_url';
import migration01 from './01_remove_obsolete_config_keys_from_global_file';
import migration02 from './02_set_missing_default_values';
export const migrations = [
{
name: '00_use_single_api_key_and_url',
run: migration00
},
{
name: '01_remove_obsolete_config_keys_from_global_file',
run: migration01
},
{
name: '02_set_missing_default_values',
run: migration02
}
];

src/migrations/_run.ts (new file)

@@ -0,0 +1,71 @@
import fs from 'fs';
import { homedir } from 'os';
import { join as pathJoin } from 'path';
import { migrations } from './_migrations';
import { outro } from '@clack/prompts';
import chalk from 'chalk';
import {
getConfig,
getIsGlobalConfigFileExist,
OCO_AI_PROVIDER_ENUM
} from '../commands/config';
const migrationsFile = pathJoin(homedir(), '.opencommit_migrations');
const getCompletedMigrations = (): string[] => {
if (!fs.existsSync(migrationsFile)) {
return [];
}
const data = fs.readFileSync(migrationsFile, 'utf-8');
return data ? JSON.parse(data) : [];
};
const saveCompletedMigration = (migrationName: string) => {
const completedMigrations = getCompletedMigrations();
completedMigrations.push(migrationName);
fs.writeFileSync(
migrationsFile,
JSON.stringify(completedMigrations, null, 2)
);
};
export const runMigrations = async () => {
// if no config file, we assume it's a new installation and no migrations are needed
if (!getIsGlobalConfigFileExist()) return;
const config = getConfig();
if (config.OCO_AI_PROVIDER === OCO_AI_PROVIDER_ENUM.TEST) return;
const completedMigrations = getCompletedMigrations();
let isMigrated = false;
for (const migration of migrations) {
if (!completedMigrations.includes(migration.name)) {
try {
console.log('Applying migration', migration.name);
migration.run();
console.log('Migration applied successfully', migration.name);
saveCompletedMigration(migration.name);
} catch (error) {
outro(
`${chalk.red('Failed to apply migration')} ${
migration.name
}: ${error}`
);
process.exit(1);
}
isMigrated = true;
}
}
if (isMigrated) {
outro(
`${chalk.green(
'✔'
)} Migrations to your config were applied successfully. Please rerun.`
);
process.exit(0);
}
};
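For reference, a sketch of the ledger this writes once all three migrations above have run:

// ~/.opencommit_migrations (written by saveCompletedMigration):
// [
//   "00_use_single_api_key_and_url",
//   "01_remove_obsolete_config_keys_from_global_file",
//   "02_set_missing_default_values"
// ]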

src/modules/commitlint/config.ts

@@ -2,16 +2,16 @@ import { spinner } from '@clack/prompts';
import { getConfig } from '../../commands/config';
import { i18n, I18nLocals } from '../../i18n';
import { getEngine } from '../../utils/engine';
import { COMMITLINT_LLM_CONFIG_PATH } from './constants';
import { computeHash } from './crypto';
import { commitlintPrompts, inferPromptsFromCommitlintConfig } from './prompts';
import { getCommitLintPWDConfig } from './pwd-commitlint';
import { CommitlintLLMConfig } from './types';
import * as utils from './utils';
const config = getConfig();

const translation = i18n[(config.OCO_LANGUAGE as I18nLocals) || 'en'];
export const configureCommitlintIntegration = async (force = false) => {
const spin = spinner();
@@ -26,7 +26,7 @@ export const configureCommitlintIntegration = async (force = false) => {
* @commitlint >= 9.0.0 is installed in the local directory.
* 'node_modules/@commitlint/load' package exists.
* A valid @commitlint configuration exists.
`
);
}

src/modules/commitlint/prompts.ts

@@ -1,8 +1,5 @@
import chalk from 'chalk';
import { OpenAI } from 'openai';
import { outro } from '@clack/prompts';
import {
@@ -17,7 +14,7 @@ import { i18n, I18nLocals } from '../../i18n';
import { IDENTITY, INIT_DIFF_PROMPT } from '../../prompts';

const config = getConfig();
const translation = i18n[(config.OCO_LANGUAGE as I18nLocals) || 'en'];
type DeepPartial<T> = {
[P in keyof T]?: {
@@ -214,11 +211,10 @@ const STRUCTURE_OF_COMMIT = `
// Prompt to generate LLM-readable rules based on @commitlint rules.
const GEN_COMMITLINT_CONSISTENCY_PROMPT = (
  prompts: string[]
): OpenAI.Chat.Completions.ChatCompletionMessageParam[] => [
  {
    role: 'system',
    content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages for two different changes in a single codebase and output them in the provided JSON format: one for a bug fix and another for a new feature.
Here are the specific requirements and conventions that should be strictly followed:
@@ -260,22 +256,33 @@ Example Git Diff is to follow:`
const INIT_MAIN_PROMPT = (
language: string,
prompts: string[]
): OpenAI.Chat.Completions.ChatCompletionMessageParam => ({
  role: 'system',
  content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages in the given @commitlint convention and explain WHAT were the changes ${
    config.OCO_WHY ? 'and WHY the changes were done' : ''
  }. I'll send you an output of 'git diff --staged' command, and you convert it into a commit message.
${
  config.OCO_EMOJI
    ? 'Use GitMoji convention to preface the commit.'
    : 'Do not preface the commit with anything.'
}
${
  config.OCO_DESCRIPTION
    ? 'Add a short description of WHY the changes are done after the commit message. Don\'t start it with "This commit", just describe the changes.'
    : "Don't add any descriptions to the commit, only commit message."
}
Use the present tense. Use ${language} to answer.
${
  config.OCO_ONE_LINE_COMMIT
    ? 'Craft a concise commit message that encapsulates all changes made, with an emphasis on the primary updates. If the modifications share a common theme or scope, mention it succinctly; otherwise, leave the scope out to maintain focus. The goal is to provide a clear and unified overview of the changes in a one single message, without diverging into a list of commit per file change.'
    : ''
}
You will strictly follow the following conventions to generate the content of the commit message:
- ${prompts.join('\n- ')}
The conventions refers to the following structure of commit message:
${STRUCTURE_OF_COMMIT}`
});
export const commitlintPrompts = {

src/modules/commitlint/pwd-commitlint.ts

@@ -1,13 +1,29 @@
import fs from 'fs/promises';
import path from 'path';
const findModulePath = (moduleName: string) => {
const searchPaths = [
path.join('node_modules', moduleName),
path.join('node_modules', '.pnpm')
];
for (const basePath of searchPaths) {
try {
const resolvedPath = require.resolve(moduleName, { paths: [basePath] });
return resolvedPath;
} catch {
// Continue to the next search path if the module is not found
}
}
throw new Error(`Cannot find module ${moduleName}`);
};
const getCommitLintModuleType = async (): Promise<'cjs' | 'esm'> => {
  const packageFile = '@commitlint/load/package.json';
  const packageJsonPath = findModulePath(packageFile);
const packageJson = JSON.parse(await fs.readFile(packageJsonPath, 'utf8'));
if (!packageJson) {
throw new Error(`Failed to parse ${packageFile}`);
}
@@ -19,7 +35,7 @@ const getCommitLintModuleType = async (): Promise<'cjs' | 'esm'> => {
* QualifiedConfig from any version of @commitlint/types
* @see https://github.com/conventional-changelog/commitlint/blob/master/@commitlint/types/src/load.ts
*/
type QualifiedConfigOnAnyVersion = { [key: string]: unknown };
/**
* This code is loading the configuration for the `@commitlint` package from the current working
@@ -27,36 +43,31 @@ type QualifiedConfigOnAnyVersion = { [key:string]: unknown };
*
* @returns
*/
export const getCommitLintPWDConfig =
  async (): Promise<QualifiedConfigOnAnyVersion | null> => {
    let load: Function, modulePath: string;
    switch (await getCommitLintModuleType()) {
      case 'cjs':
        /**
         * CommonJS (<= commitlint@v18.x.x.)
         */
        modulePath = findModulePath('@commitlint/load');
        load = require(modulePath).default;
        break;
      case 'esm':
        /**
         * ES Module (commitlint@v19.x.x. <= )
         * Directory import is not supported in ES Module resolution, so import the file directly
         */
        modulePath = await findModulePath('@commitlint/load/lib/load.js');
        load = (await import(modulePath)).default;
        break;
    }

    if (load && typeof load === 'function') {
      return await load();
    }

    // @commitlint/load is not a function
    return null;
  };

src/prompts.ts

@@ -1,10 +1,5 @@
import { note } from '@clack/prompts';
import { OpenAI } from 'openai';
import { getConfig } from './commands/config';
import { i18n, I18nLocals } from './i18n';
import { configureCommitlintIntegration } from './modules/commitlint/config';
@@ -14,117 +9,148 @@ import * as utils from './modules/commitlint/utils';
import { removeConventionalCommitWord } from './utils/removeConventionalCommitWord';

const config = getConfig();
const translation = i18n[(config.OCO_LANGUAGE as I18nLocals) || 'en'];
export const IDENTITY =
  'You are to act as an author of a commit message in git.';
const GITMOJI_HELP = `Use GitMoji convention to preface the commit. Here are some help to choose the right emoji (emoji, description):
🐛, Fix a bug;
✨, Introduce new features;
📝, Add or update documentation;
🚀, Deploy stuff;
✅, Add, update, or pass tests;
♻️, Refactor code;
⬆️, Upgrade dependencies;
🔧, Add or update configuration files;
🌐, Internationalization and localization;
💡, Add or update comments in source code;`;
const FULL_GITMOJI_SPEC = `${GITMOJI_HELP}
🎨, Improve structure / format of the code;
⚡️, Improve performance;
🔥, Remove code or files;
🚑️, Critical hotfix;
💄, Add or update the UI and style files;
🎉, Begin a project;
🔒️, Fix security issues;
🔐, Add or update secrets;
🔖, Release / Version tags;
🚨, Fix compiler / linter warnings;
🚧, Work in progress;
💚, Fix CI Build;
⬇️, Downgrade dependencies;
📌, Pin dependencies to specific versions;
👷, Add or update CI build system;
📈, Add or update analytics or track code;
➕, Add a dependency;
➖, Remove a dependency;
🔨, Add or update development scripts;
✏️, Fix typos;
💩, Write bad code that needs to be improved;
⏪️, Revert changes;
🔀, Merge branches;
📦️, Add or update compiled files or packages;
👽️, Update code due to external API changes;
🚚, Move or rename resources (e.g.: files, paths, routes);
📄, Add or update license;
💥, Introduce breaking changes;
🍱, Add or update assets;
♿️, Improve accessibility;
🍻, Write code drunkenly;
💬, Add or update text and literals;
🗃️, Perform database related changes;
🔊, Add or update logs;
🔇, Remove logs;
👥, Add or update contributor(s);
🚸, Improve user experience / usability;
🏗️, Make architectural changes;
📱, Work on responsive design;
🤡, Mock things;
🥚, Add or update an easter egg;
🙈, Add or update a .gitignore file;
📸, Add or update snapshots;
⚗️, Perform experiments;
🔍️, Improve SEO;
🏷️, Add or update types;
🌱, Add or update seed files;
🚩, Add, update, or remove feature flags;
🥅, Catch errors;
💫, Add or update animations and transitions;
🗑️, Deprecate code that needs to be cleaned up;
🛂, Work on code related to authorization, roles and permissions;
🩹, Simple fix for a non-critical issue;
🧐, Data exploration/inspection;
⚰️, Remove dead code;
🧪, Add a failing test;
👔, Add or update business logic;
🩺, Add or update healthcheck;
🧱, Infrastructure related changes;
🧑‍💻, Improve developer experience;
💸, Add sponsorships or money related infrastructure;
🧵, Add or update code related to multithreading or concurrency;
🦺, Add or update code related to validation.`;
const CONVENTIONAL_COMMIT_KEYWORDS =
'Do not preface the commit with anything, except for the conventional commit keywords: fix, feat, build, chore, ci, docs, style, refactor, perf, test.';
const getCommitConvention = (fullGitMojiSpec: boolean) =>
config.OCO_EMOJI
? fullGitMojiSpec
? FULL_GITMOJI_SPEC
: GITMOJI_HELP
: CONVENTIONAL_COMMIT_KEYWORDS;
const getDescriptionInstruction = () =>
config.OCO_DESCRIPTION
? 'Add a short description of WHY the changes are done after the commit message. Don\'t start it with "This commit", just describe the changes.'
: "Don't add any descriptions to the commit, only commit message.";
const getOneLineCommitInstruction = () =>
config.OCO_ONE_LINE_COMMIT
? 'Craft a concise commit message that encapsulates all changes made, with an emphasis on the primary updates. If the modifications share a common theme or scope, mention it succinctly; otherwise, leave the scope out to maintain focus. The goal is to provide a clear and unified overview of the changes in a one single message, without diverging into a list of commit per file change.'
: '';
/**
* Get the context of the user input
* @param extraArgs - The arguments passed to the command line
* @example
* $ oco -- This is a context used to generate the commit message
* @returns - The context of the user input
*/
const userInputCodeContext = (context: string) => {
if (context !== '' && context !== ' ') {
return `Additional context provided by the user: <context>${context}</context>\nConsider this context when generating the commit message, incorporating relevant information when appropriate.`;
}
return '';
};
const INIT_MAIN_PROMPT = (
language: string,
  fullGitMojiSpec: boolean,
  context: string
): OpenAI.Chat.Completions.ChatCompletionMessageParam => ({
  role: 'system',
  content: (() => {
    const commitConvention = fullGitMojiSpec
      ? 'GitMoji specification'
      : 'Conventional Commit Convention';
    const missionStatement = `${IDENTITY} Your mission is to create clean and comprehensive commit messages as per the ${commitConvention} and explain WHAT were the changes and mainly WHY the changes were done.`;
    const diffInstruction =
      "I'll send you an output of 'git diff --staged' command, and you are to convert it into a commit message.";
    const conventionGuidelines = getCommitConvention(fullGitMojiSpec);
    const descriptionGuideline = getDescriptionInstruction();
    const oneLineCommitGuideline = getOneLineCommitInstruction();
    const generalGuidelines = `Use the present tense. Lines must not be longer than 74 characters. Use ${language} for the commit message.`;
    const userInputContext = userInputCodeContext(context);

    return `${missionStatement}\n${diffInstruction}\n${conventionGuidelines}\n${descriptionGuideline}\n${oneLineCommitGuideline}\n${generalGuidelines}\n${userInputContext}`;
  })()
});
export const INIT_DIFF_PROMPT: OpenAI.Chat.Completions.ChatCompletionMessageParam =
{
role: 'user',
content: `diff --git a/src/server.ts b/src/server.ts
index ad4db42..f3b18a9 100644
--- a/src/server.ts
@@ -151,27 +177,34 @@ export const INIT_DIFF_PROMPT: ChatCompletionRequestMessage = {
});`
};
const getContent = (translation: ConsistencyPrompt) => {
const fix = config.OCO_EMOJI
? `🐛 ${removeConventionalCommitWord(translation.commitFix)}`
: translation.commitFix;
const feat = config.OCO_EMOJI
? `${removeConventionalCommitWord(translation.commitFeat)}`
: translation.commitFeat;
const description = config.OCO_DESCRIPTION
? translation.commitDescription
: '';
return `${fix}\n${feat}\n${description}`;
};
const INIT_CONSISTENCY_PROMPT = (
translation: ConsistencyPrompt
): OpenAI.Chat.Completions.ChatCompletionMessageParam => ({
role: 'assistant',
content: getContent(translation)
});
export const getMainCommitPrompt = async (
  fullGitMojiSpec: boolean,
  context: string
): Promise<Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>> => {
  switch (config.OCO_PROMPT_MODULE) {
case '@commitlint':
if (!(await utils.commitlintLLMConfigExists())) {
note(
@@ -191,15 +224,14 @@ export const getMainCommitPrompt = async (
INIT_DIFF_PROMPT,
INIT_CONSISTENCY_PROMPT(
commitLintConfig.consistency[
translation.localLanguage
] as ConsistencyPrompt
)
];
default:
// conventional-commit
return [
INIT_MAIN_PROMPT(translation.localLanguage, fullGitMojiSpec, context),
INIT_DIFF_PROMPT,
INIT_CONSISTENCY_PROMPT(translation)
];

src/utils/engine.ts

@@ -1,37 +1,57 @@
import { getConfig, OCO_AI_PROVIDER_ENUM } from '../commands/config';
import { AnthropicEngine } from '../engine/anthropic';
import { AzureEngine } from '../engine/azure';
import { AiEngine } from '../engine/Engine';
import { FlowiseEngine } from '../engine/flowise';
import { GeminiEngine } from '../engine/gemini';
import { OllamaEngine } from '../engine/ollama';
import { OpenAiEngine } from '../engine/openAi';
import { MistralAiEngine } from '../engine/mistral';
import { TestAi, TestMockType } from '../engine/testAi';
import { GroqEngine } from '../engine/groq';
import { MLXEngine } from '../engine/mlx';

export function getEngine(): AiEngine {
  const config = getConfig();
  const provider = config.OCO_AI_PROVIDER;

  const DEFAULT_CONFIG = {
    model: config.OCO_MODEL!,
    maxTokensOutput: config.OCO_TOKENS_MAX_OUTPUT!,
    maxTokensInput: config.OCO_TOKENS_MAX_INPUT!,
    baseURL: config.OCO_API_URL!,
    apiKey: config.OCO_API_KEY!
  };

  switch (provider) {
    case OCO_AI_PROVIDER_ENUM.OLLAMA:
      return new OllamaEngine(DEFAULT_CONFIG);
    case OCO_AI_PROVIDER_ENUM.ANTHROPIC:
      return new AnthropicEngine(DEFAULT_CONFIG);
    case OCO_AI_PROVIDER_ENUM.TEST:
      return new TestAi(config.OCO_TEST_MOCK_TYPE as TestMockType);
    case OCO_AI_PROVIDER_ENUM.GEMINI:
      return new GeminiEngine(DEFAULT_CONFIG);
    case OCO_AI_PROVIDER_ENUM.AZURE:
      return new AzureEngine(DEFAULT_CONFIG);
    case OCO_AI_PROVIDER_ENUM.FLOWISE:
      return new FlowiseEngine(DEFAULT_CONFIG);
    case OCO_AI_PROVIDER_ENUM.GROQ:
      return new GroqEngine(DEFAULT_CONFIG);
    case OCO_AI_PROVIDER_ENUM.MISTRAL:
      return new MistralAiEngine(DEFAULT_CONFIG);
    case OCO_AI_PROVIDER_ENUM.MLX:
      return new MLXEngine(DEFAULT_CONFIG);
    default:
      // OpenAI GPT is used when no other provider matches
      return new OpenAiEngine(DEFAULT_CONFIG);
  }
}
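With this factory, switching providers is a config change rather than a code change; a hedged sketch:

// Hypothetical usage sketch: e.g. `oco config set OCO_AI_PROVIDER=mistral`
// (plus OCO_API_KEY / OCO_MODEL), after which any caller simply does:
const engine = getEngine();
const commitMessage = await engine.generateCommitMessage(messages);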

test/e2e/gitPush.test.ts (new file)

@@ -0,0 +1,205 @@
import path from 'path';
import 'cli-testing-library/extend-expect';
import { exec } from 'child_process';
import { prepareTempDir } from './utils';
import { promisify } from 'util';
import { render } from 'cli-testing-library';
import { resolve } from 'path';
import { rm } from 'fs';
const fsExec = promisify(exec);
const fsRemove = promisify(rm);
/**
* git remote -v
*
* [no remotes]
*/
const prepareNoRemoteGitRepository = async (): Promise<{
gitDir: string;
cleanup: () => Promise<void>;
}> => {
const tempDir = await prepareTempDir();
await fsExec('git init test', { cwd: tempDir });
const gitDir = path.resolve(tempDir, 'test');
const cleanup = async () => {
return fsRemove(tempDir, { recursive: true });
};
return {
gitDir,
cleanup
};
};
/**
* git remote -v
*
* origin /tmp/remote.git (fetch)
* origin /tmp/remote.git (push)
*/
const prepareOneRemoteGitRepository = async (): Promise<{
gitDir: string;
cleanup: () => Promise<void>;
}> => {
const tempDir = await prepareTempDir();
await fsExec('git init --bare remote.git', { cwd: tempDir });
await fsExec('git clone remote.git test', { cwd: tempDir });
const gitDir = path.resolve(tempDir, 'test');
const cleanup = async () => {
return fsRemove(tempDir, { recursive: true });
};
return {
gitDir,
cleanup
};
};
/**
* git remote -v
*
* origin /tmp/remote.git (fetch)
* origin /tmp/remote.git (push)
* other ../remote2.git (fetch)
* other ../remote2.git (push)
*/
const prepareTwoRemotesGitRepository = async (): Promise<{
gitDir: string;
cleanup: () => Promise<void>;
}> => {
const tempDir = await prepareTempDir();
await fsExec('git init --bare remote.git', { cwd: tempDir });
await fsExec('git init --bare other.git', { cwd: tempDir });
await fsExec('git clone remote.git test', { cwd: tempDir });
const gitDir = path.resolve(tempDir, 'test');
await fsExec('git remote add other ../other.git', { cwd: gitDir });
const cleanup = async () => {
return fsRemove(tempDir, { recursive: true });
};
return {
gitDir,
cleanup
};
};
describe('cli flow to push git branch', () => {
it('do nothing when OCO_GITPUSH is set to false', async () => {
const { gitDir, cleanup } = await prepareNoRemoteGitRepository();
await render('echo', [`'console.log("Hello World");' > index.ts`], {
cwd: gitDir
});
await render('git', ['add index.ts'], { cwd: gitDir });
const { queryByText, findByText, userEvent } = await render(
`OCO_AI_PROVIDER='test' OCO_GITPUSH='false' node`,
[resolve('./out/cli.cjs')],
{ cwd: gitDir }
);
expect(await findByText('Confirm the commit message?')).toBeInTheConsole();
userEvent.keyboard('[Enter]');
expect(
await queryByText('Choose a remote to push to')
).not.toBeInTheConsole();
expect(
await queryByText('Do you want to run `git push`?')
).not.toBeInTheConsole();
expect(
await queryByText('Successfully pushed all commits to origin')
).not.toBeInTheConsole();
expect(
await queryByText('Command failed with exit code 1')
).not.toBeInTheConsole();
await cleanup();
});
it('push and cause error when there is no remote', async () => {
const { gitDir, cleanup } = await prepareNoRemoteGitRepository();
await render('echo', [`'console.log("Hello World");' > index.ts`], {
cwd: gitDir
});
await render('git', ['add index.ts'], { cwd: gitDir });
const { queryByText, findByText, userEvent } = await render(
`OCO_AI_PROVIDER='test' node`,
[resolve('./out/cli.cjs')],
{ cwd: gitDir }
);
expect(await findByText('Confirm the commit message?')).toBeInTheConsole();
userEvent.keyboard('[Enter]');
expect(
await queryByText('Choose a remote to push to')
).not.toBeInTheConsole();
expect(
await queryByText('Do you want to run `git push`?')
).not.toBeInTheConsole();
expect(
await queryByText('Successfully pushed all commits to origin')
).not.toBeInTheConsole();
expect(
await findByText('Command failed with exit code 1')
).toBeInTheConsole();
await cleanup();
});
it('push when one remote is set', async () => {
const { gitDir, cleanup } = await prepareOneRemoteGitRepository();
await render('echo', [`'console.log("Hello World");' > index.ts`], {
cwd: gitDir
});
await render('git', ['add index.ts'], { cwd: gitDir });
const { findByText, userEvent } = await render(
`OCO_AI_PROVIDER='test' node`,
[resolve('./out/cli.cjs')],
{ cwd: gitDir }
);
expect(await findByText('Confirm the commit message?')).toBeInTheConsole();
userEvent.keyboard('[Enter]');
expect(
await findByText('Do you want to run `git push`?')
).toBeInTheConsole();
userEvent.keyboard('[Enter]');
expect(
await findByText('Successfully pushed all commits to origin')
).toBeInTheConsole();
await cleanup();
});
it('push when two remotes are set', async () => {
const { gitDir, cleanup } = await prepareTwoRemotesGitRepository();
await render('echo', [`'console.log("Hello World");' > index.ts`], {
cwd: gitDir
});
await render('git', ['add index.ts'], { cwd: gitDir });
const { findByText, userEvent } = await render(
`OCO_AI_PROVIDER='test' node`,
[resolve('./out/cli.cjs')],
{ cwd: gitDir }
);
expect(await findByText('Confirm the commit message?')).toBeInTheConsole();
userEvent.keyboard('[Enter]');
expect(await findByText('Choose a remote to push to')).toBeInTheConsole();
userEvent.keyboard('[Enter]');
expect(
await findByText('Successfully pushed all commits to origin')
).toBeInTheConsole();
await cleanup();
});
});
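
Taken together, the four cases pin down the push flow: OCO_GITPUSH=false skips pushing entirely, zero remotes surfaces git's failure, one remote asks for a plain confirmation, and two remotes ask which remote to use. A sketch of that branching, with the prompt and git helpers stubbed out (this is an assumption about the CLI's structure, not its actual source):

```
// Hypothetical sketch of the branching these tests exercise; confirm/select
// stand in for the real prompt calls.
declare function confirm(message: string): Promise<boolean>;
declare function select(message: string, options: string[]): Promise<string>;
declare function gitPush(remote: string): Promise<void>;

async function pushFlowSketch(gitPushEnabled: boolean, remotes: string[]) {
  if (!gitPushEnabled) return; // OCO_GITPUSH=false: no prompts at all
  if (remotes.length > 1) {
    const remote = await select('Choose a remote to push to', remotes);
    await gitPush(remote);
  } else if (await confirm('Do you want to run `git push`?')) {
    await gitPush(remotes[0]); // with no remotes this fails with exit code 1
  }
}
```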


@@ -17,7 +17,7 @@ it('cli flow to generate commit message for 1 new file (staged)', async () => {
expect(await findByText('Confirm the commit message?')).toBeInTheConsole();
userEvent.keyboard('[Enter]');
expect(await findByText('Choose a remote to push to')).toBeInTheConsole();
expect(await findByText('Do you want to run `git push`?')).toBeInTheConsole();
userEvent.keyboard('[Enter]');
expect(await findByText('Successfully pushed all commits to origin')).toBeInTheConsole();
@@ -46,7 +46,7 @@ it('cli flow to generate commit message for 1 changed file (not staged)', async
expect(await findByText('Successfully committed')).toBeInTheConsole();
expect(await findByText('Choose a remote to push to')).toBeInTheConsole();
expect(await findByText('Do you want to run `git push`?')).toBeInTheConsole();
userEvent.keyboard('[Enter]');
expect(await findByText('Successfully pushed all commits to origin')).toBeInTheConsole();


@@ -181,9 +181,7 @@ describe('cli flow to generate commit message using @commitlint prompt-module',
[],
{ cwd: gitDir }
);
expect(
await commitlintGet.findByText('[object Object]')
).toBeInTheConsole();
expect(await commitlintGet.findByText('consistency')).toBeInTheConsole();
// Run 'oco' using .opencommit-commitlint
await render('echo', [`'console.log("Hello World");' > index.ts`], {
@@ -211,7 +209,7 @@ describe('cli flow to generate commit message using @commitlint prompt-module',
oco.userEvent.keyboard('[Enter]');
expect(
await oco.findByText('Choose a remote to push to')
await oco.findByText('Do you want to run `git push`?')
).toBeInTheConsole();
oco.userEvent.keyboard('[Enter]');


@@ -15,7 +15,7 @@ export const prepareEnvironment = async (): Promise<{
gitDir: string;
cleanup: () => Promise<void>;
}> => {
const tempDir = await fsMakeTempDir(path.join(tmpdir(), 'opencommit-test-'));
const tempDir = await prepareTempDir();
// Create a remote git repository in the temp directory. This is necessary to execute the `git push` command
await fsExec('git init --bare remote.git', { cwd: tempDir });
await fsExec('git clone remote.git test', { cwd: tempDir });
@@ -30,4 +30,8 @@ export const prepareEnvironment = async (): Promise<{
}
}
export const prepareTempDir = async (): Promise<string> => {
return await fsMakeTempDir(path.join(tmpdir(), 'opencommit-test-'));
}
export const wait = (ms: number) => new Promise(resolve => setTimeout(resolve, ms));
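
The refactor above extracts prepareTempDir so prepareEnvironment and the new gitPush fixtures share one temp-directory convention, and adds a small wait helper. Illustrative usage:

```
// Illustrative usage of the extracted helpers.
const tempDir = await prepareTempDir(); // e.g. /tmp/opencommit-test-XXXXXX
await wait(500); // pause 500 ms, e.g. while the CLI flushes its output
```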


@@ -1,8 +1,18 @@
import { getConfig } from '../../src/commands/config';
import { existsSync, readFileSync, rmSync } from 'fs';
import {
CONFIG_KEYS,
DEFAULT_CONFIG,
getConfig,
setConfig
} from '../../src/commands/config';
import { prepareFile } from './utils';
import { dirname } from 'path';
describe('getConfig', () => {
describe('config', () => {
const originalEnv = { ...process.env };
let globalConfigFile: { filePath: string; cleanup: () => Promise<void> };
let envConfigFile: { filePath: string; cleanup: () => Promise<void> };
function resetEnv(env: NodeJS.ProcessEnv) {
Object.keys(process.env).forEach((key) => {
if (!(key in env)) {
@@ -13,93 +23,281 @@ describe('getConfig', () => {
});
}
beforeEach(() => {
beforeEach(async () => {
resetEnv(originalEnv);
if (globalConfigFile) await globalConfigFile.cleanup();
if (envConfigFile) await envConfigFile.cleanup();
});
afterEach(async () => {
if (globalConfigFile) await globalConfigFile.cleanup();
if (envConfigFile) await envConfigFile.cleanup();
});
afterAll(() => {
resetEnv(originalEnv);
});
it('return config values from the global config file', async () => {
const configFile = await prepareFile(
'.opencommit',
`
OCO_OPENAI_API_KEY="sk-key"
OCO_ANTHROPIC_API_KEY="secret-key"
OCO_TOKENS_MAX_INPUT="8192"
OCO_TOKENS_MAX_OUTPUT="1000"
OCO_OPENAI_BASE_PATH="/openai/api"
OCO_DESCRIPTION="true"
OCO_EMOJI="true"
OCO_MODEL="gpt-4"
OCO_LANGUAGE="de"
OCO_MESSAGE_TEMPLATE_PLACEHOLDER="$m"
OCO_PROMPT_MODULE="@commitlint"
OCO_AI_PROVIDER="ollama"
OCO_GITPUSH="false"
OCO_ONE_LINE_COMMIT="true"
`
);
const config = getConfig({ configPath: configFile.filePath, envPath: '' });
const generateConfig = async (
fileName: string,
content: Record<string, string>
) => {
const fileContent = Object.entries(content)
.map(([key, value]) => `${key}="${value}"`)
.join('\n');
return await prepareFile(fileName, fileContent);
};
expect(config).not.toEqual(null);
expect(config!['OCO_OPENAI_API_KEY']).toEqual('sk-key');
expect(config!['OCO_ANTHROPIC_API_KEY']).toEqual('secret-key');
expect(config!['OCO_TOKENS_MAX_INPUT']).toEqual(8192);
expect(config!['OCO_TOKENS_MAX_OUTPUT']).toEqual(1000);
expect(config!['OCO_OPENAI_BASE_PATH']).toEqual('/openai/api');
expect(config!['OCO_DESCRIPTION']).toEqual(true);
expect(config!['OCO_EMOJI']).toEqual(true);
expect(config!['OCO_MODEL']).toEqual('gpt-4');
expect(config!['OCO_LANGUAGE']).toEqual('de');
expect(config!['OCO_MESSAGE_TEMPLATE_PLACEHOLDER']).toEqual('$m');
expect(config!['OCO_PROMPT_MODULE']).toEqual('@commitlint');
expect(['ollama', 'gemini']).toContain(config!['OCO_AI_PROVIDER']);
expect(config!['OCO_GITPUSH']).toEqual(false);
expect(config!['OCO_ONE_LINE_COMMIT']).toEqual(true);
describe('getConfig', () => {
it('should prioritize local .env over global .opencommit config', async () => {
globalConfigFile = await generateConfig('.opencommit', {
OCO_API_KEY: 'global-key',
OCO_MODEL: 'gpt-3.5-turbo',
OCO_LANGUAGE: 'en'
});
await configFile.cleanup();
envConfigFile = await generateConfig('.env', {
OCO_API_KEY: 'local-key',
OCO_LANGUAGE: 'fr'
});
const config = getConfig({
globalPath: globalConfigFile.filePath,
envPath: envConfigFile.filePath
});
expect(config).not.toEqual(null);
expect(config.OCO_API_KEY).toEqual('local-key');
expect(config.OCO_MODEL).toEqual('gpt-3.5-turbo');
expect(config.OCO_LANGUAGE).toEqual('fr');
});
it('should fallback to global config when local config is not set', async () => {
globalConfigFile = await generateConfig('.opencommit', {
OCO_API_KEY: 'global-key',
OCO_MODEL: 'gpt-4',
OCO_LANGUAGE: 'de',
OCO_DESCRIPTION: 'true'
});
envConfigFile = await generateConfig('.env', {
OCO_API_URL: 'local-api-url'
});
const config = getConfig({
globalPath: globalConfigFile.filePath,
envPath: envConfigFile.filePath
});
expect(config).not.toEqual(null);
expect(config.OCO_API_KEY).toEqual('global-key');
expect(config.OCO_API_URL).toEqual('local-api-url');
expect(config.OCO_MODEL).toEqual('gpt-4');
expect(config.OCO_LANGUAGE).toEqual('de');
expect(config.OCO_DESCRIPTION).toEqual(true);
});
it('should handle boolean and numeric values correctly', async () => {
globalConfigFile = await generateConfig('.opencommit', {
OCO_TOKENS_MAX_INPUT: '4096',
OCO_TOKENS_MAX_OUTPUT: '500',
OCO_GITPUSH: 'true'
});
envConfigFile = await generateConfig('.env', {
OCO_TOKENS_MAX_INPUT: '8192',
OCO_ONE_LINE_COMMIT: 'false'
});
const config = getConfig({
globalPath: globalConfigFile.filePath,
envPath: envConfigFile.filePath
});
expect(config).not.toEqual(null);
expect(config.OCO_TOKENS_MAX_INPUT).toEqual(8192);
expect(config.OCO_TOKENS_MAX_OUTPUT).toEqual(500);
expect(config.OCO_GITPUSH).toEqual(true);
expect(config.OCO_ONE_LINE_COMMIT).toEqual(false);
});
it('should handle empty local config correctly', async () => {
globalConfigFile = await generateConfig('.opencommit', {
OCO_API_KEY: 'global-key',
OCO_MODEL: 'gpt-4',
OCO_LANGUAGE: 'es'
});
envConfigFile = await generateConfig('.env', {});
const config = getConfig({
globalPath: globalConfigFile.filePath,
envPath: envConfigFile.filePath
});
expect(config).not.toEqual(null);
expect(config.OCO_API_KEY).toEqual('global-key');
expect(config.OCO_MODEL).toEqual('gpt-4');
expect(config.OCO_LANGUAGE).toEqual('es');
});
it('should override global config with null values in local .env', async () => {
globalConfigFile = await generateConfig('.opencommit', {
OCO_API_KEY: 'global-key',
OCO_MODEL: 'gpt-4',
OCO_LANGUAGE: 'es'
});
envConfigFile = await generateConfig('.env', {
OCO_API_KEY: 'null'
});
const config = getConfig({
globalPath: globalConfigFile.filePath,
envPath: envConfigFile.filePath
});
expect(config).not.toEqual(null);
expect(config.OCO_API_KEY).toEqual(null);
});
it('should handle empty global config', async () => {
globalConfigFile = await generateConfig('.opencommit', {});
envConfigFile = await generateConfig('.env', {});
const config = getConfig({
globalPath: globalConfigFile.filePath,
envPath: envConfigFile.filePath
});
expect(config).not.toEqual(null);
expect(config.OCO_API_KEY).toEqual(undefined);
});
});
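
The getConfig cases above encode one precedence rule: values from the local .env shadow values from the global .opencommit key by key, the string 'null' parses to null and still shadows, and untouched keys fall through to the global file or stay undefined. In effect (a sketch of the rule, not the actual implementation):

```
// Sketch of the per-key precedence the tests assert: local .env wins.
const globalConfig = { OCO_API_KEY: 'global-key', OCO_MODEL: 'gpt-4' };
const envConfig = { OCO_API_KEY: 'local-key' };

const effective = { ...globalConfig, ...envConfig };
// -> { OCO_API_KEY: 'local-key', OCO_MODEL: 'gpt-4' }
```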
it('return config values from the local env file', async () => {
const envFile = await prepareFile(
'.env',
`
OCO_OPENAI_API_KEY="sk-key"
OCO_ANTHROPIC_API_KEY="secret-key"
OCO_TOKENS_MAX_INPUT="8192"
OCO_TOKENS_MAX_OUTPUT="1000"
OCO_OPENAI_BASE_PATH="/openai/api"
OCO_DESCRIPTION="true"
OCO_EMOJI="true"
OCO_MODEL="gpt-4"
OCO_LANGUAGE="de"
OCO_MESSAGE_TEMPLATE_PLACEHOLDER="$m"
OCO_PROMPT_MODULE="@commitlint"
OCO_AI_PROVIDER="ollama"
OCO_GITPUSH="false"
OCO_ONE_LINE_COMMIT="true"
`
);
const config = getConfig({ configPath: '', envPath: envFile.filePath });
describe('setConfig', () => {
beforeEach(async () => {
// create then delete the file so the parent directory exists but the file itself does not, letting us verify that setConfig creates it
globalConfigFile = await generateConfig('.opencommit', {});
rmSync(globalConfigFile.filePath);
});
expect(config).not.toEqual(null);
expect(config!['OCO_OPENAI_API_KEY']).toEqual('sk-key');
expect(config!['OCO_ANTHROPIC_API_KEY']).toEqual('secret-key');
expect(config!['OCO_TOKENS_MAX_INPUT']).toEqual(8192);
expect(config!['OCO_TOKENS_MAX_OUTPUT']).toEqual(1000);
expect(config!['OCO_OPENAI_BASE_PATH']).toEqual('/openai/api');
expect(config!['OCO_DESCRIPTION']).toEqual(true);
expect(config!['OCO_EMOJI']).toEqual(true);
expect(config!['OCO_MODEL']).toEqual('gpt-4');
expect(config!['OCO_LANGUAGE']).toEqual('de');
expect(config!['OCO_MESSAGE_TEMPLATE_PLACEHOLDER']).toEqual('$m');
expect(config!['OCO_PROMPT_MODULE']).toEqual('@commitlint');
expect(['ollama', 'gemini']).toContain(config!['OCO_AI_PROVIDER']);
expect(config!['OCO_GITPUSH']).toEqual(false);
expect(config!['OCO_ONE_LINE_COMMIT']).toEqual(true);
it('should create .opencommit file with DEFAULT CONFIG if it does not exist on first setConfig run', async () => {
const isGlobalConfigFileExist = existsSync(globalConfigFile.filePath);
expect(isGlobalConfigFileExist).toBe(false);
await envFile.cleanup();
await setConfig(
[[CONFIG_KEYS.OCO_API_KEY, 'persisted-key_1']],
globalConfigFile.filePath
);
const fileContent = readFileSync(globalConfigFile.filePath, 'utf8');
expect(fileContent).toContain('OCO_API_KEY=persisted-key_1');
Object.entries(DEFAULT_CONFIG).forEach(([key, value]) => {
expect(fileContent).toContain(`${key}=${value}`);
});
});
it('should set new config values', async () => {
globalConfigFile = await generateConfig('.opencommit', {});
await setConfig(
[
[CONFIG_KEYS.OCO_API_KEY, 'new-key'],
[CONFIG_KEYS.OCO_MODEL, 'gpt-4']
],
globalConfigFile.filePath
);
const config = getConfig({
globalPath: globalConfigFile.filePath
});
expect(config.OCO_API_KEY).toEqual('new-key');
expect(config.OCO_MODEL).toEqual('gpt-4');
});
it('should update existing config values', async () => {
globalConfigFile = await generateConfig('.opencommit', {
OCO_API_KEY: 'initial-key'
});
await setConfig(
[[CONFIG_KEYS.OCO_API_KEY, 'updated-key']],
globalConfigFile.filePath
);
const config = getConfig({
globalPath: globalConfigFile.filePath
});
expect(config.OCO_API_KEY).toEqual('updated-key');
});
it('should handle boolean and numeric values correctly', async () => {
globalConfigFile = await generateConfig('.opencommit', {});
await setConfig(
[
[CONFIG_KEYS.OCO_TOKENS_MAX_INPUT, '8192'],
[CONFIG_KEYS.OCO_DESCRIPTION, 'true'],
[CONFIG_KEYS.OCO_ONE_LINE_COMMIT, 'false']
],
globalConfigFile.filePath
);
const config = getConfig({
globalPath: globalConfigFile.filePath
});
expect(config.OCO_TOKENS_MAX_INPUT).toEqual(8192);
expect(config.OCO_DESCRIPTION).toEqual(true);
expect(config.OCO_ONE_LINE_COMMIT).toEqual(false);
});
it('should throw an error for unsupported config keys', async () => {
globalConfigFile = await generateConfig('.opencommit', {});
try {
await setConfig(
[['UNSUPPORTED_KEY', 'value']],
globalConfigFile.filePath
);
throw new Error('NEVER_REACHED');
} catch (error) {
expect(error.message).toContain(
'Unsupported config key: UNSUPPORTED_KEY'
);
expect(error.message).not.toContain('NEVER_REACHED');
}
});
it('should persist changes to the config file', async () => {
const isGlobalConfigFileExist = existsSync(globalConfigFile.filePath);
expect(isGlobalConfigFileExist).toBe(false);
await setConfig(
[[CONFIG_KEYS.OCO_API_KEY, 'persisted-key']],
globalConfigFile.filePath
);
const fileContent = readFileSync(globalConfigFile.filePath, 'utf8');
expect(fileContent).toContain('OCO_API_KEY=persisted-key');
});
it('should set multiple configs in a row and keep the changes', async () => {
const isGlobalConfigFileExist = existsSync(globalConfigFile.filePath);
expect(isGlobalConfigFileExist).toBe(false);
await setConfig(
[[CONFIG_KEYS.OCO_API_KEY, 'persisted-key']],
globalConfigFile.filePath
);
const fileContent1 = readFileSync(globalConfigFile.filePath, 'utf8');
expect(fileContent1).toContain('OCO_API_KEY=persisted-key');
await setConfig(
[[CONFIG_KEYS.OCO_MODEL, 'gpt-4']],
globalConfigFile.filePath
);
const fileContent2 = readFileSync(globalConfigFile.filePath, 'utf8');
expect(fileContent2).toContain('OCO_MODEL=gpt-4');
});
});
});
});
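
The setConfig suite pins down the write semantics: a missing global file is created and seeded with DEFAULT_CONFIG, accepted keys are upserted as KEY=value lines that survive subsequent calls, values round-trip through the same boolean/number coercion getConfig applies, and unknown keys throw. A hedged sketch of that upsert behaviour (the real implementation lives in src/commands/config):

```
// Hypothetical sketch of the upsert behaviour the suite verifies; the
// defaults and the set of supported keys are passed in to stay standalone.
import { existsSync, readFileSync, writeFileSync } from 'fs';

function setConfigSketch(
  pairs: Array<[string, string]>,
  filePath: string,
  defaults: Record<string, unknown>,
  supportedKeys: Set<string>
) {
  // first run: seed the file so the defaults are always present
  const current = existsSync(filePath)
    ? readFileSync(filePath, 'utf8')
    : Object.entries(defaults).map(([k, v]) => `${k}=${v}`).join('\n');

  const entries = new Map(
    current.split('\n').filter(Boolean).map((l) => l.split('=') as [string, string])
  );
  for (const [key, value] of pairs) {
    if (!supportedKeys.has(key)) throw new Error(`Unsupported config key: ${key}`);
    entries.set(key, value); // upsert: later writes keep earlier keys intact
  }
  writeFileSync(filePath, [...entries].map(([k, v]) => `${k}=${v}`).join('\n'));
}
```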


@@ -1,80 +1,69 @@
import { Gemini } from '../../src/engine/gemini';
import { ChatCompletionRequestMessage } from 'openai';
import { GeminiEngine } from '../../src/engine/gemini';
import { GenerativeModel, GoogleGenerativeAI } from '@google/generative-ai';
import { ConfigType, getConfig } from '../../src/commands/config';
import {
ConfigType,
getConfig,
OCO_AI_PROVIDER_ENUM
} from '../../src/commands/config';
import { OpenAI } from 'openai';
describe('Gemini', () => {
let gemini: Gemini;
let gemini: GeminiEngine;
let mockConfig: ConfigType;
let mockGoogleGenerativeAi: GoogleGenerativeAI;
let mockGenerativeModel: GenerativeModel;
let mockExit: jest.SpyInstance<never, [code?: number | undefined], any>;
let mockWarmup: jest.SpyInstance<any, unknown[], any>;
const noop: (code?: number | undefined) => never = (code?: number | undefined) => {};
const noop: (...args: any[]) => any = (...args: any[]) => {};
const mockGemini = () => {
gemini = new Gemini();
}
mockConfig = getConfig() as ConfigType;
gemini = new GeminiEngine({
apiKey: mockConfig.OCO_API_KEY,
model: mockConfig.OCO_MODEL
});
};
const oldEnv = process.env;
beforeEach(() => {
jest.resetModules();
process.env = { ...oldEnv };
jest.mock('@google/generative-ai');
jest.mock('../../src/commands/config');
jest.mock('@clack/prompts', () => ({
intro: jest.fn(),
outro: jest.fn(),
outro: jest.fn()
}));
if (mockWarmup) mockWarmup.mockRestore();
mockExit = jest.spyOn(process, 'exit').mockImplementation();
mockConfig = getConfig() as ConfigType;
mockConfig.OCO_AI_PROVIDER = 'gemini';
mockConfig.OCO_GEMINI_API_KEY = 'mock-api-key';
mockConfig.OCO_AI_PROVIDER = OCO_AI_PROVIDER_ENUM.GEMINI;
mockConfig.OCO_API_KEY = 'mock-api-key';
mockConfig.OCO_MODEL = 'gemini-1.5-flash';
mockGoogleGenerativeAi = new GoogleGenerativeAI(mockConfig.OCO_GEMINI_API_KEY);
mockGenerativeModel = mockGoogleGenerativeAi.getGenerativeModel({ model: mockConfig.OCO_MODEL, });
mockGoogleGenerativeAi = new GoogleGenerativeAI(mockConfig.OCO_API_KEY);
mockGenerativeModel = mockGoogleGenerativeAi.getGenerativeModel({
model: mockConfig.OCO_MODEL
});
});
afterEach(() => {
gemini = undefined as any;
})
});
afterAll(() => {
mockExit.mockRestore();
process.env = oldEnv;
});
it('should initialize with correct config', () => {
mockGemini();
expect(gemini).toBeDefined();
});
it('should warmup correctly', () => {
mockWarmup = jest.spyOn(Gemini.prototype as any, 'warmup').mockImplementation(noop);
mockGemini();
expect(gemini).toBeDefined();
});
it('should exit process if OCO_GEMINI_API_KEY is not set and command is not config', () => {
process.env.OCO_GEMINI_API_KEY = undefined;
process.env.OCO_AI_PROVIDER = 'gemini';
mockGemini();
expect(mockExit).toHaveBeenCalledWith(1);
});
it('should exit process if model is not supported and command is not config', () => {
it.skip('should exit process if OCO_GEMINI_API_KEY is not set and command is not config', () => {
process.env.OCO_GEMINI_API_KEY = undefined;
process.env.OCO_AI_PROVIDER = 'gemini';
@@ -82,24 +71,26 @@ describe('Gemini', () => {
expect(mockExit).toHaveBeenCalledWith(1);
});
it('should generate commit message', async () => {
const mockGenerateContent = jest.fn().mockResolvedValue({ response: { text: () => 'generated content' } });
const mockGenerateContent = jest
.fn()
.mockResolvedValue({ response: { text: () => 'generated content' } });
mockGenerativeModel.generateContent = mockGenerateContent;
mockWarmup = jest.spyOn(Gemini.prototype as any, 'warmup').mockImplementation(noop);
mockGemini();
const messages: ChatCompletionRequestMessage[] = [
{ role: 'system', content: 'system message' },
{ role: 'assistant', content: 'assistant message' },
];
jest.spyOn(gemini, 'generateCommitMessage').mockImplementation(async () => 'generated content');
const messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam> =
[
{ role: 'system', content: 'system message' },
{ role: 'assistant', content: 'assistant message' }
];
jest
.spyOn(gemini, 'generateCommitMessage')
.mockImplementation(async () => 'generated content');
const result = await gemini.generateCommitMessage(messages);
expect(result).toEqual('generated content');
expect(mockWarmup).toHaveBeenCalled();
});
});
});
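
The migration above replaces the old Gemini class with GeminiEngine, moves credentials from OCO_GEMINI_API_KEY to the unified OCO_API_KEY, and types chat messages with the openai package's ChatCompletionMessageParam. Stubbing the Google SDK so no network call happens follows the usual jest pattern (a sketch; only generateContent and response.text mirror @google/generative-ai's API):

```
// Sketch: a stubbed generative model for unit tests.
const mockGenerateContent = jest.fn().mockResolvedValue({
  response: { text: () => 'generated content' }
});
const model = {
  generateContent: mockGenerateContent
} as unknown as GenerativeModel;

const { response } = await model.generateContent('prompt');
expect(response.text()).toEqual('generated content');
```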


@@ -1,7 +1,7 @@
import path from 'path';
import { mkdtemp, rm, writeFile } from 'fs';
import { promisify } from 'util';
import { existsSync, mkdtemp, rm, writeFile } from 'fs';
import { tmpdir } from 'os';
import path from 'path';
import { promisify } from 'util';
const fsMakeTempDir = promisify(mkdtemp);
const fsRemove = promisify(rm);
const fsWriteFile = promisify(writeFile);
@@ -20,8 +20,11 @@ export async function prepareFile(
const filePath = path.resolve(tempDir, fileName);
await fsWriteFile(filePath, content);
const cleanup = async () => {
return fsRemove(tempDir, { recursive: true });
if (existsSync(tempDir)) {
await fsRemove(tempDir, { recursive: true });
}
};
return {
filePath,
cleanup


@@ -4,7 +4,7 @@
"lib": ["ES6", "ES2020"],
"module": "CommonJS",
"resolveJsonModule": true,
"moduleResolution": "Node",
@@ -21,9 +21,7 @@
"skipLibCheck": true
},
"include": [
"test/jest-setup.ts"
],
"include": ["test/jest-setup.ts"],
"exclude": ["node_modules"],
"ts-node": {
"esm": true,